sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
crewAIInc/crewAI:lib/crewai/src/crewai/a2a/auth/utils.py | """Authentication utilities for A2A protocol agent communication.
Provides validation and retry logic for various authentication schemes including
OAuth2, API keys, and HTTP authentication methods.
"""
import asyncio
from collections.abc import Awaitable, Callable, MutableMapping
import hashlib
import re
import threading
from typing import Final, Literal, cast
from a2a.client.errors import A2AClientHTTPError
from a2a.types import (
APIKeySecurityScheme,
AgentCard,
HTTPAuthSecurityScheme,
OAuth2SecurityScheme,
)
from httpx import AsyncClient, Response
from crewai.a2a.auth.client_schemes import (
APIKeyAuth,
BearerTokenAuth,
ClientAuthScheme,
HTTPBasicAuth,
HTTPDigestAuth,
OAuth2AuthorizationCode,
OAuth2ClientCredentials,
)
class _AuthStore:
    """Thread-safe registry of client authentication schemes.

    Reads and writes are serialized through a re-entrant lock so concurrent
    callers never observe a partially-updated mapping. Keys are expected to
    come from :meth:`compute_key`.
    """

    def __init__(self) -> None:
        self._lock = threading.RLock()
        self._store: dict[str, ClientAuthScheme | None] = {}

    @staticmethod
    def compute_key(auth_type: str, auth_data: str) -> str:
        """Compute a collision-resistant key using SHA-256."""
        return hashlib.sha256(f"{auth_type}:{auth_data}".encode()).hexdigest()

    def set(self, key: str, auth: ClientAuthScheme | None) -> None:
        """Store an auth scheme under *key*."""
        # Delegates to __setitem__; RLock makes the nested acquire safe.
        self[key] = auth

    def get(self, key: str) -> ClientAuthScheme | None:
        """Return the auth scheme for *key*, or None when absent."""
        with self._lock:
            return self._store.get(key)

    def __setitem__(self, key: str, value: ClientAuthScheme | None) -> None:
        with self._lock:
            self._store[key] = value

    def __getitem__(self, key: str) -> ClientAuthScheme | None:
        # Unlike get(), raises KeyError for unknown keys.
        with self._lock:
            return self._store[key]
# Module-level singleton caching client auth schemes keyed by SHA-256 digests.
_auth_store = _AuthStore()

# Matches one "<scheme> <params>" challenge in a WWW-Authenticate header; the
# lazy group stops just before the next ", <scheme> " token or end of string.
_SCHEME_PATTERN: Final[re.Pattern[str]] = re.compile(r"(\w+)\s+(.+?)(?=,\s*\w+\s+|$)")
# Matches one key=value challenge parameter; value may be quoted or bare.
_PARAM_PATTERN: Final[re.Pattern[str]] = re.compile(r'(\w+)=(?:"([^"]*)"|([^\s,]+))')

# AgentCard security-scheme type -> client auth classes that can satisfy it.
_SCHEME_AUTH_MAPPING: Final[dict[type, tuple[type[ClientAuthScheme], ...]]] = {
    OAuth2SecurityScheme: (
        OAuth2ClientCredentials,
        OAuth2AuthorizationCode,
        BearerTokenAuth,
    ),
    APIKeySecurityScheme: (APIKeyAuth,),
}

# Lower-cased HTTP auth scheme names recognized from HTTPAuthSecurityScheme.scheme.
_HTTPSchemeType = Literal["basic", "digest", "bearer"]
# HTTP auth scheme name -> client auth class implementing it.
_HTTP_SCHEME_MAPPING: Final[dict[_HTTPSchemeType, type[ClientAuthScheme]]] = {
    "basic": HTTPBasicAuth,
    "digest": HTTPDigestAuth,
    "bearer": BearerTokenAuth,
}
def _raise_auth_mismatch(
    expected_classes: type[ClientAuthScheme] | tuple[type[ClientAuthScheme], ...],
    provided_auth: ClientAuthScheme,
) -> None:
    """Raise a 401 error describing the expected vs. provided auth scheme.

    Args:
        expected_classes: The acceptable auth class, or a tuple of them.
        provided_auth: The auth instance the caller actually supplied.

    Raises:
        A2AClientHTTPError: Always, with status code 401.
    """
    # Render the expected side: a single class name, or an "one of (...)" list.
    if not isinstance(expected_classes, tuple):
        required = expected_classes.__name__
    elif len(expected_classes) == 1:
        required = expected_classes[0].__name__
    else:
        required = f"one of ({', '.join(cls.__name__ for cls in expected_classes)})"
    raise A2AClientHTTPError(
        401,
        f"AgentCard requires {required} authentication, "
        f"but {type(provided_auth).__name__} was provided",
    )
def parse_www_authenticate(header_value: str) -> dict[str, dict[str, str]]:
    """Parse a WWW-Authenticate header into per-scheme challenge parameters.

    Args:
        header_value: The WWW-Authenticate header value.

    Returns:
        Dictionary mapping auth scheme to its parameters.
        Example: {"Bearer": {"realm": "api", "scope": "read write"}}
    """
    if not header_value:
        return {}
    # One challenge is "<scheme> <params>"; params run lazily until the next
    # ", <scheme> " token or the end of the header.
    scheme_re = re.compile(r"(\w+)\s+(.+?)(?=,\s*\w+\s+|$)")
    # A parameter is key=value, with the value either quoted or bare.
    param_re = re.compile(r'(\w+)=(?:"([^"]*)"|([^\s,]+))')
    return {
        challenge.group(1): {
            param.group(1): param.group(2) or param.group(3)
            for param in param_re.finditer(challenge.group(2))
        }
        for challenge in scheme_re.finditer(header_value)
    }
def validate_auth_against_agent_card(
    agent_card: AgentCard, auth: ClientAuthScheme | None
) -> None:
    """Validate that provided auth matches AgentCard security requirements.

    Args:
        agent_card: The A2A AgentCard containing security requirements.
        auth: User-provided authentication scheme (or None).

    Raises:
        A2AClientHTTPError: If auth doesn't match AgentCard requirements (status_code=401).
    """
    # Cards declaring no security (or no scheme definitions) need no auth.
    if not (agent_card.security and agent_card.security_schemes):
        return
    if not auth:
        raise A2AClientHTTPError(
            401, "AgentCard requires authentication but no auth scheme provided"
        )
    # Only the first security requirement object is considered.
    for scheme_name in agent_card.security[0].keys():
        wrapper = agent_card.security_schemes.get(scheme_name)
        if not wrapper:
            continue
        scheme = wrapper.root
        allowed_classes = _SCHEME_AUTH_MAPPING.get(type(scheme))
        if allowed_classes:
            if not isinstance(auth, allowed_classes):
                _raise_auth_mismatch(allowed_classes, auth)
            return
        if isinstance(scheme, HTTPAuthSecurityScheme):
            required_class = _HTTP_SCHEME_MAPPING.get(
                cast(_HTTPSchemeType, scheme.scheme.lower())
            )
            if required_class:
                if not isinstance(auth, required_class):
                    _raise_auth_mismatch(required_class, auth)
                return
    # No recognized scheme matched anything we know how to validate.
    raise A2AClientHTTPError(
        401, "Could not validate auth against AgentCard security requirements"
    )
async def retry_on_401(
    request_func: Callable[[], Awaitable[Response]],
    auth_scheme: ClientAuthScheme | None,
    client: AsyncClient,
    headers: MutableMapping[str, str],
    max_retries: int = 3,
) -> Response:
    """Retry a request on 401 authentication error.

    Handles 401 errors by:
    1. Parsing WWW-Authenticate header
    2. Re-acquiring credentials
    3. Retrying the request

    Args:
        request_func: Async function that makes the HTTP request.
        auth_scheme: Authentication scheme to refresh credentials with.
        client: HTTP client for making requests.
        headers: Request headers to update with new auth (mutated in place).
        max_retries: Maximum number of retry attempts (default: 3).

    Returns:
        HTTP response from the request.

    Raises:
        httpx.HTTPStatusError: If retries are exhausted or auth scheme is None.
        RuntimeError: If max_retries <= 0, so no request was ever made.
    """
    last_response: Response | None = None
    last_challenges: dict[str, dict[str, str]] = {}
    for attempt in range(max_retries):
        response = await request_func()
        if response.status_code != 401:
            # Anything other than 401 (including other errors) is returned as-is.
            return response
        last_response = response
        if auth_scheme is None:
            # No credentials to refresh, so surface the 401 immediately.
            response.raise_for_status()
            return response
        www_authenticate = response.headers.get("WWW-Authenticate", "")
        challenges = parse_www_authenticate(www_authenticate)
        last_challenges = challenges
        if attempt >= max_retries - 1:
            # Out of attempts; fall through to raise from last_response.
            break
        # Exponential backoff (1s, 2s, 4s, ...) before refreshing credentials.
        backoff_time = 2**attempt
        await asyncio.sleep(backoff_time)
        # Refresh credentials and update `headers` before the next attempt.
        await auth_scheme.apply_auth(client, headers)
    if last_response:
        # All attempts returned 401: raise httpx.HTTPStatusError.
        last_response.raise_for_status()
        return last_response
    # Only reachable when the loop body never ran (max_retries <= 0).
    msg = "retry_on_401 failed without making any requests"
    if last_challenges:
        challenge_info = ", ".join(
            f"{scheme} (realm={params.get('realm', 'N/A')})"
            for scheme, params in last_challenges.items()
        )
        msg = f"{msg}. Server challenges: {challenge_info}"
    raise RuntimeError(msg)
def configure_auth_client(
    auth: HTTPDigestAuth | APIKeyAuth, client: AsyncClient
) -> None:
    """Apply auth-specific settings to an HTTP client.

    Of the supported schemes, only HTTPDigestAuth and APIKeyAuth carry
    client-level configuration; other schemes require no client setup.

    Args:
        auth: Authentication scheme whose settings should be applied.
        client: The httpx AsyncClient to configure in place.
    """
    auth.configure_client(client)
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/a2a/auth/utils.py",
"license": "MIT License",
"lines": 208,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai/src/crewai/a2a/config.py | """A2A configuration types.
This module is separate from experimental.a2a to avoid circular imports.
"""
from __future__ import annotations
from pathlib import Path
from typing import Any, ClassVar, Literal, cast
import warnings
from pydantic import (
BaseModel,
ConfigDict,
Field,
FilePath,
PrivateAttr,
SecretStr,
model_validator,
)
from typing_extensions import Self, deprecated
from crewai.a2a.auth.client_schemes import ClientAuthScheme
from crewai.a2a.auth.server_schemes import ServerAuthScheme
from crewai.a2a.extensions.base import ValidatedA2AExtension
from crewai.a2a.types import ProtocolVersion, TransportType, Url
try:
from a2a.types import (
AgentCapabilities,
AgentCardSignature,
AgentInterface,
AgentProvider,
AgentSkill,
SecurityScheme,
)
from crewai.a2a.extensions.server import ServerExtension
from crewai.a2a.updates import UpdateConfig
except ImportError:
UpdateConfig: Any = Any # type: ignore[no-redef]
AgentCapabilities: Any = Any # type: ignore[no-redef]
AgentCardSignature: Any = Any # type: ignore[no-redef]
AgentInterface: Any = Any # type: ignore[no-redef]
AgentProvider: Any = Any # type: ignore[no-redef]
SecurityScheme: Any = Any # type: ignore[no-redef]
AgentSkill: Any = Any # type: ignore[no-redef]
ServerExtension: Any = Any # type: ignore[no-redef]
def _get_default_update_config() -> UpdateConfig:
    """Return the default update mechanism config (streaming).

    The import is deferred to call time to avoid a circular import with
    crewai.a2a.updates.
    """
    from crewai.a2a.updates import StreamingConfig

    return StreamingConfig()
# JWS signing algorithms accepted for AgentCard signing: RSA (RS*), ECDSA
# (ES*), and RSA-PSS (PS*) families at 256/384/512-bit digest strengths.
SigningAlgorithm = Literal[
    "RS256",
    "RS384",
    "RS512",
    "ES256",
    "ES384",
    "ES512",
    "PS256",
    "PS384",
    "PS512",
]
class AgentCardSigningConfig(BaseModel):
    """Configuration for AgentCard JWS signing.

    Provides the private key and algorithm settings for signing AgentCards.
    Either private_key_path or private_key_pem must be provided, but not both.

    Attributes:
        private_key_path: Path to a PEM-encoded private key file.
        private_key_pem: PEM-encoded private key as a secret string.
        key_id: Optional key identifier for the JWS header (kid claim).
        algorithm: Signing algorithm (RS256, ES256, PS256, etc.).
    """

    # Reject unknown keys so configuration typos fail loudly.
    model_config: ClassVar[ConfigDict] = ConfigDict(extra="forbid")

    private_key_path: FilePath | None = Field(
        default=None,
        description="Path to PEM-encoded private key file",
    )
    private_key_pem: SecretStr | None = Field(
        default=None,
        description="PEM-encoded private key",
    )
    key_id: str | None = Field(
        default=None,
        description="Key identifier for JWS header (kid claim)",
    )
    algorithm: SigningAlgorithm = Field(
        default="RS256",
        description="Signing algorithm (RS256, ES256, PS256, etc.)",
    )

    @model_validator(mode="after")
    def _validate_key_source(self) -> Self:
        """Ensure exactly one key source is provided."""
        has_path = self.private_key_path is not None
        has_pem = self.private_key_pem is not None
        # Reject both ambiguous cases: no key at all, or two competing keys.
        if not has_path and not has_pem:
            raise ValueError(
                "Either private_key_path or private_key_pem must be provided"
            )
        if has_path and has_pem:
            raise ValueError(
                "Only one of private_key_path or private_key_pem should be provided"
            )
        return self

    def get_private_key(self) -> str:
        """Get the private key content.

        Returns:
            The PEM-encoded private key as a string.

        Raises:
            ValueError: If no key source is set (normally prevented by the
                model validator above).
        """
        # Inline PEM wins over a file path; SecretStr must be unwrapped explicitly.
        if self.private_key_pem:
            return self.private_key_pem.get_secret_value()
        if self.private_key_path:
            return Path(self.private_key_path).read_text()
        raise ValueError("No private key configured")
class GRPCServerConfig(BaseModel):
    """gRPC server transport configuration.

    Presence of this config in ServerTransportConfig.grpc enables gRPC transport.

    Attributes:
        host: Hostname to advertise in agent cards (default: localhost).
            Use docker service name (e.g., 'web') for docker-compose setups.
        port: Port for the gRPC server.
        tls_cert_path: Path to TLS certificate file for gRPC.
        tls_key_path: Path to TLS private key file for gRPC.
        max_workers: Maximum number of workers for the gRPC thread pool.
        reflection_enabled: Whether to enable gRPC reflection for debugging.
    """

    # Reject unknown keys so configuration typos fail loudly.
    model_config: ClassVar[ConfigDict] = ConfigDict(extra="forbid")

    host: str = Field(
        default="localhost",
        description="Hostname to advertise in agent cards for gRPC connections",
    )
    port: int = Field(
        default=50051,
        description="Port for the gRPC server",
    )
    tls_cert_path: str | None = Field(
        default=None,
        description="Path to TLS certificate file for gRPC",
    )
    tls_key_path: str | None = Field(
        default=None,
        description="Path to TLS private key file for gRPC",
    )
    max_workers: int = Field(
        default=10,
        description="Maximum number of workers for the gRPC thread pool",
    )
    reflection_enabled: bool = Field(
        default=False,
        description="Whether to enable gRPC reflection for debugging",
    )
class GRPCClientConfig(BaseModel):
    """gRPC client transport configuration.

    All limits default to None, i.e. the underlying gRPC defaults apply.

    Attributes:
        max_send_message_length: Maximum size for outgoing messages in bytes.
        max_receive_message_length: Maximum size for incoming messages in bytes.
        keepalive_time_ms: Time between keepalive pings in milliseconds.
        keepalive_timeout_ms: Timeout for keepalive ping response in milliseconds.
    """

    model_config: ClassVar[ConfigDict] = ConfigDict(extra="forbid")

    max_send_message_length: int | None = Field(
        default=None,
        description="Maximum size for outgoing messages in bytes",
    )
    max_receive_message_length: int | None = Field(
        default=None,
        description="Maximum size for incoming messages in bytes",
    )
    keepalive_time_ms: int | None = Field(
        default=None,
        description="Time between keepalive pings in milliseconds",
    )
    keepalive_timeout_ms: int | None = Field(
        default=None,
        description="Timeout for keepalive ping response in milliseconds",
    )
class JSONRPCServerConfig(BaseModel):
    """JSON-RPC server transport configuration.

    Presence of this config in ServerTransportConfig.jsonrpc enables JSON-RPC transport.

    Attributes:
        rpc_path: URL path for the JSON-RPC endpoint.
        agent_card_path: URL path for the agent card endpoint.
    """

    model_config: ClassVar[ConfigDict] = ConfigDict(extra="forbid")

    rpc_path: str = Field(
        default="/a2a",
        description="URL path for the JSON-RPC endpoint",
    )
    # Default follows the A2A well-known agent-card discovery location.
    agent_card_path: str = Field(
        default="/.well-known/agent-card.json",
        description="URL path for the agent card endpoint",
    )
class JSONRPCClientConfig(BaseModel):
    """JSON-RPC client transport configuration.

    Attributes:
        max_request_size: Maximum request body size in bytes (None = no
            explicit limit configured here).
    """

    model_config: ClassVar[ConfigDict] = ConfigDict(extra="forbid")

    max_request_size: int | None = Field(
        default=None,
        description="Maximum request body size in bytes",
    )
class HTTPJSONConfig(BaseModel):
    """HTTP+JSON transport configuration.

    Currently a marker with no options: its presence in
    ServerTransportConfig.http_json enables HTTP+JSON transport.
    """

    model_config: ClassVar[ConfigDict] = ConfigDict(extra="forbid")
class ServerPushNotificationConfig(BaseModel):
    """Configuration for outgoing webhook push notifications.

    Controls how the server signs and delivers push notifications to clients.

    Attributes:
        signature_secret: Shared secret for HMAC-SHA256 signing of outgoing webhooks.
    """

    model_config: ClassVar[ConfigDict] = ConfigDict(extra="forbid")

    # SecretStr keeps the secret out of reprs/serialized output.
    signature_secret: SecretStr | None = Field(
        default=None,
        description="Shared secret for HMAC-SHA256 signing of outgoing push notifications",
    )
class ServerTransportConfig(BaseModel):
    """Transport configuration for A2A server.

    Groups all transport-related settings including preferred transport
    and protocol-specific configurations.

    Attributes:
        preferred: Transport protocol for the preferred endpoint.
        jsonrpc: JSON-RPC server transport configuration.
        grpc: gRPC server transport configuration (None disables gRPC).
        http_json: HTTP+JSON transport configuration (None disables HTTP+JSON).
    """

    model_config: ClassVar[ConfigDict] = ConfigDict(extra="forbid")

    preferred: TransportType = Field(
        default="JSONRPC",
        description="Transport protocol for the preferred endpoint",
    )
    # JSON-RPC is always configured; gRPC and HTTP+JSON are opt-in.
    jsonrpc: JSONRPCServerConfig = Field(
        default_factory=JSONRPCServerConfig,
        description="JSON-RPC server transport configuration",
    )
    grpc: GRPCServerConfig | None = Field(
        default=None,
        description="gRPC server transport configuration",
    )
    http_json: HTTPJSONConfig | None = Field(
        default=None,
        description="HTTP+JSON transport configuration",
    )
def _migrate_client_transport_fields(
    transport: ClientTransportConfig,
    transport_protocol: TransportType | None,
    supported_transports: list[TransportType] | None,
) -> None:
    """Copy deprecated top-level transport fields onto the new config.

    For each deprecated field that was actually set, emits a FutureWarning
    and writes the value onto *transport* via object.__setattr__ (bypassing
    pydantic's normal assignment machinery).
    """
    migrations: tuple[tuple[str, str, object], ...] = (
        ("transport_protocol", "preferred", transport_protocol),
        ("supported_transports", "supported", supported_transports),
    )
    for old_name, new_name, value in migrations:
        if value is None:
            continue
        warnings.warn(
            f"{old_name} is deprecated, use transport=ClientTransportConfig({new_name}=...) instead",
            FutureWarning,
            stacklevel=5,
        )
        object.__setattr__(transport, new_name, value)
class ClientTransportConfig(BaseModel):
    """Transport configuration for A2A client.

    Groups all client transport-related settings including preferred transport,
    supported transports for negotiation, and protocol-specific configurations.

    Transport negotiation logic:
    1. If `preferred` is set and server supports it -> use client's preferred
    2. Otherwise, if server's preferred is in client's `supported` -> use server's preferred
    3. Otherwise, find first match from client's `supported` in server's interfaces

    Attributes:
        preferred: Client's preferred transport. If set, client preference takes priority.
        supported: Transports the client can use, in order of preference.
        jsonrpc: JSON-RPC client transport configuration.
        grpc: gRPC client transport configuration.
    """

    model_config: ClassVar[ConfigDict] = ConfigDict(extra="forbid")

    preferred: TransportType | None = Field(
        default=None,
        description="Client's preferred transport. If set, takes priority over server preference.",
    )
    # cast() keeps the default list typed as list[TransportType] for checkers.
    supported: list[TransportType] = Field(
        default_factory=lambda: cast(list[TransportType], ["JSONRPC"]),
        description="Transports the client can use, in order of preference",
    )
    jsonrpc: JSONRPCClientConfig = Field(
        default_factory=JSONRPCClientConfig,
        description="JSON-RPC client transport configuration",
    )
    grpc: GRPCClientConfig = Field(
        default_factory=GRPCClientConfig,
        description="gRPC client transport configuration",
    )
@deprecated(
    """
`crewai.a2a.config.A2AConfig` is deprecated and will be removed in v2.0.0,
use `crewai.a2a.config.A2AClientConfig` or `crewai.a2a.config.A2AServerConfig` instead.
""",
    category=FutureWarning,
)
class A2AConfig(BaseModel):
    """Configuration for A2A protocol integration.

    Deprecated:
        Use A2AClientConfig instead. This class will be removed in a future version.

    Attributes:
        endpoint: A2A agent endpoint URL.
        auth: Authentication scheme.
        timeout: Request timeout in seconds.
        max_turns: Maximum conversation turns with A2A agent.
        response_model: Optional Pydantic model for structured A2A agent responses.
        fail_fast: If True, raise error when agent unreachable; if False, skip and continue.
        trust_remote_completion_status: If True, return A2A agent's result directly when completed.
        updates: Update mechanism config.
        client_extensions: Client-side processing hooks for tool injection and prompt augmentation.
        transport: Transport configuration (preferred, supported transports, gRPC settings).
    """

    model_config: ClassVar[ConfigDict] = ConfigDict(extra="forbid")

    endpoint: Url = Field(description="A2A agent endpoint URL")
    auth: ClientAuthScheme | None = Field(
        default=None,
        description="Authentication scheme",
    )
    timeout: int = Field(default=120, description="Request timeout in seconds")
    max_turns: int = Field(
        default=10, description="Maximum conversation turns with A2A agent"
    )
    response_model: type[BaseModel] | None = Field(
        default=None,
        description="Optional Pydantic model for structured A2A agent responses",
    )
    fail_fast: bool = Field(
        default=True,
        description="If True, raise error when agent unreachable; if False, skip",
    )
    trust_remote_completion_status: bool = Field(
        default=False,
        description="If True, return A2A result directly when completed",
    )
    updates: UpdateConfig = Field(
        default_factory=_get_default_update_config,
        description="Update mechanism config",
    )
    client_extensions: list[ValidatedA2AExtension] = Field(
        default_factory=list,
        description="Client-side processing hooks for tool injection and prompt augmentation",
    )
    transport: ClientTransportConfig = Field(
        default_factory=ClientTransportConfig,
        description="Transport configuration (preferred, supported transports, gRPC settings)",
    )
    # Deprecated fields below are excluded from serialization; their values
    # are migrated onto `transport` by the model validator.
    transport_protocol: TransportType | None = Field(
        default=None,
        description="Deprecated: Use transport.preferred instead",
        exclude=True,
    )
    supported_transports: list[TransportType] | None = Field(
        default=None,
        description="Deprecated: Use transport.supported instead",
        exclude=True,
    )
    use_client_preference: bool | None = Field(
        default=None,
        description="Deprecated: Set transport.preferred to enable client preference",
        exclude=True,
    )
    # Internal flag, not part of the public schema.
    _parallel_delegation: bool = PrivateAttr(default=False)

    @model_validator(mode="after")
    def _migrate_deprecated_transport_fields(self) -> Self:
        """Migrate deprecated transport fields to new config."""
        _migrate_client_transport_fields(
            self.transport, self.transport_protocol, self.supported_transports
        )
        if self.use_client_preference is not None:
            warnings.warn(
                "use_client_preference is deprecated, set transport.preferred to enable client preference",
                FutureWarning,
                stacklevel=4,
            )
            # Promote the first supported transport to "preferred" when the
            # legacy flag asked for client preference.
            if self.use_client_preference and self.transport.supported:
                object.__setattr__(
                    self.transport, "preferred", self.transport.supported[0]
                )
        return self
class A2AClientConfig(BaseModel):
    """Configuration for connecting to remote A2A agents.

    Attributes:
        endpoint: A2A agent endpoint URL.
        auth: Authentication scheme.
        timeout: Request timeout in seconds.
        max_turns: Maximum conversation turns with A2A agent.
        response_model: Optional Pydantic model for structured A2A agent responses.
        fail_fast: If True, raise error when agent unreachable; if False, skip and continue.
        trust_remote_completion_status: If True, return A2A agent's result directly when completed.
        updates: Update mechanism config.
        accepted_output_modes: Media types the client can accept in responses.
        extensions: Extension URIs the client supports (A2A protocol extensions).
        client_extensions: Client-side processing hooks for tool injection and prompt augmentation.
        transport: Transport configuration (preferred, supported transports, gRPC settings).
    """

    model_config: ClassVar[ConfigDict] = ConfigDict(extra="forbid")

    endpoint: Url = Field(description="A2A agent endpoint URL")
    auth: ClientAuthScheme | None = Field(
        default=None,
        description="Authentication scheme",
    )
    timeout: int = Field(default=120, description="Request timeout in seconds")
    max_turns: int = Field(
        default=10, description="Maximum conversation turns with A2A agent"
    )
    response_model: type[BaseModel] | None = Field(
        default=None,
        description="Optional Pydantic model for structured A2A agent responses",
    )
    fail_fast: bool = Field(
        default=True,
        description="If True, raise error when agent unreachable; if False, skip",
    )
    trust_remote_completion_status: bool = Field(
        default=False,
        description="If True, return A2A result directly when completed",
    )
    updates: UpdateConfig = Field(
        default_factory=_get_default_update_config,
        description="Update mechanism config",
    )
    accepted_output_modes: list[str] = Field(
        default_factory=lambda: ["application/json"],
        description="Media types the client can accept in responses",
    )
    extensions: list[str] = Field(
        default_factory=list,
        description="Extension URIs the client supports",
    )
    client_extensions: list[ValidatedA2AExtension] = Field(
        default_factory=list,
        description="Client-side processing hooks for tool injection and prompt augmentation",
    )
    transport: ClientTransportConfig = Field(
        default_factory=ClientTransportConfig,
        description="Transport configuration (preferred, supported transports, gRPC settings)",
    )
    # Deprecated fields below are excluded from serialization; their values
    # are migrated onto `transport` by the model validator.
    transport_protocol: TransportType | None = Field(
        default=None,
        description="Deprecated: Use transport.preferred instead",
        exclude=True,
    )
    supported_transports: list[TransportType] | None = Field(
        default=None,
        description="Deprecated: Use transport.supported instead",
        exclude=True,
    )
    # Internal flag, not part of the public schema.
    _parallel_delegation: bool = PrivateAttr(default=False)

    @model_validator(mode="after")
    def _migrate_deprecated_transport_fields(self) -> Self:
        """Migrate deprecated transport fields to new config."""
        _migrate_client_transport_fields(
            self.transport, self.transport_protocol, self.supported_transports
        )
        return self
class A2AServerConfig(BaseModel):
    """Configuration for exposing a Crew or Agent as an A2A server.

    All fields correspond to A2A AgentCard fields. Fields like name, description,
    and skills can be auto-derived from the Crew/Agent if not provided.

    Attributes:
        name: Human-readable name for the agent.
        description: Human-readable description of the agent.
        version: Version string for the agent card.
        skills: List of agent skills/capabilities.
        default_input_modes: Default supported input MIME types.
        default_output_modes: Default supported output MIME types.
        capabilities: Declaration of optional capabilities.
        protocol_version: A2A protocol version this agent supports.
        provider: Information about the agent's service provider.
        documentation_url: URL to the agent's documentation.
        icon_url: URL to an icon for the agent.
        additional_interfaces: Additional supported interfaces.
        security: Security requirement objects for all interactions.
        security_schemes: Security schemes available to authorize requests.
        supports_authenticated_extended_card: Whether agent provides extended card to authenticated users.
        url: Preferred endpoint URL for the agent.
        signing_config: Configuration for signing the AgentCard with JWS.
        signatures: Deprecated. Pre-computed JWS signatures. Use signing_config instead.
        server_extensions: Server-side A2A protocol extensions with on_request/on_response hooks.
        push_notifications: Configuration for outgoing push notifications.
        transport: Transport configuration (preferred transport, gRPC, REST settings).
        auth: Authentication scheme for A2A endpoints.
    """

    model_config: ClassVar[ConfigDict] = ConfigDict(extra="forbid")

    name: str | None = Field(
        default=None,
        description="Human-readable name for the agent. Auto-derived from Crew/Agent if not provided.",
    )
    description: str | None = Field(
        default=None,
        description="Human-readable description of the agent. Auto-derived from Crew/Agent if not provided.",
    )
    version: str = Field(
        default="1.0.0",
        description="Version string for the agent card",
    )
    skills: list[AgentSkill] = Field(
        default_factory=list,
        description="List of agent skills. Auto-derived from tasks/tools if not provided.",
    )
    default_input_modes: list[str] = Field(
        default_factory=lambda: ["text/plain", "application/json"],
        description="Default supported input MIME types",
    )
    default_output_modes: list[str] = Field(
        default_factory=lambda: ["text/plain", "application/json"],
        description="Default supported output MIME types",
    )
    # Streaming on / push notifications off by default.
    capabilities: AgentCapabilities = Field(
        default_factory=lambda: AgentCapabilities(
            streaming=True,
            push_notifications=False,
        ),
        description="Declaration of optional capabilities supported by the agent",
    )
    protocol_version: ProtocolVersion = Field(
        default="0.3.0",
        description="A2A protocol version this agent supports",
    )
    provider: AgentProvider | None = Field(
        default=None,
        description="Information about the agent's service provider",
    )
    documentation_url: Url | None = Field(
        default=None,
        description="URL to the agent's documentation",
    )
    icon_url: Url | None = Field(
        default=None,
        description="URL to an icon for the agent",
    )
    additional_interfaces: list[AgentInterface] = Field(
        default_factory=list,
        description="Additional supported interfaces.",
    )
    security: list[dict[str, list[str]]] = Field(
        default_factory=list,
        description="Security requirement objects for all agent interactions",
    )
    security_schemes: dict[str, SecurityScheme] = Field(
        default_factory=dict,
        description="Security schemes available to authorize requests",
    )
    supports_authenticated_extended_card: bool = Field(
        default=False,
        description="Whether agent provides extended card to authenticated users",
    )
    url: Url | None = Field(
        default=None,
        description="Preferred endpoint URL for the agent. Set at runtime if not provided.",
    )
    signing_config: AgentCardSigningConfig | None = Field(
        default=None,
        description="Configuration for signing the AgentCard with JWS",
    )
    # Deprecated; kept for backward compatibility and excluded from output.
    signatures: list[AgentCardSignature] | None = Field(
        default=None,
        description="Deprecated: Use signing_config instead. Pre-computed JWS signatures for the AgentCard.",
        exclude=True,
        deprecated=True,
    )
    server_extensions: list[ServerExtension] = Field(
        default_factory=list,
        description="Server-side A2A protocol extensions that modify agent behavior",
    )
    push_notifications: ServerPushNotificationConfig | None = Field(
        default=None,
        description="Configuration for outgoing push notifications",
    )
    transport: ServerTransportConfig = Field(
        default_factory=ServerTransportConfig,
        description="Transport configuration (preferred transport, gRPC, REST settings)",
    )
    # Deprecated; migrated onto `transport` by the model validator below.
    preferred_transport: TransportType | None = Field(
        default=None,
        description="Deprecated: Use transport.preferred instead",
        exclude=True,
        deprecated=True,
    )
    auth: ServerAuthScheme | None = Field(
        default=None,
        description="Authentication scheme for A2A endpoints. Defaults to SimpleTokenAuth using AUTH_TOKEN env var.",
    )

    @model_validator(mode="after")
    def _migrate_deprecated_fields(self) -> Self:
        """Migrate deprecated fields to new config."""
        if self.preferred_transport is not None:
            warnings.warn(
                "preferred_transport is deprecated, use transport=ServerTransportConfig(preferred=...) instead",
                FutureWarning,
                stacklevel=4,
            )
            # Bypass pydantic assignment handling when patching the sub-model.
            object.__setattr__(self.transport, "preferred", self.preferred_transport)
        if self.signatures is not None:
            # Warn only; the value is intentionally left in place for callers
            # that still read it.
            warnings.warn(
                "signatures is deprecated, use signing_config=AgentCardSigningConfig(...) instead. "
                "The signatures field will be removed in v2.0.0.",
                FutureWarning,
                stacklevel=4,
            )
        return self
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/a2a/config.py",
"license": "MIT License",
"lines": 600,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai/src/crewai/a2a/templates.py | """String templates for A2A (Agent-to-Agent) protocol messaging and status."""
from string import Template
from typing import Final
# Injected into agent prompts to list remote A2A agents available for delegation.
AVAILABLE_AGENTS_TEMPLATE: Final[Template] = Template(
    "\n<AVAILABLE_A2A_AGENTS>\n $available_a2a_agents\n</AVAILABLE_A2A_AGENTS>\n"
)

# Replays the prior A2A exchange so the agent has conversational context.
PREVIOUS_A2A_CONVERSATION_TEMPLATE: Final[Template] = Template(
    "\n<PREVIOUS_A2A_CONVERSATION>\n"
    " $previous_a2a_conversation"
    "\n</PREVIOUS_A2A_CONVERSATION>\n"
)

# Reports turn progress: current turn, turn cap, and an optional warning string.
CONVERSATION_TURN_INFO_TEMPLATE: Final[Template] = Template(
    "\n<CONVERSATION_PROGRESS>\n"
    ' turn="$turn_count"\n'
    ' max_turns="$max_turns"\n'
    " $warning"
    "\n</CONVERSATION_PROGRESS>\n"
)

# Shown when configured A2A agents cannot be reached; delegation is unavailable.
UNAVAILABLE_AGENTS_NOTICE_TEMPLATE: Final[Template] = Template(
    "\n<A2A_AGENTS_STATUS>\n"
    " NOTE: A2A agents were configured but are currently unavailable.\n"
    " You cannot delegate to remote agents for this task.\n\n"
    " Unavailable Agents:\n"
    " $unavailable_agents"
    "\n</A2A_AGENTS_STATUS>\n"
)

# Fixed notice appended when the remote agent reports completion: instructs
# the local agent to extract the answer and stop delegating (is_a2a=false).
REMOTE_AGENT_COMPLETED_NOTICE: Final[str] = """
<REMOTE_AGENT_STATUS>
STATUS: COMPLETED
The remote agent has finished processing your request. Their response is in the conversation history above.
You MUST now:
1. Extract the answer from the conversation history
2. Set is_a2a=false
3. Return the answer as your final message
DO NOT send another request - the task is already done.
</REMOTE_AGENT_STATUS>
"""

# Fixed notice appended when a remote response arrives: instructs the local
# agent to answer the original user in third person and stop delegating.
REMOTE_AGENT_RESPONSE_NOTICE: Final[str] = """
<REMOTE_AGENT_STATUS>
STATUS: RESPONSE_RECEIVED
The remote agent has responded. Their response is in the conversation history above.
You MUST now:
1. Set is_a2a=false (the remote task is complete and cannot receive more messages)
2. Provide YOUR OWN response to the original task based on the information received
IMPORTANT: Your response should be addressed to the USER who gave you the original task.
Report what the remote agent told you in THIRD PERSON (e.g., "The remote agent said..." or "I learned that...").
Do NOT address the remote agent directly or use "you" to refer to them.
</REMOTE_AGENT_STATUS>
"""
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/a2a/templates.py",
"license": "MIT License",
"lines": 49,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
crewAIInc/crewAI:lib/crewai/src/crewai/a2a/types.py | """Type definitions for A2A protocol message parts."""
from __future__ import annotations
from typing import (
Annotated,
Any,
Literal,
Protocol,
TypedDict,
runtime_checkable,
)
from pydantic import BeforeValidator, HttpUrl, TypeAdapter
from typing_extensions import NotRequired
# crewai.a2a.updates is an optional dependency of this module; when it cannot
# be imported, degrade every update-related name to `Any` so the aliases and
# HANDLER_REGISTRY below stay importable (without update-handler support).
try:
    from crewai.a2a.updates import (
        PollingConfig,
        PollingHandler,
        PushNotificationConfig,
        PushNotificationHandler,
        StreamingConfig,
        StreamingHandler,
        UpdateConfig,
    )
except ImportError:
    PollingConfig = Any  # type: ignore[misc,assignment]
    PollingHandler = Any  # type: ignore[misc,assignment]
    PushNotificationConfig = Any  # type: ignore[misc,assignment]
    PushNotificationHandler = Any  # type: ignore[misc,assignment]
    StreamingConfig = Any  # type: ignore[misc,assignment]
    StreamingHandler = Any  # type: ignore[misc,assignment]
    UpdateConfig = Any  # type: ignore[misc,assignment]
# Wire transports supported by the A2A protocol.
TransportType = Literal["JSONRPC", "GRPC", "HTTP+JSON"]
# A2A protocol versions this integration accepts.
ProtocolVersion = Literal[
    "0.2.0",
    "0.2.1",
    "0.2.2",
    "0.2.3",
    "0.2.4",
    "0.2.5",
    "0.2.6",
    "0.3.0",
    "0.4.0",
]
# Module-level adapter so URL validation reuses one compiled pydantic adapter
# instead of rebuilding it per check.
http_url_adapter: TypeAdapter[HttpUrl] = TypeAdapter(HttpUrl)
# A plain `str` that is strictly validated as an HTTP URL on model input and
# normalized back to a string.
Url = Annotated[
    str,
    BeforeValidator(
        lambda value: str(http_url_adapter.validate_python(value, strict=True))
    ),
]
@runtime_checkable
class AgentResponseProtocol(Protocol):
    """Structural protocol for the dynamically created AgentResponse model.

    Note: `runtime_checkable` isinstance() checks only verify attribute
    presence, not attribute types.
    """

    # Endpoints/IDs of the A2A agents the LLM targets for delegation.
    a2a_ids: tuple[str, ...]
    # Message to send to the remote agent, or the final answer when is_a2a is False.
    message: str
    # True when the LLM wants to delegate to a remote A2A agent.
    is_a2a: bool
class PartsMetadataDict(TypedDict, total=False):
    """Metadata for A2A message parts.

    total=False: both keys are optional.

    Attributes:
        mimeType: MIME type for the part content (only JSON is supported).
        schema: JSON schema for the part content.
    """

    mimeType: Literal["application/json"]
    schema: dict[str, Any]
class PartsDict(TypedDict):
    """A2A message part containing text and optional metadata.

    Attributes:
        text: The text content of the message part (required).
        metadata: Optional metadata describing the part content.
    """

    text: str
    metadata: NotRequired[PartsMetadataDict]
# Class-object aliases for the update handlers; these degrade to `Any` when
# crewai.a2a.updates failed to import above.
PollingHandlerType = type[PollingHandler]
StreamingHandlerType = type[StreamingHandler]
PushNotificationHandlerType = type[PushNotificationHandler]
HandlerType = PollingHandlerType | StreamingHandlerType | PushNotificationHandlerType
# Maps each update-config class to the handler class that services it.
HANDLER_REGISTRY: dict[type[UpdateConfig], HandlerType] = {
    PollingConfig: PollingHandler,
    StreamingConfig: StreamingHandler,
    PushNotificationConfig: PushNotificationHandler,
}
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/a2a/types.py",
"license": "MIT License",
"lines": 80,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai/src/crewai/a2a/wrapper.py | """A2A agent wrapping logic for metaclass integration.
Wraps agent classes with A2A delegation capabilities.
"""
from __future__ import annotations
import asyncio
from collections.abc import Callable, Coroutine, Mapping
from concurrent.futures import ThreadPoolExecutor, as_completed
from functools import wraps
import json
from types import MethodType
from typing import TYPE_CHECKING, Any, NamedTuple
from a2a.types import Role, TaskState
from pydantic import BaseModel, ValidationError
from crewai.a2a.config import A2AClientConfig, A2AConfig
from crewai.a2a.extensions.base import (
A2AExtension,
ConversationState,
ExtensionRegistry,
)
from crewai.a2a.task_helpers import TaskStateResult
from crewai.a2a.templates import (
AVAILABLE_AGENTS_TEMPLATE,
CONVERSATION_TURN_INFO_TEMPLATE,
PREVIOUS_A2A_CONVERSATION_TEMPLATE,
REMOTE_AGENT_RESPONSE_NOTICE,
UNAVAILABLE_AGENTS_NOTICE_TEMPLATE,
)
from crewai.a2a.types import AgentResponseProtocol
from crewai.a2a.utils.agent_card import (
afetch_agent_card,
fetch_agent_card,
inject_a2a_server_methods,
)
from crewai.a2a.utils.delegation import (
aexecute_a2a_delegation,
execute_a2a_delegation,
)
from crewai.a2a.utils.response_model import get_a2a_agents_and_response_model
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.a2a_events import (
A2AConversationCompletedEvent,
A2AMessageSentEvent,
)
from crewai.lite_agent_output import LiteAgentOutput
from crewai.task import Task
if TYPE_CHECKING:
from a2a.types import AgentCard, Message
from crewai.agent.core import Agent
from crewai.tools.base_tool import BaseTool
class DelegationContext(NamedTuple):
    """Context prepared for A2A delegation.

    Groups all the values needed to execute a delegation to a remote A2A agent.
    Fixed for the lifetime of one delegation; per-turn mutable values live in
    DelegationState.
    """

    # All configured remote agents for this delegating agent.
    a2a_agents: list[A2AConfig | A2AClientConfig]
    # Dynamically built model the LLM output is parsed against.
    agent_response_model: type[BaseModel] | None
    # Message to send to the remote agent on the first turn.
    current_request: str
    # Endpoint identifying the selected remote agent.
    agent_id: str
    # Config entry whose endpoint matches agent_id.
    agent_config: A2AConfig | A2AClientConfig
    # A2A correlation IDs sourced from task.config (may be absent).
    context_id: str | None
    task_id: str | None
    # Optional A2A metadata/extensions passed through from task.config.
    metadata: dict[str, Any] | None
    extensions: dict[str, Any] | None
    # IDs of previously completed remote tasks in this conversation chain.
    reference_task_ids: list[str]
    # Task description before A2A prompt augmentation.
    original_task_description: str
    # Turn limit taken from the selected agent's config.
    max_turns: int
class DelegationState(NamedTuple):
    """Mutable state for A2A delegation loop.

    Groups values that may change during delegation turns. NamedTuples are
    immutable, so the loop replaces the state rather than mutating fields.
    """

    # Next message to send to the remote agent.
    current_request: str
    # Correlation IDs; task_id is cleared when a remote task completes so a
    # fresh ID is generated for the next turn (see _handle_task_completion).
    context_id: str | None
    task_id: str | None
    # Completed remote task IDs accumulated across turns.
    reference_task_ids: list[str]
    # A2A Messages exchanged so far.
    conversation_history: list[Message]
    # Card of the selected remote agent plus dict/name projections of it.
    agent_card: AgentCard | None
    agent_card_dict: dict[str, Any] | None
    agent_name: str | None
def wrap_agent_with_a2a_instance(
    agent: Agent, extension_registry: ExtensionRegistry | None = None
) -> None:
    """Wrap an agent instance's task execution and kickoff methods with A2A support.

    This function modifies the agent instance by wrapping its execute_task,
    aexecute_task, kickoff, and kickoff_async methods to add A2A delegation
    capabilities. Should only be called when the agent has a2a configuration set.

    Args:
        agent: The agent instance to wrap.
        extension_registry: Optional registry of A2A extensions.
    """
    if extension_registry is None:
        extension_registry = ExtensionRegistry()
    extension_registry.inject_all_tools(agent)
    # Capture the underlying functions (not bound methods) so the wrappers can
    # be re-bound to this specific instance via MethodType below.
    original_execute_task = agent.execute_task.__func__  # type: ignore[attr-defined]
    original_aexecute_task = agent.aexecute_task.__func__  # type: ignore[attr-defined]

    @wraps(original_execute_task)
    def execute_task_with_a2a(
        self: Agent,
        task: Task,
        context: str | None = None,
        tools: list[BaseTool] | None = None,
    ) -> str:
        """Execute task with A2A delegation support (sync)."""
        # a2a config may have been cleared after wrapping; fall back to the
        # original behavior in that case.
        if not self.a2a:
            return original_execute_task(self, task, context, tools)  # type: ignore[no-any-return]
        a2a_agents, agent_response_model = get_a2a_agents_and_response_model(self.a2a)
        return _execute_task_with_a2a(
            self=self,
            a2a_agents=a2a_agents,
            original_fn=original_execute_task,
            task=task,
            agent_response_model=agent_response_model,
            context=context,
            tools=tools,
            extension_registry=extension_registry,
        )

    @wraps(original_aexecute_task)
    async def aexecute_task_with_a2a(
        self: Agent,
        task: Task,
        context: str | None = None,
        tools: list[BaseTool] | None = None,
    ) -> str:
        """Execute task with A2A delegation support (async)."""
        if not self.a2a:
            return await original_aexecute_task(self, task, context, tools)  # type: ignore[no-any-return]
        a2a_agents, agent_response_model = get_a2a_agents_and_response_model(self.a2a)
        return await _aexecute_task_with_a2a(
            self=self,
            a2a_agents=a2a_agents,
            original_fn=original_aexecute_task,
            task=task,
            agent_response_model=agent_response_model,
            context=context,
            tools=tools,
            extension_registry=extension_registry,
        )

    # object.__setattr__ sidesteps any __setattr__ override/validation on the
    # agent so the bound wrappers can be installed on the instance.
    object.__setattr__(agent, "execute_task", MethodType(execute_task_with_a2a, agent))
    object.__setattr__(
        agent, "aexecute_task", MethodType(aexecute_task_with_a2a, agent)
    )
    original_kickoff = agent.kickoff.__func__  # type: ignore[attr-defined]
    original_kickoff_async = agent.kickoff_async.__func__  # type: ignore[attr-defined]

    @wraps(original_kickoff)
    def kickoff_with_a2a(
        self: Agent,
        messages: str | list[Any],
        response_format: type[Any] | None = None,
        input_files: dict[str, Any] | None = None,
    ) -> Any:
        """Execute agent kickoff with A2A delegation support."""
        if not self.a2a:
            return original_kickoff(self, messages, response_format, input_files)
        a2a_agents, agent_response_model = get_a2a_agents_and_response_model(self.a2a)
        # Config present but resolved to no usable agents: run unmodified.
        if not a2a_agents:
            return original_kickoff(self, messages, response_format, input_files)
        return _kickoff_with_a2a(
            self=self,
            a2a_agents=a2a_agents,
            original_kickoff=original_kickoff,
            messages=messages,
            response_format=response_format,
            input_files=input_files,
            agent_response_model=agent_response_model,
            extension_registry=extension_registry,
        )

    @wraps(original_kickoff_async)
    async def kickoff_async_with_a2a(
        self: Agent,
        messages: str | list[Any],
        response_format: type[Any] | None = None,
        input_files: dict[str, Any] | None = None,
    ) -> Any:
        """Execute agent kickoff with A2A delegation support."""
        if not self.a2a:
            return await original_kickoff_async(
                self, messages, response_format, input_files
            )
        a2a_agents, agent_response_model = get_a2a_agents_and_response_model(self.a2a)
        if not a2a_agents:
            return await original_kickoff_async(
                self, messages, response_format, input_files
            )
        return await _akickoff_with_a2a(
            self=self,
            a2a_agents=a2a_agents,
            original_kickoff_async=original_kickoff_async,
            messages=messages,
            response_format=response_format,
            input_files=input_files,
            agent_response_model=agent_response_model,
            extension_registry=extension_registry,
        )

    object.__setattr__(agent, "kickoff", MethodType(kickoff_with_a2a, agent))
    object.__setattr__(
        agent, "kickoff_async", MethodType(kickoff_async_with_a2a, agent)
    )
    # Attach the A2A server-side helpers to the instance (see utils.agent_card).
    inject_a2a_server_methods(agent)
def _fetch_card_from_config(
config: A2AConfig | A2AClientConfig,
) -> tuple[A2AConfig | A2AClientConfig, AgentCard | Exception]:
"""Fetch agent card from A2A config.
Args:
config: A2A configuration
Returns:
Tuple of (config, card or exception)
"""
try:
card = fetch_agent_card(
endpoint=config.endpoint,
auth=config.auth,
timeout=config.timeout,
)
return config, card
except Exception as e:
return config, e
def _fetch_agent_cards_concurrently(
a2a_agents: list[A2AConfig | A2AClientConfig],
) -> tuple[dict[str, AgentCard], dict[str, str]]:
"""Fetch agent cards concurrently for multiple A2A agents.
Args:
a2a_agents: List of A2A agent configurations
Returns:
Tuple of (agent_cards dict, failed_agents dict mapping endpoint to error message)
"""
agent_cards: dict[str, AgentCard] = {}
failed_agents: dict[str, str] = {}
if not a2a_agents:
return agent_cards, failed_agents
max_workers = min(len(a2a_agents), 10)
with ThreadPoolExecutor(max_workers=max_workers) as executor:
futures = {
executor.submit(_fetch_card_from_config, config): config
for config in a2a_agents
}
for future in as_completed(futures):
config, result = future.result()
if isinstance(result, Exception):
if config.fail_fast:
raise RuntimeError(
f"Failed to fetch agent card from {config.endpoint}. "
f"Ensure the A2A agent is running and accessible. Error: {result}"
) from result
failed_agents[config.endpoint] = str(result)
else:
agent_cards[config.endpoint] = result
return agent_cards, failed_agents
def _execute_task_with_a2a(
    self: Agent,
    a2a_agents: list[A2AConfig | A2AClientConfig],
    original_fn: Callable[..., str],
    task: Task,
    agent_response_model: type[BaseModel] | None,
    context: str | None,
    tools: list[BaseTool] | None,
    extension_registry: ExtensionRegistry,
) -> str:
    """Wrap execute_task with A2A delegation logic.

    Args:
        self: The agent instance
        a2a_agents: List of A2A agent configurations
        original_fn: The original execute_task method
        task: The task to execute
        agent_response_model: Optional agent response model
        context: Optional context for task execution
        tools: Optional tools available to the agent
        extension_registry: Registry of A2A extensions

    Returns:
        Task execution result (either from LLM or A2A agent)
    """
    # Snapshot the task fields mutated below so they can be restored.
    original_description: str = task.description
    original_output_pydantic = task.output_pydantic
    original_response_model = task.response_model
    agent_cards, failed_agents = _fetch_agent_cards_concurrently(a2a_agents)
    # Every configured agent is unreachable: run the task locally, but tell
    # the LLM delegation is unavailable so it does not attempt it.
    if not agent_cards and a2a_agents and failed_agents:
        unavailable_agents_text = ""
        for endpoint, error in failed_agents.items():
            unavailable_agents_text += f" - {endpoint}: {error}\n"
        notice = UNAVAILABLE_AGENTS_NOTICE_TEMPLATE.substitute(
            unavailable_agents=unavailable_agents_text
        )
        task.description = f"{original_description}{notice}"
        try:
            return original_fn(self, task, context, tools)
        finally:
            task.description = original_description
    task.description, _, extension_states = _augment_prompt_with_a2a(
        a2a_agents=a2a_agents,
        task_description=original_description,
        agent_cards=agent_cards,
        failed_agents=failed_agents,
        extension_registry=extension_registry,
    )
    # Force structured AgentResponse output for this run.
    task.response_model = agent_response_model
    try:
        raw_result = original_fn(self, task, context, tools)
        agent_response = _parse_agent_response(
            raw_result=raw_result, agent_response_model=agent_response_model
        )
        if extension_registry and isinstance(agent_response, BaseModel):
            agent_response = extension_registry.process_response_with_all(
                agent_response, extension_states
            )
        if isinstance(agent_response, BaseModel) and isinstance(
            agent_response, AgentResponseProtocol
        ):
            if agent_response.is_a2a:
                # The LLM chose to delegate: enter the A2A conversation loop.
                return _delegate_to_a2a(
                    self,
                    agent_response=agent_response,
                    task=task,
                    original_fn=original_fn,
                    context=context,
                    tools=tools,
                    agent_cards=agent_cards,
                    original_task_description=original_description,
                    _extension_registry=extension_registry,
                )
            # Direct answer: clear output_pydantic so the structured wrapper
            # is not applied to the plain message.
            task.output_pydantic = None
            return agent_response.message
        # Output did not parse as an AgentResponse; return it unchanged.
        return raw_result
    finally:
        task.description = original_description
        # NOTE(review): this guard deliberately skips restoring when
        # output_pydantic was cleared to None above — confirm downstream
        # consumers rely on it staying None in that path.
        if task.output_pydantic is not None:
            task.output_pydantic = original_output_pydantic
        task.response_model = original_response_model
def _kickoff_with_a2a(
    self: Agent,
    a2a_agents: list[A2AConfig | A2AClientConfig],
    original_kickoff: Callable[..., LiteAgentOutput],
    messages: str | list[Any],
    response_format: type[Any] | None,
    input_files: dict[str, Any] | None,
    agent_response_model: type[BaseModel] | None,
    extension_registry: ExtensionRegistry,
) -> LiteAgentOutput:
    """Execute kickoff with A2A delegation support (sync).

    Args:
        self: The agent instance.
        a2a_agents: List of A2A agent configurations.
        original_kickoff: The original kickoff method.
        messages: Messages to send to the agent.
        response_format: Optional response format.
        input_files: Optional input files.
        agent_response_model: Optional agent response model.
        extension_registry: Registry of A2A extensions.

    Returns:
        LiteAgentOutput from kickoff or A2A delegation.
    """
    # Derive a task description from the latest user message.
    if isinstance(messages, str):
        description = messages
    else:
        content = next(
            (m["content"] for m in reversed(messages) if m["role"] == "user"),
            None,
        )
        description = content if isinstance(content, str) else ""
    if not description:
        # Nothing to delegate on; defer entirely to the original kickoff.
        return original_kickoff(self, messages, response_format, input_files)
    # Synthetic Task used only to carry state through the delegation helpers.
    fake_task = Task(
        description=description,
        agent=self,
        expected_output="Result from A2A delegation",
        input_files=input_files or {},
    )
    agent_cards, failed_agents = _fetch_agent_cards_concurrently(a2a_agents)
    # All remote agents unreachable: run kickoff without A2A augmentation.
    if not agent_cards and a2a_agents and failed_agents:
        return original_kickoff(self, messages, response_format, input_files)
    fake_task.description, _, extension_states = _augment_prompt_with_a2a(
        a2a_agents=a2a_agents,
        task_description=description,
        agent_cards=agent_cards,
        failed_agents=failed_agents,
        extension_registry=extension_registry,
    )
    fake_task.response_model = agent_response_model
    try:
        # Prefer the structured AgentResponse format when one was built.
        result: LiteAgentOutput = original_kickoff(
            self, messages, agent_response_model or response_format, input_files
        )
        agent_response = _parse_agent_response(
            raw_result=result.raw, agent_response_model=agent_response_model
        )
        if extension_registry and isinstance(agent_response, BaseModel):
            agent_response = extension_registry.process_response_with_all(
                agent_response, extension_states
            )
        if isinstance(agent_response, BaseModel) and isinstance(
            agent_response, AgentResponseProtocol
        ):
            if agent_response.is_a2a:

                def _kickoff_adapter(
                    self_: Agent,
                    _task: Task,
                    _context: str | None,
                    _tools: list[Any] | None,
                ) -> str:
                    # Adapts kickoff to the execute_task-shaped callable the
                    # delegation loop expects; context/tools are ignored.
                    fmt = (
                        _task.response_model or agent_response_model or response_format
                    )
                    output: LiteAgentOutput = original_kickoff(
                        self_, messages, fmt, input_files
                    )
                    return output.raw

                result_str = _delegate_to_a2a(
                    self,
                    agent_response=agent_response,
                    task=fake_task,
                    original_fn=_kickoff_adapter,
                    context=None,
                    tools=None,
                    agent_cards=agent_cards,
                    original_task_description=description,
                    _extension_registry=extension_registry,
                )
                return LiteAgentOutput(
                    raw=result_str,
                    pydantic=None,
                    agent_role=self.role,
                    usage_metrics=None,
                    messages=[],
                )
            # Direct answer: surface the message rather than the raw JSON.
            return LiteAgentOutput(
                raw=agent_response.message,
                pydantic=None,
                agent_role=self.role,
                usage_metrics=result.usage_metrics,
                messages=result.messages,
            )
        return result
    finally:
        fake_task.description = description
async def _akickoff_with_a2a(
    self: Agent,
    a2a_agents: list[A2AConfig | A2AClientConfig],
    original_kickoff_async: Callable[..., Coroutine[Any, Any, LiteAgentOutput]],
    messages: str | list[Any],
    response_format: type[Any] | None,
    input_files: dict[str, Any] | None,
    agent_response_model: type[BaseModel] | None,
    extension_registry: ExtensionRegistry,
) -> LiteAgentOutput:
    """Execute kickoff with A2A delegation support (async).

    Async mirror of _kickoff_with_a2a; keep the two in sync.

    Args:
        self: The agent instance.
        a2a_agents: List of A2A agent configurations.
        original_kickoff_async: The original kickoff_async method.
        messages: Messages to send to the agent.
        response_format: Optional response format.
        input_files: Optional input files.
        agent_response_model: Optional agent response model.
        extension_registry: Registry of A2A extensions.

    Returns:
        LiteAgentOutput from kickoff or A2A delegation.
    """
    # Derive a task description from the latest user message.
    if isinstance(messages, str):
        description = messages
    else:
        content = next(
            (m["content"] for m in reversed(messages) if m["role"] == "user"),
            None,
        )
        description = content if isinstance(content, str) else ""
    if not description:
        # Nothing to delegate on; defer entirely to the original kickoff.
        return await original_kickoff_async(
            self, messages, response_format, input_files
        )
    # Synthetic Task used only to carry state through the delegation helpers.
    fake_task = Task(
        description=description,
        agent=self,
        expected_output="Result from A2A delegation",
        input_files=input_files or {},
    )
    agent_cards, failed_agents = await _afetch_agent_cards_concurrently(a2a_agents)
    # All remote agents unreachable: run kickoff without A2A augmentation.
    if not agent_cards and a2a_agents and failed_agents:
        return await original_kickoff_async(
            self, messages, response_format, input_files
        )
    fake_task.description, _, extension_states = _augment_prompt_with_a2a(
        a2a_agents=a2a_agents,
        task_description=description,
        agent_cards=agent_cards,
        failed_agents=failed_agents,
        extension_registry=extension_registry,
    )
    fake_task.response_model = agent_response_model
    try:
        # Prefer the structured AgentResponse format when one was built.
        result: LiteAgentOutput = await original_kickoff_async(
            self, messages, agent_response_model or response_format, input_files
        )
        agent_response = _parse_agent_response(
            raw_result=result.raw, agent_response_model=agent_response_model
        )
        if extension_registry and isinstance(agent_response, BaseModel):
            agent_response = extension_registry.process_response_with_all(
                agent_response, extension_states
            )
        if isinstance(agent_response, BaseModel) and isinstance(
            agent_response, AgentResponseProtocol
        ):
            if agent_response.is_a2a:

                async def _kickoff_adapter(
                    self_: Agent,
                    _task: Task,
                    _context: str | None,
                    _tools: list[Any] | None,
                ) -> str:
                    # Adapts kickoff_async to the execute_task-shaped callable
                    # the delegation loop expects; context/tools are ignored.
                    fmt = (
                        _task.response_model or agent_response_model or response_format
                    )
                    output: LiteAgentOutput = await original_kickoff_async(
                        self_, messages, fmt, input_files
                    )
                    return output.raw

                result_str = await _adelegate_to_a2a(
                    self,
                    agent_response=agent_response,
                    task=fake_task,
                    original_fn=_kickoff_adapter,
                    context=None,
                    tools=None,
                    agent_cards=agent_cards,
                    original_task_description=description,
                    _extension_registry=extension_registry,
                )
                return LiteAgentOutput(
                    raw=result_str,
                    pydantic=None,
                    agent_role=self.role,
                    usage_metrics=None,
                    messages=[],
                )
            # Direct answer: surface the message rather than the raw JSON.
            return LiteAgentOutput(
                raw=agent_response.message,
                pydantic=None,
                agent_role=self.role,
                usage_metrics=result.usage_metrics,
                messages=result.messages,
            )
        return result
    finally:
        fake_task.description = description
def _augment_prompt_with_a2a(
    a2a_agents: list[A2AConfig | A2AClientConfig],
    task_description: str,
    agent_cards: Mapping[str, AgentCard | dict[str, Any]],
    conversation_history: list[Message] | None = None,
    turn_num: int = 0,
    max_turns: int | None = None,
    failed_agents: dict[str, str] | None = None,
    extension_registry: ExtensionRegistry | None = None,
    remote_status_notice: str = "",
) -> tuple[str, bool, dict[type[A2AExtension], ConversationState]]:
    """Add A2A delegation instructions to prompt.

    Args:
        a2a_agents: List of A2A agent configurations
        task_description: Original task description
        agent_cards: Mapping of agent endpoints to AgentCards (or plain dicts)
        conversation_history: Previous A2A Messages from conversation
        turn_num: Current turn number (0-indexed)
        max_turns: Maximum allowed turns (from config)
        failed_agents: Dictionary mapping failed agent endpoints to error messages
        extension_registry: Optional registry of A2A extensions
        remote_status_notice: Optional notice about remote agent status to append

    Returns:
        Tuple of (augmented prompt, disable_structured_output flag, extension_states dict)
    """
    # No reachable card means nothing to advertise: leave the prompt alone.
    if not agent_cards:
        return task_description, False, {}
    # Render each reachable agent's card (description/url/skills only) so the
    # LLM can choose a delegate without seeing unrelated card fields.
    agents_text = ""
    for config in a2a_agents:
        if config.endpoint in agent_cards:
            card = agent_cards[config.endpoint]
            if isinstance(card, dict):
                filtered = {
                    k: v
                    for k, v in card.items()
                    if k in {"description", "url", "skills"} and v is not None
                }
                agents_text += f"\n{json.dumps(filtered, indent=2)}\n"
            else:
                agents_text += f"\n{card.model_dump_json(indent=2, exclude_none=True, include={'description', 'url', 'skills'})}\n"
    failed_agents = failed_agents or {}
    if failed_agents:
        # List unreachable agents as comments so the LLM won't target them.
        agents_text += "\n<!-- Unavailable Agents -->\n"
        for endpoint, error in failed_agents.items():
            agents_text += f"\n<!-- Agent: {endpoint}\n Status: Unavailable\n Error: {error} -->\n"
    agents_text = AVAILABLE_AGENTS_TEMPLATE.substitute(available_a2a_agents=agents_text)
    # Serialize prior A2A messages (minus message_id noise) into the prompt.
    history_text = ""
    if conversation_history:
        for msg in conversation_history:
            history_text += f"\n{msg.model_dump_json(indent=2, exclude_none=True, exclude={'message_id'})}\n"
        history_text = PREVIOUS_A2A_CONVERSATION_TEMPLATE.substitute(
            previous_a2a_conversation=history_text
        )
    extension_states = {}
    disable_structured_output = False
    if extension_registry and conversation_history:
        extension_states = extension_registry.extract_all_states(conversation_history)
        # NOTE(review): any "ready" extension state disables structured output
        # for this turn — presumably so the extension's own output format
        # wins; confirm against the extension implementations.
        for state in extension_states.values():
            if state.is_ready():
                disable_structured_output = True
                break
    # Surface turn-limit progress and final-turn warnings on multi-turn runs.
    turn_info = ""
    if max_turns is not None and conversation_history:
        turn_count = turn_num + 1
        warning = ""
        if turn_count >= max_turns:
            warning = (
                "CRITICAL: This is the FINAL turn. You MUST conclude the conversation now.\n"
                "Set is_a2a=false and provide your final response to complete the task."
            )
        elif turn_count == max_turns - 1:
            warning = "WARNING: Next turn will be the last. Consider wrapping up the conversation."
        turn_info = CONVERSATION_TURN_INFO_TEMPLATE.substitute(
            turn_count=turn_count,
            max_turns=max_turns,
            warning=warning,
        )
    augmented_prompt = f"""{task_description}
IMPORTANT: You have the ability to delegate this task to remote A2A agents.
{agents_text}
{history_text}{turn_info}{remote_status_notice}
"""
    if extension_registry:
        augmented_prompt = extension_registry.augment_prompt_with_all(
            augmented_prompt, extension_states
        )
    return augmented_prompt, disable_structured_output, extension_states
def _parse_agent_response(
raw_result: str | dict[str, Any], agent_response_model: type[BaseModel] | None
) -> BaseModel | str | dict[str, Any]:
"""Parse LLM output as AgentResponse or return raw agent response."""
if agent_response_model:
try:
if isinstance(raw_result, str):
return agent_response_model.model_validate_json(raw_result)
if isinstance(raw_result, dict):
return agent_response_model.model_validate(raw_result)
except ValidationError:
return raw_result
return raw_result
def _handle_max_turns_exceeded(
    conversation_history: list[Message],
    max_turns: int,
    from_task: Any | None = None,
    from_agent: Any | None = None,
    endpoint: str | None = None,
    a2a_agent_name: str | None = None,
    agent_card: dict[str, Any] | None = None,
) -> str:
    """Handle the case when max turns is exceeded.

    Shared logic for both sync and async delegation: scan the history for the
    most recent remote-agent message and treat its text as the final result;
    if none exists, emit a failure event and raise.

    Args:
        conversation_history: A2A messages exchanged so far.
        max_turns: Configured turn limit (reported as total_turns).
        from_task: Optional CrewAI Task for event metadata.
        from_agent: Optional CrewAI Agent for event metadata.
        endpoint: A2A endpoint URL.
        a2a_agent_name: Name of the remote A2A agent.
        agent_card: Agent card dict for event metadata.

    Returns:
        Final message if found in history.

    Raises:
        RuntimeError: If no final message found and max turns exceeded.
    """
    if conversation_history:
        # Walk backwards so the most recent remote-agent message wins.
        for msg in reversed(conversation_history):
            if msg.role == Role.agent:
                text_parts = [
                    part.root.text for part in msg.parts if part.root.kind == "text"
                ]
                final_message = (
                    " ".join(text_parts) if text_parts else "Conversation completed"
                )
                crewai_event_bus.emit(
                    None,
                    A2AConversationCompletedEvent(
                        status="completed",
                        final_result=final_message,
                        error=None,
                        total_turns=max_turns,
                        from_task=from_task,
                        from_agent=from_agent,
                        endpoint=endpoint,
                        a2a_agent_name=a2a_agent_name,
                        agent_card=agent_card,
                    ),
                )
                return final_message
    # No usable agent message: report the failure before raising.
    crewai_event_bus.emit(
        None,
        A2AConversationCompletedEvent(
            status="failed",
            final_result=None,
            error=f"Conversation exceeded maximum turns ({max_turns})",
            total_turns=max_turns,
            from_task=from_task,
            from_agent=from_agent,
            endpoint=endpoint,
            a2a_agent_name=a2a_agent_name,
            agent_card=agent_card,
        ),
    )
    # RuntimeError instead of bare Exception: callers catching Exception still
    # work, and linters/idiom forbid raising the Exception base class.
    raise RuntimeError(f"A2A conversation exceeded maximum turns ({max_turns})")
def _emit_delegation_failed(
    error_msg: str,
    turn_num: int,
    from_task: Any | None,
    from_agent: Any | None,
    endpoint: str | None,
    a2a_agent_name: str | None,
    agent_card: dict[str, Any] | None,
) -> str:
    """Emit failure event and return formatted error message.

    Args:
        error_msg: Human-readable description of the failure.
        turn_num: Zero-based turn index; reported as total_turns=turn_num + 1.
        from_task: Optional CrewAI Task for event metadata.
        from_agent: Optional CrewAI Agent for event metadata.
        endpoint: A2A endpoint URL.
        a2a_agent_name: Name of the remote A2A agent.
        agent_card: Agent card dict for event metadata.

    Returns:
        "A2A delegation failed: <error_msg>", suitable as a task result.
    """
    crewai_event_bus.emit(
        None,
        A2AConversationCompletedEvent(
            status="failed",
            final_result=None,
            error=error_msg,
            total_turns=turn_num + 1,
            from_task=from_task,
            from_agent=from_agent,
            endpoint=endpoint,
            a2a_agent_name=a2a_agent_name,
            agent_card=agent_card,
        ),
    )
    return f"A2A delegation failed: {error_msg}"
def _process_response_result(
    raw_result: str,
    disable_structured_output: bool,
    turn_num: int,
    agent_role: str,
    agent_response_model: type[BaseModel] | None,
    extension_registry: ExtensionRegistry | None = None,
    extension_states: dict[type[A2AExtension], ConversationState] | None = None,
    from_task: Any | None = None,
    from_agent: Any | None = None,
    endpoint: str | None = None,
    a2a_agent_name: str | None = None,
    agent_card: dict[str, Any] | None = None,
) -> tuple[str | None, str | None]:
    """Process LLM response and determine next action.

    Shared logic for both sync and async handlers.

    Returns:
        Tuple of (final_result, next_request); exactly one side is non-None.
        A final_result ends the conversation, a next_request continues it.
    """
    if disable_structured_output:
        # Structured parsing was turned off for this turn (extension state was
        # ready): treat the raw text as the final answer and emit both the
        # sent-message and completion events.
        final_turn_number = turn_num + 1
        result_text = str(raw_result)
        crewai_event_bus.emit(
            None,
            A2AMessageSentEvent(
                message=result_text,
                turn_number=final_turn_number,
                is_multiturn=True,
                agent_role=agent_role,
                from_task=from_task,
                from_agent=from_agent,
                endpoint=endpoint,
                a2a_agent_name=a2a_agent_name,
            ),
        )
        crewai_event_bus.emit(
            None,
            A2AConversationCompletedEvent(
                status="completed",
                final_result=result_text,
                error=None,
                total_turns=final_turn_number,
                from_task=from_task,
                from_agent=from_agent,
                endpoint=endpoint,
                a2a_agent_name=a2a_agent_name,
                agent_card=agent_card,
            ),
        )
        return result_text, None
    llm_response = _parse_agent_response(
        raw_result=raw_result, agent_response_model=agent_response_model
    )
    if extension_registry and isinstance(llm_response, BaseModel):
        llm_response = extension_registry.process_response_with_all(
            llm_response, extension_states or {}
        )
    if isinstance(llm_response, BaseModel) and isinstance(
        llm_response, AgentResponseProtocol
    ):
        if not llm_response.is_a2a:
            # The LLM concluded the conversation: emit events and finish.
            final_turn_number = turn_num + 1
            crewai_event_bus.emit(
                None,
                A2AMessageSentEvent(
                    message=str(llm_response.message),
                    turn_number=final_turn_number,
                    is_multiturn=True,
                    agent_role=agent_role,
                    from_task=from_task,
                    from_agent=from_agent,
                    endpoint=endpoint,
                    a2a_agent_name=a2a_agent_name,
                ),
            )
            crewai_event_bus.emit(
                None,
                A2AConversationCompletedEvent(
                    status="completed",
                    final_result=str(llm_response.message),
                    error=None,
                    total_turns=final_turn_number,
                    from_task=from_task,
                    from_agent=from_agent,
                    endpoint=endpoint,
                    a2a_agent_name=a2a_agent_name,
                    agent_card=agent_card,
                ),
            )
            return llm_response.message, None
        # The LLM wants another turn with the remote agent.
        return None, llm_response.message
    # Output did not parse as an AgentResponse: treat it as the final result.
    return str(raw_result), None
def _prepare_agent_cards_dict(
a2a_result: TaskStateResult,
agent_id: str,
agent_cards: Mapping[str, AgentCard | dict[str, Any]] | None,
) -> dict[str, AgentCard | dict[str, Any]]:
"""Prepare agent cards dictionary from result and existing cards.
Shared logic for both sync and async response handlers.
"""
agent_cards_dict: dict[str, AgentCard | dict[str, Any]] = (
dict(agent_cards) if agent_cards else {}
)
if "agent_card" in a2a_result and agent_id not in agent_cards_dict:
agent_cards_dict[agent_id] = a2a_result["agent_card"]
return agent_cards_dict
def _init_delegation_state(
    ctx: DelegationContext,
    agent_cards: dict[str, AgentCard] | None,
) -> DelegationState:
    """Initialize delegation state from context and agent cards.

    Args:
        ctx: Delegation context with config and settings.
        agent_cards: Pre-fetched agent cards.

    Returns:
        Initial delegation state for the conversation loop.
    """
    current_agent_card = agent_cards.get(ctx.agent_id) if agent_cards else None
    return DelegationState(
        current_request=ctx.current_request,
        context_id=ctx.context_id,
        task_id=ctx.task_id,
        # Copy so per-turn mutation never aliases the context's list.
        reference_task_ids=list(ctx.reference_task_ids),
        conversation_history=[],
        agent_card=current_agent_card,
        # Pre-computed projections of the card used by later turns.
        agent_card_dict=current_agent_card.model_dump() if current_agent_card else None,
        agent_name=current_agent_card.name if current_agent_card else None,
    )
def _get_turn_context(
    agent_config: A2AConfig | A2AClientConfig,
) -> tuple[Any | None, list[str] | None]:
    """Get context for a delegation turn.

    Returns:
        Tuple of (agent_branch, accepted_output_modes); either may be None.
    """
    # The console formatter is a private attribute of the event bus; fall back
    # to None whenever it, or either branch attribute, is absent.
    formatter = getattr(crewai_event_bus, "_console", None)
    branch = None
    if formatter:
        branch = getattr(formatter, "current_agent_branch", None) or getattr(
            formatter, "current_task_branch", None
        )
    # Only client configs carry accepted output modes.
    modes = (
        agent_config.accepted_output_modes
        if isinstance(agent_config, A2AClientConfig)
        else None
    )
    return branch, modes
def _prepare_delegation_context(
    self: Agent,
    agent_response: AgentResponseProtocol,
    task: Task,
    original_task_description: str | None,
) -> DelegationContext:
    """Prepare delegation context from agent response and task.

    Shared logic for both sync and async delegation.

    Args:
        self: The delegating agent; its a2a config selects the remote agents.
        agent_response: Parsed LLM response naming the target agent(s).
        task: Task whose config supplies A2A correlation IDs and metadata.
        original_task_description: Pre-augmentation description; falls back to
            task.description when None.

    Returns:
        DelegationContext with all values needed for delegation.

    Raises:
        ValueError: If no A2A agents are configured, the requested agent ID is
            unknown, or no configuration matches that endpoint.
    """
    a2a_agents, agent_response_model = get_a2a_agents_and_response_model(self.a2a)
    agent_ids = tuple(config.endpoint for config in a2a_agents)
    current_request = str(agent_response.message)
    if not a2a_agents:
        raise ValueError("No A2A agents configured for delegation")
    # Prefer the agent the LLM explicitly targeted; otherwise default to the
    # first configured endpoint.
    if isinstance(agent_response, AgentResponseProtocol) and agent_response.a2a_ids:
        agent_id = agent_response.a2a_ids[0]
    else:
        agent_id = agent_ids[0]
    if agent_id not in agent_ids:
        raise ValueError(f"Unknown A2A agent ID: {agent_id} not in {agent_ids}")
    agent_config = next(filter(lambda x: x.endpoint == agent_id, a2a_agents), None)
    if agent_config is None:
        raise ValueError(f"Agent configuration not found for endpoint: {agent_id}")
    task_config = task.config or {}
    if original_task_description is None:
        original_task_description = task.description
    return DelegationContext(
        a2a_agents=a2a_agents,
        agent_response_model=agent_response_model,
        current_request=current_request,
        agent_id=agent_id,
        agent_config=agent_config,
        context_id=task_config.get("context_id"),
        task_id=task_config.get("task_id"),
        metadata=task_config.get("metadata"),
        extensions=task_config.get("extensions"),
        reference_task_ids=task_config.get("reference_task_ids", []),
        original_task_description=original_task_description,
        max_turns=agent_config.max_turns,
    )
def _handle_task_completion(
    a2a_result: TaskStateResult,
    task: Task,
    task_id_config: str | None,
    reference_task_ids: list[str],
    agent_config: A2AConfig | A2AClientConfig,
    turn_num: int,
    from_task: Any | None = None,
    from_agent: Any | None = None,
    endpoint: str | None = None,
    a2a_agent_name: str | None = None,
    agent_card: dict[str, Any] | None = None,
) -> tuple[str | None, str | None, list[str], str]:
    """Record a completed remote task and optionally short-circuit the loop.

    On completion the finished task_id is appended to reference_task_ids
    (mirrored into task.config for subsequent calls) and cleared so the
    next turn generates a fresh ID. If the config trusts the remote
    completion status, the remote result is returned as the final answer
    and a completion event is emitted.

    Shared logic for both sync and async delegation.

    Args:
        a2a_result: Result from A2A delegation containing task status.
        task: CrewAI Task object to update with reference IDs.
        task_id_config: Current task ID (added to references if completed).
        reference_task_ids: Mutable list of completed task IDs (updated in place).
        agent_config: A2A configuration with trust settings.
        turn_num: Current turn number (0-indexed).
        from_task: Optional CrewAI Task for event metadata.
        from_agent: Optional CrewAI Agent for event metadata.
        endpoint: A2A endpoint URL.
        a2a_agent_name: Name of remote A2A agent.
        agent_card: Agent card dict for event metadata.

    Returns:
        Tuple of (result_if_trusted, updated_task_id,
        updated_reference_task_ids, remote_notice).
    """
    remote_notice = ""
    # Anything other than a completed task needs no bookkeeping here.
    if a2a_result["status"] != TaskState.completed:
        return None, task_id_config, reference_task_ids, remote_notice
    remote_notice = REMOTE_AGENT_RESPONSE_NOTICE
    if task_id_config is not None and task_id_config not in reference_task_ids:
        reference_task_ids.append(task_id_config)
        if task.config is None:
            task.config = {}
        task.config["reference_task_ids"] = list(reference_task_ids)
    # Cleared so the next turn creates a fresh remote task ID.
    task_id_config = None
    if not agent_config.trust_remote_completion_status:
        return None, task_id_config, reference_task_ids, remote_notice
    # Trusted completion: emit the terminal event and surface the result.
    result_text = a2a_result.get("result", "")
    crewai_event_bus.emit(
        None,
        A2AConversationCompletedEvent(
            status="completed",
            final_result=result_text,
            error=None,
            total_turns=turn_num + 1,
            from_task=from_task,
            from_agent=from_agent,
            endpoint=endpoint,
            a2a_agent_name=a2a_agent_name,
            agent_card=agent_card,
        ),
    )
    return str(result_text), task_id_config, reference_task_ids, remote_notice
def _handle_agent_response_and_continue(
    self: Agent,
    a2a_result: TaskStateResult,
    agent_id: str,
    agent_cards: dict[str, AgentCard] | None,
    a2a_agents: list[A2AConfig | A2AClientConfig],
    original_task_description: str,
    conversation_history: list[Message],
    turn_num: int,
    max_turns: int,
    task: Task,
    original_fn: Callable[..., str],
    context: str | None,
    tools: list[BaseTool] | None,
    agent_response_model: type[BaseModel] | None,
    extension_registry: ExtensionRegistry | None = None,
    remote_status_notice: str = "",
    endpoint: str | None = None,
    a2a_agent_name: str | None = None,
    agent_card: dict[str, Any] | None = None,
) -> tuple[str | None, str | None]:
    """Handle A2A result and get CrewAI agent's response.

    Args:
        self: The agent instance
        a2a_result: Result from A2A delegation
        agent_id: ID of the A2A agent
        agent_cards: Pre-fetched agent cards
        a2a_agents: List of A2A configurations
        original_task_description: Original task description
        conversation_history: Conversation history
        turn_num: Current turn number
        max_turns: Maximum turns allowed
        task: The task being executed
        original_fn: Original execute_task method
        context: Optional context
        tools: Optional tools
        agent_response_model: Response model for parsing
        extension_registry: Optional registry of active A2A extensions
        remote_status_notice: Notice about the remote agent's status to
            include in the augmented prompt
        endpoint: A2A endpoint URL (event metadata)
        a2a_agent_name: Name of remote A2A agent (event metadata)
        agent_card: Agent card dict (event metadata)

    Returns:
        Tuple of (final_result, current_request) where:
        - final_result is not None if conversation should end
        - current_request is the next message to send if continuing
    """
    agent_cards_dict = _prepare_agent_cards_dict(a2a_result, agent_id, agent_cards)
    (
        task.description,
        disable_structured_output,
        extension_states,
    ) = _augment_prompt_with_a2a(
        a2a_agents=a2a_agents,
        task_description=original_task_description,
        conversation_history=conversation_history,
        turn_num=turn_num,
        max_turns=max_turns,
        agent_cards=agent_cards_dict,
        remote_status_notice=remote_status_notice,
    )
    original_response_model = task.response_model
    if disable_structured_output:
        task.response_model = None
    try:
        raw_result = original_fn(self, task, context, tools)
    finally:
        # Fix: restore even when original_fn raises — previously an exception
        # left task.response_model = None for subsequent executions.
        if disable_structured_output:
            task.response_model = original_response_model
    return _process_response_result(
        raw_result=raw_result,
        disable_structured_output=disable_structured_output,
        turn_num=turn_num,
        agent_role=self.role,
        agent_response_model=agent_response_model,
        extension_registry=extension_registry,
        extension_states=extension_states,
        from_task=task,
        from_agent=self,
        endpoint=endpoint,
        a2a_agent_name=a2a_agent_name,
        agent_card=agent_card,
    )
def _delegate_to_a2a(
    self: Agent,
    agent_response: AgentResponseProtocol,
    task: Task,
    original_fn: Callable[..., str],
    context: str | None,
    tools: list[BaseTool] | None,
    agent_cards: dict[str, AgentCard] | None = None,
    original_task_description: str | None = None,
    _extension_registry: ExtensionRegistry | None = None,
) -> str:
    """Delegate to A2A agent with multi-turn conversation support.

    Args:
        self: The agent instance
        agent_response: The AgentResponse indicating delegation
        task: The task being executed (for extracting A2A fields)
        original_fn: The original execute_task method for follow-ups
        context: Optional context for task execution
        tools: Optional tools available to the agent
        agent_cards: Pre-fetched agent cards from _execute_task_with_a2a
        original_task_description: The original task description before A2A augmentation
        _extension_registry: Optional registry of A2A extensions (unused, reserved for future use)

    Returns:
        Result from A2A agent

    Raises:
        ImportError: If a2a-sdk is not installed
    """
    ctx = _prepare_delegation_context(
        self, agent_response, task, original_task_description
    )
    state = _init_delegation_state(ctx, agent_cards)
    # Mutable per-conversation state, seeded from the initial delegation state
    # and updated each turn.
    current_request = state.current_request
    context_id = state.context_id
    task_id = state.task_id
    reference_task_ids = state.reference_task_ids
    conversation_history = state.conversation_history
    try:
        for turn_num in range(ctx.max_turns):
            agent_branch, accepted_output_modes = _get_turn_context(ctx.agent_config)
            # One synchronous request/response exchange with the remote agent.
            a2a_result = execute_a2a_delegation(
                endpoint=ctx.agent_config.endpoint,
                auth=ctx.agent_config.auth,
                timeout=ctx.agent_config.timeout,
                task_description=current_request,
                context_id=context_id,
                task_id=task_id,
                reference_task_ids=reference_task_ids,
                metadata=ctx.metadata,
                extensions=ctx.extensions,
                conversation_history=conversation_history,
                agent_id=ctx.agent_id,
                agent_role=Role.user,
                agent_branch=agent_branch,
                response_model=ctx.agent_config.response_model,
                turn_number=turn_num + 1,
                updates=ctx.agent_config.updates,
                transport=ctx.agent_config.transport,
                from_task=task,
                from_agent=self,
                client_extensions=getattr(ctx.agent_config, "extensions", None),
                accepted_output_modes=accepted_output_modes,
                input_files=task.input_files,
            )
            # Adopt the remote side's task/context IDs so later turns stay in
            # the same conversation thread.
            conversation_history = a2a_result.get("history", [])
            if conversation_history:
                latest_message = conversation_history[-1]
                if latest_message.task_id is not None:
                    task_id = latest_message.task_id
                if latest_message.context_id is not None:
                    context_id = latest_message.context_id
            if a2a_result["status"] in [TaskState.completed, TaskState.input_required]:
                # Completed (or awaiting input): record references and, if the
                # config trusts remote completion, return its result directly.
                trusted_result, task_id, reference_task_ids, remote_notice = (
                    _handle_task_completion(
                        a2a_result,
                        task,
                        task_id,
                        reference_task_ids,
                        ctx.agent_config,
                        turn_num,
                        from_task=task,
                        from_agent=self,
                        endpoint=ctx.agent_config.endpoint,
                        a2a_agent_name=state.agent_name,
                        agent_card=state.agent_card_dict,
                    )
                )
                if trusted_result is not None:
                    return trusted_result
                # Ask the local agent to respond / decide the next message.
                final_result, next_request = _handle_agent_response_and_continue(
                    self=self,
                    a2a_result=a2a_result,
                    agent_id=ctx.agent_id,
                    agent_cards=agent_cards,
                    a2a_agents=ctx.a2a_agents,
                    original_task_description=ctx.original_task_description,
                    conversation_history=conversation_history,
                    turn_num=turn_num,
                    max_turns=ctx.max_turns,
                    task=task,
                    original_fn=original_fn,
                    context=context,
                    tools=tools,
                    agent_response_model=ctx.agent_response_model,
                    extension_registry=_extension_registry,
                    remote_status_notice=remote_notice,
                    endpoint=ctx.agent_config.endpoint,
                    a2a_agent_name=state.agent_name,
                    agent_card=state.agent_card_dict,
                )
                if final_result is not None:
                    return final_result
                if next_request is not None:
                    current_request = next_request
                    continue
                # NOTE: if neither a final result nor a follow-up request was
                # produced, control deliberately falls through to the error
                # path below.
            # Failure (or fell through from above): give the local agent one
            # chance to react before declaring delegation failed.
            error_msg = a2a_result.get("error", "Unknown error")
            final_result, next_request = _handle_agent_response_and_continue(
                self=self,
                a2a_result=a2a_result,
                agent_id=ctx.agent_id,
                agent_cards=agent_cards,
                a2a_agents=ctx.a2a_agents,
                original_task_description=ctx.original_task_description,
                conversation_history=conversation_history,
                turn_num=turn_num,
                max_turns=ctx.max_turns,
                task=task,
                original_fn=original_fn,
                context=context,
                tools=tools,
                agent_response_model=ctx.agent_response_model,
                extension_registry=_extension_registry,
                endpoint=ctx.agent_config.endpoint,
                a2a_agent_name=state.agent_name,
                agent_card=state.agent_card_dict,
            )
            if final_result is not None:
                return final_result
            if next_request is not None:
                current_request = next_request
                continue
            return _emit_delegation_failed(
                error_msg,
                turn_num,
                task,
                self,
                ctx.agent_config.endpoint,
                state.agent_name,
                state.agent_card_dict,
            )
        # Loop exhausted without a final answer.
        return _handle_max_turns_exceeded(
            conversation_history,
            ctx.max_turns,
            from_task=task,
            from_agent=self,
            endpoint=ctx.agent_config.endpoint,
            a2a_agent_name=state.agent_name,
            agent_card=state.agent_card_dict,
        )
    finally:
        # The prompt is repeatedly rewritten during the loop; always restore it.
        task.description = ctx.original_task_description
async def _afetch_card_from_config(
    config: A2AConfig | A2AClientConfig,
) -> tuple[A2AConfig | A2AClientConfig, AgentCard | Exception]:
    """Resolve one agent card, returning any failure instead of raising.

    Args:
        config: A2A configuration naming the endpoint to query.

    Returns:
        Pair of (config, fetched card) on success, or (config, exception) on
        failure — so concurrent gathers are never cancelled by one bad endpoint.
    """
    try:
        fetched = await afetch_agent_card(
            endpoint=config.endpoint,
            auth=config.auth,
            timeout=config.timeout,
        )
    except Exception as exc:
        return config, exc
    return config, fetched
async def _afetch_agent_cards_concurrently(
    a2a_agents: list[A2AConfig | A2AClientConfig],
) -> tuple[dict[str, AgentCard], dict[str, str]]:
    """Fetch all agent cards in parallel via asyncio.

    Args:
        a2a_agents: Configurations whose endpoints should be queried.

    Returns:
        Two dicts keyed by endpoint: successfully fetched cards, and
        stringified errors for endpoints that failed.

    Raises:
        RuntimeError: If a fetch fails for a config with fail_fast enabled.
    """
    cards: dict[str, AgentCard] = {}
    failures: dict[str, str] = {}
    if not a2a_agents:
        return cards, failures
    outcomes = await asyncio.gather(
        *(_afetch_card_from_config(config) for config in a2a_agents)
    )
    for config, outcome in outcomes:
        if not isinstance(outcome, Exception):
            cards[config.endpoint] = outcome
            continue
        if config.fail_fast:
            raise RuntimeError(
                f"Failed to fetch agent card from {config.endpoint}. "
                f"Ensure the A2A agent is running and accessible. Error: {outcome}"
            ) from outcome
        failures[config.endpoint] = str(outcome)
    return cards, failures
async def _aexecute_task_with_a2a(
    self: Agent,
    a2a_agents: list[A2AConfig | A2AClientConfig],
    original_fn: Callable[..., Coroutine[Any, Any, str]],
    task: Task,
    agent_response_model: type[BaseModel] | None,
    context: str | None,
    tools: list[BaseTool] | None,
    extension_registry: ExtensionRegistry,
) -> str:
    """Async version of _execute_task_with_a2a.

    Fetches remote agent cards, augments the task prompt with A2A info,
    runs the wrapped execute_task, and delegates to a remote agent when the
    structured response requests it. Mutated task fields (description,
    response_model, output_pydantic) are restored on exit.

    Args:
        self: The agent instance.
        a2a_agents: Configured A2A agents available for delegation.
        original_fn: The wrapped async execute_task coroutine function.
        task: The task being executed (mutated temporarily).
        agent_response_model: Structured-response model used to detect delegation.
        context: Optional context for task execution.
        tools: Optional tools available to the agent.
        extension_registry: Registry of active A2A extensions.

    Returns:
        Final result string, either produced locally or via A2A delegation.
    """
    # Snapshot task fields we mutate so they can be restored in the finallys.
    original_description: str = task.description
    original_output_pydantic = task.output_pydantic
    original_response_model = task.response_model
    agent_cards, failed_agents = await _afetch_agent_cards_concurrently(a2a_agents)
    if not agent_cards and a2a_agents and failed_agents:
        # Every configured agent is unreachable: tell the LLM and execute
        # the task locally without any delegation machinery.
        unavailable_agents_text = ""
        for endpoint, error in failed_agents.items():
            unavailable_agents_text += f" - {endpoint}: {error}\n"
        notice = UNAVAILABLE_AGENTS_NOTICE_TEMPLATE.substitute(
            unavailable_agents=unavailable_agents_text
        )
        task.description = f"{original_description}{notice}"
        try:
            return await original_fn(self, task, context, tools)
        finally:
            task.description = original_description
    task.description, _, extension_states = _augment_prompt_with_a2a(
        a2a_agents=a2a_agents,
        task_description=original_description,
        agent_cards=agent_cards,
        failed_agents=failed_agents,
        extension_registry=extension_registry,
    )
    # Force the structured model so the response can signal delegation intent.
    task.response_model = agent_response_model
    try:
        raw_result = await original_fn(self, task, context, tools)
        agent_response = _parse_agent_response(
            raw_result=raw_result, agent_response_model=agent_response_model
        )
        if extension_registry and isinstance(agent_response, BaseModel):
            agent_response = extension_registry.process_response_with_all(
                agent_response, extension_states
            )
        if isinstance(agent_response, BaseModel) and isinstance(
            agent_response, AgentResponseProtocol
        ):
            if agent_response.is_a2a:
                return await _adelegate_to_a2a(
                    self,
                    agent_response=agent_response,
                    task=task,
                    original_fn=original_fn,
                    context=context,
                    tools=tools,
                    agent_cards=agent_cards,
                    original_task_description=original_description,
                    _extension_registry=extension_registry,
                )
            # Local answer wrapped in the structured model: clear
            # output_pydantic and return just the message text.
            task.output_pydantic = None
            return agent_response.message
        return raw_result
    finally:
        task.description = original_description
        # NOTE(review): output_pydantic is restored only when it was NOT
        # cleared above — presumably intentional so the plain-message return
        # path skips pydantic output coercion; confirm against sync version.
        if task.output_pydantic is not None:
            task.output_pydantic = original_output_pydantic
        task.response_model = original_response_model
async def _ahandle_agent_response_and_continue(
    self: Agent,
    a2a_result: TaskStateResult,
    agent_id: str,
    agent_cards: dict[str, AgentCard] | None,
    a2a_agents: list[A2AConfig | A2AClientConfig],
    original_task_description: str,
    conversation_history: list[Message],
    turn_num: int,
    max_turns: int,
    task: Task,
    original_fn: Callable[..., Coroutine[Any, Any, str]],
    context: str | None,
    tools: list[BaseTool] | None,
    agent_response_model: type[BaseModel] | None,
    extension_registry: ExtensionRegistry | None = None,
    remote_status_notice: str = "",
    endpoint: str | None = None,
    a2a_agent_name: str | None = None,
    agent_card: dict[str, Any] | None = None,
) -> tuple[str | None, str | None]:
    """Async version of _handle_agent_response_and_continue.

    Augments the prompt with the remote turn, runs the wrapped async
    execute_task, and processes the response into either a final result or
    the next request to send.

    Args:
        self: The agent instance.
        a2a_result: Result from A2A delegation.
        agent_id: ID of the A2A agent.
        agent_cards: Pre-fetched agent cards.
        a2a_agents: List of A2A configurations.
        original_task_description: Original task description.
        conversation_history: Conversation history so far.
        turn_num: Current turn number.
        max_turns: Maximum turns allowed.
        task: The task being executed.
        original_fn: Original async execute_task coroutine function.
        context: Optional context.
        tools: Optional tools.
        agent_response_model: Response model for parsing.
        extension_registry: Optional registry of active A2A extensions.
        remote_status_notice: Notice about the remote agent's status to
            include in the augmented prompt.
        endpoint: A2A endpoint URL (event metadata).
        a2a_agent_name: Name of remote A2A agent (event metadata).
        agent_card: Agent card dict (event metadata).

    Returns:
        Tuple of (final_result, current_request); final_result is not None
        when the conversation should end, otherwise current_request carries
        the next message to send.
    """
    agent_cards_dict = _prepare_agent_cards_dict(a2a_result, agent_id, agent_cards)
    (
        task.description,
        disable_structured_output,
        extension_states,
    ) = _augment_prompt_with_a2a(
        a2a_agents=a2a_agents,
        task_description=original_task_description,
        conversation_history=conversation_history,
        turn_num=turn_num,
        max_turns=max_turns,
        agent_cards=agent_cards_dict,
        remote_status_notice=remote_status_notice,
    )
    original_response_model = task.response_model
    if disable_structured_output:
        task.response_model = None
    try:
        raw_result = await original_fn(self, task, context, tools)
    finally:
        # Fix: restore even when original_fn raises — previously an exception
        # left task.response_model = None for subsequent executions.
        if disable_structured_output:
            task.response_model = original_response_model
    return _process_response_result(
        raw_result=raw_result,
        disable_structured_output=disable_structured_output,
        turn_num=turn_num,
        agent_role=self.role,
        agent_response_model=agent_response_model,
        extension_registry=extension_registry,
        extension_states=extension_states,
        from_task=task,
        from_agent=self,
        endpoint=endpoint,
        a2a_agent_name=a2a_agent_name,
        agent_card=agent_card,
    )
async def _adelegate_to_a2a(
    self: Agent,
    agent_response: AgentResponseProtocol,
    task: Task,
    original_fn: Callable[..., Coroutine[Any, Any, str]],
    context: str | None,
    tools: list[BaseTool] | None,
    agent_cards: dict[str, AgentCard] | None = None,
    original_task_description: str | None = None,
    _extension_registry: ExtensionRegistry | None = None,
) -> str:
    """Async version of _delegate_to_a2a.

    Runs a multi-turn A2A conversation: each turn sends the current request
    to the remote agent, then lets the local agent decide whether to finish
    or send a follow-up, until a result is produced or max_turns is reached.

    Args:
        self: The agent instance.
        agent_response: The AgentResponse indicating delegation.
        task: The task being executed (for extracting A2A fields).
        original_fn: The original async execute_task for follow-ups.
        context: Optional context for task execution.
        tools: Optional tools available to the agent.
        agent_cards: Pre-fetched agent cards.
        original_task_description: Description before A2A augmentation.
        _extension_registry: Optional registry of A2A extensions.

    Returns:
        Result from the A2A agent.
    """
    ctx = _prepare_delegation_context(
        self, agent_response, task, original_task_description
    )
    state = _init_delegation_state(ctx, agent_cards)
    # Mutable per-conversation state, updated each turn.
    current_request = state.current_request
    context_id = state.context_id
    task_id = state.task_id
    reference_task_ids = state.reference_task_ids
    conversation_history = state.conversation_history
    try:
        for turn_num in range(ctx.max_turns):
            agent_branch, accepted_output_modes = _get_turn_context(ctx.agent_config)
            # One asynchronous request/response exchange with the remote agent.
            a2a_result = await aexecute_a2a_delegation(
                endpoint=ctx.agent_config.endpoint,
                auth=ctx.agent_config.auth,
                timeout=ctx.agent_config.timeout,
                task_description=current_request,
                context_id=context_id,
                task_id=task_id,
                reference_task_ids=reference_task_ids,
                metadata=ctx.metadata,
                extensions=ctx.extensions,
                conversation_history=conversation_history,
                agent_id=ctx.agent_id,
                agent_role=Role.user,
                agent_branch=agent_branch,
                response_model=ctx.agent_config.response_model,
                turn_number=turn_num + 1,
                transport=ctx.agent_config.transport,
                updates=ctx.agent_config.updates,
                from_task=task,
                from_agent=self,
                client_extensions=getattr(ctx.agent_config, "extensions", None),
                accepted_output_modes=accepted_output_modes,
                input_files=task.input_files,
            )
            # Adopt the remote side's task/context IDs so later turns stay in
            # the same conversation thread.
            conversation_history = a2a_result.get("history", [])
            if conversation_history:
                latest_message = conversation_history[-1]
                if latest_message.task_id is not None:
                    task_id = latest_message.task_id
                if latest_message.context_id is not None:
                    context_id = latest_message.context_id
            if a2a_result["status"] in [TaskState.completed, TaskState.input_required]:
                # Completed (or awaiting input): record references and, if the
                # config trusts remote completion, return its result directly.
                trusted_result, task_id, reference_task_ids, remote_notice = (
                    _handle_task_completion(
                        a2a_result,
                        task,
                        task_id,
                        reference_task_ids,
                        ctx.agent_config,
                        turn_num,
                        from_task=task,
                        from_agent=self,
                        endpoint=ctx.agent_config.endpoint,
                        a2a_agent_name=state.agent_name,
                        agent_card=state.agent_card_dict,
                    )
                )
                if trusted_result is not None:
                    return trusted_result
                # Ask the local agent to respond / decide the next message.
                final_result, next_request = await _ahandle_agent_response_and_continue(
                    self=self,
                    a2a_result=a2a_result,
                    agent_id=ctx.agent_id,
                    agent_cards=agent_cards,
                    a2a_agents=ctx.a2a_agents,
                    original_task_description=ctx.original_task_description,
                    conversation_history=conversation_history,
                    turn_num=turn_num,
                    max_turns=ctx.max_turns,
                    task=task,
                    original_fn=original_fn,
                    context=context,
                    tools=tools,
                    agent_response_model=ctx.agent_response_model,
                    extension_registry=_extension_registry,
                    remote_status_notice=remote_notice,
                    endpoint=ctx.agent_config.endpoint,
                    a2a_agent_name=state.agent_name,
                    agent_card=state.agent_card_dict,
                )
                if final_result is not None:
                    return final_result
                if next_request is not None:
                    current_request = next_request
                    continue
                # NOTE: if neither a final result nor a follow-up request was
                # produced, control deliberately falls through to the error
                # path below.
            # Failure (or fell through from above): give the local agent one
            # chance to react before declaring delegation failed.
            error_msg = a2a_result.get("error", "Unknown error")
            final_result, next_request = await _ahandle_agent_response_and_continue(
                self=self,
                a2a_result=a2a_result,
                agent_id=ctx.agent_id,
                agent_cards=agent_cards,
                a2a_agents=ctx.a2a_agents,
                original_task_description=ctx.original_task_description,
                conversation_history=conversation_history,
                turn_num=turn_num,
                max_turns=ctx.max_turns,
                task=task,
                original_fn=original_fn,
                context=context,
                tools=tools,
                agent_response_model=ctx.agent_response_model,
                extension_registry=_extension_registry,
                endpoint=ctx.agent_config.endpoint,
                a2a_agent_name=state.agent_name,
                agent_card=state.agent_card_dict,
            )
            if final_result is not None:
                return final_result
            if next_request is not None:
                current_request = next_request
                continue
            return _emit_delegation_failed(
                error_msg,
                turn_num,
                task,
                self,
                ctx.agent_config.endpoint,
                state.agent_name,
                state.agent_card_dict,
            )
        # Loop exhausted without a final answer.
        return _handle_max_turns_exceeded(
            conversation_history,
            ctx.max_turns,
            from_task=task,
            from_agent=self,
            endpoint=ctx.agent_config.endpoint,
            a2a_agent_name=state.agent_name,
            agent_card=state.agent_card_dict,
        )
    finally:
        # The prompt is repeatedly rewritten during the loop; always restore it.
        task.description = ctx.original_task_description
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/a2a/wrapper.py",
"license": "MIT License",
"lines": 1513,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai/src/crewai/agent/internal/meta.py | """Generic metaclass for agent extensions.
This metaclass enables extension capabilities for agents by detecting
extension fields in class annotations and applying appropriate wrappers.
"""
from typing import Any
import warnings
from pydantic import model_validator
from pydantic._internal._model_construction import ModelMetaclass
class AgentMeta(ModelMetaclass):
    """Generic metaclass for agent extensions.

    Detects extension fields (like 'a2a') in class annotations and applies
    the appropriate wrapper logic to enable extension functionality.
    """

    def __new__(
        mcs,
        name: str,
        bases: tuple[type, ...],
        namespace: dict[str, Any],
        **kwargs: Any,
    ) -> type:
        """Create a new class with extension support.

        Args:
            name: The name of the class being created
            bases: Base classes
            namespace: Class namespace dictionary
            **kwargs: Additional keyword arguments

        Returns:
            The newly created class with extension support if applicable
        """
        orig_post_init_setup = namespace.get("post_init_setup")
        if orig_post_init_setup is not None:
            # If post_init_setup was already decorated, unwrap to the raw
            # callable so it can be re-wrapped below (pydantic's decorator
            # proxy exposes it via `.wrapped` — presumably; TODO confirm).
            original_func = (
                orig_post_init_setup.wrapped
                if hasattr(orig_post_init_setup, "wrapped")
                else orig_post_init_setup
            )

            def post_init_setup_with_extensions(self: Any) -> Any:
                """Wrap post_init_setup to apply extensions after initialization.

                Args:
                    self: The agent instance

                Returns:
                    The agent instance
                """
                result = original_func(self)
                # Only instances that declare an `a2a` config get wrapped.
                a2a_value = getattr(self, "a2a", None)
                if a2a_value is not None:
                    # Local imports — likely deferred to avoid a circular
                    # import at module load time; confirm before moving.
                    from crewai.a2a.extensions.registry import (
                        create_extension_registry_from_config,
                    )
                    from crewai.a2a.wrapper import wrap_agent_with_a2a_instance

                    extension_registry = create_extension_registry_from_config(
                        a2a_value
                    )
                    wrap_agent_with_a2a_instance(self, extension_registry)
                return result

            # Re-registering the validator makes pydantic warn about
            # overriding an existing one; that is intended here, so suppress
            # just that message.
            with warnings.catch_warnings():
                warnings.filterwarnings(
                    "ignore", message=".*overrides an existing Pydantic.*"
                )
                namespace["post_init_setup"] = model_validator(mode="after")(
                    post_init_setup_with_extensions
                )
        return super().__new__(mcs, name, bases, namespace, **kwargs)
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/agent/internal/meta.py",
"license": "MIT License",
"lines": 63,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai/src/crewai/events/types/a2a_events.py | """Events for A2A (Agent-to-Agent) delegation.
This module defines events emitted during A2A protocol delegation,
including both single-turn and multiturn conversation flows.
"""
from __future__ import annotations
from typing import Any, Literal
from pydantic import model_validator
from crewai.events.base_events import BaseEvent
class A2AEventBase(BaseEvent):
    """Base class for A2A events with task/agent context."""

    # Transient carriers set by emitters; the validator below copies their
    # metadata onto plain fields and clears them so events stay serializable.
    from_task: Any = None
    from_agent: Any = None

    @model_validator(mode="before")
    @classmethod
    def extract_task_and_agent_metadata(cls, data: dict[str, Any]) -> dict[str, Any]:
        """Extract task and agent metadata before validation.

        Copies identifying metadata off the transient ``from_task`` /
        ``from_agent`` objects into plain event fields, then drops the
        object references.
        """
        # Fix: mode="before" validators can receive non-dict input (e.g. a
        # model instance passed through); previously `.get` would raise.
        if not isinstance(data, dict):
            return data
        if task := data.get("from_task"):
            data["task_id"] = str(task.id)
            data["task_name"] = task.name or task.description
            data.setdefault("source_fingerprint", str(task.id))
            data.setdefault("source_type", "task")
            data.setdefault(
                "fingerprint_metadata",
                {
                    "task_id": str(task.id),
                    "task_name": task.name or task.description,
                },
            )
            data["from_task"] = None
        if agent := data.get("from_agent"):
            data["agent_id"] = str(agent.id)
            data["agent_role"] = agent.role
            data.setdefault("source_fingerprint", str(agent.id))
            data.setdefault("source_type", "agent")
            data.setdefault(
                "fingerprint_metadata",
                {
                    "agent_id": str(agent.id),
                    "agent_role": agent.role,
                },
            )
            data["from_agent"] = None
        return data
class A2ADelegationStartedEvent(A2AEventBase):
    """Event emitted when A2A delegation starts.

    Attributes:
        endpoint: A2A agent endpoint URL (AgentCard URL).
        task_description: Task being delegated to the A2A agent.
        agent_id: A2A agent identifier.
        context_id: A2A context ID grouping related tasks.
        is_multiturn: Whether this is part of a multiturn conversation.
        turn_number: Current turn number (1-indexed, 1 for single-turn).
        a2a_agent_name: Name of the A2A agent from agent card.
        agent_card: Full A2A agent card metadata.
        protocol_version: A2A protocol version being used.
        provider: Agent provider/organization info from agent card.
        skill_id: ID of the specific skill being invoked.
        metadata: Custom A2A metadata key-value pairs.
        extensions: List of A2A extension URIs in use.
    """

    # Discriminator string identifying this event type on the event bus.
    type: str = "a2a_delegation_started"
    endpoint: str
    task_description: str
    agent_id: str
    context_id: str | None = None
    is_multiturn: bool = False
    turn_number: int = 1
    a2a_agent_name: str | None = None
    agent_card: dict[str, Any] | None = None
    protocol_version: str | None = None
    provider: dict[str, Any] | None = None
    skill_id: str | None = None
    metadata: dict[str, Any] | None = None
    extensions: list[str] | None = None
class A2ADelegationCompletedEvent(A2AEventBase):
    """Event emitted when A2A delegation completes.

    Attributes:
        status: Completion status (completed, input_required, failed, etc.).
        result: Result message if status is completed.
        error: Error/response message (error for failed, response for input_required).
        context_id: A2A context ID grouping related tasks.
        is_multiturn: Whether this is part of a multiturn conversation.
        endpoint: A2A agent endpoint URL.
        a2a_agent_name: Name of the A2A agent from agent card.
        agent_card: Full A2A agent card metadata.
        provider: Agent provider/organization info from agent card.
        metadata: Custom A2A metadata key-value pairs.
        extensions: List of A2A extension URIs in use.
    """

    # Discriminator string identifying this event type on the event bus.
    type: str = "a2a_delegation_completed"
    status: str
    result: str | None = None
    error: str | None = None
    context_id: str | None = None
    is_multiturn: bool = False
    endpoint: str | None = None
    a2a_agent_name: str | None = None
    agent_card: dict[str, Any] | None = None
    provider: dict[str, Any] | None = None
    metadata: dict[str, Any] | None = None
    extensions: list[str] | None = None
class A2AConversationStartedEvent(A2AEventBase):
    """Event emitted when a multiturn A2A conversation starts.

    This is emitted once at the beginning of a multiturn conversation,
    before the first message exchange.

    Attributes:
        agent_id: A2A agent identifier.
        endpoint: A2A agent endpoint URL.
        context_id: A2A context ID grouping related tasks.
        a2a_agent_name: Name of the A2A agent from agent card.
        agent_card: Full A2A agent card metadata.
        protocol_version: A2A protocol version being used.
        provider: Agent provider/organization info from agent card.
        skill_id: ID of the specific skill being invoked.
        reference_task_ids: Related task IDs for context.
        metadata: Custom A2A metadata key-value pairs.
        extensions: List of A2A extension URIs in use.
    """

    # Discriminator string identifying this event type on the event bus.
    type: str = "a2a_conversation_started"
    agent_id: str
    endpoint: str
    context_id: str | None = None
    a2a_agent_name: str | None = None
    agent_card: dict[str, Any] | None = None
    protocol_version: str | None = None
    provider: dict[str, Any] | None = None
    skill_id: str | None = None
    reference_task_ids: list[str] | None = None
    metadata: dict[str, Any] | None = None
    extensions: list[str] | None = None
class A2AMessageSentEvent(A2AEventBase):
    """Event emitted when a message is sent to the A2A agent.

    Attributes:
        message: Message content sent to the A2A agent.
        turn_number: Current turn number (1-indexed).
        context_id: A2A context ID grouping related tasks.
        message_id: Unique A2A message identifier.
        is_multiturn: Whether this is part of a multiturn conversation.
        agent_role: Role of the CrewAI agent sending the message.
        endpoint: A2A agent endpoint URL.
        a2a_agent_name: Name of the A2A agent from agent card.
        skill_id: ID of the specific skill being invoked.
        metadata: Custom A2A metadata key-value pairs.
        extensions: List of A2A extension URIs in use.
    """

    # Discriminator string identifying this event type on the event bus.
    type: str = "a2a_message_sent"
    message: str
    turn_number: int
    context_id: str | None = None
    message_id: str | None = None
    is_multiturn: bool = False
    agent_role: str | None = None
    endpoint: str | None = None
    a2a_agent_name: str | None = None
    skill_id: str | None = None
    metadata: dict[str, Any] | None = None
    extensions: list[str] | None = None
class A2AResponseReceivedEvent(A2AEventBase):
    """Event emitted when a response is received from the A2A agent.

    Attributes:
        response: Response content from the A2A agent.
        turn_number: Current turn number (1-indexed).
        context_id: A2A context ID grouping related tasks.
        message_id: Unique A2A message identifier.
        is_multiturn: Whether this is part of a multiturn conversation.
        status: Response status (input_required, completed, etc.).
        final: Whether this is the final response in the stream.
        agent_role: Role of the CrewAI agent (for display).
        endpoint: A2A agent endpoint URL.
        a2a_agent_name: Name of the A2A agent from agent card.
        metadata: Custom A2A metadata key-value pairs.
        extensions: List of A2A extension URIs in use.
    """

    # Discriminator string identifying this event type on the event bus.
    type: str = "a2a_response_received"
    response: str
    turn_number: int
    context_id: str | None = None
    message_id: str | None = None
    is_multiturn: bool = False
    status: str
    final: bool = False
    agent_role: str | None = None
    endpoint: str | None = None
    a2a_agent_name: str | None = None
    metadata: dict[str, Any] | None = None
    extensions: list[str] | None = None
class A2AConversationCompletedEvent(A2AEventBase):
    """Event emitted when a multiturn A2A conversation completes.

    This is emitted once at the end of a multiturn conversation.

    Attributes:
        status: Final status (completed, failed, etc.).
        final_result: Final result if completed successfully.
        error: Error message if failed.
        context_id: A2A context ID grouping related tasks.
        total_turns: Total number of turns in the conversation.
        endpoint: A2A agent endpoint URL.
        a2a_agent_name: Name of the A2A agent from agent card.
        agent_card: Full A2A agent card metadata.
        reference_task_ids: Related task IDs for context.
        metadata: Custom A2A metadata key-value pairs.
        extensions: List of A2A extension URIs in use.
    """

    # Discriminator string identifying this event type on the event bus.
    type: str = "a2a_conversation_completed"
    status: Literal["completed", "failed"]
    final_result: str | None = None
    error: str | None = None
    context_id: str | None = None
    total_turns: int
    endpoint: str | None = None
    a2a_agent_name: str | None = None
    agent_card: dict[str, Any] | None = None
    reference_task_ids: list[str] | None = None
    metadata: dict[str, Any] | None = None
    extensions: list[str] | None = None
class A2APollingStartedEvent(A2AEventBase):
    """Event emitted when polling mode begins for A2A delegation.

    Attributes:
        task_id: A2A task ID being polled.
        context_id: A2A context ID grouping related tasks.
        polling_interval: Seconds between poll attempts.
        endpoint: A2A agent endpoint URL.
        a2a_agent_name: Name of the A2A agent from agent card.
        metadata: Custom A2A metadata key-value pairs.
    """

    # Discriminator string identifying this event type on the event bus.
    type: str = "a2a_polling_started"
    task_id: str
    context_id: str | None = None
    polling_interval: float
    endpoint: str
    a2a_agent_name: str | None = None
    metadata: dict[str, Any] | None = None
class A2APollingStatusEvent(A2AEventBase):
    """Event emitted on each polling iteration.

    Attributes:
        task_id: A2A task ID being polled.
        context_id: A2A context ID grouping related tasks.
        state: Current task state from remote agent.
        elapsed_seconds: Time since polling started.
        poll_count: Number of polls completed.
        endpoint: A2A agent endpoint URL.
        a2a_agent_name: Name of the A2A agent from agent card.
        metadata: Custom A2A metadata key-value pairs.
    """

    # Discriminator string identifying this event type on the event bus.
    type: str = "a2a_polling_status"
    task_id: str
    context_id: str | None = None
    state: str
    elapsed_seconds: float
    poll_count: int
    endpoint: str | None = None
    a2a_agent_name: str | None = None
    metadata: dict[str, Any] | None = None
class A2APushNotificationRegisteredEvent(A2AEventBase):
    """Event emitted when push notification callback is registered.

    Attributes:
        task_id: A2A task ID for which callback is registered.
        context_id: A2A context ID grouping related tasks.
        callback_url: URL where agent will send push notifications.
        endpoint: A2A agent endpoint URL.
        a2a_agent_name: Name of the A2A agent from agent card.
        metadata: Custom A2A metadata key-value pairs.
    """

    # Discriminator string identifying this event type on the event bus.
    type: str = "a2a_push_notification_registered"
    task_id: str
    context_id: str | None = None
    callback_url: str
    endpoint: str | None = None
    a2a_agent_name: str | None = None
    metadata: dict[str, Any] | None = None
class A2APushNotificationReceivedEvent(A2AEventBase):
    """Event emitted when a push notification is received.

    This event should be emitted by the user's webhook handler when it receives
    a push notification from the remote A2A agent, before calling
    `result_store.store_result()`.

    Attributes:
        task_id: A2A task ID from the notification.
        context_id: A2A context ID grouping related tasks.
        state: Current task state from the notification.
        endpoint: A2A agent endpoint URL.
        a2a_agent_name: Name of the A2A agent from agent card.
        metadata: Custom A2A metadata key-value pairs.
    """

    # Fixed discriminator identifying this event type.
    type: str = "a2a_push_notification_received"
    task_id: str
    context_id: str | None = None
    state: str
    endpoint: str | None = None
    a2a_agent_name: str | None = None
    metadata: dict[str, Any] | None = None
class A2APushNotificationSentEvent(A2AEventBase):
    """Event emitted when a push notification is sent to a callback URL.

    Emitted by the A2A server when it sends a task status update to the
    client's registered push notification callback URL.

    Attributes:
        task_id: A2A task ID being notified.
        context_id: A2A context ID grouping related tasks.
        callback_url: URL the notification was sent to.
        state: Task state being reported.
        success: Whether the notification was successfully delivered.
        error: Error message if delivery failed.
        metadata: Custom A2A metadata key-value pairs.
    """

    # Fixed discriminator identifying this event type.
    type: str = "a2a_push_notification_sent"
    task_id: str
    context_id: str | None = None
    callback_url: str
    state: str
    success: bool = True
    error: str | None = None
    metadata: dict[str, Any] | None = None
class A2APushNotificationTimeoutEvent(A2AEventBase):
    """Event emitted when push notification wait times out.

    Attributes:
        task_id: A2A task ID that timed out.
        context_id: A2A context ID grouping related tasks.
        timeout_seconds: Timeout duration in seconds.
        endpoint: A2A agent endpoint URL.
        a2a_agent_name: Name of the A2A agent from agent card.
        metadata: Custom A2A metadata key-value pairs.
    """

    # Fixed discriminator identifying this event type.
    type: str = "a2a_push_notification_timeout"
    task_id: str
    context_id: str | None = None
    timeout_seconds: float
    endpoint: str | None = None
    a2a_agent_name: str | None = None
    metadata: dict[str, Any] | None = None
class A2AStreamingStartedEvent(A2AEventBase):
    """Event emitted when streaming mode begins for A2A delegation.

    Attributes:
        task_id: A2A task ID for the streaming session.
        context_id: A2A context ID grouping related tasks.
        endpoint: A2A agent endpoint URL.
        a2a_agent_name: Name of the A2A agent from agent card.
        turn_number: Current turn number (1-indexed).
        is_multiturn: Whether this is part of a multiturn conversation.
        agent_role: Role of the CrewAI agent.
        metadata: Custom A2A metadata key-value pairs.
        extensions: List of A2A extension URIs in use.
    """

    # Fixed discriminator identifying this event type.
    type: str = "a2a_streaming_started"
    task_id: str | None = None
    context_id: str | None = None
    endpoint: str
    a2a_agent_name: str | None = None
    turn_number: int = 1
    is_multiturn: bool = False
    agent_role: str | None = None
    metadata: dict[str, Any] | None = None
    extensions: list[str] | None = None
class A2AStreamingChunkEvent(A2AEventBase):
    """Event emitted when a streaming chunk is received.

    Attributes:
        task_id: A2A task ID for the streaming session.
        context_id: A2A context ID grouping related tasks.
        chunk: The text content of the chunk.
        chunk_index: Index of this chunk in the stream (0-indexed).
        final: Whether this is the final chunk in the stream.
        endpoint: A2A agent endpoint URL.
        a2a_agent_name: Name of the A2A agent from agent card.
        turn_number: Current turn number (1-indexed).
        is_multiturn: Whether this is part of a multiturn conversation.
        metadata: Custom A2A metadata key-value pairs.
        extensions: List of A2A extension URIs in use.
    """

    # Fixed discriminator identifying this event type.
    type: str = "a2a_streaming_chunk"
    task_id: str | None = None
    context_id: str | None = None
    chunk: str
    chunk_index: int
    final: bool = False
    endpoint: str | None = None
    a2a_agent_name: str | None = None
    turn_number: int = 1
    is_multiturn: bool = False
    metadata: dict[str, Any] | None = None
    extensions: list[str] | None = None
class A2AAgentCardFetchedEvent(A2AEventBase):
    """Event emitted when an agent card is successfully fetched.

    Attributes:
        endpoint: A2A agent endpoint URL.
        a2a_agent_name: Name of the A2A agent from agent card.
        agent_card: Full A2A agent card metadata.
        protocol_version: A2A protocol version from agent card.
        provider: Agent provider/organization info from agent card.
        cached: Whether the agent card was retrieved from cache.
        fetch_time_ms: Time taken to fetch the agent card in milliseconds.
        metadata: Custom A2A metadata key-value pairs.
    """

    # Fixed discriminator identifying this event type.
    type: str = "a2a_agent_card_fetched"
    endpoint: str
    a2a_agent_name: str | None = None
    agent_card: dict[str, Any] | None = None
    protocol_version: str | None = None
    provider: dict[str, Any] | None = None
    cached: bool = False
    fetch_time_ms: float | None = None
    metadata: dict[str, Any] | None = None
class A2AAuthenticationFailedEvent(A2AEventBase):
    """Event emitted when authentication to an A2A agent fails.

    Attributes:
        endpoint: A2A agent endpoint URL.
        auth_type: Type of authentication attempted (e.g., bearer, oauth2, api_key).
        error: Error message describing the failure.
        status_code: HTTP status code if applicable.
        a2a_agent_name: Name of the A2A agent if known.
        protocol_version: A2A protocol version being used.
        metadata: Custom A2A metadata key-value pairs.
    """

    # Fixed discriminator identifying this event type.
    type: str = "a2a_authentication_failed"
    endpoint: str
    auth_type: str | None = None
    error: str
    status_code: int | None = None
    a2a_agent_name: str | None = None
    protocol_version: str | None = None
    metadata: dict[str, Any] | None = None
class A2AArtifactReceivedEvent(A2AEventBase):
    """Event emitted when an artifact is received from a remote A2A agent.

    Attributes:
        task_id: A2A task ID the artifact belongs to.
        artifact_id: Unique identifier for the artifact.
        artifact_name: Name of the artifact.
        artifact_description: Purpose description of the artifact.
        mime_type: MIME type of the artifact content.
        size_bytes: Size of the artifact in bytes.
        append: Whether content should be appended to existing artifact.
        last_chunk: Whether this is the final chunk of the artifact.
        endpoint: A2A agent endpoint URL.
        a2a_agent_name: Name of the A2A agent from agent card.
        context_id: Context ID for correlation.
        turn_number: Current turn number (1-indexed).
        is_multiturn: Whether this is part of a multiturn conversation.
        metadata: Custom A2A metadata key-value pairs.
        extensions: List of A2A extension URIs in use.
    """

    # Fixed discriminator identifying this event type.
    type: str = "a2a_artifact_received"
    task_id: str
    artifact_id: str
    artifact_name: str | None = None
    artifact_description: str | None = None
    mime_type: str | None = None
    size_bytes: int | None = None
    append: bool = False
    last_chunk: bool = False
    endpoint: str | None = None
    a2a_agent_name: str | None = None
    context_id: str | None = None
    turn_number: int = 1
    is_multiturn: bool = False
    metadata: dict[str, Any] | None = None
    extensions: list[str] | None = None
class A2AConnectionErrorEvent(A2AEventBase):
    """Event emitted when a connection error occurs during A2A communication.

    Attributes:
        endpoint: A2A agent endpoint URL.
        error: Error message describing the connection failure.
        error_type: Type of error (e.g., timeout, connection_refused, dns_error).
        status_code: HTTP status code if applicable.
        a2a_agent_name: Name of the A2A agent from agent card.
        operation: The operation being attempted when error occurred.
        context_id: A2A context ID grouping related tasks.
        task_id: A2A task ID if applicable.
        metadata: Custom A2A metadata key-value pairs.
    """

    # Fixed discriminator identifying this event type.
    type: str = "a2a_connection_error"
    endpoint: str
    error: str
    error_type: str | None = None
    status_code: int | None = None
    a2a_agent_name: str | None = None
    operation: str | None = None
    context_id: str | None = None
    task_id: str | None = None
    metadata: dict[str, Any] | None = None
class A2AServerTaskStartedEvent(A2AEventBase):
    """Event emitted when an A2A server task execution starts.

    Attributes:
        task_id: A2A task ID for this execution.
        context_id: A2A context ID grouping related tasks.
        metadata: Custom A2A metadata key-value pairs.
    """

    # Fixed discriminator identifying this event type.
    type: str = "a2a_server_task_started"
    task_id: str
    context_id: str
    metadata: dict[str, Any] | None = None
class A2AServerTaskCompletedEvent(A2AEventBase):
    """Event emitted when an A2A server task execution completes.

    Attributes:
        task_id: A2A task ID for this execution.
        context_id: A2A context ID grouping related tasks.
        result: The task result.
        metadata: Custom A2A metadata key-value pairs.
    """

    # Fixed discriminator identifying this event type.
    type: str = "a2a_server_task_completed"
    task_id: str
    context_id: str
    result: str
    metadata: dict[str, Any] | None = None
class A2AServerTaskCanceledEvent(A2AEventBase):
    """Event emitted when an A2A server task execution is canceled.

    Attributes:
        task_id: A2A task ID for this execution.
        context_id: A2A context ID grouping related tasks.
        metadata: Custom A2A metadata key-value pairs.
    """

    # Fixed discriminator identifying this event type.
    type: str = "a2a_server_task_canceled"
    task_id: str
    context_id: str
    metadata: dict[str, Any] | None = None
class A2AServerTaskFailedEvent(A2AEventBase):
    """Event emitted when an A2A server task execution fails.

    Attributes:
        task_id: A2A task ID for this execution.
        context_id: A2A context ID grouping related tasks.
        error: Error message describing the failure.
        metadata: Custom A2A metadata key-value pairs.
    """

    # Fixed discriminator identifying this event type.
    type: str = "a2a_server_task_failed"
    task_id: str
    context_id: str
    error: str
    metadata: dict[str, Any] | None = None
class A2AParallelDelegationStartedEvent(A2AEventBase):
    """Event emitted when parallel delegation to multiple A2A agents begins.

    Attributes:
        endpoints: List of A2A agent endpoints being delegated to.
        task_description: Description of the task being delegated.
    """

    # Fixed discriminator identifying this event type.
    type: str = "a2a_parallel_delegation_started"
    endpoints: list[str]
    task_description: str
class A2AParallelDelegationCompletedEvent(A2AEventBase):
    """Event emitted when parallel delegation to multiple A2A agents completes.

    Attributes:
        endpoints: List of A2A agent endpoints that were delegated to.
        success_count: Number of successful delegations.
        failure_count: Number of failed delegations.
        results: Summary of results from each agent.
    """

    # Fixed discriminator identifying this event type.
    type: str = "a2a_parallel_delegation_completed"
    endpoints: list[str]
    success_count: int
    failure_count: int
    results: dict[str, str] | None = None
class A2ATransportNegotiatedEvent(A2AEventBase):
    """Event emitted when transport protocol is negotiated with an A2A agent.

    This event is emitted after comparing client and server transport capabilities
    to select the optimal transport protocol and endpoint URL.

    Attributes:
        endpoint: Original A2A agent endpoint URL.
        a2a_agent_name: Name of the A2A agent from agent card.
        negotiated_transport: The transport protocol selected (JSONRPC, GRPC, HTTP+JSON).
        negotiated_url: The URL to use for the selected transport.
        source: How the transport was selected ('client_preferred', 'server_preferred', 'fallback').
        client_supported_transports: Transports the client can use.
        server_supported_transports: Transports the server supports.
        server_preferred_transport: The server's preferred transport from AgentCard.
        client_preferred_transport: The client's preferred transport if set.
        metadata: Custom A2A metadata key-value pairs.
    """

    # Fixed discriminator identifying this event type.
    type: str = "a2a_transport_negotiated"
    endpoint: str
    a2a_agent_name: str | None = None
    negotiated_transport: str
    negotiated_url: str
    source: str
    client_supported_transports: list[str]
    server_supported_transports: list[str]
    server_preferred_transport: str
    client_preferred_transport: str | None = None
    metadata: dict[str, Any] | None = None
class A2AContentTypeNegotiatedEvent(A2AEventBase):
    """Event emitted when content types are negotiated with an A2A agent.

    This event is emitted after comparing client and server input/output mode
    capabilities to determine compatible MIME types for communication.

    Attributes:
        endpoint: A2A agent endpoint URL.
        a2a_agent_name: Name of the A2A agent from agent card.
        skill_name: Skill name if negotiation was skill-specific.
        client_input_modes: MIME types the client can send.
        client_output_modes: MIME types the client can accept.
        server_input_modes: MIME types the server accepts.
        server_output_modes: MIME types the server produces.
        negotiated_input_modes: Compatible input MIME types selected.
        negotiated_output_modes: Compatible output MIME types selected.
        negotiation_success: Whether compatible types were found for both directions.
        metadata: Custom A2A metadata key-value pairs.
    """

    # Fixed discriminator identifying this event type.
    type: str = "a2a_content_type_negotiated"
    endpoint: str
    a2a_agent_name: str | None = None
    skill_name: str | None = None
    client_input_modes: list[str]
    client_output_modes: list[str]
    server_input_modes: list[str]
    server_output_modes: list[str]
    negotiated_input_modes: list[str]
    negotiated_output_modes: list[str]
    negotiation_success: bool = True
    metadata: dict[str, Any] | None = None
# -----------------------------------------------------------------------------
# Context Lifecycle Events
# -----------------------------------------------------------------------------
class A2AContextCreatedEvent(A2AEventBase):
    """Event emitted when an A2A context is created.

    Contexts group related tasks in a conversation or workflow.

    Attributes:
        context_id: Unique identifier for the context.
        created_at: Unix timestamp when context was created.
        metadata: Custom A2A metadata key-value pairs.
    """

    # Fixed discriminator identifying this event type.
    type: str = "a2a_context_created"
    context_id: str
    created_at: float
    metadata: dict[str, Any] | None = None
class A2AContextExpiredEvent(A2AEventBase):
    """Event emitted when an A2A context expires due to TTL.

    Attributes:
        context_id: The expired context identifier.
        created_at: Unix timestamp when context was created.
        age_seconds: How long the context existed before expiring.
        task_count: Number of tasks in the context when expired.
        metadata: Custom A2A metadata key-value pairs.
    """

    # Fixed discriminator identifying this event type.
    type: str = "a2a_context_expired"
    context_id: str
    created_at: float
    age_seconds: float
    task_count: int
    metadata: dict[str, Any] | None = None
class A2AContextIdleEvent(A2AEventBase):
    """Event emitted when an A2A context becomes idle.

    Idle contexts have had no activity for the configured threshold.

    Attributes:
        context_id: The idle context identifier.
        idle_seconds: Seconds since last activity.
        task_count: Number of tasks in the context.
        metadata: Custom A2A metadata key-value pairs.
    """

    # Fixed discriminator identifying this event type.
    type: str = "a2a_context_idle"
    context_id: str
    idle_seconds: float
    task_count: int
    metadata: dict[str, Any] | None = None
class A2AContextCompletedEvent(A2AEventBase):
    """Event emitted when all tasks in an A2A context complete.

    Attributes:
        context_id: The completed context identifier.
        total_tasks: Total number of tasks that were in the context.
        duration_seconds: Total context lifetime in seconds.
        metadata: Custom A2A metadata key-value pairs.
    """

    # Fixed discriminator identifying this event type.
    type: str = "a2a_context_completed"
    context_id: str
    total_tasks: int
    duration_seconds: float
    metadata: dict[str, Any] | None = None
class A2AContextPrunedEvent(A2AEventBase):
    """Event emitted when an A2A context is pruned (deleted).

    Pruning removes the context metadata and optionally associated tasks.

    Attributes:
        context_id: The pruned context identifier.
        task_count: Number of tasks that were in the context.
        age_seconds: How long the context existed before pruning.
        metadata: Custom A2A metadata key-value pairs.
    """

    # Fixed discriminator identifying this event type.
    type: str = "a2a_context_pruned"
    context_id: str
    task_count: int
    age_seconds: float
    metadata: dict[str, Any] | None = None
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/events/types/a2a_events.py",
"license": "MIT License",
"lines": 672,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
crewAIInc/crewAI:lib/crewai/src/crewai/types/utils.py | """Utilities for creating and manipulating types."""
from typing import Annotated, Final, Literal, cast
# NOTE(review): not referenced in this chunk — presumably the display name used
# for dynamically-built Literal aliases elsewhere; confirm against callers.
_DYNAMIC_LITERAL_ALIAS: Final[Literal["DynamicLiteral"]] = "DynamicLiteral"
def create_literals_from_strings(
    values: Annotated[
        tuple[str, ...], "Should contain unique strings; duplicates will be removed"
    ],
) -> type:
    """Build a ``Literal`` type whose members are the given A2A agent IDs.

    Duplicate IDs are dropped while preserving first-seen order.

    Args:
        values: Tuple of A2A agent ID strings.

    Returns:
        A ``Literal`` type admitting exactly the deduplicated IDs.

    Raises:
        ValueError: If values is empty (Literal requires at least one value).
    """
    # dict.fromkeys deduplicates while keeping insertion order.
    deduped: tuple[str, ...] = tuple(dict.fromkeys(values))
    if not deduped:
        raise ValueError("Cannot create Literal type from empty values")
    # Subscripting Literal with a tuple is equivalent to Literal[a, b, ...].
    return cast(type, Literal[deduped])
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/types/utils.py",
"license": "MIT License",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
crewAIInc/crewAI:lib/crewai/tests/agents/test_agent_a2a_wrapping.py | """Test A2A wrapper is only applied when a2a is passed to Agent."""
from unittest.mock import patch

import pytest

from crewai import Agent
from crewai.a2a.config import A2AConfig

# Feature probe: record whether the optional a2a-sdk package is importable so
# tests that require it can be skipped via pytest.mark.skipif.
try:
    import a2a  # noqa: F401

    A2A_SDK_INSTALLED = True
except ImportError:
    A2A_SDK_INSTALLED = False
def test_agent_without_a2a_has_no_wrapper():
    """An agent built without `a2a` keeps a plain, callable execute_task."""
    plain_agent = Agent(
        role="test role",
        goal="test goal",
        backstory="test backstory",
    )
    assert plain_agent.a2a is None
    assert callable(plain_agent.execute_task)
@pytest.mark.skipif(
    True,
    reason="Requires a2a-sdk to be installed. This test verifies wrapper is applied when a2a is set.",
)
def test_agent_with_a2a_has_wrapper():
    """An agent built with an `a2a` config retains it and stays callable."""
    config = A2AConfig(endpoint="http://test-endpoint.com")
    wrapped_agent = Agent(
        role="test role",
        goal="test goal",
        backstory="test backstory",
        a2a=config,
    )
    assert wrapped_agent.a2a is not None
    assert wrapped_agent.a2a.endpoint == "http://test-endpoint.com"
    assert callable(wrapped_agent.execute_task)
@pytest.mark.skipif(not A2A_SDK_INSTALLED, reason="Requires a2a-sdk to be installed")
def test_agent_with_a2a_creates_successfully():
    """With a2a-sdk present, the a2a config is kept and execute_task is wrapped."""
    config = A2AConfig(endpoint="http://test-endpoint.com")
    wrapped_agent = Agent(
        role="test role",
        goal="test goal",
        backstory="test backstory",
        a2a=config,
    )
    assert wrapped_agent.a2a is not None
    # Endpoint is normalized with a trailing slash.
    assert wrapped_agent.a2a.endpoint == "http://test-endpoint.com/"
    assert callable(wrapped_agent.execute_task)
    assert hasattr(wrapped_agent.execute_task, "__wrapped__")
def test_multiple_agents_without_a2a():
    """Several agents created without `a2a` all stay unwrapped."""
    agents = [
        Agent(role=role, goal="test goal", backstory="test backstory")
        for role in ("agent 1", "agent 2")
    ]
    for agent in agents:
        assert agent.a2a is None
        assert callable(agent.execute_task)
@pytest.mark.skipif(not A2A_SDK_INSTALLED, reason="Requires a2a-sdk to be installed")
def test_wrapper_is_applied_differently_per_instance():
    """Only the a2a-configured agent gets a wrapped execute_task."""
    unwrapped = Agent(
        role="agent without a2a",
        goal="test goal",
        backstory="test backstory",
    )
    wrapped = Agent(
        role="agent with a2a",
        goal="test goal",
        backstory="test backstory",
        a2a=A2AConfig(endpoint="http://test-endpoint.com"),
    )
    # The underlying functions must differ, and only the a2a agent carries
    # the functools.wraps marker.
    assert unwrapped.execute_task.__func__ is not wrapped.execute_task.__func__
    assert not hasattr(unwrapped.execute_task, "__wrapped__")
    assert hasattr(wrapped.execute_task, "__wrapped__")
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/agents/test_agent_a2a_wrapping.py",
"license": "MIT License",
"lines": 87,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/src/crewai/flow/visualization/builder.py | """Flow structure builder for analyzing Flow execution."""
from __future__ import annotations
from collections import defaultdict
import inspect
import logging
from typing import TYPE_CHECKING, Any
from crewai.flow.constants import AND_CONDITION, OR_CONDITION
from crewai.flow.flow_wrappers import FlowCondition
from crewai.flow.types import FlowMethodName
from crewai.flow.utils import (
is_flow_condition_dict,
is_simple_flow_condition,
)
from crewai.flow.visualization.schema import extract_method_signature
from crewai.flow.visualization.types import FlowStructure, NodeMetadata, StructureEdge
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
from crewai.flow.flow import Flow
def _extract_direct_or_triggers(
condition: str | dict[str, Any] | list[Any] | FlowCondition,
) -> list[str]:
"""Extract direct OR-level trigger strings from a condition.
This function extracts strings that would directly trigger a listener,
meaning they appear at the top level of an OR condition. Strings nested
inside AND conditions are NOT considered direct triggers for router paths.
For example:
- or_("a", "b") -> ["a", "b"] (both are direct triggers)
- and_("a", "b") -> [] (neither are direct triggers, both required)
- or_(and_("a", "b"), "c") -> ["c"] (only "c" is a direct trigger)
Args:
condition: Can be a string, dict, or list.
Returns:
List of direct OR-level trigger strings.
"""
if isinstance(condition, str):
return [condition]
if isinstance(condition, dict):
cond_type = condition.get("type", OR_CONDITION)
conditions_list = condition.get("conditions", [])
if cond_type == OR_CONDITION:
strings = []
for sub_cond in conditions_list:
strings.extend(_extract_direct_or_triggers(sub_cond))
return strings
return []
if isinstance(condition, list):
strings = []
for item in condition:
strings.extend(_extract_direct_or_triggers(item))
return strings
if callable(condition) and hasattr(condition, "__name__"):
return [condition.__name__]
return []
def _extract_all_trigger_names(
condition: str | dict[str, Any] | list[Any] | FlowCondition,
) -> list[str]:
"""Extract ALL trigger names from a condition for display purposes.
Unlike _extract_direct_or_triggers, this extracts ALL strings and method
names from the entire condition tree, including those nested in AND conditions.
This is used for displaying trigger information in the UI.
For example:
- or_("a", "b") -> ["a", "b"]
- and_("a", "b") -> ["a", "b"]
- or_(and_("a", method_6), method_4) -> ["a", "method_6", "method_4"]
Args:
condition: Can be a string, dict, or list.
Returns:
List of all trigger names found in the condition.
"""
if isinstance(condition, str):
return [condition]
if isinstance(condition, dict):
conditions_list = condition.get("conditions", [])
strings = []
for sub_cond in conditions_list:
strings.extend(_extract_all_trigger_names(sub_cond))
return strings
if isinstance(condition, list):
strings = []
for item in condition:
strings.extend(_extract_all_trigger_names(item))
return strings
if callable(condition) and hasattr(condition, "__name__"):
return [condition.__name__]
return []
def _create_edges_from_condition(
    condition: str | dict[str, Any] | list[Any] | FlowCondition,
    target: str,
    nodes: dict[str, NodeMetadata],
) -> list[StructureEdge]:
    """Create edges from a condition tree, preserving AND/OR semantics.

    This function recursively processes the condition tree and creates edges
    with the appropriate condition_type for each trigger.

    For AND conditions, all triggers get edges with condition_type="AND".
    For OR conditions, triggers get edges with condition_type="OR".

    Args:
        condition: The condition tree (string, dict, or list).
        target: The target node name.
        nodes: Dictionary of all nodes for validation.

    Returns:
        List of StructureEdge objects representing the condition.
    """
    edges: list[StructureEdge] = []
    if isinstance(condition, str):
        # Bare string: a single OR-style edge, but only if it names a known node.
        if condition in nodes:
            edges.append(
                StructureEdge(
                    source=condition,
                    target=target,
                    condition_type=OR_CONDITION,
                    is_router_path=False,
                )
            )
    elif callable(condition) and hasattr(condition, "__name__"):
        # A method reference is treated like its name as an OR trigger.
        method_name = condition.__name__
        if method_name in nodes:
            edges.append(
                StructureEdge(
                    source=method_name,
                    target=target,
                    condition_type=OR_CONDITION,
                    is_router_path=False,
                )
            )
    elif isinstance(condition, dict):
        cond_type = condition.get("type", OR_CONDITION)
        conditions_list = condition.get("conditions", [])
        if cond_type == AND_CONDITION:
            # NOTE: an AND node flattens ALL names in its subtree (including any
            # nested OR children) into AND edges via _extract_all_trigger_names.
            triggers = _extract_all_trigger_names(condition)
            edges.extend(
                StructureEdge(
                    source=trigger,
                    target=target,
                    condition_type=AND_CONDITION,
                    is_router_path=False,
                )
                for trigger in triggers
                if trigger in nodes
            )
        else:
            # OR (or untyped) dict: recurse into each child independently.
            for sub_cond in conditions_list:
                edges.extend(_create_edges_from_condition(sub_cond, target, nodes))
    elif isinstance(condition, list):
        # A plain list behaves like an OR over its items.
        for item in condition:
            edges.extend(_create_edges_from_condition(item, target, nodes))
    return edges
def build_flow_structure(flow: Flow[Any]) -> FlowStructure:
    """Build a structure representation of a Flow's execution.

    Args:
        flow: Flow instance to analyze.

    Returns:
        Dictionary with nodes, edges, start_methods, and router_methods.
    """
    nodes: dict[str, NodeMetadata] = {}
    edges: list[StructureEdge] = []
    start_methods: list[str] = []
    router_methods: list[str] = []
    # Pass 1: one node per flow method, collecting decorator flags, trigger
    # metadata, and best-effort source locations for the UI.
    for method_name, method in flow._methods.items():
        node_metadata: NodeMetadata = {"type": "listen"}
        if hasattr(method, "__is_start_method__") and method.__is_start_method__:
            node_metadata["type"] = "start"
            start_methods.append(method_name)
        if hasattr(method, "__is_router__") and method.__is_router__:
            # Router overrides start/listen for display purposes.
            node_metadata["is_router"] = True
            node_metadata["type"] = "router"
            router_methods.append(method_name)
            if method_name in flow._router_paths:
                node_metadata["router_paths"] = [
                    str(p) for p in flow._router_paths[method_name]
                ]
        if hasattr(method, "__trigger_methods__") and method.__trigger_methods__:
            node_metadata["trigger_methods"] = [
                str(m) for m in method.__trigger_methods__
            ]
        if hasattr(method, "__condition_type__") and method.__condition_type__:
            node_metadata["trigger_condition_type"] = method.__condition_type__
            if "condition_type" not in node_metadata:
                node_metadata["condition_type"] = method.__condition_type__
        # Routers with no explicit condition type display as "IF".
        if node_metadata.get("is_router") and "condition_type" not in node_metadata:
            node_metadata["condition_type"] = "IF"
        if (
            hasattr(method, "__trigger_condition__")
            and method.__trigger_condition__ is not None
        ):
            node_metadata["trigger_condition"] = method.__trigger_condition__
            if "trigger_methods" not in node_metadata:
                extracted = _extract_all_trigger_names(method.__trigger_condition__)
                if extracted:
                    node_metadata["trigger_methods"] = extracted
        node_metadata["method_signature"] = extract_method_signature(
            method, method_name
        )
        # Source introspection is best-effort: decorated/builtin methods may
        # not expose source, so all inspect failures are swallowed.
        try:
            source_code = inspect.getsource(method)
            node_metadata["source_code"] = source_code
            try:
                source_lines, start_line = inspect.getsourcelines(method)
                node_metadata["source_lines"] = source_lines
                node_metadata["source_start_line"] = start_line
            except (OSError, TypeError):
                pass
            try:
                source_file = inspect.getsourcefile(method)
                if source_file:
                    node_metadata["source_file"] = source_file
            except (OSError, TypeError):
                # Fall back to the flow class's file when the method's own
                # source file cannot be determined.
                try:
                    class_file = inspect.getsourcefile(flow.__class__)
                    if class_file:
                        node_metadata["source_file"] = class_file
                except (OSError, TypeError):
                    pass
        except (OSError, TypeError):
            pass
        # Capture the enclosing class's signature/line for display, preferring
        # the actual source line over a reconstructed "class Name(Bases)" form.
        try:
            class_obj = flow.__class__
            if class_obj:
                class_name = class_obj.__name__
                bases = class_obj.__bases__
                if bases:
                    base_strs = []
                    for base in bases:
                        if hasattr(base, "__name__"):
                            if hasattr(base, "__origin__"):
                                base_strs.append(str(base))
                            else:
                                base_strs.append(base.__name__)
                        else:
                            base_strs.append(str(base))
                    try:
                        source_lines = inspect.getsource(class_obj).split("\n")
                        _, class_start_line = inspect.getsourcelines(class_obj)
                        for idx, line in enumerate(source_lines):
                            stripped = line.strip()
                            if stripped.startswith("class ") and class_name in stripped:
                                class_signature = stripped.rstrip(":")
                                node_metadata["class_signature"] = class_signature
                                node_metadata["class_line_number"] = (
                                    class_start_line + idx
                                )
                                break
                    except (OSError, TypeError):
                        class_signature = f"class {class_name}({', '.join(base_strs)})"
                        node_metadata["class_signature"] = class_signature
                else:
                    class_signature = f"class {class_name}"
                    node_metadata["class_signature"] = class_signature
                node_metadata["class_name"] = class_name
        except (OSError, TypeError, AttributeError):
            pass
        nodes[method_name] = node_metadata
    # Pass 2: edges for non-router listeners (router trigger edges are added
    # separately below, so routers are skipped here).
    for listener_name, condition_data in flow._listeners.items():
        if listener_name in router_methods:
            continue
        if is_simple_flow_condition(condition_data):
            cond_type, methods = condition_data
            edges.extend(
                StructureEdge(
                    source=str(trigger_method),
                    target=str(listener_name),
                    condition_type=cond_type,
                    is_router_path=False,
                )
                for trigger_method in methods
                if str(trigger_method) in nodes
            )
        elif is_flow_condition_dict(condition_data):
            edges.extend(
                _create_edges_from_condition(condition_data, str(listener_name), nodes)
            )
    # Pass 3: incoming edges for routers, from their own trigger metadata.
    for method_name, node_metadata in nodes.items():  # type: ignore[assignment]
        if node_metadata.get("is_router") and "trigger_methods" in node_metadata:
            trigger_methods = node_metadata["trigger_methods"]
            condition_type = node_metadata.get("trigger_condition_type", OR_CONDITION)
            if "trigger_condition" in node_metadata:
                edges.extend(
                    _create_edges_from_condition(
                        node_metadata["trigger_condition"],  # type: ignore[arg-type]
                        method_name,
                        nodes,
                    )
                )
            else:
                edges.extend(
                    StructureEdge(
                        source=trigger_method,
                        target=method_name,
                        condition_type=condition_type,
                        is_router_path=False,
                    )
                    for trigger_method in trigger_methods
                    if trigger_method in nodes
                )
    # Collect listener triggers that are plain strings (router outputs),
    # i.e. not names of known methods.
    all_string_triggers: set[str] = set()
    for condition_data in flow._listeners.values():
        if is_simple_flow_condition(condition_data):
            _, methods = condition_data
            for m in methods:
                if str(m) not in nodes:  # It's a string trigger, not a method name
                    all_string_triggers.add(str(m))
        elif is_flow_condition_dict(condition_data):
            for trigger in _extract_direct_or_triggers(condition_data):
                if trigger not in nodes:
                    all_string_triggers.add(trigger)
    # Collect declared router outputs; warn when a router's return paths
    # could not be inferred. NOTE: this mutates flow._router_paths to ensure
    # every router has an (possibly empty) entry.
    all_router_outputs: set[str] = set()
    for router_method_name in router_methods:
        if router_method_name not in flow._router_paths:
            flow._router_paths[FlowMethodName(router_method_name)] = []
        current_paths = flow._router_paths.get(FlowMethodName(router_method_name), [])
        if current_paths and router_method_name in nodes:
            nodes[router_method_name]["router_paths"] = [str(p) for p in current_paths]
            all_router_outputs.update(str(p) for p in current_paths)
        if not current_paths:
            logger.warning(
                f"Could not determine return paths for router '{router_method_name}'. "
                f"Add a return type annotation like "
                f"'-> Literal[\"path1\", \"path2\"]' or '-> YourEnum' "
                f"to enable proper flow visualization."
            )
    # Flag string triggers no router is known to emit.
    orphaned_triggers = all_string_triggers - all_router_outputs
    if orphaned_triggers:
        logger.error(
            f"Found listeners waiting for triggers {orphaned_triggers} "
            f"but no router outputs these values explicitly. "
            f"If your router returns a non-static value, check that your router has proper return type annotations."
        )
    # Pass 4: labelled router-path edges from each router to the listeners
    # whose direct triggers include that path string.
    for router_method_name in router_methods:
        if router_method_name not in flow._router_paths:
            continue
        router_paths = flow._router_paths[FlowMethodName(router_method_name)]
        for path in router_paths:
            for listener_name, condition_data in flow._listeners.items():
                if listener_name == router_method_name:
                    continue
                trigger_strings_from_cond: list[str] = []
                if is_simple_flow_condition(condition_data):
                    _, methods = condition_data
                    trigger_strings_from_cond = [str(m) for m in methods]
                elif is_flow_condition_dict(condition_data):
                    trigger_strings_from_cond = _extract_direct_or_triggers(
                        condition_data
                    )
                if str(path) in trigger_strings_from_cond:
                    edges.append(
                        StructureEdge(
                            source=router_method_name,
                            target=str(listener_name),
                            condition_type=None,
                            is_router_path=True,
                            router_path_label=str(path),
                        )
                    )
    # Safety net: make sure every registered start method has a node even if
    # it was missed in pass 1.
    for start_method in flow._start_methods:
        if start_method not in nodes and start_method in flow._methods:
            method = flow._methods[start_method]
            nodes[str(start_method)] = NodeMetadata(type="start")
            if hasattr(method, "__trigger_methods__") and method.__trigger_methods__:
                nodes[str(start_method)]["trigger_methods"] = [
                    str(m) for m in method.__trigger_methods__
                ]
            if hasattr(method, "__condition_type__") and method.__condition_type__:
                nodes[str(start_method)]["condition_type"] = method.__condition_type__
    return FlowStructure(
        nodes=nodes,
        edges=edges,
        start_methods=start_methods,
        router_methods=router_methods,
    )
def calculate_execution_paths(structure: FlowStructure) -> int:
    """Calculate number of possible execution paths through the flow.

    Paths are counted by a depth-first walk from each start method down to
    the terminal (no-outgoing-edge) nodes; cycles contribute zero paths.

    Args:
        structure: FlowStructure to analyze.

    Returns:
        Number of possible execution paths.
    """
    # Adjacency list: source node -> outgoing edge descriptors.
    adjacency = defaultdict(list)
    for connection in structure["edges"]:
        adjacency[connection["source"]].append(
            {
                "target": connection["target"],
                "is_router": connection["is_router_path"],
                "condition": connection["condition_type"],
            }
        )

    every_node = set(structure["nodes"].keys())
    nodes_with_successors = {connection["source"] for connection in structure["edges"]}
    terminal_nodes = every_node - nodes_with_successors

    # Without an entry point or an exit point there is nothing to count.
    if not structure["start_methods"] or not terminal_nodes:
        return 0

    router_names = structure["router_methods"]

    def _walk(current: str, seen: set[str]) -> int:
        """Recursively count execution paths from *current* to terminal nodes.

        Args:
            current: Node name to start counting from.
            seen: Set of already visited nodes to prevent cycles.

        Returns:
            Number of execution paths from this node to terminal nodes.
        """
        if current in terminal_nodes:
            return 1
        if current in seen:
            # Cycle detected: this walk contributes no additional paths.
            return 0
        seen.add(current)
        successors = adjacency[current]
        if not successors:
            seen.remove(current)
            return 1
        subtotal = 0
        for info in successors:
            # Each branch explores an independent copy of the visited set.
            subtotal += _walk(str(info["target"]), seen.copy())
        seen.remove(current)
        if current in router_names:
            # Router outputs are mutually exclusive alternatives; a fully
            # cyclic router legitimately contributes zero paths.
            return subtotal
        return subtotal if subtotal > 0 else 1

    total_paths = sum(_walk(origin, set()) for origin in structure["start_methods"])
    return max(total_paths, 1)
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/flow/visualization/builder.py",
"license": "MIT License",
"lines": 423,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai/src/crewai/flow/visualization/renderers/interactive.py | """Interactive HTML renderer for Flow structure visualization."""
import json
from pathlib import Path
import tempfile
from typing import Any, ClassVar
import webbrowser
from jinja2 import Environment, FileSystemLoader, nodes, select_autoescape
from jinja2.ext import Extension
from jinja2.parser import Parser
from crewai.flow.visualization.builder import calculate_execution_paths
from crewai.flow.visualization.types import FlowStructure
class CSSExtension(Extension):
    """Custom Jinja2 tag that emits a stylesheet ``<link>`` element.

    Lets templates write {% css 'path/to/file.css' %}.
    """

    tags: ClassVar[set[str]] = {"css"}  # type: ignore[misc]

    def parse(self, parser: Parser) -> nodes.Node:
        """Consume a {% css 'styles.css' %} tag and build its output node.

        Args:
            parser: Jinja2 parser instance.

        Returns:
            Output node with rendered CSS link tag.
        """
        tag_line: int = next(parser.stream).lineno
        href_expr: list[nodes.Expr] = [parser.parse_expression()]
        link_call = self.call_method("_render_css", href_expr)
        return nodes.Output([link_call]).set_lineno(tag_line)

    def _render_css(self, href: str) -> str:
        """Build the HTML link tag for a stylesheet.

        Args:
            href: Path to CSS file.

        Returns:
            HTML link tag string.
        """
        return f'<link rel="stylesheet" href="{href}">'
class JSExtension(Extension):
    """Custom Jinja2 tag that emits a ``<script>`` element.

    Lets templates write {% js 'path/to/file.js' %}.
    """

    tags: ClassVar[set[str]] = {"js"}  # type: ignore[misc]

    def parse(self, parser: Parser) -> nodes.Node:
        """Consume a {% js 'script.js' %} tag and build its output node.

        Args:
            parser: Jinja2 parser instance.

        Returns:
            Output node with rendered script tag.
        """
        tag_line: int = next(parser.stream).lineno
        src_expr: list[nodes.Expr] = [parser.parse_expression()]
        script_call = self.call_method("_render_js", src_expr)
        return nodes.Output([script_call]).set_lineno(tag_line)

    def _render_js(self, src: str) -> str:
        """Build the HTML script tag for a JavaScript file.

        Args:
            src: Path to JavaScript file.

        Returns:
            HTML script tag string.
        """
        return f'<script src="{src}"></script>'
# Brand palette (hex colors substituted into the CSS/JS templates below).
CREWAI_ORANGE = "#FF5A50"  # primary accent: start/router nodes, router edges
DARK_GRAY = "#333333"  # tooltip text and default badge background
WHITE = "#FFFFFF"
GRAY = "#666666"  # default edge color and secondary tooltip text
# Dark-theme surface colors used by the generated HTML page.
BG_DARK = "#0d1117"
BG_CARD = "#161b22"
BORDER_SUBTLE = "#30363d"
TEXT_PRIMARY = "#e6edf3"
TEXT_SECONDARY = "#7d8590"
def calculate_node_positions(
    dag: FlowStructure,
) -> dict[str, dict[str, int | float]]:
    """Calculate hierarchical positions (level, x, y) for each node.

    A BFS from the start methods assigns each node a level (depth); nodes
    are then spread horizontally per level, centered under their parents,
    with a minimum horizontal gap enforced left-to-right.

    Args:
        dag: FlowStructure containing nodes and edges.

    Returns:
        Dictionary mapping node names to their position data (level, x, y).
    """
    # Adjacency maps restricted to known nodes; edges that reference a node
    # missing from dag["nodes"] are ignored.
    children: dict[str, list[str]] = {name: [] for name in dag["nodes"]}
    parents: dict[str, list[str]] = {name: [] for name in dag["nodes"]}
    for edge in dag["edges"]:
        source = edge["source"]
        target = edge["target"]
        if source in children and target in children:
            children[source].append(target)
            parents[target].append(source)
    # BFS from the start methods; a node keeps the deepest level assigned to
    # it before it is first visited.
    levels: dict[str, int] = {}
    queue: list[tuple[str, int]] = []
    for start_method in dag["start_methods"]:
        if start_method in dag["nodes"]:
            levels[start_method] = 0
            queue.append((start_method, 0))
    visited: set[str] = set()
    while queue:
        node, level = queue.pop(0)
        if node in visited:
            continue
        visited.add(node)
        # Defensive: queued nodes always have a level already, but keep the
        # max-level rule explicit.
        if node not in levels or levels[node] < level:
            levels[node] = level
        for child in children.get(node, []):
            if child not in visited:
                child_level = level + 1
                if child not in levels or levels[child] < child_level:
                    levels[child] = child_level
                queue.append((child, child_level))
    # Nodes unreachable from any start method default to level 0.
    for name in dag["nodes"]:
        if name not in levels:
            levels[name] = 0
    # Group nodes by level; insertion order of `levels` decides ordering
    # within each level.
    nodes_by_level: dict[int, list[str]] = {}
    for node, level in levels.items():
        if level not in nodes_by_level:
            nodes_by_level[level] = []
        nodes_by_level[level].append(node)
    positions: dict[str, dict[str, int | float]] = {}
    level_separation = 300  # Vertical spacing between levels
    node_spacing = 400  # Horizontal spacing between nodes
    for level, nodes_at_level in sorted(nodes_by_level.items()):
        y = level * level_separation
        if level == 0:
            # Center the start row around x = 0.
            num_nodes = len(nodes_at_level)
            for i, node in enumerate(nodes_at_level):
                x = (i - (num_nodes - 1) / 2) * node_spacing
                positions[node] = {"level": level, "x": x, "y": y}
        else:
            for i, node in enumerate(nodes_at_level):
                parent_list = parents.get(node, [])
                parent_positions: list[float] = [
                    positions[parent]["x"]
                    for parent in parent_list
                    if parent in positions
                ]
                if parent_positions:
                    if len(parent_positions) > 1 and len(set(parent_positions)) == 1:
                        # All parents sit at the same x; nudge right so
                        # siblings fan out instead of stacking.
                        base_x = parent_positions[0]
                        avg_x = base_x + node_spacing * 0.4
                    else:
                        avg_x = sum(parent_positions) / len(parent_positions)
                else:
                    avg_x = i * node_spacing * 0.5
                positions[node] = {"level": level, "x": avg_x, "y": y}
            # Resolve horizontal overlaps: scan left-to-right and push each
            # node right until the minimum gap holds.
            nodes_at_level_sorted = sorted(
                nodes_at_level, key=lambda n: positions[n]["x"]
            )
            min_spacing = node_spacing * 0.6  # Minimum horizontal distance
            for i in range(len(nodes_at_level_sorted) - 1):
                current_node = nodes_at_level_sorted[i]
                next_node = nodes_at_level_sorted[i + 1]
                current_x = positions[current_node]["x"]
                next_x = positions[next_node]["x"]
                if next_x - current_x < min_spacing:
                    positions[next_node]["x"] = current_x + min_spacing
    return positions
def render_interactive(
    dag: FlowStructure,
    filename: str = "flow_dag.html",
    show: bool = True,
) -> str:
    """Create interactive HTML visualization of Flow structure.

    Generates three output files in a temporary directory: HTML template,
    CSS stylesheet, and JavaScript. Optionally opens the visualization in
    default browser.

    Args:
        dag: FlowStructure to visualize.
        filename: Output HTML filename (basename only, no path).
        show: Whether to open in browser.

    Returns:
        Absolute path to generated HTML file in temporary directory.
    """
    # Precompute the hierarchical layout (level/x/y) for every node.
    node_positions = calculate_node_positions(dag)
    # --- Build the client-side node descriptors ---------------------------
    nodes_list: list[dict[str, Any]] = []
    for name, metadata in dag["nodes"].items():
        node_type: str = metadata.get("type", "listen")
        color_config: dict[str, Any]
        font_color: str
        border_width: int
        # Colors are CSS custom properties resolved in the browser; only the
        # router border uses a literal hex value.
        if node_type == "start":
            color_config = {
                "background": "var(--node-bg-start)",
                "border": "var(--node-border-start)",
                "highlight": {
                    "background": "var(--node-bg-start)",
                    "border": "var(--node-border-start)",
                },
            }
            font_color = "var(--node-text-color)"
            border_width = 3
        elif node_type == "router":
            color_config = {
                "background": "var(--node-bg-router)",
                "border": CREWAI_ORANGE,
                "highlight": {
                    "background": "var(--node-bg-router)",
                    "border": CREWAI_ORANGE,
                },
            }
            font_color = "var(--node-text-color)"
            border_width = 3
        else:
            color_config = {
                "background": "var(--node-bg-listen)",
                "border": "var(--node-border-listen)",
                "highlight": {
                    "background": "var(--node-bg-listen)",
                    "border": "var(--node-border-listen)",
                },
            }
            font_color = "var(--node-text-color)"
            border_width = 3
        # --- Tooltip HTML ---------------------------------------------------
        # NOTE(review): metadata values are interpolated without HTML
        # escaping; they come from Python method identifiers and router path
        # strings, so injection risk is low — confirm if sources change.
        title_parts: list[str] = []
        type_badge_bg: str = (
            CREWAI_ORANGE if node_type in ["start", "router"] else DARK_GRAY
        )
        title_parts.append(f"""
            <div style="border-bottom: 1px solid rgba(102,102,102,0.15); padding-bottom: 8px; margin-bottom: 10px;">
                <div style="font-size: 13px; font-weight: 700; color: {DARK_GRAY}; margin-bottom: 6px;">{name}</div>
                <span style="display: inline-block; background: {type_badge_bg}; color: white; padding: 2px 8px; border-radius: 4px; font-size: 10px; font-weight: 600; text-transform: uppercase; letter-spacing: 0.5px;">{node_type}</span>
            </div>
        """)
        if metadata.get("condition_type"):
            condition = metadata["condition_type"]
            if condition == "AND":
                condition_badge_bg = "rgba(255,90,80,0.12)"
                condition_color = CREWAI_ORANGE
            elif condition == "IF":
                condition_badge_bg = "rgba(255,90,80,0.18)"
                condition_color = CREWAI_ORANGE
            else:
                condition_badge_bg = "rgba(102,102,102,0.12)"
                condition_color = GRAY
            title_parts.append(f"""
                <div style="margin-bottom: 8px;">
                    <div style="font-size: 10px; text-transform: uppercase; color: {GRAY}; letter-spacing: 0.5px; margin-bottom: 3px; font-weight: 600;">Condition</div>
                    <span style="display: inline-block; background: {condition_badge_bg}; color: {condition_color}; padding: 3px 8px; border-radius: 4px; font-size: 11px; font-weight: 700;">{condition}</span>
                </div>
            """)
        if metadata.get("trigger_methods"):
            triggers = metadata["trigger_methods"]
            triggers_items = "".join(
                [
                    f'<li style="margin: 3px 0;"><code style="background: rgba(102,102,102,0.08); padding: 2px 6px; border-radius: 3px; font-size: 10px; color: {DARK_GRAY}; border: 1px solid rgba(102,102,102,0.12);">{t}</code></li>'
                    for t in triggers
                ]
            )
            title_parts.append(f"""
                <div style="margin-bottom: 8px;">
                    <div style="font-size: 10px; text-transform: uppercase; color: {GRAY}; letter-spacing: 0.5px; margin-bottom: 4px; font-weight: 600;">Triggers</div>
                    <ul style="list-style: none; padding: 0; margin: 0;">{triggers_items}</ul>
                </div>
            """)
        if metadata.get("router_paths"):
            paths = metadata["router_paths"]
            paths_items = "".join(
                [
                    f'<li style="margin: 3px 0;"><code style="background: rgba(255,90,80,0.08); padding: 2px 6px; border-radius: 3px; font-size: 10px; color: {CREWAI_ORANGE}; border: 1px solid rgba(255,90,80,0.2); font-weight: 600;">{p}</code></li>'
                    for p in paths
                ]
            )
            title_parts.append(f"""
                <div>
                    <div style="font-size: 10px; text-transform: uppercase; color: {GRAY}; letter-spacing: 0.5px; margin-bottom: 4px; font-weight: 600;">Router Paths</div>
                    <ul style="list-style: none; padding: 0; margin: 0;">{paths_items}</ul>
                </div>
            """)
        bg_color = color_config["background"]
        border_color = color_config["border"]
        position_data = node_positions.get(name, {"level": 0, "x": 0, "y": 0})
        node_data: dict[str, Any] = {
            "id": name,
            "label": name,
            "title": "".join(title_parts),
            "shape": "custom",
            "size": 30,
            "level": position_data["level"],
            "nodeStyle": {
                "name": name,
                "bgColor": bg_color,
                "borderColor": border_color,
                "borderWidth": border_width,
                "fontColor": font_color,
            },
            "opacity": 1.0,
            "glowSize": 0,
            "glowColor": None,
        }
        # Add x,y only for graphs with 3-4 nodes
        # (other sizes rely on the client-side layout engine instead).
        total_nodes = len(dag["nodes"])
        if 3 <= total_nodes <= 4:
            node_data["x"] = position_data["x"]
            node_data["y"] = position_data["y"]
        nodes_list.append(node_data)
    execution_paths: int = calculate_execution_paths(dag)
    # --- Build the client-side edge descriptors ----------------------------
    edges_list: list[dict[str, Any]] = []
    for edge in dag["edges"]:
        edge_label: str = ""
        edge_color: str = GRAY
        edge_dashes: bool | list[int] = False
        # Router paths render dashed/orange; AND edges orange; OR edges gray.
        if edge["is_router_path"]:
            edge_color = CREWAI_ORANGE
            edge_dashes = [15, 10]
            if "router_path_label" in edge:
                edge_label = edge["router_path_label"]
        elif edge["condition_type"] == "AND":
            edge_label = "AND"
            edge_color = CREWAI_ORANGE
        elif edge["condition_type"] == "OR":
            edge_label = "OR"
            edge_color = GRAY
        edge_data: dict[str, Any] = {
            "from": edge["source"],
            "to": edge["target"],
            "label": edge_label,
            "arrows": "to",
            "width": 2,
            "selectionWidth": 0,
            "color": {
                "color": edge_color,
                "highlight": edge_color,
            },
        }
        if edge_dashes is not False:
            edge_data["dashes"] = edge_dashes
        edges_list.append(edge_data)
    # --- Render templates to a fresh temp directory ------------------------
    template_dir = Path(__file__).parent.parent / "assets"
    # Custom delimiters: templates write '{{ var }}' (quoted) so literal
    # {{ }} in the bundled JS survives Jinja rendering.
    env = Environment(
        loader=FileSystemLoader(template_dir),
        autoescape=select_autoescape(["html", "xml", "css", "js"]),
        variable_start_string="'{{",
        variable_end_string="}}'",
        extensions=[CSSExtension, JSExtension],
    )
    temp_dir = Path(tempfile.mkdtemp(prefix="crewai_flow_"))
    output_path = temp_dir / Path(filename).name
    css_filename = output_path.stem + "_style.css"
    css_output_path = temp_dir / css_filename
    js_filename = output_path.stem + "_script.js"
    js_output_path = temp_dir / js_filename
    # CSS is processed with plain string substitution (quoted tokens), not
    # through the Jinja environment.
    css_file = template_dir / "style.css"
    css_content = css_file.read_text(encoding="utf-8")
    css_content = css_content.replace("'{{ WHITE }}'", WHITE)
    css_content = css_content.replace("'{{ DARK_GRAY }}'", DARK_GRAY)
    css_content = css_content.replace("'{{ GRAY }}'", GRAY)
    css_content = css_content.replace("'{{ CREWAI_ORANGE }}'", CREWAI_ORANGE)
    css_output_path.write_text(css_content, encoding="utf-8")
    # JS substitution: color tokens are unquoted, data tokens are quoted so
    # the JSON payload replaces a string placeholder in the source.
    js_file = template_dir / "interactive.js"
    js_content = js_file.read_text(encoding="utf-8")
    dag_nodes_json = json.dumps(dag["nodes"])
    dag_full_json = json.dumps(dag)
    js_content = js_content.replace("{{ WHITE }}", WHITE)
    js_content = js_content.replace("{{ DARK_GRAY }}", DARK_GRAY)
    js_content = js_content.replace("{{ GRAY }}", GRAY)
    js_content = js_content.replace("{{ CREWAI_ORANGE }}", CREWAI_ORANGE)
    js_content = js_content.replace("'{{ nodeData }}'", dag_nodes_json)
    js_content = js_content.replace("'{{ dagData }}'", dag_full_json)
    js_content = js_content.replace("'{{ nodes_list_json }}'", json.dumps(nodes_list))
    js_content = js_content.replace("'{{ edges_list_json }}'", json.dumps(edges_list))
    js_output_path.write_text(js_content, encoding="utf-8")
    template = env.get_template("interactive_flow.html.j2")
    html_content = template.render(
        CREWAI_ORANGE=CREWAI_ORANGE,
        DARK_GRAY=DARK_GRAY,
        WHITE=WHITE,
        GRAY=GRAY,
        BG_DARK=BG_DARK,
        BG_CARD=BG_CARD,
        BORDER_SUBTLE=BORDER_SUBTLE,
        TEXT_PRIMARY=TEXT_PRIMARY,
        TEXT_SECONDARY=TEXT_SECONDARY,
        nodes_list_json=json.dumps(nodes_list),
        edges_list_json=json.dumps(edges_list),
        dag_nodes_count=len(dag["nodes"]),
        dag_edges_count=len(dag["edges"]),
        execution_paths=execution_paths,
        css_path=css_filename,
        js_path=js_filename,
    )
    output_path.write_text(html_content, encoding="utf-8")
    if show:
        webbrowser.open(f"file://{output_path.absolute()}")
    return str(output_path.absolute())
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/flow/visualization/renderers/interactive.py",
"license": "MIT License",
"lines": 380,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai/src/crewai/flow/visualization/schema.py | """OpenAPI schema conversion utilities for Flow methods."""
import inspect
from typing import Any, get_args, get_origin
def type_to_openapi_schema(type_hint: Any) -> dict[str, Any]:
    """Convert Python type hint to OpenAPI schema.

    Args:
        type_hint: Python type hint to convert.

    Returns:
        OpenAPI schema dictionary ({} when the hint is absent or unknown).
    """
    # No annotation at all -> unconstrained schema.
    if type_hint is inspect.Parameter.empty:
        return {}
    if type_hint is None or type_hint is type(None):
        return {"type": "null"}
    # typing.Any is unconstrained; detect it via module/name first, then by
    # its repr for older typing implementations.
    if hasattr(type_hint, "__module__") and hasattr(type_hint, "__name__"):
        if type_hint.__module__ == "typing" and type_hint.__name__ == "Any":
            return {}
    if str(type_hint) in ("typing.Any", "<class 'typing.Any'>"):
        return {}
    # A plain string annotation is passed through as the schema type.
    if isinstance(type_hint, str):
        return {"type": type_hint}
    origin = get_origin(type_hint)
    type_args = get_args(type_hint)
    # Primitive scalars map directly onto JSON schema types.
    for primitive, json_name in (
        (str, "string"),
        (int, "integer"),
        (float, "number"),
        (bool, "boolean"),
    ):
        if type_hint is primitive:
            return {"type": json_name}
    if type_hint is dict or origin is dict:
        # dict[K, V]: only the value type is representable in OpenAPI.
        if type_args and len(type_args) > 1:
            return {
                "type": "object",
                "additionalProperties": type_to_openapi_schema(type_args[1]),
            }
        return {"type": "object"}
    if type_hint is list or origin is list:
        if type_args:
            return {"type": "array", "items": type_to_openapi_schema(type_args[0])}
        return {"type": "array"}
    # Arbitrary classes become objects tagged with their class name.
    if hasattr(type_hint, "__name__"):
        return {"type": "object", "className": type_hint.__name__}
    return {}
def extract_method_signature(method: Any, method_name: str) -> dict[str, Any]:
    """Extract method signature as OpenAPI schema with documentation.

    Args:
        method: Method to analyze.
        method_name: Method name.

    Returns:
        Dictionary with operationId, parameters, returns, summary, and description.
    """
    try:
        signature = inspect.signature(method)
        # Map every parameter except `self` to its OpenAPI schema.
        param_schemas = {
            name: type_to_openapi_schema(parameter.annotation)
            for name, parameter in signature.parameters.items()
            if name != "self"
        }
        schema: dict[str, Any] = {
            "operationId": method_name,
            "parameters": param_schemas,
            "returns": type_to_openapi_schema(signature.return_annotation),
        }
        doc = inspect.getdoc(method)
        if doc:
            # First non-empty line becomes the summary; the rest (stripped
            # per line) becomes the description.
            doc_lines = doc.strip().split("\n")
            first_line = doc_lines[0].strip()
            if first_line:
                schema["summary"] = first_line
            if len(doc_lines) > 1:
                remainder = "\n".join(part.strip() for part in doc_lines[1:]).strip()
                if remainder:
                    schema["description"] = remainder
        return schema
    except Exception:
        # Introspection can fail (builtins, C callables); degrade to a
        # minimal schema rather than propagating.
        return {"operationId": method_name, "parameters": {}, "returns": {}}
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/flow/visualization/schema.py",
"license": "MIT License",
"lines": 80,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai/src/crewai/flow/visualization/types.py | """Type definitions for Flow structure visualization."""
from typing import Any, TypedDict
class NodeMetadata(TypedDict, total=False):
    """Metadata for a single node in the flow structure.

    All keys are optional (``total=False``); the structure builder fills in
    whatever it can determine for each flow method.
    """

    # Node kind, e.g. "start", "listen", or "router".
    type: str
    # True when the method is decorated with @router.
    is_router: bool
    # Static string outputs a router method may return.
    router_paths: list[str]
    # Condition attached to this node ("AND"/"OR"/...), if any.
    condition_type: str | None
    trigger_condition_type: str | None
    # Names/labels that trigger this listener.
    trigger_methods: list[str]
    # Nested condition tree, e.g. {"type": "AND", "conditions": [...]}.
    trigger_condition: dict[str, Any] | None
    # OpenAPI-style signature info (operationId/parameters/returns/...).
    method_signature: dict[str, Any]
    # Source extraction for drill-down views in the renderer.
    source_code: str
    source_lines: list[str]
    source_start_line: int
    source_file: str
    # Enclosing class information.
    class_signature: str
    class_name: str
    class_line_number: int
class StructureEdge(TypedDict, total=False):
    """Represents a connection in the flow structure."""

    # Emitting node name.
    source: str
    # Receiving node name.
    target: str
    # Condition kind for listener edges ("AND"/"OR"); None for router paths.
    condition_type: str | None
    # True when this edge represents a router output string.
    is_router_path: bool
    # The router output string labelling this edge (router edges only).
    router_path_label: str
class FlowStructure(TypedDict):
    """Complete structure representation of a Flow."""

    # Node name -> per-node metadata.
    nodes: dict[str, NodeMetadata]
    # Directed edges between nodes.
    edges: list[StructureEdge]
    # Names of @start methods (graph roots).
    start_methods: list[str]
    # Names of @router methods.
    router_methods: list[str]
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/flow/visualization/types.py",
"license": "MIT License",
"lines": 32,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai/tests/test_flow_visualization.py | """Tests for flow visualization and structure building."""
import json
import os
import tempfile
from pathlib import Path
import pytest
from crewai.flow.flow import Flow, and_, listen, or_, router, start
from crewai.flow.visualization import (
build_flow_structure,
visualize_flow_structure,
)
class SimpleFlow(Flow):
    """Simple flow for testing basic visualization.

    Linear two-step graph: begin -> process.
    """

    @start()
    def begin(self):
        return "started"

    @listen(begin)
    def process(self):
        return "processed"
class RouterFlow(Flow):
    """Flow with router for testing router visualization.

    init -> decide (router) -> handle_a / handle_b via the string outputs
    "path_a" / "path_b".
    """

    @start()
    def init(self):
        return "initialized"

    @router(init)
    def decide(self):
        # Both string returns are statically analyzable router paths.
        if hasattr(self, "state") and self.state.get("path") == "b":
            return "path_b"
        return "path_a"

    @listen("path_a")
    def handle_a(self):
        return "handled_a"

    @listen("path_b")
    def handle_b(self):
        return "handled_b"
class ComplexFlow(Flow):
    """Complex flow with AND/OR conditions for testing.

    Two start methods converge via and_()/or_(), then a router leads to a
    final string-triggered listener.
    """

    @start()
    def start_a(self):
        return "a"

    @start()
    def start_b(self):
        return "b"

    @listen(and_(start_a, start_b))
    def converge_and(self):
        return "and_done"

    @listen(or_(start_a, start_b))
    def converge_or(self):
        return "or_done"

    @router(converge_and)
    def router_decision(self):
        return "final_path"

    @listen("final_path")
    def finalize(self):
        return "complete"
def test_build_flow_structure_simple():
    """Test building structure for a simple sequential flow."""
    flow = SimpleFlow()
    structure = build_flow_structure(flow)
    assert structure is not None
    assert len(structure["nodes"]) == 2
    assert len(structure["edges"]) == 1
    node_names = set(structure["nodes"].keys())
    assert "begin" in node_names
    assert "process" in node_names
    assert len(structure["start_methods"]) == 1
    assert "begin" in structure["start_methods"]
    edge = structure["edges"][0]
    assert edge["source"] == "begin"
    assert edge["target"] == "process"
    # A single-trigger @listen is recorded as an OR condition by the builder.
    assert edge["condition_type"] == "OR"


def test_build_flow_structure_with_router():
    """Test building structure for a flow with router."""
    flow = RouterFlow()
    structure = build_flow_structure(flow)
    assert structure is not None
    assert len(structure["nodes"]) == 4
    assert len(structure["router_methods"]) == 1
    assert "decide" in structure["router_methods"]
    router_node = structure["nodes"]["decide"]
    assert router_node["type"] == "router"
    # router_paths is optional metadata (static analysis can fail), so guard.
    if "router_paths" in router_node:
        assert len(router_node["router_paths"]) >= 1
        assert any("path" in path for path in router_node["router_paths"])
    router_edges = [edge for edge in structure["edges"] if edge["is_router_path"]]
    assert len(router_edges) >= 1


def test_build_flow_structure_with_and_or_conditions():
    """Test building structure for a flow with AND/OR conditions."""
    flow = ComplexFlow()
    structure = build_flow_structure(flow)
    assert structure is not None
    # Each of the two start methods contributes one AND edge into converge_and.
    and_edges = [
        edge
        for edge in structure["edges"]
        if edge["target"] == "converge_and" and edge["condition_type"] == "AND"
    ]
    assert len(and_edges) == 2
    or_edges = [
        edge
        for edge in structure["edges"]
        if edge["target"] == "converge_or" and edge["condition_type"] == "OR"
    ]
    assert len(or_edges) == 2
def test_visualize_flow_structure_creates_html():
    """Test that visualization generates valid HTML file."""
    flow = SimpleFlow()
    structure = build_flow_structure(flow)
    # show=False keeps the test headless (no browser launch).
    html_file = visualize_flow_structure(structure, "test_flow.html", show=False)
    assert os.path.exists(html_file)
    with open(html_file, "r", encoding="utf-8") as f:
        html_content = f.read()
    assert "<!DOCTYPE html>" in html_content
    assert "<html" in html_content
    assert "CrewAI Flow Visualization" in html_content
    assert "network-container" in html_content
    assert "drawer" in html_content
    assert "nav-controls" in html_content


def test_visualize_flow_structure_creates_assets():
    """Test that visualization creates CSS and JS files."""
    flow = SimpleFlow()
    structure = build_flow_structure(flow)
    html_file = visualize_flow_structure(structure, "test_flow.html", show=False)
    html_path = Path(html_file)
    # The interactive renderer writes companion assets next to the HTML as
    # <stem>_style.css and <stem>_script.js.
    css_file = html_path.parent / f"{html_path.stem}_style.css"
    js_file = html_path.parent / f"{html_path.stem}_script.js"
    assert css_file.exists()
    assert js_file.exists()
    css_content = css_file.read_text(encoding="utf-8")
    assert len(css_content) > 0
    assert "body" in css_content or ":root" in css_content
    js_content = js_file.read_text(encoding="utf-8")
    assert len(js_content) > 0
    assert "NetworkManager" in js_content


def test_visualize_flow_structure_json_data():
    """Test that visualization includes valid JSON data in JS file."""
    flow = RouterFlow()
    structure = build_flow_structure(flow)
    html_file = visualize_flow_structure(structure, "test_flow.html", show=False)
    html_path = Path(html_file)
    js_file = html_path.parent / f"{html_path.stem}_script.js"
    js_content = js_file.read_text(encoding="utf-8")
    # Node/edge/path data is inlined into the generated script.
    assert "init" in js_content
    assert "decide" in js_content
    assert "handle_a" in js_content
    assert "handle_b" in js_content
    assert "router" in js_content.lower()
    assert "path_a" in js_content
    assert "path_b" in js_content
def test_node_metadata_includes_source_info():
    """Test that nodes include source code and line number information."""
    flow = SimpleFlow()
    structure = build_flow_structure(flow)
    for node_name, node_metadata in structure["nodes"].items():
        assert node_metadata["source_code"] is not None
        assert len(node_metadata["source_code"]) > 0
        assert node_metadata["source_start_line"] is not None
        # Source line numbers are 1-based.
        assert node_metadata["source_start_line"] > 0
        assert node_metadata["source_file"] is not None
        assert node_metadata["source_file"].endswith(".py")


def test_node_metadata_includes_method_signature():
    """Test that nodes include method signature information."""
    flow = SimpleFlow()
    structure = build_flow_structure(flow)
    begin_node = structure["nodes"]["begin"]
    # Signature is stored in an OpenAPI-like shape.
    assert begin_node["method_signature"] is not None
    assert "operationId" in begin_node["method_signature"]
    assert begin_node["method_signature"]["operationId"] == "begin"
    assert "parameters" in begin_node["method_signature"]
    assert "returns" in begin_node["method_signature"]


def test_router_node_has_correct_metadata():
    """Test that router nodes have correct type and paths."""
    flow = RouterFlow()
    structure = build_flow_structure(flow)
    router_node = structure["nodes"]["decide"]
    assert router_node["type"] == "router"
    assert router_node["is_router"] is True
    # Both static string returns of decide() should be discovered.
    assert router_node["router_paths"] is not None
    assert len(router_node["router_paths"]) == 2
    assert "path_a" in router_node["router_paths"]
    assert "path_b" in router_node["router_paths"]


def test_listen_node_has_trigger_methods():
    """Test that listen nodes include trigger method information."""
    flow = RouterFlow()
    structure = build_flow_structure(flow)
    handle_a_node = structure["nodes"]["handle_a"]
    assert handle_a_node["trigger_methods"] is not None
    assert "path_a" in handle_a_node["trigger_methods"]


def test_and_condition_node_metadata():
    """Test that AND condition nodes have correct metadata."""
    flow = ComplexFlow()
    structure = build_flow_structure(flow)
    converge_and_node = structure["nodes"]["converge_and"]
    assert converge_and_node["condition_type"] == "AND"
    # trigger_condition is the nested condition tree for the listener.
    assert converge_and_node["trigger_condition"] is not None
    assert converge_and_node["trigger_condition"]["type"] == "AND"
    assert len(converge_and_node["trigger_condition"]["conditions"]) == 2
def test_visualization_handles_special_characters():
    """Test that visualization properly handles special characters in method names."""

    class SpecialCharFlow(Flow):
        @start()
        def method_with_underscore(self):
            return "test"

        @listen(method_with_underscore)
        def another_method_123(self):
            return "done"

    flow = SpecialCharFlow()
    structure = build_flow_structure(flow)
    assert len(structure["nodes"]) == 2
    # Structure must stay JSON-serializable for the interactive renderer.
    json_str = json.dumps(structure)
    assert json_str is not None
    assert "method_with_underscore" in json_str
    assert "another_method_123" in json_str


def test_empty_flow_structure():
    """Test building structure for a flow with no methods."""

    class EmptyFlow(Flow):
        pass

    flow = EmptyFlow()
    structure = build_flow_structure(flow)
    assert structure is not None
    assert len(structure["nodes"]) == 0
    assert len(structure["edges"]) == 0
    assert len(structure["start_methods"]) == 0


def test_topological_path_counting():
    """Test that topological path counting is accurate."""
    flow = ComplexFlow()
    structure = build_flow_structure(flow)
    # Smoke check only: the structure is non-trivial enough to count paths.
    assert len(structure["nodes"]) > 0
    assert len(structure["edges"]) > 0


def test_class_signature_metadata():
    """Test that nodes include class signature information."""
    flow = SimpleFlow()
    structure = build_flow_structure(flow)
    for node_name, node_metadata in structure["nodes"].items():
        assert node_metadata["class_name"] is not None
        assert node_metadata["class_name"] == "SimpleFlow"
        assert node_metadata["class_signature"] is not None
        assert "SimpleFlow" in node_metadata["class_signature"]


def test_visualization_plot_method():
    """Test that flow.plot() method works."""
    flow = SimpleFlow()
    # show=False keeps the test headless.
    html_file = flow.plot("test_plot.html", show=False)
    assert os.path.exists(html_file)
def test_router_paths_to_string_conditions():
    """Test that router paths correctly connect to listeners with string conditions."""

    class RouterToStringFlow(Flow):
        @start()
        def init(self):
            return "initialized"

        @router(init)
        def decide(self):
            if hasattr(self, "state") and self.state.get("path") == "b":
                return "path_b"
            return "path_a"

        @listen(or_("path_a", "path_b"))
        def handle_either(self):
            return "handled"

        @listen("path_b")
        def handle_b_only(self):
            return "handled_b"

    flow = RouterToStringFlow()
    structure = build_flow_structure(flow)
    decide_node = structure["nodes"]["decide"]
    assert "path_a" in decide_node["router_paths"]
    assert "path_b" in decide_node["router_paths"]
    # Expected router edges: path_a/path_b -> handle_either, path_b -> handle_b_only.
    router_edges = [edge for edge in structure["edges"] if edge["is_router_path"]]
    assert len(router_edges) == 3
    edges_to_handle_either = [
        edge for edge in router_edges if edge["target"] == "handle_either"
    ]
    assert len(edges_to_handle_either) == 2
    edges_to_handle_b_only = [
        edge for edge in router_edges if edge["target"] == "handle_b_only"
    ]
    assert len(edges_to_handle_b_only) == 1


def test_router_paths_not_in_and_conditions():
    """Test that router paths don't create edges to AND-nested conditions."""

    class RouterAndConditionFlow(Flow):
        @start()
        def init(self):
            return "initialized"

        @router(init)
        def decide(self):
            return "path_a"

        @listen("path_a")
        def step_1(self):
            return "step_1_done"

        @listen(and_("path_a", step_1))
        def step_2_and(self):
            return "step_2_done"

        @listen(or_(and_("path_a", step_1), "path_a"))
        def step_3_or(self):
            return "step_3_done"

    flow = RouterAndConditionFlow()
    structure = build_flow_structure(flow)
    router_edges = [edge for edge in structure["edges"] if edge["is_router_path"]]
    targets = [edge["target"] for edge in router_edges]
    assert "step_1" in targets
    # step_3_or has "path_a" as a direct OR alternative, so it gets an edge;
    # step_2_and only has "path_a" inside an AND, so it must not.
    assert "step_3_or" in targets
    assert "step_2_and" not in targets
def test_chained_routers_no_self_loops():
    """Test that chained routers don't create self-referencing edges.

    This tests the bug where routers with string triggers (like 'auth', 'exp')
    would incorrectly create edges to themselves when another router outputs
    those strings.
    """

    class ChainedRouterFlow(Flow):
        """Flow with multiple chained routers using string outputs."""

        @start()
        def entrance(self):
            return "started"

        @router(entrance)
        def session_in_cache(self):
            return "exp"

        @router("exp")
        def check_exp(self):
            return "auth"

        @router("auth")
        def call_ai_auth(self):
            return "action"

        @listen("action")
        def forward_to_action(self):
            return "done"

        # 'authenticate' is never produced by any router here; this listener
        # intentionally stays orphaned.
        @listen("authenticate")
        def forward_to_authenticate(self):
            return "need_auth"

    flow = ChainedRouterFlow()
    structure = build_flow_structure(flow)
    # Check that no self-loops exist
    for edge in structure["edges"]:
        assert edge["source"] != edge["target"], (
            f"Self-loop detected: {edge['source']} -> {edge['target']}"
        )
    # Verify correct connections
    router_edges = [edge for edge in structure["edges"] if edge["is_router_path"]]
    # session_in_cache -> check_exp (via 'exp')
    exp_edges = [
        edge
        for edge in router_edges
        if edge["router_path_label"] == "exp" and edge["source"] == "session_in_cache"
    ]
    assert len(exp_edges) == 1
    assert exp_edges[0]["target"] == "check_exp"
    # check_exp -> call_ai_auth (via 'auth')
    auth_edges = [
        edge
        for edge in router_edges
        if edge["router_path_label"] == "auth" and edge["source"] == "check_exp"
    ]
    assert len(auth_edges) == 1
    assert auth_edges[0]["target"] == "call_ai_auth"
    # call_ai_auth -> forward_to_action (via 'action')
    action_edges = [
        edge
        for edge in router_edges
        if edge["router_path_label"] == "action" and edge["source"] == "call_ai_auth"
    ]
    assert len(action_edges) == 1
    assert action_edges[0]["target"] == "forward_to_action"
def test_routers_with_shared_output_strings():
"""Test that routers with shared output strings don't create incorrect edges.
This tests a scenario where multiple routers can output the same string,
ensuring the visualization only creates edges for the router that actually
outputs the string, not all routers.
"""
class SharedOutputRouterFlow(Flow):
"""Flow where multiple routers can output 'auth'."""
@start()
def start(self):
return "started"
@router(start)
def router_a(self):
# This router can output 'auth' or 'skip'
return "auth"
@router("auth")
def router_b(self):
# This router listens to 'auth' but outputs 'done'
return "done"
@listen("done")
def finalize(self):
return "complete"
@listen("skip")
def handle_skip(self):
return "skipped"
flow = SharedOutputRouterFlow()
structure = build_flow_structure(flow)
# Check no self-loops
for edge in structure["edges"]:
assert edge["source"] != edge["target"], (
f"Self-loop detected: {edge['source']} -> {edge['target']}"
)
# router_a should connect to router_b via 'auth'
router_edges = [edge for edge in structure["edges"] if edge["is_router_path"]]
auth_from_a = [
edge
for edge in router_edges
if edge["source"] == "router_a" and edge["router_path_label"] == "auth"
]
assert len(auth_from_a) == 1
assert auth_from_a[0]["target"] == "router_b"
# router_b should connect to finalize via 'done'
done_from_b = [
edge
for edge in router_edges
if edge["source"] == "router_b" and edge["router_path_label"] == "done"
]
assert len(done_from_b) == 1
assert done_from_b[0]["target"] == "finalize"
def test_warning_for_router_without_paths(caplog):
"""Test that a warning is logged when a router has no determinable paths."""
import logging
class RouterWithoutPathsFlow(Flow):
"""Flow with a router that returns a dynamic value."""
@start()
def begin(self):
return "started"
@router(begin)
def dynamic_router(self):
# Returns a variable that can't be statically analyzed
import random
return random.choice(["path_a", "path_b"])
@listen("path_a")
def handle_a(self):
return "a"
@listen("path_b")
def handle_b(self):
return "b"
flow = RouterWithoutPathsFlow()
with caplog.at_level(logging.WARNING):
build_flow_structure(flow)
# Check that warning was logged for the router
assert any(
"Could not determine return paths for router 'dynamic_router'" in record.message
for record in caplog.records
)
# Check that error was logged for orphaned triggers
assert any(
"Found listeners waiting for triggers" in record.message
for record in caplog.records
)
def test_warning_for_orphaned_listeners(caplog):
"""Test that an error is logged when listeners wait for triggers no router outputs."""
import logging
from typing import Literal
class OrphanedListenerFlow(Flow):
"""Flow where a listener waits for a trigger that no router outputs."""
@start()
def begin(self):
return "started"
@router(begin)
def my_router(self) -> Literal["option_a", "option_b"]:
return "option_a"
@listen("option_a")
def handle_a(self):
return "a"
@listen("option_c") # This trigger is never output by any router
def handle_orphan(self):
return "orphan"
flow = OrphanedListenerFlow()
with caplog.at_level(logging.ERROR):
build_flow_structure(flow)
# Check that error was logged for orphaned trigger
assert any(
"Found listeners waiting for triggers" in record.message
and "option_c" in record.message
for record in caplog.records
)
def test_no_warning_for_properly_typed_router(caplog):
"""Test that no warning is logged when router has proper type annotations."""
import logging
from typing import Literal
class ProperlyTypedRouterFlow(Flow):
"""Flow with properly typed router."""
@start()
def begin(self):
return "started"
@router(begin)
def typed_router(self) -> Literal["path_a", "path_b"]:
return "path_a"
@listen("path_a")
def handle_a(self):
return "a"
@listen("path_b")
def handle_b(self):
return "b"
flow = ProperlyTypedRouterFlow()
with caplog.at_level(logging.WARNING):
build_flow_structure(flow)
# No warnings should be logged
warning_messages = [r.message for r in caplog.records if r.levelno >= logging.WARNING]
assert not any("Could not determine return paths" in msg for msg in warning_messages)
assert not any("Found listeners waiting for triggers" in msg for msg in warning_messages) | {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/test_flow_visualization.py",
"license": "MIT License",
"lines": 491,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai-tools/tests/tools/firecrawl_crawl_website_tool_test.py | import pytest
from crewai_tools.tools.firecrawl_crawl_website_tool.firecrawl_crawl_website_tool import (
FirecrawlCrawlWebsiteTool,
)
@pytest.mark.vcr()
def test_firecrawl_crawl_tool_integration():
tool = FirecrawlCrawlWebsiteTool(config={
"limit": 2,
"max_discovery_depth": 1,
"scrape_options": {"formats": ["markdown"]}
})
result = tool.run(url="https://firecrawl.dev")
assert result is not None
assert hasattr(result, 'status')
assert result.status in ["completed", "scraping"]
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/tests/tools/firecrawl_crawl_website_tool_test.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai-tools/tests/tools/firecrawl_scrape_website_tool_test.py | import pytest
from crewai_tools.tools.firecrawl_scrape_website_tool.firecrawl_scrape_website_tool import (
FirecrawlScrapeWebsiteTool,
)
@pytest.mark.vcr()
def test_firecrawl_scrape_tool_integration():
tool = FirecrawlScrapeWebsiteTool()
result = tool.run(url="https://firecrawl.dev")
assert result is not None
assert hasattr(result, 'markdown')
assert len(result.markdown) > 0
assert "Firecrawl" in result.markdown or "firecrawl" in result.markdown.lower()
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/tests/tools/firecrawl_scrape_website_tool_test.py",
"license": "MIT License",
"lines": 12,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai-tools/tests/tools/firecrawl_search_tool_test.py | import pytest
from crewai_tools.tools.firecrawl_search_tool.firecrawl_search_tool import FirecrawlSearchTool
@pytest.mark.vcr()
def test_firecrawl_search_tool_integration():
tool = FirecrawlSearchTool()
result = tool.run(query="firecrawl")
assert result is not None
assert hasattr(result, 'web') or hasattr(result, 'news') or hasattr(result, 'images')
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/tests/tools/firecrawl_search_tool_test.py",
"license": "MIT License",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/src/crewai/mypy.py | """Mypy plugin for CrewAI decorator type checking.
This plugin informs mypy about attributes injected by the @CrewBase decorator.
"""
from collections.abc import Callable
from mypy.nodes import MDEF, SymbolTableNode, Var
from mypy.plugin import ClassDefContext, Plugin
from mypy.types import AnyType, TypeOfAny
class CrewAIPlugin(Plugin):
"""Mypy plugin that handles @CrewBase decorator attribute injection."""
def get_class_decorator_hook(
self, fullname: str
) -> Callable[[ClassDefContext], None] | None:
"""Return hook for class decorators.
Args:
fullname: Fully qualified name of the decorator.
Returns:
Hook function if this is a CrewBase decorator, None otherwise.
"""
if fullname in ("crewai.project.CrewBase", "crewai.project.crew_base.CrewBase"):
return self._crew_base_hook
return None
@staticmethod
def _crew_base_hook(ctx: ClassDefContext) -> None:
"""Add injected attributes to @CrewBase decorated classes.
Args:
ctx: Context for the class being decorated.
"""
any_type = AnyType(TypeOfAny.explicit)
str_type = ctx.api.named_type("builtins.str")
dict_type = ctx.api.named_type("builtins.dict", [str_type, any_type])
agents_config_var = Var("agents_config", dict_type)
agents_config_var.info = ctx.cls.info
agents_config_var._fullname = f"{ctx.cls.info.fullname}.agents_config"
ctx.cls.info.names["agents_config"] = SymbolTableNode(MDEF, agents_config_var)
tasks_config_var = Var("tasks_config", dict_type)
tasks_config_var.info = ctx.cls.info
tasks_config_var._fullname = f"{ctx.cls.info.fullname}.tasks_config"
ctx.cls.info.names["tasks_config"] = SymbolTableNode(MDEF, tasks_config_var)
def plugin(_: str) -> type[Plugin]:
"""Entry point for mypy plugin.
Args:
_: Mypy version string.
Returns:
Plugin class.
"""
return CrewAIPlugin
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/mypy.py",
"license": "MIT License",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/adapters/crewai_rag_adapter.py | """Adapter for CrewAI's native RAG system."""
from __future__ import annotations
import hashlib
from typing import TYPE_CHECKING, Any, cast
import uuid
from crewai.rag.config.types import RagConfigType
from crewai.rag.config.utils import get_rag_client
from crewai.rag.core.base_client import BaseClient
from crewai.rag.factory import create_client
from crewai.rag.types import BaseRecord, SearchResult
from pydantic import PrivateAttr
from pydantic.dataclasses import is_pydantic_dataclass
from typing_extensions import TypeIs, Unpack
from crewai_tools.rag.data_types import DataType
from crewai_tools.rag.misc import sanitize_metadata_for_chromadb
from crewai_tools.tools.rag.rag_tool import Adapter
from crewai_tools.tools.rag.types import AddDocumentParams, ContentItem
if TYPE_CHECKING:
from crewai.rag.qdrant.config import QdrantConfig
def _is_qdrant_config(config: Any) -> TypeIs[QdrantConfig]:
"""Check if config is a QdrantConfig using safe duck typing.
Args:
config: RAG configuration to check.
Returns:
True if config is a QdrantConfig instance.
"""
if not is_pydantic_dataclass(config):
return False
try:
return cast(bool, config.provider == "qdrant") # type: ignore[attr-defined]
except (AttributeError, ImportError):
return False
class CrewAIRagAdapter(Adapter):
"""Adapter that uses CrewAI's native RAG system.
Supports custom vector database configuration through the config parameter.
"""
collection_name: str = "default"
summarize: bool = False
similarity_threshold: float = 0.6
limit: int = 5
config: RagConfigType | None = None
_client: BaseClient | None = PrivateAttr(default=None)
def model_post_init(self, __context: Any) -> None:
"""Initialize the CrewAI RAG client after model initialization."""
if self.config is not None:
self._client = create_client(self.config)
else:
self._client = get_rag_client()
collection_params: dict[str, Any] = {"collection_name": self.collection_name}
if self.config is not None and _is_qdrant_config(self.config):
if self.config.vectors_config is not None:
collection_params["vectors_config"] = self.config.vectors_config
self._client.get_or_create_collection(**collection_params)
def query(
self,
question: str,
similarity_threshold: float | None = None,
limit: int | None = None,
) -> str:
"""Query the knowledge base with a question.
Args:
question: The question to ask
similarity_threshold: Minimum similarity score for results (default: 0.6)
limit: Maximum number of results to return (default: 5)
Returns:
Relevant content from the knowledge base
"""
search_limit = limit if limit is not None else self.limit
search_threshold = (
similarity_threshold
if similarity_threshold is not None
else self.similarity_threshold
)
if self._client is None:
raise ValueError("Client is not initialized")
results: list[SearchResult] = self._client.search(
collection_name=self.collection_name,
query=question,
limit=search_limit,
score_threshold=search_threshold,
)
if not results:
return "No relevant content found."
contents: list[str] = []
for result in results:
content: str = result.get("content", "")
if content:
contents.append(content)
return "\n\n".join(contents)
def add(self, *args: ContentItem, **kwargs: Unpack[AddDocumentParams]) -> None:
"""Add content to the knowledge base.
Args:
*args: Content items to add (strings, paths, or document dicts)
**kwargs: Additional parameters including:
- data_type: DataType enum or string (e.g., "file", "pdf_file", "text")
- path: Path to file or directory (alternative to positional arg)
- file_path: Alias for path
- metadata: Additional metadata to attach to documents
- url: URL to fetch content from
- website: Website URL to scrape
- github_url: GitHub repository URL
- youtube_url: YouTube video URL
- directory_path: Path to directory
Examples:
rag_tool.add("path/to/document.pdf", data_type=DataType.PDF_FILE)
rag_tool.add(path="path/to/document.pdf", data_type="file")
rag_tool.add(file_path="path/to/document.pdf", data_type="pdf_file")
rag_tool.add("path/to/document.pdf") # auto-detects PDF
"""
import os
from crewai_tools.rag.base_loader import LoaderResult
from crewai_tools.rag.data_types import DataType, DataTypes
from crewai_tools.rag.source_content import SourceContent
documents: list[BaseRecord] = []
raw_data_type = kwargs.get("data_type")
base_metadata: dict[str, Any] = kwargs.get("metadata", {})
data_type: DataType | None = None
if raw_data_type is not None:
if isinstance(raw_data_type, DataType):
if raw_data_type != DataType.FILE:
data_type = raw_data_type
elif isinstance(raw_data_type, str):
if raw_data_type != "file":
try:
data_type = DataType(raw_data_type)
except ValueError:
raise ValueError(
f"Invalid data_type: '{raw_data_type}'. "
f"Valid values are: 'file' (auto-detect), or one of: "
f"{', '.join(dt.value for dt in DataType)}"
) from None
content_items: list[ContentItem] = list(args)
path_value = kwargs.get("path") or kwargs.get("file_path")
if path_value is not None:
content_items.append(path_value)
if url := kwargs.get("url"):
content_items.append(url)
if website := kwargs.get("website"):
content_items.append(website)
if github_url := kwargs.get("github_url"):
content_items.append(github_url)
if youtube_url := kwargs.get("youtube_url"):
content_items.append(youtube_url)
if directory_path := kwargs.get("directory_path"):
content_items.append(directory_path)
file_extensions = {
".pdf",
".txt",
".csv",
".json",
".xml",
".docx",
".mdx",
".md",
}
for arg in content_items:
source_ref: str
if isinstance(arg, dict):
source_ref = str(arg.get("source", arg.get("content", "")))
else:
source_ref = str(arg)
if not data_type:
ext = os.path.splitext(source_ref)[1].lower()
is_url = source_ref.startswith(("http://", "https://", "file://"))
if (
ext in file_extensions
and not is_url
and not os.path.isfile(source_ref)
):
raise FileNotFoundError(f"File does not exist: {source_ref}")
data_type = DataTypes.from_content(source_ref)
if data_type == DataType.DIRECTORY:
if not os.path.isdir(source_ref):
raise ValueError(f"Directory does not exist: {source_ref}")
# Define binary and non-text file extensions to skip
binary_extensions = {
".pyc",
".pyo",
".png",
".jpg",
".jpeg",
".gif",
".bmp",
".ico",
".svg",
".webp",
".pdf",
".zip",
".tar",
".gz",
".bz2",
".7z",
".rar",
".exe",
".dll",
".so",
".dylib",
".bin",
".dat",
".db",
".sqlite",
".class",
".jar",
".war",
".ear",
}
for root, dirs, files in os.walk(source_ref):
dirs[:] = [d for d in dirs if not d.startswith(".")]
for filename in files:
if filename.startswith("."):
continue
# Skip binary files based on extension
file_ext = os.path.splitext(filename)[1].lower()
if file_ext in binary_extensions:
continue
# Skip __pycache__ directories
if "__pycache__" in root:
continue
file_path: str = os.path.join(root, filename)
try:
file_data_type: DataType = DataTypes.from_content(file_path)
file_loader = file_data_type.get_loader()
file_chunker = file_data_type.get_chunker()
file_source = SourceContent(file_path)
file_result: LoaderResult = file_loader.load(file_source)
file_chunks = file_chunker.chunk(file_result.content)
for chunk_idx, file_chunk in enumerate(file_chunks):
file_metadata: dict[str, Any] = base_metadata.copy()
file_metadata.update(file_result.metadata)
file_metadata["data_type"] = str(file_data_type)
file_metadata["file_path"] = file_path
file_metadata["chunk_index"] = chunk_idx
file_metadata["total_chunks"] = len(file_chunks)
if isinstance(arg, dict):
file_metadata.update(arg.get("metadata", {}))
chunk_hash = hashlib.sha256(
f"{file_result.doc_id}_{chunk_idx}_{file_chunk}".encode()
).hexdigest()
chunk_id = str(uuid.UUID(chunk_hash[:32]))
documents.append(
{
"doc_id": chunk_id,
"content": file_chunk,
"metadata": sanitize_metadata_for_chromadb(
file_metadata
),
}
)
except Exception: # noqa: S112
# Silently skip files that can't be processed
continue
else:
metadata: dict[str, Any] = base_metadata.copy()
source_content = SourceContent(source_ref)
if data_type in [
DataType.PDF_FILE,
DataType.TEXT_FILE,
DataType.DOCX,
DataType.CSV,
DataType.JSON,
DataType.XML,
DataType.MDX,
]:
if not source_content.is_url() and not source_content.path_exists():
raise FileNotFoundError(f"File does not exist: {source_ref}")
loader = data_type.get_loader()
chunker = data_type.get_chunker()
loader_result: LoaderResult = loader.load(source_content)
chunks = chunker.chunk(loader_result.content)
for i, chunk in enumerate(chunks):
chunk_metadata: dict[str, Any] = metadata.copy()
chunk_metadata.update(loader_result.metadata)
chunk_metadata["data_type"] = str(data_type)
chunk_metadata["chunk_index"] = i
chunk_metadata["total_chunks"] = len(chunks)
chunk_metadata["source"] = source_ref
if isinstance(arg, dict):
chunk_metadata.update(arg.get("metadata", {}))
chunk_hash = hashlib.sha256(
f"{loader_result.doc_id}_{i}_{chunk}".encode()
).hexdigest()
chunk_id = str(uuid.UUID(chunk_hash[:32]))
documents.append(
{
"doc_id": chunk_id,
"content": chunk,
"metadata": sanitize_metadata_for_chromadb(chunk_metadata),
}
)
if documents:
if self._client is None:
raise ValueError("Client is not initialized")
self._client.add_documents(
collection_name=self.collection_name, documents=documents
)
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/adapters/crewai_rag_adapter.py",
"license": "MIT License",
"lines": 295,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/adapters/lancedb_adapter.py | from collections.abc import Callable
from pathlib import Path
from typing import Any
from lancedb import ( # type: ignore[import-untyped]
DBConnection as LanceDBConnection,
connect as lancedb_connect,
)
from lancedb.table import Table as LanceDBTable # type: ignore[import-untyped]
from openai import Client as OpenAIClient
from pydantic import Field, PrivateAttr
from crewai_tools.tools.rag.rag_tool import Adapter
def _default_embedding_function():
client = OpenAIClient()
def _embedding_function(input):
rs = client.embeddings.create(input=input, model="text-embedding-ada-002")
return [record.embedding for record in rs.data]
return _embedding_function
class LanceDBAdapter(Adapter):
uri: str | Path
table_name: str
embedding_function: Callable = Field(default_factory=_default_embedding_function)
top_k: int = 3
vector_column_name: str = "vector"
text_column_name: str = "text"
_db: LanceDBConnection = PrivateAttr()
_table: LanceDBTable = PrivateAttr()
def model_post_init(self, __context: Any) -> None:
self._db = lancedb_connect(self.uri)
self._table = self._db.open_table(self.table_name)
super().model_post_init(__context)
def query(self, question: str) -> str: # type: ignore[override]
query = self.embedding_function([question])[0]
results = (
self._table.search(query, vector_column_name=self.vector_column_name)
.limit(self.top_k)
.select([self.text_column_name])
.to_list()
)
values = [result[self.text_column_name] for result in results]
return "\n".join(values)
def add(
self,
*args: Any,
**kwargs: Any,
) -> None:
self._table.add(*args, **kwargs)
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/adapters/lancedb_adapter.py",
"license": "MIT License",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/adapters/mcp_adapter.py | """MCPServer for CrewAI."""
from __future__ import annotations
from collections.abc import Callable
import logging
from typing import TYPE_CHECKING, Any
from crewai.tools import BaseTool
from crewai.utilities.pydantic_schema_utils import create_model_from_schema
from crewai.utilities.string_utils import sanitize_tool_name
from pydantic import BaseModel
from crewai_tools.adapters.tool_collection import ToolCollection
if TYPE_CHECKING:
from mcp import StdioServerParameters
from mcp.types import CallToolResult, TextContent, Tool
from mcpadapt.core import MCPAdapt, ToolAdapter
logger = logging.getLogger(__name__)
try:
from mcp import StdioServerParameters
from mcp.types import CallToolResult, TextContent, Tool
from mcpadapt.core import MCPAdapt, ToolAdapter
class CrewAIToolAdapter(ToolAdapter):
"""Adapter that creates CrewAI tools with properly normalized JSON schemas.
This adapter bypasses mcpadapt's model creation which adds invalid null values
to field schemas, instead using CrewAI's own schema utilities.
"""
def adapt(
self,
func: Callable[[dict[str, Any] | None], CallToolResult],
mcp_tool: Tool,
) -> BaseTool:
"""Adapt a MCP tool to a CrewAI tool.
Args:
func: The function to call when the tool is invoked.
mcp_tool: The MCP tool definition to adapt.
Returns:
A CrewAI BaseTool instance.
"""
tool_name = sanitize_tool_name(mcp_tool.name)
tool_description = mcp_tool.description or ""
args_model = create_model_from_schema(mcp_tool.inputSchema)
class CrewAIMCPTool(BaseTool):
name: str = tool_name
description: str = tool_description
args_schema: type[BaseModel] = args_model
def _run(self, **kwargs: Any) -> Any:
result = func(kwargs)
if len(result.content) == 1:
first_content = result.content[0]
if isinstance(first_content, TextContent):
return first_content.text
return str(first_content)
return str(
[
content.text
for content in result.content
if isinstance(content, TextContent)
]
)
def _generate_description(self) -> None:
schema = self.args_schema.model_json_schema()
schema.pop("$defs", None)
self.description = (
f"Tool Name: {self.name}\n"
f"Tool Arguments: {schema}\n"
f"Tool Description: {self.description}"
)
return CrewAIMCPTool()
async def async_adapt(self, afunc: Any, mcp_tool: Tool) -> Any:
"""Async adaptation is not supported by CrewAI."""
raise NotImplementedError("async is not supported by the CrewAI framework.")
MCP_AVAILABLE = True
except ImportError as e:
logger.debug(f"MCP packages not available: {e}")
MCP_AVAILABLE = False
class MCPServerAdapter:
"""Manages the lifecycle of an MCP server and make its tools available to CrewAI.
Note: tools can only be accessed after the server has been started with the
`start()` method.
Usage:
# context manager + stdio
with MCPServerAdapter(...) as tools:
# tools is now available
# context manager + sse
with MCPServerAdapter({"url": "http://localhost:8000/sse"}) as tools:
# tools is now available
# context manager with filtered tools
with MCPServerAdapter(..., "tool1", "tool2") as filtered_tools:
# only tool1 and tool2 are available
# context manager with custom connect timeout (60 seconds)
with MCPServerAdapter(..., connect_timeout=60) as tools:
# tools is now available with longer timeout
# manually stop mcp server
try:
mcp_server = MCPServerAdapter(...)
tools = mcp_server.tools # all tools
# or with filtered tools and custom timeout
mcp_server = MCPServerAdapter(..., "tool1", "tool2", connect_timeout=45)
filtered_tools = mcp_server.tools # only tool1 and tool2
...
finally:
mcp_server.stop()
# Best practice is ensure cleanup is done after use.
mcp_server.stop() # run after crew().kickoff()
"""
def __init__(
self,
serverparams: StdioServerParameters | dict[str, Any],
*tool_names: str,
connect_timeout: int = 30,
) -> None:
"""Initialize the MCP Server.
Args:
serverparams: The parameters for the MCP server it supports either a
`StdioServerParameters` or a `dict` respectively for STDIO and SSE.
*tool_names: Optional names of tools to filter. If provided, only tools with
matching names will be available.
connect_timeout: Connection timeout in seconds to the MCP server (default is 30s).
"""
super().__init__()
self._adapter = None
self._tools = None
self._tool_names = (
[sanitize_tool_name(name) for name in tool_names] if tool_names else None
)
if not MCP_AVAILABLE:
import click
if click.confirm(
"You are missing the 'mcp' package. Would you like to install it?"
):
import subprocess
try:
subprocess.run(["uv", "add", "mcp crewai-tools'[mcp]'"], check=True) # noqa: S607
except subprocess.CalledProcessError as e:
raise ImportError("Failed to install mcp package") from e
else:
raise ImportError(
"`mcp` package not found, please run `uv add crewai-tools[mcp]`"
)
try:
self._serverparams = serverparams
self._adapter = MCPAdapt(
self._serverparams, CrewAIToolAdapter(), connect_timeout
)
self.start()
except Exception as e:
if self._adapter is not None:
try:
self.stop()
except Exception as stop_e:
logger.error(f"Error during stop cleanup: {stop_e}")
raise RuntimeError(f"Failed to initialize MCP Adapter: {e}") from e
def start(self) -> None:
"""Start the MCP server and initialize the tools."""
self._tools = self._adapter.__enter__() # type: ignore[union-attr]
def stop(self) -> None:
"""Stop the MCP server."""
self._adapter.__exit__(None, None, None) # type: ignore[union-attr]
@property
def tools(self) -> ToolCollection[BaseTool]:
"""The CrewAI tools available from the MCP server.
Raises:
ValueError: If the MCP server is not started.
Returns:
The CrewAI tools available from the MCP server.
"""
if self._tools is None:
raise ValueError(
"MCP server not started, run `mcp_server.start()` first before accessing `tools`"
)
tools_collection = ToolCollection(self._tools)
if self._tool_names:
return tools_collection.filter_by_names(self._tool_names)
return tools_collection
def __enter__(self) -> ToolCollection[BaseTool]:
"""Enter the context manager.
Note that `__init__()` already starts the MCP server,
so tools should already be available.
"""
return self.tools
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_value: BaseException | None,
traceback: Any,
) -> None:
"""Exit the context manager."""
self._adapter.__exit__(exc_type, exc_value, traceback) # type: ignore[union-attr]
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/adapters/mcp_adapter.py",
"license": "MIT License",
"lines": 186,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/adapters/rag_adapter.py | from typing import Any
from crewai_tools.rag.core import RAG
from crewai_tools.tools.rag.rag_tool import Adapter
class RAGAdapter(Adapter):
def __init__(
self,
collection_name: str = "crewai_knowledge_base",
persist_directory: str | None = None,
embedding_model: str = "text-embedding-3-small",
top_k: int = 5,
embedding_api_key: str | None = None,
**embedding_kwargs,
):
super().__init__()
# Prepare embedding configuration
embedding_config = {"api_key": embedding_api_key, **embedding_kwargs}
self._adapter = RAG(
collection_name=collection_name,
persist_directory=persist_directory,
embedding_model=embedding_model,
top_k=top_k,
embedding_config=embedding_config,
)
def query(self, question: str) -> str: # type: ignore[override]
return self._adapter.query(question)
def add(
self,
*args: Any,
**kwargs: Any,
) -> None:
self._adapter.add(*args, **kwargs)
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/adapters/rag_adapter.py",
"license": "MIT License",
"lines": 31,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/adapters/tool_collection.py | from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
from crewai.tools import BaseTool
T = TypeVar("T", bound=BaseTool)
class ToolCollection(list, Generic[T]):
"""A collection of tools that can be accessed by index or name.
This class extends the built-in list to provide dictionary-like
access to tools based on their name property.
Usage:
tools = ToolCollection(list_of_tools)
# Access by index (regular list behavior)
first_tool = tools[0]
# Access by name (new functionality)
search_tool = tools["search"]
"""
def __init__(self, tools: list[T] | None = None):
super().__init__(tools or [])
self._name_cache: dict[str, T] = {}
self._build_name_cache()
def _build_name_cache(self) -> None:
self._name_cache = {tool.name.lower(): tool for tool in self}
def __getitem__(self, key: int | str) -> T: # type: ignore[override]
if isinstance(key, str):
return self._name_cache[key.lower()]
return super().__getitem__(key)
def append(self, tool: T) -> None:
super().append(tool)
self._name_cache[tool.name.lower()] = tool
def extend(self, tools: list[T]) -> None: # type: ignore[override]
super().extend(tools)
self._build_name_cache()
def insert(self, index: int, tool: T) -> None: # type: ignore[override]
super().insert(index, tool)
self._name_cache[tool.name.lower()] = tool
def remove(self, tool: T) -> None:
super().remove(tool)
if tool.name.lower() in self._name_cache:
del self._name_cache[tool.name.lower()]
def pop(self, index: int = -1) -> T: # type: ignore[override]
tool = super().pop(index)
if tool.name.lower() in self._name_cache:
del self._name_cache[tool.name.lower()]
return tool
def filter_by_names(self, names: list[str] | None = None) -> ToolCollection[T]:
if names is None:
return self
return ToolCollection(
[
tool
for name in names
if (tool := self._name_cache.get(name.lower())) is not None
]
)
def filter_where(self, func: Callable[[T], bool]) -> ToolCollection[T]:
return ToolCollection([tool for tool in self if func(tool)])
def clear(self) -> None:
super().clear()
self._name_cache.clear()
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/adapters/tool_collection.py",
"license": "MIT License",
"lines": 59,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/adapters/zapier_adapter.py | import logging
import os
from typing import Final, Literal
from crewai.tools import BaseTool
from pydantic import Field, create_model
import requests
ACTIONS_URL: Final[Literal["https://actions.zapier.com/api/v2/ai-actions"]] = (
"https://actions.zapier.com/api/v2/ai-actions"
)
logger = logging.getLogger(__name__)
class ZapierActionTool(BaseTool):
"""A tool that wraps a Zapier action."""
name: str = Field(description="Tool name")
description: str = Field(description="Tool description")
action_id: str = Field(description="Zapier action ID")
api_key: str = Field(description="Zapier API key")
def _run(self, **kwargs) -> str:
"""Execute the Zapier action."""
headers = {"x-api-key": self.api_key, "Content-Type": "application/json"}
instructions = kwargs.pop(
"instructions", "Execute this action with the provided parameters"
)
if not kwargs:
action_params = {"instructions": instructions, "params": {}}
else:
formatted_params = {}
for key, value in kwargs.items():
formatted_params[key] = {
"value": value,
"mode": "guess",
}
action_params = {"instructions": instructions, "params": formatted_params}
execute_url = f"{ACTIONS_URL}/{self.action_id}/execute/"
response = requests.request(
"POST",
execute_url,
headers=headers,
json=action_params,
timeout=30,
)
response.raise_for_status()
return response.json()
class ZapierActionsAdapter:
    """Adapter that discovers Zapier AI Actions and exposes them as CrewAI tools."""

    def __init__(self, api_key: str | None = None):
        """Initialize the adapter.

        Args:
            api_key: Zapier API key. Falls back to the ZAPIER_API_KEY
                environment variable when not provided.

        Raises:
            ValueError: If no API key is available from either source.
        """
        self.api_key = api_key or os.getenv("ZAPIER_API_KEY")
        if not self.api_key:
            logger.error("Zapier Actions API key is required")
            raise ValueError("Zapier Actions API key is required")

    def get_zapier_actions(self):
        """Fetch the raw list of AI actions from the Zapier API.

        Returns:
            The decoded JSON response from the actions endpoint.

        Raises:
            requests.HTTPError: If the API returns an error status.
        """
        response = requests.get(
            ACTIONS_URL,
            headers={"x-api-key": self.api_key},
            timeout=30,
        )
        response.raise_for_status()
        return response.json()

    def tools(self) -> list[ZapierActionTool]:
        """Convert Zapier actions to BaseTool instances.

        Returns:
            One ZapierActionTool per action exposed by the API, each with a
            dynamically generated pydantic args schema.
        """
        actions_response = self.get_zapier_actions()
        tools = []
        for action in actions_response.get("results", []):
            label = action["meta"]["action_label"]
            tool_name = label.replace(" ", "_").replace(":", "").lower()

            # Every tool accepts free-form instructions plus the action's params.
            args_fields = {
                "instructions": (
                    str,
                    Field(description="Instructions for how to execute this action"),
                )
            }
            params = action.get("params", {})
            for param_name, param_info in params.items():
                # Default to string, could be enhanced based on param_info
                field_description = (
                    param_info.get("description", "")
                    if isinstance(param_info, dict)
                    else ""
                )
                args_fields[param_name] = (str, Field(description=field_description))

            args_schema = create_model(f"{tool_name.title()}Schema", **args_fields)  # type: ignore[call-overload]
            tools.append(
                ZapierActionTool(
                    name=tool_name,
                    # Fall back to the human-readable label: a missing
                    # "description" key must not abort discovery of all tools.
                    description=action.get("description", label),
                    action_id=action["id"],
                    api_key=self.api_key,
                    args_schema=args_schema,
                )
            )
        return tools
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/adapters/zapier_adapter.py",
"license": "MIT License",
"lines": 102,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/aws/bedrock/agents/invoke_agent_tool.py | from datetime import datetime, timezone
import json
import os
import time
from crewai.tools import BaseTool
from dotenv import load_dotenv
from pydantic import BaseModel, Field
from crewai_tools.aws.bedrock.exceptions import (
BedrockAgentError,
BedrockValidationError,
)
# Load environment variables from .env file so BEDROCK_AGENT_ID /
# BEDROCK_AGENT_ALIAS_ID / AWS_REGION can be supplied via dotenv.
load_dotenv()
class BedrockInvokeAgentToolInput(BaseModel):
    """Input schema for BedrockInvokeAgentTool."""

    # Free-form text that is embedded into the prompt sent to the Bedrock agent.
    query: str = Field(..., description="The query to send to the agent")
class BedrockInvokeAgentTool(BaseTool):
    """CrewAI tool that invokes an AWS Bedrock agent and returns its completion text."""

    name: str = "Bedrock Agent Invoke Tool"
    description: str = "An agent responsible for policy analysis."
    args_schema: type[BaseModel] = BedrockInvokeAgentToolInput
    # Agent wiring; values left as None here fall back to env vars in __init__.
    agent_id: str | None = None
    agent_alias_id: str | None = None
    session_id: str | None = None
    enable_trace: bool = False
    end_session: bool = False
    package_dependencies: list[str] = Field(default_factory=lambda: ["boto3"])

    def __init__(
        self,
        agent_id: str | None = None,
        agent_alias_id: str | None = None,
        session_id: str | None = None,
        enable_trace: bool = False,
        end_session: bool = False,
        description: str | None = None,
        **kwargs,
    ):
        """Initialize the BedrockInvokeAgentTool with agent configuration.

        Args:
            agent_id (str): The unique identifier of the Bedrock agent
            agent_alias_id (str): The unique identifier of the agent alias
            session_id (str): The unique identifier of the session
            enable_trace (bool): Whether to enable trace for the agent invocation
            end_session (bool): Whether to end the session with the agent
            description (Optional[str]): Custom description for the tool

        Raises:
            BedrockValidationError: If agent_id or agent_alias_id is missing
                or any identifier is not a string.
        """
        super().__init__(**kwargs)

        # Get values from environment variables if not provided
        self.agent_id = agent_id or os.getenv("BEDROCK_AGENT_ID")
        self.agent_alias_id = agent_alias_id or os.getenv("BEDROCK_AGENT_ALIAS_ID")
        # NOTE(review): second-resolution timestamps can collide for tools
        # created within the same second — confirm this is acceptable for
        # session isolation.
        self.session_id = session_id or str(
            int(time.time())
        )  # Use timestamp as session ID if not provided
        self.enable_trace = enable_trace
        self.end_session = end_session

        # Update the description if provided
        if description:
            self.description = description

        # Validate parameters
        self._validate_parameters()

    def _validate_parameters(self):
        """Validate the parameters according to AWS API requirements.

        Raises:
            BedrockValidationError: With a "Parameter validation failed" prefix
                wrapping the specific validation error.
        """
        try:
            # Validate agent_id
            if not self.agent_id:
                raise BedrockValidationError("agent_id cannot be empty")
            if not isinstance(self.agent_id, str):
                raise BedrockValidationError("agent_id must be a string")

            # Validate agent_alias_id
            if not self.agent_alias_id:
                raise BedrockValidationError("agent_alias_id cannot be empty")
            if not isinstance(self.agent_alias_id, str):
                raise BedrockValidationError("agent_alias_id must be a string")

            # Validate session_id if provided
            if self.session_id and not isinstance(self.session_id, str):
                raise BedrockValidationError("session_id must be a string")
        except BedrockValidationError as e:
            raise BedrockValidationError(f"Parameter validation failed: {e!s}") from e

    def _run(self, query: str) -> str:
        """Invoke the Bedrock agent with *query* and return the completion text.

        Args:
            query: The user's query or task for the agent.

        Returns:
            The concatenated completion text extracted from the response.

        Raises:
            ImportError: If boto3 is not installed.
            BedrockAgentError: On AWS client errors, unexpected errors, or when
                no completion could be extracted from the response.
        """
        try:
            import boto3
            from botocore.exceptions import ClientError
        except ImportError as e:
            raise ImportError(
                "`boto3` package not found, please run `uv add boto3`"
            ) from e

        try:
            # Initialize the Bedrock Agent Runtime client
            bedrock_agent = boto3.client(
                "bedrock-agent-runtime",
                region_name=os.getenv(
                    "AWS_REGION", os.getenv("AWS_DEFAULT_REGION", "us-west-2")
                ),
            )

            # Format the prompt with current time
            current_utc = datetime.now(timezone.utc)
            prompt = f"""
            The current time is: {current_utc}
            Below is the users query or task. Complete it and answer it consicely and to the point:
            {query}
            """

            # Invoke the agent
            response = bedrock_agent.invoke_agent(
                agentId=self.agent_id,
                agentAliasId=self.agent_alias_id,
                sessionId=self.session_id,
                inputText=prompt,
                enableTrace=self.enable_trace,
                endSession=self.end_session,
            )

            # Process the response
            completion = ""

            # Check if response contains a completion field
            if "completion" in response:
                # Process streaming response format
                for event in response.get("completion", []):
                    if "chunk" in event and "bytes" in event["chunk"]:
                        chunk_bytes = event["chunk"]["bytes"]
                        if isinstance(chunk_bytes, (bytes, bytearray)):
                            completion += chunk_bytes.decode("utf-8")
                        else:
                            completion += str(chunk_bytes)

            # If no completion found in streaming format, try direct format
            if not completion and "chunk" in response and "bytes" in response["chunk"]:
                chunk_bytes = response["chunk"]["bytes"]
                if isinstance(chunk_bytes, (bytes, bytearray)):
                    completion = chunk_bytes.decode("utf-8")
                else:
                    completion = str(chunk_bytes)

            # If still no completion, return debug info
            if not completion:
                debug_info = {
                    "error": "Could not extract completion from response",
                    "response_keys": list(response.keys()),
                }

                # Add more debug info
                if "chunk" in response:
                    debug_info["chunk_keys"] = list(response["chunk"].keys())

                raise BedrockAgentError(
                    f"Failed to extract completion: {json.dumps(debug_info, indent=2)}"
                )

            return completion

        except ClientError as e:
            error_code = "Unknown"
            error_message = str(e)

            # Try to extract error code if available
            if hasattr(e, "response") and "Error" in e.response:
                error_code = e.response["Error"].get("Code", "Unknown")
                error_message = e.response["Error"].get("Message", str(e))

            raise BedrockAgentError(f"Error ({error_code}): {error_message}") from e
        except BedrockAgentError:
            # Re-raise BedrockAgentError exceptions
            raise
        except Exception as e:
            raise BedrockAgentError(f"Unexpected error: {e!s}") from e
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/aws/bedrock/agents/invoke_agent_tool.py",
"license": "MIT License",
"lines": 154,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/aws/bedrock/browser/browser_session_manager.py | from __future__ import annotations
import logging
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from bedrock_agentcore.tools.browser_client import BrowserClient
from playwright.async_api import Browser as AsyncBrowser
from playwright.sync_api import Browser as SyncBrowser
logger = logging.getLogger(__name__)
class BrowserSessionManager:
    """Manages browser sessions for different threads.

    This class maintains separate browser sessions for different threads,
    enabling concurrent usage of browsers in multi-threaded environments.
    Browsers are created lazily only when needed.
    """

    def __init__(self, region: str = "us-west-2"):
        """Initialize the browser session manager.

        Args:
            region: AWS region for browser client
        """
        self.region = region
        # Sessions keyed by caller-supplied thread_id; each value pairs the
        # AgentCore browser client with the connected Playwright browser.
        # NOTE(review): plain dicts with no locking — confirm callers do not
        # mutate the same thread_id from multiple threads concurrently.
        self._async_sessions: dict[str, tuple[BrowserClient, AsyncBrowser]] = {}
        self._sync_sessions: dict[str, tuple[BrowserClient, SyncBrowser]] = {}

    async def get_async_browser(self, thread_id: str) -> AsyncBrowser:
        """Get or create an async browser for the specified thread.

        Args:
            thread_id: Unique identifier for the thread requesting the browser

        Returns:
            An async browser instance specific to the thread
        """
        # Reuse the existing session when present; otherwise create lazily.
        if thread_id in self._async_sessions:
            return self._async_sessions[thread_id][1]
        return await self._create_async_browser_session(thread_id)

    def get_sync_browser(self, thread_id: str) -> SyncBrowser:
        """Get or create a sync browser for the specified thread.

        Args:
            thread_id: Unique identifier for the thread requesting the browser

        Returns:
            A sync browser instance specific to the thread
        """
        if thread_id in self._sync_sessions:
            return self._sync_sessions[thread_id][1]
        return self._create_sync_browser_session(thread_id)

    async def _create_async_browser_session(self, thread_id: str) -> AsyncBrowser:
        """Create a new async browser session for the specified thread.

        Args:
            thread_id: Unique identifier for the thread

        Returns:
            The newly created async browser instance

        Raises:
            Exception: If browser session creation fails
        """
        from bedrock_agentcore.tools.browser_client import BrowserClient

        browser_client = BrowserClient(region=self.region)
        try:
            # Start browser session
            browser_client.start()

            # Get WebSocket connection info
            ws_url, headers = browser_client.generate_ws_headers()
            logger.info(
                f"Connecting to async WebSocket endpoint for thread {thread_id}: {ws_url}"
            )

            from playwright.async_api import async_playwright

            # Connect to browser using Playwright
            # NOTE(review): the playwright driver started here is never stored,
            # so it cannot be stopped in close_async_browser — confirm whether
            # it should be retained and shut down during cleanup.
            playwright = await async_playwright().start()
            browser = await playwright.chromium.connect_over_cdp(
                endpoint_url=ws_url, headers=headers, timeout=30000
            )
            logger.info(
                f"Successfully connected to async browser for thread {thread_id}"
            )

            # Store session resources
            self._async_sessions[thread_id] = (browser_client, browser)

            return browser
        except Exception as e:
            logger.error(
                f"Failed to create async browser session for thread {thread_id}: {e}"
            )
            # Clean up resources if session creation fails
            if browser_client:
                try:
                    browser_client.stop()
                except Exception as cleanup_error:
                    logger.warning(f"Error cleaning up browser client: {cleanup_error}")
            raise

    def _create_sync_browser_session(self, thread_id: str) -> SyncBrowser:
        """Create a new sync browser session for the specified thread.

        Args:
            thread_id: Unique identifier for the thread

        Returns:
            The newly created sync browser instance

        Raises:
            Exception: If browser session creation fails
        """
        from bedrock_agentcore.tools.browser_client import BrowserClient

        browser_client = BrowserClient(region=self.region)
        try:
            # Start browser session
            browser_client.start()

            # Get WebSocket connection info
            ws_url, headers = browser_client.generate_ws_headers()
            logger.info(
                f"Connecting to sync WebSocket endpoint for thread {thread_id}: {ws_url}"
            )

            from playwright.sync_api import sync_playwright

            # Connect to browser using Playwright
            # NOTE(review): the playwright driver started here is never stored,
            # so it cannot be stopped in close_sync_browser — see async variant.
            playwright = sync_playwright().start()
            browser = playwright.chromium.connect_over_cdp(
                endpoint_url=ws_url, headers=headers, timeout=30000
            )
            logger.info(
                f"Successfully connected to sync browser for thread {thread_id}"
            )

            # Store session resources
            self._sync_sessions[thread_id] = (browser_client, browser)

            return browser
        except Exception as e:
            logger.error(
                f"Failed to create sync browser session for thread {thread_id}: {e}"
            )
            # Clean up resources if session creation fails
            if browser_client:
                try:
                    browser_client.stop()
                except Exception as cleanup_error:
                    logger.warning(f"Error cleaning up browser client: {cleanup_error}")
            raise

    async def close_async_browser(self, thread_id: str) -> None:
        """Close the async browser session for the specified thread.

        Args:
            thread_id: Unique identifier for the thread
        """
        if thread_id not in self._async_sessions:
            logger.warning(f"No async browser session found for thread {thread_id}")
            return

        browser_client, browser = self._async_sessions[thread_id]

        # Close browser first, then stop the client; each step is best-effort
        # so a failure in one does not prevent the other.
        if browser:
            try:
                await browser.close()
            except Exception as e:
                logger.warning(
                    f"Error closing async browser for thread {thread_id}: {e}"
                )

        # Stop browser client
        if browser_client:
            try:
                browser_client.stop()
            except Exception as e:
                logger.warning(
                    f"Error stopping browser client for thread {thread_id}: {e}"
                )

        # Remove session from dictionary
        del self._async_sessions[thread_id]
        logger.info(f"Async browser session cleaned up for thread {thread_id}")

    def close_sync_browser(self, thread_id: str) -> None:
        """Close the sync browser session for the specified thread.

        Args:
            thread_id: Unique identifier for the thread
        """
        if thread_id not in self._sync_sessions:
            logger.warning(f"No sync browser session found for thread {thread_id}")
            return

        browser_client, browser = self._sync_sessions[thread_id]

        # Close browser first, then stop the client; each step is best-effort.
        if browser:
            try:
                browser.close()
            except Exception as e:
                logger.warning(
                    f"Error closing sync browser for thread {thread_id}: {e}"
                )

        # Stop browser client
        if browser_client:
            try:
                browser_client.stop()
            except Exception as e:
                logger.warning(
                    f"Error stopping browser client for thread {thread_id}: {e}"
                )

        # Remove session from dictionary
        del self._sync_sessions[thread_id]
        logger.info(f"Sync browser session cleaned up for thread {thread_id}")

    async def close_all_browsers(self) -> None:
        """Close all browser sessions."""
        # Close all async browsers; iterate over a snapshot because the close
        # methods delete entries while we loop.
        async_thread_ids = list(self._async_sessions.keys())
        for thread_id in async_thread_ids:
            await self.close_async_browser(thread_id)

        # Close all sync browsers
        sync_thread_ids = list(self._sync_sessions.keys())
        for thread_id in sync_thread_ids:
            self.close_sync_browser(thread_id)

        logger.info("All browser sessions closed")
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/aws/bedrock/browser/browser_session_manager.py",
"license": "MIT License",
"lines": 193,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/aws/bedrock/browser/browser_toolkit.py | """Toolkit for navigating web with AWS browser."""
import asyncio
import json
import logging
from typing import Any
from urllib.parse import urlparse
from crewai.tools import BaseTool
from pydantic import BaseModel, Field
from crewai_tools.aws.bedrock.browser.browser_session_manager import (
BrowserSessionManager,
)
from crewai_tools.aws.bedrock.browser.utils import aget_current_page, get_current_page
logger = logging.getLogger(__name__)
# Input schemas
class NavigateToolInput(BaseModel):
    """Input for NavigateTool."""

    # Target address; the tool rejects schemes other than http/https at run time.
    url: str = Field(description="URL to navigate to")
    thread_id: str = Field(
        default="default", description="Thread ID for the browser session"
    )
class ClickToolInput(BaseModel):
    """Input for ClickTool."""

    # Passed to Playwright's page.click (optionally narrowed to visible elements).
    selector: str = Field(description="CSS selector for the element to click on")
    thread_id: str = Field(
        default="default", description="Thread ID for the browser session"
    )
class GetElementsToolInput(BaseModel):
    """Input for GetElementsTool."""

    # Passed to Playwright's query_selector_all on the current page.
    selector: str = Field(description="CSS selector for elements to get")
    thread_id: str = Field(
        default="default", description="Thread ID for the browser session"
    )
class ExtractTextToolInput(BaseModel):
    """Input for ExtractTextTool."""

    # Selects which per-thread browser session the tool operates on.
    thread_id: str = Field(
        default="default", description="Thread ID for the browser session"
    )
class ExtractHyperlinksToolInput(BaseModel):
    """Input for ExtractHyperlinksTool."""

    # Selects which per-thread browser session the tool operates on.
    thread_id: str = Field(
        default="default", description="Thread ID for the browser session"
    )
class NavigateBackToolInput(BaseModel):
    """Input for NavigateBackTool."""

    # Selects which per-thread browser session the tool operates on.
    thread_id: str = Field(
        default="default", description="Thread ID for the browser session"
    )
class CurrentWebPageToolInput(BaseModel):
    """Input for CurrentWebPageTool."""

    # Selects which per-thread browser session the tool operates on.
    thread_id: str = Field(
        default="default", description="Thread ID for the browser session"
    )
# Base tool class
class BrowserBaseTool(BaseTool):
    """Base class for browser tools.

    Holds the shared session manager and, when constructed inside a running
    asyncio loop, transparently redirects the sync ``_run`` to ``_arun``.
    """

    def __init__(self, session_manager: BrowserSessionManager):  # type: ignore[call-arg]
        """Initialize with a session manager."""
        super().__init__()  # type: ignore[call-arg]
        self._session_manager = session_manager
        if self._is_in_asyncio_loop() and hasattr(self, "_arun"):
            # Keep the original sync implementation reachable for debugging.
            self._original_run = self._run

            # Override _run to use _arun when in an asyncio loop; nest_asyncio
            # allows run_until_complete on a loop that is already running.
            def patched_run(*args, **kwargs):
                try:
                    import nest_asyncio  # type: ignore[import-untyped]

                    loop = asyncio.get_event_loop()
                    nest_asyncio.apply(loop)
                    return asyncio.get_event_loop().run_until_complete(
                        self._arun(*args, **kwargs)
                    )
                except Exception as e:
                    return f"Error in patched _run: {e!s}"

            self._run = patched_run  # type: ignore[method-assign]

    async def get_async_page(self, thread_id: str) -> Any:
        """Get or create a page for the specified thread."""
        browser = await self._session_manager.get_async_browser(thread_id)
        return await aget_current_page(browser)

    def get_sync_page(self, thread_id: str) -> Any:
        """Get or create a page for the specified thread."""
        browser = self._session_manager.get_sync_browser(thread_id)
        return get_current_page(browser)

    def _is_in_asyncio_loop(self) -> bool:
        """Check if we're currently in an asyncio event loop.

        NOTE(review): asyncio.get_event_loop is deprecated for this use in
        newer Python versions — confirm behavior on the supported versions.
        """
        try:
            loop = asyncio.get_event_loop()
            return loop.is_running()
        except RuntimeError:
            return False
# Tool classes
class NavigateTool(BrowserBaseTool):
    """Tool that drives the thread's browser to a given http(s) URL."""

    name: str = "navigate_browser"
    description: str = "Navigate a browser to the specified URL"
    args_schema: type[BaseModel] = NavigateToolInput

    def _run(self, url: str, thread_id: str = "default", **kwargs) -> str:
        """Synchronously navigate to *url* and report the HTTP status."""
        try:
            page = self.get_sync_page(thread_id)

            # Only plain web schemes are permitted.
            if urlparse(url).scheme not in ("http", "https"):
                raise ValueError("URL scheme must be 'http' or 'https'")

            result = page.goto(url)
            code = result.status if result else "unknown"
            return f"Navigating to {url} returned status code {code}"
        except Exception as exc:
            return f"Error navigating to {url}: {exc!s}"

    async def _arun(self, url: str, thread_id: str = "default", **kwargs) -> str:
        """Asynchronously navigate to *url* and report the HTTP status."""
        try:
            page = await self.get_async_page(thread_id)

            # Only plain web schemes are permitted.
            if urlparse(url).scheme not in ("http", "https"):
                raise ValueError("URL scheme must be 'http' or 'https'")

            result = await page.goto(url)
            code = result.status if result else "unknown"
            return f"Navigating to {url} returned status code {code}"
        except Exception as exc:
            return f"Error navigating to {url}: {exc!s}"
class ClickTool(BrowserBaseTool):
    """Tool for clicking on an element with the given CSS selector."""

    name: str = "click_element"
    description: str = "Click on an element with the given CSS selector"
    args_schema: type[BaseModel] = ClickToolInput
    visible_only: bool = True
    """Whether to consider only visible elements."""
    playwright_strict: bool = False
    """Whether to employ Playwright's strict mode when clicking on elements."""
    playwright_timeout: float = 1_000
    """Timeout (in ms) for Playwright to wait for element to be ready."""

    def _selector_effective(self, selector: str) -> str:
        """Return *selector*, narrowed to visible elements when visible_only is set."""
        if not self.visible_only:
            return selector
        return f"{selector} >> visible=1"

    def _run(self, selector: str, thread_id: str = "default", **kwargs) -> str:
        """Use the sync tool. Returns a human-readable message; never raises."""
        try:
            # Get the current page
            page = self.get_sync_page(thread_id)

            # Click on the element
            selector_effective = self._selector_effective(selector=selector)
            from playwright.sync_api import TimeoutError as PlaywrightTimeoutError

            try:
                page.click(
                    selector_effective,
                    strict=self.playwright_strict,
                    timeout=self.playwright_timeout,
                )
            except PlaywrightTimeoutError:
                # Element never became clickable within playwright_timeout ms.
                return f"Unable to click on element '{selector}'"
            except Exception as click_error:
                return f"Unable to click on element '{selector}': {click_error!s}"
            return f"Clicked element '{selector}'"
        except Exception as e:
            return f"Error clicking on element: {e!s}"

    async def _arun(self, selector: str, thread_id: str = "default", **kwargs) -> str:
        """Use the async tool. Returns a human-readable message; never raises."""
        try:
            # Get the current page
            page = await self.get_async_page(thread_id)

            # Click on the element
            selector_effective = self._selector_effective(selector=selector)
            from playwright.async_api import TimeoutError as PlaywrightTimeoutError

            try:
                await page.click(
                    selector_effective,
                    strict=self.playwright_strict,
                    timeout=self.playwright_timeout,
                )
            except PlaywrightTimeoutError:
                # Element never became clickable within playwright_timeout ms.
                return f"Unable to click on element '{selector}'"
            except Exception as click_error:
                return f"Unable to click on element '{selector}': {click_error!s}"
            return f"Clicked element '{selector}'"
        except Exception as e:
            return f"Error clicking on element: {e!s}"
class NavigateBackTool(BrowserBaseTool):
    """Tool that steps the thread's browser back one entry in its history."""

    name: str = "navigate_back"
    description: str = "Navigate back to the previous page"
    args_schema: type[BaseModel] = NavigateBackToolInput

    def _run(self, thread_id: str = "default", **kwargs) -> str:
        """Synchronously go back one page in history."""
        try:
            page = self.get_sync_page(thread_id)
        except Exception as exc:
            return f"Error navigating back: {exc!s}"
        try:
            page.go_back()
        except Exception as nav_error:
            return f"Unable to navigate back: {nav_error!s}"
        return "Navigated back to the previous page"

    async def _arun(self, thread_id: str = "default", **kwargs) -> str:
        """Asynchronously go back one page in history."""
        try:
            page = await self.get_async_page(thread_id)
        except Exception as exc:
            return f"Error navigating back: {exc!s}"
        try:
            await page.go_back()
        except Exception as nav_error:
            return f"Unable to navigate back: {nav_error!s}"
        return "Navigated back to the previous page"
class ExtractTextTool(BrowserBaseTool):
    """Tool for extracting text from a webpage."""

    name: str = "extract_text"
    description: str = "Extract all the text on the current webpage"
    args_schema: type[BaseModel] = ExtractTextToolInput

    def _run(self, thread_id: str = "default", **kwargs) -> str:
        """Use the sync tool. Returns the page's visible text or an error message."""
        try:
            # Import BeautifulSoup lazily so the dependency is only needed
            # when this tool is actually invoked.
            try:
                from bs4 import BeautifulSoup
            except ImportError:
                return (
                    "The 'beautifulsoup4' package is required to use this tool."
                    " Please install it with 'pip install beautifulsoup4'."
                )

            # Get the current page
            page = self.get_sync_page(thread_id)

            # Extract text from the full HTML, one text node per line.
            content = page.content()
            soup = BeautifulSoup(content, "html.parser")
            return soup.get_text(separator="\n").strip()
        except Exception as e:
            return f"Error extracting text: {e!s}"

    async def _arun(self, thread_id: str = "default", **kwargs) -> str:
        """Use the async tool. Returns the page's visible text or an error message."""
        try:
            # Import BeautifulSoup lazily so the dependency is only needed
            # when this tool is actually invoked.
            try:
                from bs4 import BeautifulSoup
            except ImportError:
                return (
                    "The 'beautifulsoup4' package is required to use this tool."
                    " Please install it with 'pip install beautifulsoup4'."
                )

            # Get the current page
            page = await self.get_async_page(thread_id)

            # Extract text from the full HTML, one text node per line.
            content = await page.content()
            soup = BeautifulSoup(content, "html.parser")
            return soup.get_text(separator="\n").strip()
        except Exception as e:
            return f"Error extracting text: {e!s}"
class ExtractHyperlinksTool(BrowserBaseTool):
    """Tool for extracting hyperlinks from a webpage."""

    name: str = "extract_hyperlinks"
    description: str = "Extract all hyperlinks on the current webpage"
    args_schema: type[BaseModel] = ExtractHyperlinksToolInput

    def _run(self, thread_id: str = "default", **kwargs) -> str:
        """Use the sync tool. Returns absolute links as a JSON list or an error message."""
        try:
            # Import BeautifulSoup lazily so the dependency is only needed
            # when this tool is actually invoked.
            try:
                from bs4 import BeautifulSoup
            except ImportError:
                return (
                    "The 'beautifulsoup4' package is required to use this tool."
                    " Please install it with 'pip install beautifulsoup4'."
                )

            # Get the current page
            page = self.get_sync_page(thread_id)

            # Extract hyperlinks; relative URLs are skipped.
            content = page.content()
            soup = BeautifulSoup(content, "html.parser")
            links = []
            for link in soup.find_all("a", href=True):
                text = link.get_text().strip()
                href = link["href"]
                # NOTE(review): startswith("http") already matches "https",
                # so the second tuple entry is redundant — confirm intent.
                if href.startswith(("http", "https")):  # type: ignore[union-attr]
                    links.append({"text": text, "url": href})
            if not links:
                return "No hyperlinks found on the current page."
            return json.dumps(links, indent=2)
        except Exception as e:
            return f"Error extracting hyperlinks: {e!s}"

    async def _arun(self, thread_id: str = "default", **kwargs) -> str:
        """Use the async tool. Returns absolute links as a JSON list or an error message."""
        try:
            # Import BeautifulSoup lazily so the dependency is only needed
            # when this tool is actually invoked.
            try:
                from bs4 import BeautifulSoup
            except ImportError:
                return (
                    "The 'beautifulsoup4' package is required to use this tool."
                    " Please install it with 'pip install beautifulsoup4'."
                )

            # Get the current page
            page = await self.get_async_page(thread_id)

            # Extract hyperlinks; relative URLs are skipped.
            content = await page.content()
            soup = BeautifulSoup(content, "html.parser")
            links = []
            for link in soup.find_all("a", href=True):
                text = link.get_text().strip()
                href = link["href"]
                # NOTE(review): see sync variant about the redundant tuple entry.
                if href.startswith(("http", "https")):  # type: ignore[union-attr]
                    links.append({"text": text, "url": href})
            if not links:
                return "No hyperlinks found on the current page."
            return json.dumps(links, indent=2)
        except Exception as e:
            return f"Error extracting hyperlinks: {e!s}"
class GetElementsTool(BrowserBaseTool):
    """Tool that lists the text content of elements matching a CSS selector."""

    name: str = "get_elements"
    description: str = "Get elements from the webpage using a CSS selector"
    args_schema: type[BaseModel] = GetElementsToolInput

    def _run(self, selector: str, thread_id: str = "default", **kwargs) -> str:
        """Synchronously report matching elements, one numbered line each."""
        try:
            page = self.get_sync_page(thread_id)
            matches = page.query_selector_all(selector)
            if not matches:
                return f"No elements found with selector '{selector}'"
            return "\n".join(
                f"Element {idx}: {el.text_content().strip()}"
                for idx, el in enumerate(matches, start=1)
            )
        except Exception as exc:
            return f"Error getting elements: {exc!s}"

    async def _arun(self, selector: str, thread_id: str = "default", **kwargs) -> str:
        """Asynchronously report matching elements, one numbered line each."""
        try:
            page = await self.get_async_page(thread_id)
            matches = await page.query_selector_all(selector)
            if not matches:
                return f"No elements found with selector '{selector}'"
            return "\n".join(
                [
                    f"Element {idx}: {(await el.text_content()).strip()}"
                    for idx, el in enumerate(matches, start=1)
                ]
            )
        except Exception as exc:
            return f"Error getting elements: {exc!s}"
class CurrentWebPageTool(BrowserBaseTool):
    """Tool that reports the URL and title of the current page."""

    name: str = "current_webpage"
    description: str = "Get information about the current webpage"
    args_schema: type[BaseModel] = CurrentWebPageToolInput

    def _run(self, thread_id: str = "default", **kwargs) -> str:
        """Synchronously return the current page's URL and title."""
        try:
            page = self.get_sync_page(thread_id)
            return f"URL: {page.url}\nTitle: {page.title()}"
        except Exception as exc:
            return f"Error getting current webpage info: {exc!s}"

    async def _arun(self, thread_id: str = "default", **kwargs) -> str:
        """Asynchronously return the current page's URL and title."""
        try:
            page = await self.get_async_page(thread_id)
            return f"URL: {page.url}\nTitle: {await page.title()}"
        except Exception as exc:
            return f"Error getting current webpage info: {exc!s}"
class BrowserToolkit:
    """Toolkit for navigating web with AWS Bedrock browser.

    This toolkit provides a set of tools for working with a remote browser
    and supports multiple threads by maintaining separate browser sessions
    for each thread ID. Browsers are created lazily only when needed.

    Example:
        ```python
        from crewai import Agent, Task, Crew
        from crewai_tools.aws.bedrock.browser import create_browser_toolkit

        # Create the browser toolkit
        toolkit, browser_tools = create_browser_toolkit(region="us-west-2")

        # Create a CrewAI agent that uses the browser tools
        research_agent = Agent(
            role="Web Researcher",
            goal="Research and summarize web content",
            backstory="You're an expert at finding information online.",
            tools=browser_tools,
        )

        # Create a task for the agent
        research_task = Task(
            description="Navigate to https://example.com and extract all text content. Summarize the main points.",
            agent=research_agent,
        )

        # Create and run the crew
        crew = Crew(agents=[research_agent], tasks=[research_task])
        result = crew.kickoff()

        # Clean up browser resources when done
        import asyncio

        asyncio.run(toolkit.cleanup())
        ```
    """

    def __init__(self, region: str = "us-west-2"):
        """Initialize the toolkit.

        Args:
            region: AWS region for the browser client
        """
        self.region = region
        self.session_manager = BrowserSessionManager(region=region)
        self.tools: list[BaseTool] = []
        self._nest_current_loop()
        self._setup_tools()

    def _nest_current_loop(self):
        """Apply nest_asyncio if we're in an asyncio loop.

        This lets the tools' sync entry points drive coroutines even when the
        toolkit is constructed inside an already-running event loop.
        """
        try:
            loop = asyncio.get_event_loop()
            if loop.is_running():
                try:
                    import nest_asyncio

                    nest_asyncio.apply(loop)
                except Exception as e:
                    logger.warning(f"Failed to apply nest_asyncio: {e!s}")
        except RuntimeError:
            # No event loop in this thread — nothing to patch.
            pass

    def _setup_tools(self) -> None:
        """Initialize tools without creating any browsers."""
        # All tools share one session manager so they operate on the same
        # per-thread browser sessions.
        self.tools = [
            NavigateTool(session_manager=self.session_manager),
            ClickTool(session_manager=self.session_manager),
            NavigateBackTool(session_manager=self.session_manager),
            ExtractTextTool(session_manager=self.session_manager),
            ExtractHyperlinksTool(session_manager=self.session_manager),
            GetElementsTool(session_manager=self.session_manager),
            CurrentWebPageTool(session_manager=self.session_manager),
        ]

    def get_tools(self) -> list[BaseTool]:
        """Get the list of browser tools.

        Returns:
            List of CrewAI tools
        """
        return self.tools

    def get_tools_by_name(self) -> dict[str, BaseTool]:
        """Get a dictionary of tools mapped by their names.

        Returns:
            Dictionary of {tool_name: tool}
        """
        return {tool.name: tool for tool in self.tools}

    async def cleanup(self) -> None:
        """Clean up all browser sessions asynchronously."""
        await self.session_manager.close_all_browsers()
        logger.info("All browser sessions cleaned up")

    def sync_cleanup(self) -> None:
        """Clean up all browser sessions from synchronous code.

        When called inside a running event loop the cleanup is scheduled as a
        fire-and-forget task rather than awaited.
        """
        import asyncio

        try:
            loop = asyncio.get_event_loop()
            if loop.is_running():
                asyncio.create_task(self.cleanup())  # noqa: RUF006
            else:
                loop.run_until_complete(self.cleanup())
        except RuntimeError:
            # No usable loop in this thread — run the coroutine on a fresh one.
            asyncio.run(self.cleanup())
def create_browser_toolkit(
    region: str = "us-west-2",
) -> tuple[BrowserToolkit, list[BaseTool]]:
    """Build a BrowserToolkit and return it with its tool list.

    Args:
        region: AWS region for browser client

    Returns:
        Tuple of (toolkit, tools)
    """
    toolkit = BrowserToolkit(region=region)
    return toolkit, toolkit.get_tools()
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/aws/bedrock/browser/browser_toolkit.py",
"license": "MIT License",
"lines": 481,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/aws/bedrock/browser/utils.py | from __future__ import annotations
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from playwright.async_api import Browser as AsyncBrowser, Page as AsyncPage
from playwright.sync_api import Browser as SyncBrowser, Page as SyncPage
async def aget_current_page(browser: AsyncBrowser | Any) -> AsyncPage:
    """Asynchronously get the current page of the browser.

    Uses the first context's most recent page, creating a context and/or
    page when none exist yet.

    Args:
        browser: The browser (AsyncBrowser) to get the current page from.

    Returns:
        AsyncPage: The current page.
    """
    contexts = browser.contexts
    if not contexts:
        fresh_context = await browser.new_context()
        return await fresh_context.new_page()
    pages = contexts[0].pages
    if pages:
        return pages[-1]
    return await contexts[0].new_page()
def get_current_page(browser: SyncBrowser | Any) -> SyncPage:
    """Return the active page of *browser*, creating one when necessary.

    Args:
        browser: The browser to get the current page from.

    Returns:
        SyncPage: The most recently opened page, or a freshly created one
        when the browser has no context or the context has no pages.
    """
    contexts = browser.contexts
    if not contexts:
        # Brand-new browser: spin up a context and a page inside it.
        return browser.new_context().new_page()

    first_context = contexts[0]
    if not first_context.pages:
        return first_context.new_page()

    # The last page in the list is the most recently opened one.
    return first_context.pages[-1]
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/aws/bedrock/browser/utils.py",
"license": "MIT License",
"lines": 33,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/aws/bedrock/code_interpreter/code_interpreter_toolkit.py | """Toolkit for working with AWS Bedrock Code Interpreter."""
from __future__ import annotations
import json
import logging
from typing import TYPE_CHECKING, Any
from crewai.tools import BaseTool
from pydantic import BaseModel, Field
if TYPE_CHECKING:
from bedrock_agentcore.tools.code_interpreter_client import CodeInterpreter
logger = logging.getLogger(__name__)
def extract_output_from_stream(response):
    """Collect the textual output of a code interpreter response stream.

    Args:
        response: Response from code interpreter execution; must expose a
            ``"stream"`` iterable of events.

    Returns:
        Extracted output joined into a single newline-separated string.
    """
    pieces = []
    for event in response["stream"]:
        if "result" not in event:
            # Events without a result carry no printable output.
            continue
        for item in event["result"]["content"]:
            if item["type"] == "text":
                pieces.append(item["text"])
            if item["type"] == "resource":
                resource = item["resource"]
                if "text" in resource:
                    # Textual resources are rendered as a file banner + body.
                    path = resource["uri"].replace("file://", "")
                    pieces.append(f"==== File: {path} ====\n{resource['text']}\n")
                else:
                    # Non-textual resources are serialized verbatim.
                    pieces.append(json.dumps(resource))
    return "\n".join(pieces)
# Input schemas
#
# One pydantic model per tool. ``thread_id`` selects (and lazily creates)
# the interpreter session the operation runs against, so independent
# conversations stay isolated from each other.


class ExecuteCodeInput(BaseModel):
    """Input for ExecuteCode."""

    code: str = Field(description="The code to execute")
    language: str = Field(
        default="python", description="The programming language of the code"
    )
    clear_context: bool = Field(
        default=False, description="Whether to clear execution context"
    )
    thread_id: str = Field(
        default="default", description="Thread ID for the code interpreter session"
    )


class ExecuteCommandInput(BaseModel):
    """Input for ExecuteCommand."""

    command: str = Field(description="The command to execute")
    thread_id: str = Field(
        default="default", description="Thread ID for the code interpreter session"
    )


class ReadFilesInput(BaseModel):
    """Input for ReadFiles."""

    paths: list[str] = Field(description="List of file paths to read")
    thread_id: str = Field(
        default="default", description="Thread ID for the code interpreter session"
    )


class ListFilesInput(BaseModel):
    """Input for ListFiles."""

    directory_path: str = Field(default="", description="Path to the directory to list")
    thread_id: str = Field(
        default="default", description="Thread ID for the code interpreter session"
    )


class DeleteFilesInput(BaseModel):
    """Input for DeleteFiles."""

    paths: list[str] = Field(description="List of file paths to delete")
    thread_id: str = Field(
        default="default", description="Thread ID for the code interpreter session"
    )


class WriteFilesInput(BaseModel):
    """Input for WriteFiles."""

    # Each entry is expected to carry "path" and "text" keys.
    files: list[dict[str, str]] = Field(
        description="List of dictionaries with path and text fields"
    )
    thread_id: str = Field(
        default="default", description="Thread ID for the code interpreter session"
    )


class StartCommandInput(BaseModel):
    """Input for StartCommand."""

    command: str = Field(description="The command to execute asynchronously")
    thread_id: str = Field(
        default="default", description="Thread ID for the code interpreter session"
    )


class GetTaskInput(BaseModel):
    """Input for GetTask."""

    task_id: str = Field(description="The ID of the task to check")
    thread_id: str = Field(
        default="default", description="Thread ID for the code interpreter session"
    )


class StopTaskInput(BaseModel):
    """Input for StopTask."""

    task_id: str = Field(description="The ID of the task to stop")
    thread_id: str = Field(
        default="default", description="Thread ID for the code interpreter session"
    )
# Tool classes
class ExecuteCodeTool(BaseTool):
    """Tool that runs source code inside the code interpreter environment."""

    name: str = "execute_code"
    description: str = "Execute code in various languages (primarily Python)"
    args_schema: type[BaseModel] = ExecuteCodeInput
    # Back-reference to the owning toolkit; excluded from serialization.
    toolkit: Any = Field(default=None, exclude=True)

    def __init__(self, toolkit):
        super().__init__()
        self.toolkit = toolkit

    def _run(
        self,
        code: str,
        language: str = "python",
        clear_context: bool = False,
        thread_id: str = "default",
    ) -> str:
        try:
            interpreter = self.toolkit._get_or_create_interpreter(
                thread_id=thread_id
            )
            result = interpreter.invoke(
                method="executeCode",
                params={
                    "code": code,
                    "language": language,
                    "clearContext": clear_context,
                },
            )
            return extract_output_from_stream(result)
        except Exception as e:
            return f"Error executing code: {e!s}"

    async def _arun(
        self,
        code: str,
        language: str = "python",
        clear_context: bool = False,
        thread_id: str = "default",
    ) -> str:
        # The underlying client is synchronous and thread-safe, so the
        # async path simply delegates to the synchronous implementation.
        return self._run(
            code=code,
            language=language,
            clear_context=clear_context,
            thread_id=thread_id,
        )
class ExecuteCommandTool(BaseTool):
    """Tool that runs shell commands inside the code interpreter environment."""

    name: str = "execute_command"
    description: str = "Run shell commands in the code interpreter environment"
    args_schema: type[BaseModel] = ExecuteCommandInput
    # Back-reference to the owning toolkit; excluded from serialization.
    toolkit: Any = Field(default=None, exclude=True)

    def __init__(self, toolkit):
        super().__init__()
        self.toolkit = toolkit

    def _run(self, command: str, thread_id: str = "default") -> str:
        try:
            interpreter = self.toolkit._get_or_create_interpreter(
                thread_id=thread_id
            )
            result = interpreter.invoke(
                method="executeCommand", params={"command": command}
            )
            return extract_output_from_stream(result)
        except Exception as e:
            return f"Error executing command: {e!s}"

    async def _arun(self, command: str, thread_id: str = "default") -> str:
        # Underlying client is synchronous and thread-safe; delegate.
        return self._run(command=command, thread_id=thread_id)
class ReadFilesTool(BaseTool):
    """Tool that reads file contents from the code interpreter environment."""

    name: str = "read_files"
    description: str = "Read content of files in the environment"
    args_schema: type[BaseModel] = ReadFilesInput
    # Back-reference to the owning toolkit; excluded from serialization.
    toolkit: Any = Field(default=None, exclude=True)

    def __init__(self, toolkit):
        super().__init__()
        self.toolkit = toolkit

    def _run(self, paths: list[str], thread_id: str = "default") -> str:
        try:
            interpreter = self.toolkit._get_or_create_interpreter(
                thread_id=thread_id
            )
            result = interpreter.invoke(
                method="readFiles", params={"paths": paths}
            )
            return extract_output_from_stream(result)
        except Exception as e:
            return f"Error reading files: {e!s}"

    async def _arun(self, paths: list[str], thread_id: str = "default") -> str:
        # Underlying client is synchronous and thread-safe; delegate.
        return self._run(paths=paths, thread_id=thread_id)
class ListFilesTool(BaseTool):
    """Tool that lists directory contents in the code interpreter environment."""

    name: str = "list_files"
    description: str = "List files in directories in the environment"
    args_schema: type[BaseModel] = ListFilesInput
    # Back-reference to the owning toolkit; excluded from serialization.
    toolkit: Any = Field(default=None, exclude=True)

    def __init__(self, toolkit):
        super().__init__()
        self.toolkit = toolkit

    def _run(self, directory_path: str = "", thread_id: str = "default") -> str:
        try:
            interpreter = self.toolkit._get_or_create_interpreter(
                thread_id=thread_id
            )
            result = interpreter.invoke(
                method="listFiles", params={"directoryPath": directory_path}
            )
            return extract_output_from_stream(result)
        except Exception as e:
            return f"Error listing files: {e!s}"

    async def _arun(self, directory_path: str = "", thread_id: str = "default") -> str:
        # Underlying client is synchronous and thread-safe; delegate.
        return self._run(directory_path=directory_path, thread_id=thread_id)
class DeleteFilesTool(BaseTool):
    """Tool that removes files from the code interpreter environment."""

    name: str = "delete_files"
    description: str = "Remove files from the environment"
    args_schema: type[BaseModel] = DeleteFilesInput
    # Back-reference to the owning toolkit; excluded from serialization.
    toolkit: Any = Field(default=None, exclude=True)

    def __init__(self, toolkit):
        super().__init__()
        self.toolkit = toolkit

    def _run(self, paths: list[str], thread_id: str = "default") -> str:
        try:
            interpreter = self.toolkit._get_or_create_interpreter(
                thread_id=thread_id
            )
            # The service method is named "removeFiles" even though the
            # tool itself is exposed as "delete_files".
            result = interpreter.invoke(
                method="removeFiles", params={"paths": paths}
            )
            return extract_output_from_stream(result)
        except Exception as e:
            return f"Error deleting files: {e!s}"

    async def _arun(self, paths: list[str], thread_id: str = "default") -> str:
        # Underlying client is synchronous and thread-safe; delegate.
        return self._run(paths=paths, thread_id=thread_id)
class WriteFilesTool(BaseTool):
    """Tool that creates or updates files in the code interpreter environment."""

    name: str = "write_files"
    description: str = "Create or update files in the environment"
    args_schema: type[BaseModel] = WriteFilesInput
    # Back-reference to the owning toolkit; excluded from serialization.
    toolkit: Any = Field(default=None, exclude=True)

    def __init__(self, toolkit):
        super().__init__()
        self.toolkit = toolkit

    def _run(self, files: list[dict[str, str]], thread_id: str = "default") -> str:
        try:
            interpreter = self.toolkit._get_or_create_interpreter(
                thread_id=thread_id
            )
            result = interpreter.invoke(
                method="writeFiles", params={"content": files}
            )
            return extract_output_from_stream(result)
        except Exception as e:
            return f"Error writing files: {e!s}"

    async def _arun(
        self, files: list[dict[str, str]], thread_id: str = "default"
    ) -> str:
        # Underlying client is synchronous and thread-safe; delegate.
        return self._run(files=files, thread_id=thread_id)
class StartCommandTool(BaseTool):
    """Tool that launches long-running commands without waiting for them."""

    name: str = "start_command_execution"
    description: str = "Start long-running commands asynchronously"
    args_schema: type[BaseModel] = StartCommandInput
    # Back-reference to the owning toolkit; excluded from serialization.
    toolkit: Any = Field(default=None, exclude=True)

    def __init__(self, toolkit):
        super().__init__()
        self.toolkit = toolkit

    def _run(self, command: str, thread_id: str = "default") -> str:
        try:
            interpreter = self.toolkit._get_or_create_interpreter(
                thread_id=thread_id
            )
            result = interpreter.invoke(
                method="startCommandExecution", params={"command": command}
            )
            return extract_output_from_stream(result)
        except Exception as e:
            return f"Error starting command: {e!s}"

    async def _arun(self, command: str, thread_id: str = "default") -> str:
        # Underlying client is synchronous and thread-safe; delegate.
        return self._run(command=command, thread_id=thread_id)
class GetTaskTool(BaseTool):
    """Tool that reports the status of an asynchronously started task."""

    name: str = "get_task"
    description: str = "Check status of async tasks"
    args_schema: type[BaseModel] = GetTaskInput
    # Back-reference to the owning toolkit; excluded from serialization.
    toolkit: Any = Field(default=None, exclude=True)

    def __init__(self, toolkit):
        super().__init__()
        self.toolkit = toolkit

    def _run(self, task_id: str, thread_id: str = "default") -> str:
        try:
            interpreter = self.toolkit._get_or_create_interpreter(
                thread_id=thread_id
            )
            result = interpreter.invoke(
                method="getTask", params={"taskId": task_id}
            )
            return extract_output_from_stream(result)
        except Exception as e:
            return f"Error getting task status: {e!s}"

    async def _arun(self, task_id: str, thread_id: str = "default") -> str:
        # Underlying client is synchronous and thread-safe; delegate.
        return self._run(task_id=task_id, thread_id=thread_id)
class StopTaskTool(BaseTool):
    """Tool that stops a running asynchronous task."""

    name: str = "stop_task"
    description: str = "Stop running tasks"
    args_schema: type[BaseModel] = StopTaskInput
    # Back-reference to the owning toolkit; excluded from serialization.
    toolkit: Any = Field(default=None, exclude=True)

    def __init__(self, toolkit):
        super().__init__()
        self.toolkit = toolkit

    def _run(self, task_id: str, thread_id: str = "default") -> str:
        try:
            interpreter = self.toolkit._get_or_create_interpreter(
                thread_id=thread_id
            )
            result = interpreter.invoke(
                method="stopTask", params={"taskId": task_id}
            )
            return extract_output_from_stream(result)
        except Exception as e:
            return f"Error stopping task: {e!s}"

    async def _arun(self, task_id: str, thread_id: str = "default") -> str:
        # Underlying client is synchronous and thread-safe; delegate.
        return self._run(task_id=task_id, thread_id=thread_id)
class CodeInterpreterToolkit:
    """Toolkit exposing an AWS Bedrock code interpreter environment as CrewAI tools.

    Provided tools:
        * execute_code - Run code in various languages (primarily Python)
        * execute_command - Run shell commands
        * read_files - Read content of files in the environment
        * list_files - List files in directories
        * delete_files - Remove files from the environment
        * write_files - Create or update files
        * start_command_execution - Start long-running commands asynchronously
        * get_task - Check status of async tasks
        * stop_task - Stop running tasks

    Interpreter sessions are created lazily on first use, with one session
    kept per thread ID so that independent threads stay isolated.

    Example:
        ```python
        from crewai import Agent, Task, Crew
        from crewai_tools.aws.bedrock.code_interpreter import (
            create_code_interpreter_toolkit,
        )

        toolkit, code_tools = create_code_interpreter_toolkit(region="us-west-2")

        developer_agent = Agent(
            role="Python Developer",
            goal="Create and execute Python code to solve problems",
            backstory="You're a skilled Python developer with expertise in data analysis.",
            tools=code_tools,
        )

        coding_task = Task(
            description="Write a Python function that calculates the factorial of a number and test it.",
            agent=developer_agent,
        )

        crew = Crew(agents=[developer_agent], tasks=[coding_task])
        result = crew.kickoff()

        # Clean up resources when done
        import asyncio

        asyncio.run(toolkit.cleanup())
        ```
    """

    def __init__(self, region: str = "us-west-2"):
        """Initialize the toolkit.

        Args:
            region: AWS region for the code interpreter.
        """
        self.region = region
        self._code_interpreters: dict[str, CodeInterpreter] = {}
        self.tools: list[BaseTool] = []
        self._setup_tools()

    def _setup_tools(self) -> None:
        """Build the tool instances; no interpreter session is created yet."""
        tool_classes = (
            ExecuteCodeTool,
            ExecuteCommandTool,
            ReadFilesTool,
            ListFilesTool,
            DeleteFilesTool,
            WriteFilesTool,
            StartCommandTool,
            GetTaskTool,
            StopTaskTool,
        )
        self.tools = [tool_cls(self) for tool_cls in tool_classes]

    def _get_or_create_interpreter(self, thread_id: str = "default") -> CodeInterpreter:
        """Return the interpreter for *thread_id*, starting one if needed.

        Args:
            thread_id: Thread ID for the code interpreter session.

        Returns:
            CodeInterpreter instance bound to the thread.
        """
        existing = self._code_interpreters.get(thread_id)
        if existing is not None:
            return existing

        # Imported lazily so the dependency is only required on first use.
        from bedrock_agentcore.tools.code_interpreter_client import CodeInterpreter

        interpreter = CodeInterpreter(region=self.region)
        interpreter.start()
        logger.info(
            f"Started code interpreter with session_id:{interpreter.session_id} for thread:{thread_id}"
        )
        self._code_interpreters[thread_id] = interpreter
        return interpreter

    def get_tools(self) -> list[BaseTool]:
        """Return the list of code interpreter tools."""
        return self.tools

    def get_tools_by_name(self) -> dict[str, BaseTool]:
        """Return a mapping of {tool_name: tool} for every tool."""
        by_name: dict[str, BaseTool] = {}
        for tool in self.tools:
            by_name[tool.name] = tool
        return by_name

    async def cleanup(self, thread_id: str | None = None) -> None:
        """Release interpreter sessions.

        Args:
            thread_id: Optional thread ID to clean up. If None, cleans up
                all sessions.
        """
        if thread_id:
            # Tear down a single thread's session, if one exists.
            interpreter = self._code_interpreters.get(thread_id)
            if interpreter is not None:
                try:
                    interpreter.stop()
                    del self._code_interpreters[thread_id]
                    logger.info(
                        f"Code interpreter session for thread {thread_id} cleaned up"
                    )
                except Exception as e:
                    logger.warning(
                        f"Error stopping code interpreter for thread {thread_id}: {e}"
                    )
            return

        # Tear down every session; failures are logged, never raised.
        for tid, interpreter in list(self._code_interpreters.items()):
            try:
                interpreter.stop()
            except Exception as e:  # noqa: PERF203
                logger.warning(
                    f"Error stopping code interpreter for thread {tid}: {e}"
                )
        self._code_interpreters = {}
        logger.info("All code interpreter sessions cleaned up")
def create_code_interpreter_toolkit(
    region: str = "us-west-2",
) -> tuple[CodeInterpreterToolkit, list[BaseTool]]:
    """Construct a CodeInterpreterToolkit together with its tool list.

    Args:
        region: AWS region for the code interpreter.

    Returns:
        Tuple of (toolkit, tools).
    """
    kit = CodeInterpreterToolkit(region=region)
    return kit, kit.get_tools()
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/aws/bedrock/code_interpreter/code_interpreter_toolkit.py",
"license": "MIT License",
"lines": 483,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/aws/bedrock/exceptions.py | """Custom exceptions for AWS Bedrock integration."""
class BedrockError(Exception):
    """Base exception for Bedrock-related errors.

    All exceptions raised by this integration derive from this class, so
    callers can catch the whole family with a single ``except BedrockError``.
    """


class BedrockAgentError(BedrockError):
    """Exception raised for errors in the Bedrock Agent operations."""


class BedrockKnowledgeBaseError(BedrockError):
    """Exception raised for errors in the Bedrock Knowledge Base operations."""


class BedrockValidationError(BedrockError):
    """Exception raised for validation errors in Bedrock operations."""
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/aws/bedrock/exceptions.py",
"license": "MIT License",
"lines": 9,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/aws/bedrock/knowledge_base/retriever_tool.py | import json
import os
from typing import Any
from crewai.tools import BaseTool
from dotenv import load_dotenv
from pydantic import BaseModel, Field
from crewai_tools.aws.bedrock.exceptions import (
BedrockKnowledgeBaseError,
BedrockValidationError,
)
# Load environment variables from .env file
load_dotenv()
class BedrockKBRetrieverToolInput(BaseModel):
    """Input schema for BedrockKBRetrieverTool."""

    # Free-text query forwarded verbatim to the Bedrock `retrieve` API.
    query: str = Field(
        ..., description="The query to retrieve information from the knowledge base"
    )
class BedrockKBRetrieverTool(BaseTool):
    """CrewAI tool that queries an Amazon Bedrock Knowledge Base.

    Wraps the ``bedrock-agent-runtime`` ``retrieve`` API: validates the
    configured knowledge base parameters up front, issues the query in
    ``_run``, and returns the matched passages as a JSON string.
    """

    name: str = "Bedrock Knowledge Base Retriever Tool"
    description: str = (
        "Retrieves information from an Amazon Bedrock Knowledge Base given a query"
    )
    args_schema: type[BaseModel] = BedrockKBRetrieverToolInput
    # Populated in __init__ (argument or BEDROCK_KB_ID env var); the ignore
    # acknowledges the None default on a str-typed field.
    knowledge_base_id: str = None  # type: ignore[assignment]
    number_of_results: int | None = 5
    retrieval_configuration: dict[str, Any] | None = None
    guardrail_configuration: dict[str, Any] | None = None
    next_token: str | None = None
    package_dependencies: list[str] = Field(default_factory=lambda: ["boto3"])

    def __init__(
        self,
        knowledge_base_id: str | None = None,
        number_of_results: int | None = 5,
        retrieval_configuration: dict[str, Any] | None = None,
        guardrail_configuration: dict[str, Any] | None = None,
        next_token: str | None = None,
        **kwargs,
    ):
        """Initialize the BedrockKBRetrieverTool with knowledge base configuration.

        Args:
            knowledge_base_id (str): The unique identifier of the knowledge base to query
            number_of_results (Optional[int], optional): The maximum number of results to return. Defaults to 5.
            retrieval_configuration (Optional[Dict[str, Any]], optional): Configurations for the knowledge base query and retrieval process. Defaults to None.
            guardrail_configuration (Optional[Dict[str, Any]], optional): Guardrail settings. Defaults to None.
            next_token (Optional[str], optional): Token for retrieving the next batch of results. Defaults to None.

        Raises:
            BedrockValidationError: If any configured parameter fails validation.
        """
        super().__init__(**kwargs)
        # Get knowledge_base_id from environment variable if not provided
        self.knowledge_base_id = knowledge_base_id or os.getenv("BEDROCK_KB_ID")  # type: ignore[assignment]
        self.number_of_results = number_of_results
        self.guardrail_configuration = guardrail_configuration
        self.next_token = next_token

        # Initialize retrieval_configuration with provided parameters or use the one provided
        if retrieval_configuration is None:
            self.retrieval_configuration = self._build_retrieval_configuration()
        else:
            self.retrieval_configuration = retrieval_configuration

        # Validate parameters
        self._validate_parameters()

        # Update the description to include the knowledge base details
        self.description = f"Retrieves information from Amazon Bedrock Knowledge Base '{self.knowledge_base_id}' given a query"

    def _build_retrieval_configuration(self) -> dict[str, Any]:
        """Build the retrieval configuration based on provided parameters.

        Returns:
            Dict[str, Any]: The constructed retrieval configuration
        """
        vector_search_config = {}

        # Add number of results if provided
        if self.number_of_results is not None:
            vector_search_config["numberOfResults"] = self.number_of_results

        return {"vectorSearchConfiguration": vector_search_config}

    def _validate_parameters(self) -> None:
        """Validate the parameters according to AWS API requirements."""
        try:
            # Validate knowledge_base_id
            if not self.knowledge_base_id:
                raise BedrockValidationError("knowledge_base_id cannot be empty")

            if not isinstance(self.knowledge_base_id, str):
                raise BedrockValidationError("knowledge_base_id must be a string")

            # AWS knowledge base IDs are short alphanumeric identifiers.
            if len(self.knowledge_base_id) > 10:
                raise BedrockValidationError(
                    "knowledge_base_id must be 10 characters or less"
                )

            if not all(c.isalnum() for c in self.knowledge_base_id):
                raise BedrockValidationError(
                    "knowledge_base_id must contain only alphanumeric characters"
                )

            # Validate next_token if provided
            if self.next_token:
                if not isinstance(self.next_token, str):
                    raise BedrockValidationError("next_token must be a string")
                if len(self.next_token) < 1 or len(self.next_token) > 2048:
                    raise BedrockValidationError(
                        "next_token must be between 1 and 2048 characters"
                    )
                if " " in self.next_token:
                    raise BedrockValidationError("next_token cannot contain spaces")

            # Validate number_of_results if provided
            if self.number_of_results is not None:
                if not isinstance(self.number_of_results, int):
                    raise BedrockValidationError("number_of_results must be an integer")
                if self.number_of_results < 1:
                    raise BedrockValidationError(
                        "number_of_results must be greater than 0"
                    )

        except BedrockValidationError as e:
            # Re-wrap so every validation failure carries a uniform prefix.
            raise BedrockValidationError(f"Parameter validation failed: {e!s}") from e

    def _process_retrieval_result(self, result: dict[str, Any]) -> dict[str, Any]:
        """Process a single retrieval result from Bedrock Knowledge Base.

        Args:
            result (Dict[str, Any]): Raw result from Bedrock Knowledge Base

        Returns:
            Dict[str, Any]: Processed result with standardized format
        """
        # Extract content
        content_obj = result.get("content", {})
        content = content_obj.get("text", "")
        content_type = content_obj.get("type", "text")

        # Extract location information
        location = result.get("location", {})
        location_type = location.get("type", "unknown")
        source_uri = None

        # Map for location types and their URI fields
        location_mapping = {
            "s3Location": {"field": "uri", "type": "S3"},
            "confluenceLocation": {"field": "url", "type": "Confluence"},
            "salesforceLocation": {"field": "url", "type": "Salesforce"},
            "sharePointLocation": {"field": "url", "type": "SharePoint"},
            "webLocation": {"field": "url", "type": "Web"},
            "customDocumentLocation": {"field": "id", "type": "CustomDocument"},
            "kendraDocumentLocation": {"field": "uri", "type": "KendraDocument"},
            "sqlLocation": {"field": "query", "type": "SQL"},
        }

        # Extract the URI based on location type
        for loc_key, config in location_mapping.items():
            if loc_key in location:
                source_uri = location[loc_key].get(config["field"])
                if not location_type or location_type == "unknown":
                    location_type = config["type"]
                break

        # Create result object
        result_object = {
            "content": content,
            "content_type": content_type,
            "source_type": location_type,
            "source_uri": source_uri,
        }

        # Add optional fields if available
        if "score" in result:
            result_object["score"] = result["score"]

        if "metadata" in result:
            result_object["metadata"] = result["metadata"]

        # Handle byte content if present
        if "byteContent" in content_obj:
            result_object["byte_content"] = content_obj["byteContent"]

        # Handle row content if present
        if "row" in content_obj:
            result_object["row_content"] = content_obj["row"]

        return result_object

    def _run(self, query: str) -> str:
        """Query the knowledge base and return the results as a JSON string.

        Raises:
            ImportError: If boto3 is not installed.
            BedrockKnowledgeBaseError: On any AWS or unexpected failure.
        """
        try:
            import boto3
            from botocore.exceptions import ClientError
        except ImportError as e:
            raise ImportError(
                "`boto3` package not found, please run `uv add boto3`"
            ) from e

        try:
            # Initialize the Bedrock Agent Runtime client
            bedrock_agent_runtime = boto3.client(
                "bedrock-agent-runtime",
                region_name=os.getenv(
                    "AWS_REGION", os.getenv("AWS_DEFAULT_REGION", "us-east-1")
                ),
                # AWS SDK will automatically use AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY from environment
            )

            # Prepare the request parameters
            retrieve_params = {
                "knowledgeBaseId": self.knowledge_base_id,
                "retrievalQuery": {"text": query},
            }

            # Add optional parameters if provided
            if self.retrieval_configuration:
                retrieve_params["retrievalConfiguration"] = self.retrieval_configuration

            if self.guardrail_configuration:
                retrieve_params["guardrailConfiguration"] = self.guardrail_configuration

            if self.next_token:
                retrieve_params["nextToken"] = self.next_token

            # Make the retrieve API call
            response = bedrock_agent_runtime.retrieve(**retrieve_params)

            # Process the response
            results = []
            for result in response.get("retrievalResults", []):
                processed_result = self._process_retrieval_result(result)
                results.append(processed_result)

            # Build the response object
            response_object = {}
            if results:
                response_object["results"] = results
            else:
                response_object["message"] = "No results found for the given query."  # type: ignore[assignment]

            if "nextToken" in response:
                response_object["nextToken"] = response["nextToken"]

            if "guardrailAction" in response:
                response_object["guardrailAction"] = response["guardrailAction"]

            # Return the results as a JSON string
            return json.dumps(response_object, indent=2)

        except ClientError as e:
            error_code = "Unknown"
            error_message = str(e)

            # Try to extract error code if available
            if hasattr(e, "response") and "Error" in e.response:
                error_code = e.response["Error"].get("Code", "Unknown")
                error_message = e.response["Error"].get("Message", str(e))

            raise BedrockKnowledgeBaseError(
                f"Error ({error_code}): {error_message}"
            ) from e
        except Exception as e:
            raise BedrockKnowledgeBaseError(f"Unexpected error: {e!s}") from e
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/aws/bedrock/knowledge_base/retriever_tool.py",
"license": "MIT License",
"lines": 219,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/aws/s3/reader_tool.py | import os
from crewai.tools import BaseTool
from pydantic import BaseModel, Field
class S3ReaderToolInput(BaseModel):
    """Input schema for S3ReaderTool."""

    # Full S3 URI; the tool splits it into bucket and object key.
    file_path: str = Field(
        ..., description="S3 file path (e.g., 's3://bucket-name/file-name')"
    )
class S3ReaderTool(BaseTool):
    """Reads a UTF-8 text object from Amazon S3.

    Region and credentials come from the ``CREW_AWS_*`` environment
    variables; anything unset falls through to boto3's default chain.
    """

    name: str = "S3 Reader Tool"
    description: str = "Reads a file from Amazon S3 given an S3 file path"
    args_schema: type[BaseModel] = S3ReaderToolInput
    package_dependencies: list[str] = Field(default_factory=lambda: ["boto3"])

    def _run(self, file_path: str) -> str:
        """Fetch the object at *file_path* and return its decoded content.

        Args:
            file_path: Path in the form ``s3://bucket-name/object-key``.

        Returns:
            The object body decoded as UTF-8, or an error-message string
            when the S3 call fails.

        Raises:
            ImportError: If boto3 is not installed.
            ValueError: If *file_path* lacks a bucket/key separator.
        """
        try:
            import boto3
            from botocore.exceptions import ClientError
        except ImportError as e:
            raise ImportError(
                "`boto3` package not found, please run `uv add boto3`"
            ) from e

        try:
            bucket_name, object_key = self._parse_s3_path(file_path)

            s3 = boto3.client(
                "s3",
                region_name=os.getenv("CREW_AWS_REGION", "us-east-1"),
                aws_access_key_id=os.getenv("CREW_AWS_ACCESS_KEY_ID"),
                aws_secret_access_key=os.getenv("CREW_AWS_SEC_ACCESS_KEY"),
            )

            # Read file content from S3
            response = s3.get_object(Bucket=bucket_name, Key=object_key)
            return response["Body"].read().decode("utf-8")
        except ClientError as e:
            return f"Error reading file from S3: {e!s}"

    def _parse_s3_path(self, file_path: str) -> tuple:
        """Split ``s3://bucket/key`` into ``(bucket, key)``.

        Fixes over the previous implementation: only a *leading* ``s3://``
        is stripped (``str.replace`` removed the substring anywhere in the
        path), and a path without a bucket/key separator now raises a clear
        ValueError instead of an opaque IndexError.
        """
        remainder = file_path.removeprefix("s3://")
        bucket, sep, key = remainder.partition("/")
        if not sep:
            raise ValueError(
                f"Invalid S3 path {file_path!r}; expected 's3://bucket-name/file-name'"
            )
        return bucket, key
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/aws/s3/reader_tool.py",
"license": "MIT License",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/aws/s3/writer_tool.py | import os
from crewai.tools import BaseTool
from pydantic import BaseModel, Field
class S3WriterToolInput(BaseModel):
    """Input schema for S3WriterTool."""

    # Full S3 URI; the tool splits it into bucket and object key.
    file_path: str = Field(
        ..., description="S3 file path (e.g., 's3://bucket-name/file-name')"
    )
    content: str = Field(..., description="Content to write to the file")
class S3WriterTool(BaseTool):
    """Writes a UTF-8 text object to Amazon S3.

    Region and credentials come from the ``CREW_AWS_*`` environment
    variables; anything unset falls through to boto3's default chain.
    """

    name: str = "S3 Writer Tool"
    description: str = "Writes content to a file in Amazon S3 given an S3 file path"
    args_schema: type[BaseModel] = S3WriterToolInput
    package_dependencies: list[str] = Field(default_factory=lambda: ["boto3"])

    def _run(self, file_path: str, content: str) -> str:
        """Write *content* to the object at *file_path*.

        Args:
            file_path: Path in the form ``s3://bucket-name/object-key``.
            content: Text to store; encoded as UTF-8 before upload.

        Returns:
            A success message, or an error-message string when the S3 call
            fails.

        Raises:
            ImportError: If boto3 is not installed.
            ValueError: If *file_path* lacks a bucket/key separator.
        """
        try:
            import boto3
            from botocore.exceptions import ClientError
        except ImportError as e:
            raise ImportError(
                "`boto3` package not found, please run `uv add boto3`"
            ) from e

        try:
            bucket_name, object_key = self._parse_s3_path(file_path)

            s3 = boto3.client(
                "s3",
                region_name=os.getenv("CREW_AWS_REGION", "us-east-1"),
                aws_access_key_id=os.getenv("CREW_AWS_ACCESS_KEY_ID"),
                aws_secret_access_key=os.getenv("CREW_AWS_SEC_ACCESS_KEY"),
            )

            s3.put_object(
                Bucket=bucket_name, Key=object_key, Body=content.encode("utf-8")
            )
            return f"Successfully wrote content to {file_path}"
        except ClientError as e:
            return f"Error writing file to S3: {e!s}"

    def _parse_s3_path(self, file_path: str) -> tuple:
        """Split ``s3://bucket/key`` into ``(bucket, key)``.

        Fixes over the previous implementation: only a *leading* ``s3://``
        is stripped (``str.replace`` removed the substring anywhere in the
        path), and a path without a bucket/key separator now raises a clear
        ValueError instead of an opaque IndexError.
        """
        remainder = file_path.removeprefix("s3://")
        bucket, sep, key = remainder.partition("/")
        if not sep:
            raise ValueError(
                f"Invalid S3 path {file_path!r}; expected 's3://bucket-name/file-name'"
            )
        return bucket, key
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/aws/s3/writer_tool.py",
"license": "MIT License",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/printer.py | """Utility for colored console output."""
class Printer:
"""Handles colored console output formatting."""
@staticmethod
def print(content: str, color: str | None = None) -> None:
"""Prints content with optional color formatting.
Args:
content: The string to be printed.
color: Optional color name to format the output. If provided,
must match one of the _print_* methods available in this class.
If not provided or if the color is not supported, prints without
formatting.
"""
if hasattr(Printer, f"_print_{color}"):
getattr(Printer, f"_print_{color}")(content)
else:
print(content) # noqa: T201
    # ANSI escape sequences used by the helpers below: \033[1m enables bold,
    # \033[9Xm selects bright foreground colors, \033[3Xm standard foreground
    # colors, and \033[00m resets all attributes. Each helper also emits a
    # single leading space before the content.
    @staticmethod
    def _print_bold_purple(content: str) -> None:
        """Prints content in bold purple color.

        Args:
            content: The string to be printed in bold purple.
        """
        print(f"\033[1m\033[95m {content}\033[00m")  # noqa: T201

    @staticmethod
    def _print_bold_green(content: str) -> None:
        """Prints content in bold green color.

        Args:
            content: The string to be printed in bold green.
        """
        print(f"\033[1m\033[92m {content}\033[00m")  # noqa: T201

    @staticmethod
    def _print_purple(content: str) -> None:
        """Prints content in purple color.

        Args:
            content: The string to be printed in purple.
        """
        print(f"\033[95m {content}\033[00m")  # noqa: T201

    @staticmethod
    def _print_red(content: str) -> None:
        """Prints content in red color.

        Args:
            content: The string to be printed in red.
        """
        print(f"\033[91m {content}\033[00m")  # noqa: T201

    @staticmethod
    def _print_bold_blue(content: str) -> None:
        """Prints content in bold blue color.

        Args:
            content: The string to be printed in bold blue.
        """
        print(f"\033[1m\033[94m {content}\033[00m")  # noqa: T201

    @staticmethod
    def _print_yellow(content: str) -> None:
        """Prints content in yellow color.

        Args:
            content: The string to be printed in yellow.
        """
        print(f"\033[93m {content}\033[00m")  # noqa: T201

    @staticmethod
    def _print_bold_yellow(content: str) -> None:
        """Prints content in bold yellow color.

        Args:
            content: The string to be printed in bold yellow.
        """
        print(f"\033[1m\033[93m {content}\033[00m")  # noqa: T201

    @staticmethod
    def _print_cyan(content: str) -> None:
        """Prints content in cyan color.

        Args:
            content: The string to be printed in cyan.
        """
        print(f"\033[96m {content}\033[00m")  # noqa: T201

    @staticmethod
    def _print_bold_cyan(content: str) -> None:
        """Prints content in bold cyan color.

        Args:
            content: The string to be printed in bold cyan.
        """
        print(f"\033[1m\033[96m {content}\033[00m")  # noqa: T201

    @staticmethod
    def _print_magenta(content: str) -> None:
        """Prints content in magenta color.

        Args:
            content: The string to be printed in magenta.
        """
        print(f"\033[35m {content}\033[00m")  # noqa: T201

    @staticmethod
    def _print_bold_magenta(content: str) -> None:
        """Prints content in bold magenta color.

        Args:
            content: The string to be printed in bold magenta.
        """
        print(f"\033[1m\033[35m {content}\033[00m")  # noqa: T201
@staticmethod
def _print_green(content: str) -> None:
"""Prints content in green color.
Args:
content: The string to be printed in green.
"""
print(f"\033[32m {content}\033[00m") # noqa: T201
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/printer.py",
"license": "MIT License",
"lines": 101,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/rag/base_loader.py | from abc import ABC, abstractmethod
from typing import Any
from pydantic import BaseModel, Field
from crewai_tools.rag.misc import compute_sha256
from crewai_tools.rag.source_content import SourceContent
class LoaderResult(BaseModel):
    """Normalized output produced by every loader."""

    content: str = Field(description="The text content of the source")
    source: str = Field(description="The source of the content", default="unknown")
    metadata: dict[str, Any] = Field(
        description="The metadata of the source", default_factory=dict
    )
    # Deterministic id (see BaseLoader.generate_doc_id) used to detect
    # re-ingestion of unchanged sources.
    doc_id: str = Field(description="The id of the document")
class BaseLoader(ABC):
    """Abstract base for content loaders.

    Subclasses implement ``load`` to turn a SourceContent into a
    LoaderResult.
    """

    def __init__(self, config: dict[str, Any] | None = None) -> None:
        # Optional loader-specific configuration; defaults to an empty dict.
        self.config = config or {}

    @abstractmethod
    def load(self, content: SourceContent, **kwargs) -> LoaderResult: ...

    @staticmethod
    def generate_doc_id(
        source_ref: str | None = None, content: str | None = None
    ) -> str:
        """Generate a deterministic document id.

        The id is the SHA-256 hash of the concatenation
        ``source_ref + content``, where a missing part is treated as the
        empty string. Both arguments are optional because the TEXT content
        type has no source reference; in that case the content alone
        identifies the document.
        """
        source_ref = source_ref or ""
        content = content or ""
        return compute_sha256(source_ref + content)
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/rag/base_loader.py",
"license": "MIT License",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/rag/chunkers/base_chunker.py | import re
class RecursiveCharacterTextSplitter:
    """A text splitter that recursively splits text based on a hierarchy of separators."""

    def __init__(
        self,
        chunk_size: int = 4000,
        chunk_overlap: int = 200,
        separators: list[str] | None = None,
        keep_separator: bool = True,
    ) -> None:
        """Initialize the RecursiveCharacterTextSplitter.

        Args:
            chunk_size: Maximum size of each chunk (in characters).
            chunk_overlap: Number of characters to overlap between chunks.
            separators: List of separators to use for splitting, tried in
                order of preference (coarsest first).
            keep_separator: Whether to keep the separator in the split text.

        Raises:
            ValueError: If chunk_overlap is not strictly smaller than
                chunk_size (otherwise merging could never make progress).
        """
        if chunk_overlap >= chunk_size:
            raise ValueError(
                f"Chunk overlap ({chunk_overlap}) cannot be >= chunk size ({chunk_size})"
            )
        self._chunk_size = chunk_size
        self._chunk_overlap = chunk_overlap
        self._keep_separator = keep_separator
        # NOTE: `separators or [...]` also replaces an explicitly-passed
        # empty list with the defaults.
        self._separators = separators or [
            "\n\n",
            "\n",
            " ",
            "",
        ]

    def split_text(self, text: str) -> list[str]:
        """Split the input text into chunks.

        Args:
            text: The text to split.

        Returns:
            A list of text chunks.
        """
        return self._split_text(text, self._separators)

    def _split_text(self, text: str, separators: list[str]) -> list[str]:
        # Pick the first separator that actually occurs in `text` ("" always
        # matches and means character-level splitting); remember the finer
        # separators for recursive splitting of oversized pieces.
        separator = separators[-1]
        new_separators = []
        for i, sep in enumerate(separators):
            if sep == "":
                separator = sep
                break
            if re.search(re.escape(sep), text):
                separator = sep
                new_separators = separators[i + 1 :]
                break
        splits = self._split_text_with_separator(text, separator)
        good_splits = []
        for split in splits:
            if len(split) < self._chunk_size:
                good_splits.append(split)
            else:
                # Piece is still too big: recurse with finer separators, or
                # fall back to fixed-width character slices.
                if new_separators:
                    other_info = self._split_text(split, new_separators)
                    good_splits.extend(other_info)
                else:
                    good_splits.extend(self._split_by_characters(split))
        return self._merge_splits(good_splits, separator)

    def _split_text_with_separator(self, text: str, separator: str) -> list[str]:
        """Split `text` on `separator`, optionally re-attaching the separator."""
        if separator == "":
            return list(text)
        if self._keep_separator and separator in text:
            # Re-attach the separator to the start of each non-first part so
            # no characters are lost; runs of consecutive separators are
            # appended to the previous split instead.
            parts = text.split(separator)
            splits = []
            for i, part in enumerate(parts):
                if i == 0:
                    splits.append(part)
                elif i == len(parts) - 1:
                    if part:
                        splits.append(separator + part)
                else:
                    if part:
                        splits.append(separator + part)
                    else:
                        if splits:
                            splits[-1] += separator
            return [s for s in splits if s]
        return text.split(separator)

    def _split_by_characters(self, text: str) -> list[str]:
        """Fallback: slice `text` into fixed-width chunks of _chunk_size."""
        chunks = []
        for i in range(0, len(text), self._chunk_size):
            chunks.append(text[i : i + self._chunk_size])  # noqa: PERF401
        return chunks

    def _merge_splits(self, splits: list[str], separator: str) -> list[str]:
        """Merge splits into chunks with proper overlap."""
        docs: list[str] = []
        # Sliding window of splits forming the chunk under construction;
        # `total` approximates its joined length (including separators).
        current_doc: list[str] = []
        total = 0
        for split in splits:
            split_len = len(split)
            if total + split_len > self._chunk_size and current_doc:
                # Flush the current window. When separators were kept on the
                # splits themselves (space separator), join without adding
                # another separator.
                if separator == "":
                    doc = "".join(current_doc)
                else:
                    if self._keep_separator and separator == " ":
                        doc = "".join(current_doc)
                    else:
                        doc = separator.join(current_doc)
                if doc:
                    docs.append(doc)
                # Handle overlap by keeping some of the previous content
                while total > self._chunk_overlap and len(current_doc) > 1:
                    removed = current_doc.pop(0)
                    total -= len(removed)
                    if separator != "":
                        total -= len(separator)
            current_doc.append(split)
            total += split_len
            if separator != "" and len(current_doc) > 1:
                total += len(separator)
        # Flush the final partial chunk, if any.
        if current_doc:
            if separator == "":
                doc = "".join(current_doc)
            else:
                if self._keep_separator and separator == " ":
                    doc = "".join(current_doc)
                else:
                    doc = separator.join(current_doc)
            if doc:
                docs.append(doc)
        return docs
class BaseChunker:
    """Thin facade over RecursiveCharacterTextSplitter used by all chunkers."""

    def __init__(
        self,
        chunk_size: int = 1000,
        chunk_overlap: int = 200,
        separators: list[str] | None = None,
        keep_separator: bool = True,
    ) -> None:
        """Initialize the Chunker.

        Args:
            chunk_size: Maximum size of each chunk.
            chunk_overlap: Number of characters to overlap between chunks.
            separators: List of separators to use for splitting.
            keep_separator: Whether to keep separators in the chunks.
        """
        self._splitter = RecursiveCharacterTextSplitter(
            chunk_size=chunk_size,
            chunk_overlap=chunk_overlap,
            separators=separators,
            keep_separator=keep_separator,
        )

    def chunk(self, text: str) -> list[str]:
        """Chunk the input text into smaller pieces.

        Args:
            text: The text to chunk.

        Returns:
            A list of text chunks; empty for empty or whitespace-only input.
        """
        stripped = text.strip() if text else ""
        if not stripped:
            return []
        # Split the original (unstripped) text, matching prior behavior.
        return self._splitter.split_text(text)
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/rag/chunkers/base_chunker.py",
"license": "MIT License",
"lines": 155,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/rag/chunkers/default_chunker.py | from crewai_tools.rag.chunkers.base_chunker import BaseChunker
class DefaultChunker(BaseChunker):
    """Fallback chunker: 2000-character chunks with a 20-character overlap."""

    def __init__(
        self,
        chunk_size: int = 2000,
        chunk_overlap: int = 20,
        separators: list[str] | None = None,
        keep_separator: bool = True,
    ):
        super().__init__(
            chunk_size=chunk_size,
            chunk_overlap=chunk_overlap,
            separators=separators,
            keep_separator=keep_separator,
        )
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/rag/chunkers/default_chunker.py",
"license": "MIT License",
"lines": 10,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/rag/chunkers/structured_chunker.py | from crewai_tools.rag.chunkers.base_chunker import BaseChunker
class CsvChunker(BaseChunker):
    """Chunker tuned for tabular CSV text (as rendered by the CSV loader)."""

    # Split points tried from coarsest to finest: row boundaries (CSVLoader's
    # "Row N:" format), then lines, column separators, commas, words, chars.
    _DEFAULT_SEPARATORS: tuple[str, ...] = (
        "\nRow ",
        "\n",
        " | ",
        ", ",
        " ",
        "",
    )

    def __init__(
        self,
        chunk_size: int = 1200,
        chunk_overlap: int = 100,
        separators: list[str] | None = None,
        keep_separator: bool = True,
    ):
        # Only substitute the defaults when the caller passed nothing at all;
        # an explicit empty list is forwarded unchanged.
        effective_separators = (
            list(self._DEFAULT_SEPARATORS) if separators is None else separators
        )
        super().__init__(chunk_size, chunk_overlap, effective_separators, keep_separator)
class JsonChunker(BaseChunker):
    """Chunker tuned for JSON text: prefers object/array boundaries."""

    # Coarsest to finest: object/array boundaries, lines, object/array
    # endings, property and key-value separators, words, characters.
    _DEFAULT_SEPARATORS: tuple[str, ...] = (
        "\n\n",
        "\n",
        "},",
        "],",
        ", ",
        ": ",
        " ",
        "",
    )

    def __init__(
        self,
        chunk_size: int = 2000,
        chunk_overlap: int = 200,
        separators: list[str] | None = None,
        keep_separator: bool = True,
    ):
        effective_separators = (
            list(self._DEFAULT_SEPARATORS) if separators is None else separators
        )
        super().__init__(chunk_size, chunk_overlap, effective_separators, keep_separator)
class XmlChunker(BaseChunker):
    """Chunker tuned for XML text: prefers element and tag boundaries."""

    # Coarsest to finest: element boundaries, lines, tag endings, sentence
    # punctuation (for text content), commas, words, characters.
    _DEFAULT_SEPARATORS: tuple[str, ...] = (
        "\n\n",
        "\n",
        ">",
        ". ",
        "! ",
        "? ",
        ", ",
        " ",
        "",
    )

    def __init__(
        self,
        chunk_size: int = 2500,
        chunk_overlap: int = 250,
        separators: list[str] | None = None,
        keep_separator: bool = True,
    ):
        effective_separators = (
            list(self._DEFAULT_SEPARATORS) if separators is None else separators
        )
        super().__init__(chunk_size, chunk_overlap, effective_separators, keep_separator)
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/rag/chunkers/structured_chunker.py",
"license": "MIT License",
"lines": 60,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/rag/chunkers/text_chunker.py | from crewai_tools.rag.chunkers.base_chunker import BaseChunker
class TextChunker(BaseChunker):
    """Chunker for plain text: prefers section, paragraph and sentence breaks."""

    # Coarsest to finest: section breaks, paragraphs, lines, sentence
    # punctuation, clause punctuation, words, characters.
    _DEFAULT_SEPARATORS: tuple[str, ...] = (
        "\n\n\n",
        "\n\n",
        "\n",
        ". ",
        "! ",
        "? ",
        "; ",
        ", ",
        " ",
        "",
    )

    def __init__(
        self,
        chunk_size: int = 1500,
        chunk_overlap: int = 150,
        separators: list[str] | None = None,
        keep_separator: bool = True,
    ):
        effective_separators = (
            list(self._DEFAULT_SEPARATORS) if separators is None else separators
        )
        super().__init__(chunk_size, chunk_overlap, effective_separators, keep_separator)
class DocxChunker(BaseChunker):
    """Chunker for DOCX-extracted text; larger chunks than plain text."""

    # Same hierarchy as TextChunker: sections, paragraphs, lines, sentence
    # and clause punctuation, words, characters.
    _DEFAULT_SEPARATORS: tuple[str, ...] = (
        "\n\n\n",
        "\n\n",
        "\n",
        ". ",
        "! ",
        "? ",
        "; ",
        ", ",
        " ",
        "",
    )

    def __init__(
        self,
        chunk_size: int = 2500,
        chunk_overlap: int = 250,
        separators: list[str] | None = None,
        keep_separator: bool = True,
    ):
        effective_separators = (
            list(self._DEFAULT_SEPARATORS) if separators is None else separators
        )
        super().__init__(chunk_size, chunk_overlap, effective_separators, keep_separator)
class MdxChunker(BaseChunker):
    """Chunker for Markdown/MDX: prefers header and code-block boundaries."""

    # Coarsest to finest: H2/H3/H4 headers, paragraphs, code-block fences,
    # lines, sentence and clause punctuation, words, characters.
    _DEFAULT_SEPARATORS: tuple[str, ...] = (
        "\n## ",
        "\n### ",
        "\n#### ",
        "\n\n",
        "\n```",
        "\n",
        ". ",
        "! ",
        "? ",
        "; ",
        ", ",
        " ",
        "",
    )

    def __init__(
        self,
        chunk_size: int = 3000,
        chunk_overlap: int = 300,
        separators: list[str] | None = None,
        keep_separator: bool = True,
    ):
        effective_separators = (
            list(self._DEFAULT_SEPARATORS) if separators is None else separators
        )
        super().__init__(chunk_size, chunk_overlap, effective_separators, keep_separator)
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/rag/chunkers/text_chunker.py",
"license": "MIT License",
"lines": 70,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/rag/chunkers/web_chunker.py | from crewai_tools.rag.chunkers.base_chunker import BaseChunker
class WebsiteChunker(BaseChunker):
    """Chunker for scraped web-page text; favors section and paragraph breaks."""

    # Coarsest to finest: major sections, paragraphs, lines, sentence and
    # clause punctuation, words, characters.
    _DEFAULT_SEPARATORS: tuple[str, ...] = (
        "\n\n\n",
        "\n\n",
        "\n",
        ". ",
        "! ",
        "? ",
        "; ",
        ", ",
        " ",
        "",
    )

    def __init__(
        self,
        chunk_size: int = 2500,
        chunk_overlap: int = 250,
        separators: list[str] | None = None,
        keep_separator: bool = True,
    ):
        effective_separators = (
            list(self._DEFAULT_SEPARATORS) if separators is None else separators
        )
        super().__init__(chunk_size, chunk_overlap, effective_separators, keep_separator)
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/rag/chunkers/web_chunker.py",
"license": "MIT License",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/rag/core.py | import logging
from pathlib import Path
from typing import Any
from uuid import uuid4
import chromadb
from pydantic import BaseModel, Field, PrivateAttr
from crewai_tools.rag.base_loader import BaseLoader
from crewai_tools.rag.chunkers.base_chunker import BaseChunker
from crewai_tools.rag.data_types import DataType
from crewai_tools.rag.embedding_service import EmbeddingService
from crewai_tools.rag.misc import compute_sha256
from crewai_tools.rag.source_content import SourceContent
from crewai_tools.tools.rag.rag_tool import Adapter
logger = logging.getLogger(__name__)
class Document(BaseModel):
    """A single chunk of content stored in the vector database."""

    # Random UUID4 by default; RAG.add() overrides it with the SHA-256 of
    # the chunk content so identical chunks deduplicate.
    id: str = Field(default_factory=lambda: str(uuid4()))
    content: str
    metadata: dict[str, Any] = Field(default_factory=dict)
    data_type: DataType = DataType.TEXT
    source: str | None = None
class RAG(Adapter):
    """ChromaDB-backed retrieval adapter.

    Chunks, embeds and stores documents in a Chroma collection, and answers
    queries with the top-k most similar chunks.
    """

    # Chroma collection name; created on first use.
    collection_name: str = "crewai_knowledge_base"
    # When set, a persistent on-disk client is used instead of in-memory.
    persist_directory: str | None = None
    embedding_provider: str = "openai"
    embedding_model: str = "text-embedding-3-large"
    # NOTE(review): `summarize` is not referenced anywhere in this class —
    # presumably consumed by callers; confirm before removing.
    summarize: bool = False
    # Number of results returned by query().
    top_k: int = 5
    # Extra keyword arguments forwarded to EmbeddingService.
    embedding_config: dict[str, Any] = Field(default_factory=dict)
    _client: Any = PrivateAttr()
    _collection: Any = PrivateAttr()
    _embedding_service: EmbeddingService = PrivateAttr()

    def model_post_init(self, __context: Any) -> None:
        """Initialize the Chroma client/collection and the embedding service.

        Raises:
            Exception: Re-raises any initialization failure after logging it.
        """
        try:
            if self.persist_directory:
                self._client = chromadb.PersistentClient(path=self.persist_directory)
            else:
                self._client = chromadb.Client()
            self._collection = self._client.get_or_create_collection(
                name=self.collection_name,
                metadata={
                    "hnsw:space": "cosine",
                    "description": "CrewAI Knowledge Base",
                },
            )
            self._embedding_service = EmbeddingService(
                provider=self.embedding_provider,
                model=self.embedding_model,
                **self.embedding_config,
            )
        except Exception as e:
            logger.error(f"Failed to initialize ChromaDB: {e}")
            raise
        super().model_post_init(__context)

    def add(
        self,
        content: str | Path,
        data_type: str | DataType | None = None,
        metadata: dict[str, Any] | None = None,
        loader: BaseLoader | None = None,
        chunker: BaseChunker | None = None,
        **kwargs: Any,
    ) -> None:
        """Load, chunk, embed and store a piece of content.

        Skips storage when a document with the same source and unchanged
        content already exists; replaces it when the content changed.
        """
        source_content = SourceContent(content)
        data_type = self._get_data_type(data_type=data_type, content=source_content)
        # Fall back to the data type's default loader/chunker when none given.
        if not loader:
            loader = data_type.get_loader()
        if not chunker:
            chunker = data_type.get_chunker()
        loader_result = loader.load(source_content)
        doc_id = loader_result.doc_id
        existing_doc = self._collection.get(
            where={"source": source_content.source_ref}, limit=1
        )
        existing_doc_id = (
            existing_doc and existing_doc["metadatas"][0]["doc_id"]
            if existing_doc["metadatas"]
            else None
        )
        # doc_id hashes source+content, so equality means nothing changed.
        if existing_doc_id == doc_id:
            logger.warning(
                f"Document with source {loader_result.source} already exists"
            )
            return
        # A document with the same source ref exists but its content changed:
        # delete the stale chunks before inserting the new ones.
        if existing_doc_id and existing_doc_id != loader_result.doc_id:
            logger.warning(f"Deleting old document with doc_id {existing_doc_id}")
            self._collection.delete(where={"doc_id": existing_doc_id})
        documents = []
        chunks = chunker.chunk(loader_result.content)
        for i, chunk in enumerate(chunks):
            doc_metadata = (metadata or {}).copy()
            doc_metadata["chunk_index"] = i
            documents.append(
                Document(
                    id=compute_sha256(chunk),
                    content=chunk,
                    metadata=doc_metadata,
                    data_type=data_type,
                    source=loader_result.source,
                )
            )
        if not documents:
            logger.warning("No documents to add")
            return
        contents = [doc.content for doc in documents]
        try:
            embeddings = self._embedding_service.embed_batch(contents)
        except Exception as e:
            # Best-effort: a failed embedding batch aborts the add silently
            # (logged) rather than raising to the caller.
            logger.error(f"Failed to generate embeddings: {e}")
            return
        ids = [doc.id for doc in documents]
        metadatas = []
        for doc in documents:
            doc_metadata = doc.metadata.copy()
            doc_metadata.update(
                {
                    "data_type": doc.data_type.value,
                    "source": doc.source,
                    "doc_id": doc_id,
                }
            )
            metadatas.append(doc_metadata)
        try:
            self._collection.add(
                ids=ids,
                embeddings=embeddings,
                documents=contents,
                metadatas=metadatas,
            )
            logger.info(f"Added {len(documents)} documents to knowledge base")
        except Exception as e:
            logger.error(f"Failed to add documents to ChromaDB: {e}")

    def query(self, question: str, where: dict[str, Any] | None = None) -> str:  # type: ignore
        """Return the top-k most similar chunks, formatted with source and score."""
        try:
            question_embedding = self._embedding_service.embed_text(question)
            results = self._collection.query(
                query_embeddings=[question_embedding],
                n_results=self.top_k,
                where=where,
                include=["documents", "metadatas", "distances"],
            )
            if (
                not results
                or not results.get("documents")
                or not results["documents"][0]
            ):
                return "No relevant content found."
            documents = results["documents"][0]
            metadatas = results.get("metadatas", [None])[0] or []
            distances = results.get("distances", [None])[0] or []
            # Return sources with relevance scores
            formatted_results = []
            for i, doc in enumerate(documents):
                metadata = metadatas[i] if i < len(metadatas) else {}
                distance = distances[i] if i < len(distances) else 1.0
                source = metadata.get("source", "unknown") if metadata else "unknown"
                score = (
                    1 - distance if distance is not None else 0
                )  # Convert distance to similarity
                formatted_results.append(
                    f"[Source: {source}, Relevance: {score:.3f}]\n{doc}"
                )
            return "\n\n".join(formatted_results)
        except Exception as e:
            logger.error(f"Query failed: {e}")
            return f"Error querying knowledge base: {e}"

    def delete_collection(self) -> None:
        """Delete the whole Chroma collection (errors are logged, not raised)."""
        try:
            self._client.delete_collection(self.collection_name)
            logger.info(f"Deleted collection: {self.collection_name}")
        except Exception as e:
            logger.error(f"Failed to delete collection: {e}")

    def get_collection_info(self) -> dict[str, Any]:
        """Return name, document count and embedding model of the collection."""
        try:
            count = self._collection.count()
            return {
                "name": self.collection_name,
                "count": count,
                "embedding_model": self.embedding_model,
            }
        except Exception as e:
            logger.error(f"Failed to get collection info: {e}")
            return {"error": str(e)}

    @staticmethod
    def _get_data_type(
        content: SourceContent, data_type: str | DataType | None = None
    ) -> DataType:
        """Resolve an explicit data-type string, else infer from the content."""
        try:
            if isinstance(data_type, str):
                return DataType(data_type)
        except Exception:  # noqa: S110
            # Unknown type string: fall through to content-based detection.
            pass
        return content.data_type
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/rag/core.py",
"license": "MIT License",
"lines": 193,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/rag/data_types.py | from enum import Enum
from importlib import import_module
import os
from pathlib import Path
from typing import cast
from urllib.parse import urlparse
from crewai_tools.rag.base_loader import BaseLoader
from crewai_tools.rag.chunkers.base_chunker import BaseChunker
class DataType(str, Enum):
    """Supported knowledge-source types, each mapped to a loader and chunker."""

    FILE = "file"
    PDF_FILE = "pdf_file"
    TEXT_FILE = "text_file"
    CSV = "csv"
    JSON = "json"
    XML = "xml"
    DOCX = "docx"
    MDX = "mdx"
    MYSQL = "mysql"
    POSTGRES = "postgres"
    GITHUB = "github"
    DIRECTORY = "directory"
    WEBSITE = "website"
    DOCS_SITE = "docs_site"
    YOUTUBE_VIDEO = "youtube_video"
    YOUTUBE_CHANNEL = "youtube_channel"
    TEXT = "text"

    def get_chunker(self) -> BaseChunker:
        """Instantiate the chunker class associated with this data type.

        Returns:
            A new chunker instance with default settings.

        Raises:
            ValueError: If no chunker is defined for this type, or if the
                chunker module/class fails to import.
        """
        # Uses the module-level `import_module` import; the previous
        # duplicate function-local import was redundant and has been removed.
        chunkers = {
            DataType.PDF_FILE: ("text_chunker", "TextChunker"),
            DataType.TEXT_FILE: ("text_chunker", "TextChunker"),
            DataType.TEXT: ("text_chunker", "TextChunker"),
            DataType.DOCX: ("text_chunker", "DocxChunker"),
            DataType.MDX: ("text_chunker", "MdxChunker"),
            # Structured formats
            DataType.CSV: ("structured_chunker", "CsvChunker"),
            DataType.JSON: ("structured_chunker", "JsonChunker"),
            DataType.XML: ("structured_chunker", "XmlChunker"),
            DataType.WEBSITE: ("web_chunker", "WebsiteChunker"),
            DataType.DIRECTORY: ("text_chunker", "TextChunker"),
            DataType.YOUTUBE_VIDEO: ("text_chunker", "TextChunker"),
            DataType.YOUTUBE_CHANNEL: ("text_chunker", "TextChunker"),
            DataType.GITHUB: ("text_chunker", "TextChunker"),
            DataType.DOCS_SITE: ("text_chunker", "TextChunker"),
            DataType.MYSQL: ("text_chunker", "TextChunker"),
            DataType.POSTGRES: ("text_chunker", "TextChunker"),
        }
        if self not in chunkers:
            raise ValueError(f"No chunker defined for {self}")
        module_name, class_name = chunkers[self]
        module_path = f"crewai_tools.rag.chunkers.{module_name}"
        try:
            module = import_module(module_path)
            return cast(BaseChunker, getattr(module, class_name)())
        except Exception as e:
            raise ValueError(f"Error loading chunker for {self}: {e}") from e

    def get_loader(self) -> BaseLoader:
        """Instantiate the loader class associated with this data type.

        Returns:
            A new loader instance with default settings.

        Raises:
            ValueError: If no loader is defined for this type, or if the
                loader module/class fails to import.
        """
        loaders = {
            DataType.PDF_FILE: ("pdf_loader", "PDFLoader"),
            DataType.TEXT_FILE: ("text_loader", "TextFileLoader"),
            DataType.TEXT: ("text_loader", "TextLoader"),
            DataType.XML: ("xml_loader", "XMLLoader"),
            DataType.WEBSITE: ("webpage_loader", "WebPageLoader"),
            DataType.MDX: ("mdx_loader", "MDXLoader"),
            DataType.JSON: ("json_loader", "JSONLoader"),
            DataType.DOCX: ("docx_loader", "DOCXLoader"),
            DataType.CSV: ("csv_loader", "CSVLoader"),
            DataType.DIRECTORY: ("directory_loader", "DirectoryLoader"),
            DataType.YOUTUBE_VIDEO: ("youtube_video_loader", "YoutubeVideoLoader"),
            DataType.YOUTUBE_CHANNEL: (
                "youtube_channel_loader",
                "YoutubeChannelLoader",
            ),
            DataType.GITHUB: ("github_loader", "GithubLoader"),
            DataType.DOCS_SITE: ("docs_site_loader", "DocsSiteLoader"),
            DataType.MYSQL: ("mysql_loader", "MySQLLoader"),
            DataType.POSTGRES: ("postgres_loader", "PostgresLoader"),
        }
        if self not in loaders:
            raise ValueError(f"No loader defined for {self}")
        module_name, class_name = loaders[self]
        module_path = f"crewai_tools.rag.loaders.{module_name}"
        try:
            module = import_module(module_path)
            return cast(BaseLoader, getattr(module, class_name)())
        except Exception as e:
            raise ValueError(f"Error loading loader for {self}: {e}") from e
class DataTypes:
    """Heuristics for inferring a DataType from a string, path, or URL."""

    @staticmethod
    def from_content(content: str | Path | None = None) -> DataType:
        """Infer the DataType of `content`.

        Order of checks: URL (by extension, then docs/github host hints),
        existing file (by extension), existing directory, else plain TEXT.
        """
        if content is None:
            return DataType.TEXT
        if isinstance(content, Path):
            content = str(content)
        is_url = False
        if isinstance(content, str):
            try:
                url = urlparse(content)
                # A "real" URL needs scheme+netloc; file:// counts too.
                is_url = bool(url.scheme and url.netloc) or url.scheme == "file"
            except Exception:  # noqa: S110
                pass

        def get_file_type(path: str) -> DataType | None:
            # Map well-known extensions; first match wins.
            mapping = {
                ".pdf": DataType.PDF_FILE,
                ".csv": DataType.CSV,
                ".mdx": DataType.MDX,
                ".md": DataType.MDX,
                ".docx": DataType.DOCX,
                ".json": DataType.JSON,
                ".xml": DataType.XML,
                ".txt": DataType.TEXT_FILE,
            }
            for ext, dtype in mapping.items():
                if path.endswith(ext):
                    return dtype
            return None

        if is_url:
            dtype = get_file_type(url.path)
            if dtype:
                return dtype
            # Host/path hints: "docs" anywhere suggests a documentation site.
            if "docs" in url.netloc or ("docs" in url.path and url.scheme != "file"):
                return DataType.DOCS_SITE
            if "github.com" in url.netloc:
                return DataType.GITHUB
            return DataType.WEBSITE
        if os.path.isfile(content):
            dtype = get_file_type(content)
            if dtype:
                return dtype
            # NOTE(review): this exists() check is redundant after isfile()
            # succeeded — presumably defensive; confirm before simplifying.
            if os.path.exists(content):
                return DataType.TEXT_FILE
        elif os.path.isdir(content):
            return DataType.DIRECTORY
        return DataType.TEXT
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/rag/data_types.py",
"license": "MIT License",
"lines": 134,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/rag/loaders/csv_loader.py | import csv
from io import StringIO
from crewai_tools.rag.base_loader import BaseLoader, LoaderResult
from crewai_tools.rag.loaders.utils import load_from_url
from crewai_tools.rag.source_content import SourceContent
class CSVLoader(BaseLoader):
    """Loads CSV content from a URL, a local file, or a raw string."""

    def load(self, source_content: SourceContent, **kwargs) -> LoaderResult:  # type: ignore[override]
        """Resolve the raw CSV text from its source and parse it."""
        source_ref = source_content.source_ref
        raw_text = source_content.source
        if source_content.is_url():
            raw_text = load_from_url(
                raw_text,
                kwargs,
                accept_header="text/csv, application/csv, text/plain",
                loader_name="CSVLoader",
            )
        elif source_content.path_exists():
            raw_text = self._load_from_file(raw_text)
        return self._parse_csv(raw_text, source_ref)

    @staticmethod
    def _load_from_file(path: str) -> str:
        """Read a local CSV file as UTF-8 text."""
        with open(path, encoding="utf-8") as fh:
            return fh.read()

    def _parse_csv(self, content: str, source_ref: str) -> LoaderResult:
        """Render the CSV as readable text; fall back to raw content on error."""
        try:
            reader = csv.DictReader(StringIO(content))
            headers = reader.fieldnames
            rendered_lines: list[str] = []
            if headers:
                rendered_lines.append("Headers: " + " | ".join(headers))
                rendered_lines.append("-" * 50)
            for row_index, row in enumerate(reader, 1):
                cells = " | ".join(f"{k}: {v}" for k, v in row.items() if v)
                rendered_lines.append(f"Row {row_index}: {cells}")
            text = "\n".join(rendered_lines)
            metadata = {
                "format": "csv",
                "columns": headers,
                # Subtract the header and divider lines from the line count.
                "rows": len(rendered_lines) - 2 if headers else 0,
            }
        except Exception as exc:
            text = content
            metadata = {"format": "csv", "parse_error": str(exc)}
        return LoaderResult(
            content=text,
            source=source_ref,
            metadata=metadata,
            doc_id=self.generate_doc_id(source_ref=source_ref, content=text),
        )
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/rag/loaders/csv_loader.py",
"license": "MIT License",
"lines": 49,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/rag/loaders/directory_loader.py | import os
from pathlib import Path
from crewai_tools.rag.base_loader import BaseLoader, LoaderResult
from crewai_tools.rag.source_content import SourceContent
class DirectoryLoader(BaseLoader):
    """Loads every matching file under a directory into one combined result."""

    def load(self, source_content: SourceContent, **kwargs) -> LoaderResult:  # type: ignore[override]
        """Load and process all files from a directory recursively.

        Args:
            source_content: Directory path or URL to a directory listing
            **kwargs: Additional options:
                - recursive: bool (default True) - Whether to search recursively
                - include_extensions: list - Only include files with these extensions
                - exclude_extensions: list - Exclude files with these extensions
                - max_files: int - Maximum number of files to process

        Raises:
            ValueError: If given a URL or a path that is not a directory.
            FileNotFoundError: If the directory does not exist.
        """
        source_ref = source_content.source_ref
        if source_content.is_url():
            raise ValueError(
                "URL directory loading is not supported. Please provide a local directory path."
            )
        if not os.path.exists(source_ref):
            raise FileNotFoundError(f"Directory does not exist: {source_ref}")
        if not os.path.isdir(source_ref):
            raise ValueError(f"Path is not a directory: {source_ref}")
        return self._process_directory(source_ref, kwargs)

    def _process_directory(self, dir_path: str, kwargs: dict) -> LoaderResult:
        """Load each matching file, concatenating contents and collecting errors."""
        recursive: bool = kwargs.get("recursive", True)
        include_extensions: list[str] | None = kwargs.get("include_extensions", None)
        exclude_extensions: list[str] | None = kwargs.get("exclude_extensions", None)
        max_files: int | None = kwargs.get("max_files", None)
        files = self._find_files(
            dir_path, recursive, include_extensions, exclude_extensions
        )
        # Truncate to the first max_files entries (files are sorted).
        if max_files is not None and len(files) > max_files:
            files = files[:max_files]
        all_contents = []
        processed_files = []
        errors = []
        for file_path in files:
            try:
                result = self._process_single_file(file_path)
                if result:
                    all_contents.append(f"=== File: {file_path} ===\n{result.content}")
                    processed_files.append(
                        {
                            "path": file_path,
                            "metadata": result.metadata,
                            "source": result.source,
                        }
                    )
            except Exception as e:  # noqa: PERF203
                # Per-file failures are recorded inline rather than aborting
                # the whole directory load.
                error_msg = f"Error processing {file_path}: {e!s}"
                errors.append(error_msg)
                all_contents.append(f"=== File: {file_path} (ERROR) ===\n{error_msg}")
        combined_content = "\n\n".join(all_contents)
        metadata = {
            "format": "directory",
            "directory_path": dir_path,
            "total_files": len(files),
            "processed_files": len(processed_files),
            "errors": len(errors),
            "file_details": processed_files,
            "error_details": errors,
        }
        return LoaderResult(
            content=combined_content,
            source=dir_path,
            metadata=metadata,
            doc_id=self.generate_doc_id(source_ref=dir_path, content=combined_content),
        )

    def _find_files(
        self,
        dir_path: str,
        recursive: bool,
        include_ext: list[str] | None = None,
        exclude_ext: list[str] | None = None,
    ) -> list[str]:
        """Find all files in directory matching criteria."""
        files = []
        if recursive:
            for root, dirs, filenames in os.walk(dir_path):
                # Prune hidden directories in place so os.walk skips them.
                dirs[:] = [d for d in dirs if not d.startswith(".")]
                for filename in filenames:
                    if self._should_include_file(filename, include_ext, exclude_ext):
                        files.append(os.path.join(root, filename))  # noqa: PERF401
        else:
            try:
                for item in os.listdir(dir_path):
                    item_path = os.path.join(dir_path, item)
                    if os.path.isfile(item_path) and self._should_include_file(
                        item, include_ext, exclude_ext
                    ):
                        files.append(item_path)
            except PermissionError:
                # Unreadable directory: treat as empty rather than failing.
                pass
        return sorted(files)

    @staticmethod
    def _should_include_file(
        filename: str,
        include_ext: list[str] | None = None,
        exclude_ext: list[str] | None = None,
    ) -> bool:
        """Determine if a file should be included based on criteria."""
        # Hidden files are always skipped.
        if filename.startswith("."):
            return False
        _, ext = os.path.splitext(filename.lower())
        # Extension filters accept entries with or without a leading dot.
        if include_ext:
            if ext not in [
                e.lower() if e.startswith(".") else f".{e.lower()}" for e in include_ext
            ]:
                return False
        if exclude_ext:
            if ext in [
                e.lower() if e.startswith(".") else f".{e.lower()}" for e in exclude_ext
            ]:
                return False
        return True

    @staticmethod
    def _process_single_file(file_path: str) -> LoaderResult:
        """Dispatch one file to the loader for its detected data type."""
        # Local import avoids a circular dependency with data_types.
        from crewai_tools.rag.data_types import DataTypes

        data_type = DataTypes.from_content(Path(file_path))
        loader = data_type.get_loader()
        result = loader.load(SourceContent(file_path))
        if result.metadata is None:
            result.metadata = {}
        result.metadata.update(
            {
                "file_path": file_path,
                "file_size": os.path.getsize(file_path),
                "data_type": str(data_type),
                "loader_type": loader.__class__.__name__,
            }
        )
        return result
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/rag/loaders/directory_loader.py",
"license": "MIT License",
"lines": 134,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/rag/loaders/docs_site_loader.py | """Documentation site loader."""
from urllib.parse import urljoin, urlparse
from bs4 import BeautifulSoup
import requests
from crewai_tools.rag.base_loader import BaseLoader, LoaderResult
from crewai_tools.rag.source_content import SourceContent
class DocsSiteLoader(BaseLoader):
    """Loader for documentation websites.

    Fetches a docs page, extracts the main content region via a list of
    common CSS selectors, and assembles a text digest: title, a table of
    contents built from headings, the body text, and related nav links.
    """

    def load(self, source: SourceContent, **kwargs) -> LoaderResult:  # type: ignore[override]
        """Load content from a documentation site.

        Args:
            source: Documentation site URL.
            **kwargs: Additional arguments (unused here).

        Returns:
            LoaderResult with the assembled documentation text.

        Raises:
            ValueError: If the page cannot be fetched or no content region
                (not even ``<body>``) can be located.
        """
        docs_url = source.source
        try:
            response = requests.get(docs_url, timeout=30)
            response.raise_for_status()
        except requests.RequestException as e:
            raise ValueError(
                f"Unable to fetch documentation from {docs_url}: {e}"
            ) from e
        soup = BeautifulSoup(response.text, "html.parser")
        # Drop non-content elements before any text extraction.
        for script in soup(["script", "style"]):
            script.decompose()
        title = soup.find("title")
        title_text = title.get_text(strip=True) if title else "Documentation"
        # Try common "main content" selectors in priority order; first hit wins.
        # If none match, main_content ends up None (last select_one result).
        for selector in [
            "main",
            "article",
            '[role="main"]',
            ".content",
            "#content",
            ".documentation",
        ]:
            main_content = soup.select_one(selector)
            if main_content:
                break
        if not main_content:
            main_content = soup.find("body")
        if not main_content:
            raise ValueError(
                f"Unable to extract content from documentation site: {docs_url}"
            )
        text_parts = [f"Title: {title_text}", ""]
        # Build a shallow table of contents from up to 15 h1-h3 headings,
        # indented by heading level.
        headings = main_content.find_all(["h1", "h2", "h3"])
        if headings:
            text_parts.append("Table of Contents:")
            for heading in headings[:15]:
                level = int(heading.name[1])
                indent = " " * (level - 1)
                text_parts.append(f"{indent}- {heading.get_text(strip=True)}")
            text_parts.append("")
        # Body text with blank lines squeezed out.
        text = main_content.get_text(separator="\n", strip=True)
        lines = [line.strip() for line in text.split("\n") if line.strip()]
        text_parts.extend(lines)
        # Collect relative nav links (skipping absolute/mailto/anchor links)
        # from common navigation containers; capped at 20 per container.
        nav_links = []
        for nav_selector in ["nav", ".sidebar", ".toc", ".navigation"]:
            nav = soup.select_one(nav_selector)
            if nav:
                links = nav.find_all("a", href=True)
                for link in links[:20]:
                    href = link.get("href", "")
                    if isinstance(href, str) and not href.startswith(
                        ("http://", "https://", "mailto:", "#")
                    ):
                        full_url = urljoin(docs_url, href)
                        nav_links.append(f"- {link.get_text(strip=True)}: {full_url}")
        if nav_links:
            text_parts.append("")
            text_parts.append("Related documentation pages:")
            text_parts.extend(nav_links[:10])
        content = "\n".join(text_parts)
        # Hard cap to keep very large docs pages from bloating the index.
        if len(content) > 100000:
            content = content[:100000] + "\n\n[Content truncated...]"
        return LoaderResult(
            content=content,
            metadata={
                "source": docs_url,
                "title": title_text,
                "domain": urlparse(docs_url).netloc,
            },
            doc_id=self.generate_doc_id(source_ref=docs_url, content=content),
        )
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/rag/loaders/docs_site_loader.py",
"license": "MIT License",
"lines": 86,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/rag/loaders/docx_loader.py | import os
import tempfile
from typing import Any
import requests
from crewai_tools.rag.base_loader import BaseLoader, LoaderResult
from crewai_tools.rag.source_content import SourceContent
class DOCXLoader(BaseLoader):
    """Load text content from DOCX documents given as local paths or URLs."""

    def load(self, source_content: SourceContent, **kwargs) -> LoaderResult:  # type: ignore[override]
        """Resolve the source (URL or file path) and extract its paragraph text.

        Raises:
            ImportError: If python-docx is not installed.
            ValueError: If the source is neither a URL nor an existing path.
        """
        try:
            from docx import Document as DocxDocument
        except ImportError as e:
            raise ImportError(
                "python-docx is required for DOCX loading. Install with: 'uv pip install python-docx' or pip install crewai-tools[rag]"
            ) from e

        source_ref = source_content.source_ref
        if source_content.is_url():
            # Fetch into a temp file, parse it, then always remove it.
            temp_path = self._download_from_url(source_ref, kwargs)
            try:
                return self._load_from_file(temp_path, source_ref, DocxDocument)
            finally:
                os.unlink(temp_path)
        if source_content.path_exists():
            return self._load_from_file(source_ref, source_ref, DocxDocument)
        raise ValueError(
            f"Source must be a valid file path or URL, got: {source_content.source}"
        )

    @staticmethod
    def _download_from_url(url: str, kwargs: dict) -> str:
        """Download the DOCX at *url* into a temp file and return its path."""
        default_headers = {
            "Accept": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
            "User-Agent": "Mozilla/5.0 (compatible; crewai-tools DOCXLoader)",
        }
        request_headers = kwargs.get("headers", default_headers)
        try:
            response = requests.get(url, headers=request_headers, timeout=30)
            response.raise_for_status()
            # Persist the body so python-docx can open it by path; the caller
            # is responsible for unlinking the file.
            with tempfile.NamedTemporaryFile(suffix=".docx", delete=False) as handle:
                handle.write(response.content)
                return handle.name
        except Exception as e:
            raise ValueError(f"Error fetching content from URL {url}: {e!s}") from e

    def _load_from_file(
        self,
        file_path: str,
        source_ref: str,
        DocxDocument: Any,  # noqa: N803
    ) -> LoaderResult:
        """Parse *file_path* with python-docx and join its non-empty paragraphs."""
        try:
            document = DocxDocument(file_path)
            content = "\n".join(
                paragraph.text
                for paragraph in document.paragraphs
                if paragraph.text.strip()
            )
            return LoaderResult(
                content=content,
                source=source_ref,
                metadata={
                    "format": "docx",
                    "paragraphs": len(document.paragraphs),
                    "tables": len(document.tables),
                },
                doc_id=self.generate_doc_id(source_ref=source_ref, content=content),
            )
        except Exception as e:
            raise ValueError(f"Error loading DOCX file: {e!s}") from e
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/rag/loaders/docx_loader.py",
"license": "MIT License",
"lines": 71,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/rag/loaders/github_loader.py | """GitHub repository content loader."""
from github import Github, GithubException
from crewai_tools.rag.base_loader import BaseLoader, LoaderResult
from crewai_tools.rag.source_content import SourceContent
class GithubLoader(BaseLoader):
    """Loader for GitHub repository content.

    Assembles a text digest of a repository — summary stats, README, file
    listing, open PRs, and open issues — driven by a ``content_types`` list.
    """

    def load(self, source: SourceContent, **kwargs) -> LoaderResult:  # type: ignore[override]
        """Load content from a GitHub repository.

        Args:
            source: GitHub repository URL (``https://github.com/owner/repo``).
            **kwargs: ``metadata`` may carry ``gh_token`` (auth token) and
                ``content_types`` (subset of "repo", "code", "pr", "issue").

        Returns:
            LoaderResult with the assembled repository text.

        Raises:
            ValueError: For a malformed URL, an inaccessible repository, or
                when no content could be assembled.
        """
        metadata = kwargs.get("metadata", {})
        gh_token = metadata.get("gh_token")
        # Default sections: repo summary plus code overview (README + tree).
        content_types = metadata.get("content_types", ["code", "repo"])
        repo_url = source.source
        if not repo_url.startswith("https://github.com/"):
            raise ValueError(f"Invalid GitHub URL: {repo_url}")
        parts = repo_url.replace("https://github.com/", "").strip("/").split("/")
        if len(parts) < 2:
            raise ValueError(f"Invalid GitHub repository URL: {repo_url}")
        # "owner/repo" — any extra path segments (e.g. /tree/main) are ignored.
        repo_name = f"{parts[0]}/{parts[1]}"
        # Anonymous client when no token is given (subject to lower rate limits).
        g = Github(gh_token) if gh_token else Github()
        try:
            repo = g.get_repo(repo_name)
        except GithubException as e:
            raise ValueError(f"Unable to access repository {repo_name}: {e}") from e
        all_content = []
        if "repo" in content_types:
            all_content.append(f"Repository: {repo.full_name}")
            all_content.append(f"Description: {repo.description or 'No description'}")
            all_content.append(f"Language: {repo.language or 'Not specified'}")
            all_content.append(f"Stars: {repo.stargazers_count}")
            all_content.append(f"Forks: {repo.forks_count}")
            all_content.append("")
        if "code" in content_types:
            # README and the top-level tree are best-effort: a missing README
            # or empty repo raises GithubException, which we ignore.
            try:
                readme = repo.get_readme()
                all_content.append("README:")
                all_content.append(readme.decoded_content.decode(errors="ignore"))
                all_content.append("")
            except GithubException:
                pass
            try:
                contents = repo.get_contents("")
                if isinstance(contents, list):
                    all_content.append("Repository structure:")
                    # Only the first 20 top-level entries are listed.
                    for content_file in contents[:20]:
                        all_content.append(  # noqa: PERF401
                            f"- {content_file.path} ({content_file.type})"
                        )
                    all_content.append("")
            except GithubException:
                pass
        if "pr" in content_types:
            prs = repo.get_pulls(state="open")
            pr_list = list(prs[:5])
            if pr_list:
                all_content.append("Recent Pull Requests:")
                for pr in pr_list:
                    all_content.append(f"- PR #{pr.number}: {pr.title}")
                    if pr.body:
                        # First 200 chars of the body, flattened to one line.
                        body_preview = pr.body[:200].replace("\n", " ")
                        all_content.append(f" {body_preview}")
                all_content.append("")
        if "issue" in content_types:
            issues = repo.get_issues(state="open")
            # GitHub's issues API also returns PRs; filter those out, then
            # keep at most 5 true issues from the first 10 results.
            issue_list = [i for i in list(issues[:10]) if not i.pull_request][:5]
            if issue_list:
                all_content.append("Recent Issues:")
                for issue in issue_list:
                    all_content.append(f"- Issue #{issue.number}: {issue.title}")
                    if issue.body:
                        body_preview = issue.body[:200].replace("\n", " ")
                        all_content.append(f" {body_preview}")
                all_content.append("")
        if not all_content:
            raise ValueError(f"No content could be loaded from repository: {repo_url}")
        content = "\n".join(all_content)
        return LoaderResult(
            content=content,
            metadata={
                "source": repo_url,
                "repo": repo_name,
                "content_types": content_types,
            },
            doc_id=self.generate_doc_id(source_ref=repo_url, content=content),
        )
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/rag/loaders/github_loader.py",
"license": "MIT License",
"lines": 90,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/rag/loaders/json_loader.py | import json
from crewai_tools.rag.base_loader import BaseLoader, LoaderResult
from crewai_tools.rag.loaders.utils import load_from_url
from crewai_tools.rag.source_content import SourceContent
class JSONLoader(BaseLoader):
    """Load JSON from a URL, file path, or raw string and flatten it to text."""

    def load(self, source_content: SourceContent, **kwargs) -> LoaderResult:  # type: ignore[override]
        """Resolve the source (URL, path, or raw JSON text) and parse it."""
        source_ref = source_content.source_ref
        if source_content.is_url():
            raw = load_from_url(
                source_ref,
                kwargs,
                accept_header="application/json",
                loader_name="JSONLoader",
            )
        elif source_content.path_exists():
            raw = self._load_from_file(source_ref)
        else:
            # Neither URL nor file: treat the source string as the JSON itself.
            raw = source_content.source
        return self._parse_json(raw, source_ref)

    @staticmethod
    def _load_from_file(path: str) -> str:
        """Read the file at *path* as UTF-8 text."""
        with open(path, encoding="utf-8") as handle:
            return handle.read()

    def _parse_json(self, content: str, source_ref: str) -> LoaderResult:
        """Flatten parsed JSON to line-oriented text; keep raw text on error."""
        try:
            parsed = json.loads(content)
        except json.JSONDecodeError as e:
            # Unparseable input degrades gracefully: raw text + error note.
            text = content
            metadata = {"format": "json", "parse_error": str(e)}
        else:
            if isinstance(parsed, dict):
                pairs = [f"{k}: {json.dumps(v, indent=0)}" for k, v in parsed.items()]
                text = "\n".join(pairs)
            elif isinstance(parsed, list):
                text = "\n".join(json.dumps(entry, indent=0) for entry in parsed)
            else:
                text = json.dumps(parsed, indent=0)
            metadata = {
                "format": "json",
                "type": type(parsed).__name__,
                "size": len(parsed) if isinstance(parsed, (list, dict)) else 1,
            }
        return LoaderResult(
            content=text,
            source=source_ref,
            metadata=metadata,
            doc_id=self.generate_doc_id(source_ref=source_ref, content=text),
        )
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/rag/loaders/json_loader.py",
"license": "MIT License",
"lines": 47,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/rag/loaders/mdx_loader.py | import re
from typing import Final
from crewai_tools.rag.base_loader import BaseLoader, LoaderResult
from crewai_tools.rag.loaders.utils import load_from_url
from crewai_tools.rag.source_content import SourceContent
_IMPORT_PATTERN: Final[re.Pattern[str]] = re.compile(r"^import\s+.*?\n", re.MULTILINE)
_EXPORT_PATTERN: Final[re.Pattern[str]] = re.compile(
r"^export\s+.*?(?:\n|$)", re.MULTILINE
)
_JSX_TAG_PATTERN: Final[re.Pattern[str]] = re.compile(r"<[^>]+>")
_EXTRA_NEWLINES_PATTERN: Final[re.Pattern[str]] = re.compile(r"\n\s*\n\s*\n")
class MDXLoader(BaseLoader):
    """Reduce MDX documents to plain text by stripping JS/JSX constructs."""

    def load(self, source_content: SourceContent, **kwargs) -> LoaderResult:  # type: ignore[override]
        """Resolve the source (URL, path, or raw text) and clean the MDX."""
        source_ref = source_content.source_ref
        if source_content.is_url():
            raw = load_from_url(
                source_ref,
                kwargs,
                accept_header="text/markdown, text/x-markdown, text/plain",
                loader_name="MDXLoader",
            )
        elif source_content.path_exists():
            raw = self._load_from_file(source_ref)
        else:
            # Neither URL nor file: treat the source string as the MDX itself.
            raw = source_content.source
        return self._parse_mdx(raw, source_ref)

    @staticmethod
    def _load_from_file(path: str) -> str:
        """Read the file at *path* as UTF-8 text."""
        with open(path, encoding="utf-8") as handle:
            return handle.read()

    def _parse_mdx(self, content: str, source_ref: str) -> LoaderResult:
        """Strip imports, exports, and JSX tags, then collapse extra blank lines."""
        cleaned = content
        # Apply the module-level patterns in order: import statements, export
        # statements, JSX tags, then triple-newline runs squeezed to doubles.
        for pattern, replacement in (
            (_IMPORT_PATTERN, ""),
            (_EXPORT_PATTERN, ""),
            (_JSX_TAG_PATTERN, ""),
            (_EXTRA_NEWLINES_PATTERN, "\n\n"),
        ):
            cleaned = pattern.sub(replacement, cleaned)
        cleaned = cleaned.strip()
        return LoaderResult(
            content=cleaned,
            source=source_ref,
            metadata={"format": "mdx"},
            doc_id=self.generate_doc_id(source_ref=source_ref, content=cleaned),
        )
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/rag/loaders/mdx_loader.py",
"license": "MIT License",
"lines": 47,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/rag/loaders/mysql_loader.py | """MySQL database loader."""
from typing import Any
from urllib.parse import urlparse
from pymysql import Error, connect
from pymysql.cursors import DictCursor
from crewai_tools.rag.base_loader import BaseLoader, LoaderResult
from crewai_tools.rag.source_content import SourceContent
class MySQLLoader(BaseLoader):
    """Loader for MySQL database content.

    Executes a caller-supplied SQL query and renders the result set as a
    line-oriented text report (columns, row count, then one block per row).
    """

    def load(self, source: SourceContent, **kwargs: Any) -> LoaderResult:  # type: ignore[override]
        """Load content from a MySQL database table.

        Args:
            source: SQL query (e.g., "SELECT * FROM table_name").
            **kwargs: ``metadata`` must carry ``db_uri`` — a
                ``mysql://`` or ``mysql+pymysql://`` connection URI.

        Returns:
            LoaderResult with the rendered query result.

        Raises:
            ValueError: For a missing/invalid URI or any database error.
        """
        metadata = kwargs.get("metadata", {})
        db_uri = metadata.get("db_uri")
        if not db_uri:
            raise ValueError("Database URI is required for MySQL loader")
        query = source.source
        parsed = urlparse(db_uri)
        if parsed.scheme not in ["mysql", "mysql+pymysql"]:
            raise ValueError(f"Invalid MySQL URI scheme: {parsed.scheme}")
        connection_params = {
            "host": parsed.hostname or "localhost",
            "port": parsed.port or 3306,
            "user": parsed.username,
            "password": parsed.password,
            "database": parsed.path.lstrip("/") if parsed.path else None,
            "charset": "utf8mb4",
            # DictCursor makes fetchall() return dicts, so rows[0].keys()
            # below yields column names.
            "cursorclass": DictCursor,
        }
        if not connection_params["database"]:
            raise ValueError("Database name is required in the URI")
        try:
            connection = connect(**connection_params)
            # Inner try/finally guarantees the connection is closed even when
            # the query or rendering fails.
            try:
                with connection.cursor() as cursor:
                    cursor.execute(query)
                    rows = cursor.fetchall()
                    if not rows:
                        content = "No data found in the table"
                        return LoaderResult(
                            content=content,
                            metadata={"source": query, "row_count": 0},
                            doc_id=self.generate_doc_id(
                                source_ref=query, content=content
                            ),
                        )
                    text_parts = []
                    columns = list(rows[0].keys())
                    text_parts.append(f"Columns: {', '.join(columns)}")
                    text_parts.append(f"Total rows: {len(rows)}")
                    text_parts.append("")
                    # One block per row; NULL values are omitted.
                    for i, row in enumerate(rows, 1):
                        text_parts.append(f"Row {i}:")
                        for col, val in row.items():
                            if val is not None:
                                text_parts.append(f" {col}: {val}")
                        text_parts.append("")
                    content = "\n".join(text_parts)
                    # Hard cap to keep huge result sets from bloating the index.
                    if len(content) > 100000:
                        content = content[:100000] + "\n\n[Content truncated...]"
                    return LoaderResult(
                        content=content,
                        metadata={
                            "source": query,
                            "database": connection_params["database"],
                            "row_count": len(rows),
                            "columns": columns,
                        },
                        doc_id=self.generate_doc_id(source_ref=query, content=content),
                    )
            finally:
                connection.close()
        except Error as e:
            # pymysql-specific failures first, then anything else.
            raise ValueError(f"MySQL database error: {e}") from e
        except Exception as e:
            raise ValueError(f"Failed to load data from MySQL: {e}") from e
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/rag/loaders/mysql_loader.py",
"license": "MIT License",
"lines": 81,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/rag/loaders/pdf_loader.py | """PDF loader for extracting text from PDF files."""
import os
from pathlib import Path
from typing import Any, cast
from urllib.parse import urlparse
import urllib.request
from crewai_tools.rag.base_loader import BaseLoader, LoaderResult
from crewai_tools.rag.source_content import SourceContent
class PDFLoader(BaseLoader):
    """Loader for PDF files and URLs.

    Extracts per-page text with pymupdf; remote PDFs are downloaded into
    memory and opened from bytes.
    """

    @staticmethod
    def _is_url(path: str) -> bool:
        """Check if the path is an http(s) URL."""
        try:
            parsed = urlparse(path)
            return parsed.scheme in ("http", "https")
        except Exception:
            # urlparse rarely raises; treat any failure as "not a URL".
            return False

    @staticmethod
    def _download_pdf(url: str) -> bytes:
        """Download PDF content from a URL.

        Args:
            url: The URL to download from.

        Returns:
            The PDF content as bytes.

        Raises:
            ValueError: If the download fails.
        """
        try:
            with urllib.request.urlopen(url, timeout=30) as response:  # noqa: S310
                return cast(bytes, response.read())
        except Exception as e:
            raise ValueError(f"Failed to download PDF from {url}: {e!s}") from e

    def load(self, source: SourceContent, **kwargs: Any) -> LoaderResult:  # type: ignore[override]
        """Load and extract text from a PDF file or URL.

        Args:
            source: The source content containing the PDF file path or URL.

        Returns:
            LoaderResult with extracted text content (one "Page N:" section
            per page that has text).

        Raises:
            FileNotFoundError: If the PDF file doesn't exist.
            ImportError: If pymupdf isn't installed.
            ValueError: If the PDF cannot be read or downloaded.
        """
        try:
            import pymupdf  # type: ignore[import-untyped]
        except ImportError as e:
            raise ImportError(
                "PDF support requires pymupdf. Install with: uv add pymupdf"
            ) from e
        file_path = source.source
        is_url = self._is_url(file_path)
        # Derive a display name: last URL path segment, or the file's name.
        if is_url:
            source_name = Path(urlparse(file_path).path).name or "downloaded.pdf"
        else:
            source_name = Path(file_path).name
        text_content: list[str] = []
        metadata: dict[str, Any] = {
            "source": file_path,
            "file_name": source_name,
            "file_type": "pdf",
        }
        try:
            if is_url:
                pdf_bytes = self._download_pdf(file_path)
                doc = pymupdf.open(stream=pdf_bytes, filetype="pdf")
            else:
                if not os.path.isfile(file_path):
                    raise FileNotFoundError(f"PDF file not found: {file_path}")
                doc = pymupdf.open(file_path)
            metadata["num_pages"] = len(doc)
            # Pages with no extractable text are skipped entirely.
            for page_num, page in enumerate(doc, 1):
                page_text = page.get_text()
                if page_text.strip():
                    text_content.append(f"Page {page_num}:\n{page_text}")
            doc.close()
        except FileNotFoundError:
            # Let the documented FileNotFoundError pass through unwrapped.
            raise
        except Exception as e:
            raise ValueError(f"Error reading PDF from {file_path}: {e!s}") from e
        # Image-only/empty PDFs get a placeholder instead of empty content.
        if not text_content:
            content = f"[PDF file with no extractable text: {source_name}]"
        else:
            content = "\n\n".join(text_content)
        return LoaderResult(
            content=content,
            source=file_path,
            metadata=metadata,
            doc_id=self.generate_doc_id(source_ref=file_path, content=content),
        )
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/rag/loaders/pdf_loader.py",
"license": "MIT License",
"lines": 90,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/rag/loaders/postgres_loader.py | """PostgreSQL database loader."""
from urllib.parse import urlparse
from psycopg2 import Error, connect
from psycopg2.extras import RealDictCursor
from crewai_tools.rag.base_loader import BaseLoader, LoaderResult
from crewai_tools.rag.source_content import SourceContent
class PostgresLoader(BaseLoader):
    """Loader for PostgreSQL database content.

    Executes a caller-supplied SQL query and renders the result set as a
    line-oriented text report (columns, row count, then one block per row).
    """

    def load(self, source: SourceContent, **kwargs) -> LoaderResult:  # type: ignore[override]
        """Load content from a PostgreSQL database table.

        Args:
            source: SQL query (e.g., "SELECT * FROM table_name").
            **kwargs: ``metadata`` must carry ``db_uri`` — a ``postgresql://``,
                ``postgres://``, or ``postgresql+psycopg2://`` connection URI.

        Returns:
            LoaderResult with the rendered query result.

        Raises:
            ValueError: For a missing/invalid URI or any database error.
        """
        metadata = kwargs.get("metadata", {})
        db_uri = metadata.get("db_uri")
        if not db_uri:
            raise ValueError("Database URI is required for PostgreSQL loader")
        query = source.source
        parsed = urlparse(db_uri)
        if parsed.scheme not in ["postgresql", "postgres", "postgresql+psycopg2"]:
            raise ValueError(f"Invalid PostgreSQL URI scheme: {parsed.scheme}")
        connection_params = {
            "host": parsed.hostname or "localhost",
            "port": parsed.port or 5432,
            "user": parsed.username,
            "password": parsed.password,
            "database": parsed.path.lstrip("/") if parsed.path else None,
            # RealDictCursor makes fetchall() return dicts, so rows[0].keys()
            # below yields column names.
            "cursor_factory": RealDictCursor,
        }
        if not connection_params["database"]:
            raise ValueError("Database name is required in the URI")
        try:
            connection = connect(**connection_params)
            # Inner try/finally guarantees the connection is closed even when
            # the query or rendering fails.
            try:
                with connection.cursor() as cursor:
                    cursor.execute(query)
                    rows = cursor.fetchall()
                    if not rows:
                        content = "No data found in the table"
                        return LoaderResult(
                            content=content,
                            metadata={"source": query, "row_count": 0},
                            doc_id=self.generate_doc_id(
                                source_ref=query, content=content
                            ),
                        )
                    text_parts = []
                    columns = list(rows[0].keys())
                    text_parts.append(f"Columns: {', '.join(columns)}")
                    text_parts.append(f"Total rows: {len(rows)}")
                    text_parts.append("")
                    # One block per row; NULL values are omitted.
                    for i, row in enumerate(rows, 1):
                        text_parts.append(f"Row {i}:")
                        for col, val in row.items():
                            if val is not None:
                                text_parts.append(f" {col}: {val}")
                        text_parts.append("")
                    content = "\n".join(text_parts)
                    # Hard cap to keep huge result sets from bloating the index.
                    if len(content) > 100000:
                        content = content[:100000] + "\n\n[Content truncated...]"
                    return LoaderResult(
                        content=content,
                        metadata={
                            "source": query,
                            "database": connection_params["database"],
                            "row_count": len(rows),
                            "columns": columns,
                        },
                        doc_id=self.generate_doc_id(source_ref=query, content=content),
                    )
            finally:
                connection.close()
        except Error as e:
            # psycopg2-specific failures first, then anything else.
            raise ValueError(f"PostgreSQL database error: {e}") from e
        except Exception as e:
            raise ValueError(f"Failed to load data from PostgreSQL: {e}") from e
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/rag/loaders/postgres_loader.py",
"license": "MIT License",
"lines": 79,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/rag/loaders/text_loader.py | from crewai_tools.rag.base_loader import BaseLoader, LoaderResult
from crewai_tools.rag.source_content import SourceContent
class TextFileLoader(BaseLoader):
    """Read a UTF-8 text file from disk into a LoaderResult."""

    def load(self, source_content: SourceContent, **kwargs) -> LoaderResult:  # type: ignore[override]
        """Read the file named by *source_content* and wrap its text.

        Raises:
            FileNotFoundError: If the path does not point to an existing file.
        """
        source_ref = source_content.source_ref
        if not source_content.path_exists():
            raise FileNotFoundError(
                f"The following file does not exist: {source_content.source}"
            )
        with open(source_content.source, encoding="utf-8") as handle:
            text = handle.read()
        return LoaderResult(
            content=text,
            source=source_ref,
            doc_id=self.generate_doc_id(source_ref=source_ref, content=text),
        )
class TextLoader(BaseLoader):
    """Wrap raw in-memory text in a LoaderResult without any file I/O."""

    def load(self, source_content: SourceContent, **kwargs) -> LoaderResult:  # type: ignore[override]
        """Treat the source string itself as the document content."""
        raw_text = source_content.source
        return LoaderResult(
            content=raw_text,
            source=source_content.source_ref,
            doc_id=self.generate_doc_id(content=raw_text),
        )
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/rag/loaders/text_loader.py",
"license": "MIT License",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/rag/loaders/utils.py | """Utility functions for RAG loaders."""
def load_from_url(
    url: str, kwargs: dict, accept_header: str = "*/*", loader_name: str = "Loader"
) -> str:
    """Fetch *url* over HTTP and return the response body as text.

    Args:
        url: The URL to fetch content from.
        kwargs: Extra keyword arguments; an optional ``headers`` entry
            replaces the default request headers entirely.
        accept_header: Value for the request's Accept header.
        loader_name: Loader name embedded in the default User-Agent header.

    Returns:
        The text content of the response.

    Raises:
        ValueError: If the request fails for any reason.
    """
    import requests

    default_headers = {
        "Accept": accept_header,
        "User-Agent": f"Mozilla/5.0 (compatible; crewai-tools {loader_name})",
    }
    request_headers = kwargs.get("headers", default_headers)
    try:
        response = requests.get(url, headers=request_headers, timeout=30)
        response.raise_for_status()
        return response.text
    except Exception as e:
        raise ValueError(f"Error fetching content from URL {url}: {e!s}") from e
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/rag/loaders/utils.py",
"license": "MIT License",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/rag/loaders/webpage_loader.py | import re
from typing import Final
from bs4 import BeautifulSoup
import requests
from crewai_tools.rag.base_loader import BaseLoader, LoaderResult
from crewai_tools.rag.source_content import SourceContent
_SPACES_PATTERN: Final[re.Pattern[str]] = re.compile(r"[ \t]+")
_NEWLINE_PATTERN: Final[re.Pattern[str]] = re.compile(r"\s+\n\s+")
class WebPageLoader(BaseLoader):
    """Fetch a web page over HTTP and reduce it to readable plain text."""

    def load(self, source_content: SourceContent, **kwargs) -> LoaderResult:  # type: ignore[override]
        """Download the page, strip scripts/styles, and return normalized text."""
        url = source_content.source
        browser_headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
            "Accept-Language": "en-US,en;q=0.9",
        }
        request_headers = kwargs.get("headers", browser_headers)
        try:
            response = requests.get(url, timeout=15, headers=request_headers)
            # Prefer the sniffed encoding over the declared one.
            response.encoding = response.apparent_encoding
            soup = BeautifulSoup(response.text, "html.parser")
            for node in soup(["script", "style"]):
                node.decompose()
            raw_text = soup.get_text(" ")
            collapsed = _SPACES_PATTERN.sub(" ", raw_text)
            cleaned = _NEWLINE_PATTERN.sub("\n", collapsed).strip()
            page_title = ""
            if soup.title and soup.title.string:
                page_title = soup.title.string.strip()
            page_metadata = {
                "url": url,
                "title": page_title,
                "status_code": response.status_code,
                "content_type": response.headers.get("content-type", ""),
            }
            return LoaderResult(
                content=cleaned,
                source=url,
                metadata=page_metadata,
                doc_id=self.generate_doc_id(source_ref=url, content=cleaned),
            )
        except Exception as e:
            raise ValueError(f"Error loading webpage {url}: {e!s}") from e
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/rag/loaders/webpage_loader.py",
"license": "MIT License",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/rag/loaders/xml_loader.py | from typing import Any
from xml.etree.ElementTree import ParseError, fromstring, parse
from crewai_tools.rag.base_loader import BaseLoader, LoaderResult
from crewai_tools.rag.loaders.utils import load_from_url
from crewai_tools.rag.source_content import SourceContent
class XMLLoader(BaseLoader):
    """Load XML from a URL, file path, or raw string and extract its text."""

    def load(self, source_content: SourceContent, **kwargs: Any) -> LoaderResult:  # type: ignore[override]
        """Load and parse XML content from various sources.

        Args:
            source_content: The source content to load (URL, path, or raw XML).
            **kwargs: Additional keyword arguments for loading from URL.

        Returns:
            LoaderResult: Extracted text plus format metadata; on a parse
            failure the raw content is kept with a "parse_error" entry.
        """
        source_ref = source_content.source_ref
        content = source_content.source
        if source_content.is_url():
            content = load_from_url(
                source_ref,
                kwargs,
                accept_header="application/xml, text/xml, text/plain",
                loader_name="XMLLoader",
            )
        elif source_content.path_exists():
            content = self._load_from_file(source_ref)
        return self._parse_xml(content, source_ref)

    @staticmethod
    def _load_from_file(path: str) -> str:
        """Read the file at *path* as UTF-8 text."""
        with open(path, encoding="utf-8") as file:
            return file.read()

    def _parse_xml(self, content: str, source_ref: str) -> LoaderResult:
        """Extract all text nodes from the XML, falling back to raw content.

        Fix: also catch OSError alongside ParseError. When *content* does not
        start with "<" we fall back to parsing *source_ref* as a file path;
        for a non-existent path that raised an unwrapped OSError instead of
        degrading to the raw-content-with-parse_error result the sibling
        JSONLoader produces.
        """
        try:
            if content.strip().startswith("<"):
                root = fromstring(content)  # noqa: S314
            else:
                # Content doesn't look like inline XML: treat the reference
                # itself as a file path.
                root = parse(source_ref).getroot()  # noqa: S314
            text_parts = [
                fragment.strip()
                for fragment in root.itertext()
                if fragment and fragment.strip()
            ]
            text = "\n".join(text_parts)
            metadata = {"format": "xml", "root_tag": root.tag}
        except (ParseError, OSError) as e:
            # Degrade gracefully: keep the raw content and record the failure.
            text = content
            metadata = {"format": "xml", "parse_error": str(e)}
        return LoaderResult(
            content=text,
            source=source_ref,
            metadata=metadata,
            doc_id=self.generate_doc_id(source_ref=source_ref, content=text),
        )
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/rag/loaders/xml_loader.py",
"license": "MIT License",
"lines": 51,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/rag/loaders/youtube_channel_loader.py | """YouTube channel loader for extracting content from YouTube channels."""
import re
from typing import Any
from crewai_tools.rag.base_loader import BaseLoader, LoaderResult
from crewai_tools.rag.source_content import SourceContent
class YoutubeChannelLoader(BaseLoader):
    """Loader for YouTube channels.

    Builds a text digest of a channel: header stats plus per-video summaries
    (title, URL, description snippet, and a transcript preview when one is
    available).
    """

    def load(self, source: SourceContent, **kwargs) -> LoaderResult:  # type: ignore[override]
        """Load and extract content from a YouTube channel.

        Args:
            source: The source content containing the YouTube channel URL.
            **kwargs: ``max_videos`` caps how many videos are summarized
                (default 10).

        Returns:
            LoaderResult with the channel digest.

        Raises:
            ImportError: If pytube isn't installed.
            ValueError: If the URL is not a valid YouTube channel URL, or any
                failure occurs while reading the channel.
        """
        try:
            from pytube import Channel  # type: ignore[import-untyped]
        except ImportError as e:
            raise ImportError(
                "YouTube channel support requires pytube. Install with: uv add pytube"
            ) from e
        channel_url = source.source
        # Accept the four common channel URL shapes; anything else is rejected.
        if not any(
            pattern in channel_url
            for pattern in [
                "youtube.com/channel/",
                "youtube.com/c/",
                "youtube.com/@",
                "youtube.com/user/",
            ]
        ):
            raise ValueError(f"Invalid YouTube channel URL: {channel_url}")
        metadata: dict[str, Any] = {
            "source": channel_url,
            "data_type": "youtube_channel",
        }
        try:
            channel = Channel(channel_url)
            metadata["channel_name"] = channel.channel_name
            metadata["channel_id"] = channel.channel_id
            max_videos = kwargs.get("max_videos", 10)
            video_urls = list(channel.video_urls)[:max_videos]
            metadata["num_videos_loaded"] = len(video_urls)
            metadata["total_videos"] = len(list(channel.video_urls))
            content_parts = [
                f"YouTube Channel: {channel.channel_name}",
                f"Channel ID: {channel.channel_id}",
                f"Total Videos: {metadata['total_videos']}",
                f"Videos Loaded: {metadata['num_videos_loaded']}",
                "\n--- Video Summaries ---\n",
            ]
            # Per-video detail is best-effort: if the extra libraries are
            # missing we fall back to listing bare URLs (ImportError branch
            # at the bottom of this try).
            try:
                from pytube import YouTube
                from youtube_transcript_api import YouTubeTranscriptApi

                for i, video_url in enumerate(video_urls, 1):
                    try:
                        video_id = self._extract_video_id(video_url)
                        if not video_id:
                            continue
                        yt = YouTube(video_url)
                        title = yt.title or f"Video {i}"
                        description = (
                            yt.description[:200] if yt.description else "No description"
                        )
                        content_parts.append(f"\n{i}. {title}")
                        content_parts.append(f" URL: {video_url}")
                        content_parts.append(f" Description: {description}...")
                        # Transcript lookup order: manual English, then
                        # auto-generated English, then whatever comes first.
                        try:
                            api = YouTubeTranscriptApi()
                            transcript_list = api.list(video_id)
                            try:
                                transcript = transcript_list.find_transcript(["en"])
                            except Exception:
                                try:
                                    transcript = (
                                        transcript_list.find_generated_transcript(
                                            ["en"]
                                        )
                                    )
                                except Exception:
                                    transcript = next(iter(transcript_list))
                            if transcript:
                                transcript_data = transcript.fetch()
                                text_parts = []
                                char_count = 0
                                # Stop collecting once we have >500 chars;
                                # the preview is then truncated to 500.
                                for entry in transcript_data:
                                    text = (
                                        entry.text.strip()
                                        if hasattr(entry, "text")
                                        else ""
                                    )
                                    if text:
                                        text_parts.append(text)
                                        char_count += len(text)
                                        if char_count > 500:
                                            break
                                if text_parts:
                                    preview = " ".join(text_parts)[:500]
                                    content_parts.append(
                                        f" Transcript Preview: {preview}..."
                                    )
                        except Exception:
                            content_parts.append(" Transcript: Not available")
                    except Exception as e:
                        # A single broken video doesn't abort the digest.
                        content_parts.append(f"\n{i}. Error loading video: {e!s}")
            except ImportError:
                for i, video_url in enumerate(video_urls, 1):
                    content_parts.append(f"\n{i}. {video_url}")
            content = "\n".join(content_parts)
        except Exception as e:
            raise ValueError(
                f"Unable to load YouTube channel {channel_url}: {e!s}"
            ) from e
        return LoaderResult(
            content=content,
            source=channel_url,
            metadata=metadata,
            doc_id=self.generate_doc_id(source_ref=channel_url, content=content),
        )

    @staticmethod
    def _extract_video_id(url: str) -> str | None:
        """Extract video ID from YouTube URL (watch/short/embed/v forms)."""
        patterns = [
            r"(?:youtube\.com\/watch\?v=|youtu\.be\/|youtube\.com\/embed\/|youtube\.com\/v\/)([^&\n?#]+)",
        ]
        for pattern in patterns:
            match = re.search(pattern, url)
            if match:
                return match.group(1)
        return None
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/rag/loaders/youtube_channel_loader.py",
"license": "MIT License",
"lines": 132,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/rag/loaders/youtube_video_loader.py | """YouTube video loader for extracting transcripts from YouTube videos."""
import re
from typing import Any
from urllib.parse import parse_qs, urlparse
from crewai_tools.rag.base_loader import BaseLoader, LoaderResult
from crewai_tools.rag.source_content import SourceContent
class YoutubeVideoLoader(BaseLoader):
    """Loader for YouTube videos.

    Fetches a transcript via youtube-transcript-api (preferring English) and,
    when pytube is importable, enriches the result with title/author metadata.
    """

    def load(self, source: SourceContent, **kwargs) -> LoaderResult:  # type: ignore[override]
        """Load and extract transcript from a YouTube video.

        Args:
            source: The source content containing the YouTube URL
            **kwargs: Unused; accepted for interface compatibility.

        Returns:
            LoaderResult with transcript content

        Raises:
            ImportError: If required YouTube libraries aren't installed
            ValueError: If the URL is not a valid YouTube video URL
        """
        try:
            from youtube_transcript_api import YouTubeTranscriptApi
        except ImportError as e:
            raise ImportError(
                "YouTube support requires youtube-transcript-api. "
                "Install with: uv add youtube-transcript-api"
            ) from e

        video_url = source.source
        video_id = self._extract_video_id(video_url)
        if not video_id:
            raise ValueError(f"Invalid YouTube URL: {video_url}")

        metadata: dict[str, Any] = {
            "source": video_url,
            "video_id": video_id,
            "data_type": "youtube_video",
        }

        try:
            api = YouTubeTranscriptApi()
            transcript_list = api.list(video_id)
            # Transcript preference order: manually-created English, then
            # auto-generated English, then the first transcript listed.
            try:
                transcript = transcript_list.find_transcript(["en"])
            except Exception:
                try:
                    transcript = transcript_list.find_generated_transcript(["en"])
                except Exception:
                    transcript = next(iter(transcript_list))
            if transcript:
                metadata["language"] = transcript.language
                metadata["is_generated"] = transcript.is_generated

                transcript_data = transcript.fetch()
                text_content = []
                for entry in transcript_data:
                    # Entries may be objects without a "text" attribute.
                    text = entry.text.strip() if hasattr(entry, "text") else ""
                    if text:
                        text_content.append(text)
                content = " ".join(text_content)

                # Best-effort metadata enrichment; any pytube failure is
                # deliberately swallowed so the transcript still loads.
                try:
                    from pytube import YouTube  # type: ignore[import-untyped]

                    yt = YouTube(video_url)
                    metadata["title"] = yt.title
                    metadata["author"] = yt.author
                    metadata["length_seconds"] = yt.length
                    metadata["description"] = (
                        yt.description[:500] if yt.description else None
                    )
                    if yt.title:
                        content = f"Title: {yt.title}\n\nAuthor: {yt.author or 'Unknown'}\n\nTranscript:\n{content}"
                except Exception:  # noqa: S110
                    pass
            else:
                raise ValueError(
                    f"No transcript available for YouTube video: {video_id}"
                )
        except Exception as e:
            # NOTE(review): this also re-wraps the ValueError raised just
            # above, nesting its message inside the generic one.
            raise ValueError(
                f"Unable to extract transcript from YouTube video {video_id}: {e!s}"
            ) from e

        return LoaderResult(
            content=content,
            source=video_url,
            metadata=metadata,
            doc_id=self.generate_doc_id(source_ref=video_url, content=content),
        )

    @staticmethod
    def _extract_video_id(url: str) -> str | None:
        """Extract video ID from various YouTube URL formats."""
        patterns = [
            r"(?:youtube\.com\/watch\?v=|youtu\.be\/|youtube\.com\/embed\/|youtube\.com\/v\/)([^&\n?#]+)",
        ]
        for pattern in patterns:
            match = re.search(pattern, url)
            if match:
                return match.group(1)
        # Fallback: accept any youtube.com host (or subdomain) or youtu.be
        # URL that carries the id in the ?v= query parameter.
        try:
            parsed = urlparse(url)
            hostname = parsed.hostname
            if hostname:
                hostname_lower = hostname.lower()
                # Allow youtube.com and any subdomain of youtube.com, plus youtu.be shortener
                if (
                    hostname_lower == "youtube.com"
                    or hostname_lower.endswith(".youtube.com")
                    or hostname_lower == "youtu.be"
                ):
                    query_params = parse_qs(parsed.query)
                    if "v" in query_params:
                        return query_params["v"][0]
        except Exception:  # noqa: S110
            pass
        return None
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/rag/loaders/youtube_video_loader.py",
"license": "MIT License",
"lines": 108,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/rag/misc.py | import hashlib
from typing import Any
def compute_sha256(content: str) -> str:
    """Return the hex-encoded SHA-256 digest of *content*.

    The string is UTF-8 encoded (Python's default) before hashing.
    """
    digest = hashlib.sha256()
    digest.update(content.encode())
    return digest.hexdigest()
def sanitize_metadata_for_chromadb(metadata: dict[str, Any]) -> dict[str, Any]:
    """Coerce metadata values into ChromaDB-compatible scalar types.

    ChromaDB metadata only supports str, int, float and bool values.
    Scalars (and None) pass through unchanged, lists/tuples are flattened
    into a pipe-separated string, and anything else is stringified.

    Args:
        metadata: Dictionary of metadata to sanitize

    Returns:
        Sanitized metadata dictionary with only ChromaDB-compatible types
    """
    cleaned: dict[str, Any] = {}
    for key, value in metadata.items():
        is_scalar = value is None or isinstance(value, (str, int, float, bool))
        if is_scalar:
            cleaned[key] = value
        elif isinstance(value, (list, tuple)):
            cleaned[key] = " | ".join(str(item) for item in value)
        else:
            cleaned[key] = str(value)
    return cleaned
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/rag/misc.py",
"license": "MIT License",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/rag/source_content.py | from __future__ import annotations
from functools import cached_property
import os
from pathlib import Path
from typing import TYPE_CHECKING
from urllib.parse import urlparse
from crewai_tools.rag.misc import compute_sha256
if TYPE_CHECKING:
from crewai_tools.rag.data_types import DataType
class SourceContent:
def __init__(self, source: str | Path):
self.source = str(source)
def is_url(self) -> bool:
if not isinstance(self.source, str):
return False
try:
parsed_url = urlparse(self.source)
return bool(parsed_url.scheme and parsed_url.netloc)
except Exception:
return False
def path_exists(self) -> bool:
return os.path.exists(self.source)
@cached_property
def data_type(self) -> DataType:
from crewai_tools.rag.data_types import DataTypes
return DataTypes.from_content(self.source)
@cached_property
def source_ref(self) -> str:
""" "
Returns the source reference for the content.
If the content is a URL or a local file, returns the source.
Otherwise, returns the hash of the content.
"""
if self.is_url() or self.path_exists():
return self.source
return compute_sha256(self.source)
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/rag/source_content.py",
"license": "MIT License",
"lines": 36,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/ai_mind_tool/ai_mind_tool.py | import os
import secrets
from typing import Any
from crewai.tools import BaseTool, EnvVar
from openai import OpenAI
from openai.types.chat import ChatCompletion
from pydantic import BaseModel, Field
class AIMindToolConstants:
    """Constants shared by the AI-Mind tool."""

    # Base URL of the Minds (OpenAI-compatible) API.
    MINDS_API_BASE_URL = "https://mdb.ai/"
    # Prefixes used when auto-generating Mind / datasource names.
    MIND_NAME_PREFIX = "crwai_mind_"
    DATASOURCE_NAME_PREFIX = "crwai_ds_"
class AIMindToolInputSchema(BaseModel):
    """Input for AIMind Tool."""

    # Natural-language question forwarded verbatim to the Mind.
    query: str = Field(description="Question in natural language to ask the AI-Mind")
class AIMindTool(BaseTool):
    """Query configured data sources in natural language through an AI-Mind.

    On construction this creates a Mind (and its datasources) via the Minds
    SDK; queries are then answered through the OpenAI-compatible Minds API.
    """

    name: str = "AIMind Tool"
    description: str = (
        "A wrapper around [AI-Minds](https://mindsdb.com/minds). "
        "Useful for when you need answers to questions from your data, stored in "
        "data sources including PostgreSQL, MySQL, MariaDB, ClickHouse, Snowflake "
        "and Google BigQuery. "
        "Input should be a question in natural language."
    )
    args_schema: type[BaseModel] = AIMindToolInputSchema
    # API key for the Minds service; falls back to the MINDS_API_KEY env var.
    api_key: str | None = None
    # Datasource specs; each dict must carry engine/description/connection_data/tables.
    datasources: list[dict[str, Any]] = Field(default_factory=list)
    # Name of the Mind created during __init__; required by _run.
    mind_name: str | None = None
    package_dependencies: list[str] = Field(default_factory=lambda: ["minds-sdk"])
    env_vars: list[EnvVar] = Field(
        default_factory=lambda: [
            EnvVar(
                name="MINDS_API_KEY", description="API key for AI-Minds", required=True
            ),
        ]
    )

    def __init__(self, api_key: str | None = None, **kwargs):
        """Create the Mind and its datasources on the Minds platform.

        Raises:
            ValueError: If no API key is available.
            ImportError: If the minds-sdk package is missing.
        """
        super().__init__(**kwargs)
        self.api_key = api_key or os.getenv("MINDS_API_KEY")
        if not self.api_key:
            raise ValueError(
                "API key must be provided either through constructor or MINDS_API_KEY environment variable"
            )

        try:
            from minds.client import Client  # type: ignore
            from minds.datasources import DatabaseConfig  # type: ignore
        except ImportError as e:
            raise ImportError(
                "`minds_sdk` package not found, please run `pip install minds-sdk`"
            ) from e

        minds_client = Client(api_key=self.api_key)

        # Convert the datasources to DatabaseConfig objects.
        # NOTE(review): the prefixes already end with "_", so the generated
        # names contain a double underscore (e.g. "crwai_ds__ab12cd") —
        # confirm this is intended.
        datasources = []
        for datasource in self.datasources:
            config = DatabaseConfig(
                name=f"{AIMindToolConstants.DATASOURCE_NAME_PREFIX}_{secrets.token_hex(5)}",
                engine=datasource["engine"],
                description=datasource["description"],
                connection_data=datasource["connection_data"],
                tables=datasource["tables"],
            )
            datasources.append(config)

        # Generate a random name for the Mind.
        name = f"{AIMindToolConstants.MIND_NAME_PREFIX}_{secrets.token_hex(5)}"

        mind = minds_client.minds.create(
            name=name, datasources=datasources, replace=True
        )

        self.mind_name = mind.name

    def _run(self, query: str) -> str | None:
        """Send *query* to the Mind and return the answer text (may be None)."""
        # Run the query on the AI-Mind.
        # The Minds API is OpenAI compatible and therefore, the OpenAI client can be used.
        openai_client = OpenAI(
            base_url=AIMindToolConstants.MINDS_API_BASE_URL, api_key=self.api_key
        )

        if self.mind_name is None:
            raise ValueError("Mind name is not set.")

        completion = openai_client.chat.completions.create(
            model=self.mind_name,
            messages=[{"role": "user", "content": query}],
            stream=False,
        )
        if not isinstance(completion, ChatCompletion):
            raise ValueError("Invalid response from AI-Mind")
        return completion.choices[0].message.content
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/ai_mind_tool/ai_mind_tool.py",
"license": "MIT License",
"lines": 83,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/apify_actors_tool/apify_actors_tool.py | from __future__ import annotations
import os
from typing import TYPE_CHECKING, Any
from crewai.tools import BaseTool, EnvVar
from pydantic import Field
if TYPE_CHECKING:
from langchain_apify import ApifyActorsTool as _ApifyActorsTool
class ApifyActorsTool(BaseTool):
    env_vars: list[EnvVar] = Field(
        default_factory=lambda: [
            EnvVar(
                name="APIFY_API_TOKEN",
                description="API token for Apify platform access",
                required=True,
            ),
        ]
    )
    # NOTE(review): the string below is NOT the class docstring (it does not
    # appear first in the class body, so __doc__ stays None). Left in place
    # here to avoid changing runtime behavior.
    """Tool that runs Apify Actors.

    To use, you should have the environment variable `APIFY_API_TOKEN` set
    with your API key.

    For details, see https://docs.apify.com/platform/integrations/crewai

    Args:
        actor_name (str): The name of the Apify Actor to run.
        *args: Variable length argument list passed to BaseTool.
        **kwargs: Arbitrary keyword arguments passed to BaseTool.

    Returns:
        List[Dict[str, Any]]: Results from the Actor execution.

    Raises:
        ValueError: If `APIFY_API_TOKEN` is not set or if the tool is not initialized.
        ImportError: If `langchain_apify` package is not installed.

    Example:
        .. code-block:: python

            from crewai_tools import ApifyActorsTool

            tool = ApifyActorsTool(actor_name="apify/rag-web-browser")
            results = tool.run(run_input={"query": "What is CrewAI?", "maxResults": 5})
            for result in results:
                print(f"URL: {result['metadata']['url']}")
                print(f"Content: {result.get('markdown', 'N/A')[:100]}...")
    """

    # Wrapped langchain tool instance that performs the actual Actor run.
    actor_tool: _ApifyActorsTool = Field(description="Apify Actor Tool")
    package_dependencies: list[str] = Field(default_factory=lambda: ["langchain-apify"])

    def __init__(self, actor_name: str, *args: Any, **kwargs: Any) -> None:
        """Validate the token, build the langchain tool, and mirror its
        name/description/args_schema onto this wrapper."""
        if not os.environ.get("APIFY_API_TOKEN"):
            msg = (
                "APIFY_API_TOKEN environment variable is not set. "
                "Please set it to your API key, to learn how to get it, "
                "see https://docs.apify.com/platform/integrations/api"
            )
            raise ValueError(msg)
        try:
            from langchain_apify import ApifyActorsTool as _ApifyActorsTool
        except ImportError as e:
            raise ImportError(
                "Could not import langchain_apify python package. "
                "Please install it with `pip install langchain-apify` or `uv add langchain-apify`."
            ) from e

        actor_tool = _ApifyActorsTool(actor_name)
        # Adopt the wrapped tool's identity so agents see the Actor's own
        # name/description/schema.
        kwargs.update(
            {
                "name": actor_tool.name,
                "description": actor_tool.description,
                "args_schema": actor_tool.args_schema,
                "actor_tool": actor_tool,
            }
        )
        super().__init__(*args, **kwargs)

    def _run(self, run_input: dict[str, Any]) -> list[dict[str, Any]]:
        """Run the Actor tool with the given input.

        Returns:
            List[Dict[str, Any]]: Results from the Actor execution.

        Raises:
            ValueError: If 'actor_tool' is not initialized.
        """
        try:
            return self.actor_tool._run(run_input)
        except Exception as e:
            msg = (
                f"Failed to run ApifyActorsTool {self.name}. "
                "Please check your Apify account Actor run logs for more details."
                f"Error: {e}"
            )
            raise RuntimeError(msg) from e
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/apify_actors_tool/apify_actors_tool.py",
"license": "MIT License",
"lines": 82,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/arxiv_paper_tool/arxiv_paper_tool.py | import logging
from pathlib import Path
import re
import time
from typing import ClassVar
import urllib.error
import urllib.parse
import urllib.request
import xml.etree.ElementTree as ET
from crewai.tools import BaseTool, EnvVar
from pydantic import BaseModel, ConfigDict, Field
logger = logging.getLogger(__file__)
class ArxivToolInput(BaseModel):
    """Validated arguments for ArxivPaperTool."""

    # Free-text query forwarded to the arXiv search API.
    search_query: str = Field(
        ..., description="Search query for Arxiv, e.g., 'transformer neural network'"
    )
    # Bounded to the 1-100 range enforced by the schema.
    max_results: int = Field(
        5, ge=1, le=100, description="Max results to fetch; must be between 1 and 100"
    )
class ArxivPaperTool(BaseTool):
    """Fetch paper metadata from the arXiv Atom API and optionally download PDFs.

    Queries ``http://export.arxiv.org/api/query``, parses the Atom feed, and
    returns one formatted text summary per paper. When ``download_pdfs`` is
    True, each paper's PDF is saved under ``save_dir``.
    """

    BASE_API_URL: ClassVar[str] = "http://export.arxiv.org/api/query"
    # Pause between consecutive PDF downloads (seconds).
    SLEEP_DURATION: ClassVar[int] = 1
    SUMMARY_TRUNCATE_LENGTH: ClassVar[int] = 300
    ATOM_NAMESPACE: ClassVar[str] = "{http://www.w3.org/2005/Atom}"
    REQUEST_TIMEOUT: ClassVar[int] = 10

    name: str = "Arxiv Paper Fetcher and Downloader"
    description: str = "Fetches metadata from Arxiv based on a search query and optionally downloads PDFs."
    args_schema: type[BaseModel] = ArxivToolInput
    model_config = ConfigDict(extra="allow")
    package_dependencies: list[str] = Field(default_factory=lambda: ["pydantic"])
    env_vars: list[EnvVar] = Field(default_factory=list)
    # Instance-level behavior switches.
    download_pdfs: bool = False
    save_dir: str = "./arxiv_pdfs"
    use_title_as_filename: bool = False

    def _run(self, search_query: str, max_results: int = 5) -> str:
        """Fetch metadata (and optionally PDFs) and return a text report.

        Args:
            search_query: Free-text arXiv query.
            max_results: Number of results to request (validated to 1-100).

        Returns:
            A human-readable report of the papers, or an error message
            string if anything fails (this method never raises).
        """
        try:
            args = ArxivToolInput(search_query=search_query, max_results=max_results)
            logger.info(
                f"Running Arxiv tool: query='{args.search_query}', max_results={args.max_results}, "
                f"download_pdfs={self.download_pdfs}, save_dir='{self.save_dir}', "
                f"use_title_as_filename={self.use_title_as_filename}"
            )
            papers = self.fetch_arxiv_data(args.search_query, args.max_results)

            if self.download_pdfs:
                save_dir = self._validate_save_path(self.save_dir)
                for paper in papers:
                    if paper["pdf_url"]:
                        if self.use_title_as_filename:
                            # Strip characters that are illegal in filenames.
                            safe_title = re.sub(
                                r'[\\/*?:"<>|]', "_", paper["title"]
                            ).strip()
                            filename_base = safe_title or paper["arxiv_id"]
                        else:
                            filename_base = paper["arxiv_id"]
                        filename = f"{filename_base[:500]}.pdf"
                        save_path = Path(save_dir) / filename
                        self.download_pdf(paper["pdf_url"], save_path)
                        time.sleep(self.SLEEP_DURATION)

            results = [self._format_paper_result(p) for p in papers]
            # Bug fix: the previous expression
            #   "\n\n" + "-" * 80 + "\n\n".join(results)
            # bound ``.join`` only to the trailing "\n\n" (operator
            # precedence), so papers were separated by a bare blank line and
            # the dashed rule appeared once, glued to the first paper.
            # Join with the full dashed separator instead.
            separator = "\n\n" + "-" * 80 + "\n\n"
            return separator + separator.join(results)
        except Exception as e:
            logger.error(f"ArxivTool Error: {e!s}")
            return f"Failed to fetch or download Arxiv papers: {e!s}"

    def fetch_arxiv_data(self, search_query: str, max_results: int) -> list[dict]:
        """Query the arXiv Atom API and parse entries into plain dicts.

        Returns:
            One dict per entry with keys arxiv_id/title/summary/authors/
            published_date/pdf_url.

        Raises:
            urllib.error.URLError: On network failure.
            Exception: On a non-200 HTTP response.
        """
        api_url = f"{self.BASE_API_URL}?search_query={urllib.parse.quote(search_query)}&start=0&max_results={max_results}"
        logger.info(f"Fetching data from Arxiv API: {api_url}")
        try:
            with urllib.request.urlopen(  # noqa: S310
                api_url, timeout=self.REQUEST_TIMEOUT
            ) as response:
                if response.status != 200:
                    raise Exception(f"HTTP {response.status}: {response.reason}")
                data = response.read().decode("utf-8")
        except urllib.error.URLError as e:
            logger.error(f"Error fetching data from Arxiv: {e}")
            raise

        root = ET.fromstring(data)  # noqa: S314
        papers = []
        for entry in root.findall(self.ATOM_NAMESPACE + "entry"):
            raw_id = self._get_element_text(entry, "id")
            # Atom ids look like "http://arxiv.org/abs/2101.00001v1": keep the
            # last path segment and make it filename-safe.
            arxiv_id = raw_id.split("/")[-1].replace(".", "_") if raw_id else "unknown"
            title = self._get_element_text(entry, "title") or "No Title"
            summary = self._get_element_text(entry, "summary") or "No Summary"
            published = self._get_element_text(entry, "published") or "No Publish Date"
            authors = [
                self._get_element_text(author, "name") or "Unknown"
                for author in entry.findall(self.ATOM_NAMESPACE + "author")
            ]
            pdf_url = self._extract_pdf_url(entry)
            papers.append(
                {
                    "arxiv_id": arxiv_id,
                    "title": title,
                    "summary": summary,
                    "authors": authors,
                    "published_date": published,
                    "pdf_url": pdf_url,
                }
            )
        return papers

    @staticmethod
    def _get_element_text(entry: ET.Element, element_name: str) -> str | None:
        """Return the stripped text of a namespaced child element, or None."""
        elem = entry.find(f"{ArxivPaperTool.ATOM_NAMESPACE}{element_name}")
        return elem.text.strip() if elem is not None and elem.text else None

    def _extract_pdf_url(self, entry: ET.Element) -> str | None:
        """Find the PDF link of an entry.

        Prefers the link whose title is "pdf"; falls back to any href
        containing "pdf".
        """
        for link in entry.findall(self.ATOM_NAMESPACE + "link"):
            if link.attrib.get("title", "").lower() == "pdf":
                return link.attrib.get("href")
        for link in entry.findall(self.ATOM_NAMESPACE + "link"):
            href = link.attrib.get("href")
            if href and "pdf" in href:
                return href
        return None

    def _format_paper_result(self, paper: dict) -> str:
        """Render one parsed paper dict as a readable text block."""
        summary = (
            (paper["summary"][: self.SUMMARY_TRUNCATE_LENGTH] + "...")
            if len(paper["summary"]) > self.SUMMARY_TRUNCATE_LENGTH
            else paper["summary"]
        )
        authors_str = ", ".join(paper["authors"])
        return (
            f"Title: {paper['title']}\n"
            f"Authors: {authors_str}\n"
            f"Published: {paper['published_date']}\n"
            f"PDF: {paper['pdf_url'] or 'N/A'}\n"
            f"Summary: {summary}"
        )

    @staticmethod
    def _validate_save_path(path: str) -> Path:
        """Resolve *path* and create the directory tree if needed."""
        save_path = Path(path).resolve()
        save_path.mkdir(parents=True, exist_ok=True)
        return save_path

    def download_pdf(self, pdf_url: str, save_path: str | Path) -> None:
        """Download *pdf_url* to *save_path*, logging and re-raising errors.

        The annotation was widened to ``str | Path`` — _run passes a Path.
        """
        try:
            logger.info(f"Downloading PDF from {pdf_url} to {save_path}")
            urllib.request.urlretrieve(pdf_url, str(save_path))  # noqa: S310
            logger.info(f"PDF saved: {save_path}")
        except urllib.error.URLError as e:
            logger.error(f"Network error occurred while downloading {pdf_url}: {e}")
            raise
        except OSError as e:
            logger.error(f"File save error for {save_path}: {e}")
            raise
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/arxiv_paper_tool/arxiv_paper_tool.py",
"license": "MIT License",
"lines": 143,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/brave_search_tool/brave_search_tool.py | from datetime import datetime
import json
import os
import time
from typing import Annotated, Any, ClassVar, Literal
from crewai.tools import BaseTool, EnvVar
from dotenv import load_dotenv
from pydantic import BaseModel, Field
from pydantic.types import StringConstraints
import requests
load_dotenv()
def _save_results_to_file(content: str) -> None:
    """Save the search results to a timestamped text file in the CWD.

    Bug fix: the file is now written with an explicit UTF-8 encoding —
    previously the platform default was used, which can raise
    UnicodeEncodeError for non-ASCII titles/snippets (e.g. on Windows
    cp1252).

    Args:
        content: The serialized search results to persist.
    """
    filename = f"search_results_{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}.txt"
    with open(filename, "w", encoding="utf-8") as file:
        file.write(content)
# Freshness presets accepted by the Brave API: past day/week/month/year.
FreshnessPreset = Literal["pd", "pw", "pm", "py"]
# Explicit date-range form, e.g. "2024-01-01to2024-02-01".
FreshnessRange = Annotated[
    str, StringConstraints(pattern=r"^\d{4}-\d{2}-\d{2}to\d{4}-\d{2}-\d{2}$")
]
Freshness = FreshnessPreset | FreshnessRange
SafeSearch = Literal["off", "moderate", "strict"]
class BraveSearchToolSchema(BaseModel):
    """Input for BraveSearchTool.

    Fields mirror the Brave web-search API query parameters; every field
    except ``query`` is optional and omitted from the request when None.
    """

    query: str = Field(..., description="Search query to perform")
    country: str | None = Field(
        default=None,
        description="Country code for geo-targeting (e.g., 'US', 'BR').",
    )
    search_language: str | None = Field(
        default=None,
        description="Language code for the search results (e.g., 'en', 'es').",
    )
    count: int | None = Field(
        default=None,
        description="The maximum number of results to return. Actual number may be less.",
    )
    offset: int | None = Field(
        default=None, description="Skip the first N result sets/pages. Max is 9."
    )
    safesearch: SafeSearch | None = Field(
        default=None,
        description="Filter out explicit content. Options: off/moderate/strict",
    )
    spellcheck: bool | None = Field(
        default=None,
        description="Attempt to correct spelling errors in the search query.",
    )
    freshness: Freshness | None = Field(
        default=None,
        description="Enforce freshness of results. Options: pd/pw/pm/py, or YYYY-MM-DDtoYYYY-MM-DD",
    )
    text_decorations: bool | None = Field(
        default=None,
        description="Include markup to highlight search terms in the results.",
    )
    extra_snippets: bool | None = Field(
        default=None,
        description="Include up to 5 text snippets for each page if possible.",
    )
    operators: bool | None = Field(
        default=None,
        description="Whether to apply search operators (e.g., site:example.com).",
    )
# TODO: Extend support to additional endpoints (e.g., /images, /news, etc.)
class BraveSearchTool(BaseTool):
    """A tool that performs web searches using the Brave Search API."""

    name: str = "Brave Search"
    description: str = (
        "A tool that performs web searches using the Brave Search API. "
        "Results are returned as structured JSON data."
    )
    args_schema: type[BaseModel] = BraveSearchToolSchema
    # Brave "web" endpoint; other endpoints (images, news, ...) are not wired up yet.
    search_url: str = "https://api.search.brave.com/res/v1/web/search"
    # Legacy default for the number of results, used when "count" is not given.
    n_results: int = 10
    # When True, the raw JSON is also written to a timestamped local file.
    save_file: bool = False
    env_vars: list[EnvVar] = Field(
        default_factory=lambda: [
            EnvVar(
                name="BRAVE_API_KEY",
                description="API key for Brave Search",
                required=True,
            ),
        ]
    )

    # Rate limiting parameters
    _last_request_time: ClassVar[float] = 0
    _min_request_interval: ClassVar[float] = 1.0  # seconds

    def __init__(self, *args, **kwargs):
        """Fail fast when the BRAVE_API_KEY environment variable is missing."""
        super().__init__(*args, **kwargs)
        if "BRAVE_API_KEY" not in os.environ:
            raise ValueError(
                "BRAVE_API_KEY environment variable is required for BraveSearchTool"
            )

    def _run(
        self,
        **kwargs: Any,
    ) -> Any:
        """Perform a Brave web search.

        Accepts the BraveSearchToolSchema fields as keyword arguments
        ("search_query" is honored as a legacy alias for "query").

        Returns:
            A JSON string of result items (url/title/description/snippets),
            or an error message string on request/parse failure.
        """
        # Class-level throttle: sleep until at least _min_request_interval
        # seconds have elapsed since the previous request (shared across
        # all instances via the ClassVar).
        current_time = time.time()
        if (current_time - self._last_request_time) < self._min_request_interval:
            time.sleep(
                self._min_request_interval - (current_time - self._last_request_time)
            )
        BraveSearchTool._last_request_time = time.time()

        # Construct and send the request
        try:
            # Maintain both "search_query" and "query" for backwards compatibility
            query = kwargs.get("search_query") or kwargs.get("query")
            if not query:
                raise ValueError("Query is required")
            payload: dict[str, Any] = {"q": query}
            if country := kwargs.get("country"):
                payload["country"] = country
            if search_language := kwargs.get("search_language"):
                payload["search_language"] = search_language
            # Fallback to deprecated n_results parameter if no count is provided
            count = kwargs.get("count")
            if count is not None:
                payload["count"] = count
            else:
                payload["count"] = self.n_results
            # Offset may be 0, so avoid truthiness check
            offset = kwargs.get("offset")
            if offset is not None:
                payload["offset"] = offset
            if safesearch := kwargs.get("safesearch"):
                payload["safesearch"] = safesearch
            save_file = kwargs.get("save_file", self.save_file)
            if freshness := kwargs.get("freshness"):
                payload["freshness"] = freshness

            # Boolean parameters
            spellcheck = kwargs.get("spellcheck")
            if spellcheck is not None:
                payload["spellcheck"] = spellcheck
            text_decorations = kwargs.get("text_decorations")
            if text_decorations is not None:
                payload["text_decorations"] = text_decorations
            extra_snippets = kwargs.get("extra_snippets")
            if extra_snippets is not None:
                payload["extra_snippets"] = extra_snippets
            operators = kwargs.get("operators")
            if operators is not None:
                payload["operators"] = operators

            # Limit the result types to "web" since there is presently no
            # handling of other types like "discussions", "faq", "infobox",
            # "news", "videos", or "locations".
            payload["result_filter"] = "web"

            # Setup Request Headers
            headers = {
                "X-Subscription-Token": os.environ["BRAVE_API_KEY"],
                "Accept": "application/json",
            }

            response = requests.get(
                self.search_url, headers=headers, params=payload, timeout=30
            )
            response.raise_for_status()  # Handle non-200 responses
            results = response.json()

            # TODO: Handle other result types like "discussions", "faq", etc.
            web_results_items = []
            if "web" in results:
                web_results = results["web"]["results"]
                for result in web_results:
                    url = result.get("url")
                    title = result.get("title")
                    # If, for whatever reason, this entry does not have a title
                    # or url, skip it.
                    if not url or not title:
                        continue
                    item = {
                        "url": url,
                        "title": title,
                    }
                    description = result.get("description")
                    if description:
                        item["description"] = description
                    snippets = result.get("extra_snippets")
                    if snippets:
                        item["snippets"] = snippets
                    web_results_items.append(item)
            content = json.dumps(web_results_items)
        except requests.RequestException as e:
            return f"Error performing search: {e!s}"
        except KeyError as e:
            return f"Error parsing search results: {e!s}"

        if save_file:
            _save_results_to_file(content)
            return f"\nSearch results: {content}\n"
        return content
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/brave_search_tool/brave_search_tool.py",
"license": "MIT License",
"lines": 188,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/brightdata_tool/brightdata_dataset.py | import asyncio
import os
from typing import Any
import aiohttp
from crewai.tools import BaseTool, EnvVar
from pydantic import BaseModel, Field
class BrightDataConfig(BaseModel):
    """Connection and polling settings for the Bright Data API."""

    API_URL: str = "https://api.brightdata.com"
    # Maximum seconds to wait for a dataset job overall.
    DEFAULT_TIMEOUT: int = 600
    # Seconds between job-status polls.
    DEFAULT_POLLING_INTERVAL: int = 1

    @classmethod
    def from_env(cls):
        """Build a config from BRIGHTDATA_* environment variables, falling
        back to the defaults above when a variable is unset."""
        return cls(
            API_URL=os.environ.get("BRIGHTDATA_API_URL", "https://api.brightdata.com"),
            DEFAULT_TIMEOUT=int(os.environ.get("BRIGHTDATA_DEFAULT_TIMEOUT", "600")),
            DEFAULT_POLLING_INTERVAL=int(
                os.environ.get("BRIGHTDATA_DEFAULT_POLLING_INTERVAL", "1")
            ),
        )
class BrightDataDatasetToolException(Exception):  # noqa: N818
    """Raised for custom errors in the Bright Data dataset tool.

    Carries a human-readable *message* plus an application *error_code*
    for programmatic handling.
    """

    def __init__(self, message, error_code):
        super().__init__(message)
        # Keep both pieces on the instance so callers can inspect them.
        self.message = message
        self.error_code = error_code

    def __str__(self):
        return f"{self.message} (Error Code: {self.error_code})"
class BrightDataDatasetToolSchema(BaseModel):
    """Schema for validating input parameters for the BrightDataDatasetTool.

    Attributes:
        dataset_type (str): Required Bright Data Dataset Type used to specify which dataset to access.
        format (str): Response format (json by default). Multiple formats exist - json, ndjson, jsonl, csv
        url (str): The URL from which structured data needs to be extracted.
        zipcode (Optional[str]): An optional ZIP code to narrow down the data geographically.
        additional_params (Optional[Dict]): Extra parameters for the Bright Data API call.
    """

    dataset_type: str = Field(..., description="The Bright Data Dataset Type")
    # NOTE: "format" shadows the builtin but is part of the public schema.
    format: str | None = Field(
        default="json", description="Response format (json by default)"
    )
    url: str = Field(..., description="The URL to extract data from")
    zipcode: str | None = Field(default=None, description="Optional zipcode")
    additional_params: dict[str, Any] | None = Field(
        default=None, description="Additional params if any"
    )
# Module-level configuration, resolved once at import time from the
# environment; BRIGHTDATA_API_URL and timeout are reused across the module.
config = BrightDataConfig.from_env()
BRIGHTDATA_API_URL = config.API_URL
timeout = config.DEFAULT_TIMEOUT
datasets = [
{
"id": "amazon_product",
"dataset_id": "gd_l7q7dkf244hwjntr0",
"description": "\n".join(
[
"Quickly read structured amazon product data.",
"Requires a valid product URL with /dp/ in it.",
"This can be a cache lookup, so it can be more reliable than scraping",
]
),
"inputs": ["url"],
},
{
"id": "amazon_product_reviews",
"dataset_id": "gd_le8e811kzy4ggddlq",
"description": "\n".join(
[
"Quickly read structured amazon product review data.",
"Requires a valid product URL with /dp/ in it.",
"This can be a cache lookup, so it can be more reliable than scraping",
]
),
"inputs": ["url"],
},
{
"id": "amazon_product_search",
"dataset_id": "gd_lwdb4vjm1ehb499uxs",
"description": "\n".join(
[
"Quickly read structured amazon product search data.",
"Requires a valid search keyword and amazon domain URL.",
"This can be a cache lookup, so it can be more reliable than scraping",
]
),
"inputs": ["keyword", "url", "pages_to_search"],
"defaults": {"pages_to_search": "1"},
},
{
"id": "walmart_product",
"dataset_id": "gd_l95fol7l1ru6rlo116",
"description": "\n".join(
[
"Quickly read structured walmart product data.",
"Requires a valid product URL with /ip/ in it.",
"This can be a cache lookup, so it can be more reliable than scraping",
]
),
"inputs": ["url"],
},
{
"id": "walmart_seller",
"dataset_id": "gd_m7ke48w81ocyu4hhz0",
"description": "\n".join(
[
"Quickly read structured walmart seller data.",
"Requires a valid walmart seller URL.",
"This can be a cache lookup, so it can be more reliable than scraping",
]
),
"inputs": ["url"],
},
{
"id": "ebay_product",
"dataset_id": "gd_ltr9mjt81n0zzdk1fb",
"description": "\n".join(
[
"Quickly read structured ebay product data.",
"Requires a valid ebay product URL.",
"This can be a cache lookup, so it can be more reliable than scraping",
]
),
"inputs": ["url"],
},
{
"id": "homedepot_products",
"dataset_id": "gd_lmusivh019i7g97q2n",
"description": "\n".join(
[
"Quickly read structured homedepot product data.",
"Requires a valid homedepot product URL.",
"This can be a cache lookup, so it can be more reliable than scraping",
]
),
"inputs": ["url"],
},
{
"id": "zara_products",
"dataset_id": "gd_lct4vafw1tgx27d4o0",
"description": "\n".join(
[
"Quickly read structured zara product data.",
"Requires a valid zara product URL.",
"This can be a cache lookup, so it can be more reliable than scraping",
]
),
"inputs": ["url"],
},
{
"id": "etsy_products",
"dataset_id": "gd_ltppk0jdv1jqz25mz",
"description": "\n".join(
[
"Quickly read structured etsy product data.",
"Requires a valid etsy product URL.",
"This can be a cache lookup, so it can be more reliable than scraping",
]
),
"inputs": ["url"],
},
{
"id": "bestbuy_products",
"dataset_id": "gd_ltre1jqe1jfr7cccf",
"description": "\n".join(
[
"Quickly read structured bestbuy product data.",
"Requires a valid bestbuy product URL.",
"This can be a cache lookup, so it can be more reliable than scraping",
]
),
"inputs": ["url"],
},
{
"id": "linkedin_person_profile",
"dataset_id": "gd_l1viktl72bvl7bjuj0",
"description": "\n".join(
[
"Quickly read structured linkedin people profile data.",
"This can be a cache lookup, so it can be more reliable than scraping",
]
),
"inputs": ["url"],
},
{
"id": "linkedin_company_profile",
"dataset_id": "gd_l1vikfnt1wgvvqz95w",
"description": "\n".join(
[
"Quickly read structured linkedin company profile data",
"This can be a cache lookup, so it can be more reliable than scraping",
]
),
"inputs": ["url"],
},
{
"id": "linkedin_job_listings",
"dataset_id": "gd_lpfll7v5hcqtkxl6l",
"description": "\n".join(
[
"Quickly read structured linkedin job listings data",
"This can be a cache lookup, so it can be more reliable than scraping",
]
),
"inputs": ["url"],
},
{
"id": "linkedin_posts",
"dataset_id": "gd_lyy3tktm25m4avu764",
"description": "\n".join(
[
"Quickly read structured linkedin posts data",
"This can be a cache lookup, so it can be more reliable than scraping",
]
),
"inputs": ["url"],
},
{
"id": "linkedin_people_search",
"dataset_id": "gd_m8d03he47z8nwb5xc",
"description": "\n".join(
[
"Quickly read structured linkedin people search data",
"This can be a cache lookup, so it can be more reliable than scraping",
]
),
"inputs": ["url", "first_name", "last_name"],
},
{
"id": "crunchbase_company",
"dataset_id": "gd_l1vijqt9jfj7olije",
"description": "\n".join(
[
"Quickly read structured crunchbase company data",
"This can be a cache lookup, so it can be more reliable than scraping",
]
),
"inputs": ["url"],
},
{
"id": "zoominfo_company_profile",
"dataset_id": "gd_m0ci4a4ivx3j5l6nx",
"description": "\n".join(
[
"Quickly read structured ZoomInfo company profile data.",
"Requires a valid ZoomInfo company URL.",
"This can be a cache lookup, so it can be more reliable than scraping",
]
),
"inputs": ["url"],
},
{
"id": "instagram_profiles",
"dataset_id": "gd_l1vikfch901nx3by4",
"description": "\n".join(
[
"Quickly read structured Instagram profile data.",
"Requires a valid Instagram URL.",
"This can be a cache lookup, so it can be more reliable than scraping",
]
),
"inputs": ["url"],
},
{
"id": "instagram_posts",
"dataset_id": "gd_lk5ns7kz21pck8jpis",
"description": "\n".join(
[
"Quickly read structured Instagram post data.",
"Requires a valid Instagram URL.",
"This can be a cache lookup, so it can be more reliable than scraping",
]
),
"inputs": ["url"],
},
{
"id": "instagram_reels",
"dataset_id": "gd_lyclm20il4r5helnj",
"description": "\n".join(
[
"Quickly read structured Instagram reel data.",
"Requires a valid Instagram URL.",
"This can be a cache lookup, so it can be more reliable than scraping",
]
),
"inputs": ["url"],
},
{
"id": "instagram_comments",
"dataset_id": "gd_ltppn085pokosxh13",
"description": "\n".join(
[
"Quickly read structured Instagram comments data.",
"Requires a valid Instagram URL.",
"This can be a cache lookup, so it can be more reliable than scraping",
]
),
"inputs": ["url"],
},
{
"id": "facebook_posts",
"dataset_id": "gd_lyclm1571iy3mv57zw",
"description": "\n".join(
[
"Quickly read structured Facebook post data.",
"Requires a valid Facebook post URL.",
"This can be a cache lookup, so it can be more reliable than scraping",
]
),
"inputs": ["url"],
},
{
"id": "facebook_marketplace_listings",
"dataset_id": "gd_lvt9iwuh6fbcwmx1a",
"description": "\n".join(
[
"Quickly read structured Facebook marketplace listing data.",
"Requires a valid Facebook marketplace listing URL.",
"This can be a cache lookup, so it can be more reliable than scraping",
]
),
"inputs": ["url"],
},
{
"id": "facebook_company_reviews",
"dataset_id": "gd_m0dtqpiu1mbcyc2g86",
"description": "\n".join(
[
"Quickly read structured Facebook company reviews data.",
"Requires a valid Facebook company URL and number of reviews.",
"This can be a cache lookup, so it can be more reliable than scraping",
]
),
"inputs": ["url", "num_of_reviews"],
},
{
"id": "facebook_events",
"dataset_id": "gd_m14sd0to1jz48ppm51",
"description": "\n".join(
[
"Quickly read structured Facebook events data.",
"Requires a valid Facebook event URL.",
"This can be a cache lookup, so it can be more reliable than scraping",
]
),
"inputs": ["url"],
},
{
"id": "tiktok_profiles",
"dataset_id": "gd_l1villgoiiidt09ci",
"description": "\n".join(
[
"Quickly read structured Tiktok profiles data.",
"Requires a valid Tiktok profile URL.",
"This can be a cache lookup, so it can be more reliable than scraping",
]
),
"inputs": ["url"],
},
{
"id": "tiktok_posts",
"dataset_id": "gd_lu702nij2f790tmv9h",
"description": "\n".join(
[
"Quickly read structured Tiktok post data.",
"Requires a valid Tiktok post URL.",
"This can be a cache lookup, so it can be more reliable than scraping",
]
),
"inputs": ["url"],
},
{
"id": "tiktok_shop",
"dataset_id": "gd_m45m1u911dsa4274pi",
"description": "\n".join(
[
"Quickly read structured Tiktok shop data.",
"Requires a valid Tiktok shop product URL.",
"This can be a cache lookup...",
]
),
"inputs": ["url"],
},
]
class BrightDataDatasetTool(BaseTool):
    """CrewAI-compatible tool for scraping structured data using Bright Data Datasets.

    Triggers a Bright Data dataset collection job for one of the dataset types
    declared in the module-level ``datasets`` registry, polls the job until it
    finishes, then downloads and returns the resulting snapshot.

    Attributes:
        name (str): Tool name displayed in the CrewAI environment.
        description (str): Tool description shown to agents or users.
        args_schema (Type[BaseModel]): Pydantic schema for validating input arguments.
    """

    name: str = "Bright Data Dataset Tool"
    description: str = "Scrapes structured data using Bright Data Dataset API from a URL and optional input parameters"
    args_schema: type[BaseModel] = BrightDataDatasetToolSchema
    # Constructor-time defaults; per-call arguments passed to _run() override these.
    dataset_type: str | None = None
    url: str | None = None
    format: str = "json"
    zipcode: str | None = None
    additional_params: dict[str, Any] | None = None
    env_vars: list[EnvVar] = Field(
        default_factory=lambda: [
            EnvVar(
                name="BRIGHT_DATA_API_KEY",
                description="API key for Bright Data",
                required=True,
            ),
        ]
    )

    def __init__(
        self,
        dataset_type: str | None = None,
        url: str | None = None,
        format: str = "json",
        zipcode: str | None = None,
        additional_params: dict[str, Any] | None = None,
        **kwargs: Any,
    ):
        """Store per-instance defaults that _run() falls back to when a call omits them."""
        super().__init__(**kwargs)
        self.dataset_type = dataset_type
        self.url = url
        self.format = format
        self.zipcode = zipcode
        self.additional_params = additional_params

    def filter_dataset_by_id(self, target_id: str) -> list[dict[str, Any]]:
        """Return entries of the module-level ``datasets`` registry whose "id" matches."""
        return [dataset for dataset in datasets if dataset["id"] == target_id]

    async def get_dataset_data_async(
        self,
        dataset_type: str,
        output_format: str,
        url: str,
        zipcode: str | None = None,
        additional_params: dict[str, Any] | None = None,
        polling_interval: int = 1,
    ) -> str:
        """Asynchronously trigger and poll Bright Data dataset scraping.

        Args:
            dataset_type (str): Dataset type id (an "id" from the ``datasets`` registry).
            output_format (str): Snapshot format requested from the API (e.g. "json", "csv").
            url (str): Target URL to scrape.
            zipcode (Optional[str]): Optional ZIP code for geo-specific data.
            additional_params (Optional[Dict]): Extra API parameters merged into the request.
            polling_interval (int): Time interval in seconds between polling attempts.

        Returns:
            str: Raw snapshot response body in the requested format.

        Raises:
            ValueError: If ``dataset_type`` has no matching registry entry.
            BrightDataDatasetToolException: If any API step fails or the job fails.
            TimeoutError: If polling times out before job completion.
        """
        request_data: dict[str, Any] = {"url": url}
        if zipcode is not None:
            request_data["zipcode"] = zipcode
        # Set additional parameters dynamically depending upon the dataset that is being requested
        if additional_params:
            request_data.update(additional_params)
        api_key = os.getenv("BRIGHT_DATA_API_KEY")
        headers = {
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json",
        }
        dataset_id = ""
        dataset = self.filter_dataset_by_id(dataset_type)
        # Exactly one registry entry must match; anything else is an unknown type.
        if len(dataset) == 1:
            dataset_id = dataset[0]["dataset_id"]
        else:
            raise ValueError(
                f"Unable to find the dataset for {dataset_type}. Please make sure to pass a valid one"
            )
        async with aiohttp.ClientSession() as session:
            # Step 1: Trigger job
            async with session.post(
                f"{BRIGHTDATA_API_URL}/datasets/v3/trigger",
                params={"dataset_id": dataset_id, "include_errors": "true"},
                json=[request_data],
                headers=headers,
            ) as trigger_response:
                if trigger_response.status != 200:
                    raise BrightDataDatasetToolException(
                        f"Trigger failed: {await trigger_response.text()}",
                        trigger_response.status,
                    )
                trigger_data = await trigger_response.json()
                # NOTE(review): snapshot_id will be None if the trigger response
                # lacks the key; the progress URL below would then be malformed —
                # confirm against the Bright Data API contract.
                snapshot_id = trigger_data.get("snapshot_id")
            # Step 2: Poll for completion. `timeout` is presumably a module-level
            # constant defined earlier in this file (not visible here) — verify.
            elapsed = 0
            while elapsed < timeout:
                await asyncio.sleep(polling_interval)
                elapsed += polling_interval
                async with session.get(
                    f"{BRIGHTDATA_API_URL}/datasets/v3/progress/{snapshot_id}",
                    headers=headers,
                ) as status_response:
                    if status_response.status != 200:
                        raise BrightDataDatasetToolException(
                            f"Status check failed: {await status_response.text()}",
                            status_response.status,
                        )
                    status_data = await status_response.json()
                    if status_data.get("status") == "ready":
                        break
                    if status_data.get("status") == "error":
                        raise BrightDataDatasetToolException(
                            f"Job failed: {status_data}", 0
                        )
            else:
                # while/else: runs only when the loop exhausted without `break`.
                raise TimeoutError("Polling timed out before job completed.")
            # Step 3: Retrieve result
            async with session.get(
                f"{BRIGHTDATA_API_URL}/datasets/v3/snapshot/{snapshot_id}",
                params={"format": output_format},
                headers=headers,
            ) as snapshot_response:
                if snapshot_response.status != 200:
                    raise BrightDataDatasetToolException(
                        f"Result fetch failed: {await snapshot_response.text()}",
                        snapshot_response.status,
                    )
                return await snapshot_response.text()

    def _run(
        self,
        url: str | None = None,
        dataset_type: str | None = None,
        format: str | None = None,
        zipcode: str | None = None,
        additional_params: dict[str, Any] | None = None,
        **kwargs: Any,
    ) -> Any:
        """Synchronous entry point: validate inputs, then drive the async scrape.

        Call-time arguments take precedence over constructor defaults. Returns
        the snapshot text on success, or a human-readable error string on
        timeout/API failure (those exceptions are converted, not propagated);
        validation errors are raised.
        """
        dataset_type = dataset_type or self.dataset_type
        output_format = format or self.format
        url = url or self.url
        zipcode = zipcode or self.zipcode
        additional_params = additional_params or self.additional_params
        if not dataset_type:
            raise ValueError(
                "dataset_type is required either in constructor or method call"
            )
        if not url:
            raise ValueError("url is required either in constructor or method call")
        valid_output_formats = {"json", "ndjson", "jsonl", "csv"}
        if output_format not in valid_output_formats:
            raise ValueError(
                f"Unsupported output format: {output_format}. Must be one of {', '.join(valid_output_formats)}."
            )
        api_key = os.getenv("BRIGHT_DATA_API_KEY")
        if not api_key:
            raise ValueError("BRIGHT_DATA_API_KEY environment variable is required.")
        try:
            # asyncio.run() creates a fresh event loop; it raises if this is
            # called from inside an already-running loop.
            return asyncio.run(
                self.get_dataset_data_async(
                    dataset_type=dataset_type,
                    output_format=output_format,
                    url=url,
                    zipcode=zipcode,
                    additional_params=additional_params,
                )
            )
        except TimeoutError as e:
            return f"Timeout Exception occured in method : get_dataset_data_async. Details - {e!s}"
        except BrightDataDatasetToolException as e:
            return (
                f"Exception occured in method : get_dataset_data_async. Details - {e!s}"
            )
        except Exception as e:
            return f"Bright Data API error: {e!s}"
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/brightdata_tool/brightdata_dataset.py",
"license": "MIT License",
"lines": 559,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/brightdata_tool/brightdata_serp.py | import os
from typing import Any
import urllib.parse
from crewai.tools import BaseTool, EnvVar
from pydantic import BaseModel, Field
import requests
class BrightDataConfig(BaseModel):
    """Holds the Bright Data request-API endpoint used by the SERP search tool."""

    API_URL: str = "https://api.brightdata.com/request"

    @classmethod
    def from_env(cls):
        """Create a config, honoring an optional BRIGHTDATA_API_URL override."""
        endpoint = os.environ.get(
            "BRIGHTDATA_API_URL", "https://api.brightdata.com/request"
        )
        return cls(API_URL=endpoint)
class BrightDataSearchToolSchema(BaseModel):
    """Schema that defines the input arguments for the BrightDataSearchTool.

    All fields except ``query`` are optional and fall back to the defaults below.

    Attributes:
        query (str): The search query to be executed (e.g., "latest AI news").
        search_engine (Optional[str]): The search engine to use ("google", "bing", "yandex"). Default is "google".
        country (Optional[str]): Two-letter country code for geo-targeting (e.g., "us", "in"). Default is "us".
        language (Optional[str]): Language code for search results (e.g., "en", "es"). Default is "en".
        search_type (Optional[str]): Type of search, such as "isch" (images), "nws" (news), "jobs", etc.
        device_type (Optional[str]): Device type to simulate ("desktop", "mobile", "ios", "android"). Default is "desktop".
        parse_results (Optional[bool]): If True, results will be returned in structured JSON. If False, raw HTML. Default is True.
    """

    query: str = Field(..., description="Search query to perform")
    search_engine: str | None = Field(
        default="google",
        description="Search engine domain (e.g., 'google', 'bing', 'yandex')",
    )
    country: str | None = Field(
        default="us",
        description="Two-letter country code for geo-targeting (e.g., 'us', 'gb')",
    )
    language: str | None = Field(
        default="en",
        description="Language code (e.g., 'en', 'es') used in the query URL",
    )
    search_type: str | None = Field(
        default=None,
        description="Type of search (e.g., 'isch' for images, 'nws' for news)",
    )
    device_type: str | None = Field(
        default="desktop",
        description="Device type to simulate (e.g., 'mobile', 'desktop', 'ios')",
    )
    parse_results: bool | None = Field(
        default=True,
        description="Whether to parse and return JSON (True) or raw HTML/text (False)",
    )
class BrightDataSearchTool(BaseTool):
    """A web search tool that utilizes Bright Data's SERP API to perform queries and return either structured results
    or raw page content from search engines like Google or Bing.

    Attributes:
        name (str): Tool name used by the agent.
        description (str): A brief explanation of what the tool does.
        args_schema (Type[BaseModel]): Schema class for validating tool arguments.
        base_url (str): The Bright Data API endpoint used for making the POST request.
        api_key (str): Bright Data API key loaded from the environment variable 'BRIGHT_DATA_API_KEY'.
        zone (str): Zone identifier from Bright Data, loaded from the environment variable 'BRIGHT_DATA_ZONE'.

    Raises:
        ValueError: If API key or zone environment variables are not set.
    """

    name: str = "Bright Data SERP Search"
    description: str = "Tool to perform web search using Bright Data SERP API."
    args_schema: type[BaseModel] = BrightDataSearchToolSchema
    # Endpoint resolved once at class-definition time from the environment.
    _config = BrightDataConfig.from_env()
    base_url: str = ""
    api_key: str = ""
    zone: str = ""
    # Constructor-time defaults; per-call arguments to _run() take precedence.
    query: str | None = None
    search_engine: str = "google"
    country: str = "us"
    language: str = "en"
    search_type: str | None = None
    device_type: str = "desktop"
    parse_results: bool = True
    env_vars: list[EnvVar] = Field(
        default_factory=lambda: [
            EnvVar(
                name="BRIGHT_DATA_API_KEY",
                description="API key for Bright Data",
                required=True,
            ),
        ]
    )

    def __init__(
        self,
        query: str | None = None,
        search_engine: str = "google",
        country: str = "us",
        language: str = "en",
        search_type: str | None = None,
        device_type: str = "desktop",
        parse_results: bool = True,
        **kwargs: Any,
    ):
        """Store defaults and load credentials from the environment.

        Raises:
            ValueError: If BRIGHT_DATA_API_KEY or BRIGHT_DATA_ZONE is unset.
        """
        super().__init__(**kwargs)
        self.base_url = self._config.API_URL
        self.query = query
        self.search_engine = search_engine
        self.country = country
        self.language = language
        self.search_type = search_type
        self.device_type = device_type
        self.parse_results = parse_results
        self.api_key = os.getenv("BRIGHT_DATA_API_KEY") or ""
        self.zone = os.getenv("BRIGHT_DATA_ZONE") or ""
        if not self.api_key:
            raise ValueError("BRIGHT_DATA_API_KEY environment variable is required.")
        if not self.zone:
            raise ValueError("BRIGHT_DATA_ZONE environment variable is required.")

    def get_search_url(self, engine: str, query: str):
        """Build the search-engine URL for an already URL-encoded query.

        Unrecognized engines fall back to Google.

        Bug fix: the previous implementation used JavaScript-template-style
        `${query}` placeholders inside Python f-strings, which injected a
        literal '$' into every URL (e.g. `?q=$foo`) and corrupted the query.
        """
        if engine == "yandex":
            return f"https://yandex.com/search/?text={query}"
        if engine == "bing":
            return f"https://www.bing.com/search?q={query}"
        return f"https://www.google.com/search?q={query}"

    def _run(
        self,
        query: str | None = None,
        search_engine: str | None = None,
        country: str | None = None,
        language: str | None = None,
        search_type: str | None = None,
        device_type: str | None = None,
        parse_results: bool | None = None,
        **kwargs,
    ) -> Any:
        """Executes a search query using Bright Data SERP API and returns results.

        Args:
            query (str): The search query string (URL encoded internally).
            search_engine (str): The search engine to use (default: "google").
            country (str): Country code for geotargeting (default: "us").
            language (str): Language code for the query (default: "en").
            search_type (str): Optional type of search such as "nws", "isch", "jobs".
            device_type (str): Optional device type to simulate (e.g., "mobile", "ios", "desktop").
            parse_results (bool): If True, requests structured data; else raw page (default: True).
            results_count (str or int): Number of search results to fetch (default: "10").

        Returns:
            str: The Bright Data response body, or an error message on failure.
        """
        query = query or self.query
        search_engine = search_engine or self.search_engine
        country = country or self.country
        language = language or self.language
        search_type = search_type or self.search_type
        device_type = device_type or self.device_type
        parse_results = (
            parse_results if parse_results is not None else self.parse_results
        )
        results_count = kwargs.get("results_count", "10")
        # Validate required parameters
        if not query:
            raise ValueError("query is required either in constructor or method call")
        # Build the search URL
        query = urllib.parse.quote(query)
        url = self.get_search_url(search_engine, query)
        # Add parameters to the URL
        params = []
        if country:
            params.append(f"gl={country}")
        if language:
            params.append(f"hl={language}")
        if results_count:
            params.append(f"num={results_count}")
        if parse_results:
            # Ask Bright Data to return parsed JSON instead of raw HTML.
            params.append("brd_json=1")
        if search_type:
            if search_type == "jobs":
                params.append("ibp=htl;jobs")
            else:
                params.append(f"tbm={search_type}")
        if device_type:
            if device_type == "mobile":
                params.append("brd_mobile=1")
            elif device_type == "ios":
                params.append("brd_mobile=ios")
            elif device_type == "android":
                params.append("brd_mobile=android")
        # Combine parameters with the URL
        if params:
            url += "&" + "&".join(params)
        # Set up the API request parameters
        request_params = {"zone": self.zone, "url": url, "format": "raw"}
        request_params = {k: v for k, v in request_params.items() if v is not None}
        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json",
        }
        try:
            response = requests.post(
                self.base_url, json=request_params, headers=headers, timeout=30
            )
            response.raise_for_status()
            return response.text
        except requests.RequestException as e:
            return f"Error performing BrightData search: {e!s}"
        except Exception as e:
            return f"Error fetching results: {e!s}"
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/brightdata_tool/brightdata_serp.py",
"license": "MIT License",
"lines": 201,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/brightdata_tool/brightdata_unlocker.py | import os
from typing import Any
from crewai.tools import BaseTool, EnvVar
from pydantic import BaseModel, Field
import requests
class BrightDataConfig(BaseModel):
    """Holds the Bright Data request-API endpoint used by the unlocker tool."""

    API_URL: str = "https://api.brightdata.com/request"

    @classmethod
    def from_env(cls):
        """Create a config, honoring an optional BRIGHTDATA_API_URL override."""
        api_url = os.environ.get(
            "BRIGHTDATA_API_URL", "https://api.brightdata.com/request"
        )
        return cls(API_URL=api_url)
class BrightDataUnlockerToolSchema(BaseModel):
    """Pydantic schema for input parameters used by the BrightDataWebUnlockerTool.

    This schema defines the structure and validation for parameters passed when performing
    a web scraping request using Bright Data's Web Unlocker.

    Attributes:
        url (str): The target URL to scrape.
        format (Optional[str]): Format of the response returned by Bright Data. Default 'raw' format.
        data_format (Optional[str]): Response data format; 'markdown' by default, 'html' is the other option.
    """

    url: str = Field(..., description="URL to perform the web scraping")
    format: str | None = Field(
        default="raw", description="Response format (raw is standard)"
    )
    # Fix: the description previously claimed 'html' was the default even though
    # the declared default is 'markdown'; agents rely on this text being accurate.
    data_format: str | None = Field(
        default="markdown",
        description="Response data format ('markdown' by default; 'html' also supported)",
    )
class BrightDataWebUnlockerTool(BaseTool):
    """A tool for performing web scraping using the Bright Data Web Unlocker API.

    This tool allows automated and programmatic access to web pages by routing requests
    through Bright Data's unlocking and proxy infrastructure, which can bypass bot
    protection mechanisms like CAPTCHA, geo-restrictions, and anti-bot detection.

    Attributes:
        name (str): Name of the tool.
        description (str): Description of what the tool does.
        args_schema (Type[BaseModel]): Pydantic model schema for expected input arguments.
        base_url (str): Base URL of the Bright Data Web Unlocker API.
        api_key (str): Bright Data API key (must be set in the BRIGHT_DATA_API_KEY environment variable).
        zone (str): Bright Data zone identifier (must be set in the BRIGHT_DATA_ZONE environment variable).

    Methods:
        _run(**kwargs: Any) -> Any:
            Sends a scraping request to Bright Data's Web Unlocker API and returns the result.
    """

    name: str = "Bright Data Web Unlocker Scraping"
    description: str = "Tool to perform web scraping using Bright Data Web Unlocker"
    args_schema: type[BaseModel] = BrightDataUnlockerToolSchema
    # Endpoint resolved once at class-definition time from the environment.
    _config = BrightDataConfig.from_env()
    base_url: str = ""
    api_key: str = ""
    zone: str = ""
    # Constructor-time defaults; per-call arguments to _run() take precedence.
    url: str | None = None
    format: str = "raw"
    data_format: str = "markdown"
    env_vars: list[EnvVar] = Field(
        default_factory=lambda: [
            EnvVar(
                name="BRIGHT_DATA_API_KEY",
                description="API key for Bright Data",
                required=True,
            ),
        ]
    )

    def __init__(
        self,
        url: str | None = None,
        format: str = "raw",
        data_format: str = "markdown",
        **kwargs: Any,
    ):
        """Store defaults and load credentials from the environment.

        Raises:
            ValueError: If BRIGHT_DATA_API_KEY or BRIGHT_DATA_ZONE is unset.
        """
        super().__init__(**kwargs)
        self.base_url = self._config.API_URL
        self.url = url
        self.format = format
        self.data_format = data_format
        self.api_key = os.getenv("BRIGHT_DATA_API_KEY") or ""
        self.zone = os.getenv("BRIGHT_DATA_ZONE") or ""
        if not self.api_key:
            raise ValueError("BRIGHT_DATA_API_KEY environment variable is required.")
        if not self.zone:
            raise ValueError("BRIGHT_DATA_ZONE environment variable is required.")

    def _run(
        self,
        url: str | None = None,
        format: str | None = None,
        data_format: str | None = None,
        **kwargs: Any,
    ) -> Any:
        """Scrape ``url`` through the Web Unlocker and return the response body.

        Call-time arguments override constructor defaults. Returns the raw
        response text on success, or a human-readable error string on failure
        (request exceptions are converted, not propagated).

        Raises:
            ValueError: If no URL is available or ``data_format`` is unsupported.
        """
        url = url or self.url
        format = format or self.format
        data_format = data_format or self.data_format
        if not url:
            raise ValueError("url is required either in constructor or method call")
        payload = {
            "url": url,
            "zone": self.zone,
            "format": format,
        }
        valid_data_formats = {"html", "markdown"}
        if data_format not in valid_data_formats:
            raise ValueError(
                f"Unsupported data format: {data_format}. Must be one of {', '.join(valid_data_formats)}."
            )
        # Only markdown is sent explicitly; "html" is presumably the
        # service-side default — confirm against the Web Unlocker API docs.
        if data_format == "markdown":
            payload["data_format"] = "markdown"
        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json",
        }
        try:
            response = requests.post(
                self.base_url, json=payload, headers=headers, timeout=30
            )
            response.raise_for_status()
            return response.text
        except requests.RequestException as e:
            return f"HTTP Error performing BrightData Web Unlocker Scrape: {e}\nResponse: {getattr(e.response, 'text', '')}"
        except Exception as e:
            return f"Error fetching results: {e!s}"
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/brightdata_tool/brightdata_unlocker.py",
"license": "MIT License",
"lines": 121,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py | import os
from typing import Any
from crewai.tools import BaseTool, EnvVar
from pydantic import BaseModel, Field
class BrowserbaseLoadToolSchema(BaseModel):
    """Input schema for BrowserbaseLoadTool: the single page URL to load."""

    url: str = Field(description="Website URL")
class BrowserbaseLoadTool(BaseTool):
    """Load a webpage in a Browserbase headless browser and return its contents.

    Credentials default to the BROWSERBASE_API_KEY / BROWSERBASE_PROJECT_ID
    environment variables but may be supplied explicitly to the constructor.
    """

    name: str = "Browserbase web load tool"
    description: str = "Load webpages url in a headless browser using Browserbase and return the contents"
    args_schema: type[BaseModel] = BrowserbaseLoadToolSchema
    api_key: str | None = os.getenv("BROWSERBASE_API_KEY")
    project_id: str | None = os.getenv("BROWSERBASE_PROJECT_ID")
    text_content: bool | None = False
    session_id: str | None = None
    proxy: bool | None = None
    browserbase: Any | None = None
    package_dependencies: list[str] = Field(default_factory=lambda: ["browserbase"])
    env_vars: list[EnvVar] = Field(
        default_factory=lambda: [
            EnvVar(
                name="BROWSERBASE_API_KEY",
                description="API key for Browserbase services",
                required=False,
            ),
            EnvVar(
                name="BROWSERBASE_PROJECT_ID",
                description="Project ID for Browserbase services",
                required=False,
            ),
        ]
    )

    def __init__(
        self,
        api_key: str | None = None,
        project_id: str | None = None,
        text_content: bool | None = False,
        session_id: str | None = None,
        proxy: bool | None = None,
        **kwargs,
    ):
        """Set up the Browserbase client, offering to install the SDK on demand.

        Args:
            api_key: Browserbase API key; overrides BROWSERBASE_API_KEY.
            project_id: Browserbase project id; overrides BROWSERBASE_PROJECT_ID.
            text_content: If truthy, request text content rather than full HTML.
            session_id: Optional existing Browserbase session to reuse.
            proxy: Whether to route the session through a proxy.

        Raises:
            EnvironmentError: If no API key is available.
            ImportError: If `browserbase` is missing and the user declines install.
        """
        super().__init__(**kwargs)
        # Bug fix: explicit constructor arguments were previously ignored in
        # favor of the env-derived class defaults; they now take precedence.
        if api_key is not None:
            self.api_key = api_key
        if project_id is not None:
            self.project_id = project_id
        if not self.api_key:
            raise EnvironmentError(
                "BROWSERBASE_API_KEY environment variable is required for initialization"
            )
        try:
            from browserbase import Browserbase  # type: ignore
        except ImportError:
            import click

            if click.confirm(
                "`browserbase` package not found, would you like to install it?"
            ):
                import subprocess

                subprocess.run(["uv", "add", "browserbase"], check=True)  # noqa: S607
                from browserbase import Browserbase  # type: ignore
            else:
                raise ImportError(
                    "`browserbase` package not found, please run `uv add browserbase`"
                ) from None
        self.browserbase = Browserbase(api_key=self.api_key)
        self.text_content = text_content
        self.session_id = session_id
        self.proxy = proxy

    def _run(self, url: str):
        """Load `url` via the Browserbase client and return the page contents."""
        return self.browserbase.load_url(  # type: ignore[union-attr]
            url, self.text_content, self.session_id, self.proxy
        )
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/browserbase_load_tool/browserbase_load_tool.py",
"license": "MIT License",
"lines": 67,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/code_docs_search_tool/code_docs_search_tool.py | from pydantic import BaseModel, Field
from crewai_tools.rag.data_types import DataType
from crewai_tools.tools.rag.rag_tool import RagTool
class FixedCodeDocsSearchToolSchema(BaseModel):
    """Input for CodeDocsSearchTool when the docs URL is fixed at construction time."""

    search_query: str = Field(
        ...,
        description="Mandatory search query you want to use to search the Code Docs content",
    )
class CodeDocsSearchToolSchema(FixedCodeDocsSearchToolSchema):
    """Input for CodeDocsSearchTool; additionally requires the docs URL to search."""

    docs_url: str = Field(..., description="Mandatory docs_url path you want to search")
class CodeDocsSearchTool(RagTool):
    """Semantic search over the content of a code-documentation site.

    When a ``docs_url`` is supplied at construction time the tool is pinned to
    that site and callers only provide a search query; otherwise each call may
    pass the site to index and search.
    """

    name: str = "Search a Code Docs content"
    description: str = (
        "A tool that can be used to semantic search a query from a Code Docs content."
    )
    args_schema: type[BaseModel] = CodeDocsSearchToolSchema

    def __init__(self, docs_url: str | None = None, **kwargs):
        """Optionally pre-index one docs site and fix the tool to it."""
        super().__init__(**kwargs)
        if docs_url is None:
            return
        # Pin the tool to a single docs site and drop docs_url from the schema.
        self.add(docs_url)
        self.description = f"A tool that can be used to semantic search a query the {docs_url} Code Docs content."
        self.args_schema = FixedCodeDocsSearchToolSchema
        self._generate_description()

    def add(self, docs_url: str) -> None:
        """Index a documentation site into the underlying RAG store."""
        super().add(docs_url, data_type=DataType.DOCS_SITE)

    def _run(  # type: ignore[override]
        self,
        search_query: str,
        docs_url: str | None = None,
        similarity_threshold: float | None = None,
        limit: int | None = None,
    ) -> str:
        """Run a semantic query, indexing ``docs_url`` first when provided."""
        if docs_url is not None:
            self.add(docs_url)
        return super()._run(
            query=search_query, similarity_threshold=similarity_threshold, limit=limit
        )
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/code_docs_search_tool/code_docs_search_tool.py",
"license": "MIT License",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py | """Code Interpreter Tool for executing Python code in isolated environments.
This module provides a tool for executing Python code either in a Docker container for
safe isolation or directly in a restricted sandbox. It includes mechanisms for blocking
potentially unsafe operations and importing restricted modules.
"""
import importlib.util
import os
import subprocess
from types import ModuleType
from typing import Any, ClassVar, TypedDict
from crewai.tools import BaseTool
from docker import ( # type: ignore[import-untyped]
DockerClient,
from_env as docker_from_env,
)
from docker.errors import ImageNotFound, NotFound # type: ignore[import-untyped]
from docker.models.containers import Container # type: ignore[import-untyped]
from pydantic import BaseModel, Field
from typing_extensions import Unpack
from crewai_tools.printer import Printer
class RunKwargs(TypedDict, total=False):
    """Keyword arguments for the _run method."""

    # Python source to execute; _run falls back to self.code when omitted.
    code: str
    # pip-installable package names to install before execution.
    libraries_used: list[str]
class CodeInterpreterSchema(BaseModel):
    """Schema for defining inputs to the CodeInterpreterTool.

    This schema defines the required parameters for code execution,
    including the code to run and any libraries that need to be installed.
    """

    # The tool returns the process stdout (Docker) or a `result` variable
    # (sandbox), so the snippet must print or assign its final answer.
    code: str = Field(
        ...,
        description="Python3 code used to be interpreted in the Docker container. ALWAYS PRINT the final result and the output of the code",
    )
    # pip-installable names; installed into the execution environment first.
    libraries_used: list[str] = Field(
        ...,
        description="List of libraries used in the code with proper installing names separated by commas. Example: numpy,pandas,beautifulsoup4",
    )
class SandboxPython:
"""A restricted Python execution environment for running code safely.
This class provides methods to safely execute Python code by restricting access to
potentially dangerous modules and built-in functions. It creates a sandboxed
environment where harmful operations are blocked.
"""
BLOCKED_MODULES: ClassVar[set[str]] = {
"os",
"sys",
"subprocess",
"shutil",
"importlib",
"inspect",
"tempfile",
"sysconfig",
"builtins",
}
UNSAFE_BUILTINS: ClassVar[set[str]] = {
"exec",
"eval",
"open",
"compile",
"input",
"globals",
"locals",
"vars",
"help",
"dir",
}
@staticmethod
def restricted_import(
name: str,
custom_globals: dict[str, Any] | None = None,
custom_locals: dict[str, Any] | None = None,
fromlist: list[str] | None = None,
level: int = 0,
) -> ModuleType:
"""A restricted import function that blocks importing of unsafe modules.
Args:
name: The name of the module to import.
custom_globals: Global namespace to use.
custom_locals: Local namespace to use.
fromlist: List of items to import from the module.
level: The level value passed to __import__.
Returns:
The imported module if allowed.
Raises:
ImportError: If the module is in the blocked modules list.
"""
if name in SandboxPython.BLOCKED_MODULES:
raise ImportError(f"Importing '{name}' is not allowed.")
return __import__(name, custom_globals, custom_locals, fromlist or (), level)
@staticmethod
def safe_builtins() -> dict[str, Any]:
"""Creates a dictionary of built-in functions with unsafe ones removed.
Returns:
A dictionary of safe built-in functions and objects.
"""
import builtins
safe_builtins = {
k: v
for k, v in builtins.__dict__.items()
if k not in SandboxPython.UNSAFE_BUILTINS
}
safe_builtins["__import__"] = SandboxPython.restricted_import
return safe_builtins
@staticmethod
def exec(code: str, locals_: dict[str, Any]) -> None:
"""Executes Python code in a restricted environment.
Args:
code: The Python code to execute as a string.
locals_: A dictionary that will be used for local variable storage.
"""
exec(code, {"__builtins__": SandboxPython.safe_builtins()}, locals_) # noqa: S102
class CodeInterpreterTool(BaseTool):
"""A tool for executing Python code in isolated environments.
This tool provides functionality to run Python code either in a Docker container
for safe isolation or directly in a restricted sandbox. It can handle installing
Python packages and executing arbitrary Python code.
"""
name: str = "Code Interpreter"
description: str = "Interprets Python3 code strings with a final print statement."
args_schema: type[BaseModel] = CodeInterpreterSchema
default_image_tag: str = "code-interpreter:latest"
code: str | None = None
user_dockerfile_path: str | None = None
user_docker_base_url: str | None = None
unsafe_mode: bool = False
@staticmethod
def _get_installed_package_path() -> str:
"""Gets the installation path of the crewai_tools package.
Returns:
The directory path where the package is installed.
Raises:
RuntimeError: If the package cannot be found.
"""
spec = importlib.util.find_spec("crewai_tools")
if spec is None or spec.origin is None:
raise RuntimeError("Cannot find crewai_tools package installation path")
return os.path.dirname(spec.origin)
def _verify_docker_image(self) -> None:
"""Verifies if the Docker image is available or builds it if necessary.
Checks if the required Docker image exists. If not, builds it using either a
user-provided Dockerfile or the default one included with the package.
Raises:
FileNotFoundError: If the Dockerfile cannot be found.
"""
client = (
docker_from_env()
if self.user_docker_base_url is None
else DockerClient(base_url=self.user_docker_base_url)
)
try:
client.images.get(self.default_image_tag)
except ImageNotFound:
if self.user_dockerfile_path and os.path.exists(self.user_dockerfile_path):
dockerfile_path = self.user_dockerfile_path
else:
package_path = self._get_installed_package_path()
dockerfile_path = os.path.join(
package_path, "tools/code_interpreter_tool"
)
if not os.path.exists(dockerfile_path):
raise FileNotFoundError(
f"Dockerfile not found in {dockerfile_path}"
) from None
client.images.build(
path=dockerfile_path,
tag=self.default_image_tag,
rm=True,
)
def _run(self, **kwargs: Unpack[RunKwargs]) -> str:
"""Runs the code interpreter tool with the provided arguments.
Args:
**kwargs: Keyword arguments that should include 'code' and 'libraries_used'.
Returns:
The output of the executed code as a string.
"""
code: str | None = kwargs.get("code", self.code)
libraries_used: list[str] = kwargs.get("libraries_used", [])
if not code:
return "No code provided to execute."
if self.unsafe_mode:
return self.run_code_unsafe(code, libraries_used)
return self.run_code_safety(code, libraries_used)
@staticmethod
def _install_libraries(container: Container, libraries: list[str]) -> None:
"""Installs required Python libraries in the Docker container.
Args:
container: The Docker container where libraries will be installed.
libraries: A list of library names to install using pip.
"""
for library in libraries:
container.exec_run(["pip", "install", library])
def _init_docker_container(self) -> Container:
"""Initializes and returns a Docker container for code execution.
Stops and removes any existing container with the same name before creating
a new one. Maps the current working directory to /workspace in the container.
Returns:
A Docker container object ready for code execution.
"""
container_name = "code-interpreter"
client = docker_from_env()
current_path = os.getcwd()
# Check if the container is already running
try:
existing_container = client.containers.get(container_name)
existing_container.stop()
existing_container.remove()
except NotFound:
pass # Container does not exist, no need to remove
return client.containers.run(
self.default_image_tag,
detach=True,
tty=True,
working_dir="/workspace",
name=container_name,
volumes={current_path: {"bind": "/workspace", "mode": "rw"}}, # type: ignore
)
@staticmethod
def _check_docker_available() -> bool:
"""Checks if Docker is available and running on the system.
Attempts to run the 'docker info' command to verify Docker availability.
Prints appropriate messages if Docker is not installed or not running.
Returns:
True if Docker is available and running, False otherwise.
"""
try:
subprocess.run(
["docker", "info"], # noqa: S607
check=True,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
timeout=1,
)
return True
except (subprocess.CalledProcessError, subprocess.TimeoutExpired):
Printer.print(
"Docker is installed but not running or inaccessible.",
color="bold_purple",
)
return False
except FileNotFoundError:
Printer.print("Docker is not installed", color="bold_purple")
return False
def run_code_safety(self, code: str, libraries_used: list[str]) -> str:
"""Runs code in the safest available environment.
Attempts to run code in Docker if available, falls back to a restricted
sandbox if Docker is not available.
Args:
code: The Python code to execute as a string.
libraries_used: A list of Python library names to install before execution.
Returns:
The output of the executed code as a string.
"""
if self._check_docker_available():
return self.run_code_in_docker(code, libraries_used)
return self.run_code_in_restricted_sandbox(code)
def run_code_in_docker(self, code: str, libraries_used: list[str]) -> str:
"""Runs Python code in a Docker container for safe isolation.
Creates a Docker container, installs the required libraries, executes the code,
and then cleans up by stopping and removing the container.
Args:
code: The Python code to execute as a string.
libraries_used: A list of Python library names to install before execution.
Returns:
The output of the executed code as a string, or an error message if execution failed.
"""
Printer.print("Running code in Docker environment", color="bold_blue")
self._verify_docker_image()
container = self._init_docker_container()
self._install_libraries(container, libraries_used)
exec_result = container.exec_run(["python3", "-c", code])
container.stop()
container.remove()
if exec_result.exit_code != 0:
return f"Something went wrong while running the code: \n{exec_result.output.decode('utf-8')}"
return exec_result.output.decode("utf-8")
@staticmethod
def run_code_in_restricted_sandbox(code: str) -> str:
    """Runs Python code in a restricted sandbox environment.

    Executes the code with restricted access to dangerous modules and
    built-ins, for basic safety when Docker is not available.

    Args:
        code: The Python code to execute as a string.

    Returns:
        The value of the 'result' variable from the executed code, or an
        error message if execution failed.
    """
    Printer.print("Running code in restricted sandbox", color="yellow")
    sandbox_locals: dict[str, Any] = {}
    try:
        SandboxPython.exec(code=code, locals_=sandbox_locals)
    except Exception as e:
        return f"An error occurred: {e!s}"
    return sandbox_locals.get("result", "No result variable found.")
@staticmethod
def run_code_unsafe(code: str, libraries_used: list[str]) -> str:
    """Runs code directly on the host machine without any safety restrictions.

    WARNING: This mode is unsafe and should only be used in trusted
    environments with code from trusted sources.

    Args:
        code: The Python code to execute as a string.
        libraries_used: A list of Python library names to install before execution.

    Returns:
        The value of the 'result' variable from the executed code, or an
        error message if execution failed.
    """
    Printer.print("WARNING: Running code in unsafe mode", color="bold_magenta")

    # Install dependencies directly on the host machine.
    for lib in libraries_used:
        os.system(f"pip install {lib}")  # noqa: S605

    # Run the code and surface its 'result' variable, if any.
    exec_scope: dict[str, Any] = {}
    try:
        exec(code, {}, exec_scope)  # noqa: S102
        return exec_scope.get("result", "No result variable found.")
    except Exception as e:
        return f"An error occurred: {e!s}"
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/code_interpreter_tool/code_interpreter_tool.py",
"license": "MIT License",
"lines": 316,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/composio_tool/composio_tool.py | """Composio tools wrapper."""
import typing as t
from crewai.tools import BaseTool, EnvVar
from pydantic import Field
import typing_extensions as te
class ComposioTool(BaseTool):
    """Wrapper for composio tools."""

    composio_action: t.Callable
    env_vars: list[EnvVar] = Field(
        default_factory=lambda: [
            EnvVar(
                name="COMPOSIO_API_KEY",
                description="API key for Composio services",
                required=True,
            ),
        ]
    )

    def _run(self, *args: t.Any, **kwargs: t.Any) -> t.Any:
        """Run the composio action with given arguments."""
        return self.composio_action(*args, **kwargs)

    @staticmethod
    def _check_connected_account(tool: t.Any, toolset: t.Any) -> None:
        """Check if connected account is required and if required it exists or not."""
        from composio import Action
        from composio.client.collections import ConnectedAccountModel

        tool = t.cast(Action, tool)
        if tool.no_auth:
            # Tools without auth never need a connected account.
            return

        connections = t.cast(
            list[ConnectedAccountModel],
            toolset.client.connected_accounts.get(),
        )
        connected_apps = {connection.appUniqueId for connection in connections}
        if tool.app not in connected_apps:
            raise RuntimeError(
                f"No connected account found for app `{tool.app}`; "
                f"Run `composio add {tool.app}` to fix this"
            )

    @classmethod
    def from_action(
        cls,
        action: t.Any,
        **kwargs: t.Any,
    ) -> te.Self:
        """Wrap a composio tool as crewAI tool."""
        from composio import Action, ComposioToolSet
        from composio.constants import DEFAULT_ENTITY_ID
        from composio.utils.shared import json_schema_to_model

        toolset = ComposioToolSet()
        if not isinstance(action, Action):
            action = Action(action)
        action = t.cast(Action, action)

        # Fail early when the app has no connected account.
        cls._check_connected_account(tool=action, toolset=toolset)

        (action_schema,) = toolset.get_action_schemas(actions=[action])
        schema = action_schema.model_dump(exclude_none=True)
        entity_id = kwargs.pop("entity_id", DEFAULT_ENTITY_ID)

        def function(**kwargs: t.Any) -> dict:
            """Wrapper function for composio action."""
            return toolset.execute_action(
                action=Action(schema["name"]),
                params=kwargs,
                entity_id=entity_id,
            )

        function.__name__ = schema["name"]
        function.__doc__ = schema["description"]

        return cls(
            name=schema["name"],
            description=schema["description"],
            args_schema=json_schema_to_model(
                action_schema.parameters.model_dump(exclude_none=True)
            ),
            composio_action=function,
            **kwargs,
        )

    @classmethod
    def from_app(
        cls,
        *apps: t.Any,
        tags: list[str] | None = None,
        use_case: str | None = None,
        **kwargs: t.Any,
    ) -> list[te.Self]:
        """Create toolset from an app."""
        # Exactly one of `use_case` / `tags` must be provided.
        if not apps:
            raise ValueError("You need to provide at least one app name")
        if use_case is None and tags is None:
            raise ValueError("Both `use_case` and `tags` cannot be `None`")
        if use_case is not None and tags is not None:
            raise ValueError(
                "Cannot use both `use_case` and `tags` to filter the actions"
            )

        from composio import ComposioToolSet

        toolset = ComposioToolSet()
        if use_case is not None:
            actions = toolset.find_actions_by_use_case(*apps, use_case=use_case)
        else:
            actions = toolset.find_actions_by_tags(*apps, tags=tags)  # type: ignore[arg-type]
        return [cls.from_action(action=action, **kwargs) for action in actions]
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/composio_tool/composio_tool.py",
"license": "MIT License",
"lines": 106,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/contextualai_create_agent_tool/contextual_create_agent_tool.py | from typing import Any
from crewai.tools import BaseTool
from pydantic import BaseModel, Field
class ContextualAICreateAgentSchema(BaseModel):
    """Input schema for ContextualAICreateAgentTool.

    Describes everything needed to create one agent backed by one new
    datastore populated with the given documents.
    """

    # Display name for the agent to create.
    agent_name: str = Field(..., description="Name for the new agent")
    # Human-readable description attached to the agent.
    agent_description: str = Field(..., description="Description for the new agent")
    # Name of the datastore created to hold the uploaded documents.
    datastore_name: str = Field(..., description="Name for the new datastore")
    # Local filesystem paths of the documents to ingest into the datastore.
    document_paths: list[str] = Field(..., description="List of file paths to upload")
class ContextualAICreateAgentTool(BaseTool):
    """Tool to create Contextual AI RAG agents with documents."""

    name: str = "Contextual AI Create Agent Tool"
    description: str = (
        "Create a new Contextual AI RAG agent with documents and datastore"
    )
    args_schema: type[BaseModel] = ContextualAICreateAgentSchema
    api_key: str
    contextual_client: Any = None
    package_dependencies: list[str] = Field(
        default_factory=lambda: ["contextual-client"]
    )

    def __init__(self, **kwargs):
        """Initialize the tool and construct the ContextualAI SDK client.

        Raises:
            ImportError: If the `contextual-client` package is not installed.
        """
        super().__init__(**kwargs)
        try:
            from contextual import ContextualAI

            self.contextual_client = ContextualAI(api_key=self.api_key)
        except ImportError as e:
            raise ImportError(
                "contextual-client package is required. Install it with: pip install contextual-client"
            ) from e

    def _run(
        self,
        agent_name: str,
        agent_description: str,
        datastore_name: str,
        document_paths: list[str],
    ) -> str:
        """Create a complete RAG pipeline with documents."""
        try:
            import os

            # Create the datastore first so uploads have a destination.
            datastore_id = self.contextual_client.datastores.create(
                name=datastore_name
            ).id

            # Ingest each document; fail fast on missing files.
            uploaded_ids: list[str] = []
            for path in document_paths:
                if not os.path.exists(path):
                    raise FileNotFoundError(f"Document not found: {path}")
                with open(path, "rb") as fh:
                    ingestion = self.contextual_client.datastores.documents.ingest(
                        datastore_id, file=fh
                    )
                uploaded_ids.append(ingestion.id)

            # Create the agent bound to the freshly populated datastore.
            agent = self.contextual_client.agents.create(
                name=agent_name,
                description=agent_description,
                datastore_ids=[datastore_id],
            )
            return (
                f"Successfully created agent '{agent_name}' with ID: {agent.id} "
                f"and datastore ID: {datastore_id}. "
                f"Uploaded {len(uploaded_ids)} documents."
            )
        except Exception as e:
            return f"Failed to create agent with documents: {e!s}"
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/contextualai_create_agent_tool/contextual_create_agent_tool.py",
"license": "MIT License",
"lines": 64,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/contextualai_parse_tool/contextual_parse_tool.py | from crewai.tools import BaseTool
from pydantic import BaseModel, Field
class ContextualAIParseSchema(BaseModel):
    """Input schema for ContextualAIParseTool."""

    # Local path of the document to submit to the parse endpoint.
    file_path: str = Field(..., description="Path to the document to parse")
    # Parsing mode forwarded verbatim to the API.
    parse_mode: str = Field(default="standard", description="Parsing mode")
    # How figure captions are generated by the parser.
    figure_caption_mode: str = Field(
        default="concise", description="Figure caption mode"
    )
    # Whether the parser should build a document hierarchy.
    enable_document_hierarchy: bool = Field(
        default=True, description="Enable document hierarchy"
    )
    # Optional page window; only sent when provided.
    page_range: str | None = Field(
        default=None, description="Page range to parse (e.g., '0-5')"
    )
    # Output formats requested from the results endpoint.
    output_types: list[str] = Field(
        default=["markdown-per-page"], description="List of output types"
    )
class ContextualAIParseTool(BaseTool):
    """Tool to parse documents using Contextual AI's parser."""

    name: str = "Contextual AI Document Parser"
    description: str = "Parse documents using Contextual AI's advanced document parser"
    args_schema: type[BaseModel] = ContextualAIParseSchema
    # Bearer token for the Contextual AI REST API.
    api_key: str
    package_dependencies: list[str] = Field(
        default_factory=lambda: ["contextual-client"]
    )

    def _run(
        self,
        file_path: str,
        parse_mode: str = "standard",
        figure_caption_mode: str = "concise",
        enable_document_hierarchy: bool = True,
        page_range: str | None = None,
        output_types: list[str] | None = None,
    ) -> str:
        """Parse a document using Contextual AI's parser.

        Submits the file, polls the job status until it completes, then
        fetches and returns the results. The polling loop is bounded (the
        original `while True` could spin forever on a stuck job).

        Args:
            file_path: Path to the local document to parse.
            parse_mode: Parsing mode forwarded to the API.
            figure_caption_mode: Figure caption mode forwarded to the API.
            enable_document_hierarchy: Enable document hierarchy in the parse.
            page_range: Optional page range to parse (e.g., '0-5').
            output_types: Output types to request; defaults to
                ["markdown-per-page"].

        Returns:
            Pretty-printed JSON of the parse results, or an error message
            string if any step failed.
        """
        if output_types is None:
            output_types = ["markdown-per-page"]
        try:
            import json
            import os
            from time import sleep

            import requests

            if not os.path.exists(file_path):
                raise FileNotFoundError(f"Document not found: {file_path}")

            base_url = "https://api.contextual.ai/v1"
            headers = {
                "accept": "application/json",
                "authorization": f"Bearer {self.api_key}",
            }

            # Submit parse job
            url = f"{base_url}/parse"
            config = {
                "parse_mode": parse_mode,
                "figure_caption_mode": figure_caption_mode,
                "enable_document_hierarchy": enable_document_hierarchy,
            }
            if page_range:
                config["page_range"] = page_range

            with open(file_path, "rb") as fp:
                file = {"raw_file": fp}
                result = requests.post(
                    url, headers=headers, data=config, files=file, timeout=30
                )
            response = json.loads(result.text)
            job_id = response["job_id"]

            # Monitor job status. Bounded: 120 polls x 5s sleep caps the wait
            # at ~10 minutes instead of looping forever on a stuck job.
            status_url = f"{base_url}/parse/jobs/{job_id}/status"
            max_polls = 120
            for _ in range(max_polls):
                result = requests.get(status_url, headers=headers, timeout=30)
                parse_response = json.loads(result.text)["status"]
                if parse_response == "completed":
                    break
                if parse_response == "failed":
                    raise RuntimeError("Document parsing failed")
                sleep(5)
            else:
                # Loop exhausted without completion; surfaced via the outer
                # except as an error string, like every other failure here.
                raise TimeoutError(
                    f"Document parsing did not complete after {max_polls} status checks"
                )

            # Get parse results
            results_url = f"{base_url}/parse/jobs/{job_id}/results"
            result = requests.get(
                results_url,
                headers=headers,
                params={"output_types": ",".join(output_types)},
                timeout=30,
            )
            return json.dumps(json.loads(result.text), indent=2)
        except Exception as e:
            return f"Failed to parse document: {e!s}"
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/contextualai_parse_tool/contextual_parse_tool.py",
"license": "MIT License",
"lines": 88,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/contextualai_query_tool/contextual_query_tool.py | import asyncio
from typing import Any
from crewai.tools import BaseTool
from pydantic import BaseModel, Field
import requests
class ContextualAIQuerySchema(BaseModel):
    """Input schema for ContextualAIQueryTool."""

    # Natural-language question forwarded to the agent.
    query: str = Field(..., description="Query to send to the Contextual AI agent.")
    # Identifier of the agent that should answer the query.
    agent_id: str = Field(..., description="ID of the Contextual AI agent to query")
    # Optional: when set, the tool first checks/waits for the datastore's
    # documents to finish processing before issuing the query.
    datastore_id: str | None = Field(
        None, description="Optional datastore ID for document readiness verification"
    )
class ContextualAIQueryTool(BaseTool):
    """Tool to query Contextual AI RAG agents.

    Optionally waits for a datastore's documents to finish processing before
    querying, handling both "inside an event loop" and "no event loop"
    execution contexts.
    """

    name: str = "Contextual AI Query Tool"
    description: str = (
        "Use this tool to query a Contextual AI RAG agent with access to your documents"
    )
    args_schema: type[BaseModel] = ContextualAIQuerySchema
    # Bearer token used for both the SDK client and the raw REST status check.
    api_key: str
    # ContextualAI SDK client; populated in __init__.
    contextual_client: Any = None
    package_dependencies: list[str] = Field(
        default_factory=lambda: ["contextual-client"]
    )

    def __init__(self, **kwargs):
        """Initialize the tool and construct the ContextualAI SDK client.

        Raises:
            ImportError: If the `contextual-client` package is not installed.
        """
        super().__init__(**kwargs)
        try:
            from contextual import ContextualAI

            self.contextual_client = ContextualAI(api_key=self.api_key)
        except ImportError as e:
            raise ImportError(
                "contextual-client package is required. Install it with: pip install contextual-client"
            ) from e

    def _check_documents_ready(self, datastore_id: str) -> bool:
        """Synchronous check if all documents are ready.

        A document counts as not ready while its status is "processing" or
        "pending". Non-200 responses deliberately report ready so a
        status-endpoint failure cannot block querying.
        """
        url = f"https://api.contextual.ai/v1/datastores/{datastore_id}/documents"
        headers = {"Authorization": f"Bearer {self.api_key}"}
        response = requests.get(url, headers=headers, timeout=30)
        if response.status_code == 200:
            data = response.json()
            documents = data.get("documents", [])
            return not any(
                doc.get("status") in ("processing", "pending") for doc in documents
            )
        # Status check failed: treat as ready rather than blocking.
        return True

    async def _wait_for_documents_async(
        self, datastore_id: str, max_attempts: int = 20, interval: float = 30.0
    ) -> bool:
        """Asynchronously poll until documents are ready, exiting early if possible.

        Runs the blocking readiness check in a worker thread so the event
        loop stays responsive; returns True after max_attempts regardless.
        """
        for _attempt in range(max_attempts):
            ready = await asyncio.to_thread(self._check_documents_ready, datastore_id)
            if ready:
                return True
            await asyncio.sleep(interval)
        return True  # give up but don't fail hard

    def _run(self, query: str, agent_id: str, datastore_id: str | None = None) -> str:
        """Query the agent, optionally waiting on document readiness first.

        Returns the agent's answer as a string, or an error message string
        if the query fails.
        """
        if not agent_id:
            raise ValueError("Agent ID is required to query the Contextual AI agent")

        if datastore_id:
            ready = self._check_documents_ready(datastore_id)
            if not ready:
                # Detect whether we're already inside a running event loop.
                try:
                    loop = asyncio.get_running_loop()
                except RuntimeError:
                    loop = None
                if loop and loop.is_running():
                    # Already inside an event loop: re-entering it requires
                    # nest_asyncio; this is best-effort and failures are
                    # deliberately swallowed (query proceeds anyway).
                    try:
                        import nest_asyncio  # type: ignore[import-untyped]

                        nest_asyncio.apply(loop)
                        loop.run_until_complete(
                            self._wait_for_documents_async(datastore_id)
                        )
                    except Exception:  # noqa: S110
                        pass
                else:
                    # No running loop: safe to drive the wait ourselves.
                    asyncio.run(self._wait_for_documents_async(datastore_id))
            else:
                # Documents already ready; nothing to wait for.
                pass

        try:
            response = self.contextual_client.agents.query.create(
                agent_id=agent_id, messages=[{"role": "user", "content": query}]
            )
            # The response object's shape isn't fixed here; probe common
            # attributes in order and fall back to str().
            if hasattr(response, "content"):
                return response.content
            if hasattr(response, "message"):
                return (
                    response.message.content
                    if hasattr(response.message, "content")
                    else str(response.message)
                )
            if hasattr(response, "messages") and len(response.messages) > 0:
                last_message = response.messages[-1]
                return (
                    last_message.content
                    if hasattr(last_message, "content")
                    else str(last_message)
                )
            return str(response)
        except Exception as e:
            return f"Error querying Contextual AI agent: {e!s}"
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/contextualai_query_tool/contextual_query_tool.py",
"license": "MIT License",
"lines": 102,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/contextualai_rerank_tool/contextual_rerank_tool.py | from crewai.tools import BaseTool
from pydantic import BaseModel, Field
class ContextualAIRerankSchema(BaseModel):
    """Input schema for ContextualAIRerankTool."""

    # Query the documents are ranked against.
    query: str = Field(..., description="The search query to rerank documents against")
    # Candidate document texts, in their original order.
    documents: list[str] = Field(..., description="List of document texts to rerank")
    # Free-text steering instruction; only sent when provided.
    instruction: str | None = Field(
        default=None, description="Optional instruction for reranking behavior"
    )
    # Per-document metadata; must match `documents` in length when provided.
    metadata: list[str] | None = Field(
        default=None, description="Optional metadata for each document"
    )
    # Reranker model identifier sent to the API.
    model: str = Field(
        default="ctxl-rerank-en-v1-instruct", description="Reranker model to use"
    )
class ContextualAIRerankTool(BaseTool):
    """Tool to rerank documents using Contextual AI's instruction-following reranker."""

    name: str = "Contextual AI Document Reranker"
    description: str = (
        "Rerank documents using Contextual AI's instruction-following reranker"
    )
    args_schema: type[BaseModel] = ContextualAIRerankSchema
    api_key: str
    package_dependencies: list[str] = Field(
        default_factory=lambda: ["contextual-client"]
    )

    def _run(
        self,
        query: str,
        documents: list[str],
        instruction: str | None = None,
        metadata: list[str] | None = None,
        model: str = "ctxl-rerank-en-v1-instruct",
    ) -> str:
        """Rerank documents against the query via the Contextual AI REST API.

        Returns pretty-printed JSON from the rerank endpoint, or an error
        message string on any failure.
        """
        try:
            import json

            import requests

            request_headers = {
                "accept": "application/json",
                "content-type": "application/json",
                "authorization": f"Bearer {self.api_key}",
            }

            # Optional fields are only included when supplied.
            body: dict = {"query": query, "documents": documents, "model": model}
            if instruction:
                body["instruction"] = instruction
            if metadata:
                if len(metadata) != len(documents):
                    raise ValueError(
                        "Metadata list must have the same length as documents list"
                    )
                body["metadata"] = metadata

            response = requests.post(
                "https://api.contextual.ai/v1/rerank",
                json=body,
                headers=request_headers,
                timeout=30,
            )
            if response.status_code != 200:
                raise RuntimeError(
                    f"Reranker API returned status {response.status_code}: {response.text}"
                )
            return json.dumps(response.json(), indent=2)
        except Exception as e:
            return f"Failed to rerank documents: {e!s}"
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/contextualai_rerank_tool/contextual_rerank_tool.py",
"license": "MIT License",
"lines": 64,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/couchbase_tool/couchbase_tool.py | from collections.abc import Callable
import json
from typing import Any
try:
from couchbase.cluster import Cluster # type: ignore[import-untyped]
from couchbase.options import SearchOptions # type: ignore[import-untyped]
import couchbase.search as search # type: ignore[import-untyped]
from couchbase.vector_search import ( # type: ignore[import-untyped]
VectorQuery,
VectorSearch,
)
COUCHBASE_AVAILABLE = True
except ImportError:
COUCHBASE_AVAILABLE = False
search = Any
Cluster = Any
SearchOptions = Any
VectorQuery = Any
VectorSearch = Any
from crewai.tools import BaseTool
from pydantic import BaseModel, ConfigDict, Field, SkipValidation
class CouchbaseToolSchema(BaseModel):
    """Input for CouchbaseTool."""

    # Bare search query text only — callers should not pass a full question.
    query: str = Field(
        ...,
        description="The query to search retrieve relevant information from the Couchbase database. Pass only the query, not the question.",
    )
class CouchbaseFTSVectorSearchTool(BaseTool):
    """Tool to search the Couchbase database via FTS vector search.

    Embeds the query with `embedding_function` and runs a vector query
    against the configured search index, returning matching documents'
    stored fields as JSON.
    """

    model_config = ConfigDict(arbitrary_types_allowed=True)
    name: str = "CouchbaseFTSVectorSearchTool"
    description: str = "A tool to search the Couchbase database for relevant information on internal documents."
    args_schema: type[BaseModel] = CouchbaseToolSchema
    cluster: SkipValidation[Cluster] = Field(
        description="An instance of the Couchbase Cluster connected to the desired Couchbase server.",
    )
    collection_name: str = Field(
        description="The name of the Couchbase collection to search",
    )
    scope_name: str = Field(
        description="The name of the Couchbase scope containing the collection to search.",
    )
    bucket_name: str = Field(
        description="The name of the Couchbase bucket to search",
    )
    index_name: str = Field(
        description="The name of the Couchbase index to search",
    )
    embedding_key: str | None = Field(
        default="embedding",
        description="Name of the field in the search index that stores the vector",
    )
    scoped_index: bool = Field(
        default=True,
        description="Specify whether the index is scoped. Is True by default.",
    )
    limit: int | None = Field(default=3)
    embedding_function: SkipValidation[Callable[[str], list[float]]] = Field(
        description="A function that takes a string and returns a list of floats. This is used to embed the query before searching the database.",
    )

    def _check_bucket_exists(self) -> bool:
        """Check if the bucket exists in the linked Couchbase cluster."""
        bucket_manager = self.cluster.buckets()
        try:
            bucket_manager.get_bucket(self.bucket_name)
            return True
        except Exception:
            # Any SDK error here is treated as "bucket not found".
            return False

    def _check_scope_and_collection_exists(self) -> bool:
        """Check if the scope and collection exist in the linked Couchbase bucket.

        Returns:
            True when both the scope and the collection exist.

        Raises:
            ValueError: If either the scope or the collection is not found.
        """
        # Map each scope name to the names of its collections.
        scope_collection_map: dict[str, Any] = {}
        for scope in self._bucket.collections().get_all_scopes():
            scope_collection_map[scope.name] = [
                collection.name for collection in scope.collections
            ]

        # Membership test on the dict directly (no `.keys()` needed).
        if self.scope_name not in scope_collection_map:
            raise ValueError(
                f"Scope {self.scope_name} not found in Couchbase "
                f"bucket {self.bucket_name}"
            )

        if self.collection_name not in scope_collection_map[self.scope_name]:
            raise ValueError(
                f"Collection {self.collection_name} not found in scope "
                f"{self.scope_name} in Couchbase bucket {self.bucket_name}"
            )

        return True

    def _check_index_exists(self) -> bool:
        """Check if the Search index exists in the linked Couchbase cluster.

        Looks in the scope's indexes or the cluster's indexes depending on
        `scoped_index`.

        Raises:
            ValueError: If the index does not exist (or, for cluster-level
                lookups, if no cluster instance is available).
        """
        if self.scoped_index:
            all_indexes = [
                index.name for index in self._scope.search_indexes().get_all_indexes()
            ]
            if self.index_name not in all_indexes:
                raise ValueError(
                    f"Index {self.index_name} does not exist. "
                    " Please create the index before searching."
                )
        else:
            if not self.cluster:
                raise ValueError("Cluster instance must be provided")
            all_indexes = [
                index.name for index in self.cluster.search_indexes().get_all_indexes()
            ]
            if self.index_name not in all_indexes:
                raise ValueError(
                    f"Index {self.index_name} does not exist. "
                    " Please create the index before searching."
                )
        return True

    def __init__(self, **kwargs):
        """Initialize the CouchbaseFTSVectorSearchTool.

        Args:
            **kwargs: Keyword arguments to pass to the BaseTool constructor and
                to configure the Couchbase connection and search parameters.
                Requires 'cluster', 'bucket_name', 'scope_name',
                'collection_name', 'index_name', and 'embedding_function'.

        Raises:
            ValueError: If required parameters are missing, the Couchbase cluster
                cannot be reached, or the specified bucket, scope,
                collection, or index does not exist.
            ImportError: If the 'couchbase' package is missing and the user
                declines to install it.
        """
        super().__init__(**kwargs)
        if COUCHBASE_AVAILABLE:
            try:
                self._bucket = self.cluster.bucket(self.bucket_name)
                self._scope = self._bucket.scope(self.scope_name)
                self._collection = self._scope.collection(self.collection_name)
            except Exception as e:
                raise ValueError(
                    "Error connecting to couchbase. "
                    "Please check the connection and credentials"
                ) from e

            # Validate every piece of the search target up front so failures
            # happen at construction time, not on the first query.
            if not self._check_bucket_exists():
                raise ValueError(
                    f"Bucket {self.bucket_name} does not exist. "
                    " Please create the bucket before searching."
                )

            self._check_scope_and_collection_exists()
            self._check_index_exists()
        else:
            import click

            if click.confirm(
                "The 'couchbase' package is required to use the CouchbaseFTSVectorSearchTool. "
                "Would you like to install it?"
            ):
                import subprocess

                # NOTE(review): installing here does not re-import the
                # couchbase symbols in this process (COUCHBASE_AVAILABLE was
                # fixed at module import), so the tool still cannot connect
                # until the process restarts — confirm this UX is intended.
                subprocess.run(["uv", "add", "couchbase"], check=True)  # noqa: S607
            else:
                raise ImportError(
                    "The 'couchbase' package is required to use the CouchbaseFTSVectorSearchTool. "
                    "Please install it with: uv add couchbase"
                )

    def _run(self, query: str) -> str:
        """Execute a vector search query against the Couchbase index.

        Args:
            query: The search query string.

        Returns:
            A JSON string containing the search results, or an error message
            string if the search failed.
        """
        query_embedding = self.embedding_function(query)
        # Request all stored fields of each hit.
        fields = ["*"]

        search_req = search.SearchRequest.create(
            VectorSearch.from_vector_query(
                VectorQuery(self.embedding_key, query_embedding, self.limit)
            )
        )
        try:
            if self.scoped_index:
                search_iter = self._scope.search(
                    self.index_name,
                    search_req,
                    SearchOptions(
                        limit=self.limit,
                        fields=fields,
                    ),
                )
            else:
                search_iter = self.cluster.search(
                    self.index_name,
                    search_req,
                    SearchOptions(limit=self.limit, fields=fields),
                )
            # Comprehension instead of the original append loop (PERF401).
            json_response = [row.fields for row in search_iter.rows()]
        except Exception as e:
            return f"Search failed with error: {e}"
        return json.dumps(json_response, indent=2)
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/couchbase_tool/couchbase_tool.py",
"license": "MIT License",
"lines": 199,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/crewai_platform_tools/crewai_platform_action_tool.py | """Crewai Enterprise Tools."""
import json
import os
from typing import Any
from crewai.tools import BaseTool
from crewai.utilities.pydantic_schema_utils import create_model_from_schema
from pydantic import Field, create_model
import requests
from crewai_tools.tools.crewai_platform_tools.misc import (
get_platform_api_base_url,
get_platform_integration_token,
)
class CrewAIPlatformActionTool(BaseTool):
    """Tool that executes one platform action through the CrewAI API."""

    action_name: str = Field(default="", description="The name of the action")
    action_schema: dict[str, Any] = Field(
        default_factory=dict, description="The schema of the action"
    )

    def __init__(
        self,
        description: str,
        action_name: str,
        action_schema: dict[str, Any],
    ):
        # Build an args schema from the action's JSON-schema parameters,
        # falling back to an empty model when absent or malformed.
        params = action_schema.get("function", {}).get("parameters", {})
        if params and params.get("properties"):
            try:
                if "title" not in params:
                    params = {**params, "title": f"{action_name}Schema"}
                if "type" not in params:
                    params = {**params, "type": "object"}
                schema_model = create_model_from_schema(params)
            except Exception:
                schema_model = create_model(f"{action_name}Schema")
        else:
            schema_model = create_model(f"{action_name}Schema")

        super().__init__(
            name=action_name.lower().replace(" ", "_"),
            description=description,
            args_schema=schema_model,
        )
        self.action_name = action_name
        self.action_schema = action_schema

    def _run(self, **kwargs: Any) -> str:
        """Execute the platform action; returns JSON text or an error string."""
        try:
            # Drop None-valued arguments before sending.
            arguments = {key: value for key, value in kwargs.items() if value is not None}

            endpoint = (
                f"{get_platform_api_base_url()}/actions/{self.action_name}/execute"
            )
            request_headers = {
                "Authorization": f"Bearer {get_platform_integration_token()}",
                "Content-Type": "application/json",
            }
            body = {"integration": arguments if arguments else {"_noop": True}}

            response = requests.post(
                url=endpoint,
                headers=request_headers,
                json=body,
                timeout=60,
                verify=os.environ.get("CREWAI_FACTORY", "false").lower() != "true",
            )
            data = response.json()

            if not response.ok:
                # Prefer the API's structured error message when present.
                if isinstance(data, dict):
                    error_info = data.get("error", {})
                    if isinstance(error_info, dict):
                        error_message = error_info.get("message", json.dumps(data))
                    else:
                        error_message = str(error_info)
                else:
                    error_message = str(data)
                return f"API request failed: {error_message}"

            return json.dumps(data, indent=2)
        except Exception as e:
            return f"Error executing action {self.action_name}: {e!s}"
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/crewai_platform_tools/crewai_platform_action_tool.py",
"license": "MIT License",
"lines": 79,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/crewai_platform_tools/crewai_platform_tool_builder.py | """CrewAI platform tool builder for fetching and creating action tools."""
import logging
import os
from types import TracebackType
from typing import Any
from crewai.tools import BaseTool
import requests
from crewai_tools.tools.crewai_platform_tools.crewai_platform_action_tool import (
CrewAIPlatformActionTool,
)
from crewai_tools.tools.crewai_platform_tools.misc import (
get_platform_api_base_url,
get_platform_integration_token,
)
logger = logging.getLogger(__name__)
class CrewaiPlatformToolBuilder:
    """Builds platform tools from remote action schemas."""

    def __init__(
        self,
        apps: list[str],
    ) -> None:
        self._apps = apps
        self._actions_schema: dict[str, dict[str, Any]] = {}
        self._tools: list[BaseTool] | None = None

    def tools(self) -> list[BaseTool]:
        """Fetch actions (once) and return the built tools."""
        if self._tools is None:
            self._fetch_actions()
            self._create_tools()
        return [] if self._tools is None else self._tools

    def _fetch_actions(self) -> None:
        """Fetch action schemas from the platform API."""
        actions_url = f"{get_platform_api_base_url()}/actions"
        headers = {"Authorization": f"Bearer {get_platform_integration_token()}"}
        try:
            response = requests.get(
                actions_url,
                headers=headers,
                timeout=30,
                params={"apps": ",".join(self._apps)},
                verify=os.environ.get("CREWAI_FACTORY", "false").lower() != "true",
            )
            response.raise_for_status()
        except Exception as e:
            # Fetch failures are logged, leaving the schema map untouched.
            logger.error(f"Failed to fetch platform tools for apps {self._apps}: {e}")
            return

        payload = response.json()
        self._actions_schema = {}
        for app, action_list in payload.get("actions", {}).items():
            if not isinstance(action_list, list):
                continue
            for action in action_list:
                if not isinstance(action, dict):
                    continue
                action_name = action.get("name")
                if not action_name:
                    continue
                # Normalize each action into an OpenAI-function-style schema.
                self._actions_schema[action_name] = {
                    "function": {
                        "name": action_name,
                        "description": action.get(
                            "description", f"Execute {action_name}"
                        ),
                        "parameters": action.get("parameters", {}),
                        "app": app,
                    }
                }

    def _create_tools(self) -> None:
        """Create tool instances from fetched action schemas."""
        built: list[BaseTool] = []
        for action_name, action_schema in self._actions_schema.items():
            details = action_schema.get("function", {})
            built.append(
                CrewAIPlatformActionTool(
                    description=details.get("description", f"Execute {action_name}"),
                    action_name=action_name,
                    action_schema=action_schema,
                )
            )
        self._tools = built

    def __enter__(self) -> list[BaseTool]:
        """Enter context manager and return tools."""
        return self.tools()

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        """Exit context manager (no cleanup required)."""
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/crewai_platform_tools/crewai_platform_tool_builder.py",
"license": "MIT License",
"lines": 89,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/crewai_platform_tools/crewai_platform_tools.py | import logging
from crewai.tools import BaseTool
from crewai_tools.adapters.tool_collection import ToolCollection
from crewai_tools.tools.crewai_platform_tools.crewai_platform_tool_builder import (
CrewaiPlatformToolBuilder,
)
logger = logging.getLogger(__name__)
def CrewaiPlatformTools(  # noqa: N802
    apps: list[str],
) -> ToolCollection[BaseTool]:
    """Factory function that returns crewai platform tools.

    Args:
        apps: List of platform apps to get tools that are available on the platform.

    Returns:
        A list of BaseTool instances for platform actions
    """
    return CrewaiPlatformToolBuilder(apps=apps).tools()  # type: ignore
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/crewai_platform_tools/crewai_platform_tools.py",
"license": "MIT License",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/crewai_platform_tools/misc.py | import os
def get_platform_api_base_url() -> str:
    """Return the base URL of the platform integrations API.

    Honors the CREWAI_PLUS_URL environment variable, defaulting to the
    hosted app URL.
    """
    root = os.getenv("CREWAI_PLUS_URL", "https://app.crewai.com")
    return root + "/crewai_plus/api/v1/integrations"
def get_platform_integration_token() -> str:
    """Return the platform integration token from the environment.

    Returns:
        The value of CREWAI_PLATFORM_INTEGRATION_TOKEN.

    Raises:
        ValueError: If CREWAI_PLATFORM_INTEGRATION_TOKEN is unset or empty.
    """
    token = os.getenv("CREWAI_PLATFORM_INTEGRATION_TOKEN") or ""
    if not token:
        raise ValueError(
            "No platform integration token found, please set the CREWAI_PLATFORM_INTEGRATION_TOKEN environment variable"
        )
    return token  # TODO: Use context manager to get token
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/crewai_platform_tools/misc.py",
"license": "MIT License",
"lines": 13,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/csv_search_tool/csv_search_tool.py | from pydantic import BaseModel, Field
from crewai_tools.rag.data_types import DataType
from crewai_tools.tools.rag.rag_tool import RagTool
class FixedCSVSearchToolSchema(BaseModel):
    """Input for CSVSearchTool when the CSV file was preset at construction time."""

    search_query: str = Field(
        ...,
        description="Mandatory search query you want to use to search the CSV's content",
    )
class CSVSearchToolSchema(FixedCSVSearchToolSchema):
    """Input for CSVSearchTool: requires both the CSV location and the search query."""

    csv: str = Field(..., description="File path or URL of a CSV file to be searched")
class CSVSearchTool(RagTool):
    """Semantic (RAG) search over the contents of a CSV file."""

    name: str = "Search a CSV's content"
    description: str = (
        "A tool that can be used to semantic search a query from a CSV's content."
    )
    args_schema: type[BaseModel] = CSVSearchToolSchema

    def __init__(self, csv: str | None = None, **kwargs):
        """Optionally pre-index a CSV so callers only supply the query."""
        super().__init__(**kwargs)
        if csv is None:
            return
        self.add(csv)
        self.description = f"A tool that can be used to semantic search a query the {csv} CSV's content."
        self.args_schema = FixedCSVSearchToolSchema
        self._generate_description()

    def add(self, csv: str) -> None:
        """Index the given CSV file for semantic search."""
        super().add(csv, data_type=DataType.CSV)

    def _run(  # type: ignore[override]
        self,
        search_query: str,
        csv: str | None = None,
        similarity_threshold: float | None = None,
        limit: int | None = None,
    ) -> str:
        # Index an ad-hoc CSV first when one is passed at call time.
        if csv is not None:
            self.add(csv)
        return super()._run(
            query=search_query, similarity_threshold=similarity_threshold, limit=limit
        )
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/csv_search_tool/csv_search_tool.py",
"license": "MIT License",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/dalle_tool/dalle_tool.py | import json
from typing import Literal
from crewai.tools import BaseTool, EnvVar
from openai import Omit, OpenAI
from pydantic import BaseModel, Field
class ImagePromptSchema(BaseModel):
    """Input for Dall-E Tool."""

    image_description: str = Field(
        description="Description of the image to be generated by Dall-E."
    )
class DallETool(BaseTool):
    """Generate images with OpenAI's Dall-E and return the URL as JSON."""

    name: str = "Dall-E Tool"
    description: str = "Generates images using OpenAI's Dall-E model."
    args_schema: type[BaseModel] = ImagePromptSchema
    model: str = "dall-e-3"
    size: (
        Literal[
            "auto",
            "1024x1024",
            "1536x1024",
            "1024x1536",
            "256x256",
            "512x512",
            "1792x1024",
            "1024x1792",
        ]
        | None
    ) = "1024x1024"
    quality: (
        Literal["standard", "hd", "low", "medium", "high", "auto"] | None | Omit
    ) = "standard"
    n: int = 1
    env_vars: list[EnvVar] = Field(
        default_factory=lambda: [
            EnvVar(
                name="OPENAI_API_KEY",
                description="API key for OpenAI services",
                required=True,
            ),
        ]
    )

    def _run(self, **kwargs) -> str:
        """Generate an image and return a JSON string with URL and revised prompt."""
        # Client construction happens first; it resolves OPENAI_API_KEY itself.
        client = OpenAI()
        prompt = kwargs.get("image_description")
        if not prompt:
            return "Image description is required."
        response = client.images.generate(
            model=self.model,
            prompt=prompt,
            size=self.size,
            quality=self.quality,
            n=self.n,
        )
        if not response or not response.data:
            return "Failed to generate image."
        first_image = response.data[0]
        return json.dumps(
            {
                "image_url": first_image.url,
                "image_description": first_image.revised_prompt,
            }
        )
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/dalle_tool/dalle_tool.py",
"license": "MIT License",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/databricks_query_tool/databricks_query_tool.py | from __future__ import annotations
import os
import time
from typing import TYPE_CHECKING, Any, TypeGuard, TypedDict
from crewai.tools import BaseTool
from pydantic import BaseModel, Field, model_validator
if TYPE_CHECKING:
from databricks.sdk import WorkspaceClient
class ExecutionContext(TypedDict, total=False):
    """Optional catalog/schema overrides forwarded to statement execution."""

    # Databricks catalog the statement should run against.
    catalog: str
    # Databricks schema (namespace) within the catalog.
    schema: str
def _has_data_array(result: Any) -> TypeGuard[Any]:
    """Type guard: True when ``result.result.data_array`` exists and is not None.

    Args:
        result: The statement-execution result object to inspect.

    Returns:
        True if the nested ``data_array`` attribute is present and non-None.
    """
    inner = getattr(result, "result", None)
    if inner is None:
        return False
    return getattr(inner, "data_array", None) is not None
class DatabricksQueryToolSchema(BaseModel):
    """Input schema for DatabricksQueryTool."""

    query: str = Field(
        ..., description="SQL query to execute against the Databricks workspace table"
    )
    catalog: str | None = Field(
        None,
        description="Databricks catalog name (optional, defaults to configured catalog)",
    )
    db_schema: str | None = Field(
        None,
        description="Databricks schema name (optional, defaults to configured schema)",
    )
    warehouse_id: str | None = Field(
        None,
        description="Databricks SQL warehouse ID (optional, defaults to configured warehouse)",
    )
    row_limit: int | None = Field(
        1000, description="Maximum number of rows to return (default: 1000)"
    )

    @model_validator(mode="after")
    def validate_input(self) -> DatabricksQueryToolSchema:
        """Validate the query and append a LIMIT clause when none is present.

        Raises:
            ValueError: If the query is empty or whitespace-only.
        """
        import re

        # Ensure the query is not empty
        if not self.query or not self.query.strip():
            raise ValueError("Query cannot be empty")
        # Append a LIMIT only when the query contains no LIMIT keyword of its
        # own. The word-boundary match fixes the previous substring check,
        # which false-positived on identifiers merely containing "limit"
        # (e.g. a column named rate_limit) and silently skipped the row cap.
        if self.row_limit and not re.search(r"\blimit\b", self.query, re.IGNORECASE):
            self.query = f"{self.query.rstrip(';')} LIMIT {self.row_limit};"
        return self
class DatabricksQueryTool(BaseTool):
"""A tool for querying Databricks workspace tables using SQL.
This tool executes SQL queries against Databricks tables and returns the results.
It requires Databricks authentication credentials to be set as environment variables.
Authentication can be provided via:
- Databricks CLI profile: Set DATABRICKS_CONFIG_PROFILE environment variable
- Direct credentials: Set DATABRICKS_HOST and DATABRICKS_TOKEN environment variables
Example:
>>> tool = DatabricksQueryTool()
>>> results = tool.run(query="SELECT * FROM my_table LIMIT 10")
"""
name: str = "Databricks SQL Query"
description: str = (
"Execute SQL queries against Databricks workspace tables and return the results."
" Provide a 'query' parameter with the SQL query to execute."
)
args_schema: type[BaseModel] = DatabricksQueryToolSchema
# Optional default parameters
default_catalog: str | None = None
default_schema: str | None = None
default_warehouse_id: str | None = None
_workspace_client: WorkspaceClient | None = None
package_dependencies: list[str] = Field(default_factory=lambda: ["databricks-sdk"])
    def __init__(
        self,
        default_catalog: str | None = None,
        default_schema: str | None = None,
        default_warehouse_id: str | None = None,
        **kwargs: Any,
    ) -> None:
        """Initialize the DatabricksQueryTool.

        Args:
            default_catalog (Optional[str]): Default catalog to use for queries.
            default_schema (Optional[str]): Default schema to use for queries.
            default_warehouse_id (Optional[str]): Default SQL warehouse ID to use.
            **kwargs: Additional keyword arguments passed to BaseTool.

        Raises:
            ValueError: If no Databricks credentials are configured.
        """
        super().__init__(**kwargs)
        self.default_catalog = default_catalog
        self.default_schema = default_schema
        self.default_warehouse_id = default_warehouse_id
        # Fail fast: surface missing credentials at construction time.
        self._validate_credentials()
def _validate_credentials(self) -> None:
"""Validate that Databricks credentials are available."""
has_profile = "DATABRICKS_CONFIG_PROFILE" in os.environ
has_direct_auth = (
"DATABRICKS_HOST" in os.environ and "DATABRICKS_TOKEN" in os.environ
)
if not (has_profile or has_direct_auth):
raise ValueError(
"Databricks authentication credentials are required. "
"Set either DATABRICKS_CONFIG_PROFILE or both DATABRICKS_HOST and DATABRICKS_TOKEN environment variables."
)
    @property
    def workspace_client(self) -> WorkspaceClient:
        """Get or create a Databricks WorkspaceClient instance.

        The client is created lazily on first access and cached on the
        instance; credentials are resolved by the SDK from the environment.

        Raises:
            ImportError: If the databricks-sdk package is not installed.
        """
        if self._workspace_client is None:
            try:
                # Imported lazily so the tool can be constructed without the SDK.
                from databricks.sdk import WorkspaceClient

                self._workspace_client = WorkspaceClient()
            except ImportError as e:
                raise ImportError(
                    "`databricks-sdk` package not found, please run `uv add databricks-sdk`"
                ) from e
        return self._workspace_client
def _format_results(self, results: list[dict[str, Any]]) -> str:
"""Format query results as a readable string."""
if not results:
return "Query returned no results."
# Get column names from the first row
if not results[0]:
return "Query returned empty rows with no columns."
columns = list(results[0].keys())
# If we have rows but they're all empty, handle that case
if not columns:
return "Query returned rows but with no column data."
# Calculate column widths based on data
col_widths = {col: len(col) for col in columns}
for row in results:
for col in columns:
# Convert value to string and get its length
# Handle None values gracefully
value_str = str(row[col]) if row[col] is not None else "NULL"
col_widths[col] = max(col_widths[col], len(value_str))
# Create header row
header = " | ".join(f"{col:{col_widths[col]}}" for col in columns)
separator = "-+-".join("-" * col_widths[col] for col in columns)
# Format data rows
data_rows = []
for row in results:
# Handle None values by displaying "NULL"
row_values = {
col: str(row[col]) if row[col] is not None else "NULL"
for col in columns
}
data_row = " | ".join(
f"{row_values[col]:{col_widths[col]}}" for col in columns
)
data_rows.append(data_row)
# Add row count information
result_info = f"({len(results)} row{'s' if len(results) != 1 else ''} returned)"
# Combine all parts
return f"{header}\n{separator}\n" + "\n".join(data_rows) + f"\n\n{result_info}"
    def _run(
        self,
        **kwargs: Any,
    ) -> str:
        """Execute a SQL query against Databricks and return the results.

        Submits the statement to the configured SQL warehouse, polls for
        completion (5-minute timeout, 2-second interval), then defensively
        decodes the result payload — including heuristics for malformed /
        flattened row structures — and renders a text table.

        Args:
            query (str): SQL query to execute
            catalog (Optional[str]): Databricks catalog name
            db_schema (Optional[str]): Databricks schema name
            warehouse_id (Optional[str]): SQL warehouse ID
            row_limit (Optional[int]): Maximum number of rows to return

        Returns:
            str: Formatted query results, or a human-readable error message
            (this method reports failures as strings rather than raising).
        """
        try:
            # Get parameters with fallbacks to default values
            query = kwargs.get("query")
            catalog = kwargs.get("catalog") or self.default_catalog
            db_schema = kwargs.get("db_schema") or self.default_schema
            warehouse_id = kwargs.get("warehouse_id") or self.default_warehouse_id
            row_limit = kwargs.get("row_limit", 1000)
            # Validate schema and query
            validated_input = DatabricksQueryToolSchema(
                query=query,
                catalog=catalog,
                db_schema=db_schema,
                warehouse_id=warehouse_id,
                row_limit=row_limit,
            )
            # Extract validated parameters
            query = validated_input.query
            catalog = validated_input.catalog
            db_schema = validated_input.db_schema
            warehouse_id = validated_input.warehouse_id
            if warehouse_id is None:
                return "SQL warehouse ID must be provided either as a parameter or as a default."
            # Setup SQL context with catalog/schema if provided
            context: ExecutionContext = {}
            if catalog:
                context["catalog"] = catalog
            if db_schema:
                context["schema"] = db_schema
            # Execute query
            statement = self.workspace_client.statement_execution
            try:
                # Execute the statement
                execution = statement.execute_statement(
                    warehouse_id=warehouse_id, statement=query, **context
                )
                statement_id = execution.statement_id
            except Exception as execute_error:
                # Handle immediate execution errors
                return f"Error starting query execution: {execute_error!s}"
            # Poll for results with better error handling
            result = None
            timeout = 300  # 5 minutes timeout
            start_time = time.time()
            poll_count = 0
            previous_state = None  # Track previous state to detect changes
            if statement_id is None:
                return "Failed to retrieve statement ID after execution."
            while time.time() - start_time < timeout:
                poll_count += 1
                try:
                    # Get statement status
                    result = statement.get_statement(statement_id)
                    # Check if finished - be very explicit about state checking
                    if hasattr(result, "status") and hasattr(result.status, "state"):
                        # Convert to string to handle both string and enum
                        state_value = str(result.status.state)  # type: ignore[union-attr]
                        # Track state changes for debugging
                        if previous_state != state_value:
                            previous_state = state_value
                        # Check if state indicates completion
                        if "SUCCEEDED" in state_value:
                            break
                        if "FAILED" in state_value:
                            # Extract error message with more robust handling
                            error_info = "No detailed error info"
                            try:
                                # First try direct access to error.message
                                if (
                                    hasattr(result.status, "error")
                                    and result.status.error  # type: ignore[union-attr]
                                ):
                                    if hasattr(result.status.error, "message"):  # type: ignore[union-attr]
                                        error_info = result.status.error.message  # type: ignore[union-attr,assignment]
                                    # Some APIs may have a different structure
                                    elif hasattr(result.status.error, "error_message"):  # type: ignore[union-attr]
                                        error_info = result.status.error.error_message  # type: ignore[union-attr]
                                    # Last resort, try to convert the whole error object to string
                                    else:
                                        error_info = str(result.status.error)  # type: ignore[union-attr]
                            except Exception as err_extract_error:
                                # If all else fails, try to get any info we can
                                error_info = (
                                    f"Error details unavailable: {err_extract_error!s}"
                                )
                            # Return immediately on first FAILED state detection
                            return f"Query execution failed: {error_info}"
                        if "CANCELED" in state_value:
                            return "Query was canceled"
                except Exception as poll_error:
                    # Don't immediately fail - try again a few times
                    if poll_count > 3:
                        return f"Error checking query status: {poll_error!s}"
                # Wait before polling again
                time.sleep(2)
            # Check if we timed out
            if result is None:
                return "Query returned no result (likely timed out or failed)"
            if not hasattr(result, "status") or not hasattr(result.status, "state"):
                return "Query completed but returned an invalid result structure"
            # Convert state to string for comparison
            state_value = str(result.status.state)  # type: ignore[union-attr]
            if not any(
                state in state_value for state in ["SUCCEEDED", "FAILED", "CANCELED"]
            ):
                return f"Query timed out after 5 minutes (last state: {state_value})"
            # Get results - adapt this based on the actual structure of the result object
            chunk_results = []
            # Check if we have results and a schema in a very defensive way
            has_schema = (
                hasattr(result, "manifest")
                and result.manifest is not None
                and hasattr(result.manifest, "schema")
                and result.manifest.schema is not None
            )
            has_result = hasattr(result, "result") and result.result is not None
            if has_schema and has_result:
                try:
                    # Get schema for column names
                    columns = [col.name for col in result.manifest.schema.columns]  # type: ignore[union-attr]
                    # Debug info for schema
                    # Keep track of all dynamic columns we create
                    all_columns = set(columns)
                    # Dump the raw structure of result data to help troubleshoot
                    if _has_data_array(result):
                        # Add defensive check for None data_array
                        if result.result.data_array is None:
                            # Return empty result handling rather than trying to process null data
                            return "Query executed successfully (no data returned)"
                        # IMPROVED DETECTION LOGIC: Check if we're possibly dealing with rows where each item
                        # contains a single value or character (which could indicate incorrect row structure)
                        is_likely_incorrect_row_structure = False
                        # Only try to analyze sample if data_array exists and has content
                        if (
                            _has_data_array(result)
                            and len(result.result.data_array) > 0
                            and len(result.result.data_array[0]) > 0
                        ):
                            sample_size = min(20, len(result.result.data_array[0]))
                            if sample_size > 0:
                                single_char_count = 0
                                single_digit_count = 0
                                total_items = 0
                                for i in range(sample_size):
                                    val = result.result.data_array[0][i]
                                    total_items += 1
                                    if (
                                        isinstance(val, str)
                                        and len(val) == 1
                                        and not val.isdigit()
                                    ):
                                        single_char_count += 1
                                    elif (
                                        isinstance(val, str)
                                        and len(val) == 1
                                        and val.isdigit()
                                    ):
                                        single_digit_count += 1
                                # If a significant portion of the first values are single characters or digits,
                                # this likely indicates data is being incorrectly structured
                                if (
                                    total_items > 0
                                    and (single_char_count + single_digit_count)
                                    / total_items
                                    > 0.5
                                ):
                                    is_likely_incorrect_row_structure = True
                        # Additional check: if many rows have just 1 item when we expect multiple columns
                        rows_with_single_item = 0
                        if (
                            hasattr(result.result, "data_array")
                            and result.result.data_array  # type: ignore[union-attr]
                            and len(result.result.data_array) > 0  # type: ignore[union-attr]
                        ):
                            # NOTE(review): the locals() check guards against sample_size
                            # being undefined when the sampling branch above was skipped.
                            sample_size_for_rows = (
                                min(sample_size, len(result.result.data_array[0]))  # type: ignore[union-attr]
                                if "sample_size" in locals()
                                else min(20, len(result.result.data_array[0]))  # type: ignore[union-attr]
                            )
                            rows_with_single_item = sum(
                                1  # type: ignore[misc]
                                for row in result.result.data_array[0][:sample_size_for_rows]  # type: ignore[union-attr]
                                if isinstance(row, list) and len(row) == 1
                            )
                            if (
                                rows_with_single_item > sample_size_for_rows * 0.5
                                and len(columns) > 1
                            ):
                                is_likely_incorrect_row_structure = True
                        # Check if we're getting primarily single characters or the data structure seems off,
                        # we should use special handling
                        if (
                            "is_likely_incorrect_row_structure" in locals()
                            and is_likely_incorrect_row_structure
                        ):
                            needs_special_string_handling = True
                        else:
                            needs_special_string_handling = False
                        # Process results differently based on detection
                        if (
                            "needs_special_string_handling" in locals()
                            and needs_special_string_handling
                        ):
                            # We're dealing with data where the rows may be incorrectly structured
                            # Collect all values into a flat list
                            all_values: list[Any] = []
                            if (
                                hasattr(result.result, "data_array")
                                and result.result.data_array  # type: ignore[union-attr]
                            ):
                                # Flatten all values into a single list
                                for chunk in result.result.data_array:  # type: ignore[union-attr]
                                    for item in chunk:
                                        if isinstance(item, (list, tuple)):
                                            all_values.extend(item)
                                        else:
                                            all_values.append(item)
                            # Get the expected column count from schema
                            expected_column_count = len(columns)
                            # Try to reconstruct rows using pattern recognition
                            reconstructed_rows = []
                            # PATTERN RECOGNITION APPROACH
                            # Look for likely indicators of row boundaries in the data
                            # For Netflix data, we expect IDs as numbers, titles as text strings, etc.
                            # Use regex pattern to identify ID columns that likely start a new row
                            import re

                            id_pattern = re.compile(
                                r"^\d{5,9}$"
                            )  # Netflix IDs are often 5-9 digits
                            id_indices = []
                            for i, val in enumerate(all_values):
                                if isinstance(val, str) and id_pattern.match(val):
                                    # This value looks like an ID, might be the start of a row
                                    if i < len(all_values) - 1:
                                        next_few_values = all_values[i + 1 : i + 5]
                                        # If following values look like they could be part of a title
                                        if any(
                                            isinstance(v, str) and len(v) > 1
                                            for v in next_few_values
                                        ):
                                            id_indices.append(i)
                            if id_indices:
                                # If we found potential row starts, use them to extract rows
                                for i in range(len(id_indices)):
                                    start_idx = id_indices[i]
                                    end_idx = (
                                        id_indices[i + 1]
                                        if i + 1 < len(id_indices)
                                        else len(all_values)
                                    )
                                    # Extract values for this row
                                    row_values = all_values[start_idx:end_idx]
                                    # Special handling for Netflix title data
                                    # Titles might be split into individual characters
                                    if (
                                        "Title" in columns
                                        and len(row_values) > expected_column_count
                                    ):
                                        # Try to reconstruct by looking for patterns
                                        # We know ID is first, then Title (which may be split)
                                        # Then other fields like Genre, etc.
                                        # Take first value as ID
                                        row_dict = {columns[0]: row_values[0]}
                                        # Look for Genre or other non-title fields to determine where title ends
                                        title_end_idx = 1
                                        for j in range(2, min(100, len(row_values))):
                                            val = row_values[j]
                                            # Check for common genres or non-title markers
                                            if isinstance(val, str) and val in [
                                                "Comedy",
                                                "Drama",
                                                "Action",
                                                "Horror",
                                                "Thriller",
                                                "Documentary",
                                            ]:
                                                # Likely found the Genre field
                                                title_end_idx = j
                                                break
                                        # Reconstruct title from individual characters
                                        if title_end_idx > 1:
                                            title_chars = row_values[1:title_end_idx]
                                            # Check if they're individual characters
                                            if all(
                                                isinstance(c, str) and len(c) == 1
                                                for c in title_chars
                                            ):
                                                title = "".join(title_chars)
                                                row_dict["Title"] = title
                                                # Assign remaining values to columns
                                                remaining_values = row_values[
                                                    title_end_idx:
                                                ]
                                                for j, col_name in enumerate(
                                                    columns[2:], 2
                                                ):
                                                    if j - 2 < len(remaining_values):
                                                        row_dict[col_name] = (
                                                            remaining_values[j - 2]
                                                        )
                                                    else:
                                                        row_dict[col_name] = None
                                        else:
                                            # Fallback: simple mapping
                                            for j, col_name in enumerate(columns):
                                                if j < len(row_values):
                                                    row_dict[col_name] = row_values[j]
                                                else:
                                                    row_dict[col_name] = None
                                    else:
                                        # Standard mapping
                                        row_dict = {}
                                        for j, col_name in enumerate(columns):
                                            if j < len(row_values):
                                                row_dict[col_name] = row_values[j]
                                            else:
                                                row_dict[col_name] = None
                                    reconstructed_rows.append(row_dict)
                            else:
                                # More intelligent chunking - try to detect where columns like Title might be split
                                title_idx = (
                                    columns.index("Title") if "Title" in columns else -1
                                )
                                if title_idx >= 0:
                                    # Try to detect if title is split across multiple values
                                    i = 0
                                    while i < len(all_values):
                                        # Check if this could be an ID (start of a row)
                                        if isinstance(
                                            all_values[i], str
                                        ) and id_pattern.match(all_values[i]):
                                            row_dict = {columns[0]: all_values[i]}
                                            i += 1
                                            # Try to reconstruct title if it appears to be split
                                            title_chars = []
                                            while (
                                                i < len(all_values)
                                                and isinstance(all_values[i], str)
                                                and len(all_values[i]) <= 1
                                                and len(title_chars) < 100
                                            ):  # Cap title length
                                                title_chars.append(all_values[i])
                                                i += 1
                                            if title_chars:
                                                row_dict[columns[title_idx]] = "".join(
                                                    title_chars
                                                )
                                            # Add remaining fields
                                            for j in range(title_idx + 1, len(columns)):
                                                if i < len(all_values):
                                                    row_dict[columns[j]] = all_values[i]
                                                    i += 1
                                                else:
                                                    row_dict[columns[j]] = None
                                            reconstructed_rows.append(row_dict)
                                        else:
                                            i += 1
                                # If we still don't have rows, use simple chunking as fallback
                                if not reconstructed_rows:
                                    chunks = [
                                        all_values[i : i + expected_column_count]
                                        for i in range(
                                            0, len(all_values), expected_column_count
                                        )
                                    ]
                                    for chunk in chunks:
                                        # Skip chunks that seem to be partial/incomplete rows
                                        if (
                                            len(chunk) < expected_column_count * 0.75
                                        ):  # Allow for some missing values
                                            continue
                                        row_dict = {}
                                        # Map values to column names
                                        for i, col in enumerate(columns):
                                            if i < len(chunk):
                                                row_dict[col] = chunk[i]
                                            else:
                                                row_dict[col] = None
                                        reconstructed_rows.append(row_dict)
                            # Apply post-processing to fix known issues
                            if reconstructed_rows and "Title" in columns:
                                for row in reconstructed_rows:
                                    # Fix titles that might still have issues
                                    if (
                                        isinstance(row.get("Title"), str)
                                        and len(row.get("Title")) <= 1  # type: ignore[arg-type]
                                    ):
                                        # This is likely still a fragmented title - mark as potentially incomplete
                                        row["Title"] = f"[INCOMPLETE] {row.get('Title')}"
                            # Ensure we respect the row limit
                            if row_limit and len(reconstructed_rows) > row_limit:
                                reconstructed_rows = reconstructed_rows[:row_limit]
                            chunk_results = reconstructed_rows
                        else:
                            # Process normal result structure as before
                            # Check different result structures
                            if (
                                hasattr(result.result, "data_array")
                                and result.result.data_array  # type: ignore[union-attr]
                            ):
                                # Check if data appears to be malformed within chunks
                                for _chunk_idx, chunk in enumerate(
                                    result.result.data_array  # type: ignore[union-attr]
                                ):
                                    # Check if chunk might actually contain individual columns of a single row
                                    # This is another way data might be malformed - check the first few values
                                    if len(chunk) > 0 and len(columns) > 1:
                                        # If there seems to be a mismatch between chunk structure and expected columns
                                        first_few_values = chunk[: min(5, len(chunk))]
                                        if all(
                                            isinstance(val, (str, int, float))
                                            and not isinstance(val, (list, dict))
                                            for val in first_few_values
                                        ):
                                            if (
                                                len(chunk) > len(columns) * 3
                                            ):  # Heuristic: if chunk has way more items than columns
                                                # This chunk might actually be values of multiple rows - try to reconstruct
                                                values = chunk  # All values in this chunk
                                                reconstructed_rows = []
                                                # Try to create rows based on expected column count
                                                for i in range(
                                                    0, len(values), len(columns)
                                                ):
                                                    if i + len(columns) <= len(
                                                        values
                                                    ):  # Ensure we have enough values
                                                        row_values = values[
                                                            i : i + len(columns)
                                                        ]
                                                        row_dict = {
                                                            col: val
                                                            for col, val in zip(
                                                                columns,
                                                                row_values,
                                                                strict=False,
                                                            )
                                                        }
                                                        reconstructed_rows.append(row_dict)
                                                if reconstructed_rows:
                                                    chunk_results.extend(reconstructed_rows)
                                                    continue  # Skip normal processing for this chunk
                                    # Special case: when chunk contains exactly the right number of values for a single row
                                    # This handles the case where instead of a list of rows, we just got all values in a flat list
                                    if all(
                                        isinstance(val, (str, int, float))
                                        and not isinstance(val, (list, dict))
                                        for val in chunk
                                    ):
                                        if len(chunk) == len(columns) or (
                                            len(chunk) > 0
                                            and len(chunk) % len(columns) == 0
                                        ):
                                            # Process flat list of values as rows
                                            for i in range(0, len(chunk), len(columns)):
                                                row_values = chunk[i : i + len(columns)]
                                                if len(row_values) == len(
                                                    columns
                                                ):  # Only process complete rows
                                                    row_dict = {
                                                        col: val
                                                        for col, val in zip(
                                                            columns,
                                                            row_values,
                                                            strict=False,
                                                        )
                                                    }
                                                    chunk_results.append(row_dict)
                                            # Skip regular row processing for this chunk
                                            continue
                                    # Normal processing for typical row structure
                                    for _row_idx, row in enumerate(chunk):
                                        # Ensure row is actually a collection of values
                                        if not isinstance(row, (list, tuple, dict)):
                                            # This might be a single value; skip it or handle specially
                                            continue
                                        # Convert each row to a dictionary with column names as keys
                                        row_dict = {}
                                        # Handle dict rows directly
                                        if isinstance(row, dict):
                                            # Use the existing column mapping
                                            row_dict = dict(row)
                                        elif isinstance(row, (list, tuple)):
                                            # Map list of values to columns
                                            for i, val in enumerate(row):
                                                if (
                                                    i < len(columns)
                                                ):  # Only process if we have a matching column
                                                    row_dict[columns[i]] = val
                                                else:
                                                    # Extra values without column names
                                                    dynamic_col = f"Column_{i}"
                                                    row_dict[dynamic_col] = val
                                                    all_columns.add(dynamic_col)
                                        # If we have fewer values than columns, set missing values to None
                                        for col in columns:
                                            if col not in row_dict:
                                                row_dict[col] = None
                                        chunk_results.append(row_dict)
                            elif hasattr(result.result, "data") and result.result.data:  # type: ignore[union-attr]
                                # Alternative data structure
                                for _row_idx, row in enumerate(result.result.data):  # type: ignore[union-attr]
                                    # Debug info
                                    # Safely create dictionary matching column names to values
                                    row_dict = {}
                                    for i, val in enumerate(row):
                                        if i < len(
                                            columns
                                        ):  # Only process if we have a matching column
                                            row_dict[columns[i]] = val
                                        else:
                                            # Extra values without column names
                                            dynamic_col = f"Column_{i}"
                                            row_dict[dynamic_col] = val
                                            all_columns.add(dynamic_col)
                                    # If we have fewer values than columns, set missing values to None
                                    for i, col in enumerate(columns):
                                        if i >= len(row):
                                            row_dict[col] = None
                                    chunk_results.append(row_dict)
                            # After processing all rows, ensure all rows have all columns
                            normalized_results = []
                            for row in chunk_results:
                                # Create a new row with all columns, defaulting to None for missing ones
                                normalized_row = {
                                    col: row.get(col, None) for col in all_columns
                                }
                                normalized_results.append(normalized_row)
                            # Replace the original results with normalized ones
                            chunk_results = normalized_results
                except Exception as results_error:
                    # Enhanced error message with more context
                    import traceback

                    error_details = traceback.format_exc()
                    return f"Error processing query results: {results_error!s}\n\nDetails:\n{error_details}"
            # If we have no results but the query succeeded (e.g., for DDL statements)
            if not chunk_results and hasattr(result, "status"):
                state_value = str(result.status.state)  # type: ignore[union-attr]
                if "SUCCEEDED" in state_value:
                    return "Query executed successfully (no results to display)"
            # Format and return results
            return self._format_results(chunk_results)  # type: ignore[arg-type]
        except Exception as e:
            # Include more details in the error message to help with debugging
            import traceback

            error_details = traceback.format_exc()
            return (
                f"Error executing Databricks query: {e!s}\n\nDetails:\n{error_details}"
            )
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/databricks_query_tool/databricks_query_tool.py",
"license": "MIT License",
"lines": 722,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/directory_read_tool/directory_read_tool.py | import os
from typing import Any
from crewai.tools import BaseTool
from pydantic import BaseModel, Field
class FixedDirectoryReadToolSchema(BaseModel):
    """Input for DirectoryReadTool when the directory was preset at construction time (no arguments needed)."""
class DirectoryReadToolSchema(FixedDirectoryReadToolSchema):
    """Input for DirectoryReadTool: the directory to list must be supplied."""

    directory: str = Field(..., description="Mandatory directory to list content")
class DirectoryReadTool(BaseTool):
    """Recursively list every file under a directory."""

    name: str = "List files in directory"
    description: str = (
        "A tool that can be used to recursively list a directory's content."
    )
    args_schema: type[BaseModel] = DirectoryReadToolSchema
    # Optional preset directory; when set, the agent no longer supplies one.
    directory: str | None = None

    def __init__(self, directory: str | None = None, **kwargs):
        """Optionally fix the directory at construction time."""
        super().__init__(**kwargs)
        if directory is not None:
            self.directory = directory
            self.description = f"A tool that can be used to list {directory}'s content."
            self.args_schema = FixedDirectoryReadToolSchema
            self._generate_description()

    def _run(
        self,
        **kwargs: Any,
    ) -> Any:
        """List all files under the provided or preset directory.

        Raises:
            ValueError: If no directory was provided or configured.
        """
        directory: str | None = kwargs.get("directory", self.directory)
        if directory is None:
            raise ValueError("Directory must be provided.")
        # endswith() avoids the IndexError the old `directory[-1]` check hit
        # on an empty string.
        if directory.endswith("/"):
            directory = directory[:-1]
        # os.path.relpath keeps nested paths intact; the previous
        # str.replace(directory, "") removed *every* occurrence of the
        # directory name, corrupting paths that repeat it.
        files_list = [
            f"{directory}/{os.path.relpath(os.path.join(root, filename), directory)}"
            for root, _dirs, files in os.walk(directory)
            for filename in files
        ]
        files = "\n- ".join(files_list)
        return f"File paths: \n-{files}"
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/directory_read_tool/directory_read_tool.py",
"license": "MIT License",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/directory_search_tool/directory_search_tool.py | from pydantic import BaseModel, Field
from crewai_tools.rag.data_types import DataType
from crewai_tools.tools.rag.rag_tool import RagTool
class FixedDirectorySearchToolSchema(BaseModel):
    """Input for DirectorySearchTool when the directory was preset at construction time."""

    search_query: str = Field(
        ...,
        description="Mandatory search query you want to use to search the directory's content",
    )
class DirectorySearchToolSchema(FixedDirectorySearchToolSchema):
    """Input for DirectorySearchTool: requires both the directory and the search query."""

    directory: str = Field(..., description="Mandatory directory you want to search")
class DirectorySearchTool(RagTool):
    """Semantic (RAG) search across the files of a directory."""

    name: str = "Search a directory's content"
    description: str = (
        "A tool that can be used to semantic search a query from a directory's content."
    )
    args_schema: type[BaseModel] = DirectorySearchToolSchema

    def __init__(self, directory: str | None = None, **kwargs):
        """Optionally pre-index a directory so callers only supply the query."""
        super().__init__(**kwargs)
        if directory is None:
            return
        self.add(directory)
        self.description = f"A tool that can be used to semantic search a query the {directory} directory's content."
        self.args_schema = FixedDirectorySearchToolSchema
        self._generate_description()

    def add(self, directory: str) -> None:
        """Index the directory's files for semantic search."""
        super().add(directory, data_type=DataType.DIRECTORY)

    def _run(  # type: ignore[override]
        self,
        search_query: str,
        directory: str | None = None,
        similarity_threshold: float | None = None,
        limit: int | None = None,
    ) -> str:
        # Index an ad-hoc directory first when one is passed at call time.
        if directory is not None:
            self.add(directory)
        return super()._run(
            query=search_query, similarity_threshold=similarity_threshold, limit=limit
        )
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/directory_search_tool/directory_search_tool.py",
"license": "MIT License",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py | from typing import Any
from pydantic import BaseModel, Field
from crewai_tools.rag.data_types import DataType
from crewai_tools.tools.rag.rag_tool import RagTool
class FixedDOCXSearchToolSchema(BaseModel):
    """Input for DOCXSearchTool."""

    # NOTE(review): annotated Optional yet marked required (`...`) — the key
    # must always be supplied even if its value is None; confirm intentional.
    docx: str | None = Field(
        ..., description="File path or URL of a DOCX file to be searched"
    )
    # Free-text query used for semantic search over the document's content.
    search_query: str = Field(
        ...,
        description="Mandatory search query you want to use to search the DOCX's content",
    )
class DOCXSearchToolSchema(FixedDOCXSearchToolSchema):
    """Input for DOCXSearchTool."""

    # Redeclared with the same definition as in the parent class; the field
    # is unchanged, the redeclaration only restates it on this schema.
    search_query: str = Field(
        ...,
        description="Mandatory search query you want to use to search the DOCX's content",
    )
class DOCXSearchTool(RagTool):
    """Semantic search over the contents of a DOCX document.

    When a document is supplied at construction time it is indexed
    immediately and the argument schema is narrowed accordingly.
    """

    name: str = "Search a DOCX's content"
    description: str = (
        "A tool that can be used to semantic search a query from a DOCX's content."
    )
    args_schema: type[BaseModel] = DOCXSearchToolSchema

    def __init__(self, docx: str | None = None, **kwargs):
        super().__init__(**kwargs)
        if docx is None:
            return
        # Pre-index the document and switch to the fixed schema.
        self.add(docx)
        self.description = f"A tool that can be used to semantic search a query the {docx} DOCX's content."
        self.args_schema = FixedDOCXSearchToolSchema
        self._generate_description()

    def add(self, docx: str) -> None:
        """Index the DOCX at *docx* (path or URL) for semantic search."""
        super().add(docx, data_type=DataType.DOCX)

    def _run(  # type: ignore[override]
        self,
        search_query: str,
        docx: str | None = None,
        similarity_threshold: float | None = None,
        limit: int | None = None,
    ) -> Any:
        """Search indexed DOCX content, first indexing *docx* if one is given."""
        if docx is not None:
            self.add(docx)
        return super()._run(
            query=search_query, similarity_threshold=similarity_threshold, limit=limit
        )
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/docx_search_tool/docx_search_tool.py",
"license": "MIT License",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/exa_tools/exa_search_tool.py | from __future__ import annotations
from builtins import type as type_
import os
from typing import Any, TypedDict
from crewai.tools import BaseTool, EnvVar
from pydantic import BaseModel, ConfigDict, Field
from typing_extensions import Required
class SearchParams(TypedDict, total=False):
    """Parameters for Exa search API."""

    # `type` must always be present (even if None); all other keys are
    # optional because of total=False.
    type: Required[str | None]
    start_published_date: str
    end_published_date: str
    include_domains: list[str]
class EXABaseToolSchema(BaseModel):
    """Input schema for EXASearchTool."""

    search_query: str = Field(
        ..., description="Mandatory search query you want to use to search the internet"
    )
    # Optional published-date bounds forwarded to the Exa API as-is.
    start_published_date: str | None = Field(
        None, description="Start date for the search"
    )
    end_published_date: str | None = Field(None, description="End date for the search")
    # Optional allow-list of domains to restrict results to.
    include_domains: list[str] | None = Field(
        None, description="List of domains to include in the search"
    )
class EXASearchTool(BaseTool):
    """Search the internet using the Exa API.

    Args:
        content: When True, fetch page contents along with search results
            (uses ``search_and_contents`` instead of ``search``).
        summary: When True and ``content`` is True, request result summaries.
        type: Exa search type forwarded to the API. Defaults to "auto".
        **kwargs: Additional keyword arguments passed to BaseTool.
    """

    model_config = ConfigDict(arbitrary_types_allowed=True)
    name: str = "EXASearchTool"
    description: str = "Search the internet using Exa"
    args_schema: type_[BaseModel] = EXABaseToolSchema
    client: Any | None = None
    content: bool | None = False
    summary: bool | None = False
    type: str | None = "auto"
    package_dependencies: list[str] = Field(default_factory=lambda: ["exa_py"])
    api_key: str | None = Field(
        default_factory=lambda: os.getenv("EXA_API_KEY"),
        description="API key for Exa services",
        json_schema_extra={"required": False},
    )
    base_url: str | None = Field(
        default_factory=lambda: os.getenv("EXA_BASE_URL"),
        description="API server url",
        json_schema_extra={"required": False},
    )
    env_vars: list[EnvVar] = Field(
        default_factory=lambda: [
            EnvVar(
                name="EXA_API_KEY",
                description="API key for Exa services",
                required=False,
            ),
            EnvVar(
                name="EXA_BASE_URL",
                description="API url for the Exa services",
                required=False,
            ),
        ]
    )

    def __init__(
        self,
        content: bool | None = False,
        summary: bool | None = False,
        type: str | None = "auto",
        **kwargs,
    ):
        super().__init__(
            **kwargs,
        )
        try:
            from exa_py import Exa
        except ImportError as e:
            import click

            if click.confirm(
                "You are missing the 'exa_py' package. Would you like to install it?"
            ):
                import subprocess

                subprocess.run(["uv", "add", "exa_py"], check=True)  # noqa: S607
                # Re-import after installation
                from exa_py import Exa
            else:
                # Fix: raise an actionable install instruction instead of
                # re-using the interactive prompt text as the error message.
                raise ImportError(
                    "`exa_py` package not found, please run `uv add exa_py`"
                ) from e
        # Only pass kwargs that were actually configured so Exa's own
        # defaults apply otherwise.
        client_kwargs: dict[str, str] = {}
        if self.api_key:
            client_kwargs["api_key"] = self.api_key
        if self.base_url:
            client_kwargs["base_url"] = self.base_url
        self.client = Exa(**client_kwargs)
        self.content = content
        self.summary = summary
        self.type = type

    def _run(
        self,
        search_query: str,
        start_published_date: str | None = None,
        end_published_date: str | None = None,
        include_domains: list[str] | None = None,
    ) -> Any:
        """Execute the search and return the raw Exa response object.

        Raises:
            ValueError: If the Exa client was never initialized.
        """
        if self.client is None:
            raise ValueError("Client not initialized")
        search_params: SearchParams = {
            "type": self.type,
        }
        # Only forward filters the caller actually provided.
        if start_published_date:
            search_params["start_published_date"] = start_published_date
        if end_published_date:
            search_params["end_published_date"] = end_published_date
        if include_domains:
            search_params["include_domains"] = include_domains
        if self.content:
            results = self.client.search_and_contents(
                search_query, summary=self.summary, **search_params
            )
        else:
            results = self.client.search(search_query, **search_params)
        return results
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/exa_tools/exa_search_tool.py",
"license": "MIT License",
"lines": 117,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/file_read_tool/file_read_tool.py | from typing import Any
from crewai.tools import BaseTool
from pydantic import BaseModel, Field
class FileReadToolSchema(BaseModel):
    """Input for FileReadTool."""

    file_path: str = Field(..., description="Mandatory file full path to read the file")
    # 1-indexed; line 1 is the first line of the file.
    start_line: int | None = Field(
        1, description="Line number to start reading from (1-indexed)"
    )
    line_count: int | None = Field(
        None, description="Number of lines to read. If None, reads the entire file"
    )
class FileReadTool(BaseTool):
    """A tool for reading file contents.

    This tool inherits its schema handling from BaseTool to avoid recursive schema
    definition issues. The args_schema is set to FileReadToolSchema which defines
    the required file_path parameter. The schema should not be overridden in the
    constructor as it would break the inheritance chain and cause infinite loops.

    The tool supports two ways of specifying the file path:
    1. At construction time via the file_path parameter
    2. At runtime via the file_path parameter in the tool's input

    Args:
        file_path (Optional[str]): Path to the file to be read. If provided,
            this becomes the default file path for the tool.
        **kwargs: Additional keyword arguments passed to BaseTool.

    Example:
        >>> tool = FileReadTool(file_path="/path/to/file.txt")
        >>> content = tool.run()  # Reads /path/to/file.txt
        >>> content = tool.run(file_path="/path/to/other.txt")  # Reads other.txt
        >>> content = tool.run(
        ...     file_path="/path/to/file.txt", start_line=100, line_count=50
        ... )  # Reads lines 100-149
    """

    name: str = "Read a file's content"
    description: str = "A tool that reads the content of a file. To use this tool, provide a 'file_path' parameter with the path to the file you want to read. Optionally, provide 'start_line' to start reading from a specific line and 'line_count' to limit the number of lines read."
    args_schema: type[BaseModel] = FileReadToolSchema
    file_path: str | None = None

    def __init__(self, file_path: str | None = None, **kwargs: Any) -> None:
        """Initialize the FileReadTool.

        Args:
            file_path (Optional[str]): Path to the file to be read. If provided,
                this becomes the default file path for the tool.
            **kwargs: Additional keyword arguments passed to BaseTool.
        """
        if file_path is not None:
            kwargs["description"] = (
                f"A tool that reads file content. The default file is {file_path}, but you can provide a different 'file_path' parameter to read another file. You can also specify 'start_line' and 'line_count' to read specific parts of the file."
            )
        super().__init__(**kwargs)
        self.file_path = file_path

    def _run(
        self,
        file_path: str | None = None,
        start_line: int | None = 1,
        line_count: int | None = None,
    ) -> str:
        """Read a file, optionally restricted to a window of lines.

        Returns the file content (or the selected lines) as a string, or a
        human-readable "Error: ..." message on failure.
        """
        file_path = file_path or self.file_path
        # Treat an explicit None (or 0) start_line as "from the beginning".
        start_line = start_line or 1
        if file_path is None:
            return "Error: No file path provided. Please provide a file path either in the constructor or as an argument."
        # Fix: a request for zero lines yields no content. The previous
        # `line_count = line_count or None` coercion conflated 0 with None
        # and returned the entire file instead.
        if line_count == 0:
            return ""
        try:
            with open(file_path, "r") as file:
                # Fast path: whole-file read without line iteration.
                if start_line == 1 and line_count is None:
                    return file.read()
                start_idx = max(start_line - 1, 0)
                selected_lines = [
                    line
                    for i, line in enumerate(file)
                    if i >= start_idx
                    and (line_count is None or i < start_idx + line_count)
                ]
                if not selected_lines and start_idx > 0:
                    return f"Error: Start line {start_line} exceeds the number of lines in the file."
                return "".join(selected_lines)
        except FileNotFoundError:
            return f"Error: File not found at path: {file_path}"
        except PermissionError:
            return f"Error: Permission denied when trying to read file: {file_path}"
        except Exception as e:
            return f"Error: Failed to read file {file_path}. {e!s}"
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/file_read_tool/file_read_tool.py",
"license": "MIT License",
"lines": 81,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/file_writer_tool/file_writer_tool.py | import os
from typing import Any
from crewai.tools import BaseTool
from pydantic import BaseModel
def strtobool(val) -> bool:
    """Coerce *val* to a bool.

    Booleans pass through unchanged. Strings are matched case-insensitively
    against the usual truthy ("y", "yes", "t", "true", "on", "1") and falsy
    ("n", "no", "f", "false", "off", "0") spellings.

    Raises:
        ValueError: If the string is not a recognized boolean spelling.
    """
    if isinstance(val, bool):
        return val
    lowered = val.lower()
    if lowered in {"y", "yes", "t", "true", "on", "1"}:
        return True
    if lowered in {"n", "no", "f", "false", "off", "0"}:
        return False
    raise ValueError(f"invalid value to cast to bool: {lowered!r}")
class FileWriterToolInput(BaseModel):
    """Input schema for FileWriterTool."""

    filename: str
    # Target directory; defaults to the current working directory.
    directory: str | None = "./"
    # Accepts a bool or a truthy/falsy string (parsed with strtobool).
    overwrite: str | bool = False
    content: str
class FileWriterTool(BaseTool):
    """Write text content to a file, creating the target directory if needed."""

    name: str = "File Writer Tool"
    description: str = "A tool to write content to a specified file. Accepts filename, content, and optionally a directory path and overwrite flag as input."
    args_schema: type[BaseModel] = FileWriterToolInput

    def _run(self, **kwargs: Any) -> str:
        """Write ``content`` to ``directory``/``filename``.

        Returns a human-readable status message instead of raising, so the
        calling agent can react to failures.
        """
        try:
            # Create the directory if it doesn't exist
            if kwargs.get("directory") and not os.path.exists(kwargs["directory"]):
                os.makedirs(kwargs["directory"])

            # Construct the full path
            filepath = os.path.join(kwargs.get("directory") or "", kwargs["filename"])

            # Fix: default to False when the optional flag is omitted instead
            # of raising KeyError (the schema declares overwrite as optional).
            overwrite = strtobool(kwargs.get("overwrite", False))

            # Check if file exists and overwrite is not allowed
            if os.path.exists(filepath) and not overwrite:
                return f"File {filepath} already exists and overwrite option was not passed."

            # "x" mode fails atomically if the file appeared after the check above.
            mode = "w" if overwrite else "x"
            with open(filepath, mode) as file:
                file.write(kwargs["content"])
            return f"Content successfully written to {filepath}"
        except FileExistsError:
            return (
                f"File {filepath} already exists and overwrite option was not passed."
            )
        except KeyError as e:
            return f"An error occurred while accessing key: {e!s}"
        except Exception as e:
            return f"An error occurred while writing to the file: {e!s}"
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/file_writer_tool/file_writer_tool.py",
"license": "MIT License",
"lines": 47,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/files_compressor_tool/files_compressor_tool.py | import os
import tarfile
import zipfile
from crewai.tools import BaseTool
from pydantic import BaseModel, Field
class FileCompressorToolInput(BaseModel):
    """Input schema for FileCompressorTool."""

    input_path: str = Field(
        ..., description="Path to the file or directory to compress."
    )
    # When omitted, a name is derived from input_path in the current directory.
    output_path: str | None = Field(
        default=None, description="Optional output archive filename."
    )
    overwrite: bool = Field(
        default=False,
        description="Whether to overwrite the archive if it already exists.",
    )
    format: str = Field(
        default="zip",
        description="Compression format ('zip', 'tar', 'tar.gz', 'tar.bz2', 'tar.xz').",
    )
class FileCompressorTool(BaseTool):
    """Compress a file or directory into a zip or tar archive.

    Supported formats: 'zip', 'tar', 'tar.gz', 'tar.bz2', 'tar.xz'.
    """

    name: str = "File Compressor Tool"
    # Fix: the old description claimed only .zip was supported while the tool
    # also handles every tar variant, which misled tool selection.
    description: str = (
        "Compresses a file or directory into an archive "
        "(supports 'zip', 'tar', 'tar.gz', 'tar.bz2' and 'tar.xz'). "
        "Useful for archiving logs, documents, or backups."
    )
    args_schema: type[BaseModel] = FileCompressorToolInput

    def _run(
        self,
        input_path: str,
        output_path: str | None = None,
        overwrite: bool = False,
        format: str = "zip",
    ) -> str:
        """Compress input_path into output_path and return a status message."""
        if not os.path.exists(input_path):
            return f"Input path '{input_path}' does not exist."
        if not output_path:
            output_path = self._generate_output_path(input_path, format)
        format_extension = {
            "zip": ".zip",
            "tar": ".tar",
            "tar.gz": ".tar.gz",
            "tar.bz2": ".tar.bz2",
            "tar.xz": ".tar.xz",
        }
        if format not in format_extension:
            return f"Compression format '{format}' is not supported. Allowed formats: {', '.join(format_extension.keys())}"
        if not output_path.endswith(format_extension[format]):
            return f"Error: If '{format}' format is chosen, output file must have a '{format_extension[format]}' extension."
        if not self._prepare_output(output_path, overwrite):
            return (
                f"Output '{output_path}' already exists and overwrite is set to False."
            )
        try:
            # zip has its own writer; every tar variant shares one helper.
            if format == "zip":
                self._compress_zip(input_path, output_path)
            else:
                self._compress_tar(input_path, output_path, format)
            return f"Successfully compressed '{input_path}' into '{output_path}'"
        except FileNotFoundError:
            return f"Error: File not found at path: {input_path}"
        except PermissionError:
            return f"Error: Permission denied when accessing '{input_path}' or writing '{output_path}'"
        except Exception as e:
            return f"An unexpected error occurred during compression: {e!s}"

    @staticmethod
    def _generate_output_path(input_path: str, format: str) -> str:
        """Generates output path based on input path and format."""
        if os.path.isfile(input_path):
            base_name = os.path.splitext(os.path.basename(input_path))[
                0
            ]  # Remove extension
        else:
            base_name = os.path.basename(os.path.normpath(input_path))  # Directory name
        return os.path.join(os.getcwd(), f"{base_name}.{format}")

    @staticmethod
    def _prepare_output(output_path: str, overwrite: bool) -> bool:
        """Ensures output path is ready for writing (creates parent dirs)."""
        output_dir = os.path.dirname(output_path)
        if output_dir and not os.path.exists(output_dir):
            os.makedirs(output_dir)
        # Refuse to clobber an existing archive unless overwrite was requested.
        return not (os.path.exists(output_path) and not overwrite)

    @staticmethod
    def _compress_zip(input_path: str, output_path: str):
        """Compresses input into a zip archive."""
        with zipfile.ZipFile(output_path, "w", zipfile.ZIP_DEFLATED) as zipf:
            if os.path.isfile(input_path):
                zipf.write(input_path, os.path.basename(input_path))
            else:
                for root, _, files in os.walk(input_path):
                    for file in files:
                        full_path = os.path.join(root, file)
                        # Store entries relative to the input dir so the
                        # archive does not embed absolute paths.
                        arcname = os.path.relpath(full_path, start=input_path)
                        zipf.write(full_path, arcname)

    @staticmethod
    def _compress_tar(input_path: str, output_path: str, format: str):
        """Compresses input into a tar archive with the given format."""
        format_mode = {
            "tar": "w",
            "tar.gz": "w:gz",
            "tar.bz2": "w:bz2",
            "tar.xz": "w:xz",
        }
        if format not in format_mode:
            raise ValueError(f"Unsupported tar format: {format}")
        mode = format_mode[format]
        with tarfile.open(output_path, mode) as tarf:  # type: ignore[call-overload]
            arcname = os.path.basename(input_path)
            tarf.add(input_path, arcname=arcname)
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/files_compressor_tool/files_compressor_tool.py",
"license": "MIT License",
"lines": 119,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py | from __future__ import annotations
from typing import TYPE_CHECKING, Any
from crewai.tools import BaseTool, EnvVar
from pydantic import BaseModel, ConfigDict, Field, PrivateAttr
if TYPE_CHECKING:
from firecrawl import FirecrawlApp # type: ignore[import-untyped]
# Optional-dependency probe: record whether firecrawl-py is importable so the
# rest of the module can degrade gracefully when it is absent.
try:
    from firecrawl import FirecrawlApp  # type: ignore[import-untyped]

    FIRECRAWL_AVAILABLE = True
except ImportError:
    FIRECRAWL_AVAILABLE = False
class FirecrawlCrawlWebsiteToolSchema(BaseModel):
    """Input for FirecrawlCrawlWebsiteTool."""

    # Root URL from which the crawl starts.
    url: str = Field(description="Website URL")
class FirecrawlCrawlWebsiteTool(BaseTool):
    """Tool for crawling websites using Firecrawl v2 API. To run this tool, you need to have a Firecrawl API key.

    Args:
        api_key (str): Your Firecrawl API key.
        config (dict): Optional. It contains Firecrawl v2 API parameters.

    Default configuration options (Firecrawl v2 API):
        max_discovery_depth (int): Maximum depth for discovering pages. Default: 2
        ignore_sitemap (bool): Whether to ignore sitemap. Default: True
        limit (int): Maximum number of pages to crawl. Default: 10
        allow_external_links (bool): Allow crawling external links. Default: False
        allow_subdomains (bool): Allow crawling subdomains. Default: False
        delay (int): Delay between requests in milliseconds. Default: None
        scrape_options (dict): Options for scraping content
            - formats (list[str]): Content formats to return. Default: ["markdown"]
            - only_main_content (bool): Only return main content. Default: True
            - timeout (int): Timeout in milliseconds. Default: 10000
    """

    model_config = ConfigDict(
        arbitrary_types_allowed=True, validate_assignment=True, frozen=False
    )
    name: str = "Firecrawl web crawl tool"
    description: str = "Crawl webpages using Firecrawl and return the contents"
    args_schema: type[BaseModel] = FirecrawlCrawlWebsiteToolSchema
    api_key: str | None = None
    config: dict[str, Any] | None = Field(
        default_factory=lambda: {
            "max_discovery_depth": 2,
            "ignore_sitemap": True,
            "limit": 10,
            "allow_external_links": False,
            "allow_subdomains": False,
            "delay": None,
            "scrape_options": {
                "formats": ["markdown"],
                "only_main_content": True,
                "timeout": 10000,
            },
        }
    )
    _firecrawl: FirecrawlApp | None = PrivateAttr(None)
    package_dependencies: list[str] = Field(default_factory=lambda: ["firecrawl-py"])
    env_vars: list[EnvVar] = Field(
        default_factory=lambda: [
            EnvVar(
                name="FIRECRAWL_API_KEY",
                description="API key for Firecrawl services",
                required=True,
            ),
        ]
    )

    def __init__(self, api_key: str | None = None, **kwargs):
        super().__init__(**kwargs)
        self.api_key = api_key
        self._initialize_firecrawl()

    def _initialize_firecrawl(self) -> None:
        """Create the FirecrawlApp client, offering to install the dependency."""
        try:
            from firecrawl import FirecrawlApp  # type: ignore

            self._firecrawl = FirecrawlApp(api_key=self.api_key)
        except ImportError:
            import click

            if click.confirm(
                "You are missing the 'firecrawl-py' package. Would you like to install it?"
            ):
                import subprocess

                try:
                    subprocess.run(["uv", "add", "firecrawl-py"], check=True)  # noqa: S607
                    from firecrawl import FirecrawlApp

                    self._firecrawl = FirecrawlApp(api_key=self.api_key)
                except subprocess.CalledProcessError as e:
                    raise ImportError("Failed to install firecrawl-py package") from e
            else:
                raise ImportError(
                    "`firecrawl-py` package not found, please run `uv add firecrawl-py`"
                ) from None

    def _run(self, url: str):
        """Crawl *url* and return Firecrawl's crawl result."""
        if not self._firecrawl:
            raise RuntimeError("FirecrawlApp not properly initialized")
        # Fix: config is typed Optional — guard against an explicit None so
        # ** unpacking does not raise TypeError.
        return self._firecrawl.crawl(url=url, poll_interval=2, **(self.config or {}))
# Resolve the `FirecrawlApp` forward reference used by the private attribute
# now that the class exists; skipped entirely when firecrawl-py is missing.
try:
    from firecrawl import FirecrawlApp

    # Only rebuild if the class hasn't been initialized yet
    if not hasattr(FirecrawlCrawlWebsiteTool, "_model_rebuilt"):
        FirecrawlCrawlWebsiteTool.model_rebuild()
        FirecrawlCrawlWebsiteTool._model_rebuilt = True  # type: ignore[attr-defined]
except ImportError:
    """
    When this tool is not used, then exception can be ignored.
    """
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/firecrawl_crawl_website_tool/firecrawl_crawl_website_tool.py",
"license": "MIT License",
"lines": 101,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py | from __future__ import annotations
from typing import TYPE_CHECKING, Any
from crewai.tools import BaseTool, EnvVar
from pydantic import BaseModel, ConfigDict, Field, PrivateAttr
if TYPE_CHECKING:
from firecrawl import FirecrawlApp # type: ignore[import-untyped]
# Optional-dependency probe: record whether firecrawl-py is importable so the
# rest of the module can degrade gracefully when it is absent.
try:
    from firecrawl import FirecrawlApp  # type: ignore[import-untyped]

    FIRECRAWL_AVAILABLE = True
except ImportError:
    FIRECRAWL_AVAILABLE = False
class FirecrawlScrapeWebsiteToolSchema(BaseModel):
    """Input for FirecrawlScrapeWebsiteTool."""

    # Single page URL to scrape.
    url: str = Field(description="Website URL")
class FirecrawlScrapeWebsiteTool(BaseTool):
    """Tool for scraping webpages using Firecrawl v2 API. To run this tool, you need to have a Firecrawl API key.

    Args:
        api_key (str): Your Firecrawl API key.
        config (dict): Optional. It contains Firecrawl v2 API parameters.

    Default configuration options (Firecrawl v2 API):
        formats (list[str]): Content formats to return. Default: ["markdown"]
        only_main_content (bool): Only return main content excluding headers, navs, footers, etc. Default: True
        include_tags (list[str]): Tags to include in the output. Default: []
        exclude_tags (list[str]): Tags to exclude from the output. Default: []
        max_age (int): Returns cached version if younger than this age in milliseconds. Default: 172800000 (2 days)
        headers (dict): Headers to send with the request (e.g., cookies, user-agent). Default: {}
        wait_for (int): Delay in milliseconds before fetching content. Default: 0
        mobile (bool): Emulate scraping from a mobile device. Default: False
        skip_tls_verification (bool): Skip TLS certificate verification. Default: True
        timeout (int): Request timeout in milliseconds. Default: None
        remove_base64_images (bool): Remove base64 images from output. Default: True
        block_ads (bool): Enable ad-blocking and cookie popup blocking. Default: True
        proxy (str): Proxy type ("basic", "stealth", "auto"). Default: "auto"
        store_in_cache (bool): Store page in Firecrawl index and cache. Default: True
    """

    model_config = ConfigDict(
        arbitrary_types_allowed=True, validate_assignment=True, frozen=False
    )
    name: str = "Firecrawl web scrape tool"
    description: str = "Scrape webpages using Firecrawl and return the contents"
    args_schema: type[BaseModel] = FirecrawlScrapeWebsiteToolSchema
    api_key: str | None = None
    config: dict[str, Any] = Field(
        default_factory=lambda: {
            "formats": ["markdown"],
            "only_main_content": True,
            "include_tags": [],
            "exclude_tags": [],
            "max_age": 172800000,  # 2 days cache
            "headers": {},
            "wait_for": 0,
            "mobile": False,
            "skip_tls_verification": True,
            "remove_base64_images": True,
            "block_ads": True,
            "proxy": "auto",
            "store_in_cache": True,
        }
    )
    _firecrawl: FirecrawlApp | None = PrivateAttr(None)
    package_dependencies: list[str] = Field(default_factory=lambda: ["firecrawl-py"])
    env_vars: list[EnvVar] = Field(
        default_factory=lambda: [
            EnvVar(
                name="FIRECRAWL_API_KEY",
                description="API key for Firecrawl services",
                required=True,
            ),
        ]
    )

    def __init__(self, api_key: str | None = None, **kwargs):
        super().__init__(**kwargs)
        # Fix: persist the key on the declared field (it was previously left
        # at None even when a key was supplied), matching the crawl and
        # search Firecrawl tools.
        self.api_key = api_key
        try:
            from firecrawl import FirecrawlApp  # type: ignore
        except ImportError:
            import click

            if click.confirm(
                "You are missing the 'firecrawl-py' package. Would you like to install it?"
            ):
                import subprocess

                subprocess.run(["uv", "add", "firecrawl-py"], check=True)  # noqa: S607
                from firecrawl import (
                    FirecrawlApp,
                )
            else:
                raise ImportError(
                    "`firecrawl-py` package not found, please run `uv add firecrawl-py`"
                ) from None
        self._firecrawl = FirecrawlApp(api_key=self.api_key)

    def _run(self, url: str):
        """Scrape *url* and return Firecrawl's scrape result."""
        if not self._firecrawl:
            raise RuntimeError("FirecrawlApp not properly initialized")
        return self._firecrawl.scrape(url=url, **self.config)
# Resolve the `FirecrawlApp` forward reference used by the private attribute
# now that the class exists; skipped entirely when firecrawl-py is missing.
try:
    from firecrawl import FirecrawlApp

    # Must rebuild model after class is defined
    if not hasattr(FirecrawlScrapeWebsiteTool, "_model_rebuilt"):
        FirecrawlScrapeWebsiteTool.model_rebuild()
        FirecrawlScrapeWebsiteTool._model_rebuilt = True  # type: ignore[attr-defined]
except ImportError:
    """
    When this tool is not used, then exception can be ignored.
    """
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/firecrawl_scrape_website_tool/firecrawl_scrape_website_tool.py",
"license": "MIT License",
"lines": 102,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py | from __future__ import annotations
from typing import TYPE_CHECKING, Any
from crewai.tools import BaseTool, EnvVar
from pydantic import BaseModel, ConfigDict, Field, PrivateAttr
if TYPE_CHECKING:
from firecrawl import FirecrawlApp # type: ignore[import-untyped]
# Optional-dependency probe: record whether firecrawl-py is importable so the
# rest of the module can degrade gracefully when it is absent.
try:
    from firecrawl import FirecrawlApp  # type: ignore[import-untyped]

    FIRECRAWL_AVAILABLE = True
except ImportError:
    FIRECRAWL_AVAILABLE = False
class FirecrawlSearchToolSchema(BaseModel):
    """Input for FirecrawlSearchTool."""

    # Web search query forwarded to Firecrawl.
    query: str = Field(description="Search query")
class FirecrawlSearchTool(BaseTool):
    """Tool for searching webpages using Firecrawl v2 API. To run this tool, you need to have a Firecrawl API key.

    Args:
        api_key (str): Your Firecrawl API key.
        config (dict): Optional. It contains Firecrawl v2 API parameters.

    Default configuration options (Firecrawl v2 API):
        limit (int): Maximum number of search results to return. Default: 5
        tbs (str): Time-based search filter (e.g., "qdr:d" for past day). Default: None
        location (str): Location for search results. Default: None
        timeout (int): Request timeout in milliseconds. Default: None
        scrape_options (dict): Options for scraping the search results. Default: {"formats": ["markdown"]}
            - formats (list[str]): Content formats to return. Default: ["markdown"]
            - only_main_content (bool): Only return main content. Default: True
            - include_tags (list[str]): Tags to include. Default: []
            - exclude_tags (list[str]): Tags to exclude. Default: []
            - wait_for (int): Delay before fetching content in ms. Default: 0
            - timeout (int): Request timeout in milliseconds. Default: None
    """

    model_config = ConfigDict(
        arbitrary_types_allowed=True, validate_assignment=True, frozen=False
    )
    name: str = "Firecrawl web search tool"
    description: str = "Search webpages using Firecrawl and return the results"
    args_schema: type[BaseModel] = FirecrawlSearchToolSchema
    api_key: str | None = None
    config: dict[str, Any] | None = Field(
        default_factory=lambda: {
            "limit": 5,
            "tbs": None,
            "location": None,
            "timeout": None,
            "scrape_options": {
                "formats": ["markdown"],
                "only_main_content": True,
                "include_tags": [],
                "exclude_tags": [],
                "wait_for": 0,
            },
        }
    )
    _firecrawl: FirecrawlApp | None = PrivateAttr(None)
    package_dependencies: list[str] = Field(default_factory=lambda: ["firecrawl-py"])
    env_vars: list[EnvVar] = Field(
        default_factory=lambda: [
            EnvVar(
                name="FIRECRAWL_API_KEY",
                description="API key for Firecrawl services",
                required=True,
            ),
        ]
    )

    def __init__(self, api_key: str | None = None, **kwargs):
        super().__init__(**kwargs)
        self.api_key = api_key
        self._initialize_firecrawl()

    def _initialize_firecrawl(self) -> None:
        """Create the FirecrawlApp client, offering to install the dependency."""
        try:
            from firecrawl import FirecrawlApp  # type: ignore

            self._firecrawl = FirecrawlApp(api_key=self.api_key)
        except ImportError:
            import click

            if click.confirm(
                "You are missing the 'firecrawl-py' package. Would you like to install it?"
            ):
                import subprocess

                try:
                    subprocess.run(["uv", "add", "firecrawl-py"], check=True)  # noqa: S607
                    from firecrawl import FirecrawlApp

                    self._firecrawl = FirecrawlApp(api_key=self.api_key)
                except subprocess.CalledProcessError as e:
                    raise ImportError("Failed to install firecrawl-py package") from e
            else:
                raise ImportError(
                    "`firecrawl-py` package not found, please run `uv add firecrawl-py`"
                ) from None

    def _run(
        self,
        query: str,
    ) -> Any:
        """Search for *query* and return Firecrawl's search result."""
        if not self._firecrawl:
            raise RuntimeError("FirecrawlApp not properly initialized")
        # Fix: config is typed Optional — guard against an explicit None so
        # ** unpacking does not raise TypeError.
        return self._firecrawl.search(
            query=query,
            **(self.config or {}),
        )
# Resolve the `FirecrawlApp` forward reference used by the private attribute
# now that the class exists; skipped entirely when firecrawl-py is missing.
try:
    from firecrawl import FirecrawlApp  # type: ignore

    # Only rebuild if the class hasn't been initialized yet
    if not hasattr(FirecrawlSearchTool, "_model_rebuilt"):
        FirecrawlSearchTool.model_rebuild()
        FirecrawlSearchTool._model_rebuilt = True  # type: ignore[attr-defined]
except ImportError:
    """
    When this tool is not used, then exception can be ignored.
    """
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/firecrawl_search_tool/firecrawl_search_tool.py",
"license": "MIT License",
"lines": 108,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/generate_crewai_automation_tool/generate_crewai_automation_tool.py | import os
from crewai.tools import BaseTool, EnvVar
from pydantic import BaseModel, Field
import requests
class GenerateCrewaiAutomationToolSchema(BaseModel):
    """Input for GenerateCrewaiAutomationTool."""

    prompt: str = Field(
        description="The prompt to generate the CrewAI automation, e.g. 'Generate a CrewAI automation that will scrape the website and store the data in a database.'"
    )
    organization_id: str | None = Field(
        default=None,
        description="The identifier for the CrewAI AMP organization. If not specified, a default organization will be used.",
    )
class GenerateCrewaiAutomationTool(BaseTool):
    """Generate a CrewAI automation in CrewAI Studio from a natural-language prompt."""

    name: str = "Generate CrewAI Automation"
    description: str = (
        "A tool that leverages CrewAI Studio's capabilities to automatically generate complete CrewAI "
        "automations based on natural language descriptions. It translates high-level requirements into "
        "functional CrewAI implementations."
    )
    args_schema: type[BaseModel] = GenerateCrewaiAutomationToolSchema
    crewai_enterprise_url: str = Field(
        default_factory=lambda: os.getenv("CREWAI_PLUS_URL", "https://app.crewai.com"),
        description="The base URL of CrewAI AMP. If not provided, it will be loaded from the environment variable CREWAI_PLUS_URL with default https://app.crewai.com.",
    )
    personal_access_token: str | None = Field(
        default_factory=lambda: os.getenv("CREWAI_PERSONAL_ACCESS_TOKEN"),
        description="The user's Personal Access Token to access CrewAI AMP API. If not provided, it will be loaded from the environment variable CREWAI_PERSONAL_ACCESS_TOKEN.",
    )
    env_vars: list[EnvVar] = Field(
        default_factory=lambda: [
            EnvVar(
                name="CREWAI_PERSONAL_ACCESS_TOKEN",
                description="Personal Access Token for CrewAI Enterprise API",
                required=True,
            ),
            EnvVar(
                name="CREWAI_PLUS_URL",
                description="Base URL for CrewAI Enterprise API",
                required=False,
            ),
        ]
    )

    def _run(self, **kwargs) -> str:
        """Submit the prompt to CrewAI Studio and return the project URL.

        Raises:
            requests.HTTPError: If the Studio API responds with an error status.
        """
        input_data = GenerateCrewaiAutomationToolSchema(**kwargs)
        # Fix: bound the request so a stalled API call cannot hang the agent
        # forever (previously the missing timeout was noqa-suppressed, S113).
        response = requests.post(
            f"{self.crewai_enterprise_url}/crewai_plus/api/v1/studio",
            headers=self._get_headers(input_data.organization_id),
            json={"prompt": input_data.prompt},
            timeout=60,
        )
        response.raise_for_status()
        studio_project_url = response.json().get("url")
        return f"Generated CrewAI Studio project URL: {studio_project_url}"

    def _get_headers(self, organization_id: str | None = None) -> dict:
        """Build auth headers, optionally scoped to an organization."""
        headers = {
            "Authorization": f"Bearer {self.personal_access_token}",
            "Content-Type": "application/json",
            "Accept": "application/json",
        }
        if organization_id:
            headers["X-Crewai-Organization-Id"] = organization_id
        return headers
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/generate_crewai_automation_tool/generate_crewai_automation_tool.py",
"license": "MIT License",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/github_search_tool/github_search_tool.py | from pydantic import BaseModel, Field
from crewai_tools.rag.data_types import DataType
from crewai_tools.tools.rag.rag_tool import RagTool
class FixedGithubSearchToolSchema(BaseModel):
    """Input for GithubSearchTool.

    Variant used when the target repository was fixed at construction time,
    so callers only supply the search query.
    """

    # Free-text query run against the indexed repository content.
    search_query: str = Field(
        ...,
        description="Mandatory search query you want to use to search the github repo's content",
    )
class GithubSearchToolSchema(FixedGithubSearchToolSchema):
    """Input for GithubSearchTool.

    Full schema used when the repository is chosen per call rather than at
    construction time.
    """

    # Repository slug (appended to https://github.com/ by GithubSearchTool.add).
    github_repo: str = Field(..., description="Mandatory github you want to search")
    # Subset of repository artifacts to include in the search index.
    content_types: list[str] = Field(
        ...,
        description="Mandatory content types you want to be included search, options: [code, repo, pr, issue]",
    )
class GithubSearchTool(RagTool):
    """Semantic (RAG) search over a GitHub repository's content."""

    name: str = "Search a github repo's content"
    description: str = "A tool that can be used to semantic search a query from a github repo's content. This is not the GitHub API, but instead a tool that can provide semantic search capabilities."
    summarize: bool = False
    # Token forwarded to the indexer so private repos can be fetched.
    gh_token: str
    args_schema: type[BaseModel] = GithubSearchToolSchema
    content_types: list[str] = Field(
        default_factory=lambda: ["code", "repo", "pr", "issue"],
        description="Content types you want to be included search, options: [code, repo, pr, issue]",
    )

    def __init__(
        self,
        github_repo: str | None = None,
        content_types: list[str] | None = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # When both the repo and the content types are known up front, index
        # the repo immediately and narrow the schema to just the query.
        if github_repo and content_types:
            self.add(repo=github_repo, content_types=content_types)
            self.description = f"A tool that can be used to semantic search a query the {github_repo} github repo's content. This is not the GitHub API, but instead a tool that can provide semantic search capabilities."
            self.args_schema = FixedGithubSearchToolSchema
            self._generate_description()

    def add(
        self,
        repo: str,
        content_types: list[str] | None = None,
    ) -> None:
        """Index the given repository slug for semantic search."""
        selected = content_types or self.content_types
        super().add(
            f"https://github.com/{repo}",
            data_type=DataType.GITHUB,
            metadata={"content_types": selected, "gh_token": self.gh_token},
        )

    def _run(  # type: ignore[override]
        self,
        search_query: str,
        github_repo: str | None = None,
        content_types: list[str] | None = None,
        similarity_threshold: float | None = None,
        limit: int | None = None,
    ) -> str:
        """Run the semantic search, optionally indexing a repo first."""
        if github_repo:
            self.add(repo=github_repo, content_types=content_types)
        return super()._run(
            query=search_query,
            similarity_threshold=similarity_threshold,
            limit=limit,
        )
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/github_search_tool/github_search_tool.py",
"license": "MIT License",
"lines": 65,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/hyperbrowser_load_tool/hyperbrowser_load_tool.py | import os
from typing import Any, Literal
from crewai.tools import BaseTool, EnvVar
from pydantic import BaseModel, Field
class HyperbrowserLoadToolSchema(BaseModel):
    """Arguments accepted by HyperbrowserLoadTool."""

    # Target page for the scrape or crawl request.
    url: str = Field(description="Website URL")
    # Single-page extraction ("scrape") or multi-page traversal ("crawl").
    operation: Literal["scrape", "crawl"] = Field(
        description="Operation to perform on the website. Either 'scrape' or 'crawl'"
    )
    # Passed through to the Hyperbrowser SDK job parameters.
    params: dict | None = Field(
        description="Optional params for scrape or crawl. For more information on the supported params, visit https://docs.hyperbrowser.ai/reference/sdks/python/scrape#start-scrape-job-and-wait or https://docs.hyperbrowser.ai/reference/sdks/python/crawl#start-crawl-job-and-wait"
    )
class HyperbrowserLoadTool(BaseTool):
    """HyperbrowserLoadTool.

    Scrape or crawl web pages and load the contents with optional parameters
    for configuring content extraction.

    Requires the `hyperbrowser` package.
    Get your API Key from https://app.hyperbrowser.ai/

    Args:
        api_key: The Hyperbrowser API key, can be set as an environment
            variable `HYPERBROWSER_API_KEY` or passed directly
    """

    name: str = "Hyperbrowser web load tool"
    description: str = "Scrape or crawl a website using Hyperbrowser and return the contents in properly formatted markdown or html"
    args_schema: type[BaseModel] = HyperbrowserLoadToolSchema
    api_key: str | None = None
    # Lazily-typed SDK client; Any because `hyperbrowser` is an optional dependency.
    hyperbrowser: Any | None = None
    package_dependencies: list[str] = Field(default_factory=lambda: ["hyperbrowser"])
    env_vars: list[EnvVar] = Field(
        default_factory=lambda: [
            EnvVar(
                name="HYPERBROWSER_API_KEY",
                description="API key for Hyperbrowser services",
                required=False,
            ),
        ]
    )

    def __init__(self, api_key: str | None = None, **kwargs):
        super().__init__(**kwargs)
        # Resolve the key from the explicit argument first, then the environment.
        self.api_key = api_key or os.getenv("HYPERBROWSER_API_KEY")
        # BUG FIX: the original checked the `api_key` *parameter* here, which
        # rejected configurations that rely solely on the HYPERBROWSER_API_KEY
        # environment variable (and made a later `self.api_key` check
        # unreachable). Validate the resolved value instead.
        if not self.api_key:
            raise ValueError(
                "HYPERBROWSER_API_KEY is not set. Please provide it either via the constructor with the `api_key` argument or by setting the HYPERBROWSER_API_KEY environment variable."
            )
        try:
            from hyperbrowser import Hyperbrowser  # type: ignore[import-untyped]
        except ImportError as e:
            raise ImportError(
                "`hyperbrowser` package not found, please run `pip install hyperbrowser`"
            ) from e
        self.hyperbrowser = Hyperbrowser(api_key=self.api_key)

    @staticmethod
    def _prepare_params(params: dict) -> dict:
        """Prepare session and scrape options parameters.

        Validates the requested output formats and converts raw dicts into
        the SDK's typed parameter objects.

        Raises:
            ValueError: If a format other than 'markdown' or 'html' is requested.
            ImportError: If the `hyperbrowser` package is missing.
        """
        try:
            from hyperbrowser.models.scrape import (  # type: ignore[import-untyped]
                ScrapeOptions,
            )
            from hyperbrowser.models.session import (  # type: ignore[import-untyped]
                CreateSessionParams,
            )
        except ImportError as e:
            raise ImportError(
                "`hyperbrowser` package not found, please run `pip install hyperbrowser`"
            ) from e
        if "scrape_options" in params:
            if "formats" in params["scrape_options"]:
                formats = params["scrape_options"]["formats"]
                # Only markdown/html extraction is supported by this tool.
                if not all(fmt in ["markdown", "html"] for fmt in formats):
                    raise ValueError("formats can only contain 'markdown' or 'html'")
        if "session_options" in params:
            params["session_options"] = CreateSessionParams(**params["session_options"])
        if "scrape_options" in params:
            params["scrape_options"] = ScrapeOptions(**params["scrape_options"])
        return params

    def _extract_content(self, data: Any | None):
        """Extract content from response data, preferring markdown over html."""
        content = ""
        if data:
            content = data.markdown or data.html or ""
        return content

    def _run(
        self,
        url: str,
        operation: Literal["scrape", "crawl"] = "scrape",
        params: dict | None = None,
    ):
        """Scrape or crawl `url` and return the extracted content.

        Args:
            url: Page to fetch.
            operation: 'scrape' for a single page, 'crawl' to follow links.
            params: Optional job parameters forwarded to the SDK.

        Returns:
            Markdown/html content; for crawls, pages are concatenated with
            a separator and their source URL.
        """
        if params is None:
            params = {}
        try:
            from hyperbrowser.models.crawl import (  # type: ignore[import-untyped]
                StartCrawlJobParams,
            )
            from hyperbrowser.models.scrape import (  # type: ignore[import-untyped]
                StartScrapeJobParams,
            )
        except ImportError as e:
            raise ImportError(
                "`hyperbrowser` package not found, please run `pip install hyperbrowser`"
            ) from e
        params = self._prepare_params(params)
        if operation == "scrape":
            scrape_params = StartScrapeJobParams(url=url, **params)
            scrape_resp = self.hyperbrowser.scrape.start_and_wait(scrape_params)  # type: ignore[union-attr]
            return self._extract_content(scrape_resp.data)
        crawl_params = StartCrawlJobParams(url=url, **params)
        crawl_resp = self.hyperbrowser.crawl.start_and_wait(crawl_params)  # type: ignore[union-attr]
        content = ""
        if crawl_resp.data:
            for page in crawl_resp.data:
                page_content = self._extract_content(page)
                if page_content:
                    content += (
                        f"\n{'-' * 50}\nUrl: {page.url}\nContent:\n{page_content}\n"
                    )
        return content
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/hyperbrowser_load_tool/hyperbrowser_load_tool.py",
"license": "MIT License",
"lines": 118,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/invoke_crewai_automation_tool/invoke_crewai_automation_tool.py | import time
from typing import Any
from crewai.tools import BaseTool
from pydantic import BaseModel, Field, create_model
import requests
class InvokeCrewAIAutomationInput(BaseModel):
    """Input schema for InvokeCrewAIAutomationTool."""

    # Forwarded verbatim as the automation's kickoff input.
    prompt: str = Field(..., description="The prompt or query to send to the crew")
class InvokeCrewAIAutomationTool(BaseTool):
    """A CrewAI tool for invoking external crew/flows APIs.

    This tool provides CrewAI Platform API integration with external crew services, supporting:
    - Dynamic input schema configuration
    - Automatic polling for task completion
    - Bearer token authentication
    - Comprehensive error handling

    Example:
        Basic usage:
        >>> tool = InvokeCrewAIAutomationTool(
        ...     crew_api_url="https://api.example.com",
        ...     crew_bearer_token="your_token",
        ...     crew_name="My Crew",
        ...     crew_description="Description of what the crew does",
        ... )

        With custom inputs:
        >>> custom_inputs = {
        ...     "param1": Field(..., description="Description of param1"),
        ...     "param2": Field(
        ...         default="default_value", description="Description of param2"
        ...     ),
        ... }
        >>> tool = InvokeCrewAIAutomationTool(
        ...     crew_api_url="https://api.example.com",
        ...     crew_bearer_token="your_token",
        ...     crew_name="My Crew",
        ...     crew_description="Description of what the crew does",
        ...     crew_inputs=custom_inputs,
        ... )

    Example:
        >>> tools = [
        ...     InvokeCrewAIAutomationTool(
        ...         crew_api_url="https://canary-crew-[...].crewai.com",
        ...         crew_bearer_token="[Your token: abcdef012345]",
        ...         crew_name="State of AI Report",
        ...         crew_description="Retrieves a report on state of AI for a given year.",
        ...         crew_inputs={
        ...             "year": Field(
        ...                 ..., description="Year to retrieve the report for (integer)"
        ...             )
        ...         },
        ...     )
        ... ]
    """

    name: str = "invoke_amp_automation"
    description: str = "Invokes an CrewAI Platform Automation using API"
    args_schema: type[BaseModel] = InvokeCrewAIAutomationInput
    crew_api_url: str
    crew_bearer_token: str
    max_polling_time: int = 10 * 60  # 10 minutes

    def __init__(
        self,
        crew_api_url: str,
        crew_bearer_token: str,
        crew_name: str,
        crew_description: str,
        max_polling_time: int = 10 * 60,
        crew_inputs: dict[str, Any] | None = None,
    ):
        """Initialize the InvokeCrewAIAutomationTool.

        Args:
            crew_api_url: Base URL of the crew API service
            crew_bearer_token: Bearer token for API authentication
            crew_name: Name of the crew to invoke
            crew_description: Description of the crew to invoke
            max_polling_time: Maximum time in seconds to wait for task completion (default: 600 seconds = 10 minutes)
            crew_inputs: Optional dictionary defining custom input schema fields
        """
        # Create dynamic args_schema if custom inputs provided
        if crew_inputs:
            fields = {}
            for field_name, field_def in crew_inputs.items():
                if isinstance(field_def, tuple):
                    # Caller supplied an explicit (type, Field) pair.
                    fields[field_name] = field_def
                else:
                    # Assume it's a bare Field object; default the type to str.
                    fields[field_name] = (str, field_def)
            # Create dynamic model
            args_schema = create_model("DynamicInvokeCrewAIAutomationInput", **fields)  # type: ignore[call-overload]
        else:
            args_schema = InvokeCrewAIAutomationInput
        # Initialize the parent class with proper field values
        super().__init__(
            name=crew_name,
            description=crew_description,
            args_schema=args_schema,
            crew_api_url=crew_api_url,
            crew_bearer_token=crew_bearer_token,
            max_polling_time=max_polling_time,
        )

    def _kickoff_crew(self, inputs: dict[str, Any]) -> dict[str, Any]:
        """Start a new crew task.

        Args:
            inputs: Dictionary containing the query and other input parameters

        Returns:
            Dictionary containing the crew task response. The response will
            contain the crew id which needs to be returned to check the
            status of the crew.
        """
        response = requests.post(
            f"{self.crew_api_url}/kickoff",
            headers={
                "Authorization": f"Bearer {self.crew_bearer_token}",
                "Content-Type": "application/json",
            },
            json={"inputs": inputs},
            timeout=30,
        )
        return response.json()

    def _get_crew_status(self, crew_id: str) -> dict[str, Any]:
        """Get the status of a crew task.

        Args:
            crew_id: The ID of the crew task to check

        Returns:
            Dictionary containing the crew task status
        """
        response = requests.get(
            f"{self.crew_api_url}/status/{crew_id}",
            headers={
                "Authorization": f"Bearer {self.crew_bearer_token}",
                "Content-Type": "application/json",
            },
            timeout=30,
        )
        return response.json()

    def _run(self, **kwargs) -> str:
        """Execute the crew invocation tool.

        Kicks off the automation, then polls its status roughly once per
        second until success, failure, or the polling budget is exhausted.
        """
        # NOTE: `kwargs` captured via `**kwargs` is always a dict, so the
        # original `if kwargs is None` guard was dead code and was removed.
        # Start the crew
        response = self._kickoff_crew(inputs=kwargs)
        kickoff_id: str | None = response.get("kickoff_id")
        if kickoff_id is None:
            return f"Error: Failed to kickoff crew. Response: {response}"
        # Poll for completion; each iteration is one status call + 1s sleep,
        # so the wall-clock budget is approximately max_polling_time seconds.
        for i in range(self.max_polling_time):
            try:
                status_response = self._get_crew_status(crew_id=kickoff_id)
                if status_response.get("state", "").lower() == "success":
                    return status_response.get("result", "No result returned")
                if status_response.get("state", "").lower() == "failed":
                    return f"Error: Crew task failed. Response: {status_response}"
            except Exception as e:
                if i == self.max_polling_time - 1:  # Last attempt
                    return f"Error: Failed to get crew status after {self.max_polling_time} attempts. Last error: {e}"
            time.sleep(1)
        return f"Error: Crew did not complete within {self.max_polling_time} seconds"
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/invoke_crewai_automation_tool/invoke_crewai_automation_tool.py",
"license": "MIT License",
"lines": 155,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/jina_scrape_website_tool/jina_scrape_website_tool.py | from crewai.tools import BaseTool
from pydantic import BaseModel, Field
import requests
class JinaScrapeWebsiteToolInput(BaseModel):
    """Input schema for JinaScrapeWebsiteTool."""

    # URL of the page to read through the Jina.ai reader (r.jina.ai) proxy.
    website_url: str = Field(..., description="Mandatory website url to read the file")
class JinaScrapeWebsiteTool(BaseTool):
    """Read a web page through the Jina.ai reader and return markdown content."""

    name: str = "JinaScrapeWebsiteTool"
    description: str = "A tool that can be used to read a website content using Jina.ai reader and return markdown content."
    args_schema: type[BaseModel] = JinaScrapeWebsiteToolInput
    website_url: str | None = None
    api_key: str | None = None
    headers: dict = Field(default_factory=dict)

    def __init__(
        self,
        website_url: str | None = None,
        api_key: str | None = None,
        custom_headers: dict | None = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if website_url is not None:
            # Pin the tool to a single site and regenerate its agent-facing text.
            self.website_url = website_url
            self.description = f"A tool that can be used to read {website_url}'s content and return markdown content."
            self._generate_description()
        if custom_headers is not None:
            self.headers = custom_headers
        # Bearer token is layered on top of any custom headers.
        if api_key is not None:
            self.headers["Authorization"] = f"Bearer {api_key}"

    def _run(self, website_url: str | None = None) -> str:
        """Fetch the page via r.jina.ai and return its text (markdown)."""
        target = website_url or self.website_url
        if not target:
            raise ValueError(
                "Website URL must be provided either during initialization or execution"
            )
        response = requests.get(
            f"https://r.jina.ai/{target}", headers=self.headers, timeout=15
        )
        response.raise_for_status()
        return response.text
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/jina_scrape_website_tool/jina_scrape_website_tool.py",
"license": "MIT License",
"lines": 40,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/json_search_tool/json_search_tool.py | from pydantic import BaseModel, Field
from crewai_tools.tools.rag.rag_tool import RagTool
class FixedJSONSearchToolSchema(BaseModel):
    """Input for JSONSearchTool.

    Variant used when the JSON source was fixed at construction time, so
    callers only supply the search query.
    """

    # Free-text query run against the indexed JSON content.
    search_query: str = Field(
        ...,
        description="Mandatory search query you want to use to search the JSON's content",
    )
class JSONSearchToolSchema(FixedJSONSearchToolSchema):
    """Input for JSONSearchTool.

    Full schema used when the JSON source is chosen per call rather than at
    construction time.
    """

    # Path or URL of the JSON document to index before searching.
    json_path: str = Field(
        ..., description="File path or URL of a JSON file to be searched"
    )
class JSONSearchTool(RagTool):
    """Semantic (RAG) search over the contents of a JSON document."""

    name: str = "Search a JSON's content"
    description: str = (
        "A tool that can be used to semantic search a query from a JSON's content."
    )
    args_schema: type[BaseModel] = JSONSearchToolSchema

    def __init__(self, json_path: str | None = None, **kwargs):
        super().__init__(**kwargs)
        if json_path is None:
            return
        # A fixed JSON source: index it now and narrow the schema to the query.
        self.add(json_path)
        self.description = f"A tool that can be used to semantic search a query the {json_path} JSON's content."
        self.args_schema = FixedJSONSearchToolSchema
        self._generate_description()

    def _run(  # type: ignore[override]
        self,
        search_query: str,
        json_path: str | None = None,
        similarity_threshold: float | None = None,
        limit: int | None = None,
    ) -> str:
        """Search the indexed JSON content, optionally indexing a source first."""
        if json_path is not None:
            self.add(json_path)
        return super()._run(
            query=search_query,
            similarity_threshold=similarity_threshold,
            limit=limit,
        )
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/json_search_tool/json_search_tool.py",
"license": "MIT License",
"lines": 38,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/linkup/linkup_search_tool.py | import os
from typing import Any, Literal
from crewai.tools import BaseTool, EnvVar
try:
from linkup import LinkupClient
LINKUP_AVAILABLE = True
except ImportError:
LINKUP_AVAILABLE = False
LinkupClient = Any # type: ignore[misc,assignment] # type placeholder when package is not available
from pydantic import Field, PrivateAttr
class LinkupSearchTool(BaseTool):
    """Query the Linkup API for contextual search results."""

    name: str = "Linkup Search Tool"
    description: str = (
        "Performs an API call to Linkup to retrieve contextual information."
    )
    _client: LinkupClient = PrivateAttr()  # type: ignore
    package_dependencies: list[str] = Field(default_factory=lambda: ["linkup-sdk"])
    env_vars: list[EnvVar] = Field(
        default_factory=lambda: [
            EnvVar(
                name="LINKUP_API_KEY", description="API key for Linkup", required=True
            ),
        ]
    )

    def __init__(self, api_key: str | None = None) -> None:
        """Initialize the tool with an API key."""
        super().__init__()  # type: ignore[call-arg]
        try:
            from linkup import LinkupClient
        except ImportError:
            import click

            # Offer an interactive install before giving up on the dependency.
            if not click.confirm(
                "You are missing the 'linkup-sdk' package. Would you like to install it?"
            ):
                raise ImportError(
                    "The 'linkup-sdk' package is required to use the LinkupSearchTool. "
                    "Please install it with: uv add linkup-sdk"
                ) from None
            import subprocess

            subprocess.run(["uv", "add", "linkup-sdk"], check=True)  # noqa: S607
            from linkup import LinkupClient
        self._client = LinkupClient(api_key=api_key or os.getenv("LINKUP_API_KEY"))

    def _run(
        self,
        query: str,
        depth: Literal["standard", "deep"] = "standard",
        output_type: Literal[
            "searchResults", "sourcedAnswer", "structured"
        ] = "searchResults",
    ) -> dict:
        """Executes a search using the Linkup API.

        :param query: The query to search for.
        :param depth: Search depth (default is "standard").
        :param output_type: Desired result type (default is "searchResults").
        :return: A dictionary containing the results or an error message.
        """
        try:
            api_response = self._client.search(
                query=query, depth=depth, output_type=output_type
            )
            extracted = [
                {"name": item.name, "url": item.url, "content": item.content}
                for item in api_response.results
            ]
        except Exception as exc:
            return {"success": False, "error": str(exc)}
        return {"success": True, "results": extracted}
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/linkup/linkup_search_tool.py",
"license": "MIT License",
"lines": 68,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.