sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
crewAIInc/crewAI:lib/crewai/tests/llms/test_multimodal_integration.py | """Integration tests for LLM multimodal functionality with cassettes.
These tests make actual API calls (recorded via VCR cassettes) to verify
multimodal content is properly sent and processed by each provider.
"""
from pathlib import Path
import pytest
from crewai.llm import LLM
from crewai_files import (
AudioFile,
File,
ImageFile,
PDFFile,
TextFile,
VideoFile,
format_multimodal_content,
)
from crewai_files.resolution.resolver import FileResolver, FileResolverConfig
# Path to test data files
# NOTE(review): resolved four parents up from this test module into a sibling
# crewai-files package — confirm against the repository layout.
TEST_FIXTURES_DIR = Path(__file__).parent.parent.parent.parent / "crewai-files" / "tests" / "fixtures"
# Individual fixture files. The video/audio files may be absent from a checkout;
# the fixtures below skip the test in that case rather than erroring.
TEST_IMAGE_PATH = TEST_FIXTURES_DIR / "revenue_chart.png"
TEST_TEXT_PATH = TEST_FIXTURES_DIR / "review_guidelines.txt"
TEST_VIDEO_PATH = TEST_FIXTURES_DIR / "sample_video.mp4"
TEST_AUDIO_PATH = TEST_FIXTURES_DIR / "sample_audio.wav"
@pytest.fixture
def test_image_bytes() -> bytes:
    """Raw bytes of the image fixture file."""
    data = TEST_IMAGE_PATH.read_bytes()
    return data
@pytest.fixture
def test_text_bytes() -> bytes:
    """Raw bytes of the text fixture file."""
    data = TEST_TEXT_PATH.read_bytes()
    return data
@pytest.fixture
def test_video_bytes() -> bytes:
    """Raw bytes of the video fixture, skipping the test when it is absent."""
    if TEST_VIDEO_PATH.exists():
        return TEST_VIDEO_PATH.read_bytes()
    pytest.skip("sample_video.mp4 fixture not found")
@pytest.fixture
def test_audio_bytes() -> bytes:
    """Raw bytes of the audio fixture, skipping the test when it is absent."""
    if TEST_AUDIO_PATH.exists():
        return TEST_AUDIO_PATH.read_bytes()
    pytest.skip("sample_audio.wav fixture not found")
# Minimal PDF for testing (real PDF structure)
# Hand-written single-page document: catalog -> page tree -> one empty
# Letter-size (612x792 pt) page, plus xref table and trailer. Kept in memory
# so PDF-capable providers can be exercised without a fixture file on disk.
MINIMAL_PDF = b"""%PDF-1.4
1 0 obj << /Type /Catalog /Pages 2 0 R >> endobj
2 0 obj << /Type /Pages /Kids [3 0 R] /Count 1 >> endobj
3 0 obj << /Type /Page /Parent 2 0 R /MediaBox [0 0 612 792] >> endobj
xref
0 4
0000000000 65535 f
0000000009 00000 n
0000000058 00000 n
0000000115 00000 n
trailer << /Size 4 /Root 1 0 R >>
startxref
196
%%EOF
"""
def _build_multimodal_message(llm: LLM, prompt: str, files: dict) -> list[dict]:
    """Assemble a single user message holding the prompt plus file blocks.

    The provider name (falling back to the raw model string when the LLM has
    no ``provider`` attribute) drives provider-specific content formatting.
    """
    provider_name = getattr(llm, "provider", None) or llm.model
    file_blocks = format_multimodal_content(files, provider_name)
    content = [llm.format_text_content(prompt)]
    content.extend(file_blocks)
    return [{"role": "user", "content": content}]
class TestOpenAIMultimodalIntegration:
    """Integration tests for OpenAI multimodal with real API calls."""

    @pytest.mark.vcr()
    def test_describe_image(self, test_image_bytes: bytes) -> None:
        """The model returns a non-empty textual description of an image."""
        client = LLM(model="openai/gpt-4o-mini")
        prompt = "Describe this image in one sentence. Be brief."
        msgs = _build_multimodal_message(
            client, prompt, {"image": ImageFile(source=test_image_bytes)}
        )
        answer = client.call(msgs)
        assert answer
        assert isinstance(answer, str)
        assert len(answer) > 0
class TestOpenAIO4MiniMultimodalIntegration:
    """Integration tests for OpenAI o4-mini reasoning model with vision."""

    @pytest.mark.vcr()
    def test_describe_image(self, test_image_bytes: bytes) -> None:
        """The reasoning model returns a non-empty image description."""
        client = LLM(model="openai/o4-mini")
        prompt = "Describe this image in one sentence. Be brief."
        msgs = _build_multimodal_message(
            client, prompt, {"image": ImageFile(source=test_image_bytes)}
        )
        answer = client.call(msgs)
        assert answer
        assert isinstance(answer, str)
        assert len(answer) > 0
class TestOpenAIGPT41MiniMultimodalIntegration:
    """Integration tests for OpenAI GPT-4.1-mini with vision."""

    @pytest.mark.vcr()
    def test_describe_image(self, test_image_bytes: bytes) -> None:
        """GPT-4.1-mini returns a non-empty image description."""
        client = LLM(model="openai/gpt-4.1-mini")
        prompt = "Describe this image in one sentence. Be brief."
        msgs = _build_multimodal_message(
            client, prompt, {"image": ImageFile(source=test_image_bytes)}
        )
        answer = client.call(msgs)
        assert answer
        assert isinstance(answer, str)
        assert len(answer) > 0
class TestOpenAIGPT5MultimodalIntegration:
    """Integration tests for OpenAI GPT-5 with vision."""

    @pytest.mark.vcr()
    def test_describe_image(self, test_image_bytes: bytes) -> None:
        """GPT-5 returns a non-empty image description."""
        client = LLM(model="openai/gpt-5")
        prompt = "Describe this image in one sentence. Be brief."
        msgs = _build_multimodal_message(
            client, prompt, {"image": ImageFile(source=test_image_bytes)}
        )
        answer = client.call(msgs)
        assert answer
        assert isinstance(answer, str)
        assert len(answer) > 0
class TestOpenAIGPT5MiniMultimodalIntegration:
    """Integration tests for OpenAI GPT-5-mini with vision."""

    @pytest.mark.vcr()
    def test_describe_image(self, test_image_bytes: bytes) -> None:
        """GPT-5-mini returns a non-empty image description."""
        client = LLM(model="openai/gpt-5-mini")
        prompt = "Describe this image in one sentence. Be brief."
        msgs = _build_multimodal_message(
            client, prompt, {"image": ImageFile(source=test_image_bytes)}
        )
        answer = client.call(msgs)
        assert answer
        assert isinstance(answer, str)
        assert len(answer) > 0
class TestOpenAIGPT5NanoMultimodalIntegration:
    """Integration tests for OpenAI GPT-5-nano with vision."""

    @pytest.mark.vcr()
    def test_describe_image(self, test_image_bytes: bytes) -> None:
        """GPT-5-nano returns a non-empty image description."""
        client = LLM(model="openai/gpt-5-nano")
        prompt = "Describe this image in one sentence. Be brief."
        msgs = _build_multimodal_message(
            client, prompt, {"image": ImageFile(source=test_image_bytes)}
        )
        answer = client.call(msgs)
        assert answer
        assert isinstance(answer, str)
        assert len(answer) > 0
class TestAnthropicMultimodalIntegration:
    """Integration tests for Anthropic multimodal with real API calls."""

    @pytest.mark.vcr()
    def test_describe_image(self, test_image_bytes: bytes) -> None:
        """Claude returns a non-empty image description."""
        client = LLM(model="anthropic/claude-3-5-haiku-20241022")
        msgs = _build_multimodal_message(
            client,
            "Describe this image in one sentence. Be brief.",
            {"image": ImageFile(source=test_image_bytes)},
        )
        answer = client.call(msgs)
        assert answer
        assert isinstance(answer, str)
        assert len(answer) > 0

    @pytest.mark.vcr()
    def test_analyze_pdf(self) -> None:
        """Claude returns a non-empty answer about an in-memory PDF."""
        client = LLM(model="anthropic/claude-3-5-haiku-20241022")
        msgs = _build_multimodal_message(
            client,
            "What type of document is this? Answer in one word.",
            {"document": PDFFile(source=MINIMAL_PDF)},
        )
        answer = client.call(msgs)
        assert answer
        assert isinstance(answer, str)
        assert len(answer) > 0
class TestAzureMultimodalIntegration:
    """Integration tests for Azure OpenAI multimodal with real API calls."""

    @pytest.mark.vcr()
    def test_describe_image(self, test_image_bytes: bytes) -> None:
        """Azure-hosted GPT-4o returns a non-empty image description."""
        client = LLM(model="azure/gpt-4o")
        prompt = "Describe this image in one sentence. Be brief."
        msgs = _build_multimodal_message(
            client, prompt, {"image": ImageFile(source=test_image_bytes)}
        )
        answer = client.call(msgs)
        assert answer
        assert isinstance(answer, str)
        assert len(answer) > 0
class TestBedrockMultimodalIntegration:
    """Integration tests for AWS Bedrock multimodal with real API calls."""

    @pytest.mark.vcr()
    def test_describe_image(self, test_image_bytes: bytes) -> None:
        """Bedrock-hosted Claude returns a non-empty image description."""
        client = LLM(model="bedrock/anthropic.claude-3-haiku-20240307-v1:0")
        msgs = _build_multimodal_message(
            client,
            "Describe this image in one sentence. Be brief.",
            {"image": ImageFile(source=test_image_bytes)},
        )
        answer = client.call(msgs)
        assert answer
        assert isinstance(answer, str)
        assert len(answer) > 0

    @pytest.mark.vcr()
    def test_analyze_pdf(self) -> None:
        """Bedrock-hosted Claude returns a non-empty answer about a PDF."""
        client = LLM(model="bedrock/anthropic.claude-3-haiku-20240307-v1:0")
        msgs = _build_multimodal_message(
            client,
            "What type of document is this? Answer in one word.",
            {"document": PDFFile(source=MINIMAL_PDF)},
        )
        answer = client.call(msgs)
        assert answer
        assert isinstance(answer, str)
        assert len(answer) > 0
class TestGeminiMultimodalIntegration:
    """Integration tests for Gemini multimodal with real API calls."""

    @pytest.mark.vcr()
    def test_describe_image(self, test_image_bytes: bytes) -> None:
        """Gemini returns a non-empty image description."""
        client = LLM(model="gemini/gemini-2.0-flash")
        msgs = _build_multimodal_message(
            client,
            "Describe this image in one sentence. Be brief.",
            {"image": ImageFile(source=test_image_bytes)},
        )
        answer = client.call(msgs)
        assert answer
        assert isinstance(answer, str)
        assert len(answer) > 0

    @pytest.mark.vcr()
    def test_analyze_text_file(self, test_text_bytes: bytes) -> None:
        """Gemini returns a non-empty summary of a text file."""
        client = LLM(model="gemini/gemini-2.0-flash")
        msgs = _build_multimodal_message(
            client,
            "Summarize what this text file says in one sentence.",
            {"readme": TextFile(source=test_text_bytes)},
        )
        answer = client.call(msgs)
        assert answer
        assert isinstance(answer, str)
        assert len(answer) > 0

    @pytest.mark.vcr()
    def test_analyze_video_file(self, test_video_bytes: bytes) -> None:
        """Gemini returns a non-empty description of a video file."""
        client = LLM(model="gemini/gemini-2.0-flash")
        msgs = _build_multimodal_message(
            client,
            "Describe what you see in this video in one sentence. Be brief.",
            {"video": VideoFile(source=test_video_bytes)},
        )
        answer = client.call(msgs)
        assert answer
        assert isinstance(answer, str)
        assert len(answer) > 0

    @pytest.mark.vcr()
    def test_analyze_audio_file(self, test_audio_bytes: bytes) -> None:
        """Gemini returns a non-empty description of an audio file."""
        client = LLM(model="gemini/gemini-2.0-flash")
        msgs = _build_multimodal_message(
            client,
            "Describe what you hear in this audio in one sentence. Be brief.",
            {"audio": AudioFile(source=test_audio_bytes)},
        )
        answer = client.call(msgs)
        assert answer
        assert isinstance(answer, str)
        assert len(answer) > 0
class TestLiteLLMMultimodalIntegration:
    """Integration tests for LiteLLM wrapper multimodal with real API calls."""

    @pytest.mark.vcr()
    def test_describe_image_gpt4o(self, test_image_bytes: bytes) -> None:
        """LiteLLM-routed GPT-4o returns a non-empty image description."""
        client = LLM(model="gpt-4o-mini", is_litellm=True)
        msgs = _build_multimodal_message(
            client,
            "Describe this image in one sentence. Be brief.",
            {"image": ImageFile(source=test_image_bytes)},
        )
        answer = client.call(msgs)
        assert answer
        assert isinstance(answer, str)
        assert len(answer) > 0

    @pytest.mark.vcr()
    def test_describe_image_claude(self, test_image_bytes: bytes) -> None:
        """LiteLLM-routed Claude returns a non-empty image description."""
        client = LLM(model="anthropic/claude-3-5-haiku-20241022", is_litellm=True)
        msgs = _build_multimodal_message(
            client,
            "Describe this image in one sentence. Be brief.",
            {"image": ImageFile(source=test_image_bytes)},
        )
        answer = client.call(msgs)
        assert answer
        assert isinstance(answer, str)
        assert len(answer) > 0
class TestMultipleFilesIntegration:
    """Integration tests for multiple files in a single request."""

    @pytest.mark.vcr()
    def test_multiple_images_openai(self, test_image_bytes: bytes) -> None:
        """OpenAI counts two images attached to one message."""
        client = LLM(model="openai/gpt-4o-mini")
        attachments = {
            "image1": ImageFile(source=test_image_bytes),
            "image2": ImageFile(source=test_image_bytes),
        }
        msgs = _build_multimodal_message(
            client,
            "How many images do you see? Answer with just the number.",
            attachments,
        )
        answer = client.call(msgs)
        assert answer
        assert isinstance(answer, str)
        # Model may answer numerically or in words.
        assert "2" in answer or "two" in answer.lower()

    @pytest.mark.vcr()
    def test_mixed_content_anthropic(self, test_image_bytes: bytes) -> None:
        """Anthropic handles an image and a PDF in the same request."""
        client = LLM(model="anthropic/claude-3-5-haiku-20241022")
        attachments = {
            "image": ImageFile(source=test_image_bytes),
            "document": PDFFile(source=MINIMAL_PDF),
        }
        msgs = _build_multimodal_message(
            client,
            "What types of files did I send you? List them briefly.",
            attachments,
        )
        answer = client.call(msgs)
        assert answer
        assert isinstance(answer, str)
        assert len(answer) > 0
class TestGenericFileIntegration:
    """Integration tests for the generic File class with auto-detection."""

    @pytest.mark.vcr()
    def test_generic_file_image_openai(self, test_image_bytes: bytes) -> None:
        """A generic File holding image bytes is usable with OpenAI."""
        client = LLM(model="openai/gpt-4o-mini")
        msgs = _build_multimodal_message(
            client,
            "Describe this image in one sentence. Be brief.",
            {"image": File(source=test_image_bytes)},
        )
        answer = client.call(msgs)
        assert answer
        assert isinstance(answer, str)
        assert len(answer) > 0

    @pytest.mark.vcr()
    def test_generic_file_pdf_anthropic(self) -> None:
        """A generic File holding PDF bytes is usable with Anthropic."""
        client = LLM(model="anthropic/claude-3-5-haiku-20241022")
        msgs = _build_multimodal_message(
            client,
            "What type of document is this? Answer in one word.",
            {"document": File(source=MINIMAL_PDF)},
        )
        answer = client.call(msgs)
        assert answer
        assert isinstance(answer, str)
        assert len(answer) > 0

    @pytest.mark.vcr()
    def test_generic_file_text_gemini(self, test_text_bytes: bytes) -> None:
        """A generic File holding text bytes is usable with Gemini."""
        client = LLM(model="gemini/gemini-2.0-flash")
        msgs = _build_multimodal_message(
            client,
            "Summarize what this text says in one sentence.",
            {"content": File(source=test_text_bytes)},
        )
        answer = client.call(msgs)
        assert answer
        assert isinstance(answer, str)
        assert len(answer) > 0

    @pytest.mark.vcr()
    def test_generic_file_mixed_types(self, test_image_bytes: bytes) -> None:
        """Multiple auto-detected generic Files work in one request."""
        client = LLM(model="anthropic/claude-3-5-haiku-20241022")
        attachments = {
            "chart": File(source=test_image_bytes),
            "doc": File(source=MINIMAL_PDF),
        }
        msgs = _build_multimodal_message(
            client,
            "What types of files did I send? List them briefly.",
            attachments,
        )
        answer = client.call(msgs)
        assert answer
        assert isinstance(answer, str)
        assert len(answer) > 0
def _build_multimodal_message_with_upload(
    llm: LLM, prompt: str, files: dict
) -> tuple[list[dict], list[dict]]:
    """Build a multimodal message using file_id uploads instead of inline base64.

    Note: the OpenAI Chat Completions API accepts file_id references only for
    PDFs (type="file"), not images; image file_id support needs the Responses
    API (type="input_image"). Since crewAI uses Chat Completions, file_id
    uploads are exercised against Anthropic, which supports them for all types.

    Returns:
        Tuple of (messages, content_blocks); content_blocks can be inspected
        by callers to verify that a file_id was actually used.
    """
    from crewai_files.formatting.anthropic import AnthropicFormatter

    resolver = FileResolver(config=FileResolverConfig(prefer_upload=True))
    formatter = AnthropicFormatter()

    blocks: list[dict] = []
    for attachment in files.values():
        resolved = resolver.resolve(attachment, "anthropic")
        formatted = formatter.format_block(attachment, resolved)
        # format_block may decline to produce a block; skip those.
        if formatted is not None:
            blocks.append(formatted)

    content = [llm.format_text_content(prompt), *blocks]
    return [{"role": "user", "content": content}], blocks
def _build_responses_message_with_upload(
    llm: LLM, prompt: str, files: dict
) -> tuple[list[dict], list[dict]]:
    """Build a Responses API message using file_id uploads.

    The Responses API supports file_id for images via type="input_image".

    Returns:
        Tuple of (messages, content_blocks); content_blocks can be inspected
        by callers to verify that a file_id was actually used.
    """
    from crewai_files.formatting import OpenAIResponsesFormatter

    resolver = FileResolver(config=FileResolverConfig(prefer_upload=True))
    blocks = [
        OpenAIResponsesFormatter.format_block(
            resolver.resolve(attachment, "openai"), attachment.content_type
        )
        for attachment in files.values()
    ]

    message = {
        "role": "user",
        "content": [{"type": "input_text", "text": prompt}, *blocks],
    }
    return [message], blocks
class TestAnthropicFileUploadIntegration:
    """Integration tests for Anthropic multimodal with file_id uploads.

    File_id uploads are tested against Anthropic because the OpenAI Chat
    Completions API only supports file_id references for PDFs (type="file"),
    not images. OpenAI's Responses API does support image file_id
    (type="input_image"), but crewAI currently uses Chat Completions.
    Anthropic supports file_id for all content types including images.
    """

    @pytest.mark.vcr()
    def test_describe_image_with_file_id(self, test_image_bytes: bytes) -> None:
        """An image uploaded via the Files API is described successfully."""
        client = LLM(model="anthropic/claude-3-5-haiku-20241022")
        messages, content_blocks = _build_multimodal_message_with_upload(
            client,
            "Describe this image in one sentence. Be brief.",
            {"image": ImageFile(source=test_image_bytes)},
        )
        # Confirm the block carries a file_id reference rather than base64 data.
        assert len(content_blocks) == 1
        source = content_blocks[0].get("source", {})
        assert source.get("type") == "file", (
            f"Expected source type 'file' for file_id upload, got '{source.get('type')}'. "
            "This test verifies file_id uploads work - if falling back to base64, "
            "check that the Anthropic Files API uploader is working correctly."
        )
        assert "file_id" in source, "Expected file_id in source for file_id upload"
        answer = client.call(messages)
        assert answer
        assert isinstance(answer, str)
        assert len(answer) > 0
class TestOpenAIResponsesFileUploadIntegration:
    """Integration tests for OpenAI Responses API with file_id uploads.

    The Responses API supports file_id for images via type="input_image",
    unlike Chat Completions which only supports file_id for PDFs.
    """

    @pytest.mark.vcr()
    def test_describe_image_with_file_id(self, test_image_bytes: bytes) -> None:
        """An image uploaded via the Files API is described via Responses."""
        client = LLM(model="openai/gpt-4o-mini", api="responses")
        messages, content_blocks = _build_responses_message_with_upload(
            client,
            "Describe this image in one sentence. Be brief.",
            {"image": ImageFile(source=test_image_bytes)},
        )
        # Confirm the block uses input_image with a file_id reference.
        assert len(content_blocks) == 1
        block = content_blocks[0]
        assert block.get("type") == "input_image", (
            f"Expected type 'input_image' for Responses API, got '{block.get('type')}'. "
            "This test verifies file_id uploads work with the Responses API."
        )
        assert "file_id" in block, "Expected file_id in block for file_id upload"
        answer = client.call(messages)
        assert answer
        assert isinstance(answer, str)
        assert len(answer) > 0

    @pytest.mark.vcr()
    def test_describe_image_via_format_api(self, test_image_bytes: bytes) -> None:
        """format_multimodal_content with api='responses' yields inline blocks."""
        client = LLM(model="openai/gpt-4o-mini", api="responses")
        content_blocks = format_multimodal_content(
            {"image": ImageFile(source=test_image_bytes)}, "openai", api="responses"
        )
        # Responses API block shape, inline (base64 data URL) since no upload.
        assert len(content_blocks) == 1
        block = content_blocks[0]
        assert block.get("type") == "input_image", (
            f"Expected type 'input_image' for Responses API, got '{block.get('type')}'"
        )
        assert "image_url" in block, "Expected image_url in block for inline image"
        messages = [
            {
                "role": "user",
                "content": [
                    {"type": "input_text", "text": "Describe this image in one sentence."},
                    *content_blocks,
                ],
            }
        ]
        answer = client.call(messages)
        assert answer
        assert isinstance(answer, str)
        assert len(answer) > 0

    @pytest.mark.vcr()
    def test_describe_image_via_format_api_with_upload(self, test_image_bytes: bytes) -> None:
        """format_multimodal_content with prefer_upload=True uploads the file."""
        client = LLM(model="openai/gpt-4o-mini", api="responses")
        content_blocks = format_multimodal_content(
            {"image": ImageFile(source=test_image_bytes)},
            "openai",
            api="responses",
            prefer_upload=True,
        )
        # Upload path: block must reference the uploaded file by id.
        assert len(content_blocks) == 1
        block = content_blocks[0]
        assert block.get("type") == "input_image", (
            f"Expected type 'input_image' for Responses API, got '{block.get('type')}'"
        )
        assert "file_id" in block, (
            "Expected file_id in block when prefer_upload=True. "
            f"Got keys: {list(block.keys())}"
        )
        messages = [
            {
                "role": "user",
                "content": [
                    {"type": "input_text", "text": "Describe this image in one sentence."},
                    *content_blocks,
                ],
            }
        ]
        answer = client.call(messages)
        assert answer
        assert isinstance(answer, str)
        assert len(answer) > 0
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/llms/test_multimodal_integration.py",
"license": "MIT License",
"lines": 604,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/tests/test_agent_multimodal.py | """Integration tests for Agent multimodal functionality with input_files.
Tests agent.kickoff(input_files={...}) across different providers and file types.
"""
from pathlib import Path
import pytest
from crewai import Agent, LLM
from crewai_files import AudioFile, File, ImageFile, PDFFile, TextFile, VideoFile
# Fixture locations for the agent multimodal tests.
# NOTE(review): resolved three parents up into a sibling crewai-files package —
# confirm against the repository layout.
TEST_FIXTURES_DIR = (
    Path(__file__).parent.parent.parent / "crewai-files" / "tests" / "fixtures"
)
TEST_IMAGE_PATH = TEST_FIXTURES_DIR / "revenue_chart.png"
TEST_TEXT_PATH = TEST_FIXTURES_DIR / "review_guidelines.txt"
TEST_VIDEO_PATH = TEST_FIXTURES_DIR / "sample_video.mp4"
TEST_AUDIO_PATH = TEST_FIXTURES_DIR / "sample_audio.wav"
# Hand-written single-page PDF (catalog -> page tree -> one empty page),
# kept in memory so no PDF fixture file is required.
MINIMAL_PDF = b"""%PDF-1.4
1 0 obj << /Type /Catalog /Pages 2 0 R >> endobj
2 0 obj << /Type /Pages /Kids [3 0 R] /Count 1 >> endobj
3 0 obj << /Type /Page /Parent 2 0 R /MediaBox [0 0 612 792] >> endobj
xref
0 4
0000000000 65535 f
0000000009 00000 n
0000000058 00000 n
0000000115 00000 n
trailer << /Size 4 /Root 1 0 R >>
startxref
196
%%EOF
"""
# Model matrices used by @pytest.mark.parametrize below.
OPENAI_IMAGE_MODELS = [
    "openai/gpt-4o-mini",
    "openai/gpt-4o",
    "openai/o4-mini",
]
# (model, api) pairs exercising the OpenAI Responses API path.
OPENAI_RESPONSES_MODELS = [
    ("openai/gpt-4o-mini", "responses"),
    ("openai/o4-mini", "responses"),
]
ANTHROPIC_MODELS = [
    "anthropic/claude-3-5-haiku-20241022",
]
GEMINI_MODELS = [
    "gemini/gemini-2.0-flash",
]
@pytest.fixture
def image_file() -> ImageFile:
    """ImageFile backed by the on-disk image fixture."""
    path = str(TEST_IMAGE_PATH)
    return ImageFile(source=path)
@pytest.fixture
def image_bytes() -> bytes:
    """Raw bytes of the image fixture file."""
    data = TEST_IMAGE_PATH.read_bytes()
    return data
@pytest.fixture
def text_file() -> TextFile:
    """TextFile backed by the on-disk text fixture."""
    path = str(TEST_TEXT_PATH)
    return TextFile(source=path)
@pytest.fixture
def text_bytes() -> bytes:
    """Raw bytes of the text fixture file."""
    data = TEST_TEXT_PATH.read_bytes()
    return data
@pytest.fixture
def pdf_file() -> PDFFile:
    """PDFFile wrapping the in-memory minimal PDF bytes."""
    return PDFFile(source=MINIMAL_PDF)
@pytest.fixture
def video_file() -> VideoFile:
    """VideoFile from the fixture, skipping the test when it is absent."""
    if TEST_VIDEO_PATH.exists():
        return VideoFile(source=str(TEST_VIDEO_PATH))
    pytest.skip("sample_video.mp4 fixture not found")
@pytest.fixture
def audio_file() -> AudioFile:
    """AudioFile from the fixture, skipping the test when it is absent."""
    if TEST_AUDIO_PATH.exists():
        return AudioFile(source=str(TEST_AUDIO_PATH))
    pytest.skip("sample_audio.wav fixture not found")
def _create_analyst_agent(llm: LLM) -> Agent:
    """Create a simple, non-verbose analyst agent bound to the given LLM."""
    return Agent(
        llm=llm,
        role="File Analyst",
        goal="Analyze and describe files accurately",
        backstory="Expert at analyzing various file types.",
        verbose=False,
    )
class TestAgentMultimodalOpenAI:
    """Test Agent with input_files using OpenAI models."""

    @pytest.mark.vcr()
    @pytest.mark.parametrize("model", OPENAI_IMAGE_MODELS)
    def test_image_file(self, model: str, image_file: ImageFile) -> None:
        """An agent produces non-empty output for an ImageFile input."""
        agent = _create_analyst_agent(LLM(model=model))
        outcome = agent.kickoff(
            messages=[{"role": "user", "content": "Describe this image briefly."}],
            input_files={"chart": image_file},
        )
        assert outcome
        assert outcome.raw
        assert len(outcome.raw) > 0

    @pytest.mark.vcr()
    @pytest.mark.parametrize("model", OPENAI_IMAGE_MODELS)
    def test_image_bytes(self, model: str, image_bytes: bytes) -> None:
        """An agent produces non-empty output for raw image bytes."""
        agent = _create_analyst_agent(LLM(model=model))
        outcome = agent.kickoff(
            messages=[{"role": "user", "content": "Describe this image briefly."}],
            input_files={"chart": ImageFile(source=image_bytes)},
        )
        assert outcome
        assert outcome.raw
        assert len(outcome.raw) > 0

    @pytest.mark.vcr()
    @pytest.mark.parametrize("model", OPENAI_IMAGE_MODELS)
    def test_generic_file_image(self, model: str, image_bytes: bytes) -> None:
        """An agent handles a generic File whose type is auto-detected."""
        agent = _create_analyst_agent(LLM(model=model))
        outcome = agent.kickoff(
            messages=[{"role": "user", "content": "Describe this image briefly."}],
            input_files={"chart": File(source=image_bytes)},
        )
        assert outcome
        assert outcome.raw
        assert len(outcome.raw) > 0
class TestAgentMultimodalOpenAIResponses:
    """Test Agent with input_files using OpenAI Responses API."""

    @pytest.mark.vcr()
    @pytest.mark.parametrize("model,api", OPENAI_RESPONSES_MODELS)
    def test_image_file(
        self, model: str, api: str, image_file: ImageFile
    ) -> None:
        """An agent on the Responses API handles an image file."""
        agent = _create_analyst_agent(LLM(model=model, api=api))
        outcome = agent.kickoff(
            messages=[{"role": "user", "content": "Describe this image briefly."}],
            input_files={"chart": image_file},
        )
        assert outcome
        assert outcome.raw
        assert len(outcome.raw) > 0

    @pytest.mark.vcr()
    @pytest.mark.parametrize("model,api", OPENAI_RESPONSES_MODELS)
    def test_pdf_file(self, model: str, api: str, pdf_file: PDFFile) -> None:
        """An agent on the Responses API handles a PDF file."""
        agent = _create_analyst_agent(LLM(model=model, api=api))
        outcome = agent.kickoff(
            messages=[{"role": "user", "content": "What type of document is this?"}],
            input_files={"document": pdf_file},
        )
        assert outcome
        assert outcome.raw
        assert len(outcome.raw) > 0
class TestAgentMultimodalAnthropic:
    """Test Agent with input_files using Anthropic models."""

    @pytest.mark.vcr()
    @pytest.mark.parametrize("model", ANTHROPIC_MODELS)
    def test_image_file(self, model: str, image_file: ImageFile) -> None:
        """An agent produces non-empty output for an image file."""
        agent = _create_analyst_agent(LLM(model=model))
        outcome = agent.kickoff(
            messages=[{"role": "user", "content": "Describe this image briefly."}],
            input_files={"chart": image_file},
        )
        assert outcome
        assert outcome.raw
        assert len(outcome.raw) > 0

    @pytest.mark.vcr()
    @pytest.mark.parametrize("model", ANTHROPIC_MODELS)
    def test_pdf_file(self, model: str, pdf_file: PDFFile) -> None:
        """An agent produces non-empty output for a PDF file."""
        agent = _create_analyst_agent(LLM(model=model))
        outcome = agent.kickoff(
            messages=[{"role": "user", "content": "What type of document is this?"}],
            input_files={"document": pdf_file},
        )
        assert outcome
        assert outcome.raw
        assert len(outcome.raw) > 0

    @pytest.mark.vcr()
    @pytest.mark.parametrize("model", ANTHROPIC_MODELS)
    def test_mixed_files(
        self, model: str, image_file: ImageFile, pdf_file: PDFFile
    ) -> None:
        """An agent handles an image and a PDF submitted together."""
        agent = _create_analyst_agent(LLM(model=model))
        outcome = agent.kickoff(
            messages=[{"role": "user", "content": "What files do you see?"}],
            input_files={"chart": image_file, "document": pdf_file},
        )
        assert outcome
        assert outcome.raw
        assert len(outcome.raw) > 0
class TestAgentMultimodalGemini:
    """Test Agent with input_files using Gemini models."""

    @pytest.mark.vcr()
    @pytest.mark.parametrize("model", GEMINI_MODELS)
    def test_image_file(self, model: str, image_file: ImageFile) -> None:
        """An agent produces non-empty output for an image file."""
        agent = _create_analyst_agent(LLM(model=model))
        outcome = agent.kickoff(
            messages=[{"role": "user", "content": "Describe this image briefly."}],
            input_files={"chart": image_file},
        )
        assert outcome
        assert outcome.raw
        assert len(outcome.raw) > 0

    @pytest.mark.vcr()
    @pytest.mark.parametrize("model", GEMINI_MODELS)
    def test_text_file(self, model: str, text_file: TextFile) -> None:
        """An agent produces non-empty output for a text file."""
        agent = _create_analyst_agent(LLM(model=model))
        outcome = agent.kickoff(
            messages=[{"role": "user", "content": "Summarize this text briefly."}],
            input_files={"readme": text_file},
        )
        assert outcome
        assert outcome.raw
        assert len(outcome.raw) > 0

    @pytest.mark.vcr()
    @pytest.mark.parametrize("model", GEMINI_MODELS)
    def test_video_file(self, model: str, video_file: VideoFile) -> None:
        """An agent produces non-empty output for a video file."""
        agent = _create_analyst_agent(LLM(model=model))
        outcome = agent.kickoff(
            messages=[{"role": "user", "content": "What do you see in this video?"}],
            input_files={"video": video_file},
        )
        assert outcome
        assert outcome.raw
        assert len(outcome.raw) > 0

    @pytest.mark.vcr()
    @pytest.mark.parametrize("model", GEMINI_MODELS)
    def test_audio_file(self, model: str, audio_file: AudioFile) -> None:
        """An agent produces non-empty output for an audio file."""
        agent = _create_analyst_agent(LLM(model=model))
        outcome = agent.kickoff(
            messages=[{"role": "user", "content": "What do you hear in this audio?"}],
            input_files={"audio": audio_file},
        )
        assert outcome
        assert outcome.raw
        assert len(outcome.raw) > 0

    @pytest.mark.vcr()
    @pytest.mark.parametrize("model", GEMINI_MODELS)
    def test_mixed_files(
        self,
        model: str,
        image_file: ImageFile,
        text_file: TextFile,
    ) -> None:
        """An agent handles an image and a text file submitted together."""
        agent = _create_analyst_agent(LLM(model=model))
        outcome = agent.kickoff(
            messages=[{"role": "user", "content": "What files do you see?"}],
            input_files={"chart": image_file, "readme": text_file},
        )
        assert outcome
        assert outcome.raw
        assert len(outcome.raw) > 0
class TestAgentMultimodalFileTypes:
    """Test all file types with appropriate providers."""

    @pytest.mark.vcr()
    def test_image_openai(self, image_file: ImageFile) -> None:
        """Image file via an OpenAI-backed agent."""
        agent = _create_analyst_agent(LLM(model="openai/gpt-4o-mini"))
        outcome = agent.kickoff(
            messages=[{"role": "user", "content": "Describe this image."}],
            input_files={"image": image_file},
        )
        assert outcome.raw

    @pytest.mark.vcr()
    def test_pdf_anthropic(self, pdf_file: PDFFile) -> None:
        """PDF file via an Anthropic-backed agent."""
        agent = _create_analyst_agent(LLM(model="anthropic/claude-3-5-haiku-20241022"))
        outcome = agent.kickoff(
            messages=[{"role": "user", "content": "What is this document?"}],
            input_files={"document": pdf_file},
        )
        assert outcome.raw

    @pytest.mark.vcr()
    def test_pdf_openai_responses(self, pdf_file: PDFFile) -> None:
        """PDF file via an agent on the OpenAI Responses API."""
        agent = _create_analyst_agent(LLM(model="openai/gpt-4o-mini", api="responses"))
        outcome = agent.kickoff(
            messages=[{"role": "user", "content": "What is this document?"}],
            input_files={"document": pdf_file},
        )
        assert outcome.raw

    @pytest.mark.vcr()
    def test_text_gemini(self, text_file: TextFile) -> None:
        """Text file via a Gemini-backed agent."""
        agent = _create_analyst_agent(LLM(model="gemini/gemini-2.0-flash"))
        outcome = agent.kickoff(
            messages=[{"role": "user", "content": "Summarize this text."}],
            input_files={"readme": text_file},
        )
        assert outcome.raw

    @pytest.mark.vcr()
    def test_video_gemini(self, video_file: VideoFile) -> None:
        """Video file via a Gemini-backed agent."""
        agent = _create_analyst_agent(LLM(model="gemini/gemini-2.0-flash"))
        outcome = agent.kickoff(
            messages=[{"role": "user", "content": "Describe this video."}],
            input_files={"video": video_file},
        )
        assert outcome.raw

    @pytest.mark.vcr()
    def test_audio_gemini(self, audio_file: AudioFile) -> None:
        """Audio file via a Gemini-backed agent."""
        agent = _create_analyst_agent(LLM(model="gemini/gemini-2.0-flash"))
        outcome = agent.kickoff(
            messages=[{"role": "user", "content": "Describe this audio."}],
            input_files={"audio": audio_file},
        )
        assert outcome.raw
class TestAgentMultimodalAsync:
    """Test async agent execution with files."""

    @pytest.mark.vcr()
    @pytest.mark.asyncio
    async def test_async_agent_with_image(self, image_file: ImageFile) -> None:
        """kickoff_async produces non-empty output for an image file."""
        agent = _create_analyst_agent(LLM(model="openai/gpt-4o-mini"))
        outcome = await agent.kickoff_async(
            messages=[{"role": "user", "content": "Describe this image."}],
            input_files={"chart": image_file},
        )
        assert outcome
        assert outcome.raw
        assert len(outcome.raw) > 0
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/test_agent_multimodal.py",
"license": "MIT License",
"lines": 348,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/tests/test_crew_multimodal.py | """Integration tests for Crew multimodal functionality with input_files.
Tests crew.kickoff(input_files={...}) across different providers and file types.
"""
from pathlib import Path
import pytest
from crewai import Agent, Crew, LLM, Task
from crewai_files import AudioFile, File, ImageFile, PDFFile, TextFile, VideoFile
# Fixture assets are shared with the sibling ``crewai-files`` package's tests.
TEST_FIXTURES_DIR = (
    Path(__file__).parent.parent.parent / "crewai-files" / "tests" / "fixtures"
)
TEST_IMAGE_PATH = TEST_FIXTURES_DIR / "revenue_chart.png"
TEST_TEXT_PATH = TEST_FIXTURES_DIR / "review_guidelines.txt"
TEST_VIDEO_PATH = TEST_FIXTURES_DIR / "sample_video.mp4"
TEST_AUDIO_PATH = TEST_FIXTURES_DIR / "sample_audio.wav"
TEST_PDF_PATH = TEST_FIXTURES_DIR / "agents.pdf"
# OpenAI chat-completions models exercised for image inputs.
OPENAI_IMAGE_MODELS = [
    "openai/gpt-4o-mini",
    "openai/gpt-4o",
    "openai/o4-mini",
    "openai/gpt-4.1-mini",
]
# (model, api) pairs routed through the OpenAI Responses API.
OPENAI_RESPONSES_MODELS = [
    ("openai/gpt-4o-mini", "responses"),
    ("openai/o4-mini", "responses"),
]
ANTHROPIC_MODELS = [
    "anthropic/claude-3-5-haiku-20241022",
    "anthropic/claude-sonnet-4-20250514",
]
GEMINI_MODELS = [
    "gemini/gemini-2.0-flash",
]
BEDROCK_MODELS = [
    "bedrock/anthropic.claude-3-haiku-20240307-v1:0",
]
@pytest.fixture
def image_file() -> ImageFile:
    """Provide an ImageFile backed by the on-disk PNG fixture."""
    source_path = str(TEST_IMAGE_PATH)
    return ImageFile(source=source_path)
@pytest.fixture
def image_bytes() -> bytes:
    """Provide the raw bytes of the PNG fixture."""
    with open(TEST_IMAGE_PATH, "rb") as handle:
        return handle.read()
@pytest.fixture
def text_file() -> TextFile:
    """Provide a TextFile backed by the on-disk text fixture."""
    source_path = str(TEST_TEXT_PATH)
    return TextFile(source=source_path)
@pytest.fixture
def text_bytes() -> bytes:
    """Provide the raw bytes of the text fixture."""
    with open(TEST_TEXT_PATH, "rb") as handle:
        return handle.read()
@pytest.fixture
def pdf_file() -> PDFFile:
    """Create a PDFFile from the test fixture.

    Skips when the fixture is absent, mirroring the guard used by the
    video and audio fixtures, so a missing file surfaces as a skip
    instead of an unrelated read error inside the test body.
    """
    if not TEST_PDF_PATH.exists():
        pytest.skip("agents.pdf fixture not found")
    return PDFFile(source=str(TEST_PDF_PATH))
@pytest.fixture
def video_file() -> VideoFile:
    """Provide a VideoFile for the MP4 fixture, skipping when absent."""
    if TEST_VIDEO_PATH.exists():
        return VideoFile(source=str(TEST_VIDEO_PATH))
    pytest.skip("sample_video.mp4 fixture not found")
@pytest.fixture
def audio_file() -> AudioFile:
    """Provide an AudioFile for the WAV fixture, skipping when absent."""
    if TEST_AUDIO_PATH.exists():
        return AudioFile(source=str(TEST_AUDIO_PATH))
    pytest.skip("sample_audio.wav fixture not found")
def _create_analyst_crew(llm: LLM) -> Crew:
    """Build a one-agent, one-task crew that briefly describes input files."""
    analyst = Agent(
        role="File Analyst",
        goal="Analyze and describe files accurately",
        backstory="Expert at analyzing various file types.",
        llm=llm,
        verbose=False,
    )
    describe_task = Task(
        description="Describe the file(s) you see. Be brief, one sentence max.",
        expected_output="A brief description of the file.",
        agent=analyst,
    )
    return Crew(agents=[analyst], tasks=[describe_task], verbose=False)
class TestCrewMultimodalOpenAI:
    """Crew input_files handling with OpenAI chat-completions models."""

    @pytest.mark.vcr()
    @pytest.mark.parametrize("model", OPENAI_IMAGE_MODELS)
    def test_image_file(self, model: str, image_file: ImageFile) -> None:
        """An ImageFile fixture is described by the crew."""
        crew = _create_analyst_crew(LLM(model=model))
        output = crew.kickoff(input_files={"chart": image_file})
        assert output.raw and len(output.raw) > 0

    @pytest.mark.vcr()
    @pytest.mark.parametrize("model", OPENAI_IMAGE_MODELS)
    def test_image_bytes(self, model: str, image_bytes: bytes) -> None:
        """Raw image bytes wrapped in an ImageFile are accepted."""
        crew = _create_analyst_crew(LLM(model=model))
        payload = ImageFile(source=image_bytes)
        output = crew.kickoff(input_files={"chart": payload})
        assert output.raw and len(output.raw) > 0

    @pytest.mark.vcr()
    @pytest.mark.parametrize("model", OPENAI_IMAGE_MODELS)
    def test_generic_file_image(self, model: str, image_bytes: bytes) -> None:
        """A generic File is accepted with content auto-detected as image."""
        crew = _create_analyst_crew(LLM(model=model))
        payload = File(source=image_bytes)
        output = crew.kickoff(input_files={"chart": payload})
        assert output.raw and len(output.raw) > 0
class TestCrewMultimodalOpenAIResponses:
    """Crew input_files handling through the OpenAI Responses API."""

    @pytest.mark.vcr()
    @pytest.mark.parametrize("model,api", OPENAI_RESPONSES_MODELS)
    def test_image_file(
        self, model: str, api: str, image_file: ImageFile
    ) -> None:
        """An image file round-trips through the Responses API."""
        crew = _create_analyst_crew(LLM(model=model, api=api))
        output = crew.kickoff(input_files={"chart": image_file})
        assert output.raw and len(output.raw) > 0

    @pytest.mark.vcr()
    @pytest.mark.parametrize("model,api", OPENAI_RESPONSES_MODELS)
    def test_pdf_file(self, model: str, api: str, pdf_file: PDFFile) -> None:
        """A PDF file round-trips through the Responses API."""
        crew = _create_analyst_crew(LLM(model=model, api=api))
        output = crew.kickoff(input_files={"document": pdf_file})
        assert output.raw and len(output.raw) > 0
class TestCrewMultimodalAnthropic:
    """Crew input_files handling with Anthropic models."""

    @pytest.mark.vcr()
    @pytest.mark.parametrize("model", ANTHROPIC_MODELS)
    def test_image_file(self, model: str, image_file: ImageFile) -> None:
        """An image file is described by the crew."""
        crew = _create_analyst_crew(LLM(model=model))
        output = crew.kickoff(input_files={"chart": image_file})
        assert output.raw and len(output.raw) > 0

    @pytest.mark.vcr()
    @pytest.mark.parametrize("model", ANTHROPIC_MODELS)
    def test_pdf_file(self, model: str, pdf_file: PDFFile) -> None:
        """A PDF file is described by the crew."""
        crew = _create_analyst_crew(LLM(model=model))
        output = crew.kickoff(input_files={"document": pdf_file})
        assert output.raw and len(output.raw) > 0

    @pytest.mark.vcr()
    @pytest.mark.parametrize("model", ANTHROPIC_MODELS)
    def test_mixed_files(
        self, model: str, image_file: ImageFile, pdf_file: PDFFile
    ) -> None:
        """Multiple file types can be supplied in one kickoff."""
        crew = _create_analyst_crew(LLM(model=model))
        output = crew.kickoff(
            input_files={"chart": image_file, "document": pdf_file}
        )
        assert output.raw and len(output.raw) > 0
class TestCrewMultimodalGemini:
    """Crew input_files handling with Gemini models."""

    @pytest.mark.vcr()
    @pytest.mark.parametrize("model", GEMINI_MODELS)
    def test_image_file(self, model: str, image_file: ImageFile) -> None:
        """An image file is described by the crew."""
        crew = _create_analyst_crew(LLM(model=model))
        output = crew.kickoff(input_files={"chart": image_file})
        assert output.raw and len(output.raw) > 0

    @pytest.mark.vcr()
    @pytest.mark.parametrize("model", GEMINI_MODELS)
    def test_text_file(self, model: str, text_file: TextFile) -> None:
        """A text file is described by the crew."""
        crew = _create_analyst_crew(LLM(model=model))
        output = crew.kickoff(input_files={"readme": text_file})
        assert output.raw and len(output.raw) > 0

    @pytest.mark.vcr()
    @pytest.mark.parametrize("model", GEMINI_MODELS)
    def test_video_file(self, model: str, video_file: VideoFile) -> None:
        """A video file is described by the crew."""
        crew = _create_analyst_crew(LLM(model=model))
        output = crew.kickoff(input_files={"video": video_file})
        assert output.raw and len(output.raw) > 0

    @pytest.mark.vcr()
    @pytest.mark.parametrize("model", GEMINI_MODELS)
    def test_audio_file(self, model: str, audio_file: AudioFile) -> None:
        """An audio file is described by the crew."""
        crew = _create_analyst_crew(LLM(model=model))
        output = crew.kickoff(input_files={"audio": audio_file})
        assert output.raw and len(output.raw) > 0

    @pytest.mark.vcr()
    @pytest.mark.parametrize("model", GEMINI_MODELS)
    def test_mixed_files(
        self,
        model: str,
        image_file: ImageFile,
        text_file: TextFile,
    ) -> None:
        """Multiple file types can be supplied in one kickoff."""
        crew = _create_analyst_crew(LLM(model=model))
        output = crew.kickoff(
            input_files={"chart": image_file, "readme": text_file}
        )
        assert output.raw and len(output.raw) > 0
class TestCrewMultimodalBedrock:
    """Crew input_files handling with Bedrock-hosted models."""

    @pytest.mark.vcr()
    @pytest.mark.parametrize("model", BEDROCK_MODELS)
    def test_image_file(self, model: str, image_file: ImageFile) -> None:
        """An image file is described by the crew."""
        crew = _create_analyst_crew(LLM(model=model))
        output = crew.kickoff(input_files={"chart": image_file})
        assert output.raw and len(output.raw) > 0

    @pytest.mark.vcr()
    @pytest.mark.parametrize("model", BEDROCK_MODELS)
    def test_pdf_file(self, model: str, pdf_file: PDFFile) -> None:
        """A PDF file is described by the crew."""
        crew = _create_analyst_crew(LLM(model=model))
        output = crew.kickoff(input_files={"document": pdf_file})
        assert output.raw and len(output.raw) > 0
class TestCrewMultimodalFileTypes:
    """Each file type paired with a provider that supports it natively."""

    @pytest.mark.vcr()
    def test_image_openai(self, image_file: ImageFile) -> None:
        """Image handled by OpenAI."""
        crew = _create_analyst_crew(LLM(model="openai/gpt-4o-mini"))
        assert crew.kickoff(input_files={"image": image_file}).raw

    @pytest.mark.vcr()
    def test_pdf_anthropic(self, pdf_file: PDFFile) -> None:
        """PDF handled by Anthropic."""
        crew = _create_analyst_crew(LLM(model="anthropic/claude-3-5-haiku-20241022"))
        assert crew.kickoff(input_files={"document": pdf_file}).raw

    @pytest.mark.vcr()
    def test_pdf_openai_responses(self, pdf_file: PDFFile) -> None:
        """PDF handled by the OpenAI Responses API."""
        crew = _create_analyst_crew(LLM(model="openai/gpt-4o-mini", api="responses"))
        assert crew.kickoff(input_files={"document": pdf_file}).raw

    @pytest.mark.vcr()
    def test_text_gemini(self, text_file: TextFile) -> None:
        """Text handled by Gemini."""
        crew = _create_analyst_crew(LLM(model="gemini/gemini-2.0-flash"))
        assert crew.kickoff(input_files={"readme": text_file}).raw

    @pytest.mark.vcr()
    def test_video_gemini(self, video_file: VideoFile) -> None:
        """Video handled by Gemini."""
        crew = _create_analyst_crew(LLM(model="gemini/gemini-2.0-flash"))
        assert crew.kickoff(input_files={"video": video_file}).raw

    @pytest.mark.vcr()
    def test_audio_gemini(self, audio_file: AudioFile) -> None:
        """Audio handled by Gemini."""
        crew = _create_analyst_crew(LLM(model="gemini/gemini-2.0-flash"))
        assert crew.kickoff(input_files={"audio": audio_file}).raw
class TestCrewMultimodalUnsupportedTypes:
    """Test that unsupported file types fall back to read_file tool."""

    @staticmethod
    def _build_file_type_crew(llm: LLM) -> Crew:
        """One-agent crew asking only for the file type (shared by both tests)."""
        agent = Agent(
            role="File Analyst",
            goal="Analyze files",
            backstory="Expert analyst.",
            llm=llm,
            verbose=False,
        )
        task = Task(
            description="What type of file is this? Just name the file type.",
            expected_output="The file type.",
            agent=agent,
        )
        return Crew(agents=[agent], tasks=[task], verbose=False)

    @pytest.mark.vcr()
    def test_video_with_openai_uses_tool(self, video_file: VideoFile) -> None:
        """Test video with OpenAI (no video support) uses read_file tool."""
        crew = self._build_file_type_crew(LLM(model="openai/gpt-4o-mini"))
        result = crew.kickoff(input_files={"video": video_file})
        assert result.raw
        # Should mention video or the filename since it can't directly process it

    @pytest.mark.vcr()
    def test_audio_with_anthropic_uses_tool(self, audio_file: AudioFile) -> None:
        """Test audio with Anthropic (no audio support) uses read_file tool."""
        crew = self._build_file_type_crew(
            LLM(model="anthropic/claude-3-5-haiku-20241022")
        )
        result = crew.kickoff(input_files={"audio": audio_file})
        assert result.raw
class TestCrewMultimodalFileUpload:
    """File upload path exercised via prefer_upload=True."""

    @pytest.mark.vcr()
    def test_image_upload_anthropic(self, image_file: ImageFile) -> None:
        """An image is uploaded through the Anthropic Files API."""
        crew = _create_analyst_crew(
            LLM(model="anthropic/claude-3-5-haiku-20241022", prefer_upload=True)
        )
        output = crew.kickoff(input_files={"chart": image_file})
        assert output.raw and len(output.raw) > 0

    @pytest.mark.vcr()
    def test_image_upload_openai_responses(self, image_file: ImageFile) -> None:
        """An image is uploaded through the OpenAI Files API (Responses)."""
        crew = _create_analyst_crew(
            LLM(model="openai/gpt-4o-mini", api="responses", prefer_upload=True)
        )
        output = crew.kickoff(input_files={"chart": image_file})
        assert output.raw and len(output.raw) > 0

    @pytest.mark.vcr()
    def test_pdf_upload_anthropic(self, pdf_file: PDFFile) -> None:
        """A PDF is uploaded through the Anthropic Files API."""
        crew = _create_analyst_crew(
            LLM(model="anthropic/claude-3-5-haiku-20241022", prefer_upload=True)
        )
        output = crew.kickoff(input_files={"document": pdf_file})
        assert output.raw and len(output.raw) > 0
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/test_crew_multimodal.py",
"license": "MIT License",
"lines": 351,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/tests/test_flow_multimodal.py | """Integration tests for Flow multimodal functionality with input_files.
Tests flow.kickoff(input_files={...}) with crews that process files.
"""
from pathlib import Path
import pytest
from crewai import Agent, Crew, LLM, Task
from crewai.flow.flow import Flow, listen, start
from crewai_files import AudioFile, File, ImageFile, PDFFile, TextFile, VideoFile
# Fixture assets are shared with the sibling ``crewai-files`` package's tests.
TEST_FIXTURES_DIR = (
    Path(__file__).parent.parent.parent / "crewai-files" / "tests" / "fixtures"
)
TEST_IMAGE_PATH = TEST_FIXTURES_DIR / "revenue_chart.png"
TEST_TEXT_PATH = TEST_FIXTURES_DIR / "review_guidelines.txt"
TEST_VIDEO_PATH = TEST_FIXTURES_DIR / "sample_video.mp4"
TEST_AUDIO_PATH = TEST_FIXTURES_DIR / "sample_audio.wav"
# Smallest well-formed single-page PDF (catalog -> pages -> one empty page,
# plus xref table and trailer), so PDF tests need no on-disk fixture.
MINIMAL_PDF = b"""%PDF-1.4
1 0 obj << /Type /Catalog /Pages 2 0 R >> endobj
2 0 obj << /Type /Pages /Kids [3 0 R] /Count 1 >> endobj
3 0 obj << /Type /Page /Parent 2 0 R /MediaBox [0 0 612 792] >> endobj
xref
0 4
0000000000 65535 f
0000000009 00000 n
0000000058 00000 n
0000000115 00000 n
trailer << /Size 4 /Root 1 0 R >>
startxref
196
%%EOF
"""
@pytest.fixture
def image_file() -> ImageFile:
    """Provide an ImageFile backed by the on-disk PNG fixture."""
    source_path = str(TEST_IMAGE_PATH)
    return ImageFile(source=source_path)
@pytest.fixture
def image_bytes() -> bytes:
    """Provide the raw bytes of the PNG fixture."""
    with open(TEST_IMAGE_PATH, "rb") as handle:
        return handle.read()
@pytest.fixture
def text_file() -> TextFile:
    """Provide a TextFile backed by the on-disk text fixture."""
    source_path = str(TEST_TEXT_PATH)
    return TextFile(source=source_path)
@pytest.fixture
def pdf_file() -> PDFFile:
    """Provide a PDFFile wrapping the in-memory minimal PDF bytes."""
    document_bytes = MINIMAL_PDF
    return PDFFile(source=document_bytes)
@pytest.fixture
def video_file() -> VideoFile:
    """Provide a VideoFile for the MP4 fixture, skipping when absent."""
    if TEST_VIDEO_PATH.exists():
        return VideoFile(source=str(TEST_VIDEO_PATH))
    pytest.skip("sample_video.mp4 fixture not found")
@pytest.fixture
def audio_file() -> AudioFile:
    """Provide an AudioFile for the WAV fixture, skipping when absent."""
    if TEST_AUDIO_PATH.exists():
        return AudioFile(source=str(TEST_AUDIO_PATH))
    pytest.skip("sample_audio.wav fixture not found")
def _create_analyst_crew(llm: LLM) -> Crew:
    """Build a one-agent, one-task crew that briefly describes input files."""
    analyst = Agent(
        role="File Analyst",
        goal="Analyze and describe files accurately",
        backstory="Expert at analyzing various file types.",
        llm=llm,
        verbose=False,
    )
    describe_task = Task(
        description="Describe the file(s) you see. Be brief, one sentence max.",
        expected_output="A brief description of the file.",
        agent=analyst,
    )
    return Crew(agents=[analyst], tasks=[describe_task], verbose=False)
class TestFlowMultimodalOpenAI:
    """Flow input_files handling with OpenAI models."""

    @pytest.mark.vcr()
    def test_flow_with_image_file(self, image_file: ImageFile) -> None:
        """Files registered at flow kickoff reach the nested crew."""

        class ImageAnalysisFlow(Flow):
            @start()
            def analyze_image(self) -> str:
                crew = _create_analyst_crew(LLM(model="openai/gpt-4o-mini"))
                return crew.kickoff().raw

        outcome = ImageAnalysisFlow().kickoff(input_files={"chart": image_file})
        assert outcome and isinstance(outcome, str) and len(outcome) > 0

    @pytest.mark.vcr()
    def test_flow_with_image_bytes(self, image_bytes: bytes) -> None:
        """Raw image bytes wrapped in an ImageFile work through a flow."""

        class ImageAnalysisFlow(Flow):
            @start()
            def analyze_image(self) -> str:
                crew = _create_analyst_crew(LLM(model="openai/gpt-4o-mini"))
                return crew.kickoff().raw

        payload = ImageFile(source=image_bytes)
        outcome = ImageAnalysisFlow().kickoff(input_files={"chart": payload})
        assert outcome and isinstance(outcome, str) and len(outcome) > 0
class TestFlowMultimodalAnthropic:
    """Flow input_files handling with Anthropic models."""

    @pytest.mark.vcr()
    def test_flow_with_image_file(self, image_file: ImageFile) -> None:
        """Files registered at flow kickoff reach the nested crew."""

        class ImageAnalysisFlow(Flow):
            @start()
            def analyze_image(self) -> str:
                crew = _create_analyst_crew(
                    LLM(model="anthropic/claude-3-5-haiku-20241022")
                )
                return crew.kickoff().raw

        outcome = ImageAnalysisFlow().kickoff(input_files={"chart": image_file})
        assert outcome and isinstance(outcome, str) and len(outcome) > 0

    @pytest.mark.vcr()
    def test_flow_with_pdf_file(self, pdf_file: PDFFile) -> None:
        """A PDF file is processed through a flow."""

        class PDFAnalysisFlow(Flow):
            @start()
            def analyze_pdf(self) -> str:
                crew = _create_analyst_crew(
                    LLM(model="anthropic/claude-3-5-haiku-20241022")
                )
                return crew.kickoff().raw

        outcome = PDFAnalysisFlow().kickoff(input_files={"document": pdf_file})
        assert outcome and isinstance(outcome, str) and len(outcome) > 0
class TestFlowMultimodalGemini:
    """Flow input_files handling with Gemini models."""

    @pytest.mark.vcr()
    def test_flow_with_image_file(self, image_file: ImageFile) -> None:
        """An image file is processed through a flow."""

        class ImageAnalysisFlow(Flow):
            @start()
            def analyze_image(self) -> str:
                crew = _create_analyst_crew(LLM(model="gemini/gemini-2.0-flash"))
                return crew.kickoff().raw

        outcome = ImageAnalysisFlow().kickoff(input_files={"chart": image_file})
        assert outcome and isinstance(outcome, str) and len(outcome) > 0

    @pytest.mark.vcr()
    def test_flow_with_text_file(self, text_file: TextFile) -> None:
        """A text file is processed through a flow."""

        class TextAnalysisFlow(Flow):
            @start()
            def analyze_text(self) -> str:
                crew = _create_analyst_crew(LLM(model="gemini/gemini-2.0-flash"))
                return crew.kickoff().raw

        outcome = TextAnalysisFlow().kickoff(input_files={"readme": text_file})
        assert outcome and isinstance(outcome, str) and len(outcome) > 0

    @pytest.mark.vcr()
    def test_flow_with_video_file(self, video_file: VideoFile) -> None:
        """A video file is processed through a flow."""

        class VideoAnalysisFlow(Flow):
            @start()
            def analyze_video(self) -> str:
                crew = _create_analyst_crew(LLM(model="gemini/gemini-2.0-flash"))
                return crew.kickoff().raw

        outcome = VideoAnalysisFlow().kickoff(input_files={"video": video_file})
        assert outcome and isinstance(outcome, str) and len(outcome) > 0

    @pytest.mark.vcr()
    def test_flow_with_audio_file(self, audio_file: AudioFile) -> None:
        """An audio file is processed through a flow."""

        class AudioAnalysisFlow(Flow):
            @start()
            def analyze_audio(self) -> str:
                crew = _create_analyst_crew(LLM(model="gemini/gemini-2.0-flash"))
                return crew.kickoff().raw

        outcome = AudioAnalysisFlow().kickoff(input_files={"audio": audio_file})
        assert outcome and isinstance(outcome, str) and len(outcome) > 0
class TestFlowMultimodalMultiStep:
    """Multi-step flows that pass files through more than one crew."""

    @pytest.mark.vcr()
    def test_flow_with_multiple_crews(self, image_file: ImageFile) -> None:
        """Files registered at kickoff are visible to every crew in the flow."""

        class MultiStepFlow(Flow):
            @start()
            def describe_image(self) -> str:
                crew = _create_analyst_crew(LLM(model="openai/gpt-4o-mini"))
                return crew.kickoff().raw

            @listen(describe_image)
            def summarize_description(self, description: str) -> str:
                summarizer = Agent(
                    role="Summarizer",
                    goal="Summarize text concisely",
                    backstory="Expert at summarization.",
                    llm=LLM(model="openai/gpt-4o-mini"),
                    verbose=False,
                )
                summary_task = Task(
                    description=f"Summarize this in 5 words: {description}",
                    expected_output="A 5-word summary.",
                    agent=summarizer,
                )
                crew = Crew(agents=[summarizer], tasks=[summary_task], verbose=False)
                return crew.kickoff().raw

        outcome = MultiStepFlow().kickoff(input_files={"chart": image_file})
        assert outcome and isinstance(outcome, str) and len(outcome) > 0

    @pytest.mark.vcr()
    def test_flow_with_mixed_files(
        self, image_file: ImageFile, text_file: TextFile
    ) -> None:
        """Several file types can be registered on one flow."""

        class MixedFilesFlow(Flow):
            @start()
            def analyze_files(self) -> str:
                crew = _create_analyst_crew(LLM(model="gemini/gemini-2.0-flash"))
                return crew.kickoff().raw

        outcome = MixedFilesFlow().kickoff(
            input_files={"chart": image_file, "readme": text_file}
        )
        assert outcome and isinstance(outcome, str) and len(outcome) > 0
class TestFlowMultimodalAsync:
    """Async flow execution with input files."""

    @pytest.mark.vcr()
    @pytest.mark.asyncio
    async def test_async_flow_with_image(self, image_file: ImageFile) -> None:
        """kickoff_async forwards input_files like the sync path."""

        class AsyncImageFlow(Flow):
            @start()
            def analyze_image(self) -> str:
                crew = _create_analyst_crew(LLM(model="openai/gpt-4o-mini"))
                return crew.kickoff().raw

        outcome = await AsyncImageFlow().kickoff_async(
            input_files={"chart": image_file}
        )
        assert outcome and isinstance(outcome, str) and len(outcome) > 0
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/test_flow_multimodal.py",
"license": "MIT License",
"lines": 271,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/tests/tools/agent_tools/test_read_file_tool.py | """Unit tests for ReadFileTool."""
import base64
import pytest
from crewai.tools.agent_tools.read_file_tool import ReadFileTool
from crewai_files import ImageFile, PDFFile, TextFile
class TestReadFileTool:
    """Behavioral tests for ReadFileTool."""

    def setup_method(self) -> None:
        """Create a fresh tool instance for each test."""
        self.tool = ReadFileTool()

    def test_tool_metadata(self) -> None:
        """The tool advertises its name and purpose."""
        assert self.tool.name == "read_file"
        assert "Read content from an input file" in self.tool.description

    def test_run_no_files_available(self) -> None:
        """Without registered files the tool reports none are available."""
        assert self.tool._run(file_name="any.txt") == "No input files available."

    def test_run_file_not_found(self) -> None:
        """Unknown names yield an error that enumerates the known files."""
        self.tool.set_files({"doc.txt": TextFile(source=b"content")})
        message = self.tool._run(file_name="missing.txt")
        assert "File 'missing.txt' not found" in message
        assert "doc.txt" in message  # available files are listed

    def test_run_text_file(self) -> None:
        """Text files come back decoded to str."""
        body = "Hello, this is text content!"
        self.tool.set_files({"readme.txt": TextFile(source=body.encode())})
        assert self.tool._run(file_name="readme.txt") == body

    def test_run_json_file(self) -> None:
        """JSON files are returned as their decoded text."""
        body = '{"key": "value"}'
        self.tool.set_files({"data.json": TextFile(source=body.encode())})
        assert self.tool._run(file_name="data.json") == body

    def test_run_binary_file_returns_base64(self) -> None:
        """Binary files are reported with MIME type plus base64 payload."""
        # Minimal valid PNG structure for proper MIME detection
        png_bytes = (
            b"\x89PNG\r\n\x1a\n"
            b"\x00\x00\x00\rIHDR"
            b"\x00\x00\x00\x01\x00\x00\x00\x01\x08\x02\x00\x00\x00"
            b"\x90wS\xde"
            b"\x00\x00\x00\x00IEND\xaeB`\x82"
        )
        self.tool.set_files({"image.png": ImageFile(source=png_bytes)})
        report = self.tool._run(file_name="image.png")
        for fragment in ("[Binary file:", "image/png", "Base64:"):
            assert fragment in report
        # The base64 payload must round-trip to the original bytes.
        encoded = report.split("Base64: ")[1]
        assert base64.b64decode(encoded) == png_bytes

    def test_run_pdf_file_returns_base64(self) -> None:
        """PDF content is treated as binary with its MIME type."""
        self.tool.set_files(
            {"doc.pdf": PDFFile(source=b"%PDF-1.4 some content here")}
        )
        report = self.tool._run(file_name="doc.pdf")
        assert "[Binary file:" in report
        assert "application/pdf" in report

    def test_set_files_none(self) -> None:
        """Passing None clears any previously registered files."""
        self.tool.set_files({"doc": TextFile(source=b"content")})
        self.tool.set_files(None)
        assert self.tool._run(file_name="doc") == "No input files available."

    def test_run_multiple_files(self) -> None:
        """Every registered file is individually addressable."""
        contents = {f"file{i}.txt": f"content {i}" for i in (1, 2, 3)}
        self.tool.set_files(
            {name: TextFile(source=text.encode()) for name, text in contents.items()}
        )
        for name, text in contents.items():
            assert self.tool._run(file_name=name) == text

    def test_run_with_kwargs(self) -> None:
        """Unexpected keyword arguments are ignored."""
        self.tool.set_files({"doc.txt": TextFile(source=b"content")})
        assert self.tool._run(file_name="doc.txt", extra_arg="ignored") == "content"

    def test_args_schema(self) -> None:
        """The schema declares a required file_name argument."""
        fields = self.tool.args_schema.model_fields
        assert "file_name" in fields
        assert fields["file_name"].is_required()
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/tools/agent_tools/test_read_file_tool.py",
"license": "MIT License",
"lines": 88,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/tests/utilities/test_file_store.py | """Unit tests for file_store module."""
import uuid
import pytest
from crewai.utilities.file_store import (
clear_files,
clear_task_files,
get_all_files,
get_files,
get_task_files,
store_files,
store_task_files,
)
from crewai_files import TextFile
class TestFileStore:
    """Synchronous file store operations."""

    def setup_method(self) -> None:
        """Fresh identifiers and a sample file for each test."""
        self.crew_id = uuid.uuid4()
        self.task_id = uuid.uuid4()
        self.test_file = TextFile(source=b"test content")

    def teardown_method(self) -> None:
        """Remove anything a test left behind in the store."""
        clear_files(self.crew_id)
        clear_task_files(self.task_id)

    def test_store_and_get_files(self) -> None:
        """Stored crew files can be read back intact."""
        store_files(self.crew_id, {"doc": self.test_file})
        fetched = get_files(self.crew_id)
        assert fetched is not None
        assert "doc" in fetched
        assert fetched["doc"].read() == b"test content"

    def test_get_files_returns_none_when_empty(self) -> None:
        """Unknown crew ids resolve to None."""
        assert get_files(uuid.uuid4()) is None

    def test_clear_files(self) -> None:
        """clear_files removes previously stored crew files."""
        store_files(self.crew_id, {"doc": self.test_file})
        clear_files(self.crew_id)
        assert get_files(self.crew_id) is None

    def test_store_and_get_task_files(self) -> None:
        """Stored task files can be read back."""
        store_task_files(self.task_id, {"task_doc": self.test_file})
        fetched = get_task_files(self.task_id)
        assert fetched is not None
        assert "task_doc" in fetched

    def test_clear_task_files(self) -> None:
        """clear_task_files removes previously stored task files."""
        store_task_files(self.task_id, {"task_doc": self.test_file})
        clear_task_files(self.task_id)
        assert get_task_files(self.task_id) is None

    def test_get_all_files_merges_crew_and_task(self) -> None:
        """get_all_files returns the union of crew and task files."""
        store_files(self.crew_id, {"crew_doc": TextFile(source=b"crew content")})
        store_task_files(self.task_id, {"task_doc": TextFile(source=b"task content")})
        combined = get_all_files(self.crew_id, self.task_id)
        assert combined is not None
        assert "crew_doc" in combined and "task_doc" in combined

    def test_get_all_files_task_overrides_crew(self) -> None:
        """On a name collision the task-level file wins."""
        store_files(self.crew_id, {"shared_doc": TextFile(source=b"crew version")})
        store_task_files(self.task_id, {"shared_doc": TextFile(source=b"task version")})
        combined = get_all_files(self.crew_id, self.task_id)
        assert combined is not None
        assert combined["shared_doc"].read() == b"task version"

    def test_get_all_files_crew_only(self) -> None:
        """Merging works when only crew files exist."""
        store_files(self.crew_id, {"doc": self.test_file})
        combined = get_all_files(self.crew_id)
        assert combined is not None
        assert "doc" in combined

    def test_get_all_files_returns_none_when_empty(self) -> None:
        """No files anywhere yields None rather than an empty dict."""
        assert get_all_files(uuid.uuid4(), uuid.uuid4()) is None
@pytest.mark.asyncio
class TestAsyncFileStore:
    """Asynchronous file store operations."""

    async def test_astore_and_aget_files(self) -> None:
        """Async store/get round-trips crew files."""
        from crewai.utilities.file_store import aclear_files, aget_files, astore_files

        crew_id = uuid.uuid4()
        try:
            await astore_files(crew_id, {"doc": TextFile(source=b"async content")})
            fetched = await aget_files(crew_id)
            assert fetched is not None
            assert "doc" in fetched
            assert fetched["doc"].read() == b"async content"
        finally:
            await aclear_files(crew_id)

    async def test_aget_all_files(self) -> None:
        """Async merging combines crew and task files."""
        from crewai.utilities.file_store import (
            aclear_files,
            aclear_task_files,
            aget_all_files,
            astore_files,
            astore_task_files,
        )

        crew_id, task_id = uuid.uuid4(), uuid.uuid4()
        try:
            await astore_files(crew_id, {"crew": TextFile(source=b"crew")})
            await astore_task_files(task_id, {"task": TextFile(source=b"task")})
            combined = await aget_all_files(crew_id, task_id)
            assert combined is not None
            assert "crew" in combined and "task" in combined
        finally:
            await aclear_files(crew_id)
            await aclear_task_files(task_id)
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/utilities/test_file_store.py",
"license": "MIT License",
"lines": 126,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/tests/utilities/test_files.py | """Unit tests for files module."""
import io
import tempfile
from pathlib import Path
import pytest
from crewai_files import (
AudioFile,
File,
FileBytes,
FilePath,
FileSource,
FileStream,
ImageFile,
PDFFile,
TextFile,
VideoFile,
normalize_input_files,
wrap_file_source,
)
from crewai_files.core.sources import detect_content_type
class TestDetectContentType:
    """MIME sniffing behavior of detect_content_type."""

    def test_detect_plain_text(self) -> None:
        """Plain ASCII is classified as text/plain."""
        assert detect_content_type(b"Hello, World!") == "text/plain"

    def test_detect_json(self) -> None:
        """A JSON object literal is classified as application/json."""
        assert detect_content_type(b'{"key": "value"}') == "application/json"

    def test_detect_png(self) -> None:
        """A minimal but complete PNG is classified as image/png."""
        # Minimal valid PNG: header + IHDR chunk + IEND chunk
        png_data = (
            b"\x89PNG\r\n\x1a\n"  # PNG signature
            b"\x00\x00\x00\rIHDR"  # IHDR chunk length and type
            b"\x00\x00\x00\x01"  # width: 1
            b"\x00\x00\x00\x01"  # height: 1
            b"\x08\x02"  # bit depth: 8, color type: 2 (RGB)
            b"\x00\x00\x00"  # compression, filter, interlace
            b"\x90wS\xde"  # CRC
            b"\x00\x00\x00\x00IEND\xaeB`\x82"  # IEND chunk
        )
        assert detect_content_type(png_data) == "image/png"

    def test_detect_jpeg(self) -> None:
        """A JFIF header is classified as image/jpeg."""
        assert detect_content_type(b"\xff\xd8\xff\xe0\x00\x10JFIF") == "image/jpeg"

    def test_detect_pdf(self) -> None:
        """The %PDF magic bytes are classified as application/pdf."""
        assert detect_content_type(b"%PDF-1.4") == "application/pdf"
class TestFilePath:
    """Tests for the FilePath source wrapper."""

    def test_create_from_existing_file(self, tmp_path: Path) -> None:
        """FilePath exposes the filename and the file's bytes."""
        target = tmp_path / "test.txt"
        target.write_text("test content")
        source = FilePath(path=target)
        assert source.filename == "test.txt"
        assert source.read() == b"test content"

    def test_content_is_cached(self, tmp_path: Path) -> None:
        """The first read() caches; later disk changes are not observed."""
        target = tmp_path / "test.txt"
        target.write_text("original")
        source = FilePath(path=target)
        before = source.read()
        # Mutate the file on disk after the first read.
        target.write_text("modified")
        after = source.read()
        assert before == after == b"original"

    def test_raises_for_missing_file(self, tmp_path: Path) -> None:
        """A non-existent path is rejected at construction time."""
        with pytest.raises(ValueError, match="File not found"):
            FilePath(path=tmp_path / "nonexistent.txt")

    def test_raises_for_directory(self, tmp_path: Path) -> None:
        """A directory path is rejected at construction time."""
        with pytest.raises(ValueError, match="Path is not a file"):
            FilePath(path=tmp_path)

    def test_content_type_detection(self, tmp_path: Path) -> None:
        """Content type is sniffed from the file's bytes."""
        target = tmp_path / "test.txt"
        target.write_text("plain text content")
        assert FilePath(path=target).content_type == "text/plain"
class TestFileBytes:
    """Tests for the FileBytes source wrapper."""

    def test_create_from_bytes(self) -> None:
        """Raw bytes round-trip through read(); filename defaults to None."""
        wrapped = FileBytes(data=b"test data")
        assert wrapped.read() == b"test data"
        assert wrapped.filename is None

    def test_create_with_filename(self) -> None:
        """An explicitly supplied filename is preserved."""
        assert FileBytes(data=b"test", filename="doc.txt").filename == "doc.txt"

    def test_content_type_detection(self) -> None:
        """Content type is sniffed from the byte payload."""
        assert FileBytes(data=b"text content").content_type == "text/plain"
class TestFileStream:
    """Tests for the FileStream source wrapper."""

    def test_create_from_stream(self) -> None:
        """Bytes are read from the wrapped file-like object."""
        wrapped = FileStream(stream=io.BytesIO(b"stream content"))
        assert wrapped.read() == b"stream content"

    def test_content_is_cached(self) -> None:
        """read() caches; later writes to the stream are not observed."""
        buffer = io.BytesIO(b"original")
        wrapped = FileStream(stream=buffer)
        before = wrapped.read()
        # Overwrite the underlying buffer after the first read.
        buffer.seek(0)
        buffer.write(b"modified")
        assert before == wrapped.read() == b"original"

    def test_filename_from_stream(self, tmp_path: Path) -> None:
        """The stream's .name attribute supplies the filename."""
        target = tmp_path / "named.txt"
        target.write_text("content")
        with open(target, "rb") as handle:
            assert FileStream(stream=handle).filename == "named.txt"

    def test_close_stream(self) -> None:
        """close() closes the underlying stream."""
        buffer = io.BytesIO(b"data")
        FileStream(stream=buffer).close()
        assert buffer.closed
class TestTypedFileWrappers:
    """Tests for typed file wrapper classes (ImageFile, TextFile, ...)."""

    # Smallest structurally valid PNG: signature + IHDR + IEND.
    _MINIMAL_PNG = (
        b"\x89PNG\r\n\x1a\n"
        b"\x00\x00\x00\rIHDR"
        b"\x00\x00\x00\x01\x00\x00\x00\x01\x08\x02\x00\x00\x00"
        b"\x90wS\xde"
        b"\x00\x00\x00\x00IEND\xaeB`\x82"
    )

    def test_image_file_from_bytes(self) -> None:
        """ImageFile sniffs PNG bytes as image/png."""
        assert ImageFile(source=self._MINIMAL_PNG).content_type == "image/png"

    def test_image_file_from_path(self, tmp_path: Path) -> None:
        """ImageFile accepts a path string and keeps the filename."""
        target = tmp_path / "test.png"
        target.write_bytes(b"\x89PNG\r\n\x1a\n" + b"\x00" * 100)
        assert ImageFile(source=str(target)).filename == "test.png"

    def test_text_file_read_text(self) -> None:
        """TextFile.read_text decodes the payload to str."""
        assert TextFile(source=b"Hello, World!").read_text() == "Hello, World!"

    def test_pdf_file_creation(self) -> None:
        """PDFFile preserves its raw bytes."""
        payload = b"%PDF-1.4 content"
        assert PDFFile(source=payload).read() == payload

    def test_audio_file_creation(self) -> None:
        """AudioFile preserves its raw bytes."""
        assert AudioFile(source=b"audio data").read() == b"audio data"

    def test_video_file_creation(self) -> None:
        """VideoFile preserves its raw bytes."""
        assert VideoFile(source=b"video data").read() == b"video data"

    def test_dict_unpacking(self, tmp_path: Path) -> None:
        """Files support ** unpacking keyed by the filename stem."""
        target = tmp_path / "document.txt"
        target.write_text("content")
        wrapped = TextFile(source=str(target))
        unpacked = {**wrapped}
        assert "document" in unpacked
        assert unpacked["document"] is wrapped

    def test_dict_unpacking_no_filename(self) -> None:
        """Without a filename, unpacking falls back to the key 'file'."""
        assert "file" in {**TextFile(source=b"content")}

    def test_keys_method(self, tmp_path: Path) -> None:
        """keys() exposes the filename stem for dict unpacking."""
        target = tmp_path / "test.txt"
        target.write_text("content")
        assert TextFile(source=str(target)).keys() == ["test"]

    def test_getitem_valid_key(self, tmp_path: Path) -> None:
        """Indexing by the stem returns the file object itself."""
        target = tmp_path / "doc.txt"
        target.write_text("content")
        wrapped = TextFile(source=str(target))
        assert wrapped["doc"] is wrapped

    def test_getitem_invalid_key(self, tmp_path: Path) -> None:
        """Indexing with any other key raises KeyError."""
        target = tmp_path / "doc.txt"
        target.write_text("content")
        wrapped = TextFile(source=str(target))
        with pytest.raises(KeyError):
            _ = wrapped["wrong_key"]
class TestWrapFileSource:
    """Tests for the wrap_file_source dispatch function."""

    def test_wrap_image_source(self) -> None:
        """PNG bytes are wrapped as an ImageFile."""
        # Smallest structurally valid PNG: signature + IHDR + IEND.
        minimal_png = (
            b"\x89PNG\r\n\x1a\n"
            b"\x00\x00\x00\rIHDR"
            b"\x00\x00\x00\x01\x00\x00\x00\x01\x08\x02\x00\x00\x00"
            b"\x90wS\xde"
            b"\x00\x00\x00\x00IEND\xaeB`\x82"
        )
        assert isinstance(wrap_file_source(FileBytes(data=minimal_png)), ImageFile)

    def test_wrap_pdf_source(self) -> None:
        """PDF bytes are wrapped as a PDFFile."""
        assert isinstance(wrap_file_source(FileBytes(data=b"%PDF-1.4 content")), PDFFile)

    def test_wrap_text_source(self) -> None:
        """Plain text bytes are wrapped as a TextFile."""
        assert isinstance(wrap_file_source(FileBytes(data=b"plain text")), TextFile)
class TestNormalizeInputFiles:
    """Tests for the normalize_input_files function."""

    def test_normalize_path_strings(self, tmp_path: Path) -> None:
        """Path strings become entries keyed by their full filenames."""
        first = tmp_path / "doc1.txt"
        second = tmp_path / "doc2.txt"
        first.write_text("content1")
        second.write_text("content2")
        normalized = normalize_input_files([str(first), str(second)])
        assert "doc1.txt" in normalized
        assert "doc2.txt" in normalized

    def test_normalize_path_objects(self, tmp_path: Path) -> None:
        """pathlib.Path objects are accepted as well as strings."""
        target = tmp_path / "document.txt"
        target.write_text("content")
        assert "document.txt" in normalize_input_files([target])

    def test_normalize_bytes(self) -> None:
        """Raw bytes get positional file_N keys."""
        normalized = normalize_input_files([b"content1", b"content2"])
        assert "file_0" in normalized
        assert "file_1" in normalized

    def test_normalize_file_source(self) -> None:
        """FileSource objects keep their own filenames."""
        named = FileBytes(data=b"content", filename="named.txt")
        assert "named.txt" in normalize_input_files([named])

    def test_normalize_mixed_inputs(self, tmp_path: Path) -> None:
        """Paths, raw bytes, and FileSources can be mixed in one call."""
        target = tmp_path / "path.txt"
        target.write_text("from path")
        normalized = normalize_input_files(
            [
                str(target),
                b"raw bytes",
                FileBytes(data=b"source", filename="source.txt"),
            ]
        )
        assert len(normalized) == 3
        assert "path.txt" in normalized
        # Bare bytes keep their positional index within the input list.
        assert "file_1" in normalized
        assert "source.txt" in normalized

    def test_empty_input(self) -> None:
        """An empty input list normalizes to an empty dict."""
        assert normalize_input_files([]) == {}
class TestGenericFile:
    """Tests for the generic File class with content-type auto-detection."""

    # Smallest structurally valid PNG: signature + IHDR + IEND.
    _MINIMAL_PNG = (
        b"\x89PNG\r\n\x1a\n"
        b"\x00\x00\x00\rIHDR"
        b"\x00\x00\x00\x01\x00\x00\x00\x01\x08\x02\x00\x00\x00"
        b"\x90wS\xde"
        b"\x00\x00\x00\x00IEND\xaeB`\x82"
    )

    def test_file_from_text_bytes(self) -> None:
        """Text bytes are auto-detected as text/plain."""
        generic = File(source=b"Hello, World!")
        assert generic.content_type == "text/plain"
        assert generic.read() == b"Hello, World!"

    def test_file_from_png_bytes(self) -> None:
        """PNG bytes are auto-detected as image/png."""
        assert File(source=self._MINIMAL_PNG).content_type == "image/png"

    def test_file_from_pdf_bytes(self) -> None:
        """PDF bytes are auto-detected as application/pdf."""
        assert File(source=b"%PDF-1.4 content").content_type == "application/pdf"

    def test_file_from_path(self, tmp_path: Path) -> None:
        """A path string supplies filename, bytes, and sniffed type."""
        target = tmp_path / "document.txt"
        target.write_text("file content")
        generic = File(source=str(target))
        assert generic.filename == "document.txt"
        assert generic.read() == b"file content"
        assert generic.content_type == "text/plain"

    def test_file_from_path_object(self, tmp_path: Path) -> None:
        """A pathlib.Path source works the same as a string path."""
        target = tmp_path / "data.txt"
        target.write_text("path object content")
        generic = File(source=target)
        assert generic.filename == "data.txt"
        assert generic.read_text() == "path object content"

    def test_file_read_text(self) -> None:
        """read_text decodes the payload to str."""
        assert File(source=b"Text content here").read_text() == "Text content here"

    def test_file_dict_unpacking(self, tmp_path: Path) -> None:
        """File supports ** unpacking keyed by the filename stem."""
        target = tmp_path / "report.txt"
        target.write_text("report content")
        generic = File(source=str(target))
        unpacked = {**generic}
        assert "report" in unpacked
        assert unpacked["report"] is generic

    def test_file_dict_unpacking_no_filename(self) -> None:
        """Without a filename, unpacking falls back to the key 'file'."""
        assert "file" in {**File(source=b"content")}

    def test_file_keys_method(self, tmp_path: Path) -> None:
        """keys() exposes the filename stem."""
        target = tmp_path / "chart.png"
        target.write_bytes(b"\x89PNG\r\n\x1a\n" + b"\x00" * 50)
        assert File(source=str(target)).keys() == ["chart"]

    def test_file_getitem(self, tmp_path: Path) -> None:
        """Indexing by the stem returns the file object itself."""
        target = tmp_path / "image.png"
        target.write_bytes(b"\x89PNG\r\n\x1a\n" + b"\x00" * 50)
        generic = File(source=str(target))
        assert generic["image"] is generic

    def test_file_getitem_invalid_key(self, tmp_path: Path) -> None:
        """Indexing with any other key raises KeyError."""
        target = tmp_path / "doc.txt"
        target.write_text("content")
        generic = File(source=str(target))
        with pytest.raises(KeyError):
            _ = generic["wrong"]

    def test_file_with_stream(self) -> None:
        """A file-like stream source is read and sniffed."""
        generic = File(source=io.BytesIO(b"stream content"))
        assert generic.read() == b"stream content"
        assert generic.content_type == "text/plain"

    def test_file_default_mode(self) -> None:
        """mode defaults to 'auto'."""
        assert File(source=b"content").mode == "auto"

    def test_file_custom_mode(self) -> None:
        """mode='strict' is preserved."""
        assert File(source=b"content", mode="strict").mode == "strict"

    def test_file_chunk_mode(self) -> None:
        """mode='chunk' is preserved."""
        assert File(source=b"content", mode="chunk").mode == "chunk"

    def test_image_file_with_mode(self) -> None:
        """ImageFile also accepts a custom mode alongside type sniffing."""
        image = ImageFile(source=self._MINIMAL_PNG, mode="strict")
        assert image.mode == "strict"
        assert image.content_type == "image/png"
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/utilities/test_files.py",
"license": "MIT License",
"lines": 376,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/tests/agents/test_native_tool_calling.py | """Integration tests for native tool calling functionality.
These tests verify that agents can use native function calling
when the LLM supports it, across multiple providers.
"""
from __future__ import annotations
from collections.abc import Generator
import os
import threading
import time
from collections import Counter
from unittest.mock import Mock, patch
import pytest
from pydantic import BaseModel, Field
from crewai import Agent, Crew, Task
from crewai.events import crewai_event_bus
from crewai.hooks import register_after_tool_call_hook, register_before_tool_call_hook
from crewai.hooks.tool_hooks import ToolCallHookContext
from crewai.llm import LLM
from crewai.tools.base_tool import BaseTool
class CalculatorInput(BaseModel):
    """Input schema for calculator tool."""
    # Free-form arithmetic expression (e.g. "15 * 8") evaluated by CalculatorTool._run.
    expression: str = Field(description="Mathematical expression to evaluate")
class CalculatorTool(BaseTool):
    """A calculator tool that performs mathematical calculations."""
    name: str = "calculator"
    description: str = "Perform mathematical calculations. Use this for any math operations."
    args_schema: type[BaseModel] = CalculatorInput
    def _run(self, expression: str) -> str:
        """Evaluate *expression* and return a human-readable result string.

        The expression is produced by the LLM and is therefore untrusted
        input: evaluate it with an empty builtins namespace so it cannot
        reach ``__import__``, ``open`` and friends. Plain arithmetic
        (the only thing these tests exercise) is unaffected.
        """
        try:
            result = eval(expression, {"__builtins__": {}}, {})  # noqa: S307
            return f"The result of {expression} is {result}"
        except Exception as e:
            # Report failures back to the agent instead of raising.
            return f"Error calculating {expression}: {e}"
class WeatherInput(BaseModel):
    """Input schema for weather tool."""
    # City name interpolated verbatim into WeatherTool's mock response.
    location: str = Field(description="City name to get weather for")
class WeatherTool(BaseTool):
    """A mock weather tool for testing."""
    name: str = "get_weather"
    description: str = "Get the current weather for a location"
    args_schema: type[BaseModel] = WeatherInput
    def _run(self, location: str) -> str:
        """Get weather (mock implementation).

        Always reports sunny/72°F — tests assert the tool was invoked,
        not the forecast itself.
        """
        return f"The weather in {location} is sunny with a temperature of 72°F"
class FailingTool(BaseTool):
    """A tool that always fails."""
    name: str = "failing_tool"
    description: str = "This tool always fails"
    def _run(self) -> str:
        # Raise unconditionally so error-handling paths can be exercised.
        raise Exception("This tool always fails")
class LocalSearchInput(BaseModel):
    """Shared args schema for the parallel_local_search_* test tools."""
    query: str = Field(description="Search query")
class ParallelProbe:
    """Thread-safe in-memory recorder for tool execution windows."""

    _guard = threading.Lock()
    _records: list[tuple[str, float, float]] = []

    @classmethod
    def windows(cls) -> list[tuple[str, float, float]]:
        """Return a snapshot copy of every recorded window."""
        with cls._guard:
            return cls._records.copy()

    @classmethod
    def record(cls, tool_name: str, start: float, end: float) -> None:
        """Append one (tool_name, start, end) execution window."""
        with cls._guard:
            cls._records.append((tool_name, start, end))

    @classmethod
    def reset(cls) -> None:
        """Forget all recorded windows."""
        with cls._guard:
            cls._records = []
def _parallel_prompt() -> str:
return (
"This is a tool-calling compliance test. "
"In your next assistant turn, emit exactly 3 tool calls in the same response (parallel tool calls), in this order: "
"1) parallel_local_search_one(query='latest OpenAI model release notes'), "
"2) parallel_local_search_two(query='latest Anthropic model release notes'), "
"3) parallel_local_search_three(query='latest Gemini model release notes'). "
"Do not call any other tools and do not answer before those 3 tool calls are emitted. "
"After the tool results return, provide a one paragraph summary."
)
def _max_concurrency(windows: list[tuple[str, float, float]]) -> int:
points: list[tuple[float, int]] = []
for _, start, end in windows:
points.append((start, 1))
points.append((end, -1))
points.sort(key=lambda p: (p[0], p[1]))
current = 0
maximum = 0
for _, delta in points:
current += delta
if current > maximum:
maximum = current
return maximum
def _assert_tools_overlapped() -> None:
    """Fail unless >= 3 local search calls ran and at least two overlapped."""
    local_windows = [
        window
        for window in ParallelProbe.windows()
        if window[0].startswith("parallel_local_search_")
    ]
    assert len(local_windows) >= 3, f"Expected at least 3 local tool calls, got {len(local_windows)}"
    assert _max_concurrency(local_windows) >= 2, "Expected overlapping local tool executions"
@pytest.fixture
def calculator_tool() -> CalculatorTool:
    """Provide a fresh CalculatorTool instance for each test."""
    return CalculatorTool()
@pytest.fixture
def weather_tool() -> WeatherTool:
    """Provide a fresh mock WeatherTool instance for each test."""
    return WeatherTool()
@pytest.fixture
def failing_tool() -> BaseTool:
    """Provide a tool that always raises, for exercising error paths.

    Fix: the docstring was a copy-paste of the weather fixture's
    ("Create a weather tool for testing") even though this fixture
    returns a FailingTool.
    """
    return FailingTool()
@pytest.fixture
def parallel_tools() -> list[BaseTool]:
    """Create local tools used to verify native parallel execution deterministically."""
    # Each tool sleeps a fixed 1.0s and records its (start, end) window in
    # ParallelProbe; overlapping windows prove the calls ran concurrently.
    class ParallelLocalSearchOne(BaseTool):
        name: str = "parallel_local_search_one"
        description: str = "Local search tool #1 for concurrency testing."
        args_schema: type[BaseModel] = LocalSearchInput
        def _run(self, query: str) -> str:
            # perf_counter is monotonic, so windows are immune to wall-clock jumps.
            start = time.perf_counter()
            time.sleep(1.0)
            end = time.perf_counter()
            ParallelProbe.record(self.name, start, end)
            return f"[one] {query}"
    class ParallelLocalSearchTwo(BaseTool):
        name: str = "parallel_local_search_two"
        description: str = "Local search tool #2 for concurrency testing."
        args_schema: type[BaseModel] = LocalSearchInput
        def _run(self, query: str) -> str:
            start = time.perf_counter()
            time.sleep(1.0)
            end = time.perf_counter()
            ParallelProbe.record(self.name, start, end)
            return f"[two] {query}"
    class ParallelLocalSearchThree(BaseTool):
        name: str = "parallel_local_search_three"
        description: str = "Local search tool #3 for concurrency testing."
        args_schema: type[BaseModel] = LocalSearchInput
        def _run(self, query: str) -> str:
            start = time.perf_counter()
            time.sleep(1.0)
            end = time.perf_counter()
            ParallelProbe.record(self.name, start, end)
            return f"[three] {query}"
    return [
        ParallelLocalSearchOne(),
        ParallelLocalSearchTwo(),
        ParallelLocalSearchThree(),
    ]
def _attach_parallel_probe_handler() -> None:
    """Register an event-bus listener that records parallel tool windows.

    Fix: ``ToolUsageFinishedEvent`` was referenced without being imported
    anywhere in this module, so calling this helper raised ``NameError``
    (the ``from __future__ import annotations`` at the top only defers the
    annotation, not the decorator argument). Import it locally before use.
    """
    from crewai.events import ToolUsageFinishedEvent

    @crewai_event_bus.on(ToolUsageFinishedEvent)
    def _capture_tool_window(_source, event: ToolUsageFinishedEvent):
        # Only the deterministic local search tools matter for the probe.
        if not event.tool_name.startswith("parallel_local_search_"):
            return
        ParallelProbe.record(
            event.tool_name,
            event.started_at.timestamp(),
            event.finished_at.timestamp(),
        )
# =============================================================================
# OpenAI Provider Tests
# =============================================================================
class TestOpenAINativeToolCalling:
    """Tests for native tool calling with OpenAI models."""
    @pytest.mark.vcr()
    def test_openai_agent_with_native_tool_calling(
        self, calculator_tool: CalculatorTool
    ) -> None:
        """Test OpenAI agent can use native tool calling."""
        agent = Agent(
            role="Math Assistant",
            goal="Help users with mathematical calculations",
            backstory="You are a helpful math assistant.",
            tools=[calculator_tool],
            llm=LLM(model="gpt-4o-mini"),
            verbose=False,
            max_iter=3,
        )
        task = Task(
            description="Calculate what is 15 * 8",
            expected_output="The result of the calculation",
            agent=agent,
        )
        crew = Crew(agents=[agent], tasks=[task])
        result = crew.kickoff()
        assert result is not None
        assert result.raw is not None
        # The calculator must actually have run: 15 * 8 = 120.
        assert "120" in str(result.raw)
    def test_openai_agent_kickoff_with_tools_mocked(
        self, calculator_tool: CalculatorTool
    ) -> None:
        """Test OpenAI agent kickoff with mocked LLM call."""
        llm = LLM(model="gpt-5-nano")
        # Patch LLM.call so no network/cassette is needed; only wiring is checked.
        with patch.object(llm, "call", return_value="The answer is 120.") as mock_call:
            agent = Agent(
                role="Math Assistant",
                goal="Calculate math",
                backstory="You calculate.",
                tools=[calculator_tool],
                llm=llm,
                verbose=False,
            )
            task = Task(
                description="Calculate 15 * 8",
                expected_output="Result",
                agent=agent,
            )
            crew = Crew(agents=[agent], tasks=[task])
            result = crew.kickoff()
            assert mock_call.called
            assert result is not None
    @pytest.mark.vcr()
    @pytest.mark.timeout(180)
    def test_openai_parallel_native_tool_calling_test_crew(
        self, parallel_tools: list[BaseTool]
    ) -> None:
        """Parallel native tool calls overlap in time (crew kickoff path)."""
        agent = Agent(
            role="Parallel Tool Agent",
            goal="Use both tools exactly as instructed",
            backstory="You follow tool instructions precisely.",
            tools=parallel_tools,
            llm=LLM(model="gpt-5-nano", temperature=1),
            verbose=False,
            max_iter=3,
        )
        task = Task(
            description=_parallel_prompt(),
            expected_output="A one sentence summary of both tool outputs",
            agent=agent,
        )
        crew = Crew(agents=[agent], tasks=[task])
        result = crew.kickoff()
        assert result is not None
        _assert_tools_overlapped()
    @pytest.mark.vcr()
    @pytest.mark.timeout(180)
    def test_openai_parallel_native_tool_calling_test_agent_kickoff(
        self, parallel_tools: list[BaseTool]
    ) -> None:
        """Parallel native tool calls overlap in time (direct agent.kickoff path)."""
        agent = Agent(
            role="Parallel Tool Agent",
            goal="Use both tools exactly as instructed",
            backstory="You follow tool instructions precisely.",
            tools=parallel_tools,
            llm=LLM(model="gpt-4o-mini"),
            verbose=False,
            max_iter=3,
        )
        result = agent.kickoff(_parallel_prompt())
        assert result is not None
        _assert_tools_overlapped()
    @pytest.mark.vcr()
    @pytest.mark.timeout(180)
    def test_openai_parallel_native_tool_calling_tool_hook_parity_crew(
        self, parallel_tools: list[BaseTool]
    ) -> None:
        """Before/after tool hooks fire in matched pairs for parallel calls (crew path)."""
        hook_calls: dict[str, list[dict[str, str]]] = {"before": [], "after": []}
        def before_hook(context: ToolCallHookContext) -> bool | None:
            # Track only the deterministic local tools; returning None allows the call.
            if context.tool_name.startswith("parallel_local_search_"):
                hook_calls["before"].append(
                    {
                        "tool_name": context.tool_name,
                        "query": str(context.tool_input.get("query", "")),
                    }
                )
            return None
        def after_hook(context: ToolCallHookContext) -> str | None:
            # Returning None keeps the tool's original result.
            if context.tool_name.startswith("parallel_local_search_"):
                hook_calls["after"].append(
                    {
                        "tool_name": context.tool_name,
                        "query": str(context.tool_input.get("query", "")),
                    }
                )
            return None
        register_before_tool_call_hook(before_hook)
        register_after_tool_call_hook(after_hook)
        try:
            agent = Agent(
                role="Parallel Tool Agent",
                goal="Use both tools exactly as instructed",
                backstory="You follow tool instructions precisely.",
                tools=parallel_tools,
                llm=LLM(model="gpt-5-nano", temperature=1),
                verbose=False,
                max_iter=3,
            )
            task = Task(
                description=_parallel_prompt(),
                expected_output="A one sentence summary of both tool outputs",
                agent=agent,
            )
            crew = Crew(agents=[agent], tasks=[task])
            result = crew.kickoff()
            assert result is not None
            _assert_tools_overlapped()
            before_names = [call["tool_name"] for call in hook_calls["before"]]
            after_names = [call["tool_name"] for call in hook_calls["after"]]
            assert len(before_names) >= 3, "Expected before hooks for all parallel calls"
            # Every before hook must be paired with exactly one after hook.
            assert Counter(before_names) == Counter(after_names)
            assert all(call["query"] for call in hook_calls["before"])
            assert all(call["query"] for call in hook_calls["after"])
        finally:
            # Hooks are process-global: always unregister so other tests are unaffected.
            from crewai.hooks import (
                unregister_after_tool_call_hook,
                unregister_before_tool_call_hook,
            )
            unregister_before_tool_call_hook(before_hook)
            unregister_after_tool_call_hook(after_hook)
    @pytest.mark.vcr()
    @pytest.mark.timeout(180)
    def test_openai_parallel_native_tool_calling_tool_hook_parity_agent_kickoff(
        self, parallel_tools: list[BaseTool]
    ) -> None:
        """Before/after tool hooks fire in matched pairs for parallel calls (agent.kickoff path)."""
        hook_calls: dict[str, list[dict[str, str]]] = {"before": [], "after": []}
        def before_hook(context: ToolCallHookContext) -> bool | None:
            if context.tool_name.startswith("parallel_local_search_"):
                hook_calls["before"].append(
                    {
                        "tool_name": context.tool_name,
                        "query": str(context.tool_input.get("query", "")),
                    }
                )
            return None
        def after_hook(context: ToolCallHookContext) -> str | None:
            if context.tool_name.startswith("parallel_local_search_"):
                hook_calls["after"].append(
                    {
                        "tool_name": context.tool_name,
                        "query": str(context.tool_input.get("query", "")),
                    }
                )
            return None
        register_before_tool_call_hook(before_hook)
        register_after_tool_call_hook(after_hook)
        try:
            agent = Agent(
                role="Parallel Tool Agent",
                goal="Use both tools exactly as instructed",
                backstory="You follow tool instructions precisely.",
                tools=parallel_tools,
                llm=LLM(model="gpt-5-nano", temperature=1),
                verbose=False,
                max_iter=3,
            )
            result = agent.kickoff(_parallel_prompt())
            assert result is not None
            _assert_tools_overlapped()
            before_names = [call["tool_name"] for call in hook_calls["before"]]
            after_names = [call["tool_name"] for call in hook_calls["after"]]
            assert len(before_names) >= 3, "Expected before hooks for all parallel calls"
            assert Counter(before_names) == Counter(after_names)
            assert all(call["query"] for call in hook_calls["before"])
            assert all(call["query"] for call in hook_calls["after"])
        finally:
            # Hooks are process-global: always unregister so other tests are unaffected.
            from crewai.hooks import (
                unregister_after_tool_call_hook,
                unregister_before_tool_call_hook,
            )
            unregister_before_tool_call_hook(before_hook)
            unregister_after_tool_call_hook(after_hook)
# =============================================================================
# Anthropic Provider Tests
# =============================================================================
class TestAnthropicNativeToolCalling:
    """Tests for native tool calling with Anthropic models."""

    @pytest.fixture(autouse=True)
    def mock_anthropic_api_key(self):
        """Guarantee ANTHROPIC_API_KEY is set, injecting a dummy when missing."""
        if "ANTHROPIC_API_KEY" in os.environ:
            yield
        else:
            with patch.dict(os.environ, {"ANTHROPIC_API_KEY": "test-key"}):
                yield

    @pytest.mark.vcr()
    def test_anthropic_agent_with_native_tool_calling(
        self, calculator_tool: CalculatorTool
    ) -> None:
        """An Anthropic-backed agent can drive the calculator via native tools."""
        math_agent = Agent(
            role="Math Assistant",
            goal="Help users with mathematical calculations",
            backstory="You are a helpful math assistant.",
            tools=[calculator_tool],
            llm=LLM(model="anthropic/claude-3-5-haiku-20241022"),
            verbose=False,
            max_iter=3,
        )
        math_task = Task(
            description="Calculate what is 15 * 8",
            expected_output="The result of the calculation",
            agent=math_agent,
        )
        outcome = Crew(agents=[math_agent], tasks=[math_task]).kickoff()
        assert outcome is not None
        assert outcome.raw is not None

    def test_anthropic_agent_kickoff_with_tools_mocked(
        self, calculator_tool: CalculatorTool
    ) -> None:
        """Crew kickoff routes through LLM.call (patched, so no network)."""
        llm = LLM(model="anthropic/claude-3-5-haiku-20241022")
        with patch.object(llm, "call", return_value="The answer is 120.") as mocked_call:
            math_agent = Agent(
                role="Math Assistant",
                goal="Calculate math",
                backstory="You calculate.",
                tools=[calculator_tool],
                llm=llm,
                verbose=False,
            )
            math_task = Task(
                description="Calculate 15 * 8",
                expected_output="Result",
                agent=math_agent,
            )
            outcome = Crew(agents=[math_agent], tasks=[math_task]).kickoff()
            assert mocked_call.called
            assert outcome is not None

    @pytest.mark.vcr()
    def test_anthropic_parallel_native_tool_calling_test_crew(
        self, parallel_tools: list[BaseTool]
    ) -> None:
        """Anthropic parallel tool calls overlap in time (crew path)."""
        probe_agent = Agent(
            role="Parallel Tool Agent",
            goal="Use both tools exactly as instructed",
            backstory="You follow tool instructions precisely.",
            tools=parallel_tools,
            llm=LLM(model="anthropic/claude-sonnet-4-6"),
            verbose=False,
            max_iter=3,
        )
        probe_task = Task(
            description=_parallel_prompt(),
            expected_output="A one sentence summary of both tool outputs",
            agent=probe_agent,
        )
        outcome = Crew(agents=[probe_agent], tasks=[probe_task]).kickoff()
        assert outcome is not None
        _assert_tools_overlapped()

    @pytest.mark.vcr()
    def test_anthropic_parallel_native_tool_calling_test_agent_kickoff(
        self, parallel_tools: list[BaseTool]
    ) -> None:
        """Anthropic parallel tool calls overlap in time (direct agent path)."""
        probe_agent = Agent(
            role="Parallel Tool Agent",
            goal="Use both tools exactly as instructed",
            backstory="You follow tool instructions precisely.",
            tools=parallel_tools,
            llm=LLM(model="anthropic/claude-sonnet-4-6"),
            verbose=False,
            max_iter=3,
        )
        outcome = probe_agent.kickoff(_parallel_prompt())
        assert outcome is not None
        _assert_tools_overlapped()
# =============================================================================
# Google/Gemini Provider Tests
# =============================================================================
class TestGeminiNativeToolCalling:
    """Tests for native tool calling with Gemini models."""

    @pytest.fixture(autouse=True)
    def mock_google_api_key(self):
        """Guarantee a Google/Gemini key is present, injecting a dummy when absent."""
        if "GOOGLE_API_KEY" in os.environ or "GEMINI_API_KEY" in os.environ:
            yield
        else:
            with patch.dict(os.environ, {"GOOGLE_API_KEY": "test-key"}):
                yield

    @pytest.mark.vcr()
    def test_gemini_agent_with_native_tool_calling(
        self, calculator_tool: CalculatorTool
    ) -> None:
        """A Gemini-backed agent can drive the calculator via native tools."""
        math_agent = Agent(
            role="Math Assistant",
            goal="Help users with mathematical calculations",
            backstory="You are a helpful math assistant.",
            tools=[calculator_tool],
            llm=LLM(model="gemini/gemini-2.5-flash"),
        )
        math_task = Task(
            description="Calculate what is 15 * 8",
            expected_output="The result of the calculation",
            agent=math_agent,
        )
        outcome = Crew(agents=[math_agent], tasks=[math_task]).kickoff()
        assert outcome is not None
        assert outcome.raw is not None

    def test_gemini_agent_kickoff_with_tools_mocked(
        self, calculator_tool: CalculatorTool
    ) -> None:
        """Crew kickoff routes through LLM.call (patched, so no network)."""
        llm = LLM(model="gemini/gemini-2.5-flash")
        with patch.object(llm, "call", return_value="The answer is 120.") as mocked_call:
            math_agent = Agent(
                role="Math Assistant",
                goal="Calculate math",
                backstory="You calculate.",
                tools=[calculator_tool],
                llm=llm,
                verbose=False,
            )
            math_task = Task(
                description="Calculate 15 * 8",
                expected_output="Result",
                agent=math_agent,
            )
            outcome = Crew(agents=[math_agent], tasks=[math_task]).kickoff()
            assert mocked_call.called
            assert outcome is not None

    @pytest.mark.vcr()
    def test_gemini_parallel_native_tool_calling_test_crew(
        self, parallel_tools: list[BaseTool]
    ) -> None:
        """Gemini parallel tool calls overlap in time (crew path)."""
        probe_agent = Agent(
            role="Parallel Tool Agent",
            goal="Use both tools exactly as instructed",
            backstory="You follow tool instructions precisely.",
            tools=parallel_tools,
            llm=LLM(model="gemini/gemini-2.5-flash"),
            verbose=False,
            max_iter=3,
        )
        probe_task = Task(
            description=_parallel_prompt(),
            expected_output="A one sentence summary of both tool outputs",
            agent=probe_agent,
        )
        outcome = Crew(agents=[probe_agent], tasks=[probe_task]).kickoff()
        assert outcome is not None
        _assert_tools_overlapped()

    @pytest.mark.vcr()
    def test_gemini_parallel_native_tool_calling_test_agent_kickoff(
        self, parallel_tools: list[BaseTool]
    ) -> None:
        """Gemini parallel tool calls overlap in time (direct agent path)."""
        probe_agent = Agent(
            role="Parallel Tool Agent",
            goal="Use both tools exactly as instructed",
            backstory="You follow tool instructions precisely.",
            tools=parallel_tools,
            llm=LLM(model="gemini/gemini-2.5-flash"),
            verbose=False,
            max_iter=3,
        )
        outcome = probe_agent.kickoff(_parallel_prompt())
        assert outcome is not None
        _assert_tools_overlapped()
# =============================================================================
# Azure Provider Tests
# =============================================================================
class TestAzureNativeToolCalling:
"""Tests for native tool calling with Azure OpenAI models."""
    @pytest.fixture(autouse=True)
    def mock_azure_env(self):
        """Mock Azure environment variables for tests.

        Real credentials (if present) take precedence so cassettes can be
        re-recorded against a live deployment.
        """
        env_vars = {
            "AZURE_API_KEY": "test-key",
            "AZURE_API_BASE": "https://test.openai.azure.com",
            "AZURE_API_VERSION": "2024-02-15-preview",
        }
        # Only patch if keys are not already in environment
        if "AZURE_API_KEY" not in os.environ:
            with patch.dict(os.environ, env_vars):
                yield
        else:
            yield
    @pytest.mark.vcr()
    def test_azure_agent_with_native_tool_calling(
        self, calculator_tool: CalculatorTool
    ) -> None:
        """Test Azure agent can use native tool calling."""
        agent = Agent(
            role="Math Assistant",
            goal="Help users with mathematical calculations",
            backstory="You are a helpful math assistant.",
            tools=[calculator_tool],
            llm=LLM(model="azure/gpt-5-nano"),
            verbose=False,
            max_iter=3,
        )
        task = Task(
            description="Calculate what is 15 * 8",
            expected_output="The result of the calculation",
            agent=agent,
        )
        crew = Crew(agents=[agent], tasks=[task])
        result = crew.kickoff()
        assert result is not None
        assert result.raw is not None
        # The calculator must actually have run: 15 * 8 = 120.
        assert "120" in str(result.raw)
def test_azure_agent_kickoff_with_tools_mocked(
self, calculator_tool: CalculatorTool
) -> None:
"""Test Azure agent kickoff with mocked LLM call."""
llm = LLM(
model="azure/gpt-5-nano",
api_key="test-key",
base_url="https://test.openai.azure.com",
)
with patch.object(llm, "call", return_value="The answer is 120.") as mock_call:
agent = Agent(
role="Math Assistant",
goal="Calculate math",
backstory="You calculate.",
tools=[calculator_tool],
llm=llm,
verbose=False,
)
task = Task(
description="Calculate 15 * 8",
expected_output="Result",
agent=agent,
)
crew = Crew(agents=[agent], tasks=[task])
result = crew.kickoff()
assert mock_call.called
assert result is not None
@pytest.mark.vcr()
def test_azure_parallel_native_tool_calling_test_crew(
self, parallel_tools: list[BaseTool]
) -> None:
agent = Agent(
role="Parallel Tool Agent",
goal="Use both tools exactly as instructed",
backstory="You follow tool instructions precisely.",
tools=parallel_tools,
llm=LLM(model="azure/gpt-5-nano"),
verbose=False,
max_iter=3,
)
task = Task(
description=_parallel_prompt(),
expected_output="A one sentence summary of both tool outputs",
agent=agent,
)
crew = Crew(agents=[agent], tasks=[task])
result = crew.kickoff()
assert result is not None
_assert_tools_overlapped()
@pytest.mark.vcr()
def test_azure_parallel_native_tool_calling_test_agent_kickoff(
self, parallel_tools: list[BaseTool]
) -> None:
agent = Agent(
role="Parallel Tool Agent",
goal="Use both tools exactly as instructed",
backstory="You follow tool instructions precisely.",
tools=parallel_tools,
llm=LLM(model="azure/gpt-5-nano"),
verbose=False,
max_iter=3,
)
result = agent.kickoff(_parallel_prompt())
assert result is not None
_assert_tools_overlapped()
# =============================================================================
# Bedrock Provider Tests
# =============================================================================
class TestBedrockNativeToolCalling:
    """Tests for native tool calling with AWS Bedrock models."""

    @pytest.fixture(autouse=True)
    def validate_bedrock_credentials_for_live_recording(self):
        """Run Bedrock tests only when explicitly enabled."""
        if os.getenv("RUN_BEDROCK_LIVE_TESTS", "false").lower() != "true":
            pytest.skip(
                "Skipping Bedrock tests by default. "
                "Set RUN_BEDROCK_LIVE_TESTS=true with valid AWS credentials to enable."
            )

        def looks_placeholder(credential: str) -> bool:
            # Empty values or obvious test stubs cannot drive live recordings.
            return not credential or credential.startswith(("fake-", "test-"))

        if looks_placeholder(os.getenv("AWS_ACCESS_KEY_ID", "")) or looks_placeholder(
            os.getenv("AWS_SECRET_ACCESS_KEY", "")
        ):
            pytest.skip(
                "Skipping Bedrock tests: valid AWS credentials are required when "
                "RUN_BEDROCK_LIVE_TESTS=true."
            )
        yield

    @pytest.mark.vcr()
    def test_bedrock_agent_kickoff_with_tools_mocked(
        self, calculator_tool: CalculatorTool
    ) -> None:
        """Test Bedrock agent kickoff with mocked LLM call."""
        bedrock_llm = LLM(model="bedrock/anthropic.claude-3-haiku-20240307-v1:0")
        math_agent = Agent(
            role="Math Assistant",
            goal="Calculate math",
            backstory="You calculate.",
            llm=bedrock_llm,
            tools=[calculator_tool],
            max_iter=5,
            verbose=False,
        )
        math_task = Task(
            description="Calculate 15 * 8",
            expected_output="Result",
            agent=math_agent,
        )
        outcome = Crew(agents=[math_agent], tasks=[math_task]).kickoff()
        assert outcome is not None
        assert outcome.raw is not None
        # 15 * 8 == 120 must surface in the final answer.
        assert "120" in str(outcome.raw)

    @pytest.mark.vcr()
    def test_bedrock_parallel_native_tool_calling_test_crew(
        self, parallel_tools: list[BaseTool]
    ) -> None:
        """Crew kickoff on Bedrock should run both parallel tools with overlap."""
        parallel_agent = Agent(
            role="Parallel Tool Agent",
            goal="Use both tools exactly as instructed",
            backstory="You follow tool instructions precisely.",
            llm=LLM(model="bedrock/anthropic.claude-3-haiku-20240307-v1:0"),
            tools=parallel_tools,
            max_iter=3,
            verbose=False,
        )
        parallel_task = Task(
            description=_parallel_prompt(),
            expected_output="A one sentence summary of both tool outputs",
            agent=parallel_agent,
        )
        outcome = Crew(agents=[parallel_agent], tasks=[parallel_task]).kickoff()
        assert outcome is not None
        _assert_tools_overlapped()

    @pytest.mark.vcr()
    def test_bedrock_parallel_native_tool_calling_test_agent_kickoff(
        self, parallel_tools: list[BaseTool]
    ) -> None:
        """Direct agent kickoff on Bedrock should run both parallel tools with overlap."""
        parallel_agent = Agent(
            role="Parallel Tool Agent",
            goal="Use both tools exactly as instructed",
            backstory="You follow tool instructions precisely.",
            llm=LLM(model="bedrock/anthropic.claude-3-haiku-20240307-v1:0"),
            tools=parallel_tools,
            max_iter=3,
            verbose=False,
        )
        assert parallel_agent.kickoff(_parallel_prompt()) is not None
        _assert_tools_overlapped()
# =============================================================================
# Cross-Provider Native Tool Calling Behavior Tests
# =============================================================================
class TestNativeToolCallingBehavior:
    """Tests for native tool calling behavior across providers."""

    def test_supports_function_calling_check(self) -> None:
        """Test that supports_function_calling() is properly checked."""
        # OpenAI models should support function calling.
        gpt_llm = LLM(model="gpt-5-nano")
        assert hasattr(gpt_llm, "supports_function_calling")
        assert gpt_llm.supports_function_calling() is True

    def test_anthropic_supports_function_calling(self) -> None:
        """Test that Anthropic models support function calling."""
        with patch.dict(os.environ, {"ANTHROPIC_API_KEY": "test-key"}):
            claude_llm = LLM(model="anthropic/claude-3-5-haiku-20241022")
            assert hasattr(claude_llm, "supports_function_calling")
            assert claude_llm.supports_function_calling() is True

    def test_gemini_supports_function_calling(self) -> None:
        """Test that Gemini models support function calling."""
        gemini_llm = LLM(model="gemini/gemini-2.5-flash")
        assert hasattr(gemini_llm, "supports_function_calling")
        assert gemini_llm.supports_function_calling() is True
# =============================================================================
# Token Usage Tests
# =============================================================================
class TestNativeToolCallingTokenUsage:
    """Tests for token usage with native tool calling."""

    @pytest.mark.vcr()
    def test_openai_native_tool_calling_token_usage(
        self, calculator_tool: CalculatorTool
    ) -> None:
        """Test token usage tracking with OpenAI native tool calling.

        Runs a single-tool crew against a recorded cassette and verifies
        that token accounting is populated on the crew output.
        """
        agent = Agent(
            role="Calculator",
            goal="Perform calculations efficiently",
            backstory="You calculate things.",
            tools=[calculator_tool],
            llm=LLM(model="gpt-5-nano"),
            verbose=False,
            max_iter=3,
        )
        task = Task(
            description="What is 100 / 4?",
            expected_output="The result",
            agent=agent,
        )
        crew = Crew(agents=[agent], tasks=[task])
        result = crew.kickoff()
        assert result is not None
        # Token usage must be recorded after the (recorded) call completes.
        assert result.token_usage is not None
        assert result.token_usage.total_tokens > 0
        assert result.token_usage.successful_requests >= 1
        # Plain literal: the original used an f-string without placeholders
        # (ruff F541).
        print("\n[OPENAI NATIVE TOOL CALLING TOKEN USAGE]")
        print(f" Prompt tokens: {result.token_usage.prompt_tokens}")
        print(f" Completion tokens: {result.token_usage.completion_tokens}")
        print(f" Total tokens: {result.token_usage.total_tokens}")
@pytest.mark.vcr()
def test_native_tool_calling_error_handling(failing_tool: FailingTool):
    """Test that native tool calling handles errors properly and emits error events."""
    import threading

    from crewai.events import crewai_event_bus
    from crewai.events.types.tool_usage_events import ToolUsageErrorEvent

    captured_events = []
    error_emitted = threading.Event()

    @crewai_event_bus.on(ToolUsageErrorEvent)
    def handle_tool_error(source, event):
        captured_events.append(event)
        error_emitted.set()

    agent = Agent(
        role="Calculator",
        goal="Perform calculations efficiently",
        backstory="You calculate things.",
        tools=[failing_tool],
        llm=LLM(model="gpt-5-nano"),
        verbose=False,
        max_iter=3,
    )
    result = agent.kickoff("Use the failing_tool to do something.")
    assert result is not None
    # The error event may arrive asynchronously, so wait for it.
    assert error_emitted.wait(timeout=10), "ToolUsageErrorEvent was not emitted"
    assert captured_events
    # Inspect the first captured event's attributes.
    first_event = captured_events[0]
    assert first_event.tool_name == "failing_tool"
    assert first_event.agent_role == agent.role
    assert "This tool always fails" in str(first_event.error)
# =============================================================================
# Max Usage Count Tests for Native Tool Calling
# =============================================================================
class CountingInput(BaseModel):
    """Input schema for counting tool."""

    # Sole required argument; echoed back by CountingTool._run.
    value: str = Field(description="Value to count")
class CountingTool(BaseTool):
    """A tool that counts its usage."""

    name: str = "counting_tool"
    description: str = "A tool that counts how many times it's been called"
    args_schema: type[BaseModel] = CountingInput

    def _run(self, value: str) -> str:
        """Echo the given value behind a fixed prefix."""
        # Usage counting itself is handled by the BaseTool bookkeeping.
        return "Counted: " + value
class TestMaxUsageCountWithNativeToolCalling:
    """Tests for max_usage_count with native tool calling."""

    @pytest.mark.vcr()
    def test_max_usage_count_tracked_in_native_tool_calling(self) -> None:
        """Test that max_usage_count is properly tracked when using native tool calling."""
        counting_tool = CountingTool(max_usage_count=3)
        # A freshly constructed tool starts with zero recorded uses.
        assert counting_tool.max_usage_count == 3
        assert counting_tool.current_usage_count == 0
        counting_agent = Agent(
            role="Counting Agent",
            goal="Call the counting tool multiple times",
            backstory="You are an agent that counts things.",
            tools=[counting_tool],
            llm=LLM(model="gpt-5-nano"),
            max_iter=5,
            verbose=False,
        )
        counting_task = Task(
            description="Call the counting_tool 3 times with values 'first', 'second', and 'third'",
            expected_output="The results of the counting operations",
            agent=counting_agent,
        )
        Crew(agents=[counting_agent], tasks=[counting_task]).kickoff()
        # Usage must be tracked and never exceed the configured ceiling.
        assert counting_tool.max_usage_count == 3
        assert counting_tool.current_usage_count <= counting_tool.max_usage_count

    @pytest.mark.vcr()
    def test_max_usage_count_limit_enforced_in_native_tool_calling(self) -> None:
        """Test that when max_usage_count is reached, tool returns error message."""
        counting_tool = CountingTool(max_usage_count=2)
        counting_agent = Agent(
            role="Counting Agent",
            goal="Use the counting tool as many times as requested",
            backstory="You are an agent that counts things. You must try to use the tool for each value requested.",
            tools=[counting_tool],
            llm=LLM(model="gpt-5-nano"),
            max_iter=5,
            verbose=False,
        )
        # Deliberately request more tool calls than max_usage_count allows.
        over_limit_task = Task(
            description="Call the counting_tool 4 times with values 'one', 'two', 'three', and 'four'",
            expected_output="The results of the counting operations, noting any failures",
            agent=counting_agent,
        )
        outcome = Crew(agents=[counting_agent], tasks=[over_limit_task]).kickoff()
        assert outcome is not None
        # The tool must stop exactly at the ceiling of 2 successful calls;
        # later attempts should have been rejected.
        assert counting_tool.current_usage_count == counting_tool.max_usage_count

    @pytest.mark.vcr()
    def test_tool_usage_increments_after_successful_execution(self) -> None:
        """Test that usage count increments after each successful native tool call."""
        counting_tool = CountingTool(max_usage_count=10)
        assert counting_tool.current_usage_count == 0
        counting_agent = Agent(
            role="Counting Agent",
            goal="Use the counting tool exactly as requested",
            backstory="You are an agent that counts things precisely.",
            tools=[counting_tool],
            llm=LLM(model="gpt-5-nano"),
            max_iter=5,
            verbose=False,
        )
        two_call_task = Task(
            description="Call the counting_tool exactly 2 times: first with value 'alpha', then with value 'beta'",
            expected_output="The results showing both 'Counted: alpha' and 'Counted: beta'",
            agent=counting_agent,
        )
        outcome = Crew(agents=[counting_agent], tasks=[two_call_task]).kickoff()
        assert outcome is not None
        # Both requested calls happened, and usage stayed within bounds.
        assert 2 <= counting_tool.current_usage_count <= counting_tool.max_usage_count
# =============================================================================
# JSON Parse Error Handling Tests
# =============================================================================
class TestNativeToolCallingJsonParseError:
    """Tests that malformed JSON tool arguments produce clear errors
    instead of silently dropping all arguments."""

    def _make_executor(self, tools: list[BaseTool]) -> "CrewAgentExecutor":
        """Create a minimal CrewAgentExecutor with mocked dependencies."""
        from crewai.agents.crew_agent_executor import CrewAgentExecutor
        from crewai.tools.base_tool import to_langchain

        stub_agent = Mock()
        stub_agent.key = "test_agent"
        stub_agent.role = "tester"
        stub_agent.verbose = False
        stub_agent.fingerprint = None
        stub_agent.tools_results = []

        stub_task = Mock()
        stub_task.name = "test"
        stub_task.description = "test"
        stub_task.id = "test-id"

        # Bypass __init__ and wire only the attributes the native
        # tool-call path actually touches.
        executor = object.__new__(CrewAgentExecutor)
        wiring = {
            "agent": stub_agent,
            "task": stub_task,
            "crew": Mock(),
            "tools": to_langchain(tools),
            "original_tools": tools,
            "tools_handler": None,
            "_printer": Mock(),
            "messages": [],
        }
        for attr_name, attr_value in wiring.items():
            setattr(executor, attr_name, attr_value)
        return executor

    def test_malformed_json_returns_parse_error(self) -> None:
        """Malformed JSON args must return a descriptive error, not silently become {}."""

        class CodeTool(BaseTool):
            name: str = "execute_code"
            description: str = "Run code"

            def _run(self, code: str) -> str:
                return f"ran: {code}"

        code_tool = CodeTool()
        executor = self._make_executor([code_tool])
        from crewai.utilities.agent_utils import convert_tools_to_openai_schema

        _, available_functions, _ = convert_tools_to_openai_schema([code_tool])
        # Unescaped inner quotes make this payload invalid JSON.
        bad_payload = '{"code": "print("hello")"}'
        outcome = executor._execute_single_native_tool_call(
            call_id="call_123",
            func_name="execute_code",
            func_args=bad_payload,
            available_functions=available_functions,
        )
        assert "Failed to parse tool arguments as JSON" in outcome["result"]
        # The tool itself must never have run.
        assert code_tool.current_usage_count == 0

    def test_valid_json_still_executes_normally(self) -> None:
        """Valid JSON args should execute the tool as before."""

        class CodeTool(BaseTool):
            name: str = "execute_code"
            description: str = "Run code"

            def _run(self, code: str) -> str:
                return f"ran: {code}"

        code_tool = CodeTool()
        executor = self._make_executor([code_tool])
        from crewai.utilities.agent_utils import convert_tools_to_openai_schema

        _, available_functions, _ = convert_tools_to_openai_schema([code_tool])
        outcome = executor._execute_single_native_tool_call(
            call_id="call_456",
            func_name="execute_code",
            func_args='{"code": "print(1)"}',
            available_functions=available_functions,
        )
        assert outcome["result"] == "ran: print(1)"

    def test_dict_args_bypass_json_parsing(self) -> None:
        """When func_args is already a dict, no JSON parsing occurs."""

        class CodeTool(BaseTool):
            name: str = "execute_code"
            description: str = "Run code"

            def _run(self, code: str) -> str:
                return f"ran: {code}"

        code_tool = CodeTool()
        executor = self._make_executor([code_tool])
        from crewai.utilities.agent_utils import convert_tools_to_openai_schema

        _, available_functions, _ = convert_tools_to_openai_schema([code_tool])
        outcome = executor._execute_single_native_tool_call(
            call_id="call_789",
            func_name="execute_code",
            func_args={"code": "x = 42"},
            available_functions=available_functions,
        )
        assert outcome["result"] == "ran: x = 42"

    def test_schema_validation_catches_missing_args_on_native_path(self) -> None:
        """The native function calling path should now enforce args_schema,
        catching missing required fields before _run is called."""

        class StrictTool(BaseTool):
            name: str = "strict_tool"
            description: str = "A tool with required args"

            def _run(self, code: str, language: str) -> str:
                return f"{language}: {code}"

        strict_tool = StrictTool()
        executor = self._make_executor([strict_tool])
        from crewai.utilities.agent_utils import convert_tools_to_openai_schema

        _, available_functions, _ = convert_tools_to_openai_schema([strict_tool])
        # "language" is required but deliberately omitted from the args.
        outcome = executor._execute_single_native_tool_call(
            call_id="call_schema",
            func_name="strict_tool",
            func_args={"code": "print(1)"},
            available_functions=available_functions,
        )
        message = outcome["result"]
        assert "Error" in message
        assert "validation failed" in message.lower() or "missing" in message.lower()
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/agents/test_native_tool_calling.py",
"license": "MIT License",
"lines": 1042,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/tests/utilities/test_agent_utils.py | """Tests for agent utility functions."""
from __future__ import annotations
import asyncio
from typing import Any, Literal, Optional
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from pydantic import BaseModel, Field
from crewai.tools.base_tool import BaseTool
from crewai.utilities.agent_utils import (
_asummarize_chunks,
_estimate_token_count,
_extract_summary_tags,
_format_messages_for_summary,
_split_messages_into_chunks,
convert_tools_to_openai_schema,
parse_tool_call_args,
summarize_messages,
)
class CalculatorInput(BaseModel):
    """Input schema for calculator tool."""

    # Raw Python expression, evaluated verbatim by CalculatorTool._run.
    expression: str = Field(description="Mathematical expression to evaluate")
class CalculatorTool(BaseTool):
    """A simple calculator tool for testing."""

    name: str = "calculator"
    description: str = "Perform mathematical calculations"
    args_schema: type[BaseModel] = CalculatorInput

    def _run(self, expression: str) -> str:
        """Evaluate the expression, returning its result or an error string."""
        try:
            # eval is acceptable here: test-only helper with controlled input.
            return str(eval(expression))  # noqa: S307
        except Exception as exc:
            return f"Error: {exc}"
class SearchInput(BaseModel):
    """Input schema for search tool."""

    # Required free-text search string.
    query: str = Field(description="Search query")
    # Optional cap on result count; defaults to 10 when omitted.
    max_results: int = Field(default=10, description="Maximum number of results")
class SearchTool(BaseTool):
    """A search tool for testing."""

    name: str = "web_search"
    description: str = "Search the web for information"
    args_schema: type[BaseModel] = SearchInput

    def _run(self, query: str, max_results: int = 10) -> str:
        """Return a canned search-result string for the given query."""
        return "Search results for '{}' (max {})".format(query, max_results)
class NoSchemaTool(BaseTool):
    """A tool without an args schema for testing edge cases."""

    # No args_schema is declared on purpose: schema conversion must cope
    # with tools that define only name/description.
    name: str = "simple_tool"
    description: str = "A simple tool with no schema"

    def _run(self, **kwargs: Any) -> str:
        """Execute the tool."""
        # Accepts and ignores arbitrary kwargs since no schema constrains them.
        return "Simple tool executed"
class TestConvertToolsToOpenaiSchema:
    """Tests for convert_tools_to_openai_schema function."""

    def test_converts_single_tool(self) -> None:
        """Test converting a single tool to OpenAI schema."""
        schemas, functions, _ = convert_tools_to_openai_schema([CalculatorTool()])
        assert len(schemas) == 1
        assert len(functions) == 1
        entry = schemas[0]
        assert entry["type"] == "function"
        assert entry["function"]["name"] == "calculator"
        assert entry["function"]["description"] == "Perform mathematical calculations"
        parameters = entry["function"]["parameters"]
        assert "properties" in parameters
        assert "expression" in parameters["properties"]

    def test_converts_multiple_tools(self) -> None:
        """Test converting multiple tools to OpenAI schema."""
        schemas, functions, _ = convert_tools_to_openai_schema(
            [CalculatorTool(), SearchTool()]
        )
        assert len(schemas) == 2
        assert len(functions) == 2
        entries_by_name = {entry["function"]["name"]: entry for entry in schemas}
        # Check calculator
        calc_entry = entries_by_name["calculator"]
        assert calc_entry["function"]["description"] == "Perform mathematical calculations"
        # Check search
        search_entry = entries_by_name["web_search"]
        assert search_entry["function"]["description"] == "Search the web for information"
        search_props = search_entry["function"]["parameters"]["properties"]
        assert "query" in search_props
        assert "max_results" in search_props

    def test_functions_dict_contains_callables(self) -> None:
        """Test that the functions dict maps names to callable run methods."""
        _, functions, _ = convert_tools_to_openai_schema([CalculatorTool(), SearchTool()])
        for tool_name in ("calculator", "web_search"):
            assert tool_name in functions
            assert callable(functions[tool_name])

    def test_function_can_be_called(self) -> None:
        """Test that the returned function can be called."""
        _, functions, _ = convert_tools_to_openai_schema([CalculatorTool()])
        assert functions["calculator"](expression="2 + 2") == "4"

    def test_empty_tools_list(self) -> None:
        """Test with an empty tools list."""
        schemas, functions, _ = convert_tools_to_openai_schema([])
        assert schemas == []
        assert functions == {}

    def test_schema_has_required_fields(self) -> None:
        """Test that the schema includes required fields information."""
        schemas, _, _ = convert_tools_to_openai_schema([SearchTool()])
        parameters = schemas[0]["function"]["parameters"]
        # Should have required array
        assert "required" in parameters
        assert "query" in parameters["required"]

    def test_tool_without_args_schema(self) -> None:
        """Test converting a tool that doesn't have an args_schema."""

        # Create a minimal tool without args_schema
        class MinimalTool(BaseTool):
            name: str = "minimal"
            description: str = "A minimal tool"

            def _run(self) -> str:
                return "done"

        schemas, _, _ = convert_tools_to_openai_schema([MinimalTool()])
        assert len(schemas) == 1
        minimal_entry = schemas[0]
        assert minimal_entry["function"]["name"] == "minimal"
        # Parameters should be empty dict or have minimal schema
        assert isinstance(minimal_entry["function"]["parameters"], dict)

    def test_schema_structure_matches_openai_format(self) -> None:
        """Test that the schema structure matches OpenAI's expected format."""
        schemas, _, _ = convert_tools_to_openai_schema([CalculatorTool()])
        entry = schemas[0]
        # Top level must have "type": "function"
        assert entry["type"] == "function"
        # Must have "function" key with nested structure
        assert "function" in entry
        function_spec = entry["function"]
        # Function must have name and description
        assert "name" in function_spec
        assert "description" in function_spec
        assert isinstance(function_spec["name"], str)
        assert isinstance(function_spec["description"], str)
        # Parameters should be a valid JSON schema
        assert "parameters" in function_spec
        assert isinstance(function_spec["parameters"], dict)

    def test_removes_redundant_schema_fields(self) -> None:
        """Test that redundant title and description are removed from parameters."""
        schemas, _, _ = convert_tools_to_openai_schema([CalculatorTool()])
        # Title should be removed as it's redundant with function name
        assert "title" not in schemas[0]["function"]["parameters"]

    def test_preserves_field_descriptions(self) -> None:
        """Test that field descriptions are preserved in the schema."""
        schemas, _, _ = convert_tools_to_openai_schema([SearchTool()])
        query_prop = schemas[0]["function"]["parameters"]["properties"]["query"]
        # Field description should be preserved
        assert "description" in query_prop
        assert query_prop["description"] == "Search query"

    def test_preserves_default_values(self) -> None:
        """Test that default values are preserved in the schema."""
        schemas, _, _ = convert_tools_to_openai_schema([SearchTool()])
        max_results_prop = schemas[0]["function"]["parameters"]["properties"]["max_results"]
        # Default value should be preserved
        assert "default" in max_results_prop
        assert max_results_prop["default"] == 10
def _make_mock_i18n() -> MagicMock:
"""Create a mock i18n with the new structured prompt keys."""
mock_i18n = MagicMock()
mock_i18n.slice.side_effect = lambda key: {
"summarizer_system_message": "You are a precise assistant that creates structured summaries.",
"summarize_instruction": "Summarize the conversation:\n{conversation}",
"summary": "<summary>\n{merged_summary}\n</summary>\nContinue the task.",
}.get(key, "")
return mock_i18n
class MCPStyleInput(BaseModel):
    """Input schema mimicking an MCP tool with optional fields."""

    # Required field: must remain non-nullable in the generated schema.
    query: str = Field(description="Search query")
    # Optional Literal: generated schema should allow the two values or null.
    filter_type: Optional[Literal["internal", "user"]] = Field(
        default=None, description="Filter type"
    )
    # Optional string: generated schema should allow string or null.
    page_id: Optional[str] = Field(
        default=None, description="Page UUID"
    )
class MCPStyleTool(BaseTool):
    """A tool mimicking MCP tool schemas with optional fields."""

    name: str = "mcp_search"
    description: str = "Search with optional filters"
    args_schema: type[BaseModel] = MCPStyleInput

    def _run(self, **kwargs: Any) -> str:
        # Canned result; tests only inspect the generated schema.
        return "result"
class TestOptionalFieldsPreserveNull:
    """Tests that optional tool fields preserve null in the schema."""

    @staticmethod
    def _tool_parameters() -> dict:
        """Build the MCP-style tool schema and return its parameters block."""
        schemas, _, _ = convert_tools_to_openai_schema([MCPStyleTool()])
        return schemas[0]["function"]["parameters"]

    def test_optional_string_allows_null(self) -> None:
        """Optional[str] fields should include null in the schema so the LLM
        can send null instead of being forced to guess a value."""
        page_id_prop = self._tool_parameters()["properties"]["page_id"]
        assert "anyOf" in page_id_prop
        offered_types = {option.get("type") for option in page_id_prop["anyOf"]}
        assert "string" in offered_types
        assert "null" in offered_types

    def test_optional_literal_allows_null(self) -> None:
        """Optional[Literal[...]] fields should include null."""
        filter_prop = self._tool_parameters()["properties"]["filter_type"]
        assert "anyOf" in filter_prop
        assert any(option.get("type") == "null" for option in filter_prop["anyOf"])

    def test_required_field_stays_non_null(self) -> None:
        """Required fields without Optional should NOT have null."""
        query_prop = self._tool_parameters()["properties"]["query"]
        assert query_prop.get("type") == "string"
        assert "anyOf" not in query_prop

    def test_all_fields_in_required_for_strict_mode(self) -> None:
        """All fields (including optional) must be in required for strict mode."""
        required = self._tool_parameters()["required"]
        for field_name in ("query", "filter_type", "page_id"):
            assert field_name in required
class TestSummarizeMessages:
"""Tests for summarize_messages function."""
def test_preserves_files_from_user_messages(self) -> None:
"""Test that files attached to user messages are preserved after summarization."""
mock_files = {"image.png": MagicMock(), "doc.pdf": MagicMock()}
messages: list[dict[str, Any]] = [
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "Analyze this image", "files": mock_files},
{"role": "assistant", "content": "I can see the image shows..."},
{"role": "user", "content": "What about the colors?"},
]
mock_llm = MagicMock()
mock_llm.get_context_window_size.return_value = 1000
mock_llm.call.return_value = "<summary>Summarized conversation about image analysis.</summary>"
summarize_messages(
messages=messages,
llm=mock_llm,
callbacks=[],
i18n=_make_mock_i18n(),
)
# System message preserved + summary message = 2
assert len(messages) == 2
assert messages[0]["role"] == "system"
summary_msg = messages[1]
assert summary_msg["role"] == "user"
assert "files" in summary_msg
assert summary_msg["files"] == mock_files
def test_merges_files_from_multiple_user_messages(self) -> None:
"""Test that files from multiple user messages are merged."""
file1 = MagicMock()
file2 = MagicMock()
file3 = MagicMock()
messages: list[dict[str, Any]] = [
{"role": "user", "content": "First image", "files": {"img1.png": file1}},
{"role": "assistant", "content": "I see the first image."},
{"role": "user", "content": "Second image", "files": {"img2.png": file2, "doc.pdf": file3}},
{"role": "assistant", "content": "I see the second image and document."},
]
mock_llm = MagicMock()
mock_llm.get_context_window_size.return_value = 1000
mock_llm.call.return_value = "<summary>Summarized conversation.</summary>"
summarize_messages(
messages=messages,
llm=mock_llm,
callbacks=[],
i18n=_make_mock_i18n(),
)
assert len(messages) == 1
assert "files" in messages[0]
assert messages[0]["files"] == {
"img1.png": file1,
"img2.png": file2,
"doc.pdf": file3,
}
def test_works_without_files(self) -> None:
"""Test that summarization works when no files are attached."""
messages: list[dict[str, Any]] = [
{"role": "user", "content": "Hello"},
{"role": "assistant", "content": "Hi there!"},
]
mock_llm = MagicMock()
mock_llm.get_context_window_size.return_value = 1000
mock_llm.call.return_value = "<summary>A greeting exchange.</summary>"
summarize_messages(
messages=messages,
llm=mock_llm,
callbacks=[],
i18n=_make_mock_i18n(),
)
assert len(messages) == 1
assert "files" not in messages[0]
def test_modifies_original_messages_list(self) -> None:
"""Test that the original messages list is modified in-place."""
messages: list[dict[str, Any]] = [
{"role": "user", "content": "First message"},
{"role": "assistant", "content": "Response"},
{"role": "user", "content": "Second message"},
]
original_list_id = id(messages)
mock_llm = MagicMock()
mock_llm.get_context_window_size.return_value = 1000
mock_llm.call.return_value = "<summary>Summary</summary>"
summarize_messages(
messages=messages,
llm=mock_llm,
callbacks=[],
i18n=_make_mock_i18n(),
)
assert id(messages) == original_list_id
assert len(messages) == 1
def test_preserves_system_messages(self) -> None:
"""Test that system messages are preserved and not summarized."""
messages: list[dict[str, Any]] = [
{"role": "system", "content": "You are a research assistant."},
{"role": "user", "content": "Find information about AI."},
{"role": "assistant", "content": "I found several resources on AI."},
]
mock_llm = MagicMock()
mock_llm.get_context_window_size.return_value = 1000
mock_llm.call.return_value = "<summary>User asked about AI, assistant found resources.</summary>"
summarize_messages(
messages=messages,
llm=mock_llm,
callbacks=[],
i18n=_make_mock_i18n(),
)
assert len(messages) == 2
assert messages[0]["role"] == "system"
assert messages[0]["content"] == "You are a research assistant."
assert messages[1]["role"] == "user"
def test_formats_conversation_with_role_labels(self) -> None:
"""Test that the LLM receives role-labeled conversation text."""
messages: list[dict[str, Any]] = [
{"role": "system", "content": "System prompt."},
{"role": "user", "content": "Hello there"},
{"role": "assistant", "content": "Hi! How can I help?"},
]
mock_llm = MagicMock()
mock_llm.get_context_window_size.return_value = 1000
mock_llm.call.return_value = "<summary>Greeting exchange.</summary>"
summarize_messages(
messages=messages,
llm=mock_llm,
callbacks=[],
i18n=_make_mock_i18n(),
)
# Check what was passed to llm.call
call_args = mock_llm.call.call_args[0][0]
user_msg_content = call_args[1]["content"]
assert "[USER]:" in user_msg_content
assert "[ASSISTANT]:" in user_msg_content
# System content should NOT appear in summarization input
assert "System prompt." not in user_msg_content
def test_extracts_summary_from_tags(self) -> None:
"""Test that <summary> tags are extracted from LLM response."""
messages: list[dict[str, Any]] = [
{"role": "user", "content": "Do something."},
{"role": "assistant", "content": "Done."},
]
mock_llm = MagicMock()
mock_llm.get_context_window_size.return_value = 1000
mock_llm.call.return_value = "Here is the summary:\n<summary>The extracted summary content.</summary>\nExtra text."
summarize_messages(
messages=messages,
llm=mock_llm,
callbacks=[],
i18n=_make_mock_i18n(),
)
assert "The extracted summary content." in messages[0]["content"]
def test_handles_tool_messages(self) -> None:
    """Tool calls and tool results must be labeled in the summarization prompt."""
    conversation: list[dict[str, Any]] = [
        {"role": "user", "content": "Search for Python."},
        {
            "role": "assistant",
            "content": None,
            "tool_calls": [
                {"function": {"name": "web_search", "arguments": '{"query": "Python"}'}}
            ],
        },
        {"role": "tool", "content": "Python is a programming language.", "name": "web_search"},
        {"role": "assistant", "content": "Python is a programming language."},
    ]
    fake_llm = MagicMock()
    fake_llm.get_context_window_size.return_value = 1000
    fake_llm.call.return_value = "<summary>User searched for Python info.</summary>"
    summarize_messages(
        messages=conversation,
        llm=fake_llm,
        callbacks=[],
        i18n=_make_mock_i18n(),
    )
    # The text handed to the LLM should label tool output with the tool name.
    prompt_text = fake_llm.call.call_args[0][0][1]["content"]
    assert "[TOOL_RESULT (web_search)]:" in prompt_text
def test_only_system_messages_no_op(self) -> None:
    """A history containing only system messages must not be summarized."""
    conversation: list[dict[str, Any]] = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "system", "content": "Additional system instructions."},
    ]
    fake_llm = MagicMock()
    fake_llm.get_context_window_size.return_value = 1000
    summarize_messages(
        messages=conversation,
        llm=fake_llm,
        callbacks=[],
        i18n=_make_mock_i18n(),
    )
    # No LLM call was made and the system messages were left untouched.
    fake_llm.call.assert_not_called()
    assert len(conversation) == 2
    assert conversation[0]["content"] == "You are a helpful assistant."
    assert conversation[1]["content"] == "Additional system instructions."
class TestFormatMessagesForSummary:
    """Tests for the _format_messages_for_summary helper."""

    def test_skips_system_messages(self) -> None:
        formatted = _format_messages_for_summary(
            [
                {"role": "system", "content": "System prompt"},
                {"role": "user", "content": "Hello"},
            ]
        )
        assert "System prompt" not in formatted
        assert "[USER]: Hello" in formatted

    def test_formats_user_and_assistant(self) -> None:
        formatted = _format_messages_for_summary(
            [
                {"role": "user", "content": "Question"},
                {"role": "assistant", "content": "Answer"},
            ]
        )
        assert "[USER]: Question" in formatted
        assert "[ASSISTANT]: Answer" in formatted

    def test_formats_tool_messages(self) -> None:
        formatted = _format_messages_for_summary(
            [{"role": "tool", "content": "Result data", "name": "search_tool"}]
        )
        assert "[TOOL_RESULT (search_tool)]:" in formatted
        assert "Result data" in formatted

    def test_handles_none_content_with_tool_calls(self) -> None:
        tool_call = {"function": {"name": "calculator", "arguments": "{}"}}
        formatted = _format_messages_for_summary(
            [{"role": "assistant", "content": None, "tool_calls": [tool_call]}]
        )
        assert "[Called tools: calculator]" in formatted

    def test_handles_none_content_without_tool_calls(self) -> None:
        formatted = _format_messages_for_summary(
            [{"role": "assistant", "content": None}]
        )
        assert "[ASSISTANT]:" in formatted

    def test_handles_multimodal_content(self) -> None:
        # Only the text part of a multimodal message should be formatted.
        parts = [
            {"type": "text", "text": "Describe this image"},
            {"type": "image_url", "image_url": {"url": "data:image/png;base64,..."}},
        ]
        formatted = _format_messages_for_summary([{"role": "user", "content": parts}])
        assert "[USER]: Describe this image" in formatted

    def test_empty_messages(self) -> None:
        assert _format_messages_for_summary([]) == ""
class TestExtractSummaryTags:
    """Tests for the _extract_summary_tags helper."""

    def test_extracts_content_from_tags(self) -> None:
        response = "Preamble\n<summary>The actual summary.</summary>\nPostamble"
        assert _extract_summary_tags(response) == "The actual summary."

    def test_handles_multiline_content(self) -> None:
        extracted = _extract_summary_tags("<summary>\nLine 1\nLine 2\nLine 3\n</summary>")
        for expected in ("Line 1", "Line 2", "Line 3"):
            assert expected in extracted

    def test_falls_back_when_no_tags(self) -> None:
        # Untagged output is returned verbatim.
        plain = "Just a plain summary without tags."
        assert _extract_summary_tags(plain) == plain

    def test_handles_empty_string(self) -> None:
        assert _extract_summary_tags("") == ""

    def test_extracts_first_match(self) -> None:
        response = "<summary>First</summary> text <summary>Second</summary>"
        assert _extract_summary_tags(response) == "First"
class TestSplitMessagesIntoChunks:
    """Tests for the _split_messages_into_chunks helper."""

    def test_single_chunk_when_under_limit(self) -> None:
        conversation: list[dict[str, Any]] = [
            {"role": "user", "content": "Hello"},
            {"role": "assistant", "content": "Hi"},
        ]
        result = _split_messages_into_chunks(conversation, max_tokens=1000)
        assert len(result) == 1
        assert len(result[0]) == 2

    def test_splits_at_message_boundaries(self) -> None:
        # Each 100-char message is ~25 estimated tokens, so max_tokens=30
        # forces one message per chunk.
        conversation: list[dict[str, Any]] = [
            {"role": "user", "content": "A" * 100},
            {"role": "assistant", "content": "B" * 100},
            {"role": "user", "content": "C" * 100},
        ]
        assert len(_split_messages_into_chunks(conversation, max_tokens=30)) == 3

    def test_excludes_system_messages(self) -> None:
        conversation: list[dict[str, Any]] = [
            {"role": "system", "content": "System prompt"},
            {"role": "user", "content": "Hello"},
        ]
        result = _split_messages_into_chunks(conversation, max_tokens=1000)
        assert len(result) == 1
        roles = [msg.get("role") for chunk in result for msg in chunk]
        assert "system" not in roles

    def test_empty_messages(self) -> None:
        assert _split_messages_into_chunks([], max_tokens=1000) == []

    def test_only_system_messages(self) -> None:
        only_system: list[dict[str, Any]] = [
            {"role": "system", "content": "System prompt"},
        ]
        assert _split_messages_into_chunks(only_system, max_tokens=1000) == []

    def test_handles_none_content(self) -> None:
        conversation: list[dict[str, Any]] = [
            {"role": "assistant", "content": None},
            {"role": "user", "content": "Follow up"},
        ]
        result = _split_messages_into_chunks(conversation, max_tokens=1000)
        assert len(result) == 1
        assert len(result[0]) == 2
class TestEstimateTokenCount:
    """Tests for the _estimate_token_count helper."""

    def test_empty_string(self) -> None:
        assert _estimate_token_count("") == 0

    def test_short_string(self) -> None:
        # 5 chars // 4 chars-per-token == 1
        assert _estimate_token_count("hello") == 1

    def test_longer_string(self) -> None:
        # 100 chars // 4 == 25
        assert _estimate_token_count("a" * 100) == 25

    def test_approximation_is_conservative(self) -> None:
        # For English text, actual token count is typically lower than char/4
        sample = "The quick brown fox jumps over the lazy dog."
        estimate = _estimate_token_count(sample)
        assert 0 < estimate == len(sample) // 4
class TestParallelSummarization:
    """Tests for parallel chunk summarization via asyncio."""

    def _make_messages_for_n_chunks(self, n: int) -> list[dict[str, Any]]:
        """Build a message list that will produce exactly *n* chunks.

        Each message has 400 chars (~100 tokens). With max_tokens=100 returned
        by the mock LLM, each message lands in its own chunk.
        """
        msgs: list[dict[str, Any]] = []
        for i in range(n):
            # "msg-{i}" prefix lets individual tests recognize which chunk a
            # given acall invocation received.
            msgs.append({"role": "user", "content": f"msg-{i} " + "x" * 400})
        return msgs

    def test_multiple_chunks_use_acall(self) -> None:
        """When there are multiple chunks, summarize_messages should use
        llm.acall (parallel) instead of llm.call (sequential)."""
        messages = self._make_messages_for_n_chunks(3)
        mock_llm = MagicMock()
        mock_llm.get_context_window_size.return_value = 100  # force multiple chunks
        # side_effect consumes one return value per awaited acall, in order.
        mock_llm.acall = AsyncMock(
            side_effect=[
                "<summary>Summary chunk 1</summary>",
                "<summary>Summary chunk 2</summary>",
                "<summary>Summary chunk 3</summary>",
            ]
        )
        summarize_messages(
            messages=messages,
            llm=mock_llm,
            callbacks=[],
            i18n=_make_mock_i18n(),
        )
        # acall should have been awaited once per chunk
        assert mock_llm.acall.await_count == 3
        # sync call should NOT have been used for chunk summarization
        mock_llm.call.assert_not_called()

    def test_single_chunk_uses_sync_call(self) -> None:
        """When there is only one chunk, summarize_messages should use
        the sync llm.call path (no async overhead)."""
        messages: list[dict[str, Any]] = [
            {"role": "user", "content": "Short message"},
            {"role": "assistant", "content": "Short reply"},
        ]
        mock_llm = MagicMock()
        # Large window => everything fits into a single chunk.
        mock_llm.get_context_window_size.return_value = 100_000
        mock_llm.call.return_value = "<summary>Short summary</summary>"
        summarize_messages(
            messages=messages,
            llm=mock_llm,
            callbacks=[],
            i18n=_make_mock_i18n(),
        )
        mock_llm.call.assert_called_once()

    def test_parallel_results_preserve_order(self) -> None:
        """Summaries must appear in the same order as the original chunks,
        regardless of which async call finishes first."""
        messages = self._make_messages_for_n_chunks(3)
        mock_llm = MagicMock()
        mock_llm.get_context_window_size.return_value = 100

        # Simulate varying latencies — chunk 2 finishes before chunk 0
        async def _delayed_acall(msgs: Any, **kwargs: Any) -> str:
            # msgs[1] is the user message carrying the chunk text; msgs[0] is
            # presumably the system prompt — TODO confirm against summarize_messages.
            user_content = msgs[1]["content"]
            if "msg-0" in user_content:
                await asyncio.sleep(0.05)
                return "<summary>Summary-A</summary>"
            elif "msg-1" in user_content:
                return "<summary>Summary-B</summary>"  # fastest
            else:
                await asyncio.sleep(0.02)
                return "<summary>Summary-C</summary>"

        mock_llm.acall = _delayed_acall
        summarize_messages(
            messages=messages,
            llm=mock_llm,
            callbacks=[],
            i18n=_make_mock_i18n(),
        )
        # The final summary message should have A, B, C in order
        summary_content = messages[-1]["content"]
        pos_a = summary_content.index("Summary-A")
        pos_b = summary_content.index("Summary-B")
        pos_c = summary_content.index("Summary-C")
        assert pos_a < pos_b < pos_c

    def test_asummarize_chunks_returns_ordered_results(self) -> None:
        """Direct test of the async helper _asummarize_chunks."""
        chunk_a: list[dict[str, Any]] = [{"role": "user", "content": "Chunk A"}]
        chunk_b: list[dict[str, Any]] = [{"role": "user", "content": "Chunk B"}]
        mock_llm = MagicMock()
        mock_llm.acall = AsyncMock(
            side_effect=[
                "<summary>Result A</summary>",
                "<summary>Result B</summary>",
            ]
        )
        results = asyncio.run(
            _asummarize_chunks(
                chunks=[chunk_a, chunk_b],
                llm=mock_llm,
                callbacks=[],
                i18n=_make_mock_i18n(),
            )
        )
        assert len(results) == 2
        assert results[0]["content"] == "Result A"
        assert results[1]["content"] == "Result B"

    @patch("crewai.utilities.agent_utils.is_inside_event_loop", return_value=True)
    def test_works_inside_existing_event_loop(self, _mock_loop: Any) -> None:
        """When called from inside a running event loop (e.g. a Flow),
        the ThreadPoolExecutor fallback should still work."""
        messages = self._make_messages_for_n_chunks(2)
        mock_llm = MagicMock()
        mock_llm.get_context_window_size.return_value = 100
        mock_llm.acall = AsyncMock(
            side_effect=[
                "<summary>Flow summary 1</summary>",
                "<summary>Flow summary 2</summary>",
            ]
        )
        summarize_messages(
            messages=messages,
            llm=mock_llm,
            callbacks=[],
            i18n=_make_mock_i18n(),
        )
        assert mock_llm.acall.await_count == 2
        # Verify the merged summary made it into messages
        assert "Flow summary 1" in messages[-1]["content"]
        assert "Flow summary 2" in messages[-1]["content"]
def _build_long_conversation() -> list[dict[str, Any]]:
"""Build a multi-turn conversation that produces multiple chunks at max_tokens=200.
Each non-system message is ~100-140 estimated tokens (400-560 chars),
so a max_tokens of 200 yields roughly 3 chunks from 6 messages.
"""
return [
{
"role": "system",
"content": "You are a helpful research assistant.",
},
{
"role": "user",
"content": (
"Tell me about the history of the Python programming language. "
"Who created it, when was it first released, and what were the "
"main design goals? Please provide a detailed overview covering "
"the major milestones from its inception through Python 3."
),
},
{
"role": "assistant",
"content": (
"Python was created by Guido van Rossum and first released in 1991. "
"The main design goals were code readability and simplicity. Key milestones: "
"Python 1.0 (1994) introduced functional programming tools like lambda and map. "
"Python 2.0 (2000) added list comprehensions and garbage collection. "
"Python 3.0 (2008) was a major backward-incompatible release that fixed "
"fundamental design flaws. Python 2 reached end-of-life in January 2020."
),
},
{
"role": "user",
"content": (
"What about the async/await features? When were they introduced "
"and how do they compare to similar features in JavaScript and C#? "
"Also explain the Global Interpreter Lock and its implications."
),
},
{
"role": "assistant",
"content": (
"Async/await was introduced in Python 3.5 (PEP 492, 2015). "
"Unlike JavaScript which is single-threaded by design, Python's asyncio "
"is an opt-in framework. C# introduced async/await in 2012 (C# 5.0) and "
"was a major inspiration for Python's implementation. "
"The GIL (Global Interpreter Lock) is a mutex that protects access to "
"Python objects, preventing multiple threads from executing Python bytecodes "
"simultaneously. This means CPU-bound multithreaded programs don't benefit "
"from multiple cores. PEP 703 proposes making the GIL optional in CPython."
),
},
{
"role": "user",
"content": (
"Explain the Python package ecosystem. How does pip work, what is PyPI, "
"and what are virtual environments? Compare pip with conda and uv."
),
},
{
"role": "assistant",
"content": (
"PyPI (Python Package Index) is the official repository hosting 400k+ packages. "
"pip is the standard package installer that downloads from PyPI. "
"Virtual environments (venv) create isolated Python installations to avoid "
"dependency conflicts between projects. conda is a cross-language package manager "
"popular in data science that can manage non-Python dependencies. "
"uv is a new Rust-based tool that is 10-100x faster than pip and aims to replace "
"pip, pip-tools, and virtualenv with a single unified tool."
),
},
]
class TestParallelSummarizationVCR:
    """VCR-backed integration tests for parallel summarization.

    These tests use a real LLM but patch get_context_window_size to force
    multiple chunks, exercising the asyncio.gather + acall parallel path.

    To record cassettes:
    PYTEST_VCR_RECORD_MODE=all uv run pytest lib/crewai/tests/utilities/test_agent_utils.py::TestParallelSummarizationVCR -v
    """

    @pytest.mark.vcr()
    def test_parallel_summarize_openai(self) -> None:
        """Test that parallel summarization with gpt-4o-mini produces a valid summary."""
        # Imported locally so collecting this module does not require LLM deps.
        from crewai.llm import LLM
        from crewai.utilities.i18n import I18N

        llm = LLM(model="gpt-4o-mini", temperature=0)
        i18n = I18N()
        messages = _build_long_conversation()
        original_system = messages[0]["content"]
        # Patch get_context_window_size to return 200 — forces multiple chunks
        with patch.object(type(llm), "get_context_window_size", return_value=200):
            # Verify we actually get multiple chunks with this window size
            non_system = [m for m in messages if m.get("role") != "system"]
            chunks = _split_messages_into_chunks(non_system, max_tokens=200)
            assert len(chunks) > 1, f"Expected multiple chunks, got {len(chunks)}"
            summarize_messages(
                messages=messages,
                llm=llm,
                callbacks=[],
                i18n=i18n,
            )
        # System message preserved
        assert messages[0]["role"] == "system"
        assert messages[0]["content"] == original_system
        # Summary produced as a user message
        summary_msg = messages[-1]
        assert summary_msg["role"] == "user"
        assert len(summary_msg["content"]) > 0

    @pytest.mark.vcr()
    def test_parallel_summarize_preserves_files(self) -> None:
        """Test that file references survive parallel summarization."""
        from crewai.llm import LLM
        from crewai.utilities.i18n import I18N

        llm = LLM(model="gpt-4o-mini", temperature=0)
        i18n = I18N()
        messages = _build_long_conversation()
        # A mock stands in for a real file handle; only the key is asserted on.
        mock_file = MagicMock()
        messages[1]["files"] = {"report.pdf": mock_file}
        with patch.object(type(llm), "get_context_window_size", return_value=200):
            summarize_messages(
                messages=messages,
                llm=llm,
                callbacks=[],
                i18n=i18n,
            )
        summary_msg = messages[-1]
        assert summary_msg["role"] == "user"
        assert "files" in summary_msg
        assert "report.pdf" in summary_msg["files"]
class TestParseToolCallArgs:
    """Unit tests for parse_tool_call_args."""

    def test_valid_json_string_returns_dict(self) -> None:
        parsed, failure = parse_tool_call_args('{"code": "print(1)"}', "run_code", "call_1")
        assert failure is None
        assert parsed == {"code": "print(1)"}

    def test_malformed_json_returns_error_dict(self) -> None:
        parsed, failure = parse_tool_call_args('{"code": "print("hi")"}', "run_code", "call_1")
        assert parsed is None
        assert failure is not None
        assert failure["call_id"] == "call_1"
        assert failure["func_name"] == "run_code"
        assert failure["from_cache"] is False
        # The error text must identify both the failure mode and the tool.
        assert "Failed to parse tool arguments as JSON" in failure["result"]
        assert "run_code" in failure["result"]

    def test_malformed_json_preserves_original_tool(self) -> None:
        sentinel_tool = object()
        _, failure = parse_tool_call_args("{bad}", "my_tool", "call_2", original_tool=sentinel_tool)
        assert failure is not None
        assert failure["original_tool"] is sentinel_tool

    def test_malformed_json_original_tool_defaults_to_none(self) -> None:
        _, failure = parse_tool_call_args("{bad}", "my_tool", "call_3")
        assert failure is not None
        assert failure["original_tool"] is None

    def test_dict_input_returned_directly(self) -> None:
        raw_args = {"code": "x = 42"}
        parsed, failure = parse_tool_call_args(raw_args, "run_code", "call_4")
        assert failure is None
        assert parsed == {"code": "x = 42"}

    def test_empty_dict_input_returned_directly(self) -> None:
        parsed, failure = parse_tool_call_args({}, "run_code", "call_5")
        assert failure is None
        assert parsed == {}

    def test_valid_json_with_nested_values(self) -> None:
        parsed, failure = parse_tool_call_args(
            '{"query": "hello", "options": {"limit": 10}}', "search", "call_6"
        )
        assert failure is None
        assert parsed == {"query": "hello", "options": {"limit": 10}}

    def test_error_result_has_correct_keys(self) -> None:
        _, failure = parse_tool_call_args("{bad json}", "tool", "call_7")
        assert failure is not None
        expected_keys = {"call_id", "func_name", "result", "from_cache", "original_tool"}
        assert set(failure.keys()) == expected_keys
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/utilities/test_agent_utils.py",
"license": "MIT License",
"lines": 849,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/src/crewai/events/event_context.py | """Event context management for parent-child relationship tracking."""
from collections.abc import Generator
from contextlib import contextmanager
import contextvars
from dataclasses import dataclass
from enum import Enum
from crewai.events.utils.console_formatter import ConsoleFormatter
class MismatchBehavior(Enum):
    """Behavior when event pairs don't match.

    WARN prints a console warning, RAISE raises the corresponding
    exception, and SILENT ignores the problem entirely (see
    handle_mismatch / handle_empty_pop).
    """

    WARN = "warn"
    RAISE = "raise"
    SILENT = "silent"
@dataclass
class EventContextConfig:
    """Configuration for event context behavior."""

    # Maximum number of open scopes before push_event_scope raises
    # StackDepthExceededError; values <= 0 disable the limit check.
    max_stack_depth: int = 100
    # How to react when an ending event closes a non-matching starting event.
    mismatch_behavior: MismatchBehavior = MismatchBehavior.WARN
    # How to react when an ending event arrives with an empty scope stack.
    empty_pop_behavior: MismatchBehavior = MismatchBehavior.WARN
class StackDepthExceededError(Exception):
    """Raised when stack depth limit is exceeded.

    push_event_scope raises this once the stack reaches
    EventContextConfig.max_stack_depth entries, which usually indicates
    missing ending events.
    """
class EventPairingError(Exception):
    """Raised when event pairs don't match.

    Emitted by handle_mismatch when the active config sets
    mismatch_behavior to MismatchBehavior.RAISE.
    """
class EmptyStackError(Exception):
    """Raised when popping from empty stack.

    Emitted by handle_empty_pop when the active config sets
    empty_pop_behavior to MismatchBehavior.RAISE.
    """
# Stack of (event_id, event_type) pairs for the currently open scopes.
# Stored as an immutable tuple: ContextVar values are replaced, never
# mutated, so each context sees a consistent snapshot.
_event_id_stack: contextvars.ContextVar[tuple[tuple[str, str], ...]] = (
    contextvars.ContextVar("_event_id_stack", default=())
)
# Optional per-context override for depth/mismatch behavior; callers fall
# back to _default_config when this is unset.
_event_context_config: contextvars.ContextVar[EventContextConfig | None] = (
    contextvars.ContextVar("_event_context_config", default=None)
)
# ID of the most recently emitted event (linear chain tracking).
_last_event_id: contextvars.ContextVar[str | None] = contextvars.ContextVar(
    "_last_event_id", default=None
)
# ID of the event that triggered the current execution (causal chain).
_triggering_event_id: contextvars.ContextVar[str | None] = contextvars.ContextVar(
    "_triggering_event_id", default=None
)
# Shared fallback config and console used when no per-context value is set.
_default_config = EventContextConfig()
_console = ConsoleFormatter()
def get_current_parent_id() -> str | None:
    """Return the event ID of the innermost open scope, or None if no scope is open."""
    entries = _event_id_stack.get()
    if not entries:
        return None
    top_id, _ = entries[-1]
    return top_id
def get_enclosing_parent_id() -> str | None:
    """Return the event ID one level above the current scope (stack[-2]), or None."""
    entries = _event_id_stack.get()
    if len(entries) < 2:
        return None
    parent_id, _ = entries[-2]
    return parent_id
def get_last_event_id() -> str | None:
    """Get the ID of the last emitted event for linear chain tracking.

    Returns:
        The event_id of the previously emitted event, or None if no event yet.
    """
    # Context-local: each asyncio task / copied context sees its own value.
    return _last_event_id.get()
def reset_last_event_id() -> None:
    """Reset the last event ID to None.

    Should be called at the start of a new flow or when resetting event state.
    """
    _last_event_id.set(None)
def set_last_event_id(event_id: str) -> None:
    """Set the ID of the last emitted event.

    Args:
        event_id: The event_id to set as the last emitted event.
    """
    _last_event_id.set(event_id)
def get_triggering_event_id() -> str | None:
    """Get the ID of the event that triggered the current execution.

    Returns:
        The event_id of the triggering event, or None if not in a triggered context.
    """
    return _triggering_event_id.get()
def set_triggering_event_id(event_id: str | None) -> None:
    """Set the ID of the triggering event for causal chain tracking.

    Args:
        event_id: The event_id that triggered the current execution, or None.
    """
    _triggering_event_id.set(event_id)
@contextmanager
def triggered_by_scope(event_id: str) -> Generator[None, None, None]:
    """Mark *event_id* as the triggering event for this scope.

    All events emitted within this context will have their triggered_by_event_id
    set to the provided event_id; on exit the previous triggering event is
    restored, even if the body raises.

    Args:
        event_id: The event_id that triggered the current execution.
    """
    # A ContextVar token restores exactly the prior state on exit.
    token = _triggering_event_id.set(event_id)
    try:
        yield
    finally:
        _triggering_event_id.reset(token)
def push_event_scope(event_id: str, event_type: str = "") -> None:
    """Push an event ID and type onto the scope stack.

    Raises:
        StackDepthExceededError: if the configured depth limit is reached.
    """
    cfg = _event_context_config.get() or _default_config
    current = _event_id_stack.get()
    limit = cfg.max_stack_depth
    # A non-positive limit disables the depth check entirely.
    if limit > 0 and len(current) >= limit:
        raise StackDepthExceededError(
            f"Event stack depth limit ({limit}) exceeded. "
            "This usually indicates missing ending events."
        )
    _event_id_stack.set(current + ((event_id, event_type),))
def pop_event_scope() -> tuple[str, str] | None:
    """Remove and return the innermost (event_id, event_type) entry.

    Returns None when the stack is already empty.
    """
    entries = _event_id_stack.get()
    if not entries:
        return None
    *rest, top = entries
    _event_id_stack.set(tuple(rest))
    return top
def handle_empty_pop(event_type_name: str) -> None:
    """React to an ending event that arrived with no open scope.

    Depending on the configured empty_pop_behavior this raises, warns on the
    console, or does nothing.
    """
    cfg = _event_context_config.get() or _default_config
    msg = (
        f"Ending event '{event_type_name}' emitted with empty scope stack. "
        "Missing starting event?"
    )
    behavior = cfg.empty_pop_behavior
    if behavior is MismatchBehavior.RAISE:
        raise EmptyStackError(msg)
    if behavior is MismatchBehavior.WARN:
        _console.print(f"[CrewAIEventsBus] Warning: {msg}")
def handle_mismatch(
    event_type_name: str,
    popped_type: str,
    expected_start: str,
) -> None:
    """React to an ending event that closed the wrong starting event.

    Depending on the configured mismatch_behavior this raises, warns on the
    console, or does nothing.
    """
    cfg = _event_context_config.get() or _default_config
    msg = (
        f"Event pairing mismatch. '{event_type_name}' closed '{popped_type}' "
        f"(expected '{expected_start}')"
    )
    behavior = cfg.mismatch_behavior
    if behavior is MismatchBehavior.RAISE:
        raise EventPairingError(msg)
    if behavior is MismatchBehavior.WARN:
        _console.print(f"[CrewAIEventsBus] Warning: {msg}")
@contextmanager
def event_scope(event_id: str, event_type: str = "") -> Generator[None, None, None]:
    """Context manager to establish a parent event scope.

    Re-entrant: if *event_id* is already somewhere on the stack the scope is
    a no-op, so nested use of the same event does not double-push.
    """
    if any(eid == event_id for eid, _ in _event_id_stack.get()):
        yield
        return
    push_event_scope(event_id, event_type)
    try:
        yield
    finally:
        pop_event_scope()
# Event type names that open a new parent scope on the event stack; each is
# expected to be closed by a matching name from SCOPE_ENDING_EVENTS (see
# VALID_EVENT_PAIRS for the mapping).
SCOPE_STARTING_EVENTS: frozenset[str] = frozenset(
    {
        "flow_started",
        "method_execution_started",
        "crew_kickoff_started",
        "crew_train_started",
        "crew_test_started",
        "agent_execution_started",
        "agent_evaluation_started",
        "lite_agent_execution_started",
        "task_started",
        "llm_call_started",
        "llm_guardrail_started",
        "tool_usage_started",
        "mcp_connection_started",
        "mcp_tool_execution_started",
        "memory_retrieval_started",
        "memory_save_started",
        "memory_query_started",
        "knowledge_query_started",
        "knowledge_search_query_started",
        "a2a_delegation_started",
        "a2a_conversation_started",
        "a2a_server_task_started",
        "a2a_parallel_delegation_started",
        "agent_reasoning_started",
    }
)
# Event type names that close the current scope. Several endings (completed /
# failed / paused / canceled variants) can close the same starting event;
# VALID_EVENT_PAIRS maps each ending to its expected opener.
SCOPE_ENDING_EVENTS: frozenset[str] = frozenset(
    {
        "flow_finished",
        "flow_paused",
        "method_execution_finished",
        "method_execution_failed",
        "method_execution_paused",
        "crew_kickoff_completed",
        "crew_kickoff_failed",
        "crew_train_completed",
        "crew_train_failed",
        "crew_test_completed",
        "crew_test_failed",
        "agent_execution_completed",
        "agent_execution_error",
        "agent_evaluation_completed",
        "agent_evaluation_failed",
        "lite_agent_execution_completed",
        "lite_agent_execution_error",
        "task_completed",
        "task_failed",
        "llm_call_completed",
        "llm_call_failed",
        "llm_guardrail_completed",
        "llm_guardrail_failed",
        "tool_usage_finished",
        "tool_usage_error",
        "mcp_connection_completed",
        "mcp_connection_failed",
        "mcp_tool_execution_completed",
        "mcp_tool_execution_failed",
        "memory_retrieval_completed",
        "memory_retrieval_failed",
        "memory_save_completed",
        "memory_save_failed",
        "memory_query_completed",
        "memory_query_failed",
        "knowledge_query_completed",
        "knowledge_query_failed",
        "knowledge_search_query_completed",
        "knowledge_search_query_failed",
        "a2a_delegation_completed",
        "a2a_conversation_completed",
        "a2a_server_task_completed",
        "a2a_server_task_canceled",
        "a2a_server_task_failed",
        "a2a_parallel_delegation_completed",
        "agent_reasoning_completed",
        "agent_reasoning_failed",
    }
)
# Maps each ending event name to the starting event it is expected to close.
# Used by mismatch handling to detect unbalanced start/end pairs.
VALID_EVENT_PAIRS: dict[str, str] = {
    "flow_finished": "flow_started",
    "flow_paused": "flow_started",
    "method_execution_finished": "method_execution_started",
    "method_execution_failed": "method_execution_started",
    "method_execution_paused": "method_execution_started",
    "crew_kickoff_completed": "crew_kickoff_started",
    "crew_kickoff_failed": "crew_kickoff_started",
    "crew_train_completed": "crew_train_started",
    "crew_train_failed": "crew_train_started",
    "crew_test_completed": "crew_test_started",
    "crew_test_failed": "crew_test_started",
    "agent_execution_completed": "agent_execution_started",
    "agent_execution_error": "agent_execution_started",
    "agent_evaluation_completed": "agent_evaluation_started",
    "agent_evaluation_failed": "agent_evaluation_started",
    "lite_agent_execution_completed": "lite_agent_execution_started",
    "lite_agent_execution_error": "lite_agent_execution_started",
    "task_completed": "task_started",
    "task_failed": "task_started",
    "llm_call_completed": "llm_call_started",
    "llm_call_failed": "llm_call_started",
    "llm_guardrail_completed": "llm_guardrail_started",
    "llm_guardrail_failed": "llm_guardrail_started",
    "tool_usage_finished": "tool_usage_started",
    "tool_usage_error": "tool_usage_started",
    "mcp_connection_completed": "mcp_connection_started",
    "mcp_connection_failed": "mcp_connection_started",
    "mcp_tool_execution_completed": "mcp_tool_execution_started",
    "mcp_tool_execution_failed": "mcp_tool_execution_started",
    "memory_retrieval_completed": "memory_retrieval_started",
    "memory_retrieval_failed": "memory_retrieval_started",
    "memory_save_completed": "memory_save_started",
    "memory_save_failed": "memory_save_started",
    "memory_query_completed": "memory_query_started",
    "memory_query_failed": "memory_query_started",
    "knowledge_query_completed": "knowledge_query_started",
    "knowledge_query_failed": "knowledge_query_started",
    "knowledge_search_query_completed": "knowledge_search_query_started",
    "knowledge_search_query_failed": "knowledge_search_query_started",
    "a2a_delegation_completed": "a2a_delegation_started",
    "a2a_conversation_completed": "a2a_conversation_started",
    "a2a_server_task_completed": "a2a_server_task_started",
    "a2a_server_task_canceled": "a2a_server_task_started",
    "a2a_server_task_failed": "a2a_server_task_started",
    "a2a_parallel_delegation_completed": "a2a_parallel_delegation_started",
    "agent_reasoning_completed": "agent_reasoning_started",
    "agent_reasoning_failed": "agent_reasoning_started",
}
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/events/event_context.py",
"license": "MIT License",
"lines": 272,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai/tests/events/test_event_context.py | """Tests for event context management."""
import pytest
from crewai.events.event_context import (
SCOPE_ENDING_EVENTS,
SCOPE_STARTING_EVENTS,
VALID_EVENT_PAIRS,
EmptyStackError,
EventPairingError,
MismatchBehavior,
StackDepthExceededError,
_event_context_config,
EventContextConfig,
get_current_parent_id,
get_enclosing_parent_id,
get_last_event_id,
get_triggering_event_id,
handle_empty_pop,
handle_mismatch,
pop_event_scope,
push_event_scope,
reset_last_event_id,
set_last_event_id,
set_triggering_event_id,
triggered_by_scope,
)
class TestStackOperations:
    """Tests for stack push/pop operations.

    The scope stack lives in a module-level ContextVar, so pushes made by one
    test persist into the next. Without a per-test reset,
    test_pop_empty_stack_returns_none only passes if the preceding tests
    happen to leave the stack empty — they do not (net pushes leak).
    """

    def setup_method(self) -> None:
        # Drain any scopes leaked by earlier tests so each test is
        # order-independent and starts from an empty stack.
        while pop_event_scope() is not None:
            pass

    def test_empty_stack_returns_none(self) -> None:
        assert get_current_parent_id() is None
        assert get_enclosing_parent_id() is None

    def test_push_and_get_parent(self) -> None:
        push_event_scope("event-1", "task_started")
        assert get_current_parent_id() == "event-1"

    def test_nested_push(self) -> None:
        push_event_scope("event-1", "crew_kickoff_started")
        push_event_scope("event-2", "task_started")
        assert get_current_parent_id() == "event-2"
        assert get_enclosing_parent_id() == "event-1"

    def test_pop_restores_parent(self) -> None:
        push_event_scope("event-1", "crew_kickoff_started")
        push_event_scope("event-2", "task_started")
        popped = pop_event_scope()
        assert popped == ("event-2", "task_started")
        assert get_current_parent_id() == "event-1"

    def test_pop_empty_stack_returns_none(self) -> None:
        assert pop_event_scope() is None
class TestStackDepthLimit:
    """Tests for stack depth limit."""

    def setup_method(self) -> None:
        # The stack is a module-level ContextVar: scopes pushed by other tests
        # persist here. With leftovers and max_stack_depth=3 the *first* push
        # below would already raise, erroring the test before pytest.raises.
        while pop_event_scope() is not None:
            pass

    def teardown_method(self) -> None:
        # Restore the default config and drop this test's scopes so the low
        # depth limit does not leak into later tests.
        _event_context_config.set(None)
        while pop_event_scope() is not None:
            pass

    def test_depth_limit_exceeded_raises(self) -> None:
        _event_context_config.set(EventContextConfig(max_stack_depth=3))
        push_event_scope("event-1", "type-1")
        push_event_scope("event-2", "type-2")
        push_event_scope("event-3", "type-3")
        with pytest.raises(StackDepthExceededError):
            push_event_scope("event-4", "type-4")
class TestMismatchHandling:
    """Tests for mismatch behavior."""

    def teardown_method(self) -> None:
        # _event_context_config is a module-level ContextVar; without this
        # reset the RAISE config set below leaks into every later test.
        _event_context_config.set(None)

    def test_handle_mismatch_raises_when_configured(self) -> None:
        _event_context_config.set(
            EventContextConfig(mismatch_behavior=MismatchBehavior.RAISE)
        )
        with pytest.raises(EventPairingError):
            handle_mismatch("task_completed", "llm_call_started", "task_started")

    def test_handle_empty_pop_raises_when_configured(self) -> None:
        _event_context_config.set(
            EventContextConfig(empty_pop_behavior=MismatchBehavior.RAISE)
        )
        with pytest.raises(EmptyStackError):
            handle_empty_pop("task_completed")
class TestEventTypeSets:
    """Tests for event type set completeness."""

    def test_all_ending_events_have_pairs(self) -> None:
        # Every ending event must map to its starting counterpart.
        unmapped = SCOPE_ENDING_EVENTS - VALID_EVENT_PAIRS.keys()
        assert not unmapped

    def test_all_pairs_reference_starting_events(self) -> None:
        unknown_starts = set(VALID_EVENT_PAIRS.values()) - SCOPE_STARTING_EVENTS
        assert not unknown_starts

    def test_starting_and_ending_are_disjoint(self) -> None:
        assert SCOPE_STARTING_EVENTS.isdisjoint(SCOPE_ENDING_EVENTS)
class TestLastEventIdTracking:
    """Tests for linear chain event ID tracking."""

    def test_initial_last_event_id_is_none(self) -> None:
        reset_last_event_id()
        observed = get_last_event_id()
        assert observed is None

    def test_set_and_get_last_event_id(self) -> None:
        reset_last_event_id()
        set_last_event_id("event-123")
        observed = get_last_event_id()
        assert observed == "event-123"

    def test_reset_clears_last_event_id(self) -> None:
        set_last_event_id("event-123")
        reset_last_event_id()
        assert get_last_event_id() is None

    def test_overwrite_last_event_id(self) -> None:
        reset_last_event_id()
        # Later sets replace earlier ones; only the last value survives.
        for event_id in ("event-1", "event-2"):
            set_last_event_id(event_id)
        assert get_last_event_id() == "event-2"
class TestTriggeringEventIdTracking:
    """Tests for causal chain event ID tracking."""

    def test_initial_triggering_event_id_is_none(self) -> None:
        set_triggering_event_id(None)
        assert get_triggering_event_id() is None

    def test_set_and_get_triggering_event_id(self) -> None:
        set_triggering_event_id("trigger-123")
        observed = get_triggering_event_id()
        # Clean up so the value does not leak into other tests.
        set_triggering_event_id(None)
        assert observed == "trigger-123"

    def test_set_none_clears_triggering_event_id(self) -> None:
        set_triggering_event_id("trigger-123")
        set_triggering_event_id(None)
        assert get_triggering_event_id() is None
class TestTriggeredByScope:
    """Tests for triggered_by_scope context manager."""

    def test_scope_sets_triggering_id(self) -> None:
        set_triggering_event_id(None)
        with triggered_by_scope("trigger-456"):
            assert get_triggering_event_id() == "trigger-456"

    def test_scope_restores_previous_value(self) -> None:
        # On exit the previously active value (None here) comes back.
        set_triggering_event_id(None)
        with triggered_by_scope("trigger-456"):
            pass
        assert get_triggering_event_id() is None

    def test_nested_scopes(self) -> None:
        # Inner scope shadows outer; each exit restores one level.
        set_triggering_event_id(None)
        with triggered_by_scope("outer"):
            assert get_triggering_event_id() == "outer"
            with triggered_by_scope("inner"):
                assert get_triggering_event_id() == "inner"
            assert get_triggering_event_id() == "outer"
        assert get_triggering_event_id() is None

    def test_scope_restores_on_exception(self) -> None:
        # Restoration must happen even when the body raises.
        set_triggering_event_id(None)
        with pytest.raises(ValueError):
            with triggered_by_scope("trigger-789"):
                raise ValueError("test error")
        assert get_triggering_event_id() is None
def test_agent_scope_preserved_after_tool_error_event() -> None:
    """A tool-usage error event must not pop the enclosing agent scope."""
    from crewai.events import crewai_event_bus
    from crewai.events.types.tool_usage_events import (
        ToolUsageErrorEvent,
        ToolUsageStartedEvent,
    )

    # Manually build a crew -> task -> agent scope hierarchy.
    push_event_scope("crew-1", "crew_kickoff_started")
    push_event_scope("task-1", "task_started")
    push_event_scope("agent-1", "agent_execution_started")
    # Emit a started/error pair for a tool call inside the agent scope.
    crewai_event_bus.emit(
        None,
        ToolUsageStartedEvent(
            tool_name="test_tool",
            tool_args={},
            agent_key="test_agent",
        )
    )
    crewai_event_bus.emit(
        None,
        ToolUsageErrorEvent(
            tool_name="test_tool",
            tool_args={},
            agent_key="test_agent",
            error=ValueError("test error"),
        )
    )
    # Flush so any queued handlers run before the assertion.
    crewai_event_bus.flush()
    # The agent scope must still be the current parent after the error.
    assert get_current_parent_id() == "agent-1"
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/events/test_event_context.py",
"license": "MIT License",
"lines": 166,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/tests/events/test_event_ordering.py | """Tests for event ordering and parent-child relationships."""
import pytest
from crewai.agent import Agent
from crewai.crew import Crew
from crewai.events.base_events import BaseEvent
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.agent_events import (
AgentExecutionCompletedEvent,
AgentExecutionStartedEvent,
)
from crewai.events.types.crew_events import (
CrewKickoffCompletedEvent,
CrewKickoffStartedEvent,
)
from crewai.events.types.flow_events import (
FlowFinishedEvent,
FlowStartedEvent,
MethodExecutionFinishedEvent,
MethodExecutionStartedEvent,
)
from crewai.events.types.llm_events import (
LLMCallCompletedEvent,
LLMCallStartedEvent,
)
from crewai.events.types.task_events import (
TaskCompletedEvent,
TaskStartedEvent,
)
from crewai.flow.flow import Flow, listen, start
from crewai.task import Task
class EventCollector:
    """Collects events and provides helpers to find related events."""

    def __init__(self) -> None:
        self.events: list[BaseEvent] = []

    def add(self, event: BaseEvent) -> None:
        """Record *event* in arrival order."""
        self.events.append(event)

    def first(self, event_type: type[BaseEvent]) -> BaseEvent | None:
        """Return the earliest collected event of *event_type*, or None."""
        return next((evt for evt in self.events if isinstance(evt, event_type)), None)

    def all_of(self, event_type: type[BaseEvent]) -> list[BaseEvent]:
        """Return every collected event of *event_type*, in arrival order."""
        matches: list[BaseEvent] = []
        for candidate in self.events:
            if isinstance(candidate, event_type):
                matches.append(candidate)
        return matches

    def with_parent(self, parent_id: str) -> list[BaseEvent]:
        """Return all events whose parent_event_id equals *parent_id*."""
        return [evt for evt in self.events if evt.parent_event_id == parent_id]
@pytest.fixture
def collector() -> EventCollector:
    """Fixture that collects events during test execution.

    Registers one handler per event type the ordering tests care about;
    each handler simply appends the received event to the collector.
    """
    c = EventCollector()

    # A registration loop replaces twelve copy-pasted handler functions
    # (h1..h12 in the old version). The handler closes over `c` only,
    # so closure late binding is not a concern here.
    event_types = (
        CrewKickoffStartedEvent,
        CrewKickoffCompletedEvent,
        TaskStartedEvent,
        TaskCompletedEvent,
        AgentExecutionStartedEvent,
        AgentExecutionCompletedEvent,
        LLMCallStartedEvent,
        LLMCallCompletedEvent,
        FlowStartedEvent,
        FlowFinishedEvent,
        MethodExecutionStartedEvent,
        MethodExecutionFinishedEvent,
    )
    for event_type in event_types:

        @crewai_event_bus.on(event_type)
        def _capture(source, event):
            c.add(event)

    return c
class TestCrewEventOrdering:
    """Tests for event ordering in crew execution."""

    @pytest.mark.vcr()
    def test_crew_events_have_event_ids(self, collector: EventCollector) -> None:
        """Every crew event should have a unique event_id."""
        agent = Agent(
            role="Responder",
            goal="Respond briefly",
            backstory="You give short answers.",
            verbose=False,
        )
        task = Task(
            description="Say 'hello' and nothing else.",
            expected_output="The word hello.",
            agent=agent,
        )
        crew = Crew(agents=[agent], tasks=[task], verbose=False)
        crew.kickoff()
        # Flush so every emitted event has reached the collector handlers
        # before asserting.
        crewai_event_bus.flush()
        started = collector.first(CrewKickoffStartedEvent)
        completed = collector.first(CrewKickoffCompletedEvent)
        assert started is not None
        assert started.event_id is not None
        assert len(started.event_id) > 0
        assert completed is not None
        assert completed.event_id is not None
        # Start and completion are distinct events, not a reused id.
        assert completed.event_id != started.event_id

    @pytest.mark.vcr()
    def test_crew_completed_after_started(self, collector: EventCollector) -> None:
        """Crew completed event should have higher sequence than started."""
        agent = Agent(
            role="Responder",
            goal="Respond briefly",
            backstory="You give short answers.",
            verbose=False,
        )
        task = Task(
            description="Say 'yes' and nothing else.",
            expected_output="The word yes.",
            agent=agent,
        )
        crew = Crew(agents=[agent], tasks=[task], verbose=False)
        crew.kickoff()
        crewai_event_bus.flush()
        started = collector.first(CrewKickoffStartedEvent)
        completed = collector.first(CrewKickoffCompletedEvent)
        assert started is not None
        assert completed is not None
        assert started.emission_sequence is not None
        assert completed.emission_sequence is not None
        # emission_sequence encodes global emission order.
        assert completed.emission_sequence > started.emission_sequence

    @pytest.mark.vcr()
    def test_task_parent_is_crew(self, collector: EventCollector) -> None:
        """Task events should have crew event as parent."""
        agent = Agent(
            role="Responder",
            goal="Respond briefly",
            backstory="You give short answers.",
            verbose=False,
        )
        task = Task(
            description="Say 'ok' and nothing else.",
            expected_output="The word ok.",
            agent=agent,
        )
        crew = Crew(agents=[agent], tasks=[task], verbose=False)
        crew.kickoff()
        crewai_event_bus.flush()
        crew_started = collector.first(CrewKickoffStartedEvent)
        task_started = collector.first(TaskStartedEvent)
        assert crew_started is not None
        assert task_started is not None
        # Parent linkage: the task scope opens inside the crew scope.
        assert task_started.parent_event_id == crew_started.event_id
class TestAgentEventOrdering:
    """Tests for event ordering in agent execution."""

    @pytest.mark.vcr()
    def test_agent_events_have_event_ids(self, collector: EventCollector) -> None:
        """Agent execution events should have event_ids."""
        agent = Agent(
            role="Helper",
            goal="Help with tasks",
            backstory="You help.",
            verbose=False,
        )
        task = Task(
            description="Say 'done' and nothing else.",
            expected_output="The word done.",
            agent=agent,
        )
        # Execute the task directly on the agent (no Crew wrapper).
        agent.execute_task(task)
        crewai_event_bus.flush()
        started = collector.first(AgentExecutionStartedEvent)
        completed = collector.first(AgentExecutionCompletedEvent)
        # NOTE(review): asserts are conditional — presumably direct
        # execute_task does not emit these events on every path; confirm.
        if started:
            assert started.event_id is not None
        if completed:
            assert completed.event_id is not None

    @pytest.mark.vcr()
    def test_llm_events_have_parent(self, collector: EventCollector) -> None:
        """LLM call events should have a parent event."""
        agent = Agent(
            role="Helper",
            goal="Help with tasks",
            backstory="You help.",
            verbose=False,
        )
        task = Task(
            description="Say 'hi' and nothing else.",
            expected_output="The word hi.",
            agent=agent,
        )
        agent.execute_task(task)
        crewai_event_bus.flush()
        llm_started = collector.first(LLMCallStartedEvent)
        if llm_started:
            assert llm_started.event_id is not None
            # LLM events should have some parent in the hierarchy
            assert llm_started.parent_event_id is not None
class TestFlowWithCrewEventOrdering:
    """Tests for event ordering in flows containing crews."""

    @pytest.mark.asyncio
    @pytest.mark.vcr()
    async def test_flow_events_have_ids(self, collector: EventCollector) -> None:
        """Flow events should have event_ids."""
        agent = Agent(
            role="Worker",
            goal="Do work",
            backstory="You work.",
            verbose=False,
        )
        task = Task(
            description="Say 'complete' and nothing else.",
            expected_output="The word complete.",
            agent=agent,
        )

        # Minimal flow: a single start method that runs one crew.
        class SimpleFlow(Flow):
            @start()
            async def run_crew(self):
                c = Crew(agents=[agent], tasks=[task], verbose=False)
                return await c.akickoff()

        flow = SimpleFlow()
        await flow.akickoff()
        crewai_event_bus.flush()
        flow_started = collector.first(FlowStartedEvent)
        flow_finished = collector.first(FlowFinishedEvent)
        assert flow_started is not None
        assert flow_started.event_id is not None
        assert flow_finished is not None
        assert flow_finished.event_id is not None

    @pytest.mark.asyncio
    @pytest.mark.vcr()
    async def test_method_parent_is_flow(self, collector: EventCollector) -> None:
        """Method execution events should have flow as parent."""
        agent = Agent(
            role="Worker",
            goal="Do work",
            backstory="You work.",
            verbose=False,
        )
        task = Task(
            description="Say 'ready' and nothing else.",
            expected_output="The word ready.",
            agent=agent,
        )

        class FlowWithMethod(Flow):
            @start()
            async def my_method(self):
                c = Crew(agents=[agent], tasks=[task], verbose=False)
                return await c.akickoff()

        flow = FlowWithMethod()
        await flow.akickoff()
        crewai_event_bus.flush()
        flow_started = collector.first(FlowStartedEvent)
        method_started = collector.first(MethodExecutionStartedEvent)
        assert flow_started is not None
        assert method_started is not None
        # The method scope opens directly under the flow scope.
        assert method_started.parent_event_id == flow_started.event_id

    @pytest.mark.asyncio
    @pytest.mark.vcr()
    async def test_crew_parent_is_method(self, collector: EventCollector) -> None:
        """Crew inside flow method should have method as parent."""
        agent = Agent(
            role="Worker",
            goal="Do work",
            backstory="You work.",
            verbose=False,
        )
        task = Task(
            description="Say 'go' and nothing else.",
            expected_output="The word go.",
            agent=agent,
        )

        class FlowWithCrew(Flow):
            @start()
            async def run_it(self):
                c = Crew(agents=[agent], tasks=[task], verbose=False)
                return await c.akickoff()

        flow = FlowWithCrew()
        await flow.akickoff()
        crewai_event_bus.flush()
        method_started = collector.first(MethodExecutionStartedEvent)
        crew_started = collector.first(CrewKickoffStartedEvent)
        assert method_started is not None
        assert crew_started is not None
        # The crew kicked off inside the method is parented to that method.
        assert crew_started.parent_event_id == method_started.event_id
class TestFlowWithMultipleCrewsEventOrdering:
    """Tests for event ordering in flows with multiple crews."""

    @pytest.mark.asyncio
    @pytest.mark.vcr()
    async def test_two_crews_have_different_ids(
        self, collector: EventCollector
    ) -> None:
        """Two crews in a flow should have different event_ids."""
        agent1 = Agent(
            role="First",
            goal="Be first",
            backstory="You go first.",
            verbose=False,
        )
        agent2 = Agent(
            role="Second",
            goal="Be second",
            backstory="You go second.",
            verbose=False,
        )
        task1 = Task(
            description="Say '1' and nothing else.",
            expected_output="The number 1.",
            agent=agent1,
        )
        task2 = Task(
            description="Say '2' and nothing else.",
            expected_output="The number 2.",
            agent=agent2,
        )

        # Two crews run sequentially: `second` listens on `first`.
        class TwoCrewFlow(Flow):
            @start()
            async def first(self):
                c = Crew(agents=[agent1], tasks=[task1], verbose=False)
                return await c.akickoff()

            @listen(first)
            async def second(self, _):
                c = Crew(agents=[agent2], tasks=[task2], verbose=False)
                return await c.akickoff()

        flow = TwoCrewFlow()
        await flow.akickoff()
        crewai_event_bus.flush()
        crew_started_events = collector.all_of(CrewKickoffStartedEvent)
        assert len(crew_started_events) >= 2
        # Each kickoff must be a fresh event, not a reused id.
        assert crew_started_events[0].event_id != crew_started_events[1].event_id

    @pytest.mark.asyncio
    @pytest.mark.vcr()
    async def test_second_crew_after_first(self, collector: EventCollector) -> None:
        """Second crew should have higher sequence than first."""
        agent1 = Agent(
            role="First",
            goal="Be first",
            backstory="You go first.",
            verbose=False,
        )
        agent2 = Agent(
            role="Second",
            goal="Be second",
            backstory="You go second.",
            verbose=False,
        )
        task1 = Task(
            description="Say 'a' and nothing else.",
            expected_output="The letter a.",
            agent=agent1,
        )
        task2 = Task(
            description="Say 'b' and nothing else.",
            expected_output="The letter b.",
            agent=agent2,
        )

        class SequentialCrewFlow(Flow):
            @start()
            async def crew_a(self):
                c = Crew(agents=[agent1], tasks=[task1], verbose=False)
                return await c.akickoff()

            @listen(crew_a)
            async def crew_b(self, _):
                c = Crew(agents=[agent2], tasks=[task2], verbose=False)
                return await c.akickoff()

        flow = SequentialCrewFlow()
        await flow.akickoff()
        crewai_event_bus.flush()
        crew_started_events = collector.all_of(CrewKickoffStartedEvent)
        assert len(crew_started_events) >= 2
        first = crew_started_events[0]
        second = crew_started_events[1]
        assert first.emission_sequence is not None
        assert second.emission_sequence is not None
        # Sequential listeners imply strictly increasing emission order.
        assert second.emission_sequence > first.emission_sequence

    @pytest.mark.asyncio
    @pytest.mark.vcr()
    async def test_tasks_have_correct_crew_parents(
        self, collector: EventCollector
    ) -> None:
        """Tasks in different crews should have their own crew as parent."""
        agent1 = Agent(
            role="Alpha",
            goal="Do alpha work",
            backstory="You are alpha.",
            verbose=False,
        )
        agent2 = Agent(
            role="Beta",
            goal="Do beta work",
            backstory="You are beta.",
            verbose=False,
        )
        task1 = Task(
            description="Say 'alpha' and nothing else.",
            expected_output="The word alpha.",
            agent=agent1,
        )
        task2 = Task(
            description="Say 'beta' and nothing else.",
            expected_output="The word beta.",
            agent=agent2,
        )

        class ParentTestFlow(Flow):
            @start()
            async def alpha_crew(self):
                c = Crew(agents=[agent1], tasks=[task1], verbose=False)
                return await c.akickoff()

            @listen(alpha_crew)
            async def beta_crew(self, _):
                c = Crew(agents=[agent2], tasks=[task2], verbose=False)
                return await c.akickoff()

        flow = ParentTestFlow()
        await flow.akickoff()
        crewai_event_bus.flush()
        crew_started_events = collector.all_of(CrewKickoffStartedEvent)
        task_started_events = collector.all_of(TaskStartedEvent)
        assert len(crew_started_events) >= 2
        assert len(task_started_events) >= 2
        # Pair tasks with crews by collection order: the flow runs the
        # crews sequentially, so task N belongs to crew N.
        crew1_id = crew_started_events[0].event_id
        crew2_id = crew_started_events[1].event_id
        task1_parent = task_started_events[0].parent_event_id
        task2_parent = task_started_events[1].parent_event_id
        assert task1_parent == crew1_id
        assert task2_parent == crew2_id
class TestPreviousEventIdChain:
    """Tests for previous_event_id linear chain tracking."""

    @pytest.mark.asyncio
    async def test_previous_event_id_chain(self) -> None:
        """Events should have previous_event_id pointing to the prior event."""
        from crewai.events.base_events import reset_emission_counter
        from crewai.events.event_context import reset_last_event_id

        # Start from a clean global counter so emission_sequence ordering
        # within this test is meaningful.
        reset_emission_counter()
        reset_last_event_id()
        events: list[BaseEvent] = []

        class SimpleFlow(Flow):
            @start()
            async def step_one(self):
                return "step_one_done"

            @listen(step_one)
            async def step_two(self, result):
                return "step_two_done"

        with crewai_event_bus.scoped_handlers():

            @crewai_event_bus.on(FlowStartedEvent)
            def h1(source, event):
                events.append(event)

            @crewai_event_bus.on(FlowFinishedEvent)
            def h2(source, event):
                events.append(event)

            @crewai_event_bus.on(MethodExecutionStartedEvent)
            def h3(source, event):
                events.append(event)

            @crewai_event_bus.on(MethodExecutionFinishedEvent)
            def h4(source, event):
                events.append(event)

            flow = SimpleFlow()
            await flow.akickoff()
            crewai_event_bus.flush()

        # flow start/finish + two method start/finish pairs.
        assert len(events) >= 4
        all_events = sorted(events, key=lambda e: e.emission_sequence or 0)
        all_event_ids = {e.event_id for e in all_events}
        for event in all_events[1:]:
            assert event.previous_event_id is not None, (
                f"Event {event.type} (seq {event.emission_sequence}) has no previous_event_id"
            )
            # Only check ordering when the predecessor was captured here;
            # uncaptured event types may interleave in the global chain.
            if event.previous_event_id in all_event_ids:
                prev = next(e for e in all_events if e.event_id == event.previous_event_id)
                assert (prev.emission_sequence or 0) < (event.emission_sequence or 0), (
                    f"Event {event.type} (seq {event.emission_sequence}) has previous pointing "
                    f"to {prev.type} (seq {prev.emission_sequence}) which is not earlier"
                )

    @pytest.mark.asyncio
    async def test_first_event_has_previous_pointing_back(self) -> None:
        """Non-first events should have previous_event_id set."""
        from crewai.events.base_events import reset_emission_counter
        from crewai.events.event_context import reset_last_event_id

        events: list[BaseEvent] = []

        class MinimalFlow(Flow):
            @start()
            async def do_nothing(self):
                return "done"

        reset_emission_counter()
        reset_last_event_id()
        with crewai_event_bus.scoped_handlers():

            @crewai_event_bus.on(FlowStartedEvent)
            def capture1(source, event):
                events.append(event)

            @crewai_event_bus.on(FlowFinishedEvent)
            def capture2(source, event):
                events.append(event)

            @crewai_event_bus.on(MethodExecutionStartedEvent)
            def capture3(source, event):
                events.append(event)

            @crewai_event_bus.on(MethodExecutionFinishedEvent)
            def capture4(source, event):
                events.append(event)

            flow = MinimalFlow()
            await flow.akickoff()
            crewai_event_bus.flush()

        assert len(events) >= 2
        # Every event except the very first must link backwards.
        sorted_events = sorted(events, key=lambda e: e.emission_sequence or 0)
        for event in sorted_events[1:]:
            assert event.previous_event_id is not None, (
                f"Event {event.type} (seq {event.emission_sequence}) should have previous_event_id set"
            )
class TestTriggeredByEventId:
"""Tests for triggered_by_event_id causal chain tracking."""
@pytest.mark.asyncio
async def test_triggered_by_event_id_for_listeners(self) -> None:
    """Listener events should have triggered_by_event_id pointing to the triggering method_execution_finished event."""
    from crewai.events.base_events import reset_emission_counter
    from crewai.events.event_context import reset_last_event_id

    reset_emission_counter()
    reset_last_event_id()
    captured: list[BaseEvent] = []

    class ListenerFlow(Flow):
        @start()
        async def start_method(self):
            return "started"

        @listen(start_method)
        async def listener_method(self, result):
            return "listened"

    with crewai_event_bus.scoped_handlers():

        @crewai_event_bus.on(MethodExecutionStartedEvent)
        def on_started(source, event):
            captured.append(event)

        @crewai_event_bus.on(MethodExecutionFinishedEvent)
        def on_finished(source, event):
            captured.append(event)

        flow = ListenerFlow()
        await flow.akickoff()
        crewai_event_bus.flush()

    started = [e for e in captured if isinstance(e, MethodExecutionStartedEvent)]
    finished = [e for e in captured if isinstance(e, MethodExecutionFinishedEvent)]
    assert len(started) >= 2
    assert len(finished) >= 2

    # Helper: earliest event in *pool* for the given method name.
    def find(pool, name):
        return next((e for e in pool if e.method_name == name), None)

    start_method_finished = find(finished, "start_method")
    listener_started = find(started, "listener_method")
    assert start_method_finished is not None
    assert listener_started is not None
    # The listener's start is causally linked to the start method's finish.
    assert listener_started.triggered_by_event_id == start_method_finished.event_id
@pytest.mark.asyncio
async def test_start_method_has_no_triggered_by(self) -> None:
    """Start method events should have triggered_by_event_id=None."""
    from crewai.events.base_events import reset_emission_counter
    from crewai.events.event_context import reset_last_event_id

    reset_emission_counter()
    reset_last_event_id()
    captured: list[BaseEvent] = []

    class StartOnlyFlow(Flow):
        @start()
        async def my_start(self):
            return "started"

    with crewai_event_bus.scoped_handlers():

        @crewai_event_bus.on(MethodExecutionStartedEvent)
        def on_started(source, event):
            captured.append(event)

        flow = StartOnlyFlow()
        await flow.akickoff()
        crewai_event_bus.flush()

    # A start method has no upstream trigger, so the causal link is None.
    start_event = next((e for e in captured if e.method_name == "my_start"), None)
    assert start_event is not None
    assert start_event.triggered_by_event_id is None
@pytest.mark.asyncio
async def test_chained_listeners_triggered_by(self) -> None:
    """Chained listeners should have triggered_by_event_id pointing to their triggering method."""
    from crewai.events.base_events import reset_emission_counter
    from crewai.events.event_context import reset_last_event_id

    reset_emission_counter()
    reset_last_event_id()
    events: list[BaseEvent] = []

    # first -> second -> third, each step triggered by the previous one.
    class ChainedFlow(Flow):
        @start()
        async def first(self):
            return "first"

        @listen(first)
        async def second(self, result):
            return "second"

        @listen(second)
        async def third(self, result):
            return "third"

    with crewai_event_bus.scoped_handlers():

        @crewai_event_bus.on(MethodExecutionStartedEvent)
        def capture_started(source, event):
            events.append(event)

        @crewai_event_bus.on(MethodExecutionFinishedEvent)
        def capture_finished(source, event):
            events.append(event)

        flow = ChainedFlow()
        await flow.akickoff()
        crewai_event_bus.flush()

    started_events = [e for e in events if isinstance(e, MethodExecutionStartedEvent)]
    finished_events = [e for e in events if isinstance(e, MethodExecutionFinishedEvent)]
    first_finished = next(
        (e for e in finished_events if e.method_name == "first"), None
    )
    second_started = next(
        (e for e in started_events if e.method_name == "second"), None
    )
    second_finished = next(
        (e for e in finished_events if e.method_name == "second"), None
    )
    third_started = next(
        (e for e in started_events if e.method_name == "third"), None
    )
    assert first_finished is not None
    assert second_started is not None
    assert second_finished is not None
    assert third_started is not None
    # Each listener is triggered by the finish of its upstream method.
    assert second_started.triggered_by_event_id == first_finished.event_id
    assert third_started.triggered_by_event_id == second_finished.event_id
@pytest.mark.asyncio
async def test_parallel_listeners_same_trigger(self) -> None:
    """Parallel listeners should all have triggered_by_event_id pointing to the same triggering event."""
    from crewai.events.base_events import reset_emission_counter
    from crewai.events.event_context import reset_last_event_id

    reset_emission_counter()
    reset_last_event_id()
    events: list[BaseEvent] = []

    # Three listeners all fan out from the single `trigger` method.
    class ParallelFlow(Flow):
        @start()
        async def trigger(self):
            return "trigger"

        @listen(trigger)
        async def listener_a(self, result):
            return "a"

        @listen(trigger)
        async def listener_b(self, result):
            return "b"

        @listen(trigger)
        async def listener_c(self, result):
            return "c"

    with crewai_event_bus.scoped_handlers():

        @crewai_event_bus.on(MethodExecutionStartedEvent)
        def capture_started(source, event):
            events.append(event)

        @crewai_event_bus.on(MethodExecutionFinishedEvent)
        def capture_finished(source, event):
            events.append(event)

        flow = ParallelFlow()
        await flow.akickoff()
        crewai_event_bus.flush()

    started_events = [e for e in events if isinstance(e, MethodExecutionStartedEvent)]
    finished_events = [e for e in events if isinstance(e, MethodExecutionFinishedEvent)]
    trigger_finished = next(
        (e for e in finished_events if e.method_name == "trigger"), None
    )
    listener_a_started = next(
        (e for e in started_events if e.method_name == "listener_a"), None
    )
    listener_b_started = next(
        (e for e in started_events if e.method_name == "listener_b"), None
    )
    listener_c_started = next(
        (e for e in started_events if e.method_name == "listener_c"), None
    )
    assert trigger_finished is not None
    assert listener_a_started is not None
    assert listener_b_started is not None
    assert listener_c_started is not None
    # All parallel listeners should point to the same triggering event
    assert listener_a_started.triggered_by_event_id == trigger_finished.event_id
    assert listener_b_started.triggered_by_event_id == trigger_finished.event_id
    assert listener_c_started.triggered_by_event_id == trigger_finished.event_id
@pytest.mark.asyncio
async def test_or_condition_triggered_by(self) -> None:
    """Listener with OR condition should have triggered_by_event_id pointing to whichever method triggered it."""
    from crewai.events.base_events import reset_emission_counter
    from crewai.events.event_context import reset_last_event_id
    from crewai.flow.flow import or_

    reset_emission_counter()
    reset_last_event_id()
    events: list[BaseEvent] = []

    # `after_either` fires on path_a OR "path_b"; only path_a exists,
    # so path_a must be the trigger.
    class OrConditionFlow(Flow):
        @start()
        async def path_a(self):
            return "a"

        @listen(or_(path_a, "path_b"))
        async def after_either(self, result):
            return "done"

    with crewai_event_bus.scoped_handlers():

        @crewai_event_bus.on(MethodExecutionStartedEvent)
        def capture_started(source, event):
            events.append(event)

        @crewai_event_bus.on(MethodExecutionFinishedEvent)
        def capture_finished(source, event):
            events.append(event)

        flow = OrConditionFlow()
        await flow.akickoff()
        crewai_event_bus.flush()

    started_events = [e for e in events if isinstance(e, MethodExecutionStartedEvent)]
    finished_events = [e for e in events if isinstance(e, MethodExecutionFinishedEvent)]
    path_a_finished = next(
        (e for e in finished_events if e.method_name == "path_a"), None
    )
    after_either_started = next(
        (e for e in started_events if e.method_name == "after_either"), None
    )
    assert path_a_finished is not None
    assert after_either_started is not None
    # The OR listener should be triggered by path_a since that's what ran
    assert after_either_started.triggered_by_event_id == path_a_finished.event_id
@pytest.mark.asyncio
async def test_router_triggered_by(self) -> None:
    """Events from router-triggered paths should have correct triggered_by_event_id."""
    from crewai.events.base_events import reset_emission_counter
    from crewai.events.event_context import reset_last_event_id
    from crewai.flow.flow import router

    reset_emission_counter()
    reset_last_event_id()
    events: list[BaseEvent] = []

    # begin -> router(route_decision) -> "approved" -> handle_approved.
    class RouterFlow(Flow):
        @start()
        async def begin(self):
            return "begin"

        @router(begin)
        async def route_decision(self, result):
            return "approved"

        @listen("approved")
        async def handle_approved(self):
            return "handled"

    with crewai_event_bus.scoped_handlers():

        @crewai_event_bus.on(MethodExecutionStartedEvent)
        def capture_started(source, event):
            events.append(event)

        @crewai_event_bus.on(MethodExecutionFinishedEvent)
        def capture_finished(source, event):
            events.append(event)

        flow = RouterFlow()
        await flow.akickoff()
        crewai_event_bus.flush()

    started_events = [e for e in events if isinstance(e, MethodExecutionStartedEvent)]
    finished_events = [e for e in events if isinstance(e, MethodExecutionFinishedEvent)]
    begin_finished = next(
        (e for e in finished_events if e.method_name == "begin"), None
    )
    route_decision_started = next(
        (e for e in started_events if e.method_name == "route_decision"), None
    )
    route_decision_finished = next(
        (e for e in finished_events if e.method_name == "route_decision"), None
    )
    handle_approved_started = next(
        (e for e in started_events if e.method_name == "handle_approved"), None
    )
    assert begin_finished is not None
    assert route_decision_started is not None
    assert route_decision_finished is not None
    assert handle_approved_started is not None
    # Router should be triggered by begin
    assert route_decision_started.triggered_by_event_id == begin_finished.event_id
    # Handler should be triggered by router's finished event
    assert handle_approved_started.triggered_by_event_id == route_decision_finished.event_id
@pytest.mark.asyncio
async def test_multiple_kickoffs_maintain_chains(self) -> None:
    """Multiple akickoff() calls should maintain correct triggered_by chains for each execution."""
    from crewai.events.base_events import reset_emission_counter
    from crewai.events.event_context import reset_last_event_id

    reset_emission_counter()
    reset_last_event_id()
    first_run_events: list[BaseEvent] = []
    second_run_events: list[BaseEvent] = []
    # Bind the routing flag BEFORE registering the handlers. The old
    # version bound it after registration, so a handler firing early
    # would have raised NameError on the unbound closure variable.
    capturing_second = False

    class ReusableFlow(Flow):
        @start()
        async def begin(self):
            return "begin"

        @listen(begin)
        async def process(self, result):
            return "processed"

    with crewai_event_bus.scoped_handlers():

        @crewai_event_bus.on(MethodExecutionStartedEvent)
        def capture_started(source, event):
            # Route the event to whichever run is currently executing.
            if capturing_second:
                second_run_events.append(event)
            else:
                first_run_events.append(event)

        @crewai_event_bus.on(MethodExecutionFinishedEvent)
        def capture_finished(source, event):
            if capturing_second:
                second_run_events.append(event)
            else:
                first_run_events.append(event)

        # First kickoff; flush BEFORE flipping the flag so every
        # first-run event lands in first_run_events.
        flow1 = ReusableFlow()
        await flow1.akickoff()
        crewai_event_bus.flush()

        # Second kickoff
        capturing_second = True
        flow2 = ReusableFlow()
        await flow2.akickoff()
        crewai_event_bus.flush()

    # Should have events from both runs
    assert len(first_run_events) >= 4  # 2 started + 2 finished
    assert len(second_run_events) >= 4
    # Check first run's triggered_by chain
    first_started = [e for e in first_run_events if isinstance(e, MethodExecutionStartedEvent)]
    first_finished = [e for e in first_run_events if isinstance(e, MethodExecutionFinishedEvent)]
    first_begin_finished = next(
        (e for e in first_finished if e.method_name == "begin"), None
    )
    first_process_started = next(
        (e for e in first_started if e.method_name == "process"), None
    )
    assert first_begin_finished is not None
    assert first_process_started is not None
    assert first_process_started.triggered_by_event_id == first_begin_finished.event_id
    # Check second run's triggered_by chain
    second_started = [e for e in second_run_events if isinstance(e, MethodExecutionStartedEvent)]
    second_finished = [e for e in second_run_events if isinstance(e, MethodExecutionFinishedEvent)]
    second_begin_finished = next(
        (e for e in second_finished if e.method_name == "begin"), None
    )
    second_process_started = next(
        (e for e in second_started if e.method_name == "process"), None
    )
    assert second_begin_finished is not None
    assert second_process_started is not None
    assert second_process_started.triggered_by_event_id == second_begin_finished.event_id
    # Verify the two runs have different event_ids (not reusing)
    assert first_begin_finished.event_id != second_begin_finished.event_id
    # Verify each run has its own independent previous_event_id chain
    # (chains reset at each top-level execution)
    first_sorted = sorted(first_run_events, key=lambda e: e.emission_sequence or 0)
    for event in first_sorted[1:]:
        assert event.previous_event_id is not None, (
            f"First run event {event.type} (seq {event.emission_sequence}) should have previous_event_id"
        )
    second_sorted = sorted(second_run_events, key=lambda e: e.emission_sequence or 0)
    for event in second_sorted[1:]:
        assert event.previous_event_id is not None, (
            f"Second run event {event.type} (seq {event.emission_sequence}) should have previous_event_id"
        )
@pytest.mark.asyncio
async def test_parallel_flows_maintain_separate_triggered_by_chains(self) -> None:
    """Parallel flow executions should maintain correct triggered_by chains independently."""
    import asyncio

    from crewai.events.base_events import reset_emission_counter
    from crewai.events.event_context import reset_last_event_id

    reset_emission_counter()
    reset_last_event_id()
    events: list[BaseEvent] = []

    class ParallelTestFlow(Flow):
        def __init__(self, name: str):
            super().__init__()
            self.flow_name = name

        @start()
        async def begin(self):
            await asyncio.sleep(0.01)  # Small delay to interleave
            return self.flow_name

        @listen(begin)
        async def process(self, result):
            await asyncio.sleep(0.01)
            return f"{result}_processed"

    with crewai_event_bus.scoped_handlers():

        @crewai_event_bus.on(MethodExecutionStartedEvent)
        def capture_started(source, event):
            events.append(event)

        @crewai_event_bus.on(MethodExecutionFinishedEvent)
        def capture_finished(source, event):
            events.append(event)

        # Run two flows in parallel
        flow_a = ParallelTestFlow("flow_a")
        flow_b = ParallelTestFlow("flow_b")
        await asyncio.gather(flow_a.akickoff(), flow_b.akickoff())
        crewai_event_bus.flush()

    # Should have events from both flows (4 events each = 8 total)
    assert len(events) >= 8
    started_events = [e for e in events if isinstance(e, MethodExecutionStartedEvent)]
    finished_events = [e for e in events if isinstance(e, MethodExecutionFinishedEvent)]
    # Find flow_a's events by checking the result contains "flow_a"
    flow_a_begin_finished = [
        e for e in finished_events
        if e.method_name == "begin" and "flow_a" in str(e.result)
    ]
    # NOTE(review): this list is NOT filtered to flow_a — it holds every
    # `process` started event from both flows; the loop below therefore
    # only checks each process was triggered by SOME begin. Confirm
    # whether per-flow pairing was intended.
    flow_a_process_started = [
        e for e in started_events
        if e.method_name == "process"
    ]
    flow_b_begin_finished = [
        e for e in finished_events
        if e.method_name == "begin" and "flow_b" in str(e.result)
    ]
    assert len(flow_a_begin_finished) >= 1
    assert len(flow_b_begin_finished) >= 1
    # Each flow's process should be triggered by its own begin
    # Find which process events were triggered by which begin events
    for process_event in flow_a_process_started:
        trigger_id = process_event.triggered_by_event_id
        assert trigger_id is not None
        # The triggering event should be a begin finished event
        triggering_event = next(
            (e for e in finished_events if e.event_id == trigger_id), None
        )
        assert triggering_event is not None
        assert triggering_event.method_name == "begin"
    # Verify previous_event_id forms a valid chain across all events
    all_sorted = sorted(events, key=lambda e: e.emission_sequence or 0)
    for event in all_sorted[1:]:
        assert event.previous_event_id is not None
@pytest.mark.asyncio
async def test_and_condition_triggered_by_last_method(self) -> None:
"""AND condition listener should have triggered_by_event_id pointing to the last completing method."""
from crewai.events.base_events import reset_emission_counter
from crewai.events.event_context import reset_last_event_id
from crewai.flow.flow import and_
reset_emission_counter()
reset_last_event_id()
events: list[BaseEvent] = []
class AndConditionFlow(Flow):
@start()
async def method_a(self):
return "a"
@listen(method_a)
async def method_b(self, result):
return "b"
@listen(and_(method_a, method_b))
async def after_both(self, result):
return "both_done"
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(MethodExecutionStartedEvent)
def capture_started(source, event):
events.append(event)
@crewai_event_bus.on(MethodExecutionFinishedEvent)
def capture_finished(source, event):
events.append(event)
flow = AndConditionFlow()
await flow.akickoff()
crewai_event_bus.flush()
started_events = [e for e in events if isinstance(e, MethodExecutionStartedEvent)]
finished_events = [e for e in events if isinstance(e, MethodExecutionFinishedEvent)]
method_b_finished = next(
(e for e in finished_events if e.method_name == "method_b"), None
)
after_both_started = next(
(e for e in started_events if e.method_name == "after_both"), None
)
assert method_b_finished is not None
assert after_both_started is not None
# The AND listener should be triggered by method_b (the last one to complete)
assert after_both_started.triggered_by_event_id == method_b_finished.event_id
@pytest.mark.asyncio
async def test_exception_handling_triggered_by(self) -> None:
"""Events emitted after exception should still have correct triggered_by."""
from crewai.events.base_events import reset_emission_counter
from crewai.events.event_context import reset_last_event_id
from crewai.events.types.flow_events import MethodExecutionFailedEvent
reset_emission_counter()
reset_last_event_id()
events: list[BaseEvent] = []
class ExceptionFlow(Flow):
@start()
async def will_fail(self):
raise ValueError("intentional error")
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(MethodExecutionStartedEvent)
def capture_started(source, event):
events.append(event)
@crewai_event_bus.on(MethodExecutionFinishedEvent)
def capture_finished(source, event):
events.append(event)
@crewai_event_bus.on(MethodExecutionFailedEvent)
def capture_failed(source, event):
events.append(event)
@crewai_event_bus.on(FlowStartedEvent)
def capture_flow_started(source, event):
events.append(event)
flow = ExceptionFlow()
try:
await flow.akickoff()
except ValueError:
pass # Expected
crewai_event_bus.flush()
# Even with exception, events should have proper previous_event_id chain
all_sorted = sorted(events, key=lambda e: e.emission_sequence or 0)
for event in all_sorted[1:]:
assert event.previous_event_id is not None, (
f"Event {event.type} (seq {event.emission_sequence}) should have previous_event_id"
)
@pytest.mark.asyncio
async def test_sync_method_in_flow_triggered_by(self) -> None:
"""Synchronous methods should still have correct triggered_by."""
from crewai.events.base_events import reset_emission_counter
from crewai.events.event_context import reset_last_event_id
reset_emission_counter()
reset_last_event_id()
events: list[BaseEvent] = []
class SyncFlow(Flow):
@start()
def sync_start(self): # Synchronous method
return "sync_done"
@listen(sync_start)
async def async_listener(self, result):
return "async_done"
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(MethodExecutionStartedEvent)
def capture_started(source, event):
events.append(event)
@crewai_event_bus.on(MethodExecutionFinishedEvent)
def capture_finished(source, event):
events.append(event)
flow = SyncFlow()
await flow.akickoff()
crewai_event_bus.flush()
started_events = [e for e in events if isinstance(e, MethodExecutionStartedEvent)]
finished_events = [e for e in events if isinstance(e, MethodExecutionFinishedEvent)]
sync_start_finished = next(
(e for e in finished_events if e.method_name == "sync_start"), None
)
async_listener_started = next(
(e for e in started_events if e.method_name == "async_listener"), None
)
assert sync_start_finished is not None
assert async_listener_started is not None
assert async_listener_started.triggered_by_event_id == sync_start_finished.event_id
@pytest.mark.asyncio
async def test_multiple_start_methods_triggered_by(self) -> None:
"""Multiple start methods should each have triggered_by_event_id=None."""
from crewai.events.base_events import reset_emission_counter
from crewai.events.event_context import reset_last_event_id
reset_emission_counter()
reset_last_event_id()
events: list[BaseEvent] = []
class MultiStartFlow(Flow):
@start()
async def start_one(self):
return "one"
@start()
async def start_two(self):
return "two"
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(MethodExecutionStartedEvent)
def capture_started(source, event):
events.append(event)
flow = MultiStartFlow()
await flow.akickoff()
crewai_event_bus.flush()
started_events = [e for e in events if isinstance(e, MethodExecutionStartedEvent)]
start_one = next(
(e for e in started_events if e.method_name == "start_one"), None
)
start_two = next(
(e for e in started_events if e.method_name == "start_two"), None
)
assert start_one is not None
assert start_two is not None
# Both start methods should have no triggered_by (they're entry points)
assert start_one.triggered_by_event_id is None
assert start_two.triggered_by_event_id is None
@pytest.mark.asyncio
async def test_none_return_triggered_by(self) -> None:
"""Methods returning None should still have correct triggered_by chain."""
from crewai.events.base_events import reset_emission_counter
from crewai.events.event_context import reset_last_event_id
reset_emission_counter()
reset_last_event_id()
events: list[BaseEvent] = []
class NoneReturnFlow(Flow):
@start()
async def returns_none(self):
return None
@listen(returns_none)
async def after_none(self, result):
return "got_none"
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(MethodExecutionStartedEvent)
def capture_started(source, event):
events.append(event)
@crewai_event_bus.on(MethodExecutionFinishedEvent)
def capture_finished(source, event):
events.append(event)
flow = NoneReturnFlow()
await flow.akickoff()
crewai_event_bus.flush()
started_events = [e for e in events if isinstance(e, MethodExecutionStartedEvent)]
finished_events = [e for e in events if isinstance(e, MethodExecutionFinishedEvent)]
returns_none_finished = next(
(e for e in finished_events if e.method_name == "returns_none"), None
)
after_none_started = next(
(e for e in started_events if e.method_name == "after_none"), None
)
assert returns_none_finished is not None
assert after_none_started is not None
assert after_none_started.triggered_by_event_id == returns_none_finished.event_id
@pytest.mark.asyncio
async def test_deeply_nested_chain_triggered_by(self) -> None:
"""Deeply nested listener chains (5+) should maintain correct triggered_by."""
from crewai.events.base_events import reset_emission_counter
from crewai.events.event_context import reset_last_event_id
reset_emission_counter()
reset_last_event_id()
events: list[BaseEvent] = []
class DeepChainFlow(Flow):
@start()
async def level_0(self):
return "0"
@listen(level_0)
async def level_1(self, result):
return "1"
@listen(level_1)
async def level_2(self, result):
return "2"
@listen(level_2)
async def level_3(self, result):
return "3"
@listen(level_3)
async def level_4(self, result):
return "4"
@listen(level_4)
async def level_5(self, result):
return "5"
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(MethodExecutionStartedEvent)
def capture_started(source, event):
events.append(event)
@crewai_event_bus.on(MethodExecutionFinishedEvent)
def capture_finished(source, event):
events.append(event)
flow = DeepChainFlow()
await flow.akickoff()
crewai_event_bus.flush()
started_events = [e for e in events if isinstance(e, MethodExecutionStartedEvent)]
finished_events = [e for e in events if isinstance(e, MethodExecutionFinishedEvent)]
# Verify each level triggers the next
for i in range(5):
prev_finished = next(
(e for e in finished_events if e.method_name == f"level_{i}"), None
)
next_started = next(
(e for e in started_events if e.method_name == f"level_{i+1}"), None
)
assert prev_finished is not None, f"level_{i} finished event not found"
assert next_started is not None, f"level_{i+1} started event not found"
assert next_started.triggered_by_event_id == prev_finished.event_id, (
f"level_{i+1} should be triggered by level_{i}"
)
@pytest.mark.asyncio
async def test_router_conditional_path_triggered_by(self) -> None:
"""Router with conditional paths should have correct triggered_by for the selected path."""
from crewai.events.base_events import reset_emission_counter
from crewai.events.event_context import reset_last_event_id
from crewai.flow.flow import router
reset_emission_counter()
reset_last_event_id()
events: list[BaseEvent] = []
class ConditionalRouterFlow(Flow):
@start()
async def begin(self):
return "begin"
@router(begin)
async def conditional_router(self, result):
# Conditionally return one route
return "path_a"
@listen("path_a")
async def handle_path_a(self):
return "a_done"
@listen("path_b")
async def handle_path_b(self):
return "b_done"
with crewai_event_bus.scoped_handlers():
@crewai_event_bus.on(MethodExecutionStartedEvent)
def capture_started(source, event):
events.append(event)
@crewai_event_bus.on(MethodExecutionFinishedEvent)
def capture_finished(source, event):
events.append(event)
flow = ConditionalRouterFlow()
await flow.akickoff()
crewai_event_bus.flush()
started_events = [e for e in events if isinstance(e, MethodExecutionStartedEvent)]
finished_events = [e for e in events if isinstance(e, MethodExecutionFinishedEvent)]
router_finished = next(
(e for e in finished_events if e.method_name == "conditional_router"), None
)
handle_path_a_started = next(
(e for e in started_events if e.method_name == "handle_path_a"), None
)
handle_path_b_started = next(
(e for e in started_events if e.method_name == "handle_path_b"), None
)
assert router_finished is not None
assert handle_path_a_started is not None
# path_b should NOT be executed since router returned "path_a"
assert handle_path_b_started is None
# The selected path should be triggered by the router
assert handle_path_a_started.triggered_by_event_id == router_finished.event_id
class TestCrewEventsInFlowTriggeredBy:
    """Tests for triggered_by in crew events running inside flows."""

    @pytest.mark.asyncio
    async def test_flow_listener_triggered_by_in_nested_context(self) -> None:
        """Nested listener contexts should maintain correct triggered_by chains."""
        from crewai.events.base_events import reset_emission_counter
        from crewai.events.event_context import reset_last_event_id
        reset_emission_counter()
        reset_last_event_id()
        recorded: list[BaseEvent] = []

        class NestedFlow(Flow):
            @start()
            async def trigger_method(self):
                return "trigger"

            @listen(trigger_method)
            async def middle_method(self, result):
                return "middle"

            @listen(middle_method)
            async def final_method(self, result):
                return "final"

        with crewai_event_bus.scoped_handlers():
            @crewai_event_bus.on(MethodExecutionStartedEvent)
            def on_method_started(source, event):
                recorded.append(event)

            @crewai_event_bus.on(MethodExecutionFinishedEvent)
            def on_method_finished(source, event):
                recorded.append(event)

            await NestedFlow().akickoff()
            crewai_event_bus.flush()

        def started(name):
            return next(
                (
                    e
                    for e in recorded
                    if isinstance(e, MethodExecutionStartedEvent)
                    and e.method_name == name
                ),
                None,
            )

        def finished(name):
            return next(
                (
                    e
                    for e in recorded
                    if isinstance(e, MethodExecutionFinishedEvent)
                    and e.method_name == name
                ),
                None,
            )

        trigger_done = finished("trigger_method")
        middle_begun = started("middle_method")
        middle_done = finished("middle_method")
        final_begun = started("final_method")
        assert trigger_done is not None
        assert middle_begun is not None
        assert middle_done is not None
        assert final_begun is not None
        # Each listener is triggered by its upstream method's completion.
        assert middle_begun.triggered_by_event_id == trigger_done.event_id
        assert final_begun.triggered_by_event_id == middle_done.event_id
        # Every event after the first links back via previous_event_id.
        ordered = sorted(recorded, key=lambda e: e.emission_sequence or 0)
        for event in ordered[1:]:
            assert event.previous_event_id is not None

    def test_sync_kickoff_triggered_by(self) -> None:
        """Synchronous kickoff() should maintain correct triggered_by chains."""
        from crewai.events.base_events import reset_emission_counter
        from crewai.events.event_context import reset_last_event_id
        reset_emission_counter()
        reset_last_event_id()
        recorded: list[BaseEvent] = []

        class SyncKickoffFlow(Flow):
            @start()
            def start_method(self):
                return "started"

            @listen(start_method)
            def listener_method(self, result):
                return "listened"

        with crewai_event_bus.scoped_handlers():
            @crewai_event_bus.on(MethodExecutionStartedEvent)
            def on_started(source, event):
                recorded.append(event)

            @crewai_event_bus.on(MethodExecutionFinishedEvent)
            def on_finished(source, event):
                recorded.append(event)

            SyncKickoffFlow().kickoff()  # Synchronous kickoff
            crewai_event_bus.flush()

        start_done = next(
            (
                e
                for e in recorded
                if isinstance(e, MethodExecutionFinishedEvent)
                and e.method_name == "start_method"
            ),
            None,
        )
        listener_begun = next(
            (
                e
                for e in recorded
                if isinstance(e, MethodExecutionStartedEvent)
                and e.method_name == "listener_method"
            ),
            None,
        )
        assert start_done is not None
        assert listener_begun is not None
        # Listener should be triggered by start_method
        assert listener_begun.triggered_by_event_id == start_done.event_id
        # Verify previous_event_id chain
        ordered = sorted(recorded, key=lambda e: e.emission_sequence or 0)
        for event in ordered[1:]:
            assert event.previous_event_id is not None
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/events/test_event_ordering.py",
"license": "MIT License",
"lines": 1295,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/src/crewai/cli/authentication/providers/keycloak.py | from crewai.cli.authentication.providers.base_provider import BaseProvider
class KeycloakProvider(BaseProvider):
    """OAuth2 provider for Keycloak's device-authorization flow.

    Every endpoint lives under ``https://<domain>/realms/<realm>``; the realm
    name is read from ``settings.extra``.
    """

    def get_authorize_url(self) -> str:
        """Device-authorization endpoint for the configured realm."""
        return f"{self._realm_base_url()}/protocol/openid-connect/auth/device"

    def get_token_url(self) -> str:
        """Token endpoint for the configured realm."""
        return f"{self._realm_base_url()}/protocol/openid-connect/token"

    def get_jwks_url(self) -> str:
        """JWKS (signing key set) endpoint for the configured realm."""
        return f"{self._realm_base_url()}/protocol/openid-connect/certs"

    def get_issuer(self) -> str:
        """Issuer identifier — the realm base URL itself."""
        return self._realm_base_url()

    def get_audience(self) -> str:
        """Configured audience, or a placeholder value when none is set."""
        return self.settings.audience or "no-audience-provided"

    def get_client_id(self) -> str:
        """Configured OAuth2 client id.

        Raises:
            ValueError: If no client id is configured.
        """
        client_id = self.settings.client_id
        if client_id is None:
            raise ValueError(
                "Client ID is required. Please set it in the configuration."
            )
        return client_id

    def get_required_fields(self) -> list[str]:
        """Extra settings keys this provider requires."""
        return ["realm"]

    def _realm_base_url(self) -> str:
        """Base URL of the realm: normalized domain plus the realm path."""
        return f"{self._oauth2_base_url()}/realms/{self.settings.extra.get('realm')}"

    def _oauth2_base_url(self) -> str:
        """Normalize the configured domain to an ``https://`` base URL."""
        host = self.settings.domain.removeprefix("https://").removeprefix("http://")
        return f"https://{host}"
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/cli/authentication/providers/keycloak.py",
"license": "MIT License",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai/tests/cli/authentication/providers/test_keycloak.py | import pytest
from crewai.cli.authentication.main import Oauth2Settings
from crewai.cli.authentication.providers.keycloak import KeycloakProvider
class TestKeycloakProvider:
    """Unit tests for KeycloakProvider URL construction and settings access."""

    @staticmethod
    def _build_settings(
        domain: str,
        realm: str,
        client_id: str = "test-client",
        audience: str = "test-audience",
    ) -> Oauth2Settings:
        """Build Oauth2Settings for a Keycloak provider under test."""
        return Oauth2Settings(
            provider="keycloak",
            domain=domain,
            client_id=client_id,
            audience=audience,
            extra={"realm": realm},
        )

    @classmethod
    def _build_provider(cls, domain: str, realm: str) -> KeycloakProvider:
        """Build a provider for an arbitrary domain/realm pair."""
        return KeycloakProvider(cls._build_settings(domain, realm))

    @pytest.fixture(autouse=True)
    def setup_method(self):
        self.valid_settings = self._build_settings(
            "keycloak.example.com", "test-realm", client_id="test-client-id"
        )
        self.provider = KeycloakProvider(self.valid_settings)

    def test_initialization_with_valid_settings(self):
        provider = KeycloakProvider(self.valid_settings)
        settings = provider.settings
        assert settings == self.valid_settings
        assert settings.provider == "keycloak"
        assert settings.domain == "keycloak.example.com"
        assert settings.client_id == "test-client-id"
        assert settings.audience == "test-audience"
        assert settings.extra.get("realm") == "test-realm"

    def test_get_authorize_url(self):
        assert self.provider.get_authorize_url() == (
            "https://keycloak.example.com/realms/test-realm"
            "/protocol/openid-connect/auth/device"
        )

    def test_get_authorize_url_with_different_domain(self):
        provider = self._build_provider("auth.company.com", "my-realm")
        assert provider.get_authorize_url() == (
            "https://auth.company.com/realms/my-realm"
            "/protocol/openid-connect/auth/device"
        )

    def test_get_token_url(self):
        assert self.provider.get_token_url() == (
            "https://keycloak.example.com/realms/test-realm"
            "/protocol/openid-connect/token"
        )

    def test_get_token_url_with_different_domain(self):
        provider = self._build_provider("sso.enterprise.com", "enterprise-realm")
        assert provider.get_token_url() == (
            "https://sso.enterprise.com/realms/enterprise-realm"
            "/protocol/openid-connect/token"
        )

    def test_get_jwks_url(self):
        assert self.provider.get_jwks_url() == (
            "https://keycloak.example.com/realms/test-realm"
            "/protocol/openid-connect/certs"
        )

    def test_get_jwks_url_with_different_domain(self):
        provider = self._build_provider("identity.org", "org-realm")
        assert provider.get_jwks_url() == (
            "https://identity.org/realms/org-realm"
            "/protocol/openid-connect/certs"
        )

    def test_get_issuer(self):
        assert self.provider.get_issuer() == (
            "https://keycloak.example.com/realms/test-realm"
        )

    def test_get_issuer_with_different_domain(self):
        provider = self._build_provider("login.myapp.io", "app-realm")
        assert provider.get_issuer() == "https://login.myapp.io/realms/app-realm"

    def test_get_audience(self):
        assert self.provider.get_audience() == "test-audience"

    def test_get_client_id(self):
        assert self.provider.get_client_id() == "test-client-id"

    def test_get_required_fields(self):
        assert self.provider.get_required_fields() == ["realm"]

    def test_oauth2_base_url(self):
        assert self.provider._oauth2_base_url() == "https://keycloak.example.com"

    def test_oauth2_base_url_strips_https_prefix(self):
        settings = self._build_settings(
            "https://keycloak.example.com", "test-realm", client_id="test-client-id"
        )
        provider = KeycloakProvider(settings)
        assert provider._oauth2_base_url() == "https://keycloak.example.com"

    def test_oauth2_base_url_strips_http_prefix(self):
        settings = self._build_settings(
            "http://keycloak.example.com", "test-realm", client_id="test-client-id"
        )
        provider = KeycloakProvider(settings)
        assert provider._oauth2_base_url() == "https://keycloak.example.com"
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/cli/authentication/providers/test_keycloak.py",
"license": "MIT License",
"lines": 120,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/src/crewai/a2a/utils/task.py | """A2A task utilities for server-side task management."""
from __future__ import annotations
import asyncio
import base64
from collections.abc import Callable, Coroutine
from datetime import datetime
from functools import wraps
import json
import logging
import os
from typing import TYPE_CHECKING, Any, ParamSpec, TypeVar, TypedDict, cast
from urllib.parse import urlparse
from a2a.server.agent_execution import RequestContext
from a2a.server.events import EventQueue
from a2a.types import (
Artifact,
FileWithBytes,
FileWithUri,
InternalError,
InvalidParamsError,
Message,
Part,
Task as A2ATask,
TaskState,
TaskStatus,
TaskStatusUpdateEvent,
)
from a2a.utils import (
get_data_parts,
get_file_parts,
new_agent_text_message,
new_data_artifact,
new_text_artifact,
)
from a2a.utils.errors import ServerError
from aiocache import SimpleMemoryCache, caches # type: ignore[import-untyped]
from pydantic import BaseModel
from crewai.a2a.utils.agent_card import _get_server_config
from crewai.a2a.utils.content_type import validate_message_parts
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.a2a_events import (
A2AServerTaskCanceledEvent,
A2AServerTaskCompletedEvent,
A2AServerTaskFailedEvent,
A2AServerTaskStartedEvent,
)
from crewai.task import Task
from crewai.utilities.pydantic_schema_utils import create_model_from_schema
if TYPE_CHECKING:
from crewai.a2a.extensions.server import ExtensionContext, ServerExtensionRegistry
from crewai.agent import Agent
logger = logging.getLogger(__name__)
P = ParamSpec("P")
T = TypeVar("T")
class RedisCacheConfig(TypedDict, total=False):
    """Configuration for aiocache Redis backend.

    All keys are optional (``total=False``); ``_parse_redis_url`` only
    includes the values it can derive from the REDIS_URL.
    """
    cache: str  # Dotted path of the aiocache backend class.
    endpoint: str  # Redis server hostname.
    port: int  # Redis TCP port.
    db: int  # Numeric Redis database index parsed from the URL path.
    password: str  # Password from the URL; omitted when none is given.
def _parse_redis_url(url: str) -> RedisCacheConfig:
"""Parse a Redis URL into aiocache configuration.
Args:
url: Redis connection URL (e.g., redis://localhost:6379/0).
Returns:
Configuration dict for aiocache.RedisCache.
"""
parsed = urlparse(url)
config: RedisCacheConfig = {
"cache": "aiocache.RedisCache",
"endpoint": parsed.hostname or "localhost",
"port": parsed.port or 6379,
}
if parsed.path and parsed.path != "/":
try:
config["db"] = int(parsed.path.lstrip("/"))
except ValueError:
pass
if parsed.password:
config["password"] = parsed.password
return config
# Select the aiocache backend at import time: Redis when REDIS_URL is set
# (enables cross-process cancel pub/sub), otherwise an in-process memory
# cache, which the cancel logic below detects and polls instead.
_redis_url = os.environ.get("REDIS_URL")
caches.set_config(
    {
        "default": _parse_redis_url(_redis_url)
        if _redis_url
        else {
            "cache": "aiocache.SimpleMemoryCache",
        }
    }
)
def cancellable(
    fn: Callable[P, Coroutine[Any, Any, T]],
) -> Callable[P, Coroutine[Any, Any, T]]:
    """Decorator that enables cancellation for A2A task execution.

    Runs a cancellation watcher concurrently with the wrapped function.
    When a cancel event is published, the execution is cancelled.

    Args:
        fn: The async function to wrap.

    Returns:
        Wrapped function with cancellation support.
    """
    @wraps(fn)
    async def wrapper(*args: P.args, **kwargs: P.kwargs) -> T:
        """Wrap function with cancellation monitoring."""
        # Locate the RequestContext positionally first, then by keyword.
        context: RequestContext | None = None
        for arg in args:
            if isinstance(arg, RequestContext):
                context = arg
                break
        if context is None:
            context = cast(RequestContext | None, kwargs.get("context"))
        if context is None:
            # No context means no task_id to watch — run without monitoring.
            return await fn(*args, **kwargs)
        task_id = context.task_id
        cache = caches.get("default")
        async def poll_for_cancel() -> bool:
            """Poll cache for cancellation flag."""
            # 100ms polling fallback used when pub/sub is unavailable.
            while True:
                if await cache.get(f"cancel:{task_id}"):
                    return True
                await asyncio.sleep(0.1)
        async def watch_for_cancel() -> bool:
            """Watch for cancellation events via pub/sub or polling."""
            if isinstance(cache, SimpleMemoryCache):
                # In-process cache has no pub/sub channel; poll the flag.
                return await poll_for_cancel()
            try:
                client = cache.client
                pubsub = client.pubsub()
                await pubsub.subscribe(f"cancel:{task_id}")
                async for message in pubsub.listen():
                    if message["type"] == "message":
                        return True
            except (OSError, ConnectionError) as e:
                # Redis connectivity problems degrade gracefully to polling.
                logger.warning(
                    "Cancel watcher Redis error, falling back to polling",
                    extra={"task_id": task_id, "error": str(e)},
                )
                return await poll_for_cancel()
            return False
        # Race the real work against the cancel watcher.
        execute_task = asyncio.create_task(fn(*args, **kwargs))
        cancel_watch = asyncio.create_task(watch_for_cancel())
        try:
            done, _ = await asyncio.wait(
                [execute_task, cancel_watch],
                return_when=asyncio.FIRST_COMPLETED,
            )
            if cancel_watch in done:
                # Cancellation won the race: tear down the work, then propagate.
                execute_task.cancel()
                try:
                    await execute_task
                except asyncio.CancelledError:
                    pass
                raise asyncio.CancelledError(f"Task {task_id} was cancelled")
            cancel_watch.cancel()
            return execute_task.result()
        finally:
            # Clear the flag so a reused task_id is not cancelled immediately.
            await cache.delete(f"cancel:{task_id}")
    return wrapper
def _convert_a2a_files_to_file_inputs(
    a2a_files: list[FileWithBytes | FileWithUri],
) -> dict[str, Any]:
    """Convert a2a file types to crewai FileInput dict.

    Args:
        a2a_files: List of FileWithBytes or FileWithUri from a2a SDK.

    Returns:
        Dictionary mapping file names to FileInput objects; empty when the
        optional crewai_files package is not installed.
    """
    try:
        from crewai_files import File, FileBytes
    except ImportError:
        logger.debug("crewai_files not installed, returning empty file dict")
        return {}
    converted: dict[str, Any] = {}
    # NOTE(review): files that share a name overwrite earlier entries — confirm intended.
    for position, entry in enumerate(a2a_files):
        fallback_name = f"file_{position}"
        if isinstance(entry, FileWithBytes):
            # Inline payloads arrive base64-encoded; decode before wrapping.
            raw = base64.b64decode(entry.bytes)
            converted[entry.name or fallback_name] = File(
                source=FileBytes(data=raw, filename=entry.name)
            )
        elif isinstance(entry, FileWithUri):
            converted[entry.name or fallback_name] = File(source=entry.uri)
    return converted
def _extract_response_schema(parts: list[Part]) -> dict[str, Any] | None:
"""Extract response schema from message parts metadata.
The client may include a JSON schema in TextPart metadata to specify
the expected response format (see delegation.py line 463).
Args:
parts: List of message parts.
Returns:
JSON schema dict if found, None otherwise.
"""
for part in parts:
if part.root.kind == "text" and part.root.metadata:
schema = part.root.metadata.get("schema")
if schema and isinstance(schema, dict):
return schema # type: ignore[no-any-return]
return None
def _create_result_artifact(
    result: Any,
    task_id: str,
) -> Artifact:
    """Create artifact from task result, using DataPart for structured data.

    Args:
        result: The task execution result.
        task_id: The task ID for naming the artifact.

    Returns:
        Artifact with appropriate part type (DataPart for dict/Pydantic, TextPart for strings).
    """
    artifact_name = f"result_{task_id}"
    # Structured results (Pydantic models or plain dicts) become DataParts.
    if isinstance(result, BaseModel):
        return new_data_artifact(artifact_name, result.model_dump())
    if isinstance(result, dict):
        return new_data_artifact(artifact_name, result)
    # Everything else is stringified into a TextPart.
    return new_text_artifact(artifact_name, str(result))
def _build_task_description(
user_message: str,
structured_inputs: list[dict[str, Any]],
) -> str:
"""Build task description including structured data if present.
Args:
user_message: The original user message text.
structured_inputs: List of structured data from DataParts.
Returns:
Task description with structured data appended if present.
"""
if not structured_inputs:
return user_message
structured_json = json.dumps(structured_inputs, indent=2)
return f"{user_message}\n\nStructured Data:\n{structured_json}"
async def execute(
    agent: Agent,
    context: RequestContext,
    event_queue: EventQueue,
) -> None:
    """Execute an A2A task using a CrewAI agent.

    Args:
        agent: The CrewAI agent to execute the task.
        context: The A2A request context containing the user's message.
        event_queue: The event queue for sending responses back.
    """
    # Plain execution path: no extension hooks (see execute_with_extensions).
    await _execute_impl(
        agent,
        context,
        event_queue,
        extension_registry=None,
        extension_context=None,
    )
@cancellable
async def _execute_impl(
    agent: Agent,
    context: RequestContext,
    event_queue: EventQueue,
    extension_registry: ServerExtensionRegistry | None,
    extension_context: ExtensionContext | None,
) -> None:
    """Internal implementation for task execution with optional extensions.

    Args:
        agent: The CrewAI agent that executes the task.
        context: A2A request context carrying the user's message and ids.
        event_queue: Queue used to publish the completed task to the caller.
        extension_registry: Optional registry whose hooks wrap request/response.
        extension_context: Optional context object passed to extension hooks.

    Raises:
        ServerError: On unsupported content types, missing ids, or execution failure.
    """
    server_config = _get_server_config(agent)
    # Reject messages containing content types the server did not advertise.
    if context.message and context.message.parts and server_config:
        allowed_modes = server_config.default_input_modes
        invalid_types = validate_message_parts(context.message.parts, allowed_modes)
        if invalid_types:
            raise ServerError(
                InvalidParamsError(
                    message=f"Unsupported content type(s): {', '.join(invalid_types)}. "
                    f"Supported: {', '.join(allowed_modes)}"
                )
            )
    if extension_registry and extension_context:
        await extension_registry.invoke_on_request(extension_context)
    user_message = context.get_user_input()
    response_model: type[BaseModel] | None = None
    structured_inputs: list[dict[str, Any]] = []
    a2a_files: list[FileWithBytes | FileWithUri] = []
    if context.message and context.message.parts:
        # An optional JSON schema in TextPart metadata pins the response shape.
        schema = _extract_response_schema(context.message.parts)
        if schema:
            try:
                response_model = create_model_from_schema(schema)
            except Exception as e:
                # Schema problems are non-fatal: fall back to free-form output.
                logger.debug(
                    "Failed to create response model from schema",
                    extra={"error": str(e), "schema_title": schema.get("title")},
                )
        structured_inputs = get_data_parts(context.message.parts)
        a2a_files = get_file_parts(context.message.parts)
    task_id = context.task_id
    context_id = context.context_id
    if task_id is None or context_id is None:
        msg = "task_id and context_id are required"
        # Emit a failure event with empty ids, since none are available.
        crewai_event_bus.emit(
            agent,
            A2AServerTaskFailedEvent(
                task_id="",
                context_id="",
                error=msg,
                from_agent=agent,
            ),
        )
        raise ServerError(InvalidParamsError(message=msg)) from None
    task = Task(
        description=_build_task_description(user_message, structured_inputs),
        expected_output="Response to the user's request",
        agent=agent,
        response_model=response_model,
        input_files=_convert_a2a_files_to_file_inputs(a2a_files),
    )
    crewai_event_bus.emit(
        agent,
        A2AServerTaskStartedEvent(
            task_id=task_id,
            context_id=context_id,
            from_task=task,
            from_agent=agent,
        ),
    )
    try:
        result = await agent.aexecute_task(task=task, tools=agent.tools)
        # Extensions may transform the result before it reaches the client.
        if extension_registry and extension_context:
            result = await extension_registry.invoke_on_response(
                extension_context, result
            )
        result_str = str(result)
        history: list[Message] = [context.message] if context.message else []
        history.append(new_agent_text_message(result_str, context_id, task_id))
        await event_queue.enqueue_event(
            A2ATask(
                id=task_id,
                context_id=context_id,
                status=TaskStatus(state=TaskState.completed),
                artifacts=[_create_result_artifact(result, task_id)],
                history=history,
            )
        )
        crewai_event_bus.emit(
            agent,
            A2AServerTaskCompletedEvent(
                task_id=task_id,
                context_id=context_id,
                result=str(result),
                from_task=task,
                from_agent=agent,
            ),
        )
    except asyncio.CancelledError:
        # Raised by the @cancellable watcher: record and propagate unchanged.
        crewai_event_bus.emit(
            agent,
            A2AServerTaskCanceledEvent(
                task_id=task_id,
                context_id=context_id,
                from_task=task,
                from_agent=agent,
            ),
        )
        raise
    except Exception as e:
        crewai_event_bus.emit(
            agent,
            A2AServerTaskFailedEvent(
                task_id=task_id,
                context_id=context_id,
                error=str(e),
                from_task=task,
                from_agent=agent,
            ),
        )
        raise ServerError(
            error=InternalError(message=f"Task execution failed: {e}")
        ) from e
async def execute_with_extensions(
    agent: Agent,
    context: RequestContext,
    event_queue: EventQueue,
    extension_registry: ServerExtensionRegistry,
    extension_context: ExtensionContext,
) -> None:
    """Execute an A2A task with extension hooks.

    Thin public wrapper: all orchestration (task construction, event
    emission, error handling) lives in ``_execute_impl``; this entry point
    simply forwards every argument with the extension machinery enabled.

    Args:
        agent: The CrewAI agent to execute the task.
        context: The A2A request context containing the user's message.
        event_queue: The event queue for sending responses back.
        extension_registry: Registry of server extensions.
        extension_context: Context for extension hooks.
    """
    await _execute_impl(
        agent, context, event_queue, extension_registry, extension_context
    )
async def cancel(
    context: RequestContext,
    event_queue: EventQueue,
) -> A2ATask | None:
    """Cancel an A2A task.

    Sets a cancel flag in the shared cache (publishing it for distributed
    backends) so the ``cancellable`` decorator can abort the running
    execution, then reports the canceled status on the event queue.

    Args:
        context: The A2A request context containing task information.
        event_queue: The event queue for sending the cancellation status.

    Returns:
        The canceled task with updated status, or None when the context
        carries no current task.
    """
    task_id, context_id = context.task_id, context.context_id
    if task_id is None or context_id is None:
        raise ServerError(InvalidParamsError(message="task_id and context_id required"))

    current = context.current_task
    terminal_states = (
        TaskState.completed,
        TaskState.failed,
        TaskState.canceled,
    )
    if current and current.status.state in terminal_states:
        # Already finished - nothing to cancel, report the task as-is.
        return current

    cache = caches.get("default")
    await cache.set(f"cancel:{task_id}", True, ttl=3600)
    if not isinstance(cache, SimpleMemoryCache):
        # Distributed cache backend: notify other workers via pub/sub.
        await cache.client.publish(f"cancel:{task_id}", "cancel")

    await event_queue.enqueue_event(
        TaskStatusUpdateEvent(
            task_id=task_id,
            context_id=context_id,
            status=TaskStatus(state=TaskState.canceled),
            final=True,
        )
    )

    if current is None:
        return None
    current.status = TaskStatus(state=TaskState.canceled)
    return current
def list_tasks(
    tasks: list[A2ATask],
    context_id: str | None = None,
    status: TaskState | None = None,
    status_timestamp_after: datetime | None = None,
    page_size: int = 50,
    page_token: str | None = None,
    history_length: int | None = None,
    include_artifacts: bool = False,
) -> tuple[list[A2ATask], str | None, int]:
    """Filter and paginate A2A tasks.

    Provides filtering by context, status, and timestamp, along with
    cursor-based pagination. This is a pure utility function that operates
    on an in-memory list of tasks - storage retrieval is handled separately.

    Args:
        tasks: All tasks to filter.
        context_id: Filter by context ID to get tasks in a conversation.
        status: Filter by task state (e.g., completed, working).
        status_timestamp_after: Filter to tasks updated after this time.
        page_size: Maximum tasks per page (default 50).
        page_token: Base64-encoded cursor from previous response.
        history_length: Limit history messages per task (None = full history).
        include_artifacts: Whether to include task artifacts (default False).

    Returns:
        Tuple of (filtered_tasks, next_page_token, total_count).
        - filtered_tasks: Tasks matching filters, paginated and trimmed.
        - next_page_token: Token for next page, or None if no more pages.
        - total_count: Total number of tasks matching filters (before pagination).
    """
    from datetime import timezone

    filtered: list[A2ATask] = []
    for task in tasks:
        if context_id and task.context_id != context_id:
            continue
        if status and task.status.state != status:
            continue
        if status_timestamp_after and task.status.timestamp:
            ts = datetime.fromisoformat(task.status.timestamp.replace("Z", "+00:00"))
            if ts <= status_timestamp_after:
                continue
        filtered.append(task)

    # Bug fix: the parsed timestamps are timezone-aware (offset restored via
    # the Z -> +00:00 replacement), so the sentinel for missing timestamps
    # must be aware too - a naive datetime.min made the sort raise TypeError
    # whenever the list mixed tasks with and without timestamps.
    # Tasks without a timestamp sort last (treated as oldest).
    aware_min = datetime.min.replace(tzinfo=timezone.utc)

    def get_timestamp(t: A2ATask) -> datetime:
        """Extract timestamp from task status for sorting."""
        if t.status.timestamp is None:
            return aware_min
        return datetime.fromisoformat(t.status.timestamp.replace("Z", "+00:00"))

    filtered.sort(key=get_timestamp, reverse=True)
    total = len(filtered)
    start = 0
    if page_token:
        try:
            # Cursor is the base64-encoded id of the last task on the
            # previous page; resume just after it.
            cursor_id = base64.b64decode(page_token).decode()
            for idx, task in enumerate(filtered):
                if task.id == cursor_id:
                    start = idx + 1
                    break
        except (ValueError, UnicodeDecodeError):
            # Invalid/corrupt cursor: fall back to the first page.
            pass
    page = filtered[start : start + page_size]
    result: list[A2ATask] = []
    for task in page:
        # Deep copy so history/artifact trimming never mutates the input.
        task = task.model_copy(deep=True)
        if history_length is not None and task.history:
            task.history = task.history[-history_length:]
        if not include_artifacts:
            task.artifacts = None
        result.append(task)
    next_token: str | None = None
    if result and len(result) == page_size:
        # A full page may have more results after it; a short page cannot.
        next_token = base64.b64encode(result[-1].id.encode()).decode()
    return result, next_token, total
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/a2a/utils/task.py",
"license": "MIT License",
"lines": 491,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai/tests/a2a/utils/test_task.py | """Tests for A2A task utilities."""
from __future__ import annotations
import asyncio
from typing import Any
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
import pytest_asyncio
from a2a.server.agent_execution import RequestContext
from a2a.server.events import EventQueue
from a2a.types import Message, Task as A2ATask, TaskState, TaskStatus
from crewai.a2a.utils.task import cancel, cancellable, execute
@pytest.fixture
def mock_agent() -> MagicMock:
    """Provide a stub CrewAI agent whose task execution succeeds."""
    stub = MagicMock()
    stub.role = "Test Agent"
    stub.tools = []
    stub.aexecute_task = AsyncMock(return_value="Task completed successfully")
    return stub
@pytest.fixture
def mock_task(mock_context: MagicMock) -> MagicMock:
    """Provide a stub Task whose id mirrors the request context's task id."""
    stub = MagicMock()
    stub.id = mock_context.task_id
    stub.name = "Mock Task"
    stub.description = "Mock task description"
    return stub
@pytest.fixture
def mock_context() -> MagicMock:
    """Provide a stub RequestContext with fixed task/context ids."""
    ctx = MagicMock(spec=RequestContext)
    ctx.task_id = "test-task-123"
    ctx.context_id = "test-context-456"
    ctx.get_user_input.return_value = "Test user message"
    ctx.message = MagicMock(spec=Message)
    ctx.message.parts = []
    ctx.current_task = None
    return ctx
@pytest.fixture
def mock_event_queue() -> AsyncMock:
    """Provide a stub EventQueue whose enqueue_event call can be inspected."""
    stub = AsyncMock(spec=EventQueue)
    stub.enqueue_event = AsyncMock()
    return stub
@pytest_asyncio.fixture(autouse=True)
async def clear_cache(mock_context: MagicMock) -> None:
    """Remove any stale cancel flag from the cache before each test."""
    from aiocache import caches

    default_cache = caches.get("default")
    await default_cache.delete(f"cancel:{mock_context.task_id}")
class TestCancellableDecorator:
    """Tests for the cancellable decorator."""

    @pytest.mark.asyncio
    async def test_executes_function_without_context(self) -> None:
        """Function executes normally when no RequestContext is provided."""
        invocations = 0

        @cancellable
        async def my_func(value: int) -> int:
            nonlocal invocations
            invocations += 1
            return value * 2

        doubled = await my_func(5)
        assert doubled == 10
        assert invocations == 1

    @pytest.mark.asyncio
    async def test_executes_function_with_context(self, mock_context: MagicMock) -> None:
        """Function executes normally with RequestContext when not cancelled."""

        @cancellable
        async def my_func(context: RequestContext) -> str:
            await asyncio.sleep(0.01)
            return "completed"

        outcome = await my_func(mock_context)
        assert outcome == "completed"

    @pytest.mark.asyncio
    async def test_cancellation_raises_cancelled_error(
        self, mock_context: MagicMock
    ) -> None:
        """Function raises CancelledError when cancel flag is set."""
        from aiocache import caches

        backing_cache = caches.get("default")

        @cancellable
        async def slow_func(context: RequestContext) -> str:
            await asyncio.sleep(1.0)
            return "should not reach"

        # Pre-set the cancel flag so the decorator aborts the call.
        await backing_cache.set(f"cancel:{mock_context.task_id}", True)
        with pytest.raises(asyncio.CancelledError):
            await slow_func(mock_context)

    @pytest.mark.asyncio
    async def test_cleanup_removes_cancel_flag(self, mock_context: MagicMock) -> None:
        """Cancel flag is cleaned up after execution."""
        from aiocache import caches

        backing_cache = caches.get("default")

        @cancellable
        async def quick_func(context: RequestContext) -> str:
            return "done"

        await quick_func(mock_context)
        assert await backing_cache.get(f"cancel:{mock_context.task_id}") is None

    @pytest.mark.asyncio
    async def test_extracts_context_from_kwargs(self, mock_context: MagicMock) -> None:
        """Context can be passed as keyword argument."""

        @cancellable
        async def my_func(value: int, context: RequestContext | None = None) -> int:
            return value + 1

        assert await my_func(10, context=mock_context) == 11
class TestExecute:
    """Tests for the execute function.

    ``Task`` construction and the event bus are patched so only the
    orchestration performed by execute() itself is exercised; assertions
    index into ``mock_bus.emit.call_args_list`` by emission order.
    """

    @pytest.mark.asyncio
    async def test_successful_execution(
        self,
        mock_agent: MagicMock,
        mock_context: MagicMock,
        mock_event_queue: AsyncMock,
        mock_task: MagicMock,
    ) -> None:
        """Execute completes successfully and enqueues completed task."""
        with (
            patch("crewai.a2a.utils.task.Task", return_value=mock_task),
            patch("crewai.a2a.utils.task.crewai_event_bus") as mock_bus,
        ):
            await execute(mock_agent, mock_context, mock_event_queue)
        mock_agent.aexecute_task.assert_called_once()
        mock_event_queue.enqueue_event.assert_called_once()
        # Exactly two emits on success: started + completed.
        assert mock_bus.emit.call_count == 2

    @pytest.mark.asyncio
    async def test_emits_started_event(
        self,
        mock_agent: MagicMock,
        mock_context: MagicMock,
        mock_event_queue: AsyncMock,
        mock_task: MagicMock,
    ) -> None:
        """Execute emits A2AServerTaskStartedEvent."""
        with (
            patch("crewai.a2a.utils.task.Task", return_value=mock_task),
            patch("crewai.a2a.utils.task.crewai_event_bus") as mock_bus,
        ):
            await execute(mock_agent, mock_context, mock_event_queue)
        first_call = mock_bus.emit.call_args_list[0]
        # emit(source, event): the event payload is the second positional arg.
        event = first_call[0][1]
        assert event.type == "a2a_server_task_started"
        assert event.task_id == mock_context.task_id
        assert event.context_id == mock_context.context_id

    @pytest.mark.asyncio
    async def test_emits_completed_event(
        self,
        mock_agent: MagicMock,
        mock_context: MagicMock,
        mock_event_queue: AsyncMock,
        mock_task: MagicMock,
    ) -> None:
        """Execute emits A2AServerTaskCompletedEvent on success."""
        with (
            patch("crewai.a2a.utils.task.Task", return_value=mock_task),
            patch("crewai.a2a.utils.task.crewai_event_bus") as mock_bus,
        ):
            await execute(mock_agent, mock_context, mock_event_queue)
        # Second emit is the completion event (first was "started").
        second_call = mock_bus.emit.call_args_list[1]
        event = second_call[0][1]
        assert event.type == "a2a_server_task_completed"
        assert event.task_id == mock_context.task_id
        # Matches the mock_agent fixture's canned aexecute_task return value.
        assert event.result == "Task completed successfully"

    @pytest.mark.asyncio
    async def test_emits_failed_event_on_exception(
        self,
        mock_agent: MagicMock,
        mock_context: MagicMock,
        mock_event_queue: AsyncMock,
        mock_task: MagicMock,
    ) -> None:
        """Execute emits A2AServerTaskFailedEvent on exception."""
        # Make the agent raise so the failure path is exercised.
        mock_agent.aexecute_task = AsyncMock(side_effect=ValueError("Test error"))
        with (
            patch("crewai.a2a.utils.task.Task", return_value=mock_task),
            patch("crewai.a2a.utils.task.crewai_event_bus") as mock_bus,
        ):
            with pytest.raises(Exception):
                await execute(mock_agent, mock_context, mock_event_queue)
        # After "started", the second emit must be the failure event.
        failed_call = mock_bus.emit.call_args_list[1]
        event = failed_call[0][1]
        assert event.type == "a2a_server_task_failed"
        assert "Test error" in event.error

    @pytest.mark.asyncio
    async def test_emits_canceled_event_on_cancellation(
        self,
        mock_agent: MagicMock,
        mock_context: MagicMock,
        mock_event_queue: AsyncMock,
        mock_task: MagicMock,
    ) -> None:
        """Execute emits A2AServerTaskCanceledEvent on CancelledError."""
        # Simulate cooperative cancellation during agent execution.
        mock_agent.aexecute_task = AsyncMock(side_effect=asyncio.CancelledError())
        with (
            patch("crewai.a2a.utils.task.Task", return_value=mock_task),
            patch("crewai.a2a.utils.task.crewai_event_bus") as mock_bus,
        ):
            with pytest.raises(asyncio.CancelledError):
                await execute(mock_agent, mock_context, mock_event_queue)
        # After "started", the second emit must be the canceled event.
        canceled_call = mock_bus.emit.call_args_list[1]
        event = canceled_call[0][1]
        assert event.type == "a2a_server_task_canceled"
        assert event.task_id == mock_context.task_id
class TestCancel:
    """Tests for the cancel function.

    These tests exercise the real aiocache default cache singleton; the
    autouse ``clear_cache`` fixture removes the flag before each test.
    """

    @pytest.mark.asyncio
    async def test_sets_cancel_flag_in_cache(
        self,
        mock_context: MagicMock,
        mock_event_queue: AsyncMock,
    ) -> None:
        """Cancel sets the cancel flag in cache."""
        from aiocache import caches

        cache = caches.get("default")
        await cancel(mock_context, mock_event_queue)
        flag = await cache.get(f"cancel:{mock_context.task_id}")
        assert flag is True

    @pytest.mark.asyncio
    async def test_enqueues_task_status_update_event(
        self,
        mock_context: MagicMock,
        mock_event_queue: AsyncMock,
    ) -> None:
        """Cancel enqueues TaskStatusUpdateEvent with canceled state."""
        await cancel(mock_context, mock_event_queue)
        mock_event_queue.enqueue_event.assert_called_once()
        # First positional arg of the single enqueue_event call.
        event = mock_event_queue.enqueue_event.call_args[0][0]
        assert event.task_id == mock_context.task_id
        assert event.context_id == mock_context.context_id
        assert event.status.state == TaskState.canceled
        assert event.final is True

    @pytest.mark.asyncio
    async def test_returns_none_when_no_current_task(
        self,
        mock_context: MagicMock,
        mock_event_queue: AsyncMock,
    ) -> None:
        """Cancel returns None when context has no current_task."""
        mock_context.current_task = None
        result = await cancel(mock_context, mock_event_queue)
        assert result is None

    @pytest.mark.asyncio
    async def test_returns_updated_task_when_current_task_exists(
        self,
        mock_context: MagicMock,
        mock_event_queue: AsyncMock,
    ) -> None:
        """Cancel returns updated task when context has current_task."""
        # A working (non-terminal) task should be flipped to canceled.
        current_task = MagicMock(spec=A2ATask)
        current_task.status = TaskStatus(state=TaskState.working)
        mock_context.current_task = current_task
        result = await cancel(mock_context, mock_event_queue)
        assert result is current_task
        assert result.status.state == TaskState.canceled

    @pytest.mark.asyncio
    async def test_cleanup_after_cancel(
        self,
        mock_context: MagicMock,
        mock_event_queue: AsyncMock,
    ) -> None:
        """Cancel flag persists for cancellable decorator to detect."""
        from aiocache import caches

        cache = caches.get("default")
        await cancel(mock_context, mock_event_queue)
        flag = await cache.get(f"cancel:{mock_context.task_id}")
        assert flag is True
        # Explicit cleanup so the flag does not leak into other tests.
        await cache.delete(f"cancel:{mock_context.task_id}")
class TestExecuteAndCancelIntegration:
    """Integration tests for execute and cancel working together.

    NOTE: timing-sensitive - relies on the 0.1s sleep being long enough
    for execute() to start but shorter than the 2.0s simulated work.
    """

    @pytest.mark.asyncio
    async def test_cancel_stops_running_execute(
        self,
        mock_agent: MagicMock,
        mock_context: MagicMock,
        mock_event_queue: AsyncMock,
        mock_task: MagicMock,
    ) -> None:
        """Calling cancel stops a running execute."""

        async def slow_task(**kwargs: Any) -> str:
            # Long enough that cancel() fires while this is still running.
            await asyncio.sleep(2.0)
            return "should not complete"

        mock_agent.aexecute_task = slow_task
        with (
            patch("crewai.a2a.utils.task.Task", return_value=mock_task),
            patch("crewai.a2a.utils.task.crewai_event_bus"),
        ):
            execute_task = asyncio.create_task(
                execute(mock_agent, mock_context, mock_event_queue)
            )
            # Give execute() a moment to start before requesting cancel.
            await asyncio.sleep(0.1)
            await cancel(mock_context, mock_event_queue)
            with pytest.raises(asyncio.CancelledError):
                await execute_task
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/a2a/utils/test_task.py",
"license": "MIT License",
"lines": 293,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/src/crewai/a2a/utils/agent_card.py | """AgentCard utilities for A2A client and server operations."""
from __future__ import annotations
import asyncio
from collections.abc import MutableMapping
from functools import lru_cache
import ssl
import time
from types import MethodType
from typing import TYPE_CHECKING
from a2a.client.errors import A2AClientHTTPError
from a2a.types import AgentCapabilities, AgentCard, AgentSkill
from aiocache import cached # type: ignore[import-untyped]
from aiocache.serializers import PickleSerializer # type: ignore[import-untyped]
import httpx
from crewai.a2a.auth.client_schemes import APIKeyAuth, HTTPDigestAuth
from crewai.a2a.auth.utils import (
_auth_store,
configure_auth_client,
retry_on_401,
)
from crewai.a2a.config import A2AServerConfig
from crewai.crew import Crew
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.a2a_events import (
A2AAgentCardFetchedEvent,
A2AAuthenticationFailedEvent,
A2AConnectionErrorEvent,
)
if TYPE_CHECKING:
from crewai.a2a.auth.client_schemes import ClientAuthScheme
from crewai.agent import Agent
from crewai.task import Task
def _get_tls_verify(auth: ClientAuthScheme | None) -> ssl.SSLContext | bool | str:
"""Get TLS verify parameter from auth scheme.
Args:
auth: Optional authentication scheme with TLS config.
Returns:
SSL context, CA cert path, True for default verification,
or False if verification disabled.
"""
if auth and auth.tls:
return auth.tls.get_httpx_ssl_context()
return True
async def _prepare_auth_headers(
    auth: ClientAuthScheme | None,
    timeout: int,
) -> tuple[MutableMapping[str, str], ssl.SSLContext | bool | str]:
    """Build authentication headers plus the TLS verify setting.

    A short-lived client is created solely so the scheme can apply its
    authentication (e.g. obtain tokens); the produced headers are then
    reused by the real request.

    Args:
        auth: Optional authentication scheme.
        timeout: Request timeout in seconds.

    Returns:
        Tuple of (headers mapping, TLS verify setting).
    """
    verify = _get_tls_verify(auth)
    if auth is None:
        return {}, verify
    async with httpx.AsyncClient(timeout=timeout, verify=verify) as bootstrap_client:
        if isinstance(auth, (HTTPDigestAuth, APIKeyAuth)):
            # These schemes need client-level configuration before use.
            configure_auth_client(auth, bootstrap_client)
        headers: MutableMapping[str, str] = await auth.apply_auth(bootstrap_client, {})
    return headers, verify
def _get_server_config(agent: Agent) -> A2AServerConfig | None:
"""Get A2AServerConfig from an agent's a2a configuration.
Args:
agent: The Agent instance to check.
Returns:
A2AServerConfig if present, None otherwise.
"""
if agent.a2a is None:
return None
if isinstance(agent.a2a, A2AServerConfig):
return agent.a2a
if isinstance(agent.a2a, list):
for config in agent.a2a:
if isinstance(config, A2AServerConfig):
return config
return None
def fetch_agent_card(
    endpoint: str,
    auth: ClientAuthScheme | None = None,
    timeout: int = 30,
    use_cache: bool = True,
    cache_ttl: int = 300,
) -> AgentCard:
    """Fetch AgentCard from an A2A endpoint with optional caching.

    Args:
        endpoint: A2A agent endpoint URL (AgentCard URL).
        auth: Optional ClientAuthScheme for authentication.
        timeout: Request timeout in seconds.
        use_cache: Whether to use caching (default True).
        cache_ttl: Cache TTL in seconds (default 300 = 5 minutes).

    Returns:
        AgentCard object with agent capabilities and skills.

    Raises:
        httpx.HTTPStatusError: If the request fails.
        A2AClientHTTPError: If authentication fails.
    """
    if use_cache:
        if auth:
            # Exclude volatile token state so equivalent auth configs
            # produce the same cache key.
            auth_data = auth.model_dump_json(
                exclude={
                    "_access_token",
                    "_token_expires_at",
                    "_refresh_token",
                    "_authorization_callback",
                }
            )
            auth_hash = _auth_store.compute_key(type(auth).__name__, auth_data)
        else:
            auth_hash = _auth_store.compute_key("none", "")
        _auth_store.set(auth_hash, auth)
        # The time bucket changes every cache_ttl seconds, forcing a new
        # lru_cache entry and thus expiring the old one.
        ttl_hash = int(time.time() // cache_ttl)
        return _fetch_agent_card_cached(endpoint, auth_hash, timeout, ttl_hash)
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        return loop.run_until_complete(
            afetch_agent_card(endpoint=endpoint, auth=auth, timeout=timeout)
        )
    finally:
        loop.close()
        # Fix: unset the loop after closing it - otherwise this thread's
        # event-loop slot keeps pointing at a closed loop and later
        # asyncio.get_event_loop() calls receive an unusable loop.
        asyncio.set_event_loop(None)
async def afetch_agent_card(
    endpoint: str,
    auth: ClientAuthScheme | None = None,
    timeout: int = 30,
    use_cache: bool = True,
) -> AgentCard:
    """Fetch AgentCard from an A2A endpoint asynchronously.

    Native async implementation. Use this when running in an async context.

    Args:
        endpoint: A2A agent endpoint URL (AgentCard URL).
        auth: Optional ClientAuthScheme for authentication.
        timeout: Request timeout in seconds.
        use_cache: Whether to use caching (default True).

    Returns:
        AgentCard object with agent capabilities and skills.

    Raises:
        httpx.HTTPStatusError: If the request fails.
        A2AClientHTTPError: If authentication fails.
    """
    if not use_cache:
        return await _afetch_agent_card_impl(
            endpoint=endpoint, auth=auth, timeout=timeout
        )
    # Key the cache on the auth config minus its volatile token state so
    # equivalent configurations share a cache entry.
    if auth is None:
        auth_hash = _auth_store.compute_key("none", "")
    else:
        serialized = auth.model_dump_json(
            exclude={
                "_access_token",
                "_token_expires_at",
                "_refresh_token",
                "_authorization_callback",
            }
        )
        auth_hash = _auth_store.compute_key(type(auth).__name__, serialized)
    _auth_store.set(auth_hash, auth)
    card: AgentCard = await _afetch_agent_card_cached(endpoint, auth_hash, timeout)
    return card
@lru_cache()
def _fetch_agent_card_cached(
    endpoint: str,
    auth_hash: str,
    timeout: int,
    _ttl_hash: int,
) -> AgentCard:
    """Cached sync version of fetch_agent_card.

    Args:
        endpoint: A2A agent endpoint URL.
        auth_hash: Key into _auth_store used to rehydrate the auth scheme.
        timeout: Request timeout in seconds.
        _ttl_hash: Time bucket; a new value forces a fresh lru_cache entry.
    """
    auth = _auth_store.get(auth_hash)
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)
    try:
        return loop.run_until_complete(
            _afetch_agent_card_impl(endpoint=endpoint, auth=auth, timeout=timeout)
        )
    finally:
        loop.close()
        # Fix: unset the loop after closing it so this thread's event-loop
        # slot is not left pointing at a closed loop (consistent with
        # fetch_agent_card's non-cached path).
        asyncio.set_event_loop(None)
@cached(ttl=300, serializer=PickleSerializer())  # type: ignore[untyped-decorator]
async def _afetch_agent_card_cached(
    endpoint: str,
    auth_hash: str,
    timeout: int,
) -> AgentCard:
    """Cached (5 minute TTL) async AgentCard fetch.

    The auth scheme is rehydrated from _auth_store via its hash so the
    cache key stays simple and serializable.
    """
    resolved_auth = _auth_store.get(auth_hash)
    return await _afetch_agent_card_impl(
        endpoint=endpoint, auth=resolved_auth, timeout=timeout
    )
async def _afetch_agent_card_impl(
    endpoint: str,
    auth: ClientAuthScheme | None,
    timeout: int,
) -> AgentCard:
    """Internal async implementation of AgentCard fetching.

    Resolves the card URL from the endpoint, applies authentication,
    fetches and validates the card, and emits an event-bus event for the
    success path and for every failure mode.

    Args:
        endpoint: Card URL or any URL on the agent's host.
        auth: Optional authentication scheme.
        timeout: Request timeout in seconds.

    Returns:
        The validated AgentCard.

    Raises:
        A2AClientHTTPError: On a 401 response, after emitting an
            authentication-failed event.
        httpx.HTTPStatusError: On other non-2xx responses.
        httpx.TimeoutException / httpx.ConnectError / httpx.RequestError:
            Propagated after emitting a connection-error event.
    """
    start_time = time.perf_counter()
    # Split the endpoint into base URL + card path. If the well-known card
    # path is already present, strip it; otherwise treat everything after
    # the host as the path, defaulting to the well-known location.
    if "/.well-known/agent-card.json" in endpoint:
        base_url = endpoint.replace("/.well-known/agent-card.json", "")
        agent_card_path = "/.well-known/agent-card.json"
    else:
        # NOTE(review): assumes an absolute "scheme://host[/path]" URL; an
        # endpoint without "//" would misparse here - confirm callers
        # always pass absolute URLs.
        url_parts = endpoint.split("/", 3)
        base_url = f"{url_parts[0]}//{url_parts[2]}"
        agent_card_path = (
            f"/{url_parts[3]}"
            if len(url_parts) > 3 and url_parts[3]
            else "/.well-known/agent-card.json"
        )
    headers, verify = await _prepare_auth_headers(auth, timeout)
    async with httpx.AsyncClient(
        timeout=timeout, headers=headers, verify=verify
    ) as temp_client:
        if auth and isinstance(auth, (HTTPDigestAuth, APIKeyAuth)):
            configure_auth_client(auth, temp_client)
        agent_card_url = f"{base_url}{agent_card_path}"

        async def _fetch_agent_card_request() -> httpx.Response:
            # Wrapped so retry_on_401 can re-issue the request after the
            # auth scheme refreshes its credentials.
            return await temp_client.get(agent_card_url)

        try:
            response = await retry_on_401(
                request_func=_fetch_agent_card_request,
                auth_scheme=auth,
                client=temp_client,
                headers=temp_client.headers,
                max_retries=2,
            )
            response.raise_for_status()
            agent_card = AgentCard.model_validate(response.json())
            fetch_time_ms = (time.perf_counter() - start_time) * 1000
            agent_card_dict = agent_card.model_dump(exclude_none=True)
            # Success telemetry: cached=False because this impl always hit
            # the network (caching wrappers live upstream).
            crewai_event_bus.emit(
                None,
                A2AAgentCardFetchedEvent(
                    endpoint=endpoint,
                    a2a_agent_name=agent_card.name,
                    agent_card=agent_card_dict,
                    protocol_version=agent_card.protocol_version,
                    provider=agent_card_dict.get("provider"),
                    cached=False,
                    fetch_time_ms=fetch_time_ms,
                ),
            )
            return agent_card
        except httpx.HTTPStatusError as e:
            elapsed_ms = (time.perf_counter() - start_time) * 1000
            # Truncate the body so event payloads stay bounded.
            response_body = e.response.text[:1000] if e.response.text else None
            if e.response.status_code == 401:
                # Auth failure: assemble a diagnostic message (including the
                # server's WWW-Authenticate challenge when present) and
                # re-raise as an A2A-specific error.
                error_details = ["Authentication failed"]
                www_auth = e.response.headers.get("WWW-Authenticate")
                if www_auth:
                    error_details.append(f"WWW-Authenticate: {www_auth}")
                if not auth:
                    error_details.append("No auth scheme provided")
                msg = " | ".join(error_details)
                auth_type = type(auth).__name__ if auth else None
                crewai_event_bus.emit(
                    None,
                    A2AAuthenticationFailedEvent(
                        endpoint=endpoint,
                        auth_type=auth_type,
                        error=msg,
                        status_code=401,
                        metadata={
                            "elapsed_ms": elapsed_ms,
                            "response_body": response_body,
                            "www_authenticate": www_auth,
                            "request_url": str(e.request.url),
                        },
                    ),
                )
                raise A2AClientHTTPError(401, msg) from e
            # Non-401 HTTP error: emit a generic connection-error event and
            # let the original exception propagate.
            crewai_event_bus.emit(
                None,
                A2AConnectionErrorEvent(
                    endpoint=endpoint,
                    error=str(e),
                    error_type="http_error",
                    status_code=e.response.status_code,
                    operation="fetch_agent_card",
                    metadata={
                        "elapsed_ms": elapsed_ms,
                        "response_body": response_body,
                        "request_url": str(e.request.url),
                    },
                ),
            )
            raise
        except httpx.TimeoutException as e:
            elapsed_ms = (time.perf_counter() - start_time) * 1000
            crewai_event_bus.emit(
                None,
                A2AConnectionErrorEvent(
                    endpoint=endpoint,
                    error=str(e),
                    error_type="timeout",
                    operation="fetch_agent_card",
                    metadata={
                        "elapsed_ms": elapsed_ms,
                        "timeout_config": timeout,
                        "request_url": str(e.request.url) if e.request else None,
                    },
                ),
            )
            raise
        except httpx.ConnectError as e:
            elapsed_ms = (time.perf_counter() - start_time) * 1000
            crewai_event_bus.emit(
                None,
                A2AConnectionErrorEvent(
                    endpoint=endpoint,
                    error=str(e),
                    error_type="connection_error",
                    operation="fetch_agent_card",
                    metadata={
                        "elapsed_ms": elapsed_ms,
                        "request_url": str(e.request.url) if e.request else None,
                    },
                ),
            )
            raise
        except httpx.RequestError as e:
            # Catch-all for remaining transport-level failures; must come
            # after the more specific Timeout/Connect handlers.
            elapsed_ms = (time.perf_counter() - start_time) * 1000
            crewai_event_bus.emit(
                None,
                A2AConnectionErrorEvent(
                    endpoint=endpoint,
                    error=str(e),
                    error_type="request_error",
                    operation="fetch_agent_card",
                    metadata={
                        "elapsed_ms": elapsed_ms,
                        "request_url": str(e.request.url) if e.request else None,
                    },
                ),
            )
            raise
def _task_to_skill(task: Task) -> AgentSkill:
    """Convert a CrewAI Task to an A2A AgentSkill.

    Args:
        task: The CrewAI Task to convert.

    Returns:
        AgentSkill representing the task's capability.
    """
    # Fall back to a truncated description when the task has no name.
    skill_name = task.name or task.description[:50]
    agent_tags: list[str] = []
    if task.agent:
        agent_tags.append(task.agent.role.lower().replace(" ", "-"))
    return AgentSkill(
        id=skill_name.lower().replace(" ", "_"),
        name=skill_name,
        description=task.description,
        tags=agent_tags,
        examples=[task.expected_output] if task.expected_output else None,
    )
def _tool_to_skill(tool_name: str, tool_description: str) -> AgentSkill:
    """Convert an Agent's tool to an A2A AgentSkill.

    Args:
        tool_name: Name of the tool.
        tool_description: Description of what the tool does.

    Returns:
        AgentSkill representing the tool's capability.
    """
    lowered = tool_name.lower()
    return AgentSkill(
        id=lowered.replace(" ", "_"),
        name=tool_name,
        description=tool_description,
        tags=[lowered.replace(" ", "-")],
    )
def _crew_to_agent_card(crew: Crew, url: str) -> AgentCard:
    """Generate an A2A AgentCard from a Crew instance.

    Args:
        crew: The Crew instance to generate a card for.
        url: The base URL where this crew will be exposed.

    Returns:
        AgentCard describing the crew's capabilities.
    """
    card_name = getattr(crew, "name", None) or crew.__class__.__name__
    explicit_description = getattr(crew, "description", None)
    if explicit_description:
        description = explicit_description
    else:
        # Fall back to a summary built from the member agents' roles.
        roles = ", ".join(agent.role for agent in crew.agents)
        description = f"A crew of {len(crew.agents)} agents: {roles}"
    return AgentCard(
        name=card_name,
        description=description,
        url=url,
        version="1.0.0",
        capabilities=AgentCapabilities(
            streaming=True,
            push_notifications=True,
        ),
        default_input_modes=["text/plain", "application/json"],
        default_output_modes=["text/plain", "application/json"],
        skills=[_task_to_skill(task) for task in crew.tasks],
    )
def _agent_to_agent_card(agent: Agent, url: str) -> AgentCard:
    """Generate an A2A AgentCard from an Agent instance.

    Uses A2AServerConfig values when available, falling back to agent properties.
    If signing_config is provided, the card will be signed with JWS.

    Args:
        agent: The Agent instance to generate a card for.
        url: The base URL where this agent will be exposed.

    Returns:
        AgentCard describing the agent's capabilities.
    """
    # Imported here (not at module level) - presumably to avoid an import
    # cycle with the signing module; confirm before moving it.
    from crewai.a2a.utils.agent_card_signing import sign_agent_card

    # Fall back to a default server config so every lookup below is safe.
    server_config = _get_server_config(agent) or A2AServerConfig()
    name = server_config.name or agent.role
    description_parts = [agent.goal]
    if agent.backstory:
        description_parts.append(agent.backstory)
    description = server_config.description or " ".join(description_parts)
    # Skill resolution precedence: explicit config skills, then the
    # agent's tools, then a single skill derived from the agent's role.
    skills: list[AgentSkill] = (
        server_config.skills.copy() if server_config.skills else []
    )
    if not skills:
        if agent.tools:
            for tool in agent.tools:
                tool_name = getattr(tool, "name", None) or tool.__class__.__name__
                tool_desc = getattr(tool, "description", None) or f"Tool: {tool_name}"
                skills.append(_tool_to_skill(tool_name, tool_desc))
        if not skills:
            skills.append(
                AgentSkill(
                    id=agent.role.lower().replace(" ", "_"),
                    name=agent.role,
                    description=agent.goal,
                    tags=[agent.role.lower().replace(" ", "-")],
                )
            )
    capabilities = server_config.capabilities
    if server_config.server_extensions:
        from crewai.a2a.extensions.server import ServerExtensionRegistry

        # Merge registry-declared extensions into the configured ones,
        # de-duplicating by URI and preserving existing entries.
        registry = ServerExtensionRegistry(server_config.server_extensions)
        ext_list = registry.get_agent_extensions()
        existing_exts = list(capabilities.extensions) if capabilities.extensions else []
        existing_uris = {e.uri for e in existing_exts}
        for ext in ext_list:
            if ext.uri not in existing_uris:
                existing_exts.append(ext)
        capabilities = capabilities.model_copy(update={"extensions": existing_exts})
    card = AgentCard(
        name=name,
        description=description,
        url=server_config.url or url,
        version=server_config.version,
        capabilities=capabilities,
        default_input_modes=server_config.default_input_modes,
        default_output_modes=server_config.default_output_modes,
        skills=skills,
        preferred_transport=server_config.transport.preferred,
        protocol_version=server_config.protocol_version,
        provider=server_config.provider,
        documentation_url=server_config.documentation_url,
        icon_url=server_config.icon_url,
        additional_interfaces=server_config.additional_interfaces,
        security=server_config.security,
        security_schemes=server_config.security_schemes,
        supports_authenticated_extended_card=server_config.supports_authenticated_extended_card,
    )
    # A signing config takes precedence over pre-computed signatures.
    if server_config.signing_config:
        signature = sign_agent_card(
            card,
            private_key=server_config.signing_config.get_private_key(),
            key_id=server_config.signing_config.key_id,
            algorithm=server_config.signing_config.algorithm,
        )
        card = card.model_copy(update={"signatures": [signature]})
    elif server_config.signatures:
        card = card.model_copy(update={"signatures": server_config.signatures})
    return card
def inject_a2a_server_methods(agent: Agent) -> None:
    """Inject A2A server methods onto an Agent instance.

    Binds a ``to_agent_card(url: str) -> AgentCard`` method onto the agent
    that renders an A2A-compliant AgentCard. A no-op for agents that have
    no A2AServerConfig.

    Args:
        agent: The Agent instance to inject methods onto.
    """
    if _get_server_config(agent) is None:
        return

    def _to_agent_card(self: Agent, url: str) -> AgentCard:
        return _agent_to_agent_card(self, url)

    # object.__setattr__ sidesteps any __setattr__ override on Agent.
    bound = MethodType(_to_agent_card, agent)
    object.__setattr__(agent, "to_agent_card", bound)
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/a2a/utils/agent_card.py",
"license": "MIT License",
"lines": 488,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai/src/crewai/a2a/utils/response_model.py | """Response model utilities for A2A agent interactions."""
from __future__ import annotations
from typing import TypeAlias
from pydantic import BaseModel, Field, create_model
from crewai.a2a.config import A2AClientConfig, A2AConfig, A2AServerConfig
from crewai.types.utils import create_literals_from_strings
A2AConfigTypes: TypeAlias = A2AConfig | A2AServerConfig | A2AClientConfig
A2AClientConfigTypes: TypeAlias = A2AConfig | A2AClientConfig
def create_agent_response_model(agent_ids: tuple[str, ...]) -> type[BaseModel] | None:
    """Create a dynamic AgentResponse model with Literal types for agent IDs.

    Args:
        agent_ids: List of available A2A agent IDs.

    Returns:
        Dynamically created Pydantic model with Literal-constrained a2a_ids field,
        or None if agent_ids is empty.
    """
    if not agent_ids:
        return None
    AgentIdLiteral = create_literals_from_strings(agent_ids)  # noqa: N806
    ids_field = Field(
        default_factory=tuple,
        max_length=len(agent_ids),
        description="A2A agent IDs to delegate to.",
    )
    message_field = Field(
        description="The message content. If is_a2a=true, this is sent to the A2A agent. If is_a2a=false, this is your final answer ending the conversation."
    )
    is_a2a_field = Field(
        description="Set to false when the remote agent has answered your question - extract their answer and return it as your final message. Set to true ONLY if you need to ask a NEW, DIFFERENT question. NEVER repeat the same request - if the conversation history shows the agent already answered, set is_a2a=false immediately."
    )
    return create_model(
        "AgentResponse",
        a2a_ids=(tuple[AgentIdLiteral, ...], ids_field),  # type: ignore[valid-type]
        message=(str, message_field),
        is_a2a=(bool, is_a2a_field),
        __base__=BaseModel,
    )
def extract_a2a_agent_ids_from_config(
    a2a_config: list[A2AConfigTypes] | A2AConfigTypes | None,
) -> tuple[list[A2AClientConfigTypes], tuple[str, ...]]:
    """Extract A2A agent IDs from A2A configuration.

    Filters out A2AServerConfig since it doesn't have an endpoint for delegation.

    Args:
        a2a_config: A2A configuration (any type).

    Returns:
        Tuple of client A2A configs list and agent endpoint IDs.
    """
    if a2a_config is None:
        return [], ()

    # Accept either a single config object or a list of them.
    normalized: list[A2AConfigTypes] = (
        [a2a_config]
        if isinstance(a2a_config, (A2AConfig, A2AClientConfig, A2AServerConfig))
        else a2a_config
    )

    # Keep only client-side configs — server-only configs have no endpoint.
    client_configs: list[A2AClientConfigTypes] = [
        cfg for cfg in normalized if isinstance(cfg, (A2AConfig, A2AClientConfig))
    ]
    endpoints = tuple(cfg.endpoint for cfg in client_configs)
    return client_configs, endpoints
def get_a2a_agents_and_response_model(
    a2a_config: list[A2AConfigTypes] | A2AConfigTypes | None,
) -> tuple[list[A2AClientConfigTypes], type[BaseModel] | None]:
    """Get A2A agent configs and response model.

    Args:
        a2a_config: A2A configuration (any type).

    Returns:
        Tuple of client A2A configs and response model.
    """
    client_configs, endpoint_ids = extract_a2a_agent_ids_from_config(
        a2a_config=a2a_config
    )
    response_model = create_agent_response_model(endpoint_ids)
    return client_configs, response_model
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/a2a/utils/response_model.py",
"license": "MIT License",
"lines": 76,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai/tests/a2a/utils/test_agent_card.py | """Tests for A2A agent card utilities."""
from __future__ import annotations
from a2a.types import AgentCard, AgentSkill
from crewai import Agent
from crewai.a2a.config import A2AClientConfig, A2AServerConfig
from crewai.a2a.utils.agent_card import inject_a2a_server_methods
class TestInjectA2AServerMethods:
    """Tests for inject_a2a_server_methods function."""

    @staticmethod
    def _build_agent(**kwargs):
        """Create an agent with boilerplate role/goal/backstory plus overrides."""
        return Agent(
            role="Test Agent",
            goal="Test goal",
            backstory="Test backstory",
            **kwargs,
        )

    def test_agent_with_server_config_gets_to_agent_card_method(self) -> None:
        """Agent with A2AServerConfig should have to_agent_card method injected."""
        agent = self._build_agent(a2a=A2AServerConfig())
        assert hasattr(agent, "to_agent_card")
        assert callable(agent.to_agent_card)

    def test_agent_without_server_config_no_injection(self) -> None:
        """Agent without A2AServerConfig should not get to_agent_card method."""
        agent = self._build_agent(a2a=A2AClientConfig(endpoint="http://example.com"))
        assert not hasattr(agent, "to_agent_card")

    def test_agent_without_a2a_no_injection(self) -> None:
        """Agent without any a2a config should not get to_agent_card method."""
        agent = self._build_agent()
        assert not hasattr(agent, "to_agent_card")

    def test_agent_with_mixed_configs_gets_injection(self) -> None:
        """Agent with list containing A2AServerConfig should get to_agent_card."""
        agent = self._build_agent(
            a2a=[
                A2AClientConfig(endpoint="http://example.com"),
                A2AServerConfig(name="My Agent"),
            ]
        )
        assert hasattr(agent, "to_agent_card")
        assert callable(agent.to_agent_card)

    def test_manual_injection_on_plain_agent(self) -> None:
        """inject_a2a_server_methods should work when called manually."""
        agent = self._build_agent()
        # Manually set server config and inject
        object.__setattr__(agent, "a2a", A2AServerConfig())
        inject_a2a_server_methods(agent)
        assert hasattr(agent, "to_agent_card")
        assert callable(agent.to_agent_card)
class TestToAgentCard:
    """Tests for the injected to_agent_card method."""

    @staticmethod
    def _server_agent(
        config: A2AServerConfig | None = None,
        role: str = "Test Agent",
        goal: str = "Test goal",
        backstory: str = "Test backstory",
    ) -> Agent:
        """Build an agent configured as an A2A server."""
        return Agent(
            role=role,
            goal=goal,
            backstory=backstory,
            a2a=config if config is not None else A2AServerConfig(),
        )

    def test_returns_agent_card(self) -> None:
        """to_agent_card should return an AgentCard instance."""
        card = self._server_agent().to_agent_card("http://localhost:8000")
        assert isinstance(card, AgentCard)

    def test_uses_agent_role_as_name(self) -> None:
        """AgentCard name should default to agent role."""
        agent = self._server_agent(
            role="Data Analyst", goal="Analyze data", backstory="Expert analyst"
        )
        card = agent.to_agent_card("http://localhost:8000")
        assert card.name == "Data Analyst"

    def test_uses_server_config_name(self) -> None:
        """AgentCard name should prefer A2AServerConfig.name over role."""
        agent = self._server_agent(
            A2AServerConfig(name="Custom Agent Name"),
            role="Data Analyst",
            goal="Analyze data",
            backstory="Expert analyst",
        )
        card = agent.to_agent_card("http://localhost:8000")
        assert card.name == "Custom Agent Name"

    def test_uses_goal_as_description(self) -> None:
        """AgentCard description should include agent goal."""
        agent = self._server_agent(
            goal="Accomplish important tasks", backstory="Has extensive experience"
        )
        card = agent.to_agent_card("http://localhost:8000")
        assert "Accomplish important tasks" in card.description

    def test_uses_server_config_description(self) -> None:
        """AgentCard description should prefer A2AServerConfig.description."""
        agent = self._server_agent(
            A2AServerConfig(description="Custom description"),
            goal="Accomplish important tasks",
            backstory="Has extensive experience",
        )
        card = agent.to_agent_card("http://localhost:8000")
        assert card.description == "Custom description"

    def test_uses_provided_url(self) -> None:
        """AgentCard url should use the provided URL."""
        card = self._server_agent().to_agent_card("http://my-server.com:9000")
        assert card.url == "http://my-server.com:9000"

    def test_uses_server_config_url(self) -> None:
        """AgentCard url should prefer A2AServerConfig.url over provided URL."""
        agent = self._server_agent(A2AServerConfig(url="http://configured-url.com"))
        card = agent.to_agent_card("http://fallback-url.com")
        # The configured URL comes back normalized with a trailing slash.
        assert card.url == "http://configured-url.com/"

    def test_generates_default_skill(self) -> None:
        """AgentCard should have at least one skill based on agent role."""
        agent = self._server_agent(
            role="Research Assistant",
            goal="Help with research",
            backstory="Skilled researcher",
        )
        card = agent.to_agent_card("http://localhost:8000")
        assert len(card.skills) >= 1
        default_skill = card.skills[0]
        assert default_skill.name == "Research Assistant"
        assert default_skill.description == "Help with research"

    def test_uses_server_config_skills(self) -> None:
        """AgentCard skills should prefer A2AServerConfig.skills."""
        custom_skill = AgentSkill(
            id="custom-skill",
            name="Custom Skill",
            description="A custom skill",
            tags=["custom"],
        )
        agent = self._server_agent(A2AServerConfig(skills=[custom_skill]))
        card = agent.to_agent_card("http://localhost:8000")
        assert len(card.skills) == 1
        assert card.skills[0].id == "custom-skill"
        assert card.skills[0].name == "Custom Skill"

    def test_includes_custom_version(self) -> None:
        """AgentCard should include version from A2AServerConfig."""
        agent = self._server_agent(A2AServerConfig(version="2.0.0"))
        card = agent.to_agent_card("http://localhost:8000")
        assert card.version == "2.0.0"

    def test_default_version(self) -> None:
        """AgentCard should have default version 1.0.0."""
        card = self._server_agent().to_agent_card("http://localhost:8000")
        assert card.version == "1.0.0"
class TestAgentCardJsonStructure:
    """Tests for the JSON structure of AgentCard."""

    @staticmethod
    def _default_card() -> AgentCard:
        """Produce a card from a minimally configured server agent."""
        agent = Agent(
            role="Test Agent",
            goal="Test goal",
            backstory="Test backstory",
            a2a=A2AServerConfig(),
        )
        return agent.to_agent_card("http://localhost:8000")

    def test_json_has_required_fields(self) -> None:
        """AgentCard JSON should contain all required A2A protocol fields."""
        json_data = self._default_card().model_dump()
        for field_name in (
            "name",
            "description",
            "url",
            "version",
            "skills",
            "capabilities",
            "defaultInputModes",
            "defaultOutputModes",
        ):
            assert field_name in json_data

    def test_json_skills_structure(self) -> None:
        """Each skill in JSON should have required fields."""
        json_data = self._default_card().model_dump()
        assert len(json_data["skills"]) >= 1
        first_skill = json_data["skills"][0]
        for field_name in ("id", "name", "description", "tags"):
            assert field_name in first_skill

    def test_json_capabilities_structure(self) -> None:
        """Capabilities in JSON should have expected fields."""
        capabilities = self._default_card().model_dump()["capabilities"]
        assert "streaming" in capabilities
        assert "pushNotifications" in capabilities

    def test_json_serializable(self) -> None:
        """AgentCard should be JSON serializable."""
        json_str = self._default_card().model_dump_json()
        assert isinstance(json_str, str)
        assert "Test Agent" in json_str
        assert "http://localhost:8000" in json_str

    def test_json_excludes_none_values(self) -> None:
        """AgentCard JSON with exclude_none should omit None fields."""
        json_data = self._default_card().model_dump(exclude_none=True)
        assert "provider" not in json_data
        assert "documentationUrl" not in json_data
        assert "iconUrl" not in json_data
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/a2a/utils/test_agent_card.py",
"license": "MIT License",
"lines": 258,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/src/crewai/a2a/errors.py | """A2A error codes and error response utilities.
This module provides a centralized mapping of all A2A protocol error codes
as defined in the A2A specification, plus custom CrewAI extensions.
Error codes follow JSON-RPC 2.0 conventions:
- -32700 to -32600: Standard JSON-RPC errors
- -32099 to -32000: Server errors (A2A-specific)
- -32768 to -32100: Reserved for implementation-defined errors
"""
from __future__ import annotations
from dataclasses import dataclass, field
from enum import IntEnum
from typing import Any
from a2a.client.errors import A2AClientTimeoutError
class A2APollingTimeoutError(A2AClientTimeoutError):
    """Raised when polling exceeds the configured timeout.

    Subclasses the a2a-sdk ``A2AClientTimeoutError`` so existing handlers
    that catch the SDK timeout error also catch this one.
    """
class A2AErrorCode(IntEnum):
    """A2A protocol error codes.
    Codes follow JSON-RPC 2.0 specification with A2A-specific extensions.
    """
    # JSON-RPC 2.0 Standard Errors (-32700 to -32600)
    JSON_PARSE_ERROR = -32700
    """Invalid JSON was received by the server."""
    INVALID_REQUEST = -32600
    """The JSON sent is not a valid Request object."""
    METHOD_NOT_FOUND = -32601
    """The method does not exist / is not available."""
    INVALID_PARAMS = -32602
    """Invalid method parameter(s)."""
    INTERNAL_ERROR = -32603
    """Internal JSON-RPC error."""
    # A2A-Specific Errors (-32099 to -32000)
    TASK_NOT_FOUND = -32001
    """The specified task was not found."""
    TASK_NOT_CANCELABLE = -32002
    """The task cannot be canceled (already completed/failed)."""
    PUSH_NOTIFICATION_NOT_SUPPORTED = -32003
    """Push notifications are not supported by this agent."""
    UNSUPPORTED_OPERATION = -32004
    """The requested operation is not supported."""
    CONTENT_TYPE_NOT_SUPPORTED = -32005
    """Incompatible content types between client and server."""
    INVALID_AGENT_RESPONSE = -32006
    """The agent produced an invalid response."""
    # CrewAI Custom Extensions
    # NOTE(review): these values (-32009..-32018) sit inside the A2A
    # server-error band (-32099 to -32000), not the implementation-defined
    # band (-32768 to -32100) reserved by the module docstring — confirm
    # they cannot collide with future spec-assigned codes.
    UNSUPPORTED_VERSION = -32009
    """The requested A2A protocol version is not supported."""
    UNSUPPORTED_EXTENSION = -32010
    """Client does not support required protocol extensions."""
    AUTHENTICATION_REQUIRED = -32011
    """Authentication is required for this operation."""
    AUTHORIZATION_FAILED = -32012
    """Authorization check failed (insufficient permissions)."""
    RATE_LIMIT_EXCEEDED = -32013
    """Rate limit exceeded for this client/operation."""
    TASK_TIMEOUT = -32014
    """Task execution timed out."""
    TRANSPORT_NEGOTIATION_FAILED = -32015
    """Failed to negotiate a compatible transport protocol."""
    CONTEXT_NOT_FOUND = -32016
    """The specified context was not found."""
    SKILL_NOT_FOUND = -32017
    """The specified skill was not found."""
    ARTIFACT_NOT_FOUND = -32018
    """The specified artifact was not found."""
# Error code to default message mapping
ERROR_MESSAGES: dict[int, str] = {
A2AErrorCode.JSON_PARSE_ERROR: "Parse error",
A2AErrorCode.INVALID_REQUEST: "Invalid Request",
A2AErrorCode.METHOD_NOT_FOUND: "Method not found",
A2AErrorCode.INVALID_PARAMS: "Invalid params",
A2AErrorCode.INTERNAL_ERROR: "Internal error",
A2AErrorCode.TASK_NOT_FOUND: "Task not found",
A2AErrorCode.TASK_NOT_CANCELABLE: "Task not cancelable",
A2AErrorCode.PUSH_NOTIFICATION_NOT_SUPPORTED: "Push Notification is not supported",
A2AErrorCode.UNSUPPORTED_OPERATION: "This operation is not supported",
A2AErrorCode.CONTENT_TYPE_NOT_SUPPORTED: "Incompatible content types",
A2AErrorCode.INVALID_AGENT_RESPONSE: "Invalid agent response",
A2AErrorCode.UNSUPPORTED_VERSION: "Unsupported A2A version",
A2AErrorCode.UNSUPPORTED_EXTENSION: "Client does not support required extensions",
A2AErrorCode.AUTHENTICATION_REQUIRED: "Authentication required",
A2AErrorCode.AUTHORIZATION_FAILED: "Authorization failed",
A2AErrorCode.RATE_LIMIT_EXCEEDED: "Rate limit exceeded",
A2AErrorCode.TASK_TIMEOUT: "Task execution timed out",
A2AErrorCode.TRANSPORT_NEGOTIATION_FAILED: "Transport negotiation failed",
A2AErrorCode.CONTEXT_NOT_FOUND: "Context not found",
A2AErrorCode.SKILL_NOT_FOUND: "Skill not found",
A2AErrorCode.ARTIFACT_NOT_FOUND: "Artifact not found",
}
@dataclass
class A2AError(Exception):
    """Base exception for A2A protocol errors.

    Attributes:
        code: The A2A/JSON-RPC error code.
        message: Human-readable error message.
        data: Optional additional error data.
    """

    code: int
    message: str | None = None
    data: Any = None

    def __post_init__(self) -> None:
        # Fill in the canonical default message for the code when the caller
        # did not supply one, then initialize Exception with it.
        if self.message is None:
            self.message = ERROR_MESSAGES.get(self.code, "Unknown error")
        super().__init__(self.message)

    def to_dict(self) -> dict[str, Any]:
        """Convert to JSON-RPC error object format."""
        payload: dict[str, Any] = {"code": self.code, "message": self.message}
        # "data" is optional in JSON-RPC error objects; omit it when unset.
        if self.data is not None:
            payload["data"] = self.data
        return payload

    def to_response(self, request_id: str | int | None = None) -> dict[str, Any]:
        """Convert to full JSON-RPC error response."""
        return {"jsonrpc": "2.0", "error": self.to_dict(), "id": request_id}
# Concrete error subclasses. Each pins its JSON-RPC code via an init=False
# field and, where extra context fields exist, builds a more specific default
# message in __post_init__ before deferring to A2AError.__post_init__.
@dataclass
class JSONParseError(A2AError):
    """Invalid JSON was received."""
    # Fixed JSON-RPC 2.0 code; excluded from __init__.
    code: int = field(default=A2AErrorCode.JSON_PARSE_ERROR, init=False)
@dataclass
class InvalidRequestError(A2AError):
    """The JSON sent is not a valid Request object."""
    code: int = field(default=A2AErrorCode.INVALID_REQUEST, init=False)
@dataclass
class MethodNotFoundError(A2AError):
    """The method does not exist / is not available."""
    code: int = field(default=A2AErrorCode.METHOD_NOT_FOUND, init=False)
    # Optional method name used to build a more specific default message.
    method: str | None = None
    def __post_init__(self) -> None:
        # Only synthesize a message when the caller did not pass one.
        if self.message is None and self.method:
            self.message = f"Method not found: {self.method}"
        super().__post_init__()
@dataclass
class InvalidParamsError(A2AError):
    """Invalid method parameter(s)."""
    code: int = field(default=A2AErrorCode.INVALID_PARAMS, init=False)
    param: str | None = None
    reason: str | None = None
    def __post_init__(self) -> None:
        # Message specificity depends on which context fields were provided.
        if self.message is None:
            if self.param and self.reason:
                self.message = f"Invalid parameter '{self.param}': {self.reason}"
            elif self.param:
                self.message = f"Invalid parameter: {self.param}"
        super().__post_init__()
@dataclass
class InternalError(A2AError):
    """Internal JSON-RPC error."""
    code: int = field(default=A2AErrorCode.INTERNAL_ERROR, init=False)
@dataclass
class TaskNotFoundError(A2AError):
    """The specified task was not found."""
    code: int = field(default=A2AErrorCode.TASK_NOT_FOUND, init=False)
    task_id: str | None = None
    def __post_init__(self) -> None:
        if self.message is None and self.task_id:
            self.message = f"Task not found: {self.task_id}"
        super().__post_init__()
@dataclass
class TaskNotCancelableError(A2AError):
    """The task cannot be canceled."""
    code: int = field(default=A2AErrorCode.TASK_NOT_CANCELABLE, init=False)
    task_id: str | None = None
    reason: str | None = None
    def __post_init__(self) -> None:
        if self.message is None:
            if self.task_id and self.reason:
                self.message = f"Task {self.task_id} cannot be canceled: {self.reason}"
            elif self.task_id:
                self.message = f"Task {self.task_id} cannot be canceled"
        super().__post_init__()
@dataclass
class PushNotificationNotSupportedError(A2AError):
    """Push notifications are not supported."""
    code: int = field(default=A2AErrorCode.PUSH_NOTIFICATION_NOT_SUPPORTED, init=False)
@dataclass
class UnsupportedOperationError(A2AError):
    """The requested operation is not supported."""
    code: int = field(default=A2AErrorCode.UNSUPPORTED_OPERATION, init=False)
    operation: str | None = None
    def __post_init__(self) -> None:
        if self.message is None and self.operation:
            self.message = f"Operation not supported: {self.operation}"
        super().__post_init__()
@dataclass
class ContentTypeNotSupportedError(A2AError):
    """Incompatible content types."""
    code: int = field(default=A2AErrorCode.CONTENT_TYPE_NOT_SUPPORTED, init=False)
    requested_types: list[str] | None = None
    supported_types: list[str] | None = None
    def __post_init__(self) -> None:
        # Both sides are needed to produce a meaningful comparison message.
        if self.message is None and self.requested_types and self.supported_types:
            self.message = (
                f"Content type not supported. Requested: {self.requested_types}, "
                f"Supported: {self.supported_types}"
            )
        super().__post_init__()
@dataclass
class InvalidAgentResponseError(A2AError):
    """The agent produced an invalid response."""
    code: int = field(default=A2AErrorCode.INVALID_AGENT_RESPONSE, init=False)
@dataclass
class UnsupportedVersionError(A2AError):
    """The requested A2A version is not supported."""
    code: int = field(default=A2AErrorCode.UNSUPPORTED_VERSION, init=False)
    requested_version: str | None = None
    supported_versions: list[str] | None = None
    def __post_init__(self) -> None:
        if self.message is None and self.requested_version:
            msg = f"Unsupported A2A version: {self.requested_version}"
            # Append the supported list only when it is known.
            if self.supported_versions:
                msg += f". Supported versions: {', '.join(self.supported_versions)}"
            self.message = msg
        super().__post_init__()
@dataclass
class UnsupportedExtensionError(A2AError):
    """Client does not support required extensions."""
    code: int = field(default=A2AErrorCode.UNSUPPORTED_EXTENSION, init=False)
    required_extensions: list[str] | None = None
    def __post_init__(self) -> None:
        if self.message is None and self.required_extensions:
            self.message = f"Client does not support required extensions: {', '.join(self.required_extensions)}"
        super().__post_init__()
@dataclass
class AuthenticationRequiredError(A2AError):
    """Authentication is required."""
    code: int = field(default=A2AErrorCode.AUTHENTICATION_REQUIRED, init=False)
@dataclass
class AuthorizationFailedError(A2AError):
    """Authorization check failed."""
    code: int = field(default=A2AErrorCode.AUTHORIZATION_FAILED, init=False)
    required_scope: str | None = None
    def __post_init__(self) -> None:
        if self.message is None and self.required_scope:
            self.message = (
                f"Authorization failed. Required scope: {self.required_scope}"
            )
        super().__post_init__()
@dataclass
class RateLimitExceededError(A2AError):
    """Rate limit exceeded.

    Attributes:
        retry_after: Seconds the client should wait before retrying; also
            surfaced to clients via the JSON-RPC ``data`` payload.
    """
    code: int = field(default=A2AErrorCode.RATE_LIMIT_EXCEEDED, init=False)
    retry_after: int | None = None
    def __post_init__(self) -> None:
        # Use `is not None` (not truthiness) so retry_after=0 ("retry
        # immediately") still produces the specific message and data payload.
        if self.message is None and self.retry_after is not None:
            self.message = (
                f"Rate limit exceeded. Retry after {self.retry_after} seconds"
            )
        # Attach retry_after to the error data, but never clobber data the
        # caller supplied explicitly.
        if self.retry_after is not None and self.data is None:
            self.data = {"retry_after": self.retry_after}
        super().__post_init__()
@dataclass
class TaskTimeoutError(A2AError):
    """Task execution timed out."""
    code: int = field(default=A2AErrorCode.TASK_TIMEOUT, init=False)
    task_id: str | None = None
    timeout_seconds: float | None = None
    def __post_init__(self) -> None:
        # Message specificity depends on which context fields were provided.
        if self.message is None:
            if self.task_id and self.timeout_seconds:
                self.message = (
                    f"Task {self.task_id} timed out after {self.timeout_seconds}s"
                )
            elif self.task_id:
                self.message = f"Task {self.task_id} timed out"
        super().__post_init__()
@dataclass
class TransportNegotiationFailedError(A2AError):
    """Failed to negotiate a compatible transport protocol."""
    code: int = field(default=A2AErrorCode.TRANSPORT_NEGOTIATION_FAILED, init=False)
    client_transports: list[str] | None = None
    server_transports: list[str] | None = None
    def __post_init__(self) -> None:
        # Both transport lists are needed for a meaningful message.
        if self.message is None and self.client_transports and self.server_transports:
            self.message = (
                f"Transport negotiation failed. Client: {self.client_transports}, "
                f"Server: {self.server_transports}"
            )
        super().__post_init__()
@dataclass
class ContextNotFoundError(A2AError):
    """The specified context was not found."""
    code: int = field(default=A2AErrorCode.CONTEXT_NOT_FOUND, init=False)
    context_id: str | None = None
    def __post_init__(self) -> None:
        if self.message is None and self.context_id:
            self.message = f"Context not found: {self.context_id}"
        super().__post_init__()
@dataclass
class SkillNotFoundError(A2AError):
    """The specified skill was not found."""
    code: int = field(default=A2AErrorCode.SKILL_NOT_FOUND, init=False)
    skill_id: str | None = None
    def __post_init__(self) -> None:
        if self.message is None and self.skill_id:
            self.message = f"Skill not found: {self.skill_id}"
        super().__post_init__()
@dataclass
class ArtifactNotFoundError(A2AError):
    """The specified artifact was not found."""
    code: int = field(default=A2AErrorCode.ARTIFACT_NOT_FOUND, init=False)
    artifact_id: str | None = None
    def __post_init__(self) -> None:
        if self.message is None and self.artifact_id:
            self.message = f"Artifact not found: {self.artifact_id}"
        super().__post_init__()
def create_error_response(
    code: int | A2AErrorCode,
    message: str | None = None,
    data: Any = None,
    request_id: str | int | None = None,
) -> dict[str, Any]:
    """Create a JSON-RPC error response.

    Args:
        code: Error code (A2AErrorCode or int).
        message: Optional error message (uses default if not provided).
        data: Optional additional error data.
        request_id: Request ID for correlation.

    Returns:
        Dict in JSON-RPC error response format.
    """
    # Normalize enum members to plain ints before wrapping in A2AError.
    return A2AError(code=int(code), message=message, data=data).to_response(
        request_id
    )
def is_retryable_error(code: int) -> bool:
    """Check if an error is potentially retryable.

    Args:
        code: Error code to check.

    Returns:
        True if the error might be resolved by retrying.
    """
    # Transient server-side conditions: internal failure, throttling, timeout.
    return code in (
        A2AErrorCode.INTERNAL_ERROR,
        A2AErrorCode.RATE_LIMIT_EXCEEDED,
        A2AErrorCode.TASK_TIMEOUT,
    )
def is_client_error(code: int) -> bool:
    """Check if an error is a client-side error.

    Args:
        code: Error code to check.

    Returns:
        True if the error is due to client request issues.
    """
    # Malformed requests plus "referenced thing does not exist / is not
    # supported" conditions — the client must change its request to succeed.
    return code in (
        A2AErrorCode.JSON_PARSE_ERROR,
        A2AErrorCode.INVALID_REQUEST,
        A2AErrorCode.METHOD_NOT_FOUND,
        A2AErrorCode.INVALID_PARAMS,
        A2AErrorCode.TASK_NOT_FOUND,
        A2AErrorCode.CONTENT_TYPE_NOT_SUPPORTED,
        A2AErrorCode.UNSUPPORTED_VERSION,
        A2AErrorCode.UNSUPPORTED_EXTENSION,
        A2AErrorCode.CONTEXT_NOT_FOUND,
        A2AErrorCode.SKILL_NOT_FOUND,
        A2AErrorCode.ARTIFACT_NOT_FOUND,
    )
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/a2a/errors.py",
"license": "MIT License",
"lines": 361,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai/src/crewai/a2a/task_helpers.py | """Helper functions for processing A2A task results."""
from __future__ import annotations
from collections.abc import AsyncIterator
from typing import TYPE_CHECKING, Any, TypedDict
import uuid
from a2a.client.errors import A2AClientHTTPError
from a2a.types import (
AgentCard,
Message,
Part,
Role,
Task,
TaskArtifactUpdateEvent,
TaskState,
TaskStatusUpdateEvent,
TextPart,
)
from typing_extensions import NotRequired
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.a2a_events import (
A2AConnectionErrorEvent,
A2AResponseReceivedEvent,
)
if TYPE_CHECKING:
from a2a.types import Task as A2ATask
# Either a (task, optional update event) pair from a streaming response, or a
# direct Message reply.
SendMessageEvent = (
    tuple[Task, TaskStatusUpdateEvent | TaskArtifactUpdateEvent | None] | Message
)
# States after which the task will not change again.
TERMINAL_STATES: frozenset[TaskState] = frozenset(
    {
        TaskState.completed,
        TaskState.failed,
        TaskState.rejected,
        TaskState.canceled,
    }
)
# States that require an action from the client (more input or auth).
ACTIONABLE_STATES: frozenset[TaskState] = frozenset(
    {
        TaskState.input_required,
        TaskState.auth_required,
    }
)
# States while the server is still processing; callers keep polling/streaming.
PENDING_STATES: frozenset[TaskState] = frozenset(
    {
        TaskState.submitted,
        TaskState.working,
    }
)
class TaskStateResult(TypedDict):
    """Result dictionary from processing A2A task state."""
    status: TaskState  # state the task ended in (terminal or actionable)
    history: list[Message]  # accumulated conversation messages
    result: NotRequired[str]  # response text; set on completion
    error: NotRequired[str]  # error/prompt text; set on failure or input/auth required
    agent_card: NotRequired[dict[str, Any]]  # serialized agent card, when available
    a2a_agent_name: NotRequired[str | None]  # remote agent name, when known
def extract_task_result_parts(a2a_task: A2ATask) -> list[str]:
    """Extract result parts from A2A task status message, history, and artifacts.

    Args:
        a2a_task: A2A Task object with status, history, and artifacts

    Returns:
        List of result text parts
    """
    texts: list[str] = []

    # First preference: text attached to the task's current status message.
    if a2a_task.status and a2a_task.status.message:
        status_message = a2a_task.status.message
        texts += [
            part.root.text
            for part in status_message.parts
            if part.root.kind == "text"
        ]

    # Fallback: the most recent agent-authored message in the history.
    if not texts and a2a_task.history:
        for past_message in reversed(a2a_task.history):
            if past_message.role != Role.agent:
                continue
            texts += [
                part.root.text
                for part in past_message.parts
                if part.root.kind == "text"
            ]
            break

    # Artifacts always contribute their text parts, regardless of the above.
    if a2a_task.artifacts:
        for artifact in a2a_task.artifacts:
            texts += [
                part.root.text
                for part in artifact.parts
                if part.root.kind == "text"
            ]

    return texts
def extract_error_message(a2a_task: A2ATask, default: str) -> str:
    """Extract error message from A2A task.

    Args:
        a2a_task: A2A Task object
        default: Default message if no error found

    Returns:
        Error message string
    """
    _missing = object()

    # First choice: the status message — its first text part, or the
    # stringified message when it carries no text parts at all.
    if a2a_task.status and a2a_task.status.message:
        status_message = a2a_task.status.message
        first_text = next(
            (p.root.text for p in status_message.parts if p.root.kind == "text"),
            _missing,
        )
        if first_text is not _missing:
            return str(first_text)
        return str(status_message)

    # Second choice: newest-first scan of the history for any text part.
    for past_message in reversed(a2a_task.history or []):
        for p in past_message.parts:
            if p.root.kind == "text":
                return str(p.root.text)

    return default
def process_task_state(
    a2a_task: A2ATask,
    new_messages: list[Message],
    agent_card: AgentCard,
    turn_number: int,
    is_multiturn: bool,
    agent_role: str | None,
    result_parts: list[str] | None = None,
    endpoint: str | None = None,
    a2a_agent_name: str | None = None,
    from_task: Any | None = None,
    from_agent: Any | None = None,
    is_final: bool = True,
) -> TaskStateResult | None:
    """Process A2A task state and return result dictionary.
    Shared logic for both polling and streaming handlers.
    Args:
        a2a_task: The A2A task to process.
        new_messages: List to collect messages (modified in place).
        agent_card: The agent card.
        turn_number: Current turn number.
        is_multiturn: Whether multi-turn conversation.
        agent_role: Agent role for logging.
        result_parts: Accumulated result parts (streaming passes accumulated,
            polling passes None to extract from task).
        endpoint: A2A agent endpoint URL.
        a2a_agent_name: Name of the A2A agent from agent card.
        from_task: Optional CrewAI Task for event metadata.
        from_agent: Optional CrewAI Agent for event metadata.
        is_final: Whether this is the final response in the stream.
    Returns:
        Result dictionary if terminal/actionable state, None otherwise.
    """
    if result_parts is None:
        result_parts = []
    # Completed: collect result text (streamed parts if given, otherwise
    # extracted from the task), append history, emit a completion event,
    # and return the final result.
    if a2a_task.status.state == TaskState.completed:
        if not result_parts:
            extracted_parts = extract_task_result_parts(a2a_task)
            result_parts.extend(extracted_parts)
        if a2a_task.history:
            new_messages.extend(a2a_task.history)
        # Result text is the space-joined concatenation of all text parts.
        response_text = " ".join(result_parts) if result_parts else ""
        message_id = None
        if a2a_task.status and a2a_task.status.message:
            message_id = a2a_task.status.message.message_id
        crewai_event_bus.emit(
            None,
            A2AResponseReceivedEvent(
                response=response_text,
                turn_number=turn_number,
                context_id=a2a_task.context_id,
                message_id=message_id,
                is_multiturn=is_multiturn,
                status="completed",
                final=is_final,
                agent_role=agent_role,
                endpoint=endpoint,
                a2a_agent_name=a2a_agent_name,
                from_task=from_task,
                from_agent=from_agent,
            ),
        )
        return TaskStateResult(
            status=TaskState.completed,
            agent_card=agent_card.model_dump(exclude_none=True),
            result=response_text,
            history=new_messages,
        )
    # input_required: the remote agent is asking for more input. The prompt
    # text is surfaced in the "error" field of the result.
    if a2a_task.status.state == TaskState.input_required:
        if a2a_task.history:
            new_messages.extend(a2a_task.history)
        response_text = extract_error_message(a2a_task, "Additional input required")
        # If the server sent no history, synthesize an agent message so the
        # prompt still lands in the collected conversation.
        if response_text and not a2a_task.history:
            agent_message = Message(
                role=Role.agent,
                message_id=str(uuid.uuid4()),
                parts=[Part(root=TextPart(text=response_text))],
                context_id=a2a_task.context_id,
                task_id=a2a_task.id,
            )
            new_messages.append(agent_message)
        input_message_id = None
        if a2a_task.status and a2a_task.status.message:
            input_message_id = a2a_task.status.message.message_id
        crewai_event_bus.emit(
            None,
            A2AResponseReceivedEvent(
                response=response_text,
                turn_number=turn_number,
                context_id=a2a_task.context_id,
                message_id=input_message_id,
                is_multiturn=is_multiturn,
                status="input_required",
                final=is_final,
                agent_role=agent_role,
                endpoint=endpoint,
                a2a_agent_name=a2a_agent_name,
                from_task=from_task,
                from_agent=from_agent,
            ),
        )
        return TaskStateResult(
            status=TaskState.input_required,
            error=response_text,
            history=new_messages,
            agent_card=agent_card.model_dump(exclude_none=True),
        )
    # NOTE(review): both failed and rejected are reported as
    # TaskState.failed in the result — confirm callers never need to
    # distinguish the two.
    if a2a_task.status.state in {TaskState.failed, TaskState.rejected}:
        error_msg = extract_error_message(a2a_task, "Task failed without error message")
        if a2a_task.history:
            new_messages.extend(a2a_task.history)
        return TaskStateResult(
            status=TaskState.failed,
            error=error_msg,
            history=new_messages,
        )
    if a2a_task.status.state == TaskState.auth_required:
        error_msg = extract_error_message(a2a_task, "Authentication required")
        return TaskStateResult(
            status=TaskState.auth_required,
            error=error_msg,
            history=new_messages,
        )
    if a2a_task.status.state == TaskState.canceled:
        error_msg = extract_error_message(a2a_task, "Task was canceled")
        return TaskStateResult(
            status=TaskState.canceled,
            error=error_msg,
            history=new_messages,
        )
    # Pending (submitted/working) — or any unrecognized state — yields None,
    # telling the caller to keep polling/streaming.
    if a2a_task.status.state in PENDING_STATES:
        return None
    return None
async def send_message_and_get_task_id(
    event_stream: AsyncIterator[SendMessageEvent],
    new_messages: list[Message],
    agent_card: AgentCard,
    turn_number: int,
    is_multiturn: bool,
    agent_role: str | None,
    from_task: Any | None = None,
    from_agent: Any | None = None,
    endpoint: str | None = None,
    a2a_agent_name: str | None = None,
    context_id: str | None = None,
) -> str | TaskStateResult:
    """Send message and process initial response.
    Handles the common pattern of sending a message and either:
    - Getting an immediate Message response (task completed synchronously)
    - Getting a Task that needs polling/waiting for completion
    Args:
        event_stream: Async iterator from client.send_message()
        new_messages: List to collect messages (modified in place)
        agent_card: The agent card
        turn_number: Current turn number
        is_multiturn: Whether multi-turn conversation
        agent_role: Agent role for logging
        from_task: Optional CrewAI Task object for event metadata.
        from_agent: Optional CrewAI Agent object for event metadata.
        endpoint: Optional A2A endpoint URL.
        a2a_agent_name: Optional A2A agent name.
        context_id: Optional A2A context ID for correlation.
    Returns:
        Task ID string if agent needs polling/waiting, or TaskStateResult if done.
    """
    try:
        # The stream yields either a final Message (synchronous completion)
        # or (Task, update) tuples when the remote agent created a task.
        async for event in event_stream:
            if isinstance(event, Message):
                # Synchronous completion: the agent replied directly without
                # creating a task. Record the message and return a result.
                new_messages.append(event)
                # Join only the text parts; non-text parts are ignored here.
                result_parts = [
                    part.root.text for part in event.parts if part.root.kind == "text"
                ]
                response_text = " ".join(result_parts) if result_parts else ""
                crewai_event_bus.emit(
                    None,
                    A2AResponseReceivedEvent(
                        response=response_text,
                        turn_number=turn_number,
                        context_id=event.context_id,
                        message_id=event.message_id,
                        is_multiturn=is_multiturn,
                        status="completed",
                        final=True,
                        agent_role=agent_role,
                        endpoint=endpoint,
                        a2a_agent_name=a2a_agent_name,
                        from_task=from_task,
                        from_agent=from_agent,
                    ),
                )
                return TaskStateResult(
                    status=TaskState.completed,
                    result=response_text,
                    history=new_messages,
                    agent_card=agent_card.model_dump(exclude_none=True),
                )
            if isinstance(event, tuple):
                # (Task, update) tuple: a task exists on the remote side.
                a2a_task, _ = event
                if a2a_task.status.state in TERMINAL_STATES | ACTIONABLE_STATES:
                    # Task already reached a terminal/actionable state;
                    # resolve it immediately instead of returning a task ID.
                    result = process_task_state(
                        a2a_task=a2a_task,
                        new_messages=new_messages,
                        agent_card=agent_card,
                        turn_number=turn_number,
                        is_multiturn=is_multiturn,
                        agent_role=agent_role,
                        endpoint=endpoint,
                        a2a_agent_name=a2a_agent_name,
                        from_task=from_task,
                        from_agent=from_agent,
                    )
                    if result:
                        return result
                # Task is pending: hand the ID back so the caller can
                # poll/stream/wait for completion.
                return a2a_task.id
        # Stream ended without yielding a Message or a Task — treat as failure.
        return TaskStateResult(
            status=TaskState.failed,
            error="No task ID received from initial message",
            history=new_messages,
        )
    except A2AClientHTTPError as e:
        # HTTP-level failure: record an agent-role error message in history
        # and emit both a connection-error and a failed-response event.
        error_msg = f"HTTP Error {e.status_code}: {e!s}"
        error_message = Message(
            role=Role.agent,
            message_id=str(uuid.uuid4()),
            parts=[Part(root=TextPart(text=error_msg))],
            context_id=context_id,
        )
        new_messages.append(error_message)
        crewai_event_bus.emit(
            None,
            A2AConnectionErrorEvent(
                endpoint=endpoint or "",
                error=str(e),
                error_type="http_error",
                status_code=e.status_code,
                a2a_agent_name=a2a_agent_name,
                operation="send_message",
                context_id=context_id,
                from_task=from_task,
                from_agent=from_agent,
            ),
        )
        crewai_event_bus.emit(
            None,
            A2AResponseReceivedEvent(
                response=error_msg,
                turn_number=turn_number,
                context_id=context_id,
                is_multiturn=is_multiturn,
                status="failed",
                final=True,
                agent_role=agent_role,
                endpoint=endpoint,
                a2a_agent_name=a2a_agent_name,
                from_task=from_task,
                from_agent=from_agent,
            ),
        )
        return TaskStateResult(
            status=TaskState.failed,
            error=error_msg,
            history=new_messages,
        )
    except Exception as e:
        # Catch-all boundary: any other failure is converted into a failed
        # TaskStateResult (same event pattern as the HTTP case).
        error_msg = f"Unexpected error during send_message: {e!s}"
        error_message = Message(
            role=Role.agent,
            message_id=str(uuid.uuid4()),
            parts=[Part(root=TextPart(text=error_msg))],
            context_id=context_id,
        )
        new_messages.append(error_message)
        crewai_event_bus.emit(
            None,
            A2AConnectionErrorEvent(
                endpoint=endpoint or "",
                error=str(e),
                error_type="unexpected_error",
                a2a_agent_name=a2a_agent_name,
                operation="send_message",
                context_id=context_id,
                from_task=from_task,
                from_agent=from_agent,
            ),
        )
        crewai_event_bus.emit(
            None,
            A2AResponseReceivedEvent(
                response=error_msg,
                turn_number=turn_number,
                context_id=context_id,
                is_multiturn=is_multiturn,
                status="failed",
                final=True,
                agent_role=agent_role,
                endpoint=endpoint,
                a2a_agent_name=a2a_agent_name,
                from_task=from_task,
                from_agent=from_agent,
            ),
        )
        return TaskStateResult(
            status=TaskState.failed,
            error=error_msg,
            history=new_messages,
        )
    finally:
        # Ensure the underlying stream is closed even on early return/raise.
        aclose = getattr(event_stream, "aclose", None)
        if aclose:
            await aclose()
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/a2a/task_helpers.py",
"license": "MIT License",
"lines": 417,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai/src/crewai/a2a/updates/base.py | """Base types for A2A update mechanism handlers."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, NamedTuple, Protocol, TypedDict
from pydantic import GetCoreSchemaHandler
from pydantic_core import CoreSchema, core_schema
class CommonParams(NamedTuple):
    """Common parameters shared across all update handlers.
    Groups the frequently-passed parameters to reduce duplication.
    """
    turn_number: int  # current turn number (0-based default; see extract_common_params)
    is_multiturn: bool  # whether this is part of a multi-turn conversation
    agent_role: str | None  # CrewAI agent role, used for events/logging
    endpoint: str  # A2A agent endpoint URL (required; validated by extract_common_params)
    a2a_agent_name: str | None  # name of the remote A2A agent
    context_id: str | None  # A2A context ID for correlation
    from_task: Any  # optional CrewAI Task object for event metadata
    from_agent: Any  # optional CrewAI Agent object for event metadata
if TYPE_CHECKING:
from a2a.client import Client
from a2a.types import AgentCard, Message, Task
from crewai.a2a.task_helpers import TaskStateResult
from crewai.a2a.updates.push_notifications.config import PushNotificationConfig
class BaseHandlerKwargs(TypedDict, total=False):
    """Base kwargs shared by all handlers.
    All keys are optional (``total=False``); handlers apply their own
    defaults via ``kwargs.get(...)`` or ``extract_common_params``.
    """
    turn_number: int
    is_multiturn: bool
    agent_role: str | None
    context_id: str | None
    task_id: str | None
    endpoint: str | None
    agent_branch: Any  # agent tree branch passed as the event-bus source
    a2a_agent_name: str | None
    from_task: Any  # CrewAI Task object for event metadata
    from_agent: Any  # CrewAI Agent object for event metadata
class PollingHandlerKwargs(BaseHandlerKwargs, total=False):
    """Kwargs for polling handler."""
    polling_interval: float  # seconds between poll attempts
    polling_timeout: float  # max seconds to poll before timing out
    history_length: int  # number of messages to retrieve per poll
    max_polls: int | None  # max poll attempts; None means unlimited
class StreamingHandlerKwargs(BaseHandlerKwargs, total=False):
    """Kwargs for streaming handler.
    Defines no extra keys; inherits the shared keys from BaseHandlerKwargs.
    """
class PushNotificationHandlerKwargs(BaseHandlerKwargs, total=False):
    """Kwargs for push notification handler."""
    config: PushNotificationConfig  # webhook configuration (handler fails without it)
    result_store: PushNotificationResultStore  # store the handler waits on for results
    polling_timeout: float  # max seconds to wait for the webhook result
    polling_interval: float  # seconds between result-store polling attempts
class PushNotificationResultStore(Protocol):
    """Protocol for storing and retrieving push notification results.
    This protocol defines the interface for a result store that the
    PushNotificationHandler uses to wait for task completion.
    """
    @classmethod
    def __get_pydantic_core_schema__(
        cls,
        _source_type: Any,
        _handler: GetCoreSchemaHandler,
    ) -> CoreSchema:
        # Accept any object when this Protocol is used as a pydantic field
        # type (e.g. PushNotificationConfig.result_store): pydantic performs
        # no validation on the store itself.
        return core_schema.any_schema()
    async def wait_for_result(
        self,
        task_id: str,
        timeout: float,
        poll_interval: float = 1.0,
    ) -> Task | None:
        """Wait for a task result to be available.
        Args:
            task_id: The task ID to wait for.
            timeout: Max seconds to wait before returning None.
            poll_interval: Seconds between polling attempts.
        Returns:
            The completed Task object, or None if timeout.
        """
        ...
    async def get_result(self, task_id: str) -> Task | None:
        """Get a task result if available.
        Args:
            task_id: The task ID to retrieve.
        Returns:
            The Task object if available, None otherwise.
        """
        ...
    async def store_result(self, task: Task) -> None:
        """Store a task result.
        Args:
            task: The Task object to store.
        """
        ...
class UpdateHandler(Protocol):
    """Protocol for A2A update mechanism handlers.
    Implementations (polling, streaming, push notifications) share this
    static ``execute`` signature so callers can select a mechanism uniformly.
    """
    @staticmethod
    async def execute(
        client: Client,
        message: Message,
        new_messages: list[Message],
        agent_card: AgentCard,
        **kwargs: Any,
    ) -> TaskStateResult:
        """Execute the update mechanism and return result.
        Args:
            client: A2A client instance.
            message: Message to send.
            new_messages: List to collect messages (modified in place).
            agent_card: The agent card.
            **kwargs: Additional handler-specific parameters.
        Returns:
            TaskStateResult with status, result/error, and history.
        """
        ...
def extract_common_params(kwargs: BaseHandlerKwargs) -> CommonParams:
    """Pull the shared handler parameters out of a kwargs mapping.

    Args:
        kwargs: Handler kwargs dict.

    Returns:
        CommonParams populated from kwargs, with defaults applied for
        missing optional keys.

    Raises:
        ValueError: If endpoint is not provided.
    """
    resolved_endpoint = kwargs.get("endpoint")
    if resolved_endpoint is None:
        raise ValueError("endpoint is required for update handlers")
    read = kwargs.get
    return CommonParams(
        turn_number=read("turn_number", 0),
        is_multiturn=read("is_multiturn", False),
        agent_role=read("agent_role"),
        endpoint=resolved_endpoint,
        a2a_agent_name=read("a2a_agent_name"),
        context_id=read("context_id"),
        from_task=read("from_task"),
        from_agent=read("from_agent"),
    )
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/a2a/updates/base.py",
"license": "MIT License",
"lines": 132,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai/src/crewai/a2a/updates/polling/config.py | """Polling update mechanism configuration."""
from __future__ import annotations
from pydantic import BaseModel, Field
class PollingConfig(BaseModel):
    """Configuration for polling-based task updates.
    Attributes:
        interval: Seconds between poll attempts.
        timeout: Max seconds to poll before raising timeout error.
        max_polls: Max number of poll attempts.
        history_length: Number of messages to retrieve per poll.
    """
    interval: float = Field(
        default=2.0, gt=0, description="Seconds between poll attempts"
    )
    # None leaves the time cap unset at the config level.
    timeout: float | None = Field(default=None, gt=0, description="Max seconds to poll")
    # None leaves the attempt cap unset at the config level.
    max_polls: int | None = Field(default=None, gt=0, description="Max poll attempts")
    history_length: int = Field(
        default=100, gt=0, description="Messages to retrieve per poll"
    )
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/a2a/updates/polling/config.py",
"license": "MIT License",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
crewAIInc/crewAI:lib/crewai/src/crewai/a2a/updates/polling/handler.py | """Polling update mechanism handler."""
from __future__ import annotations
import asyncio
import time
from typing import TYPE_CHECKING, Any
import uuid
from a2a.client import Client
from a2a.client.errors import A2AClientHTTPError
from a2a.types import (
AgentCard,
Message,
Part,
Role,
TaskQueryParams,
TaskState,
TextPart,
)
from typing_extensions import Unpack
from crewai.a2a.errors import A2APollingTimeoutError
from crewai.a2a.task_helpers import (
ACTIONABLE_STATES,
TERMINAL_STATES,
TaskStateResult,
process_task_state,
send_message_and_get_task_id,
)
from crewai.a2a.updates.base import PollingHandlerKwargs
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.a2a_events import (
A2AConnectionErrorEvent,
A2APollingStartedEvent,
A2APollingStatusEvent,
A2AResponseReceivedEvent,
)
if TYPE_CHECKING:
from a2a.types import Task as A2ATask
async def _poll_task_until_complete(
    client: Client,
    task_id: str,
    polling_interval: float,
    polling_timeout: float,
    agent_branch: Any | None = None,
    history_length: int = 100,
    max_polls: int | None = None,
    from_task: Any | None = None,
    from_agent: Any | None = None,
    context_id: str | None = None,
    endpoint: str | None = None,
    a2a_agent_name: str | None = None,
) -> A2ATask:
    """Repeatedly fetch a task until it reaches a terminal/actionable state.

    Emits an A2APollingStatusEvent after every fetch, then checks in order:
    terminal/actionable state, wall-clock timeout, and attempt cap.

    Args:
        client: A2A client instance.
        task_id: Task ID to poll.
        polling_interval: Seconds between poll attempts.
        polling_timeout: Max seconds before timeout.
        agent_branch: Agent tree branch for logging.
        history_length: Number of messages to retrieve per poll.
        max_polls: Max number of poll attempts (None = unlimited).
        from_task: Optional CrewAI Task object for event metadata.
        from_agent: Optional CrewAI Agent object for event metadata.
        context_id: A2A context ID for correlation.
        endpoint: A2A agent endpoint URL.
        a2a_agent_name: Name of the A2A agent from agent card.

    Returns:
        Final task object in terminal state.

    Raises:
        A2APollingTimeoutError: If polling exceeds timeout or max_polls.
    """
    started_at = time.monotonic()
    attempts = 0
    while True:
        attempts += 1
        query = TaskQueryParams(id=task_id, history_length=history_length)
        task = await client.get_task(query)
        waited = time.monotonic() - started_at
        # Prefer the context ID reported by the task, falling back to ours.
        status_event = A2APollingStatusEvent(
            task_id=task_id,
            context_id=task.context_id or context_id,
            state=str(task.status.state.value),
            elapsed_seconds=waited,
            poll_count=attempts,
            endpoint=endpoint,
            a2a_agent_name=a2a_agent_name,
            from_task=from_task,
            from_agent=from_agent,
        )
        crewai_event_bus.emit(agent_branch, status_event)
        if task.status.state in TERMINAL_STATES | ACTIONABLE_STATES:
            return task
        if waited > polling_timeout:
            raise A2APollingTimeoutError(
                f"Polling timeout after {polling_timeout}s ({attempts} polls)"
            )
        if max_polls and attempts >= max_polls:
            raise A2APollingTimeoutError(
                f"Max polls ({max_polls}) exceeded after {waited:.1f}s"
            )
        await asyncio.sleep(polling_interval)
class PollingHandler:
    """Polling-based update handler."""
    @staticmethod
    async def execute(
        client: Client,
        message: Message,
        new_messages: list[Message],
        agent_card: AgentCard,
        **kwargs: Unpack[PollingHandlerKwargs],
    ) -> TaskStateResult:
        """Execute A2A delegation using polling for updates.
        Args:
            client: A2A client instance.
            message: Message to send.
            new_messages: List to collect messages.
            agent_card: The agent card.
            **kwargs: Polling-specific parameters.
        Returns:
            TaskStateResult with status, result/error, and history.
        """
        # Resolve all optional kwargs up front with their defaults.
        polling_interval = kwargs.get("polling_interval", 2.0)
        polling_timeout = kwargs.get("polling_timeout", 300.0)
        endpoint = kwargs.get("endpoint", "")
        agent_branch = kwargs.get("agent_branch")
        turn_number = kwargs.get("turn_number", 0)
        is_multiturn = kwargs.get("is_multiturn", False)
        agent_role = kwargs.get("agent_role")
        history_length = kwargs.get("history_length", 100)
        max_polls = kwargs.get("max_polls")
        context_id = kwargs.get("context_id")
        task_id = kwargs.get("task_id")
        a2a_agent_name = kwargs.get("a2a_agent_name")
        from_task = kwargs.get("from_task")
        from_agent = kwargs.get("from_agent")
        try:
            # Send the initial message; may resolve immediately (result) or
            # yield a task ID that we must poll until completion.
            result_or_task_id = await send_message_and_get_task_id(
                event_stream=client.send_message(message),
                new_messages=new_messages,
                agent_card=agent_card,
                turn_number=turn_number,
                is_multiturn=is_multiturn,
                agent_role=agent_role,
                from_task=from_task,
                from_agent=from_agent,
                endpoint=endpoint,
                a2a_agent_name=a2a_agent_name,
                context_id=context_id,
            )
            if not isinstance(result_or_task_id, str):
                # Synchronous completion/failure: no polling needed.
                return result_or_task_id
            task_id = result_or_task_id
            crewai_event_bus.emit(
                agent_branch,
                A2APollingStartedEvent(
                    task_id=task_id,
                    context_id=context_id,
                    polling_interval=polling_interval,
                    endpoint=endpoint,
                    a2a_agent_name=a2a_agent_name,
                    from_task=from_task,
                    from_agent=from_agent,
                ),
            )
            # Block (with sleeps) until the task reaches a terminal or
            # actionable state, or a timeout/attempt cap is hit.
            final_task = await _poll_task_until_complete(
                client=client,
                task_id=task_id,
                polling_interval=polling_interval,
                polling_timeout=polling_timeout,
                agent_branch=agent_branch,
                history_length=history_length,
                max_polls=max_polls,
                from_task=from_task,
                from_agent=from_agent,
                context_id=context_id,
                endpoint=endpoint,
                a2a_agent_name=a2a_agent_name,
            )
            # Convert the final task state into a TaskStateResult.
            result = process_task_state(
                a2a_task=final_task,
                new_messages=new_messages,
                agent_card=agent_card,
                turn_number=turn_number,
                is_multiturn=is_multiturn,
                agent_role=agent_role,
                endpoint=endpoint,
                a2a_agent_name=a2a_agent_name,
                from_task=from_task,
                from_agent=from_agent,
            )
            if result:
                return result
            # process_task_state returned None for a state we consider done:
            # surface it as an explicit failure.
            return TaskStateResult(
                status=TaskState.failed,
                error=f"Unexpected task state: {final_task.status.state}",
                history=new_messages,
            )
        except A2APollingTimeoutError as e:
            # Timeout/attempt cap exceeded: record an error message in
            # history and emit a failed-response event.
            error_msg = str(e)
            error_message = Message(
                role=Role.agent,
                message_id=str(uuid.uuid4()),
                parts=[Part(root=TextPart(text=error_msg))],
                context_id=context_id,
                task_id=task_id,
            )
            new_messages.append(error_message)
            crewai_event_bus.emit(
                agent_branch,
                A2AResponseReceivedEvent(
                    response=error_msg,
                    turn_number=turn_number,
                    context_id=context_id,
                    is_multiturn=is_multiturn,
                    status="failed",
                    final=True,
                    agent_role=agent_role,
                    endpoint=endpoint,
                    a2a_agent_name=a2a_agent_name,
                    from_task=from_task,
                    from_agent=from_agent,
                ),
            )
            return TaskStateResult(
                status=TaskState.failed,
                error=error_msg,
                history=new_messages,
            )
        except A2AClientHTTPError as e:
            # HTTP-level failure: also emit a connection-error event before
            # the failed-response event.
            error_msg = f"HTTP Error {e.status_code}: {e!s}"
            error_message = Message(
                role=Role.agent,
                message_id=str(uuid.uuid4()),
                parts=[Part(root=TextPart(text=error_msg))],
                context_id=context_id,
                task_id=task_id,
            )
            new_messages.append(error_message)
            crewai_event_bus.emit(
                agent_branch,
                A2AConnectionErrorEvent(
                    endpoint=endpoint,
                    error=str(e),
                    error_type="http_error",
                    status_code=e.status_code,
                    a2a_agent_name=a2a_agent_name,
                    operation="polling",
                    context_id=context_id,
                    task_id=task_id,
                    from_task=from_task,
                    from_agent=from_agent,
                ),
            )
            crewai_event_bus.emit(
                agent_branch,
                A2AResponseReceivedEvent(
                    response=error_msg,
                    turn_number=turn_number,
                    context_id=context_id,
                    is_multiturn=is_multiturn,
                    status="failed",
                    final=True,
                    agent_role=agent_role,
                    endpoint=endpoint,
                    a2a_agent_name=a2a_agent_name,
                    from_task=from_task,
                    from_agent=from_agent,
                ),
            )
            return TaskStateResult(
                status=TaskState.failed,
                error=error_msg,
                history=new_messages,
            )
        except Exception as e:
            # Catch-all boundary: convert any other failure into a failed
            # TaskStateResult with the same event pattern as the HTTP case.
            error_msg = f"Unexpected error during polling: {e!s}"
            error_message = Message(
                role=Role.agent,
                message_id=str(uuid.uuid4()),
                parts=[Part(root=TextPart(text=error_msg))],
                context_id=context_id,
                task_id=task_id,
            )
            new_messages.append(error_message)
            crewai_event_bus.emit(
                agent_branch,
                A2AConnectionErrorEvent(
                    endpoint=endpoint,
                    error=str(e),
                    error_type="unexpected_error",
                    a2a_agent_name=a2a_agent_name,
                    operation="polling",
                    context_id=context_id,
                    task_id=task_id,
                    from_task=from_task,
                    from_agent=from_agent,
                ),
            )
            crewai_event_bus.emit(
                agent_branch,
                A2AResponseReceivedEvent(
                    response=error_msg,
                    turn_number=turn_number,
                    context_id=context_id,
                    is_multiturn=is_multiturn,
                    status="failed",
                    final=True,
                    agent_role=agent_role,
                    endpoint=endpoint,
                    a2a_agent_name=a2a_agent_name,
                    from_task=from_task,
                    from_agent=from_agent,
                ),
            )
            return TaskStateResult(
                status=TaskState.failed,
                error=error_msg,
                history=new_messages,
            )
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/a2a/updates/polling/handler.py",
"license": "MIT License",
"lines": 321,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai/src/crewai/a2a/updates/push_notifications/config.py | """Push notification update mechanism configuration."""
from __future__ import annotations
from typing import Annotated
from a2a.types import PushNotificationAuthenticationInfo
from pydantic import AnyHttpUrl, BaseModel, BeforeValidator, Field
from crewai.a2a.updates.base import PushNotificationResultStore
from crewai.a2a.updates.push_notifications.signature import WebhookSignatureConfig
def _coerce_signature(
value: str | WebhookSignatureConfig | None,
) -> WebhookSignatureConfig | None:
"""Convert string secret to WebhookSignatureConfig."""
if value is None:
return None
if isinstance(value, str):
return WebhookSignatureConfig.hmac_sha256(secret=value)
return value
# Annotated alias used by PushNotificationConfig.signature: accepts either a
# raw secret string or a full WebhookSignatureConfig; the BeforeValidator
# coerces strings via _coerce_signature before pydantic validation runs.
SignatureInput = Annotated[
    WebhookSignatureConfig | None,
    BeforeValidator(_coerce_signature),
]
class PushNotificationConfig(BaseModel):
    """Configuration for webhook-based task updates.
    Attributes:
        url: Callback URL where agent sends push notifications.
        id: Unique identifier for this config.
        token: Token to validate incoming notifications.
        authentication: Auth info for agent to use when calling webhook.
        timeout: Max seconds to wait for task completion.
        interval: Seconds between result polling attempts.
        result_store: Store for receiving push notification results.
        signature: HMAC signature config. Pass a string (secret) for defaults,
            or WebhookSignatureConfig for custom settings.
    """
    url: AnyHttpUrl = Field(description="Callback URL for push notifications")
    id: str | None = Field(default=None, description="Unique config identifier")
    token: str | None = Field(default=None, description="Validation token")
    authentication: PushNotificationAuthenticationInfo | None = Field(
        default=None, description="Auth info for agent to use when calling webhook"
    )
    timeout: float | None = Field(
        default=300.0, gt=0, description="Max seconds to wait for task completion"
    )
    interval: float = Field(
        default=2.0, gt=0, description="Seconds between result polling attempts"
    )
    # Typed via Protocol; accepted without validation (see
    # PushNotificationResultStore.__get_pydantic_core_schema__).
    result_store: PushNotificationResultStore | None = Field(
        default=None, description="Result store for push notification handling"
    )
    # SignatureInput coerces a bare secret string into a default
    # WebhookSignatureConfig before validation.
    signature: SignatureInput = Field(
        default=None,
        description="HMAC signature config. Pass a string (secret) for simple usage, "
        "or WebhookSignatureConfig for custom headers/tolerance.",
    )
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/a2a/updates/push_notifications/config.py",
"license": "MIT License",
"lines": 53,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai/src/crewai/a2a/updates/push_notifications/handler.py | """Push notification (webhook) update mechanism handler."""
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any
import uuid
from a2a.client import Client
from a2a.client.errors import A2AClientHTTPError
from a2a.types import (
AgentCard,
Message,
Part,
Role,
TaskState,
TextPart,
)
from typing_extensions import Unpack
from crewai.a2a.task_helpers import (
TaskStateResult,
process_task_state,
send_message_and_get_task_id,
)
from crewai.a2a.updates.base import (
CommonParams,
PushNotificationHandlerKwargs,
PushNotificationResultStore,
extract_common_params,
)
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.a2a_events import (
A2AConnectionErrorEvent,
A2APushNotificationRegisteredEvent,
A2APushNotificationTimeoutEvent,
A2AResponseReceivedEvent,
)
if TYPE_CHECKING:
from a2a.types import Task as A2ATask
logger = logging.getLogger(__name__)
def _handle_push_error(
    error: Exception,
    error_msg: str,
    error_type: str,
    new_messages: list[Message],
    agent_branch: Any | None,
    params: CommonParams,
    task_id: str | None,
    status_code: int | None = None,
) -> TaskStateResult:
    """Record a push-notification failure and emit the standard event pair.

    Appends an agent-role error message to the history, emits an
    A2AConnectionErrorEvent followed by a failed A2AResponseReceivedEvent,
    and returns a failed TaskStateResult.

    Args:
        error: The exception that occurred.
        error_msg: Formatted error message for the result.
        error_type: Type of error for the event.
        new_messages: List to append error message to.
        agent_branch: Agent tree branch for events.
        params: Common handler parameters.
        task_id: A2A task ID.
        status_code: HTTP status code if applicable.

    Returns:
        TaskStateResult with failed status.
    """
    failure_note = Message(
        role=Role.agent,
        message_id=str(uuid.uuid4()),
        parts=[Part(root=TextPart(text=error_msg))],
        context_id=params.context_id,
        task_id=task_id,
    )
    new_messages.append(failure_note)
    connection_event = A2AConnectionErrorEvent(
        endpoint=params.endpoint,
        error=str(error),
        error_type=error_type,
        status_code=status_code,
        a2a_agent_name=params.a2a_agent_name,
        operation="push_notification",
        context_id=params.context_id,
        task_id=task_id,
        from_task=params.from_task,
        from_agent=params.from_agent,
    )
    crewai_event_bus.emit(agent_branch, connection_event)
    response_event = A2AResponseReceivedEvent(
        response=error_msg,
        turn_number=params.turn_number,
        context_id=params.context_id,
        is_multiturn=params.is_multiturn,
        status="failed",
        final=True,
        agent_role=params.agent_role,
        endpoint=params.endpoint,
        a2a_agent_name=params.a2a_agent_name,
        from_task=params.from_task,
        from_agent=params.from_agent,
    )
    crewai_event_bus.emit(agent_branch, response_event)
    return TaskStateResult(
        status=TaskState.failed,
        error=error_msg,
        history=new_messages,
    )
async def _wait_for_push_result(
    task_id: str,
    result_store: PushNotificationResultStore,
    timeout: float,
    poll_interval: float,
    agent_branch: Any | None = None,
    from_task: Any | None = None,
    from_agent: Any | None = None,
    context_id: str | None = None,
    endpoint: str | None = None,
    a2a_agent_name: str | None = None,
) -> A2ATask | None:
    """Await a webhook-delivered task result from the result store.

    Delegates the waiting to ``result_store.wait_for_result``; when nothing
    arrives within the timeout, emits an A2APushNotificationTimeoutEvent.

    Args:
        task_id: Task ID to wait for.
        result_store: Store to retrieve results from.
        timeout: Max seconds to wait.
        poll_interval: Seconds between polling attempts.
        agent_branch: Agent tree branch for logging.
        from_task: Optional CrewAI Task object for event metadata.
        from_agent: Optional CrewAI Agent object for event metadata.
        context_id: A2A context ID for correlation.
        endpoint: A2A agent endpoint URL.
        a2a_agent_name: Name of the A2A agent.

    Returns:
        Final task object, or None if timeout.
    """
    outcome = await result_store.wait_for_result(
        task_id=task_id,
        timeout=timeout,
        poll_interval=poll_interval,
    )
    if outcome is not None:
        return outcome
    # Nothing arrived in time: announce the timeout before returning None.
    timeout_event = A2APushNotificationTimeoutEvent(
        task_id=task_id,
        context_id=context_id,
        timeout_seconds=timeout,
        endpoint=endpoint,
        a2a_agent_name=a2a_agent_name,
        from_task=from_task,
        from_agent=from_agent,
    )
    crewai_event_bus.emit(agent_branch, timeout_event)
    return None
class PushNotificationHandler:
    """Push notification (webhook) based update handler."""
    @staticmethod
    async def execute(
        client: Client,
        message: Message,
        new_messages: list[Message],
        agent_card: AgentCard,
        **kwargs: Unpack[PushNotificationHandlerKwargs],
    ) -> TaskStateResult:
        """Execute A2A delegation using push notifications for updates.
        Args:
            client: A2A client instance.
            message: Message to send.
            new_messages: List to collect messages.
            agent_card: The agent card.
            **kwargs: Push notification-specific parameters.
        Returns:
            TaskStateResult with status, result/error, and history.
        Raises:
            ValueError: If result_store or config not provided.
        """
        config = kwargs.get("config")
        result_store = kwargs.get("result_store")
        polling_timeout = kwargs.get("polling_timeout", 300.0)
        polling_interval = kwargs.get("polling_interval", 2.0)
        agent_branch = kwargs.get("agent_branch")
        task_id = kwargs.get("task_id")
        params = extract_common_params(kwargs)
        # Validate required configuration up front; missing pieces produce
        # a failed TaskStateResult (plus a configuration_error event) rather
        # than raising.
        if config is None:
            error_msg = (
                "PushNotificationConfig is required for push notification handler"
            )
            crewai_event_bus.emit(
                agent_branch,
                A2AConnectionErrorEvent(
                    endpoint=params.endpoint,
                    error=error_msg,
                    error_type="configuration_error",
                    a2a_agent_name=params.a2a_agent_name,
                    operation="push_notification",
                    context_id=params.context_id,
                    task_id=task_id,
                    from_task=params.from_task,
                    from_agent=params.from_agent,
                ),
            )
            return TaskStateResult(
                status=TaskState.failed,
                error=error_msg,
                history=new_messages,
            )
        if result_store is None:
            error_msg = (
                "PushNotificationResultStore is required for push notification handler"
            )
            crewai_event_bus.emit(
                agent_branch,
                A2AConnectionErrorEvent(
                    endpoint=params.endpoint,
                    error=error_msg,
                    error_type="configuration_error",
                    a2a_agent_name=params.a2a_agent_name,
                    operation="push_notification",
                    context_id=params.context_id,
                    task_id=task_id,
                    from_task=params.from_task,
                    from_agent=params.from_agent,
                ),
            )
            return TaskStateResult(
                status=TaskState.failed,
                error=error_msg,
                history=new_messages,
            )
        try:
            # Send the initial message; may resolve immediately (result) or
            # yield a task ID whose completion arrives via the webhook.
            result_or_task_id = await send_message_and_get_task_id(
                event_stream=client.send_message(message),
                new_messages=new_messages,
                agent_card=agent_card,
                turn_number=params.turn_number,
                is_multiturn=params.is_multiturn,
                agent_role=params.agent_role,
                from_task=params.from_task,
                from_agent=params.from_agent,
                endpoint=params.endpoint,
                a2a_agent_name=params.a2a_agent_name,
                context_id=params.context_id,
            )
            if not isinstance(result_or_task_id, str):
                # Synchronous completion/failure: no webhook wait needed.
                return result_or_task_id
            task_id = result_or_task_id
            crewai_event_bus.emit(
                agent_branch,
                A2APushNotificationRegisteredEvent(
                    task_id=task_id,
                    context_id=params.context_id,
                    callback_url=str(config.url),
                    endpoint=params.endpoint,
                    a2a_agent_name=params.a2a_agent_name,
                    from_task=params.from_task,
                    from_agent=params.from_agent,
                ),
            )
            logger.debug(
                "Push notification callback for task %s configured at %s (via initial request)",
                task_id,
                config.url,
            )
            # Block until the result store receives the webhook result (or
            # the timeout elapses, in which case None is returned).
            final_task = await _wait_for_push_result(
                task_id=task_id,
                result_store=result_store,
                timeout=polling_timeout,
                poll_interval=polling_interval,
                agent_branch=agent_branch,
                from_task=params.from_task,
                from_agent=params.from_agent,
                context_id=params.context_id,
                endpoint=params.endpoint,
                a2a_agent_name=params.a2a_agent_name,
            )
            if final_task is None:
                return TaskStateResult(
                    status=TaskState.failed,
                    error=f"Push notification timeout after {polling_timeout}s",
                    history=new_messages,
                )
            # Convert the final task state into a TaskStateResult.
            result = process_task_state(
                a2a_task=final_task,
                new_messages=new_messages,
                agent_card=agent_card,
                turn_number=params.turn_number,
                is_multiturn=params.is_multiturn,
                agent_role=params.agent_role,
                endpoint=params.endpoint,
                a2a_agent_name=params.a2a_agent_name,
                from_task=params.from_task,
                from_agent=params.from_agent,
            )
            if result:
                return result
            # process_task_state returned None for a state we consider done:
            # surface it as an explicit failure.
            return TaskStateResult(
                status=TaskState.failed,
                error=f"Unexpected task state: {final_task.status.state}",
                history=new_messages,
            )
        except A2AClientHTTPError as e:
            return _handle_push_error(
                error=e,
                error_msg=f"HTTP Error {e.status_code}: {e!s}",
                error_type="http_error",
                new_messages=new_messages,
                agent_branch=agent_branch,
                params=params,
                task_id=task_id,
                status_code=e.status_code,
            )
        except Exception as e:
            return _handle_push_error(
                error=e,
                error_msg=f"Unexpected error during push notification: {e!s}",
                error_type="unexpected_error",
                new_messages=new_messages,
                agent_branch=agent_branch,
                params=params,
                task_id=task_id,
            )
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/a2a/updates/push_notifications/handler.py",
"license": "MIT License",
"lines": 317,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai/src/crewai/a2a/updates/streaming/config.py | """Streaming update mechanism configuration."""
from __future__ import annotations
from pydantic import BaseModel
class StreamingConfig(BaseModel):
    """Configuration for SSE-based task updates.

    Currently defines no fields; present so streaming can be selected and
    configured uniformly alongside the other update mechanisms.
    """
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/a2a/updates/streaming/config.py",
"license": "MIT License",
"lines": 5,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai/src/crewai/a2a/updates/streaming/handler.py | """Streaming (SSE) update mechanism handler."""
from __future__ import annotations
import asyncio
import logging
from typing import Final
import uuid
from a2a.client import Client
from a2a.client.errors import A2AClientHTTPError
from a2a.types import (
AgentCard,
Message,
Part,
Role,
Task,
TaskArtifactUpdateEvent,
TaskIdParams,
TaskQueryParams,
TaskState,
TaskStatusUpdateEvent,
TextPart,
)
from typing_extensions import Unpack
from crewai.a2a.task_helpers import (
ACTIONABLE_STATES,
TERMINAL_STATES,
TaskStateResult,
process_task_state,
)
from crewai.a2a.updates.base import StreamingHandlerKwargs, extract_common_params
from crewai.a2a.updates.streaming.params import (
process_status_update,
)
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.a2a_events import (
A2AArtifactReceivedEvent,
A2AConnectionErrorEvent,
A2AResponseReceivedEvent,
A2AStreamingChunkEvent,
A2AStreamingStartedEvent,
)
logger = logging.getLogger(__name__)
MAX_RESUBSCRIBE_ATTEMPTS: Final[int] = 3
RESUBSCRIBE_BACKOFF_BASE: Final[float] = 1.0
class StreamingHandler:
"""SSE streaming-based update handler."""
@staticmethod
async def _try_recover_from_interruption( # type: ignore[misc]
client: Client,
task_id: str,
new_messages: list[Message],
agent_card: AgentCard,
result_parts: list[str],
**kwargs: Unpack[StreamingHandlerKwargs],
) -> TaskStateResult | None:
"""Attempt to recover from a stream interruption by checking task state.
If the task completed while we were disconnected, returns the result.
If the task is still running, attempts to resubscribe and continue.
Args:
client: A2A client instance.
task_id: The task ID to recover.
new_messages: List of collected messages.
agent_card: The agent card.
result_parts: Accumulated result text parts.
**kwargs: Handler parameters.
Returns:
TaskStateResult if recovery succeeded (task finished or resubscribe worked).
None if recovery not possible (caller should handle failure).
Note:
When None is returned, recovery failed and the original exception should
be handled by the caller. All recovery attempts are logged.
"""
params = extract_common_params(kwargs) # type: ignore[arg-type]
try:
a2a_task: Task = await client.get_task(TaskQueryParams(id=task_id))
if a2a_task.status.state in TERMINAL_STATES:
logger.info(
"Task completed during stream interruption",
extra={"task_id": task_id, "state": str(a2a_task.status.state)},
)
return process_task_state(
a2a_task=a2a_task,
new_messages=new_messages,
agent_card=agent_card,
turn_number=params.turn_number,
is_multiturn=params.is_multiturn,
agent_role=params.agent_role,
result_parts=result_parts,
endpoint=params.endpoint,
a2a_agent_name=params.a2a_agent_name,
from_task=params.from_task,
from_agent=params.from_agent,
)
if a2a_task.status.state in ACTIONABLE_STATES:
logger.info(
"Task in actionable state during stream interruption",
extra={"task_id": task_id, "state": str(a2a_task.status.state)},
)
return process_task_state(
a2a_task=a2a_task,
new_messages=new_messages,
agent_card=agent_card,
turn_number=params.turn_number,
is_multiturn=params.is_multiturn,
agent_role=params.agent_role,
result_parts=result_parts,
endpoint=params.endpoint,
a2a_agent_name=params.a2a_agent_name,
from_task=params.from_task,
from_agent=params.from_agent,
is_final=False,
)
logger.info(
"Task still running, attempting resubscribe",
extra={"task_id": task_id, "state": str(a2a_task.status.state)},
)
for attempt in range(MAX_RESUBSCRIBE_ATTEMPTS):
try:
backoff = RESUBSCRIBE_BACKOFF_BASE * (2**attempt)
if attempt > 0:
await asyncio.sleep(backoff)
event_stream = client.resubscribe(TaskIdParams(id=task_id))
async for event in event_stream:
if isinstance(event, tuple):
resubscribed_task, update = event
is_final_update = (
process_status_update(update, result_parts)
if isinstance(update, TaskStatusUpdateEvent)
else False
)
if isinstance(update, TaskArtifactUpdateEvent):
artifact = update.artifact
result_parts.extend(
part.root.text
for part in artifact.parts
if part.root.kind == "text"
)
if (
is_final_update
or resubscribed_task.status.state
in TERMINAL_STATES | ACTIONABLE_STATES
):
return process_task_state(
a2a_task=resubscribed_task,
new_messages=new_messages,
agent_card=agent_card,
turn_number=params.turn_number,
is_multiturn=params.is_multiturn,
agent_role=params.agent_role,
result_parts=result_parts,
endpoint=params.endpoint,
a2a_agent_name=params.a2a_agent_name,
from_task=params.from_task,
from_agent=params.from_agent,
is_final=is_final_update,
)
elif isinstance(event, Message):
new_messages.append(event)
result_parts.extend(
part.root.text
for part in event.parts
if part.root.kind == "text"
)
final_task = await client.get_task(TaskQueryParams(id=task_id))
return process_task_state(
a2a_task=final_task,
new_messages=new_messages,
agent_card=agent_card,
turn_number=params.turn_number,
is_multiturn=params.is_multiturn,
agent_role=params.agent_role,
result_parts=result_parts,
endpoint=params.endpoint,
a2a_agent_name=params.a2a_agent_name,
from_task=params.from_task,
from_agent=params.from_agent,
)
except Exception as resubscribe_error: # noqa: PERF203
logger.warning(
"Resubscribe attempt failed",
extra={
"task_id": task_id,
"attempt": attempt + 1,
"max_attempts": MAX_RESUBSCRIBE_ATTEMPTS,
"error": str(resubscribe_error),
},
)
if attempt == MAX_RESUBSCRIBE_ATTEMPTS - 1:
return None
except Exception as e:
logger.warning(
"Failed to recover from stream interruption due to unexpected error",
extra={
"task_id": task_id,
"error": str(e),
"error_type": type(e).__name__,
},
exc_info=True,
)
return None
logger.warning(
"Recovery exhausted all resubscribe attempts without success",
extra={"task_id": task_id, "max_attempts": MAX_RESUBSCRIBE_ATTEMPTS},
)
return None
@staticmethod
async def execute(
    client: Client,
    message: Message,
    new_messages: list[Message],
    agent_card: AgentCard,
    **kwargs: Unpack[StreamingHandlerKwargs],
) -> TaskStateResult:
    """Execute A2A delegation using SSE streaming for updates.

    Consumes the event stream returned by ``client.send_message``, emitting
    CrewAI bus events for the streaming lifecycle (started, text chunks,
    artifacts) as they arrive. On connection-level failures it first attempts
    recovery via ``_try_recover_from_interruption`` before reporting failure.

    Args:
        client: A2A client instance.
        message: Message to send.
        new_messages: List to collect messages.
        agent_card: The agent card.
        **kwargs: Streaming-specific parameters.

    Returns:
        Dictionary with status, result/error, and history.
    """
    task_id = kwargs.get("task_id")
    agent_branch = kwargs.get("agent_branch")
    params = extract_common_params(kwargs)
    result_parts: list[str] = []  # accumulated text fragments of the final answer
    final_result: TaskStateResult | None = None
    event_stream = client.send_message(message)
    chunk_index = 0  # monotonically increasing index for emitted text chunks
    # Track the task id observed on the stream so recovery can target it even
    # when the caller did not supply one up front.
    current_task_id: str | None = task_id
    crewai_event_bus.emit(
        agent_branch,
        A2AStreamingStartedEvent(
            task_id=task_id,
            context_id=params.context_id,
            endpoint=params.endpoint,
            a2a_agent_name=params.a2a_agent_name,
            turn_number=params.turn_number,
            is_multiturn=params.is_multiturn,
            agent_role=params.agent_role,
            from_task=params.from_task,
            from_agent=params.from_agent,
        ),
    )
    try:
        async for event in event_stream:
            # Tuple events are (Task, update) pairs; remember the task id in
            # case the connection drops and we need to recover.
            if isinstance(event, tuple):
                a2a_task, _ = event
                current_task_id = a2a_task.id
            if isinstance(event, Message):
                new_messages.append(event)
                message_context_id = event.context_id or params.context_id
                # Emit one chunk event per text part of the message.
                for part in event.parts:
                    if part.root.kind == "text":
                        text = part.root.text
                        result_parts.append(text)
                        crewai_event_bus.emit(
                            agent_branch,
                            A2AStreamingChunkEvent(
                                task_id=event.task_id or task_id,
                                context_id=message_context_id,
                                chunk=text,
                                chunk_index=chunk_index,
                                endpoint=params.endpoint,
                                a2a_agent_name=params.a2a_agent_name,
                                turn_number=params.turn_number,
                                is_multiturn=params.is_multiturn,
                                from_task=params.from_task,
                                from_agent=params.from_agent,
                            ),
                        )
                        chunk_index += 1
            elif isinstance(event, tuple):
                a2a_task, update = event
                if isinstance(update, TaskArtifactUpdateEvent):
                    artifact = update.artifact
                    result_parts.extend(
                        part.root.text
                        for part in artifact.parts
                        if part.root.kind == "text"
                    )
                    artifact_size = None
                    if artifact.parts:
                        # Approximate payload size: UTF-8 byte length for text
                        # parts, raw data length otherwise.
                        artifact_size = sum(
                            len(p.root.text.encode())
                            if p.root.kind == "text"
                            else len(getattr(p.root, "data", b""))
                            for p in artifact.parts
                        )
                    effective_context_id = a2a_task.context_id or params.context_id
                    crewai_event_bus.emit(
                        agent_branch,
                        A2AArtifactReceivedEvent(
                            task_id=a2a_task.id,
                            artifact_id=artifact.artifact_id,
                            artifact_name=artifact.name,
                            artifact_description=artifact.description,
                            mime_type=artifact.parts[0].root.kind
                            if artifact.parts
                            else None,
                            size_bytes=artifact_size,
                            append=update.append or False,
                            last_chunk=update.last_chunk or False,
                            endpoint=params.endpoint,
                            a2a_agent_name=params.a2a_agent_name,
                            context_id=effective_context_id,
                            turn_number=params.turn_number,
                            is_multiturn=params.is_multiturn,
                            from_task=params.from_task,
                            from_agent=params.from_agent,
                        ),
                    )
                is_final_update = (
                    process_status_update(update, result_parts)
                    if isinstance(update, TaskStatusUpdateEvent)
                    else False
                )
                # Keep consuming the stream until the update is final or the
                # task reaches a terminal/actionable state.
                if (
                    not is_final_update
                    and a2a_task.status.state
                    not in TERMINAL_STATES | ACTIONABLE_STATES
                ):
                    continue
                final_result = process_task_state(
                    a2a_task=a2a_task,
                    new_messages=new_messages,
                    agent_card=agent_card,
                    turn_number=params.turn_number,
                    is_multiturn=params.is_multiturn,
                    agent_role=params.agent_role,
                    result_parts=result_parts,
                    endpoint=params.endpoint,
                    a2a_agent_name=params.a2a_agent_name,
                    from_task=params.from_task,
                    from_agent=params.from_agent,
                    is_final=is_final_update,
                )
                if final_result:
                    break
    except A2AClientHTTPError as e:
        # Attempt recovery first: the task may have finished server-side even
        # though our stream connection died.
        if current_task_id:
            logger.info(
                "Stream interrupted with HTTP error, attempting recovery",
                extra={
                    "task_id": current_task_id,
                    "error": str(e),
                    "status_code": e.status_code,
                },
            )
            # Drop task_id from kwargs: it is passed explicitly below.
            recovery_kwargs = {k: v for k, v in kwargs.items() if k != "task_id"}
            recovered_result = (
                await StreamingHandler._try_recover_from_interruption(
                    client=client,
                    task_id=current_task_id,
                    new_messages=new_messages,
                    agent_card=agent_card,
                    result_parts=result_parts,
                    **recovery_kwargs,
                )
            )
            if recovered_result:
                logger.info(
                    "Successfully recovered task after HTTP error",
                    extra={
                        "task_id": current_task_id,
                        "status": str(recovered_result.get("status")),
                    },
                )
                return recovered_result
            logger.warning(
                "Failed to recover from HTTP error, returning failure",
                extra={
                    "task_id": current_task_id,
                    "status_code": e.status_code,
                    "original_error": str(e),
                },
            )
        # Recovery failed (or no task id known): record the error in history
        # and emit failure events before returning a failed result.
        error_msg = f"HTTP Error {e.status_code}: {e!s}"
        error_type = "http_error"
        status_code = e.status_code
        error_message = Message(
            role=Role.agent,
            message_id=str(uuid.uuid4()),
            parts=[Part(root=TextPart(text=error_msg))],
            context_id=params.context_id,
            task_id=task_id,
        )
        new_messages.append(error_message)
        crewai_event_bus.emit(
            agent_branch,
            A2AConnectionErrorEvent(
                endpoint=params.endpoint,
                error=str(e),
                error_type=error_type,
                status_code=status_code,
                a2a_agent_name=params.a2a_agent_name,
                operation="streaming",
                context_id=params.context_id,
                task_id=task_id,
                from_task=params.from_task,
                from_agent=params.from_agent,
            ),
        )
        crewai_event_bus.emit(
            agent_branch,
            A2AResponseReceivedEvent(
                response=error_msg,
                turn_number=params.turn_number,
                context_id=params.context_id,
                is_multiturn=params.is_multiturn,
                status="failed",
                final=True,
                agent_role=params.agent_role,
                endpoint=params.endpoint,
                a2a_agent_name=params.a2a_agent_name,
                from_task=params.from_task,
                from_agent=params.from_agent,
            ),
        )
        return TaskStateResult(
            status=TaskState.failed,
            error=error_msg,
            history=new_messages,
        )
    except (asyncio.TimeoutError, asyncio.CancelledError, ConnectionError) as e:
        # Same recovery-then-fail flow as above, for transport-level errors.
        error_type = type(e).__name__.lower()
        if current_task_id:
            logger.info(
                f"Stream interrupted with {error_type}, attempting recovery",
                extra={"task_id": current_task_id, "error": str(e)},
            )
            recovery_kwargs = {k: v for k, v in kwargs.items() if k != "task_id"}
            recovered_result = (
                await StreamingHandler._try_recover_from_interruption(
                    client=client,
                    task_id=current_task_id,
                    new_messages=new_messages,
                    agent_card=agent_card,
                    result_parts=result_parts,
                    **recovery_kwargs,
                )
            )
            if recovered_result:
                logger.info(
                    f"Successfully recovered task after {error_type}",
                    extra={
                        "task_id": current_task_id,
                        "status": str(recovered_result.get("status")),
                    },
                )
                return recovered_result
            logger.warning(
                f"Failed to recover from {error_type}, returning failure",
                extra={
                    "task_id": current_task_id,
                    "error_type": error_type,
                    "original_error": str(e),
                },
            )
        error_msg = f"Connection error during streaming: {e!s}"
        status_code = None
        error_message = Message(
            role=Role.agent,
            message_id=str(uuid.uuid4()),
            parts=[Part(root=TextPart(text=error_msg))],
            context_id=params.context_id,
            task_id=task_id,
        )
        new_messages.append(error_message)
        crewai_event_bus.emit(
            agent_branch,
            A2AConnectionErrorEvent(
                endpoint=params.endpoint,
                error=str(e),
                error_type=error_type,
                status_code=status_code,
                a2a_agent_name=params.a2a_agent_name,
                operation="streaming",
                context_id=params.context_id,
                task_id=task_id,
                from_task=params.from_task,
                from_agent=params.from_agent,
            ),
        )
        crewai_event_bus.emit(
            agent_branch,
            A2AResponseReceivedEvent(
                response=error_msg,
                turn_number=params.turn_number,
                context_id=params.context_id,
                is_multiturn=params.is_multiturn,
                status="failed",
                final=True,
                agent_role=params.agent_role,
                endpoint=params.endpoint,
                a2a_agent_name=params.a2a_agent_name,
                from_task=params.from_task,
                from_agent=params.from_agent,
            ),
        )
        return TaskStateResult(
            status=TaskState.failed,
            error=error_msg,
            history=new_messages,
        )
    except Exception as e:
        # Catch-all boundary: no recovery attempted for unknown errors; log
        # with traceback and fail the task.
        logger.exception(
            "Unexpected error during streaming",
            extra={
                "task_id": current_task_id,
                "error_type": type(e).__name__,
                "endpoint": params.endpoint,
            },
        )
        error_msg = f"Unexpected error during streaming: {type(e).__name__}: {e!s}"
        error_type = "unexpected_error"
        status_code = None
        error_message = Message(
            role=Role.agent,
            message_id=str(uuid.uuid4()),
            parts=[Part(root=TextPart(text=error_msg))],
            context_id=params.context_id,
            task_id=task_id,
        )
        new_messages.append(error_message)
        crewai_event_bus.emit(
            agent_branch,
            A2AConnectionErrorEvent(
                endpoint=params.endpoint,
                error=str(e),
                error_type=error_type,
                status_code=status_code,
                a2a_agent_name=params.a2a_agent_name,
                operation="streaming",
                context_id=params.context_id,
                task_id=task_id,
                from_task=params.from_task,
                from_agent=params.from_agent,
            ),
        )
        crewai_event_bus.emit(
            agent_branch,
            A2AResponseReceivedEvent(
                response=error_msg,
                turn_number=params.turn_number,
                context_id=params.context_id,
                is_multiturn=params.is_multiturn,
                status="failed",
                final=True,
                agent_role=params.agent_role,
                endpoint=params.endpoint,
                a2a_agent_name=params.a2a_agent_name,
                from_task=params.from_task,
                from_agent=params.from_agent,
            ),
        )
        return TaskStateResult(
            status=TaskState.failed,
            error=error_msg,
            history=new_messages,
        )
    finally:
        # Always close the SSE generator; surface close failures as events
        # rather than letting them mask the primary outcome.
        aclose = getattr(event_stream, "aclose", None)
        if aclose:
            try:
                await aclose()
            except Exception as close_error:
                crewai_event_bus.emit(
                    agent_branch,
                    A2AConnectionErrorEvent(
                        endpoint=params.endpoint,
                        error=str(close_error),
                        error_type="stream_close_error",
                        a2a_agent_name=params.a2a_agent_name,
                        operation="stream_close",
                        context_id=params.context_id,
                        task_id=task_id,
                        from_task=params.from_task,
                        from_agent=params.from_agent,
                    ),
                )
    if final_result:
        return final_result
    # Stream ended without a processed terminal state: treat as completed and
    # join whatever text was collected.
    return TaskStateResult(
        status=TaskState.completed,
        result=" ".join(result_parts) if result_parts else "",
        history=new_messages,
        agent_card=agent_card.model_dump(exclude_none=True),
    )
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/a2a/updates/streaming/handler.py",
"license": "MIT License",
"lines": 589,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai/tests/a2a/test_a2a_integration.py | from __future__ import annotations
import os
import uuid
import pytest
import pytest_asyncio
from a2a.client import ClientFactory
from a2a.types import AgentCard, Message, Part, Role, TaskState, TextPart
from crewai.a2a.updates.polling.handler import PollingHandler
from crewai.a2a.updates.streaming.handler import StreamingHandler
A2A_TEST_ENDPOINT = os.getenv("A2A_TEST_ENDPOINT", "http://localhost:9999")
@pytest_asyncio.fixture
async def a2a_client():
    """Create A2A client for test server.

    Yields:
        A connected A2A client; the connection is closed on fixture teardown.
    """
    client = await ClientFactory.connect(A2A_TEST_ENDPOINT)
    yield client
    # Teardown: release the underlying connection after the test finishes.
    await client.close()
@pytest.fixture
def test_message() -> Message:
    """Build a minimal user message asking a simple arithmetic question."""
    question = Part(root=TextPart(text="What is 2 + 2?"))
    return Message(
        role=Role.user,
        parts=[question],
        message_id=str(uuid.uuid4()),
    )
@pytest_asyncio.fixture
async def agent_card(a2a_client) -> AgentCard:
    """Retrieve the live agent card exposed by the test server."""
    card = await a2a_client.get_card()
    return card
class TestA2AAgentCardFetching:
    """Integration tests for agent card fetching."""

    @pytest.mark.vcr()
    @pytest.mark.asyncio
    async def test_fetch_agent_card(self, a2a_client) -> None:
        """The server's agent card is retrievable and advertises streaming."""
        fetched = await a2a_client.get_card()
        assert fetched is not None
        assert fetched.name == "GPT Assistant"
        assert fetched.url is not None
        caps = fetched.capabilities
        assert caps is not None
        assert caps.streaming is True
class TestA2APollingIntegration:
    """Integration tests for A2A polling handler."""

    @pytest.mark.vcr()
    @pytest.mark.asyncio
    async def test_polling_completes_task(
        self,
        a2a_client,
        test_message: Message,
        agent_card: AgentCard,
    ) -> None:
        """The polling handler drives a task to completion and returns its answer."""
        collected: list[Message] = []
        outcome = await PollingHandler.execute(
            client=a2a_client,
            message=test_message,
            new_messages=collected,
            agent_card=agent_card,
            polling_interval=0.5,
            polling_timeout=30.0,
        )
        assert isinstance(outcome, dict)
        assert outcome["status"] == TaskState.completed
        answer = outcome.get("result")
        assert answer is not None
        assert "4" in outcome["result"]
class TestA2AStreamingIntegration:
    """Integration tests for A2A streaming handler."""

    @pytest.mark.vcr()
    @pytest.mark.asyncio
    async def test_streaming_completes_task(
        self,
        a2a_client,
        test_message: Message,
        agent_card: AgentCard,
    ) -> None:
        """The streaming handler drives a task to completion with a result."""
        collected: list[Message] = []
        outcome = await StreamingHandler.execute(
            client=a2a_client,
            message=test_message,
            new_messages=collected,
            agent_card=agent_card,
            endpoint=agent_card.url,
        )
        assert isinstance(outcome, dict)
        assert outcome["status"] == TaskState.completed
        assert outcome.get("result") is not None
class TestA2ATaskOperations:
    """Integration tests for task operations."""

    @pytest.mark.vcr()
    @pytest.mark.asyncio
    async def test_send_message_and_get_response(
        self,
        a2a_client,
        test_message: Message,
    ) -> None:
        """Test sending a message and getting a response.

        Iterates the raw event stream, keeping the last Task observed, then
        verifies it reached the completed state.
        """
        from a2a.types import Task

        final_task: Task | None = None
        async for event in a2a_client.send_message(test_message):
            # Guard on the exact arity we unpack: the stream yields
            # (task, update) pairs. The previous `len(event) >= 1` check
            # would let a 1-tuple through and raise ValueError on unpack.
            if isinstance(event, tuple) and len(event) == 2:
                task, _ = event
                if isinstance(task, Task):
                    final_task = task
        assert final_task is not None
        assert final_task.id is not None
        assert final_task.status is not None
        assert final_task.status.state == TaskState.completed
class TestA2APushNotificationHandler:
    """Tests for push notification handler.

    These tests use mocks for the result store since webhook callbacks
    are incoming requests that can't be recorded with VCR.
    """

    @pytest.fixture
    def mock_agent_card(self) -> AgentCard:
        """Create a minimal valid agent card for testing."""
        from a2a.types import AgentCapabilities

        return AgentCard(
            name="Test Agent",
            description="Test agent for push notification tests",
            url="http://localhost:9999",
            version="1.0.0",
            # push_notifications must be advertised for the handler to engage.
            capabilities=AgentCapabilities(streaming=True, push_notifications=True),
            default_input_modes=["text"],
            default_output_modes=["text"],
            skills=[],
        )

    @pytest.fixture
    def mock_task(self) -> "Task":
        """Create a minimal valid task for testing."""
        from a2a.types import Task, TaskStatus

        # Task starts in the working state; tests decide how it "finishes".
        return Task(
            id="task-123",
            context_id="ctx-123",
            status=TaskStatus(state=TaskState.working),
        )

    @pytest.mark.asyncio
    async def test_push_handler_waits_for_result(
        self,
        mock_agent_card: AgentCard,
        mock_task,
    ) -> None:
        """Test that push handler waits for result from store."""
        from unittest.mock import AsyncMock, MagicMock

        from a2a.types import Task, TaskStatus
        from pydantic import AnyHttpUrl

        from crewai.a2a.updates.push_notifications.config import PushNotificationConfig
        from crewai.a2a.updates.push_notifications.handler import PushNotificationHandler

        # The task the result store "receives" via the webhook callback.
        completed_task = Task(
            id="task-123",
            context_id="ctx-123",
            status=TaskStatus(state=TaskState.completed),
            history=[],
        )
        mock_store = MagicMock()
        mock_store.wait_for_result = AsyncMock(return_value=completed_task)

        # send_message is an async generator yielding a (task, update) pair.
        async def mock_send_message(*args, **kwargs):
            yield (mock_task, None)

        mock_client = MagicMock()
        mock_client.send_message = mock_send_message

        config = PushNotificationConfig(
            url=AnyHttpUrl("http://localhost:8080/a2a/callback"),
            token="secret-token",
            result_store=mock_store,
        )

        test_msg = Message(
            role=Role.user,
            parts=[Part(root=TextPart(text="What is 2+2?"))],
            message_id="msg-001",
        )

        new_messages: list[Message] = []
        result = await PushNotificationHandler.execute(
            client=mock_client,
            message=test_msg,
            new_messages=new_messages,
            agent_card=mock_agent_card,
            config=config,
            result_store=mock_store,
            polling_timeout=30.0,
            polling_interval=1.0,
            endpoint=mock_agent_card.url,
        )

        # The handler must wait on the store with the timeout/interval given.
        mock_store.wait_for_result.assert_called_once_with(
            task_id="task-123",
            timeout=30.0,
            poll_interval=1.0,
        )
        assert result["status"] == TaskState.completed

    @pytest.mark.asyncio
    async def test_push_handler_returns_failure_on_timeout(
        self,
        mock_agent_card: AgentCard,
    ) -> None:
        """Test that push handler returns failure when result store times out."""
        from unittest.mock import AsyncMock, MagicMock

        from a2a.types import Task, TaskStatus
        from pydantic import AnyHttpUrl

        from crewai.a2a.updates.push_notifications.config import PushNotificationConfig
        from crewai.a2a.updates.push_notifications.handler import PushNotificationHandler

        # A None result from the store models a webhook that never arrived.
        mock_store = MagicMock()
        mock_store.wait_for_result = AsyncMock(return_value=None)

        working_task = Task(
            id="task-456",
            context_id="ctx-456",
            status=TaskStatus(state=TaskState.working),
        )

        async def mock_send_message(*args, **kwargs):
            yield (working_task, None)

        mock_client = MagicMock()
        mock_client.send_message = mock_send_message

        config = PushNotificationConfig(
            url=AnyHttpUrl("http://localhost:8080/a2a/callback"),
            token="token",
            result_store=mock_store,
        )

        test_msg = Message(
            role=Role.user,
            parts=[Part(root=TextPart(text="test"))],
            message_id="msg-002",
        )

        new_messages: list[Message] = []
        result = await PushNotificationHandler.execute(
            client=mock_client,
            message=test_msg,
            new_messages=new_messages,
            agent_card=mock_agent_card,
            config=config,
            result_store=mock_store,
            polling_timeout=5.0,
            polling_interval=0.5,
            endpoint=mock_agent_card.url,
        )

        assert result["status"] == TaskState.failed
        assert "timeout" in result.get("error", "").lower()

    @pytest.mark.asyncio
    async def test_push_handler_requires_config(
        self,
        mock_agent_card: AgentCard,
    ) -> None:
        """Test that push handler fails gracefully without config."""
        from unittest.mock import MagicMock

        from crewai.a2a.updates.push_notifications.handler import PushNotificationHandler

        mock_client = MagicMock()

        test_msg = Message(
            role=Role.user,
            parts=[Part(root=TextPart(text="test"))],
            message_id="msg-003",
        )

        new_messages: list[Message] = []
        # No config/result_store supplied: the handler is expected to fail
        # with an error that mentions the missing config.
        result = await PushNotificationHandler.execute(
            client=mock_client,
            message=test_msg,
            new_messages=new_messages,
            agent_card=mock_agent_card,
            endpoint=mock_agent_card.url,
        )

        assert result["status"] == TaskState.failed
        assert "config" in result.get("error", "").lower()
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/a2a/test_a2a_integration.py",
"license": "MIT License",
"lines": 257,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/src/crewai/flow/flow_config.py | """Global Flow configuration.
This module provides a singleton configuration object that can be used to
customize Flow behavior at runtime.
"""
from __future__ import annotations
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from crewai.flow.async_feedback.types import HumanFeedbackProvider
from crewai.flow.input_provider import InputProvider
class FlowConfig:
    """Global configuration for Flow execution.

    Attributes:
        hitl_provider: The human-in-the-loop feedback provider.
            Defaults to None (uses console input).
            Can be overridden by deployments at startup.
        input_provider: The input provider used by ``Flow.ask()``.
            Defaults to None (uses ``ConsoleProvider``).
            Can be overridden by deployments at startup.
    """

    def __init__(self) -> None:
        # Both providers default to None, meaning console-based fallbacks are
        # used until a deployment overrides them at startup.
        self._hitl_provider: HumanFeedbackProvider | None = None
        self._input_provider: InputProvider | None = None

    @property
    def hitl_provider(self) -> HumanFeedbackProvider | None:
        """Get the configured HITL provider, or None for the console default."""
        # Precise annotation (was `Any`): annotations are lazily evaluated via
        # `from __future__ import annotations`, so the TYPE_CHECKING-only
        # import is safe at runtime.
        return self._hitl_provider

    @hitl_provider.setter
    def hitl_provider(self, provider: HumanFeedbackProvider | None) -> None:
        """Set the HITL provider."""
        self._hitl_provider = provider

    @property
    def input_provider(self) -> InputProvider | None:
        """Get the configured input provider for ``Flow.ask()``.

        Returns:
            The configured InputProvider instance, or None if not set
            (in which case ``ConsoleInputProvider`` is used as default).
        """
        return self._input_provider

    @input_provider.setter
    def input_provider(self, provider: InputProvider | None) -> None:
        """Set the input provider for ``Flow.ask()``.

        Args:
            provider: An object implementing the ``InputProvider`` protocol.

        Example:
            ```python
            from crewai.flow import flow_config

            flow_config.input_provider = WebSocketInputProvider(...)
            ```
        """
        self._input_provider = provider
# Singleton instance shared process-wide; import and mutate this object
# (e.g. set `flow_config.input_provider`) to customize Flow behavior at runtime.
flow_config = FlowConfig()
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/flow/flow_config.py",
"license": "MIT License",
"lines": 53,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
crewAIInc/crewAI:lib/crewai/tests/llms/test_tool_call_streaming.py | """Tests for tool call streaming events across LLM providers.
These tests verify that when streaming is enabled and the LLM makes a tool call,
the stream chunk events include proper tool call information with
call_type=LLMCallType.TOOL_CALL.
"""
from typing import Any
from unittest.mock import MagicMock, patch
import pytest
from crewai.events.types.llm_events import LLMCallType, LLMStreamChunkEvent, ToolCall
from crewai.llm import LLM
@pytest.fixture
def get_temperature_tool_schema() -> dict[str, Any]:
    """Create a temperature tool schema for native function calling."""
    city_param = {
        "type": "string",
        "description": "The name of the city to get the temperature for.",
    }
    parameters = {
        "type": "object",
        "properties": {"city": city_param},
        "required": ["city"],
    }
    return {
        "type": "function",
        "function": {
            "name": "get_current_temperature",
            "description": "Get the current temperature in a city.",
            "parameters": parameters,
        },
    }
@pytest.fixture
def mock_emit() -> MagicMock:
    """Mock the event bus emit function.

    Yields:
        The patched ``CrewAIEventsBus.emit`` mock; inspect its
        ``call_args_list`` to recover the events emitted during a test.
    """
    from crewai.events.event_bus import CrewAIEventsBus

    # Patch at the class level so every bus instance created during the test
    # records its emits on the same mock.
    with patch.object(CrewAIEventsBus, "emit") as mock:
        yield mock
def get_tool_call_events(mock_emit: MagicMock) -> list[LLMStreamChunkEvent]:
    """Extract tool call streaming events from mock emit calls."""

    def _event_of(recorded) -> object | None:
        # emit(...) receives the event as a keyword argument; recorded[1]
        # is the kwargs mapping of a mock call record.
        return recorded[1].get("event") if len(recorded) > 1 else None

    return [
        evt
        for evt in (_event_of(c) for c in mock_emit.call_args_list)
        if isinstance(evt, LLMStreamChunkEvent)
        and evt.call_type == LLMCallType.TOOL_CALL
    ]
def get_all_stream_events(mock_emit: MagicMock) -> list[LLMStreamChunkEvent]:
    """Extract all streaming events from mock emit calls."""
    candidates = (
        recorded[1].get("event") if len(recorded) > 1 else None
        for recorded in mock_emit.call_args_list
    )
    # Keep only genuine stream-chunk events, preserving emission order.
    return [evt for evt in candidates if isinstance(evt, LLMStreamChunkEvent)]
class TestOpenAIToolCallStreaming:
    """Tests for OpenAI provider tool call streaming events."""

    @pytest.mark.vcr()
    def test_openai_streaming_emits_tool_call_events(
        self, get_temperature_tool_schema: dict[str, Any], mock_emit: MagicMock
    ) -> None:
        """Test that OpenAI streaming emits tool call events with correct call_type."""
        model = LLM(model="openai/gpt-4o-mini", stream=True)
        handlers = {
            "get_current_temperature": lambda city: f"The temperature in {city} is 72°F"
        }
        model.call(
            messages=[
                {"role": "user", "content": "What is the temperature in San Francisco?"},
            ],
            tools=[get_temperature_tool_schema],
            available_functions=handlers,
        )

        events = get_tool_call_events(mock_emit)
        assert len(events) > 0, "Should receive tool call streaming events"

        first = events[0]
        assert first.call_type == LLMCallType.TOOL_CALL
        tool_call = first.tool_call
        assert tool_call is not None
        assert isinstance(tool_call, ToolCall)
        assert tool_call.function is not None
        assert tool_call.function.name == "get_current_temperature"
        assert tool_call.type == "function"
        assert tool_call.index >= 0
class TestToolCallStreamingEventStructure:
    """Tests for the structure and content of tool call streaming events."""

    @pytest.mark.vcr()
    def test_tool_call_event_accumulates_arguments(
        self, get_temperature_tool_schema: dict[str, Any], mock_emit: MagicMock
    ) -> None:
        """Test that tool call events accumulate arguments progressively."""
        model = LLM(model="openai/gpt-4o-mini", stream=True)
        model.call(
            messages=[
                {"role": "user", "content": "What is the temperature in San Francisco?"},
            ],
            tools=[get_temperature_tool_schema],
            available_functions={
                "get_current_temperature": lambda city: f"The temperature in {city} is 72°F"
            },
        )

        events = get_tool_call_events(mock_emit)
        assert len(events) >= 2, "Should receive multiple tool call streaming events"
        for event in events:
            assert event.tool_call is not None
            assert event.tool_call.function is not None

    @pytest.mark.vcr()
    def test_tool_call_events_have_consistent_tool_id(
        self, get_temperature_tool_schema: dict[str, Any], mock_emit: MagicMock
    ) -> None:
        """Test that all events for the same tool call have the same tool ID."""
        model = LLM(model="openai/gpt-4o-mini", stream=True)
        model.call(
            messages=[
                {"role": "user", "content": "What is the temperature in San Francisco?"},
            ],
            tools=[get_temperature_tool_schema],
            available_functions={
                "get_current_temperature": lambda city: f"The temperature in {city} is 72°F"
            },
        )

        events = get_tool_call_events(mock_emit)
        assert len(events) >= 1, "Should receive tool call streaming events"
        if len(events) > 1:
            # Group events by the tool call index they belong to.
            grouped: dict[int, list[LLMStreamChunkEvent]] = {}
            for event in events:
                if event.tool_call is not None:
                    grouped.setdefault(event.tool_call.index, []).append(event)
            for idx, group in grouped.items():
                observed_ids = [
                    e.tool_call.id
                    for e in group
                    if e.tool_call is not None and e.tool_call.id
                ]
                if observed_ids:
                    assert len(set(observed_ids)) == 1, f"Tool call ID should be consistent for index {idx}"
class TestMixedStreamingEvents:
    """Tests for scenarios with both text and tool call streaming events."""

    @pytest.mark.vcr()
    def test_streaming_distinguishes_text_and_tool_calls(
        self, get_temperature_tool_schema: dict[str, Any], mock_emit: MagicMock
    ) -> None:
        """Test that streaming correctly distinguishes between text chunks and tool calls."""
        model = LLM(model="openai/gpt-4o-mini", stream=True)
        model.call(
            messages=[
                {"role": "user", "content": "What is the temperature in San Francisco?"},
            ],
            tools=[get_temperature_tool_schema],
            available_functions={
                "get_current_temperature": lambda city: f"The temperature in {city} is 72°F"
            },
        )

        every_event = get_all_stream_events(mock_emit)
        tool_events = get_tool_call_events(mock_emit)

        assert len(every_event) >= 1, "Should receive streaming events"
        # Every event classified as a tool call must carry tool call payload.
        for evt in tool_events:
            assert evt.call_type == LLMCallType.TOOL_CALL
            assert evt.tool_call is not None
class TestGeminiToolCallStreaming:
    """Tests for Gemini provider tool call streaming events."""

    @pytest.mark.vcr()
    def test_gemini_streaming_emits_tool_call_events(
        self, get_temperature_tool_schema: dict[str, Any], mock_emit: MagicMock
    ) -> None:
        """Test that Gemini streaming emits tool call events with correct call_type."""
        model = LLM(model="gemini/gemini-2.0-flash", stream=True)
        model.call(
            messages=[
                {"role": "user", "content": "What is the temperature in San Francisco?"},
            ],
            tools=[get_temperature_tool_schema],
            available_functions={
                "get_current_temperature": lambda city: f"The temperature in {city} is 72°F"
            },
        )

        events = get_tool_call_events(mock_emit)
        assert len(events) > 0, "Should receive tool call streaming events"

        first = events[0]
        assert first.call_type == LLMCallType.TOOL_CALL
        tool_call = first.tool_call
        assert tool_call is not None
        assert isinstance(tool_call, ToolCall)
        assert tool_call.function is not None
        assert tool_call.function.name == "get_current_temperature"
        assert tool_call.type == "function"

    @pytest.mark.vcr()
    def test_gemini_streaming_multiple_tool_calls_unique_ids(
        self, get_temperature_tool_schema: dict[str, Any], mock_emit: MagicMock
    ) -> None:
        """Test that Gemini streaming assigns unique IDs to multiple tool calls."""
        model = LLM(model="gemini/gemini-2.0-flash", stream=True)
        model.call(
            messages=[
                {"role": "user", "content": "What is the temperature in Paris and London?"},
            ],
            tools=[get_temperature_tool_schema],
            available_functions={
                "get_current_temperature": lambda city: f"The temperature in {city} is 72°F"
            },
        )

        events = get_tool_call_events(mock_emit)
        assert len(events) >= 2, "Should receive at least 2 tool call events"
        unique_ids = {
            evt.tool_call.id
            for evt in events
            if evt.tool_call is not None and evt.tool_call.id
        }
        assert len(unique_ids) >= 2, "Each tool call should have a unique ID"
class TestAzureToolCallStreaming:
    """Tests for Azure provider tool call streaming events."""

    @pytest.mark.vcr()
    def test_azure_streaming_emits_tool_call_events(
        self, get_temperature_tool_schema: dict[str, Any], mock_emit: MagicMock
    ) -> None:
        """Test that Azure streaming emits tool call events with correct call_type."""
        model = LLM(model="azure/gpt-4o-mini", stream=True)
        model.call(
            messages=[
                {"role": "user", "content": "What is the temperature in San Francisco?"},
            ],
            tools=[get_temperature_tool_schema],
            available_functions={
                "get_current_temperature": lambda city: f"The temperature in {city} is 72°F"
            },
        )

        events = get_tool_call_events(mock_emit)
        assert len(events) > 0, "Should receive tool call streaming events"

        first = events[0]
        assert first.call_type == LLMCallType.TOOL_CALL
        tool_call = first.tool_call
        assert tool_call is not None
        assert isinstance(tool_call, ToolCall)
        assert tool_call.function is not None
        assert tool_call.function.name == "get_current_temperature"
        assert tool_call.type == "function"
class TestAnthropicToolCallStreaming:
    """Tool call streaming event tests for the Anthropic provider."""

    @pytest.mark.vcr()
    def test_anthropic_streaming_emits_tool_call_events(
        self, get_temperature_tool_schema: dict[str, Any], mock_emit: MagicMock
    ) -> None:
        """Anthropic streaming should emit tool call events typed as TOOL_CALL."""
        streaming_llm = LLM(model="anthropic/claude-3-5-haiku-latest", stream=True)
        streaming_llm.call(
            messages=[
                {"role": "user", "content": "What is the temperature in San Francisco?"},
            ],
            tools=[get_temperature_tool_schema],
            available_functions={
                "get_current_temperature": lambda city: f"The temperature in {city} is 72°F"
            },
        )

        events = get_tool_call_events(mock_emit)
        assert len(events) > 0, "Should receive tool call streaming events"

        first_event = events[0]
        assert first_event.call_type == LLMCallType.TOOL_CALL
        assert first_event.tool_call is not None
        assert isinstance(first_event.tool_call, ToolCall)
        assert first_event.tool_call.function is not None
        assert first_event.tool_call.function.name == "get_current_temperature"
        assert first_event.tool_call.type == "function"
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/llms/test_tool_call_streaming.py",
"license": "MIT License",
"lines": 258,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/src/crewai/flow/async_feedback/providers.py | """Default provider implementations for human feedback and user input.
This module provides the ConsoleProvider, which is the default synchronous
provider that collects both feedback (for ``@human_feedback``) and user input
(for ``Flow.ask()``) via console.
"""
from __future__ import annotations
from typing import TYPE_CHECKING, Any
from crewai.flow.async_feedback.types import PendingFeedbackContext
if TYPE_CHECKING:
from crewai.flow.flow import Flow
class ConsoleProvider:
    """Blocking console implementation of feedback and input collection.

    Waits for keyboard input and fulfils two roles:

    - **Feedback** (``request_feedback``): invoked by ``@human_feedback``
      to display a method's output and gather review comments.
    - **Input** (``request_input``): invoked by ``Flow.ask()`` to pose a
      question and read the answer.

    This is the provider used whenever no custom provider is configured on
    the ``@human_feedback`` decorator or on the Flow's ``input_provider``.

    Example (feedback):
        ```python
        from crewai.flow.async_feedback import ConsoleProvider

        @human_feedback(
            message="Review this:",
            provider=ConsoleProvider(),
        )
        def my_method(self):
            return "Content to review"
        ```

    Example (input):
        ```python
        from crewai.flow import Flow, start

        class MyFlow(Flow):
            @start()
            def gather_info(self):
                topic = self.ask("What topic should we research?")
                return topic
        ```
    """

    def __init__(self, verbose: bool = True) -> None:
        """Create the provider.

        Args:
            verbose: When True, render the decorated output panel; when
                False, only the bare prompt is shown.
        """
        self.verbose = verbose

    def request_feedback(
        self,
        context: PendingFeedbackContext,
        flow: Flow[Any],
    ) -> str:
        """Collect review feedback from the console (blocking).

        Shows the method output (framed when verbose), emits the
        requested/received events, and reads a single line of feedback.
        Pressing Enter without typing anything skips and yields "".

        Args:
            context: Pending feedback details (output, message, routing).
            flow: The Flow instance, used when emitting events.

        Returns:
            The feedback text, stripped; empty string when skipped.
        """
        from crewai.events.event_bus import crewai_event_bus
        from crewai.events.event_listener import event_listener
        from crewai.events.types.flow_events import (
            HumanFeedbackReceivedEvent,
            HumanFeedbackRequestedEvent,
        )

        crewai_event_bus.emit(
            flow,
            HumanFeedbackRequestedEvent(
                type="human_feedback_requested",
                flow_name=flow.name or flow.__class__.__name__,
                method_name=context.method_name,
                output=context.method_output,
                message=context.message,
                emit=context.emit,
            ),
        )

        # Live rendering would clobber the prompt; stop it while we read.
        ui = event_listener.formatter
        ui.pause_live_updates()
        try:
            out = ui.console
            if self.verbose:
                divider = "═" * 50
                out.print("\n" + divider, style="bold cyan")
                out.print(" OUTPUT FOR REVIEW", style="bold cyan")
                out.print(divider + "\n", style="bold cyan")
                out.print(context.method_output)
                out.print("\n" + divider + "\n", style="bold cyan")

            out.print(context.message, style="yellow")
            out.print(
                "(Press Enter to skip, or type your feedback)\n", style="cyan"
            )

            reply = input("Your feedback: ").strip()

            crewai_event_bus.emit(
                flow,
                HumanFeedbackReceivedEvent(
                    type="human_feedback_received",
                    flow_name=flow.name or flow.__class__.__name__,
                    method_name=context.method_name,
                    feedback=reply,
                    outcome=None,  # Outcome is resolved later by collapsing.
                ),
            )
            return reply
        finally:
            ui.resume_live_updates()

    def request_input(
        self,
        message: str,
        flow: Flow[Any],
        metadata: dict[str, Any] | None = None,
    ) -> str | None:
        """Prompt the user with a question and read the answer (blocking).

        Backs ``Flow.ask()``. Unlike ``request_feedback`` this renders no
        "OUTPUT FOR REVIEW" panel and emits no feedback events — ``ask()``
        itself handles those.

        Args:
            message: The question or prompt to display.
            flow: The Flow instance requesting input.
            metadata: Caller-supplied metadata; ignored by the console
                provider (console has no concept of user routing).

        Returns:
            The stripped response (may be empty when the user just presses
            Enter). Never None for the console provider.
        """
        from crewai.events.event_listener import event_listener

        # Pause live rendering while blocked on input().
        ui = event_listener.formatter
        ui.pause_live_updates()
        try:
            out = ui.console
            if self.verbose:
                out.print()
                out.print(message, style="yellow")
                out.print()
                answer = input(">>> \n").strip()
            else:
                answer = input(f"{message} ").strip()
            # Blank line so subsequent formatter output starts clean.
            out.print()
            return answer
        finally:
            ui.resume_live_updates()
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/flow/async_feedback/providers.py",
"license": "MIT License",
"lines": 153,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
crewAIInc/crewAI:lib/crewai/src/crewai/flow/async_feedback/types.py | """Core types for async human feedback in Flows.
This module defines the protocol, exception, and context types used for
non-blocking human-in-the-loop workflows.
"""
from __future__ import annotations
from dataclasses import dataclass, field
from datetime import datetime
from typing import TYPE_CHECKING, Any, Protocol, runtime_checkable
if TYPE_CHECKING:
from crewai.flow.flow import Flow
@dataclass
class PendingFeedbackContext:
    """Snapshot of everything required to resume a flow paused for feedback.

    Stored when a flow suspends for asynchronous human feedback so that the
    framework can later (1) identify the waiting flow execution, (2) know
    which method asked for feedback, (3) replay what the human was shown,
    and (4) route the response once it arrives.

    Attributes:
        flow_id: Unique identifier of the flow instance (from state.id).
        flow_class: Fully qualified class name (e.g. "myapp.flows.ReviewFlow").
        method_name: Method that triggered the feedback request.
        method_output: Output that was presented to the human for review.
        message: Prompt shown when requesting feedback.
        emit: Optional outcome strings used for routing.
        default_outcome: Outcome applied when no feedback is given.
        metadata: Optional data for external-system integration.
        llm: LLM model string used to collapse feedback to an outcome.
        requested_at: Timestamp of the feedback request.

    Example:
        ```python
        context = PendingFeedbackContext(
            flow_id="abc-123",
            flow_class="myapp.ReviewFlow",
            method_name="review_content",
            method_output={"title": "Draft", "body": "..."},
            message="Please review and approve or reject:",
            emit=["approved", "rejected"],
            llm="gpt-4o-mini",
        )
        ```
    """

    flow_id: str
    flow_class: str
    method_name: str
    method_output: Any
    message: str
    emit: list[str] | None = None
    default_outcome: str | None = None
    metadata: dict[str, Any] = field(default_factory=dict)
    llm: str | None = None
    requested_at: datetime = field(default_factory=datetime.now)

    def to_dict(self) -> dict[str, Any]:
        """Serialize the context to a JSON-friendly dictionary.

        Returns:
            Dictionary representation suitable for persistence; the
            timestamp is converted to an ISO-8601 string.
        """
        payload: dict[str, Any] = {
            name: getattr(self, name)
            for name in (
                "flow_id",
                "flow_class",
                "method_name",
                "method_output",
                "message",
                "emit",
                "default_outcome",
                "metadata",
                "llm",
            )
        }
        payload["requested_at"] = self.requested_at.isoformat()
        return payload

    @classmethod
    def from_dict(cls, data: dict[str, Any]) -> "PendingFeedbackContext":
        """Rebuild a context from its dictionary form.

        Args:
            data: Dictionary produced by :meth:`to_dict` (or equivalent).

        Returns:
            A reconstructed PendingFeedbackContext instance.
        """
        raw_ts = data.get("requested_at")
        if isinstance(raw_ts, str):
            ts = datetime.fromisoformat(raw_ts)
        else:
            # Keep an already-parsed datetime as-is; default missing to now.
            ts = raw_ts if raw_ts is not None else datetime.now()
        return cls(
            flow_id=data["flow_id"],
            flow_class=data["flow_class"],
            method_name=data["method_name"],
            method_output=data.get("method_output"),
            message=data.get("message", ""),
            emit=data.get("emit"),
            default_outcome=data.get("default_outcome"),
            metadata=data.get("metadata", {}),
            llm=data.get("llm"),
            requested_at=ts,
        )
class HumanFeedbackPending(Exception):  # noqa: N818 - control-flow signal, not an error
    """Signal raised by a provider to pause a flow for async feedback.

    When a provider raises this, the flow framework:
    1. Stops execution at the current method
    2. Automatically persists state and context (if persistence is configured)
    3. Returns this object to the caller instead of re-raising it

    Callers therefore receive it as the return value of ``flow.kickoff()``
    and can branch without try/except:

    ```python
    result = flow.kickoff()
    if isinstance(result, HumanFeedbackPending):
        # Flow is paused, handle async feedback
        print(f"Waiting for feedback: {result.context.flow_id}")
    else:
        # Normal completion
        print(f"Flow completed: {result}")
    ```

    Note:
        Persistence is handled by the framework when this is raised.
        Providers must NOT call ``save_pending_feedback`` themselves —
        raising the exception is enough.

    Attributes:
        context: PendingFeedbackContext with everything needed to resume.
        callback_info: Optional routing details for external systems
            (e.g. webhook URL, ticket ID, Slack thread ID).

    Example:
        ```python
        class SlackProvider(HumanFeedbackProvider):
            def request_feedback(self, context, flow):
                # Send notification to external system
                ticket_id = self.create_slack_thread(context)

                # Raise to pause - framework handles persistence automatically
                raise HumanFeedbackPending(
                    context=context,
                    callback_info={
                        "slack_channel": "#reviews",
                        "thread_id": ticket_id,
                    },
                )
        ```
    """

    def __init__(
        self,
        context: PendingFeedbackContext,
        callback_info: dict[str, Any] | None = None,
        message: str | None = None,
    ):
        """Build the pause signal.

        Args:
            context: Pending feedback context with flow details.
            callback_info: Optional data for external-system callbacks.
            message: Optional custom message; a descriptive default is
                derived from the context when omitted.
        """
        self.context = context
        self.callback_info = callback_info or {}
        default_msg = (
            f"Human feedback pending for flow '{context.flow_id}' "
            f"at method '{context.method_name}'"
        )
        super().__init__(default_msg if message is None else message)
@runtime_checkable
class HumanFeedbackProvider(Protocol):
    """Structural interface for human feedback collection strategies.

    Implement this protocol to plug feedback collection into external
    systems (Slack, Teams, email, custom APIs). Two styles are possible:

    - **Synchronous (blocking)**: return the feedback string directly.
    - **Asynchronous (non-blocking)**: raise ``HumanFeedbackPending`` to
      pause the flow.

    The bundled ConsoleProvider is synchronous. For async workflows an
    implementation only needs to (1) notify the external system and
    (2) raise ``HumanFeedbackPending`` — state persistence is handled by
    the framework when the exception is raised.

    Example synchronous provider:
        ```python
        class ConsoleProvider(HumanFeedbackProvider):
            def request_feedback(self, context, flow):
                print(context.method_output)
                return input("Your feedback: ")
        ```

    Example async provider:
        ```python
        class SlackProvider(HumanFeedbackProvider):
            def __init__(self, channel: str):
                self.channel = channel

            def request_feedback(self, context, flow):
                thread_id = self.post_to_slack(
                    channel=self.channel,
                    message=context.message,
                    content=context.method_output,
                )
                raise HumanFeedbackPending(
                    context=context,
                    callback_info={
                        "channel": self.channel,
                        "thread_id": thread_id,
                    },
                )
        ```
    """

    def request_feedback(
        self,
        context: PendingFeedbackContext,
        flow: Flow[Any],
    ) -> str:
        """Collect feedback from a human.

        Synchronous providers block and return the feedback text. Async
        providers notify their external system and raise
        ``HumanFeedbackPending`` to pause the flow.

        Args:
            context: Pending feedback context describing what feedback is
                needed and how the response should be routed.
            flow: The Flow instance, giving access to state and name.

        Returns:
            The human's feedback string (synchronous providers only).

        Raises:
            HumanFeedbackPending: To pause the flow until external
                feedback arrives; the framework persists state when this
                is raised.
        """
        ...
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/flow/async_feedback/types.py",
"license": "MIT License",
"lines": 221,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
crewAIInc/crewAI:lib/crewai/src/crewai/flow/human_feedback.py | """Human feedback decorator for Flow methods.
This module provides the @human_feedback decorator that enables human-in-the-loop
workflows within CrewAI Flows. It allows collecting human feedback on method outputs
and optionally routing to different listeners based on the feedback.
Supports both synchronous (blocking) and asynchronous (non-blocking) feedback
collection through the provider parameter.
Example (synchronous, default):
```python
from crewai.flow import Flow, start, listen, human_feedback
class ReviewFlow(Flow):
@start()
@human_feedback(
message="Please review this content:",
emit=["approved", "rejected"],
llm="gpt-4o-mini",
)
def generate_content(self):
return {"title": "Article", "body": "Content..."}
@listen("approved")
def publish(self):
result = self.human_feedback
print(f"Publishing: {result.output}")
```
Example (asynchronous with custom provider):
```python
from crewai.flow import Flow, start, human_feedback
from crewai.flow.async_feedback import HumanFeedbackProvider, HumanFeedbackPending
class SlackProvider(HumanFeedbackProvider):
def request_feedback(self, context, flow):
self.send_notification(context)
raise HumanFeedbackPending(context=context)
class ReviewFlow(Flow):
@start()
@human_feedback(
message="Review this:",
emit=["approved", "rejected"],
llm="gpt-4o-mini",
provider=SlackProvider(),
)
def generate_content(self):
return "Content..."
```
"""
from __future__ import annotations
import asyncio
from collections.abc import Callable, Sequence
from dataclasses import dataclass, field
from datetime import datetime
from functools import wraps
from typing import TYPE_CHECKING, Any, TypeVar
from pydantic import BaseModel, Field
from crewai.flow.flow_wrappers import FlowMethod
if TYPE_CHECKING:
from crewai.flow.async_feedback.types import HumanFeedbackProvider
from crewai.flow.flow import Flow
from crewai.llms.base_llm import BaseLLM
F = TypeVar("F", bound=Callable[..., Any])
@dataclass
class HumanFeedbackResult:
"""Result from a @human_feedback decorated method.
This dataclass captures all information about a human feedback interaction,
including the original method output, the human's feedback, and any
collapsed outcome for routing purposes.
Attributes:
output: The original return value from the decorated method that was
shown to the human for review.
feedback: The raw text feedback provided by the human. Empty string
if no feedback was provided.
outcome: The collapsed outcome string when emit is specified.
This is determined by the LLM based on the human's feedback.
None if emit was not specified.
timestamp: When the feedback was received.
method_name: The name of the decorated method that triggered feedback.
metadata: Optional metadata for enterprise integrations. Can be used
to pass additional context like channel, assignee, etc.
Example:
```python
@listen("approved")
def handle_approval(self):
result = self.human_feedback
print(f"Output: {result.output}")
print(f"Feedback: {result.feedback}")
print(f"Outcome: {result.outcome}") # "approved"
```
"""
output: Any
feedback: str
outcome: str | None = None
timestamp: datetime = field(default_factory=datetime.now)
method_name: str = ""
metadata: dict[str, Any] = field(default_factory=dict)
@dataclass
class HumanFeedbackConfig:
    """Captured parameters of a ``@human_feedback`` decoration.

    Kept on the wrapped method so execution and visualization tooling can
    introspect how feedback should be collected.

    Attributes:
        message: Prompt shown to the human when requesting feedback.
        emit: Optional outcome strings used for routing.
        llm: Model (string or BaseLLM) used to collapse feedback to an outcome.
        default_outcome: Outcome applied when no feedback is given.
        metadata: Optional data for enterprise integrations.
        provider: Optional custom provider for async workflows.
        learn: Whether HITL lesson learning is enabled for this method.
        learn_source: Memory source tag for stored/recalled HITL lessons.
    """

    message: str
    emit: Sequence[str] | None = None
    llm: str | BaseLLM | None = "gpt-4o-mini"
    default_outcome: str | None = None
    metadata: dict[str, Any] | None = None
    provider: HumanFeedbackProvider | None = None
    learn: bool = False
    learn_source: str = "hitl"
class HumanFeedbackMethod(FlowMethod[Any, Any]):
    """FlowMethod subclass for ``@human_feedback``-decorated methods.

    Adds the attributes FlowMeta consults for routing and that
    visualization tools read when rendering the flow graph.

    Attributes:
        __is_router__: True when ``emit`` is specified, enabling router behavior.
        __router_paths__: Possible outcome strings when acting as a router.
        __human_feedback_config__: The HumanFeedbackConfig attached to this method.
    """

    __is_router__: bool = False
    __router_paths__: list[str] | None = None
    __human_feedback_config__: HumanFeedbackConfig | None = None
class PreReviewResult(BaseModel):
    """Structured output from the HITL pre-review LLM call.

    Used as the ``response_model`` when ``learn=True`` and the LLM rewrites
    a method's output by applying lessons distilled from past human feedback.
    """

    # The rewritten output; falls back to the raw output if pre-review fails.
    improved_output: str = Field(
        description="The improved version of the output with past human feedback lessons applied.",
    )
class DistilledLessons(BaseModel):
    """Structured output from the HITL lesson distillation LLM call.

    Used as the ``response_model`` when ``learn=True`` and the LLM extracts
    reusable lessons from a method's output plus the human's feedback; the
    lessons are then stored in flow memory for future pre-reviews.
    """

    # May be empty when the feedback contains nothing generalizable.
    lessons: list[str] = Field(
        default_factory=list,
        description=(
            "Generalizable lessons extracted from the human feedback. "
            "Each lesson should be a reusable rule or preference. "
            "Return an empty list if the feedback contains no generalizable guidance."
        ),
    )
def human_feedback(
    message: str,
    emit: Sequence[str] | None = None,
    llm: str | BaseLLM | None = "gpt-4o-mini",
    default_outcome: str | None = None,
    metadata: dict[str, Any] | None = None,
    provider: HumanFeedbackProvider | None = None,
    learn: bool = False,
    learn_source: str = "hitl"
) -> Callable[[F], F]:
    """Decorator for Flow methods that require human feedback.

    This decorator wraps a Flow method to:
    1. Execute the method and capture its output
    2. Display the output to the human with a feedback request
    3. Collect the human's free-form feedback
    4. Optionally collapse the feedback to a predefined outcome using an LLM
    5. Store the result for access by downstream methods

    When `emit` is specified, the decorator acts as a router, and the
    collapsed outcome triggers the appropriate @listen decorated method.

    Supports both synchronous (blocking) and asynchronous (non-blocking)
    feedback collection through the `provider` parameter. If no provider
    is specified, defaults to synchronous console input.

    Args:
        message: The message shown to the human when requesting feedback.
            This should clearly explain what kind of feedback is expected.
        emit: Optional sequence of outcome strings. When provided, the
            human's feedback will be collapsed to one of these outcomes
            using the specified LLM. The outcome then triggers @listen
            methods that match.
        llm: The LLM model to use for collapsing feedback to outcomes.
            Required when emit is specified. Can be a model string
            like "gpt-4o-mini" or a BaseLLM instance.
        default_outcome: The outcome to use when the human provides no
            feedback (empty input). Must be one of the emit values
            if emit is specified.
        metadata: Optional metadata for enterprise integrations. This is
            passed through to the HumanFeedbackResult and can be used
            by enterprise forks for features like Slack/Teams integration.
        provider: Optional HumanFeedbackProvider for custom feedback
            collection. Use this for async workflows that integrate with
            external systems like Slack, Teams, or webhooks. When the
            provider raises HumanFeedbackPending, the flow pauses and
            can be resumed later with Flow.resume().
        learn: If True and the flow exposes a ``memory`` object, past HITL
            lessons are recalled to pre-review the method output before it
            is shown to the human, and new lessons are distilled from the
            feedback and stored for future runs.
        learn_source: Memory source tag used when recalling and storing
            HITL lessons. Defaults to "hitl".

    Returns:
        A decorator function that wraps the method with human feedback
        collection logic.

    Raises:
        ValueError: If emit is specified but llm is not provided.
        ValueError: If default_outcome is specified but emit is not.
        ValueError: If default_outcome is not in the emit list.
        HumanFeedbackPending: When an async provider pauses execution.

    Example:
        Basic feedback without routing:

        ```python
        @start()
        @human_feedback(message="Please review this output:")
        def generate_content(self):
            return "Generated content..."
        ```

        With routing based on feedback:

        ```python
        @start()
        @human_feedback(
            message="Review and approve or reject:",
            emit=["approved", "rejected", "needs_revision"],
            llm="gpt-4o-mini",
            default_outcome="needs_revision",
        )
        def review_document(self):
            return document_content

        @listen("approved")
        def publish(self):
            print(f"Publishing: {self.last_human_feedback.output}")
        ```

        Async feedback with custom provider:

        ```python
        @start()
        @human_feedback(
            message="Review this content:",
            emit=["approved", "rejected"],
            llm="gpt-4o-mini",
            provider=SlackProvider(channel="#reviews"),
        )
        def generate_content(self):
            return "Content to review..."
        ```
    """
    # Validation at decoration time (fail fast, before the flow ever runs)
    if emit is not None:
        if not llm:
            raise ValueError(
                "llm is required when emit is specified. "
                "Provide an LLM model string (e.g., 'gpt-4o-mini') or a BaseLLM instance. "
                "See the CrewAI Human-in-the-Loop (HITL) documentation for more information: "
                "https://docs.crewai.com/en/learn/human-feedback-in-flows"
            )
        if default_outcome is not None and default_outcome not in emit:
            raise ValueError(
                f"default_outcome '{default_outcome}' must be one of the "
                f"emit options: {list(emit)}"
            )
    elif default_outcome is not None:
        raise ValueError("default_outcome requires emit to be specified.")

    def decorator(func: F) -> F:
        """Inner decorator that wraps the function."""

        # -- HITL learning helpers (only used when learn=True) --------
        def _get_hitl_prompt(key: str) -> str:
            """Read a HITL prompt from the i18n translations."""
            from crewai.utilities.i18n import get_i18n

            return get_i18n().slice(key)

        def _resolve_llm_instance() -> Any:
            """Resolve the ``llm`` parameter to a BaseLLM instance.

            Uses the SAME model specified in the decorator so pre-review,
            distillation, and outcome collapsing all share one model.
            """
            if llm is None:
                from crewai.llm import LLM

                return LLM(model="gpt-4o-mini")
            if isinstance(llm, str):
                from crewai.llm import LLM

                return LLM(model=llm)
            return llm  # already a BaseLLM instance

        def _pre_review_with_lessons(
            flow_instance: Flow[Any], method_output: Any
        ) -> Any:
            """Recall past HITL lessons and use LLM to pre-review the output."""
            try:
                query = f"human feedback lessons for {func.__name__}: {method_output!s}"
                matches = flow_instance.memory.recall(
                    query, source=learn_source
                )
                if not matches:
                    return method_output
                lessons = "\n".join(f"- {m.record.content}" for m in matches)
                llm_inst = _resolve_llm_instance()
                prompt = _get_hitl_prompt("hitl_pre_review_user").format(
                    output=str(method_output),
                    lessons=lessons,
                )
                messages = [
                    {"role": "system", "content": _get_hitl_prompt("hitl_pre_review_system")},
                    {"role": "user", "content": prompt},
                ]
                # Prefer structured output when the model supports tool calling;
                # otherwise fall back to a plain-text completion.
                if getattr(llm_inst, "supports_function_calling", lambda: False)():
                    response = llm_inst.call(messages, response_model=PreReviewResult)
                    if isinstance(response, PreReviewResult):
                        return response.improved_output
                    return PreReviewResult.model_validate(response).improved_output
                reviewed = llm_inst.call(messages)
                return reviewed if isinstance(reviewed, str) else str(reviewed)
            except Exception:
                return method_output  # fallback to raw output on any failure

        def _distill_and_store_lessons(
            flow_instance: Flow[Any], method_output: Any, raw_feedback: str
        ) -> None:
            """Extract generalizable lessons from output + feedback, store in memory."""
            try:
                llm_inst = _resolve_llm_instance()
                prompt = _get_hitl_prompt("hitl_distill_user").format(
                    method_name=func.__name__,
                    output=str(method_output),
                    feedback=raw_feedback,
                )
                messages = [
                    {"role": "system", "content": _get_hitl_prompt("hitl_distill_system")},
                    {"role": "user", "content": prompt},
                ]
                lessons: list[str] = []
                if getattr(llm_inst, "supports_function_calling", lambda: False)():
                    response = llm_inst.call(messages, response_model=DistilledLessons)
                    if isinstance(response, DistilledLessons):
                        lessons = response.lessons
                    else:
                        lessons = DistilledLessons.model_validate(response).lessons
                else:
                    # Plain-text model: parse one lesson per line, skipping the
                    # "NONE" sentinel the prompt asks for when nothing applies.
                    response = llm_inst.call(messages)
                    if isinstance(response, str):
                        lessons = [
                            line.strip("- ").strip()
                            for line in response.strip().split("\n")
                            if line.strip() and line.strip() != "NONE"
                        ]
                if lessons:
                    flow_instance.memory.remember_many(lessons, source=learn_source)
            except Exception:  # noqa: S110
                pass  # non-critical: don't fail the flow because lesson storage failed

        # -- Core feedback helpers ------------------------------------
        def _request_feedback(flow_instance: Flow[Any], method_output: Any) -> str:
            """Request feedback using provider or default console."""
            from crewai.flow.async_feedback.types import PendingFeedbackContext

            # Build context for provider
            # Use flow_id property which handles both dict and BaseModel states
            context = PendingFeedbackContext(
                flow_id=flow_instance.flow_id or "unknown",
                flow_class=f"{flow_instance.__class__.__module__}.{flow_instance.__class__.__name__}",
                method_name=func.__name__,
                method_output=method_output,
                message=message,
                emit=list(emit) if emit else None,
                default_outcome=default_outcome,
                metadata=metadata or {},
                # Only a string model name is serializable into the context;
                # a BaseLLM instance is intentionally dropped here.
                llm=llm if isinstance(llm, str) else None,
            )

            # Determine effective provider:
            # decorator arg -> globally configured provider -> built-in console.
            effective_provider = provider
            if effective_provider is None:
                from crewai.flow.flow_config import flow_config

                effective_provider = flow_config.hitl_provider

            if effective_provider is not None:
                return effective_provider.request_feedback(context, flow_instance)
            return flow_instance._request_human_feedback(
                message=message,
                output=method_output,
                metadata=metadata,
                emit=emit,
            )

        def _process_feedback(
            flow_instance: Flow[Any],
            method_output: Any,
            raw_feedback: str,
        ) -> HumanFeedbackResult | str:
            """Process feedback and return result or outcome."""
            # Determine outcome
            collapsed_outcome: str | None = None
            if not raw_feedback.strip():
                # Empty feedback
                if default_outcome:
                    collapsed_outcome = default_outcome
                elif emit:
                    # No default and no feedback - use first outcome
                    collapsed_outcome = emit[0]
            elif emit:
                if llm is not None:
                    collapsed_outcome = flow_instance._collapse_to_outcome(
                        feedback=raw_feedback,
                        outcomes=emit,
                        llm=llm,
                    )
                else:
                    collapsed_outcome = emit[0]

            # Create result
            result = HumanFeedbackResult(
                output=method_output,
                feedback=raw_feedback,
                outcome=collapsed_outcome,
                timestamp=datetime.now(),
                method_name=func.__name__,
                metadata=metadata or {},
            )

            # Store in flow instance
            flow_instance.human_feedback_history.append(result)
            flow_instance.last_human_feedback = result

            # Return based on mode
            if emit:
                # Return outcome for routing
                return collapsed_outcome  # type: ignore[return-value]
            return result

        if asyncio.iscoroutinefunction(func):
            # Async wrapper
            @wraps(func)
            async def async_wrapper(self: Flow[Any], *args: Any, **kwargs: Any) -> Any:
                method_output = await func(self, *args, **kwargs)
                # Pre-review: apply past HITL lessons before human sees it
                if learn and getattr(self, "memory", None) is not None:
                    method_output = _pre_review_with_lessons(self, method_output)
                raw_feedback = _request_feedback(self, method_output)
                result = _process_feedback(self, method_output, raw_feedback)
                # Distill: extract lessons from output + feedback, store in memory
                if learn and getattr(self, "memory", None) is not None and raw_feedback.strip():
                    _distill_and_store_lessons(self, method_output, raw_feedback)
                return result

            wrapper: Any = async_wrapper
        else:
            # Sync wrapper
            @wraps(func)
            def sync_wrapper(self: Flow[Any], *args: Any, **kwargs: Any) -> Any:
                method_output = func(self, *args, **kwargs)
                # Pre-review: apply past HITL lessons before human sees it
                if learn and getattr(self, "memory", None) is not None:
                    method_output = _pre_review_with_lessons(self, method_output)
                raw_feedback = _request_feedback(self, method_output)
                result = _process_feedback(self, method_output, raw_feedback)
                # Distill: extract lessons from output + feedback, store in memory
                if learn and getattr(self, "memory", None) is not None and raw_feedback.strip():
                    _distill_and_store_lessons(self, method_output, raw_feedback)
                return result

            wrapper = sync_wrapper

        # Preserve existing Flow decorator attributes
        for attr in [
            "__is_start_method__",
            "__trigger_methods__",
            "__condition_type__",
            "__trigger_condition__",
            "__is_flow_method__",
        ]:
            if hasattr(func, attr):
                setattr(wrapper, attr, getattr(func, attr))

        # Add human feedback specific attributes (create config inline to avoid race conditions)
        wrapper.__human_feedback_config__ = HumanFeedbackConfig(
            message=message,
            emit=emit,
            llm=llm,
            default_outcome=default_outcome,
            metadata=metadata,
            provider=provider,
            learn=learn,
            learn_source=learn_source
        )
        wrapper.__is_flow_method__ = True
        if emit:
            # Emitting outcomes makes this method a router for FlowMeta.
            wrapper.__is_router__ = True
            wrapper.__router_paths__ = list(emit)

        return wrapper  # type: ignore[no-any-return]

    return decorator
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/flow/human_feedback.py",
"license": "MIT License",
"lines": 453,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai/tests/test_async_human_feedback.py | """Tests for async human feedback functionality.
This module tests the async/non-blocking human feedback flow, including:
- PendingFeedbackContext creation and serialization
- HumanFeedbackPending exception handling
- HumanFeedbackProvider protocol
- ConsoleProvider
- Flow.from_pending() and Flow.resume()
- SQLite persistence with pending feedback
"""
from __future__ import annotations
import json
import os
import tempfile
from datetime import datetime
from typing import Any
from unittest.mock import MagicMock, patch
import pytest
from pydantic import BaseModel
from crewai.flow import Flow, start, listen, human_feedback
from crewai.flow.async_feedback import (
ConsoleProvider,
HumanFeedbackPending,
HumanFeedbackProvider,
PendingFeedbackContext,
)
from crewai.flow.persistence import SQLiteFlowPersistence
# =============================================================================
# PendingFeedbackContext Tests
# =============================================================================
class TestPendingFeedbackContext:
    """Unit tests for the PendingFeedbackContext dataclass."""

    def test_create_basic_context(self) -> None:
        """A context built with only required fields gets sane defaults."""
        ctx = PendingFeedbackContext(
            flow_id="test-flow-123",
            flow_class="myapp.flows.ReviewFlow",
            method_name="review_content",
            method_output="Content to review",
            message="Please review this content:",
        )
        assert ctx.flow_id == "test-flow-123"
        assert ctx.flow_class == "myapp.flows.ReviewFlow"
        assert ctx.method_name == "review_content"
        assert ctx.method_output == "Content to review"
        assert ctx.message == "Please review this content:"
        # Optional routing/metadata fields default to empty values.
        assert ctx.emit is None
        assert ctx.default_outcome is None
        assert ctx.metadata == {}
        assert isinstance(ctx.requested_at, datetime)

    def test_create_context_with_emit(self) -> None:
        """Routing outcomes, default outcome, and llm are stored as given."""
        ctx = PendingFeedbackContext(
            flow_id="test-flow-456",
            flow_class="myapp.flows.ApprovalFlow",
            method_name="submit_for_approval",
            method_output={"document": "content"},
            message="Approve or reject:",
            emit=["approved", "rejected", "needs_revision"],
            default_outcome="needs_revision",
            llm="gpt-4o-mini",
        )
        assert ctx.emit == ["approved", "rejected", "needs_revision"]
        assert ctx.default_outcome == "needs_revision"
        assert ctx.llm == "gpt-4o-mini"

    def test_to_dict_serialization(self) -> None:
        """to_dict emits every field plus a requested_at timestamp."""
        ctx = PendingFeedbackContext(
            flow_id="test-flow-789",
            flow_class="myapp.flows.TestFlow",
            method_name="test_method",
            method_output={"key": "value"},
            message="Test message",
            emit=["yes", "no"],
            metadata={"channel": "#reviews"},
        )
        payload = ctx.to_dict()
        expected = {
            "flow_id": "test-flow-789",
            "flow_class": "myapp.flows.TestFlow",
            "method_name": "test_method",
            "method_output": {"key": "value"},
            "message": "Test message",
            "emit": ["yes", "no"],
            "metadata": {"channel": "#reviews"},
        }
        for key, value in expected.items():
            assert payload[key] == value
        assert "requested_at" in payload

    def test_from_dict_deserialization(self) -> None:
        """from_dict rebuilds a context from a plain dictionary."""
        payload = {
            "flow_id": "test-flow-abc",
            "flow_class": "myapp.flows.TestFlow",
            "method_name": "my_method",
            "method_output": "output value",
            "message": "Feedback message",
            "emit": ["option_a", "option_b"],
            "default_outcome": "option_a",
            "metadata": {"user_id": "123"},
            "llm": "gpt-4o-mini",
            "requested_at": "2024-01-15T10:30:00",
        }
        ctx = PendingFeedbackContext.from_dict(payload)
        assert ctx.flow_id == "test-flow-abc"
        assert ctx.flow_class == "myapp.flows.TestFlow"
        assert ctx.method_name == "my_method"
        assert ctx.emit == ["option_a", "option_b"]
        assert ctx.default_outcome == "option_a"
        assert ctx.llm == "gpt-4o-mini"

    def test_roundtrip_serialization(self) -> None:
        """to_dict followed by from_dict reproduces the original values."""
        source = PendingFeedbackContext(
            flow_id="roundtrip-test",
            flow_class="test.TestFlow",
            method_name="test",
            method_output={"nested": {"data": [1, 2, 3]}},
            message="Test",
            emit=["a", "b"],
            metadata={"key": "value"},
        )
        rebuilt = PendingFeedbackContext.from_dict(source.to_dict())
        for field in (
            "flow_id",
            "flow_class",
            "method_name",
            "method_output",
            "emit",
            "metadata",
        ):
            assert getattr(rebuilt, field) == getattr(source, field)
# =============================================================================
# HumanFeedbackPending Exception Tests
# =============================================================================
class TestHumanFeedbackPending:
    """Tests for the HumanFeedbackPending control-flow exception."""

    @staticmethod
    def _context(flow_id: str) -> PendingFeedbackContext:
        """Build a minimal pending-feedback context for the given flow id."""
        return PendingFeedbackContext(
            flow_id=flow_id,
            flow_class="test.Flow",
            method_name="method",
            method_output="output",
            message="message",
        )

    def test_basic_exception(self) -> None:
        """Default construction keeps the context and an empty callback map."""
        ctx = self._context("exc-test")
        exc = HumanFeedbackPending(context=ctx)
        assert exc.context == ctx
        assert exc.callback_info == {}
        # The generated message mentions both the flow id and the method name.
        rendered = str(exc)
        assert "exc-test" in rendered
        assert "method" in rendered

    def test_exception_with_callback_info(self) -> None:
        """callback_info entries are preserved verbatim."""
        exc = HumanFeedbackPending(
            context=self._context("callback-test"),
            callback_info={
                "webhook_url": "https://example.com/webhook",
                "slack_thread": "123456",
            },
        )
        assert exc.callback_info["webhook_url"] == "https://example.com/webhook"
        assert exc.callback_info["slack_thread"] == "123456"

    def test_exception_with_custom_message(self) -> None:
        """An explicit message overrides the generated one."""
        exc = HumanFeedbackPending(
            context=self._context("msg-test"),
            message="Custom pending message",
        )
        assert str(exc) == "Custom pending message"

    def test_exception_is_catchable(self) -> None:
        """The exception can be raised and caught like any other."""
        with pytest.raises(HumanFeedbackPending) as trapped:
            raise HumanFeedbackPending(context=self._context("catch-test"))
        assert trapped.value.context.flow_id == "catch-test"
# =============================================================================
# HumanFeedbackProvider Protocol Tests
# =============================================================================
class TestHumanFeedbackProvider:
    """Structural (Protocol) compliance checks for feedback providers."""

    def test_protocol_compliance_sync_provider(self) -> None:
        """A provider that returns feedback directly satisfies the protocol."""

        class SyncProvider:
            def request_feedback(
                self, context: PendingFeedbackContext, flow: Flow
            ) -> str:
                return "sync feedback"

        # Protocol check is structural: only the method signature matters.
        assert isinstance(SyncProvider(), HumanFeedbackProvider)

    def test_protocol_compliance_async_provider(self) -> None:
        """A provider that pauses by raising also satisfies the protocol."""

        class AsyncProvider:
            def request_feedback(
                self, context: PendingFeedbackContext, flow: Flow
            ) -> str:
                raise HumanFeedbackPending(context=context)

        assert isinstance(AsyncProvider(), HumanFeedbackProvider)
# =============================================================================
# ConsoleProvider Tests
# =============================================================================
class TestConsoleProvider:
    """Tests for ConsoleProvider construction defaults."""

    def test_provider_initialization(self) -> None:
        """verbose defaults to True and can be switched off explicitly."""
        assert ConsoleProvider().verbose is True
        assert ConsoleProvider(verbose=False).verbose is False
# =============================================================================
# SQLite Persistence Tests for Async Feedback
# =============================================================================
class TestSQLitePendingFeedback:
    """Tests for SQLite persistence with pending feedback."""

    def test_save_and_load_pending_feedback(self) -> None:
        """A saved context and state snapshot can be read back intact."""
        with tempfile.TemporaryDirectory() as workdir:
            store = SQLiteFlowPersistence(os.path.join(workdir, "test_flows.db"))
            ctx = PendingFeedbackContext(
                flow_id="persist-test-123",
                flow_class="test.TestFlow",
                method_name="review",
                method_output={"data": "test"},
                message="Review this:",
                emit=["approved", "rejected"],
                llm="gpt-4o-mini",
            )
            store.save_pending_feedback(
                flow_uuid="persist-test-123",
                context=ctx,
                state_data={"counter": 10, "items": ["a", "b"]},
            )
            row = store.load_pending_feedback("persist-test-123")
            assert row is not None
            state, ctx_back = row
            assert state["counter"] == 10
            assert state["items"] == ["a", "b"]
            assert ctx_back.flow_id == "persist-test-123"
            assert ctx_back.emit == ["approved", "rejected"]

    def test_load_nonexistent_pending_feedback(self) -> None:
        """Loading an unknown flow id yields None."""
        with tempfile.TemporaryDirectory() as workdir:
            store = SQLiteFlowPersistence(os.path.join(workdir, "test_flows.db"))
            assert store.load_pending_feedback("nonexistent-id") is None

    def test_clear_pending_feedback(self) -> None:
        """clear_pending_feedback removes a previously saved entry."""
        with tempfile.TemporaryDirectory() as workdir:
            store = SQLiteFlowPersistence(os.path.join(workdir, "test_flows.db"))
            ctx = PendingFeedbackContext(
                flow_id="clear-test",
                flow_class="test.Flow",
                method_name="method",
                method_output="output",
                message="message",
            )
            store.save_pending_feedback(
                flow_uuid="clear-test",
                context=ctx,
                state_data={"key": "value"},
            )
            # Present before clearing, gone afterwards.
            assert store.load_pending_feedback("clear-test") is not None
            store.clear_pending_feedback("clear-test")
            assert store.load_pending_feedback("clear-test") is None

    def test_replace_existing_pending_feedback(self) -> None:
        """Saving twice under one flow id keeps only the latest entry."""
        with tempfile.TemporaryDirectory() as workdir:
            store = SQLiteFlowPersistence(os.path.join(workdir, "test_flows.db"))
            flow_id = "replace-test"
            # Save two versions back to back; the second must replace the first.
            for version, method in ((1, "method1"), (2, "method2")):
                store.save_pending_feedback(
                    flow_uuid=flow_id,
                    context=PendingFeedbackContext(
                        flow_id=flow_id,
                        flow_class="test.Flow",
                        method_name=method,
                        method_output=f"output{version}",
                        message=f"message{version}",
                    ),
                    state_data={"version": version},
                )
            row = store.load_pending_feedback(flow_id)
            assert row is not None
            state, ctx = row
            assert state["version"] == 2
            assert ctx.method_name == "method2"
# =============================================================================
# Custom Async Provider Tests
# =============================================================================
class TestCustomAsyncProvider:
    """Tests for custom async providers."""

    def test_provider_raises_pending_exception(self) -> None:
        """An async provider signals a pause by raising HumanFeedbackPending."""

        class WebhookProvider:
            def __init__(self, webhook_url: str):
                self.webhook_url = webhook_url

            def request_feedback(
                self, context: PendingFeedbackContext, flow: Flow
            ) -> str:
                # Pause and hand back a callback URL scoped to this flow.
                raise HumanFeedbackPending(
                    context=context,
                    callback_info={"url": f"{self.webhook_url}/{context.flow_id}"},
                )

        ctx = PendingFeedbackContext(
            flow_id="webhook-test",
            flow_class="test.Flow",
            method_name="method",
            method_output="output",
            message="message",
        )
        with pytest.raises(HumanFeedbackPending) as trapped:
            WebhookProvider("https://example.com/api").request_feedback(
                ctx, MagicMock()
            )
        assert trapped.value.callback_info["url"] == (
            "https://example.com/api/webhook-test"
        )
# =============================================================================
# Flow.from_pending and resume Tests
# =============================================================================
class TestFlowResumeWithFeedback:
    """Tests for Flow.from_pending and resume."""

    def test_from_pending_uses_default_persistence(self) -> None:
        """Test that from_pending uses SQLiteFlowPersistence by default."""

        class TestFlow(Flow):
            @start()
            def begin(self):
                return "started"

        # When no persistence is provided, it uses default SQLiteFlowPersistence
        # This will raise "No pending feedback found" (not a persistence error)
        with pytest.raises(ValueError, match="No pending feedback found"):
            TestFlow.from_pending("nonexistent-id")

    def test_from_pending_raises_for_missing_flow(self) -> None:
        """Test that from_pending raises error for nonexistent flow."""
        with tempfile.TemporaryDirectory() as tmpdir:
            db_path = os.path.join(tmpdir, "test_flows.db")
            persistence = SQLiteFlowPersistence(db_path)

            class TestFlow(Flow):
                @start()
                def begin(self):
                    return "started"

            with pytest.raises(ValueError, match="No pending feedback found"):
                TestFlow.from_pending("nonexistent-id", persistence)

    def test_from_pending_restores_state(self) -> None:
        """Test that from_pending correctly restores flow state."""
        with tempfile.TemporaryDirectory() as tmpdir:
            db_path = os.path.join(tmpdir, "test_flows.db")
            persistence = SQLiteFlowPersistence(db_path)

            class TestState(BaseModel):
                id: str = "test-restore-123"
                counter: int = 0

            class TestFlow(Flow[TestState]):
                @start()
                def begin(self):
                    return "started"

            # Manually save pending feedback
            context = PendingFeedbackContext(
                flow_id="test-restore-123",
                flow_class="test.TestFlow",
                method_name="review",
                method_output="content",
                message="Review:",
            )
            persistence.save_pending_feedback(
                flow_uuid="test-restore-123",
                context=context,
                state_data={"id": "test-restore-123", "counter": 42},
            )
            # Restore flow
            flow = TestFlow.from_pending("test-restore-123", persistence)
            # The restored flow carries the pending context, is flagged as
            # resuming, and has the persisted state values (counter == 42).
            assert flow._pending_feedback_context is not None
            assert flow._pending_feedback_context.flow_id == "test-restore-123"
            assert flow._is_execution_resuming is True
            assert flow.state.counter == 42

    def test_resume_without_pending_raises_error(self) -> None:
        """Test that resume raises error without pending context."""

        class TestFlow(Flow):
            @start()
            def begin(self):
                return "started"

        flow = TestFlow()
        with pytest.raises(ValueError, match="No pending feedback context"):
            flow.resume("some feedback")

    def test_resume_from_async_context_raises_error(self) -> None:
        """Test that resume() raises RuntimeError when called from async context."""
        import asyncio

        class TestFlow(Flow):
            @start()
            def begin(self):
                return "started"

        async def call_resume_from_async():
            with tempfile.TemporaryDirectory() as tmpdir:
                db_path = os.path.join(tmpdir, "test.db")
                persistence = SQLiteFlowPersistence(db_path)
                # Save pending feedback
                context = PendingFeedbackContext(
                    flow_id="async-context-test",
                    flow_class="TestFlow",
                    method_name="begin",
                    method_output="output",
                    message="Review:",
                )
                persistence.save_pending_feedback(
                    flow_uuid="async-context-test",
                    context=context,
                    state_data={"id": "async-context-test"},
                )
                flow = TestFlow.from_pending("async-context-test", persistence)
                # This should raise RuntimeError because we're in an async context
                with pytest.raises(RuntimeError, match="cannot be called from within an async context"):
                    flow.resume("feedback")

        asyncio.run(call_resume_from_async())

    @pytest.mark.asyncio
    async def test_resume_async_direct(self) -> None:
        """Test resume_async() can be called directly in async context."""
        with tempfile.TemporaryDirectory() as tmpdir:
            db_path = os.path.join(tmpdir, "test.db")
            persistence = SQLiteFlowPersistence(db_path)

            class TestFlow(Flow):
                @start()
                @human_feedback(message="Review:")
                def generate(self):
                    return "content"

                @listen(generate)
                def process(self, result):
                    return f"processed: {result.feedback}"

            # Save pending feedback
            context = PendingFeedbackContext(
                flow_id="async-direct-test",
                flow_class="TestFlow",
                method_name="generate",
                method_output="content",
                message="Review:",
            )
            persistence.save_pending_feedback(
                flow_uuid="async-direct-test",
                context=context,
                state_data={"id": "async-direct-test"},
            )
            flow = TestFlow.from_pending("async-direct-test", persistence)
            # Event bus is patched out so no real events are dispatched.
            with patch("crewai.flow.flow.crewai_event_bus.emit"):
                result = await flow.resume_async("async feedback")
            assert flow.last_human_feedback is not None
            assert flow.last_human_feedback.feedback == "async feedback"

    @patch("crewai.flow.flow.crewai_event_bus.emit")
    def test_resume_basic(self, mock_emit: MagicMock) -> None:
        """Test basic resume functionality."""
        with tempfile.TemporaryDirectory() as tmpdir:
            db_path = os.path.join(tmpdir, "test_flows.db")
            persistence = SQLiteFlowPersistence(db_path)

            class TestFlow(Flow):
                @start()
                @human_feedback(message="Review this:")
                def generate(self):
                    return "generated content"

                @listen(generate)
                def process(self, feedback_result):
                    return f"Processed: {feedback_result.feedback}"

            # Manually save pending feedback (simulating async pause)
            context = PendingFeedbackContext(
                flow_id="resume-test-123",
                flow_class="test.TestFlow",
                method_name="generate",
                method_output="generated content",
                message="Review this:",
            )
            persistence.save_pending_feedback(
                flow_uuid="resume-test-123",
                context=context,
                state_data={"id": "resume-test-123"},
            )
            # Restore and resume
            flow = TestFlow.from_pending("resume-test-123", persistence)
            result = flow.resume("looks good!")
            # Verify feedback was processed
            assert flow.last_human_feedback is not None
            assert flow.last_human_feedback.feedback == "looks good!"
            assert flow.last_human_feedback.output == "generated content"
            # Verify pending feedback was cleared
            assert persistence.load_pending_feedback("resume-test-123") is None

    @patch("crewai.flow.flow.crewai_event_bus.emit")
    def test_resume_routing(self, mock_emit: MagicMock) -> None:
        """Test resume with routing."""
        with tempfile.TemporaryDirectory() as tmpdir:
            db_path = os.path.join(tmpdir, "test_flows.db")
            persistence = SQLiteFlowPersistence(db_path)

            class TestFlow(Flow):
                result_path: str = ""

                @start()
                @human_feedback(
                    message="Approve?",
                    emit=["approved", "rejected"],
                    llm="gpt-4o-mini",
                )
                def review(self):
                    return "content"

                @listen("approved")
                def handle_approved(self):
                    self.result_path = "approved"
                    return "Approved!"

                @listen("rejected")
                def handle_rejected(self):
                    self.result_path = "rejected"
                    return "Rejected!"

            # Save pending feedback
            context = PendingFeedbackContext(
                flow_id="route-test-123",
                flow_class="test.TestFlow",
                method_name="review",
                method_output="content",
                message="Approve?",
                emit=["approved", "rejected"],
                llm="gpt-4o-mini",
            )
            persistence.save_pending_feedback(
                flow_uuid="route-test-123",
                context=context,
                state_data={"id": "route-test-123"},
            )
            # Restore and resume - mock _collapse_to_outcome directly
            # (avoids a real LLM call while still exercising the router).
            flow = TestFlow.from_pending("route-test-123", persistence)
            with patch.object(flow, "_collapse_to_outcome", return_value="approved"):
                result = flow.resume("yes, this looks great")
            # Verify routing worked
            assert flow.last_human_feedback.outcome == "approved"
            assert flow.result_path == "approved"
# =============================================================================
# Integration Tests with @human_feedback decorator
# =============================================================================
class TestAsyncHumanFeedbackIntegration:
    """Integration tests for async human feedback with decorator."""

    def test_decorator_with_provider_parameter(self) -> None:
        """Test that decorator accepts provider parameter."""

        class MockProvider:
            def request_feedback(
                self, context: PendingFeedbackContext, flow: Flow
            ) -> str:
                raise HumanFeedbackPending(context=context)

        # This should not raise
        class TestFlow(Flow):
            @start()
            @human_feedback(
                message="Review:",
                provider=MockProvider(),
            )
            def review(self):
                return "content"

        flow = TestFlow()
        # Verify the method has the provider config
        method = getattr(flow, "review")
        assert hasattr(method, "__human_feedback_config__")
        assert method.__human_feedback_config__.provider is not None

    @patch("crewai.flow.flow.crewai_event_bus.emit")
    def test_async_provider_pauses_flow(self, mock_emit: MagicMock) -> None:
        """Test that async provider pauses flow execution."""
        with tempfile.TemporaryDirectory() as tmpdir:
            db_path = os.path.join(tmpdir, "test_flows.db")
            persistence = SQLiteFlowPersistence(db_path)

            class PausingProvider:
                # Saves the flow's state then raises to pause execution.
                def __init__(self, persistence: SQLiteFlowPersistence):
                    self.persistence = persistence

                def request_feedback(
                    self, context: PendingFeedbackContext, flow: Flow
                ) -> str:
                    # Save pending state
                    self.persistence.save_pending_feedback(
                        flow_uuid=context.flow_id,
                        context=context,
                        state_data=flow.state if isinstance(flow.state, dict) else flow.state.model_dump(),
                    )
                    raise HumanFeedbackPending(
                        context=context,
                        callback_info={"saved": True},
                    )

            class TestFlow(Flow):
                @start()
                @human_feedback(
                    message="Review:",
                    provider=PausingProvider(persistence),
                )
                def generate(self):
                    return "generated content"

            flow = TestFlow(persistence=persistence)
            # kickoff now returns HumanFeedbackPending instead of raising it
            result = flow.kickoff()
            assert isinstance(result, HumanFeedbackPending)
            assert result.callback_info["saved"] is True
            # Get flow ID from the returned pending context
            flow_id = result.context.flow_id
            # Verify state was persisted
            persisted = persistence.load_pending_feedback(flow_id)
            assert persisted is not None

    @patch("crewai.flow.flow.crewai_event_bus.emit")
    def test_full_async_flow_cycle(self, mock_emit: MagicMock) -> None:
        """Test complete async flow: start -> pause -> resume."""
        with tempfile.TemporaryDirectory() as tmpdir:
            db_path = os.path.join(tmpdir, "test_flows.db")
            persistence = SQLiteFlowPersistence(db_path)
            # Captures the flow id assigned during phase 1 so phase 2 can
            # restore the same flow.
            flow_id_holder: list[str] = []

            class SaveAndPauseProvider:
                def __init__(self, persistence: SQLiteFlowPersistence):
                    self.persistence = persistence

                def request_feedback(
                    self, context: PendingFeedbackContext, flow: Flow
                ) -> str:
                    flow_id_holder.append(context.flow_id)
                    self.persistence.save_pending_feedback(
                        flow_uuid=context.flow_id,
                        context=context,
                        state_data=flow.state if isinstance(flow.state, dict) else flow.state.model_dump(),
                    )
                    raise HumanFeedbackPending(context=context)

            class ReviewFlow(Flow):
                processed_feedback: str = ""

                @start()
                @human_feedback(
                    message="Review this content:",
                    provider=SaveAndPauseProvider(persistence),
                )
                def generate(self):
                    return "AI generated content"

                @listen(generate)
                def process(self, feedback_result):
                    self.processed_feedback = feedback_result.feedback
                    return f"Final: {feedback_result.feedback}"

            # Phase 1: Start flow (should pause)
            flow1 = ReviewFlow(persistence=persistence)
            result = flow1.kickoff()
            # kickoff now returns HumanFeedbackPending instead of raising it
            assert isinstance(result, HumanFeedbackPending)
            assert len(flow_id_holder) == 1
            paused_flow_id = flow_id_holder[0]
            # Phase 2: Resume flow (a fresh instance restored from storage)
            flow2 = ReviewFlow.from_pending(paused_flow_id, persistence)
            result = flow2.resume("This is my feedback")
            # Verify feedback was processed
            assert flow2.last_human_feedback.feedback == "This is my feedback"
            assert flow2.processed_feedback == "This is my feedback"
# =============================================================================
# Edge Case Tests
# =============================================================================
class TestAutoPersistence:
    """Tests for automatic persistence when no persistence is provided."""

    @patch("crewai.flow.flow.crewai_event_bus.emit")
    def test_auto_persistence_when_none_provided(self, mock_emit: MagicMock) -> None:
        """Test that persistence is auto-created when HumanFeedbackPending is raised."""

        class PausingProvider:
            # Always pauses; never stores anything itself, so any persisted
            # state must come from the flow's auto-created persistence.
            def request_feedback(
                self, context: PendingFeedbackContext, flow: Flow
            ) -> str:
                raise HumanFeedbackPending(
                    context=context,
                    callback_info={"paused": True},
                )

        class TestFlow(Flow):
            @start()
            @human_feedback(
                message="Review:",
                provider=PausingProvider(),
            )
            def generate(self):
                return "content"

        # Create flow WITHOUT persistence
        flow = TestFlow()
        assert flow._persistence is None  # No persistence initially
        # kickoff should auto-create persistence when HumanFeedbackPending is raised
        result = flow.kickoff()
        # Should return HumanFeedbackPending (not raise it)
        assert isinstance(result, HumanFeedbackPending)
        # Persistence should have been auto-created
        assert flow._persistence is not None
        # The pending feedback should be saved
        flow_id = result.context.flow_id
        loaded = flow._persistence.load_pending_feedback(flow_id)
        assert loaded is not None
class TestCollapseToOutcomeJsonParsing:
    """Tests for _collapse_to_outcome JSON parsing edge cases."""

    def test_json_string_response_is_parsed(self) -> None:
        """A JSON object reply is unwrapped to its outcome value."""
        flow = Flow()
        with patch("crewai.llm.LLM") as llm_cls:
            stub = MagicMock()
            # Simulate the LLM answering with a JSON string (regression case).
            stub.call.return_value = '{"outcome": "approved"}'
            llm_cls.return_value = stub
            outcome = flow._collapse_to_outcome(
                feedback="I approve this",
                outcomes=["approved", "rejected"],
                llm="gpt-4o-mini",
            )
        assert outcome == "approved"

    def test_plain_string_response_is_matched(self) -> None:
        """A bare outcome string is matched directly."""
        flow = Flow()
        with patch("crewai.llm.LLM") as llm_cls:
            stub = MagicMock()
            stub.call.return_value = "rejected"
            llm_cls.return_value = stub
            outcome = flow._collapse_to_outcome(
                feedback="This is not good",
                outcomes=["approved", "rejected"],
                llm="gpt-4o-mini",
            )
        assert outcome == "rejected"

    def test_invalid_json_falls_back_to_matching(self) -> None:
        """Unparseable JSON falls back to matching the outcome text."""
        flow = Flow()
        with patch("crewai.llm.LLM") as llm_cls:
            stub = MagicMock()
            # Invalid JSON that nevertheless contains "approved".
            stub.call.return_value = "{invalid json but says approved"
            llm_cls.return_value = stub
            outcome = flow._collapse_to_outcome(
                feedback="looks good",
                outcomes=["approved", "rejected"],
                llm="gpt-4o-mini",
            )
        assert outcome == "approved"

    def test_llm_exception_falls_back_to_simple_prompting(self) -> None:
        """A failing structured call is retried with simple prompting."""
        flow = Flow()
        with patch("crewai.llm.LLM") as llm_cls:
            stub = MagicMock()
            # First call raises, second call succeeds (the fallback path).
            stub.call.side_effect = [
                Exception("Structured output failed"),
                "approved",
            ]
            llm_cls.return_value = stub
            outcome = flow._collapse_to_outcome(
                feedback="I approve",
                outcomes=["approved", "rejected"],
                llm="gpt-4o-mini",
            )
            # Both the initial attempt and the fallback hit the stub.
            assert stub.call.call_count == 2
        assert outcome == "approved"
class TestAsyncHumanFeedbackEdgeCases:
    """Edge case tests for async human feedback."""

    def test_pending_context_with_complex_output(self) -> None:
        """Test context with complex nested output."""
        complex_output = {
            "items": [{"id": 1, "name": "Item 1"}, {"id": 2, "name": "Item 2"}],
            "metadata": {"total": 2, "page": 1},
            "nested": {"deep": {"value": "test"}},
        }
        context = PendingFeedbackContext(
            flow_id="complex-test",
            flow_class="test.Flow",
            method_name="method",
            method_output=complex_output,
            message="Review:",
        )
        # Serialize and deserialize through an actual JSON round-trip to
        # prove the payload survives real serialization, not just to_dict.
        serialized = context.to_dict()
        json_str = json.dumps(serialized)  # Should be JSON serializable
        restored = PendingFeedbackContext.from_dict(json.loads(json_str))
        assert restored.method_output == complex_output

    def test_empty_feedback_uses_default_outcome(self) -> None:
        """Test that empty feedback uses default outcome during resume."""
        with tempfile.TemporaryDirectory() as tmpdir:
            db_path = os.path.join(tmpdir, "test_flows.db")
            persistence = SQLiteFlowPersistence(db_path)

            class TestFlow(Flow):
                @start()
                def generate(self):
                    return "content"

            # Save pending feedback with default_outcome
            context = PendingFeedbackContext(
                flow_id="default-test",
                flow_class="test.Flow",
                method_name="generate",
                method_output="content",
                message="Review:",
                emit=["approved", "rejected"],
                default_outcome="approved",
                llm="gpt-4o-mini",
            )
            persistence.save_pending_feedback(
                flow_uuid="default-test",
                context=context,
                state_data={"id": "default-test"},
            )
            flow = TestFlow.from_pending("default-test", persistence)
            with patch("crewai.flow.flow.crewai_event_bus.emit"):
                result = flow.resume("")  # Empty feedback
            # Empty feedback should short-circuit to the default outcome
            # (no LLM collapse call needed).
            assert flow.last_human_feedback.outcome == "approved"

    def test_resume_without_feedback_uses_default(self) -> None:
        """Test that resume() can be called without feedback argument."""
        with tempfile.TemporaryDirectory() as tmpdir:
            db_path = os.path.join(tmpdir, "test.db")
            persistence = SQLiteFlowPersistence(db_path)

            class TestFlow(Flow):
                @start()
                def step(self):
                    return "output"

            context = PendingFeedbackContext(
                flow_id="no-feedback-test",
                flow_class="TestFlow",
                method_name="step",
                method_output="test output",
                message="Review:",
                emit=["approved", "rejected"],
                default_outcome="approved",
                llm="gpt-4o-mini",
            )
            persistence.save_pending_feedback(
                flow_uuid="no-feedback-test",
                context=context,
                state_data={"id": "no-feedback-test"},
            )
            flow = TestFlow.from_pending("no-feedback-test", persistence)
            with patch("crewai.flow.flow.crewai_event_bus.emit"):
                # Call resume() with no arguments - should use default
                result = flow.resume()
            assert flow.last_human_feedback.outcome == "approved"
            assert flow.last_human_feedback.feedback == ""
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/test_async_human_feedback.py",
"license": "MIT License",
"lines": 855,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/tests/test_human_feedback_decorator.py | """Unit tests for the @human_feedback decorator.
This module tests the @human_feedback decorator's validation logic,
async support, and attribute preservation functionality.
"""
from __future__ import annotations
import asyncio
from datetime import datetime
from typing import Any
from unittest.mock import MagicMock, patch
import pytest
from crewai.flow import Flow, human_feedback, listen, start
from crewai.flow.human_feedback import (
HumanFeedbackConfig,
HumanFeedbackResult,
)
class TestHumanFeedbackValidation:
    """Tests for decorator parameter validation."""

    def test_emit_requires_llm(self):
        """Declaring emit while llm is None must be rejected."""
        with pytest.raises(ValueError) as err:

            @human_feedback(
                message="Review this:",
                emit=["approve", "reject"],
                llm=None,  # explicitly None
            )
            def reviewed(self):
                return "output"

        assert "llm is required" in str(err.value)

    def test_default_outcome_requires_emit(self):
        """default_outcome without emit must be rejected."""
        with pytest.raises(ValueError) as err:

            @human_feedback(
                message="Review this:",
                default_outcome="approve",
                # emit not provided
            )
            def reviewed(self):
                return "output"

        assert "requires emit" in str(err.value)

    def test_default_outcome_must_be_in_emit(self):
        """default_outcome outside the emit list must be rejected."""
        with pytest.raises(ValueError) as err:

            @human_feedback(
                message="Review this:",
                emit=["approve", "reject"],
                llm="gpt-4o-mini",
                default_outcome="invalid_outcome",
            )
            def reviewed(self):
                return "output"

        assert "must be one of" in str(err.value)

    def test_valid_configuration_with_routing(self):
        """A fully-specified routing configuration is accepted."""

        @human_feedback(
            message="Review this:",
            emit=["approve", "reject"],
            llm="gpt-4o-mini",
            default_outcome="reject",
        )
        def reviewed(self):
            return "output"

        # The decorator marks the method as a router over the emit values.
        assert hasattr(reviewed, "__human_feedback_config__")
        assert reviewed.__is_router__ is True
        assert reviewed.__router_paths__ == ["approve", "reject"]

    def test_valid_configuration_without_routing(self):
        """A message-only configuration is accepted and is not a router."""

        @human_feedback(message="Review this:")
        def reviewed(self):
            return "output"

        assert hasattr(reviewed, "__human_feedback_config__")
        # Either the router flag is absent or it is falsy.
        assert not getattr(reviewed, "__is_router__", False)
class TestHumanFeedbackConfig:
    """Tests for HumanFeedbackConfig dataclass."""

    def test_config_creation(self):
        """Every constructor argument is stored on the config unchanged."""
        cfg = HumanFeedbackConfig(
            message="Test message",
            emit=["a", "b"],
            llm="gpt-4",
            default_outcome="a",
            metadata={"key": "value"},
        )
        expected = {
            "message": "Test message",
            "emit": ["a", "b"],
            "llm": "gpt-4",
            "default_outcome": "a",
            "metadata": {"key": "value"},
        }
        for name, value in expected.items():
            assert getattr(cfg, name) == value
class TestHumanFeedbackResult:
    """Construction behavior of the HumanFeedbackResult dataclass."""

    def test_result_creation(self):
        """Explicit fields are stored; timestamp and metadata receive defaults."""
        res = HumanFeedbackResult(
            output={"title": "Test"},
            feedback="Looks good",
            outcome="approved",
            method_name="test_method",
        )
        assert res.output == {"title": "Test"}
        assert res.feedback == "Looks good"
        assert res.outcome == "approved"
        assert res.method_name == "test_method"
        # Defaults: timestamp is auto-populated, metadata starts empty.
        assert isinstance(res.timestamp, datetime)
        assert res.metadata == {}

    def test_result_with_metadata(self):
        """Caller-supplied metadata is preserved as given."""
        res = HumanFeedbackResult(
            output="test",
            feedback="feedback",
            metadata={"channel": "slack", "user": "test_user"},
        )
        assert res.metadata == {"channel": "slack", "user": "test_user"}
class TestDecoratorAttributePreservation:
    """Tests for preserving Flow decorator attributes."""

    def test_preserves_start_method_attributes(self):
        """Test that @human_feedback preserves @start decorator attributes."""

        class TestFlow(Flow):
            # @start is applied outermost; its marker attributes must survive
            # the @human_feedback wrapping underneath it.
            @start()
            @human_feedback(message="Review:")
            def my_start_method(self):
                return "output"

        # Check that start method attributes are preserved
        flow = TestFlow()
        method = flow._methods.get("my_start_method")
        assert method is not None
        # Either the marker attribute survived on the wrapper, or the Flow
        # registered the method as a start method by name.
        assert hasattr(method, "__is_start_method__") or "my_start_method" in flow._start_methods

    def test_preserves_listen_method_attributes(self):
        """Test that @human_feedback preserves @listen decorator attributes."""

        class TestFlow(Flow):
            @start()
            def begin(self):
                return "start"

            @listen("begin")
            @human_feedback(message="Review:")
            def review(self):
                return "review output"

        flow = TestFlow()
        # The method should be registered as a listener
        # (either keyed directly by name or referenced inside a listener entry).
        assert "review" in flow._listeners or any(
            "review" in str(v) for v in flow._listeners.values()
        )

    def test_sets_router_attributes_when_emit_specified(self):
        """Test that router attributes are set when emit is specified."""
        # Test the decorator directly without @start wrapping
        @human_feedback(
            message="Review:",
            emit=["approved", "rejected"],
            llm="gpt-4o-mini",
        )
        def review_method(self):
            return "output"

        # emit=... turns the decorated method into a router with one path
        # per possible outcome.
        assert review_method.__is_router__ is True
        assert review_method.__router_paths__ == ["approved", "rejected"]
class TestAsyncSupport:
    """Coroutine handling by the @human_feedback decorator."""

    def test_async_method_detection(self):
        """Decorating an async def yields a coroutine function."""

        @human_feedback(message="Review:")
        async def wrapped_async(self):
            return "async output"

        assert asyncio.iscoroutinefunction(wrapped_async) is True

    def test_sync_method_remains_sync(self):
        """Decorating a plain def does not produce a coroutine function."""

        @human_feedback(message="Review:")
        def wrapped_sync(self):
            return "sync output"

        assert asyncio.iscoroutinefunction(wrapped_sync) is False
class TestHumanFeedbackExecution:
    """Tests for actual human feedback execution.

    builtins.input/print are patched defensively; the feedback channel that
    actually matters is the patched Flow._request_human_feedback.
    """

    @patch("builtins.input", return_value="This looks great!")
    @patch("builtins.print")
    def test_basic_feedback_collection(self, mock_print, mock_input):
        """Test basic feedback collection without routing."""

        class TestFlow(Flow):
            @start()
            @human_feedback(message="Please review:")
            def generate(self):
                return "Generated content"

        flow = TestFlow()
        with patch.object(flow, "_request_human_feedback", return_value="Great job!"):
            # FIX: the return value was previously bound to an unused local.
            flow.kickoff()

        # The collected feedback is exposed on the flow afterwards.
        assert flow.last_human_feedback is not None
        assert flow.last_human_feedback.output == "Generated content"
        assert flow.last_human_feedback.feedback == "Great job!"

    @patch("builtins.input", return_value="")
    @patch("builtins.print")
    def test_empty_feedback_with_default_outcome(self, mock_print, mock_input):
        """Test empty feedback uses default_outcome."""

        class TestFlow(Flow):
            @start()
            @human_feedback(
                message="Review:",
                emit=["approved", "needs_work"],
                llm="gpt-4o-mini",
                default_outcome="needs_work",
            )
            def review(self):
                return "Content"

        flow = TestFlow()
        with patch.object(flow, "_request_human_feedback", return_value=""):
            result = flow.kickoff()

        # Empty feedback short-circuits to the configured default outcome.
        assert result == "needs_work"
        assert flow.last_human_feedback is not None
        assert flow.last_human_feedback.outcome == "needs_work"

    @patch("builtins.input", return_value="Approved!")
    @patch("builtins.print")
    def test_feedback_collapsing(self, mock_print, mock_input):
        """Test that feedback is collapsed to an outcome."""

        class TestFlow(Flow):
            @start()
            @human_feedback(
                message="Review:",
                emit=["approved", "rejected"],
                llm="gpt-4o-mini",
            )
            def review(self):
                return "Content"

        flow = TestFlow()
        # _collapse_to_outcome (normally LLM-backed) is stubbed so the test
        # only exercises the routing of the collapsed outcome.
        with (
            patch.object(flow, "_request_human_feedback", return_value="Looks great, approved!"),
            patch.object(flow, "_collapse_to_outcome", return_value="approved"),
        ):
            result = flow.kickoff()

        assert result == "approved"
        assert flow.last_human_feedback is not None
        assert flow.last_human_feedback.outcome == "approved"
class TestHumanFeedbackHistory:
    """Tests for human feedback history tracking."""

    @patch("builtins.input", return_value="feedback")
    @patch("builtins.print")
    def test_history_accumulates(self, mock_print, mock_input):
        """Test that multiple feedbacks are stored in history."""

        class TestFlow(Flow):
            @start()
            @human_feedback(message="Review step 1:")
            def step1(self):
                return "Step 1 output"

            @listen(step1)
            @human_feedback(message="Review step 2:")
            def step2(self, prev):
                return "Step 2 output"

        flow = TestFlow()
        with patch.object(flow, "_request_human_feedback", return_value="feedback"):
            flow.kickoff()

        # Both feedbacks should be in history, in execution order.
        assert len(flow.human_feedback_history) == 2
        assert flow.human_feedback_history[0].method_name == "step1"
        assert flow.human_feedback_history[1].method_name == "step2"

    @patch("builtins.input", return_value="")
    @patch("builtins.print")
    def test_human_feedback_property_returns_last(self, mock_print, mock_input):
        """Test that human_feedback property returns the last result."""

        class TestFlow(Flow):
            @start()
            @human_feedback(message="Review:")
            def generate(self):
                return "output"

        flow = TestFlow()
        with patch.object(flow, "_request_human_feedback", return_value="last feedback"):
            flow.kickoff()

        assert flow.last_human_feedback is not None
        assert flow.last_human_feedback.feedback == "last feedback"
        # BUG FIX: the original asserted `flow.last_human_feedback is
        # flow.last_human_feedback` — a tautology that can never fail.
        # Assert the property's actual contract instead: it returns the
        # most recent entry of the feedback history.
        assert flow.last_human_feedback is flow.human_feedback_history[-1]
class TestCollapseToOutcome:
    """Behavior of Flow._collapse_to_outcome against a mocked LLM."""

    @staticmethod
    def _collapse(llm_response, feedback):
        """Run _collapse_to_outcome with an LLM stub that replies llm_response."""
        flow = Flow()
        with patch("crewai.llm.LLM") as MockLLM:
            stub = MagicMock()
            stub.call.return_value = llm_response
            MockLLM.return_value = stub
            return flow._collapse_to_outcome(
                feedback=feedback,
                outcomes=["approved", "rejected"],
                llm="gpt-4o-mini",
            )

    def test_exact_match(self):
        """An LLM reply that equals an outcome is returned verbatim."""
        assert self._collapse("approved", "I approve this") == "approved"

    def test_partial_match(self):
        """An outcome embedded in a longer LLM reply is still found."""
        reply = "The outcome is approved based on the feedback"
        assert self._collapse(reply, "Looks good") == "approved"

    def test_fallback_to_first(self):
        """A reply matching no outcome falls back to the first outcome."""
        reply = "something completely different"
        assert self._collapse(reply, "Unclear feedback") == "approved"  # First in list
# -- HITL Learning tests --


class TestHumanFeedbackLearn:
    """Tests for the learn=True HITL learning feature."""

    def test_learn_false_does_not_interact_with_memory(self):
        """When learn=False (default), memory is never touched."""

        class LearnOffFlow(Flow):
            @start()
            @human_feedback(message="Review:", learn=False)
            def produce(self):
                return "output"

        flow = LearnOffFlow()
        flow.memory = MagicMock()
        with patch.object(
            flow, "_request_human_feedback", return_value="looks good"
        ):
            flow.produce()
        # memory.recall and memory.remember_many should NOT be called
        flow.memory.recall.assert_not_called()
        flow.memory.remember_many.assert_not_called()

    def test_learn_true_stores_distilled_lessons(self):
        """When learn=True and feedback has substance, lessons are distilled and stored."""

        class LearnFlow(Flow):
            @start()
            @human_feedback(message="Review:", llm="gpt-4o-mini", learn=True)
            def produce(self):
                return "draft article"

        flow = LearnFlow()
        flow.memory = MagicMock()
        flow.memory.recall.return_value = []  # no prior lessons
        with (
            patch.object(
                flow, "_request_human_feedback", return_value="Always add citations"
            ),
            patch("crewai.llm.LLM") as MockLLM,
        ):
            # Imported lazily so the patch on crewai.llm.LLM is already active.
            from crewai.flow.human_feedback import DistilledLessons

            mock_llm = MagicMock()
            mock_llm.supports_function_calling.return_value = True
            # Distillation call -> returns structured lessons
            mock_llm.call.return_value = DistilledLessons(
                lessons=["Always include source citations when making factual claims"]
            )
            MockLLM.return_value = mock_llm
            flow.produce()
        # remember_many should be called with the distilled lesson
        flow.memory.remember_many.assert_called_once()
        lessons = flow.memory.remember_many.call_args.args[0]
        assert len(lessons) == 1
        assert "citations" in lessons[0].lower()
        # source should be "hitl"
        assert flow.memory.remember_many.call_args.kwargs.get("source") == "hitl"

    def test_learn_true_pre_reviews_with_past_lessons(self):
        """When learn=True and past lessons exist, output is pre-reviewed before human sees it."""
        from crewai.memory.types import MemoryMatch, MemoryRecord

        class LearnFlow(Flow):
            @start()
            @human_feedback(message="Review:", llm="gpt-4o-mini", learn=True)
            def produce(self):
                return "draft without citations"

        flow = LearnFlow()
        # Mock memory with a past lesson
        flow.memory = MagicMock()
        flow.memory.recall.return_value = [
            MemoryMatch(
                record=MemoryRecord(
                    content="Always include source citations when making factual claims",
                    embedding=[],
                ),
                score=0.9,
                match_reasons=["semantic"],
            )
        ]
        captured_output = {}

        # Stand-in for _request_human_feedback that records what the human
        # was shown; signature mirrors the real method's call site.
        def capture_feedback(message, output, metadata=None, emit=None):
            captured_output["shown_to_human"] = output
            return "approved"

        with (
            patch.object(flow, "_request_human_feedback", side_effect=capture_feedback),
            patch.object(flow, "_collapse_to_outcome", return_value="approved")
            if False
            else patch("crewai.llm.LLM") as MockLLM,
        ):
            from crewai.flow.human_feedback import DistilledLessons, PreReviewResult

            mock_llm = MagicMock()
            mock_llm.supports_function_calling.return_value = True
            # Pre-review returns structured improved output, distillation returns empty lessons
            # NOTE: side_effect order is load-bearing — pre-review happens first.
            mock_llm.call.side_effect = [
                PreReviewResult(improved_output="draft with citations added"),
                DistilledLessons(lessons=[]),  # "approved" has no new lessons
            ]
            MockLLM.return_value = mock_llm
            flow.produce()
        # The human should have seen the pre-reviewed output, not the raw output
        assert captured_output["shown_to_human"] == "draft with citations added"
        # recall was called to find past lessons
        flow.memory.recall.assert_called_once()

    def test_learn_true_empty_feedback_does_not_store(self):
        """When learn=True but feedback is empty, no lessons are stored."""

        class LearnFlow(Flow):
            @start()
            @human_feedback(message="Review:", llm="gpt-4o-mini", learn=True)
            def produce(self):
                return "output"

        flow = LearnFlow()
        flow.memory = MagicMock()
        flow.memory.recall.return_value = []
        with patch.object(
            flow, "_request_human_feedback", return_value=""
        ):
            flow.produce()
        # Empty feedback -> no distillation, no storage
        flow.memory.remember_many.assert_not_called()

    def test_learn_true_uses_default_llm(self):
        """When learn=True and llm is not explicitly set, the default gpt-4o-mini is used."""

        @human_feedback(message="Review:", learn=True)
        def test_method(self):
            return "output"

        config = test_method.__human_feedback_config__
        assert config is not None
        assert config.learn is True
        # llm defaults to "gpt-4o-mini" at the function level
        assert config.llm == "gpt-4o-mini"
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/test_human_feedback_decorator.py",
"license": "MIT License",
"lines": 425,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/tests/test_human_feedback_integration.py | """Integration tests for the @human_feedback decorator with Flow.
This module tests the integration of @human_feedback with @listen,
routing behavior, multi-step flows, and state management.
"""
from __future__ import annotations
import asyncio
from datetime import datetime
from typing import Any
from unittest.mock import MagicMock, patch
import pytest
from pydantic import BaseModel
from crewai.flow import Flow, HumanFeedbackResult, human_feedback, listen, or_, start
from crewai.flow.flow import FlowState
class TestRoutingIntegration:
    """Tests for routing integration with @listen decorators."""

    @patch("builtins.input", return_value="I approve")
    @patch("builtins.print")
    def test_routes_to_matching_listener(self, mock_print, mock_input):
        """Test that collapsed outcome routes to the matching @listen method."""
        execution_order = []

        class ReviewFlow(Flow):
            @start()
            @human_feedback(
                message="Review:",
                emit=["approved", "rejected"],
                llm="gpt-4o-mini",
            )
            def generate(self):
                execution_order.append("generate")
                return "content"

            @listen("approved")
            def on_approved(self):
                execution_order.append("on_approved")
                return "published"

            @listen("rejected")
            def on_rejected(self):
                execution_order.append("on_rejected")
                return "discarded"

        flow = ReviewFlow()
        with (
            patch.object(flow, "_request_human_feedback", return_value="Approved!"),
            patch.object(flow, "_collapse_to_outcome", return_value="approved"),
        ):
            # FIX: the return value was previously bound to an unused local.
            flow.kickoff()

        # Only the listener matching the collapsed outcome must run.
        assert "generate" in execution_order
        assert "on_approved" in execution_order
        assert "on_rejected" not in execution_order

    @patch("builtins.input", return_value="")
    @patch("builtins.print")
    def test_default_outcome_routes_correctly(self, mock_print, mock_input):
        """Test that default_outcome routes when no feedback provided."""
        executed_listener = []

        class ReviewFlow(Flow):
            @start()
            @human_feedback(
                message="Review:",
                emit=["approved", "needs_work"],
                llm="gpt-4o-mini",
                default_outcome="needs_work",
            )
            def generate(self):
                return "content"

            @listen("approved")
            def on_approved(self):
                executed_listener.append("approved")

            @listen("needs_work")
            def on_needs_work(self):
                executed_listener.append("needs_work")

        flow = ReviewFlow()
        with patch.object(flow, "_request_human_feedback", return_value=""):
            flow.kickoff()

        # Empty feedback -> default outcome -> only its listener fires.
        assert "needs_work" in executed_listener
        assert "approved" not in executed_listener
class TestMultiStepFlows:
    """Tests for multi-step flows with multiple @human_feedback decorators."""

    @patch("builtins.input", side_effect=["Good draft", "Final approved"])
    @patch("builtins.print")
    def test_multiple_feedback_steps(self, mock_print, mock_input):
        """Test a flow with multiple human feedback steps."""

        class MultiStepFlow(Flow):
            @start()
            @human_feedback(message="Review draft:")
            def draft(self):
                return "Draft content"

            @listen(draft)
            @human_feedback(message="Final review:")
            def final_review(self, prev_result: HumanFeedbackResult):
                return f"Final content based on: {prev_result.feedback}"

        flow = MultiStepFlow()
        with patch.object(
            flow, "_request_human_feedback", side_effect=["Good draft", "Approved"]
        ):
            flow.kickoff()

        # Both feedbacks should be recorded
        assert len(flow.human_feedback_history) == 2
        assert flow.human_feedback_history[0].method_name == "draft"
        assert flow.human_feedback_history[0].feedback == "Good draft"
        assert flow.human_feedback_history[1].method_name == "final_review"
        assert flow.human_feedback_history[1].feedback == "Approved"

    @patch("builtins.input", return_value="feedback")
    @patch("builtins.print")
    def test_mixed_feedback_and_regular_methods(self, mock_print, mock_input):
        """Test flow with both @human_feedback and regular methods."""
        execution_order = []

        class MixedFlow(Flow):
            @start()
            def generate(self):
                execution_order.append("generate")
                return "generated"

            @listen(generate)
            @human_feedback(message="Review:")
            def review(self):
                execution_order.append("review")
                return "reviewed"

            @listen(review)
            def finalize(self, result):
                execution_order.append("finalize")
                return "finalized"

        flow = MixedFlow()
        with patch.object(flow, "_request_human_feedback", return_value="feedback"):
            flow.kickoff()

        assert execution_order == ["generate", "review", "finalize"]

    def test_chained_router_feedback_steps(self):
        """Test that a router outcome can trigger another router method.

        Regression test: @listen("outcome") combined with @human_feedback(emit=...)
        creates a method that is both a listener and a router. The flow must find
        and execute it when the upstream router emits the matching outcome.
        """
        execution_order: list[str] = []

        class ChainedRouterFlow(Flow):
            @start()
            @human_feedback(
                message="First review:",
                emit=["approved", "rejected"],
                llm="gpt-4o-mini",
            )
            def draft(self):
                execution_order.append("draft")
                return "draft content"

            @listen("approved")
            @human_feedback(
                message="Final review:",
                emit=["publish", "revise"],
                llm="gpt-4o-mini",
            )
            def final_review(self, prev: HumanFeedbackResult):
                execution_order.append("final_review")
                return "final content"

            @listen("rejected")
            def on_rejected(self, prev: HumanFeedbackResult):
                execution_order.append("on_rejected")
                return "rejected"

            @listen("publish")
            def on_publish(self, prev: HumanFeedbackResult):
                execution_order.append("on_publish")
                return "published"

            @listen("revise")
            def on_revise(self, prev: HumanFeedbackResult):
                execution_order.append("on_revise")
                return "revised"

        flow = ChainedRouterFlow()
        # side_effect sequences pair up: first feedback collapses to
        # "approved", second to "publish".
        with (
            patch.object(
                flow,
                "_request_human_feedback",
                side_effect=["looks good", "ship it"],
            ),
            patch.object(
                flow,
                "_collapse_to_outcome",
                side_effect=["approved", "publish"],
            ),
        ):
            result = flow.kickoff()

        assert execution_order == ["draft", "final_review", "on_publish"]
        assert result == "published"
        assert len(flow.human_feedback_history) == 2
        assert flow.human_feedback_history[0].outcome == "approved"
        assert flow.human_feedback_history[1].outcome == "publish"

    def test_chained_router_rejected_path(self):
        """Test that a start-router outcome routes to a non-router listener."""
        execution_order: list[str] = []

        class ChainedRouterFlow(Flow):
            @start()
            @human_feedback(
                message="Review:",
                emit=["approved", "rejected"],
                llm="gpt-4o-mini",
            )
            def draft(self):
                execution_order.append("draft")
                return "draft"

            @listen("approved")
            @human_feedback(
                message="Final:",
                emit=["publish", "revise"],
                llm="gpt-4o-mini",
            )
            def final_review(self, prev: HumanFeedbackResult):
                execution_order.append("final_review")
                return "final"

            @listen("rejected")
            def on_rejected(self, prev: HumanFeedbackResult):
                execution_order.append("on_rejected")
                return "rejected"

        flow = ChainedRouterFlow()
        with (
            patch.object(
                flow, "_request_human_feedback", return_value="bad"
            ),
            patch.object(
                flow, "_collapse_to_outcome", return_value="rejected"
            ),
        ):
            result = flow.kickoff()

        # The rejected branch runs; the "approved" router is never reached.
        assert execution_order == ["draft", "on_rejected"]
        assert result == "rejected"
        assert len(flow.human_feedback_history) == 1
        assert flow.human_feedback_history[0].outcome == "rejected"

    def test_hitl_self_loop_routes_back_to_same_method(self):
        """Test that a HITL router can loop back to itself via its own emit outcome.

        Pattern: review_work listens to or_("do_work", "review") and emits
        ["review", "approved"]. When the human rejects (outcome="review"),
        the method should re-execute. When approved, the flow should continue
        to the approve_work listener.
        """
        execution_order: list[str] = []

        class SelfLoopFlow(Flow):
            @start()
            def initial_func(self):
                execution_order.append("initial_func")
                return "initial"

            @listen(initial_func)
            def do_work(self):
                execution_order.append("do_work")
                return "work output"

            # NOTE(review): here @human_feedback sits *above* @listen —
            # the reverse of earlier tests. The ordering appears significant
            # for self-loop routing; confirm against the decorator docs.
            @human_feedback(
                message="Do you approve this content?",
                emit=["review", "approved"],
                llm="gpt-4o-mini",
                default_outcome="approved",
            )
            @listen(or_("do_work", "review"))
            def review_work(self):
                execution_order.append("review_work")
                return "content for review"

            @listen("approved")
            def approve_work(self):
                execution_order.append("approve_work")
                return "published"

        flow = SelfLoopFlow()
        # First call: human rejects (outcome="review") -> self-loop
        # Second call: human approves (outcome="approved") -> continue
        with (
            patch.object(
                flow,
                "_request_human_feedback",
                side_effect=["needs changes", "looks good"],
            ),
            patch.object(
                flow,
                "_collapse_to_outcome",
                side_effect=["review", "approved"],
            ),
        ):
            result = flow.kickoff()

        assert execution_order == [
            "initial_func",
            "do_work",
            "review_work",  # first review -> rejected (review)
            "review_work",  # second review -> approved
            "approve_work",
        ]
        assert result == "published"
        assert len(flow.human_feedback_history) == 2
        assert flow.human_feedback_history[0].outcome == "review"
        assert flow.human_feedback_history[1].outcome == "approved"

    def test_hitl_self_loop_multiple_rejections(self):
        """Test that a HITL router can loop back multiple times before approving.

        Verifies the self-loop works for more than one rejection cycle.
        """
        execution_order: list[str] = []

        class MultiRejectFlow(Flow):
            @start()
            def generate(self):
                execution_order.append("generate")
                return "draft"

            @human_feedback(
                message="Review this content:",
                emit=["revise", "approved"],
                llm="gpt-4o-mini",
                default_outcome="approved",
            )
            @listen(or_("generate", "revise"))
            def review(self):
                execution_order.append("review")
                return "content v" + str(execution_order.count("review"))

            @listen("approved")
            def publish(self):
                execution_order.append("publish")
                return "published"

        flow = MultiRejectFlow()
        # Three rejections, then approval
        with (
            patch.object(
                flow,
                "_request_human_feedback",
                side_effect=["bad", "still bad", "not yet", "great"],
            ),
            patch.object(
                flow,
                "_collapse_to_outcome",
                side_effect=["revise", "revise", "revise", "approved"],
            ),
        ):
            result = flow.kickoff()

        assert execution_order == [
            "generate",
            "review",  # 1st review -> revise
            "review",  # 2nd review -> revise
            "review",  # 3rd review -> revise
            "review",  # 4th review -> approved
            "publish",
        ]
        assert result == "published"
        assert len(flow.human_feedback_history) == 4
        assert [r.outcome for r in flow.human_feedback_history] == [
            "revise", "revise", "revise", "approved"
        ]

    def test_hitl_self_loop_immediate_approval(self):
        """Test that a HITL self-loop flow works when approved on the first try.

        No looping occurs -- the flow should proceed straight through.
        """
        execution_order: list[str] = []

        class ImmediateApprovalFlow(Flow):
            @start()
            def generate(self):
                execution_order.append("generate")
                return "perfect draft"

            @human_feedback(
                message="Review:",
                emit=["revise", "approved"],
                llm="gpt-4o-mini",
            )
            @listen(or_("generate", "revise"))
            def review(self):
                execution_order.append("review")
                return "content"

            @listen("approved")
            def publish(self):
                execution_order.append("publish")
                return "published"

        flow = ImmediateApprovalFlow()
        with (
            patch.object(
                flow,
                "_request_human_feedback",
                return_value="perfect",
            ),
            patch.object(
                flow,
                "_collapse_to_outcome",
                return_value="approved",
            ),
        ):
            result = flow.kickoff()

        assert execution_order == ["generate", "review", "publish"]
        assert result == "published"
        assert len(flow.human_feedback_history) == 1
        assert flow.human_feedback_history[0].outcome == "approved"

    def test_router_and_non_router_listeners_for_same_outcome(self):
        """Test that both router and non-router listeners fire for the same outcome."""
        execution_order: list[str] = []

        class MixedListenerFlow(Flow):
            @start()
            @human_feedback(
                message="Review:",
                emit=["approved", "rejected"],
                llm="gpt-4o-mini",
            )
            def draft(self):
                execution_order.append("draft")
                return "draft"

            @listen("approved")
            @human_feedback(
                message="Final:",
                emit=["publish", "revise"],
                llm="gpt-4o-mini",
            )
            def router_listener(self, prev: HumanFeedbackResult):
                execution_order.append("router_listener")
                return "final"

            @listen("approved")
            def plain_listener(self, prev: HumanFeedbackResult):
                execution_order.append("plain_listener")
                return "logged"

            @listen("publish")
            def on_publish(self, prev: HumanFeedbackResult):
                execution_order.append("on_publish")
                return "published"

        flow = MixedListenerFlow()
        with (
            patch.object(
                flow,
                "_request_human_feedback",
                side_effect=["approve it", "publish it"],
            ),
            patch.object(
                flow,
                "_collapse_to_outcome",
                side_effect=["approved", "publish"],
            ),
        ):
            flow.kickoff()

        # Membership (not ordering) is asserted: listeners on the same
        # outcome may run in any relative order.
        assert "draft" in execution_order
        assert "router_listener" in execution_order
        assert "plain_listener" in execution_order
        assert "on_publish" in execution_order
class TestStateManagement:
    """Tests for state management with human feedback."""

    @patch("builtins.input", return_value="approved")
    @patch("builtins.print")
    def test_feedback_available_in_listener(self, mock_print, mock_input):
        """Test that feedback is accessible in downstream listeners."""
        captured_feedback = []

        class StateFlow(Flow):
            @start()
            @human_feedback(
                message="Review:",
                emit=["approved", "rejected"],
                llm="gpt-4o-mini",
            )
            def review(self):
                return "Content to review"

            @listen("approved")
            def on_approved(self):
                # Access the feedback via property
                captured_feedback.append(self.last_human_feedback)
                return "done"

        flow = StateFlow()
        with (
            patch.object(flow, "_request_human_feedback", return_value="Great content!"),
            patch.object(flow, "_collapse_to_outcome", return_value="approved"),
        ):
            flow.kickoff()

        # The listener saw exactly one fully-populated feedback result.
        assert len(captured_feedback) == 1
        result = captured_feedback[0]
        assert isinstance(result, HumanFeedbackResult)
        assert result.output == "Content to review"
        assert result.feedback == "Great content!"
        assert result.outcome == "approved"

    @patch("builtins.input", return_value="")
    @patch("builtins.print")
    def test_history_preserved_across_steps(self, mock_print, mock_input):
        """Test that feedback history is preserved across flow execution."""

        class HistoryFlow(Flow):
            @start()
            @human_feedback(message="Step 1:")
            def step1(self):
                return "Step 1"

            @listen(step1)
            @human_feedback(message="Step 2:")
            def step2(self, result):
                return "Step 2"

            @listen(step2)
            def final(self, result):
                # Access history
                return len(self.human_feedback_history)

        flow = HistoryFlow()
        with patch.object(flow, "_request_human_feedback", return_value="feedback"):
            result = flow.kickoff()

        # Final method should see 2 feedback entries
        assert result == 2
class TestAsyncFlowIntegration:
    """Async flow support for @human_feedback."""

    @pytest.mark.asyncio
    async def test_async_flow_with_human_feedback(self):
        """An async flow method decorated with @human_feedback runs to completion."""
        call_log = []

        class AsyncFlow(Flow):
            @start()
            @human_feedback(message="Review:")
            async def async_review(self):
                call_log.append("async_review")
                await asyncio.sleep(0.01)  # Simulate async work
                return "async content"

        flow = AsyncFlow()
        with patch.object(flow, "_request_human_feedback", return_value="feedback"):
            await flow.kickoff_async()

        assert "async_review" in call_log
        latest = flow.last_human_feedback
        assert latest is not None
        assert latest.output == "async content"
class TestWithStructuredState:
    """Tests for flows with structured (Pydantic) state."""

    @patch("builtins.input", return_value="approved")
    @patch("builtins.print")
    def test_with_pydantic_state(self, mock_print, mock_input):
        """Test human feedback with structured Pydantic state."""

        class ReviewState(FlowState):
            content: str = ""
            review_count: int = 0

        class StructuredFlow(Flow[ReviewState]):
            initial_state = ReviewState

            @start()
            @human_feedback(
                message="Review:",
                emit=["approved", "rejected"],
                llm="gpt-4o-mini",
            )
            def review(self):
                # Mutate typed state before returning the reviewable output.
                self.state.content = "Generated content"
                self.state.review_count += 1
                return self.state.content

            @listen("approved")
            def on_approved(self):
                return f"Approved: {self.state.content}"

        flow = StructuredFlow()
        with (
            patch.object(flow, "_request_human_feedback", return_value="LGTM"),
            patch.object(flow, "_collapse_to_outcome", return_value="approved"),
        ):
            # FIX: the return value was previously bound to an unused local.
            flow.kickoff()

        # Typed state mutations and the feedback record both survive kickoff.
        assert flow.state.review_count == 1
        assert flow.last_human_feedback is not None
        assert flow.last_human_feedback.feedback == "LGTM"
class TestMetadataPassthrough:
    """Metadata declared on the decorator must surface on the result."""

    @patch("builtins.input", return_value="")
    @patch("builtins.print")
    def test_metadata_included_in_result(self, mock_print, mock_input):
        """Decorator metadata is passed through to HumanFeedbackResult unchanged."""

        class MetadataFlow(Flow):
            @start()
            @human_feedback(
                message="Review:",
                metadata={"channel": "slack", "priority": "high"},
            )
            def review(self):
                return "content"

        flow = MetadataFlow()
        with patch.object(flow, "_request_human_feedback", return_value="feedback"):
            flow.kickoff()

        outcome = flow.last_human_feedback
        assert outcome is not None
        assert outcome.metadata == {"channel": "slack", "priority": "high"}
class TestEventEmission:
    """Tests for event emission during human feedback."""

    @patch("builtins.input", return_value="test feedback")
    @patch("builtins.print")
    def test_events_emitted_on_feedback_request(self, mock_print, mock_input):
        """Test that events are emitted when feedback is requested."""
        from crewai.events.event_listener import event_listener

        class EventFlow(Flow):
            @start()
            @human_feedback(message="Review:")
            def review(self):
                return "content"

        flow = EventFlow()
        # We can't easily capture events in tests, but we can verify
        # the flow executes without errors.
        # The formatter's live-update pause/resume is stubbed so the console
        # renderer does not interfere with the test run.
        with (
            patch.object(
                event_listener.formatter, "pause_live_updates", return_value=None
            ),
            patch.object(
                event_listener.formatter, "resume_live_updates", return_value=None
            ),
        ):
            flow.kickoff()

        assert flow.last_human_feedback is not None
class TestEdgeCases:
    """Tests for edge cases and error handling."""

    @patch("builtins.input", return_value="")
    @patch("builtins.print")
    def test_empty_feedback_first_outcome_fallback(self, mock_print, mock_input):
        """Test that empty feedback without default uses first outcome."""

        class FallbackFlow(Flow):
            @start()
            @human_feedback(
                message="Review:",
                emit=["first", "second", "third"],
                llm="gpt-4o-mini",
                # No default_outcome specified
            )
            def review(self):
                return "content"

        flow = FallbackFlow()
        with patch.object(flow, "_request_human_feedback", return_value=""):
            result = flow.kickoff()

        assert result == "first"  # Falls back to first outcome

    # FIX: the input patch previously returned "whitespace only " — a
    # non-blank string that contradicted the scenario under test. The patch
    # is only a safety net in case the real input() is ever reached, so it
    # now returns whitespace to match the whitespace-only scenario.
    @patch("builtins.input", return_value=" ")
    @patch("builtins.print")
    def test_whitespace_only_feedback_treated_as_empty(self, mock_print, mock_input):
        """Test that whitespace-only feedback is treated as empty."""

        class WhitespaceFlow(Flow):
            @start()
            @human_feedback(
                message="Review:",
                emit=["approve", "reject"],
                llm="gpt-4o-mini",
                default_outcome="reject",
            )
            def review(self):
                return "content"

        flow = WhitespaceFlow()
        with patch.object(flow, "_request_human_feedback", return_value=" "):
            result = flow.kickoff()

        assert result == "reject"  # Uses default because feedback is empty after strip

    @patch("builtins.input", return_value="feedback")
    @patch("builtins.print")
    def test_feedback_result_without_routing(self, mock_print, mock_input):
        """Test that HumanFeedbackResult is returned when not routing."""

        class NoRoutingFlow(Flow):
            @start()
            @human_feedback(message="Review:")
            def review(self):
                return "content"

        flow = NoRoutingFlow()
        with patch.object(flow, "_request_human_feedback", return_value="feedback"):
            result = flow.kickoff()

        # Result should be HumanFeedbackResult when not routing
        assert isinstance(result, HumanFeedbackResult)
        assert result.output == "content"
        assert result.feedback == "feedback"
        assert result.outcome is None  # No routing, no outcome
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/test_human_feedback_integration.py",
"license": "MIT License",
"lines": 625,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/src/crewai/utilities/pydantic_schema_utils.py | """Dynamic Pydantic model creation from JSON schemas.
This module provides utilities for converting JSON schemas to Pydantic models at runtime.
The main function is `create_model_from_schema`, which takes a JSON schema and returns
a dynamically created Pydantic model class.
This is used by the A2A server to honor response schemas sent by clients, allowing
structured output from agent tasks.
Based on dydantic (https://github.com/zenbase-ai/dydantic).
This module provides functions for converting Pydantic models to JSON schemas
suitable for use with LLMs and tool definitions.
"""
from __future__ import annotations
from collections.abc import Callable
from copy import deepcopy
import datetime
import logging
from typing import TYPE_CHECKING, Annotated, Any, Final, Literal, TypedDict, Union
import uuid
import jsonref # type: ignore[import-untyped]
from pydantic import (
UUID1,
UUID3,
UUID4,
UUID5,
AnyUrl,
BaseModel,
ConfigDict,
DirectoryPath,
Field,
FilePath,
FileUrl,
HttpUrl,
Json,
MongoDsn,
NewPath,
PostgresDsn,
SecretBytes,
SecretStr,
StrictBytes,
create_model as create_model_base,
)
from pydantic.networks import ( # type: ignore[attr-defined]
IPv4Address,
IPv6Address,
IPvAnyAddress,
IPvAnyInterface,
IPvAnyNetwork,
)
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
    from pydantic import EmailStr
    from pydantic.main import AnyClassMethod
else:
    try:
        from pydantic import EmailStr
    except ImportError:
        # EmailStr needs the optional `email_validator` extra; fall back to a
        # plain str so schema conversion still works without email validation.
        logger.warning(
            "EmailStr unavailable, using str fallback",
            extra={"missing_package": "email_validator"},
        )
        EmailStr = str
class JsonSchemaInfo(TypedDict):
    """Inner structure for JSON schema metadata."""

    # Schema/model name surfaced to the LLM provider.
    name: str
    # Always True: schemas produced here target strict structured outputs.
    strict: Literal[True]
    # The dereferenced JSON schema body.
    schema: dict[str, Any]
class ModelDescription(TypedDict):
    """Return type for generate_model_description."""

    # Response-format discriminator expected by LLM providers.
    type: Literal["json_schema"]
    # The schema payload plus its name/strict metadata.
    json_schema: JsonSchemaInfo
def resolve_refs(schema: dict[str, Any]) -> dict[str, Any]:
    """Return a copy of *schema* with every local ``$ref`` inlined from ``$defs``.

    Pydantic emits $ref-based schemas; some consumers (LLMs, tool frameworks)
    handle them poorly, so each reference is expanded to its full definition.

    Args:
        schema: JSON Schema dict that may contain "$refs" and "$defs".

    Returns:
        A new schema dictionary with all local $refs replaced by their definitions.

    Raises:
        KeyError: If a ``#/$defs/...`` reference has no matching definition.
    """
    definitions = schema.get("$defs", {})

    def _expand(value: Any) -> Any:
        if isinstance(value, list):
            return [_expand(entry) for entry in value]
        if not isinstance(value, dict):
            return value
        pointer = value.get("$ref")
        if isinstance(pointer, str) and pointer.startswith("#/$defs/"):
            target = pointer.replace("#/$defs/", "")
            if target not in definitions:
                raise KeyError(f"Definition '{target}' not found in $defs.")
            # Expand a fresh copy so definitions are never shared/mutated.
            return _expand(deepcopy(definitions[target]))
        return {key: _expand(entry) for key, entry in value.items()}

    return _expand(deepcopy(schema))  # type: ignore[no-any-return]
def add_key_in_dict_recursively(
    d: dict[str, Any], key: str, value: Any, criteria: Callable[[dict[str, Any]], bool]
) -> dict[str, Any]:
    """Walk *d* depth-first and set ``key`` on every dict that satisfies *criteria*.

    Existing values for ``key`` are never overwritten; lists are traversed but
    only dict nodes can receive the key.

    Args:
        d: The dictionary to modify.
        key: The key to add.
        value: The value to add.
        criteria: A function that returns True for dicts that should receive the key.

    Returns:
        The modified dictionary.
    """
    if isinstance(d, list):
        for entry in d:
            add_key_in_dict_recursively(entry, key, value, criteria)
    elif isinstance(d, dict):
        # Evaluate criteria first (matches original call order), then guard
        # against clobbering a pre-existing key.
        if criteria(d) and key not in d:
            d[key] = value
        for child in d.values():
            add_key_in_dict_recursively(child, key, value, criteria)
    return d
def force_additional_properties_false(d: Any) -> Any:
    """Recursively pin ``additionalProperties: false`` on every object schema.

    OpenAI strict mode demands ``additionalProperties=false`` on all objects;
    any existing value is overwritten. Objects also receive empty
    ``properties``/``required`` entries when missing, as strict mode requires
    both for every object type.

    Args:
        d: The dictionary/list to modify.

    Returns:
        The modified dictionary/list.
    """
    if isinstance(d, list):
        for entry in d:
            force_additional_properties_false(entry)
        return d
    if not isinstance(d, dict):
        return d
    if d.get("type") == "object":
        d["additionalProperties"] = False
        d.setdefault("properties", {})
        d.setdefault("required", [])
    for child in d.values():
        force_additional_properties_false(child)
    return d
# JSON Schema `format` values accepted by OpenAI strict structured outputs;
# any other format annotation must be stripped before sending a schema.
OPENAI_SUPPORTED_FORMATS: Final[
    set[Literal["date-time", "date", "time", "duration"]]
] = {
    "date-time",
    "date",
    "time",
    "duration",
}
def strip_unsupported_formats(d: Any) -> Any:
    """Recursively drop ``format`` annotations OpenAI strict mode rejects.

    Only date-time, date, time and duration survive; formats such as uri,
    email or uuid would trigger validation errors and are removed in place.

    Args:
        d: The dictionary/list to modify.

    Returns:
        The modified dictionary/list.
    """
    if isinstance(d, list):
        for entry in d:
            strip_unsupported_formats(entry)
        return d
    if not isinstance(d, dict):
        return d
    declared_format = d.get("format")
    if isinstance(declared_format, str) and declared_format not in OPENAI_SUPPORTED_FORMATS:
        del d["format"]
    for child in d.values():
        strip_unsupported_formats(child)
    return d
def ensure_type_in_schemas(d: Any) -> Any:
    """Give every empty schema inside anyOf/oneOf an explicit object type.

    OpenAI strict mode requires a 'type' on every schema; a bare ``{}`` variant
    is rewritten in place to ``{"type": "object"}`` while non-empty variants
    are processed recursively.

    Args:
        d: The dictionary/list to modify.

    Returns:
        The modified dictionary/list.
    """
    if isinstance(d, dict):
        for union_key in ("anyOf", "oneOf"):
            if union_key not in d:
                continue
            variants = d[union_key]
            for idx, variant in enumerate(variants):
                if isinstance(variant, dict) and variant == {}:
                    variants[idx] = {"type": "object"}
                else:
                    ensure_type_in_schemas(variant)
        for child in d.values():
            ensure_type_in_schemas(child)
    elif isinstance(d, list):
        for entry in d:
            ensure_type_in_schemas(entry)
    return d
def fix_discriminator_mappings(schema: dict[str, Any]) -> dict[str, Any]:
    """Strip '#/$defs/...' prefixes from the output discriminator mapping.

    Only the ``output`` property's discriminator is touched; each mapping
    value keeps just its final path segment (the model name).

    Args:
        schema: JSON schema dictionary.

    Returns:
        Modified schema with fixed discriminator mappings.
    """
    output_schema = schema.get("properties", {}).get("output")
    if not output_schema:
        return schema
    discriminator = output_schema.get("discriminator")
    if not discriminator or "mapping" not in discriminator:
        return schema
    discriminator["mapping"] = {
        key: ref.rsplit("/", 1)[-1] for key, ref in discriminator["mapping"].items()
    }
    return schema
def add_const_to_oneof_variants(schema: dict[str, Any]) -> dict[str, Any]:
    """Attach discriminator ``const`` values to oneOf variants in a schema copy.

    Libraries that rebuild models from discriminated-union schemas need each
    oneOf variant to carry a const for the discriminator property. The const is
    derived from the discriminator mapping by matching variant titles.

    Args:
        schema: JSON Schema dict that may contain discriminated unions

    Returns:
        Modified schema with const fields added to oneOf variants
    """

    def _walk(node: dict[str, Any]) -> dict[str, Any]:
        """Annotate one node (and its children) if it is a discriminated oneOf."""
        if not isinstance(node, dict):
            return node
        if "oneOf" in node and "discriminator" in node:
            disc = node["discriminator"]
            disc_prop = disc.get("propertyName")
            disc_map = disc.get("mapping", {})
            if disc_prop and disc_map:
                for variant in node.get("oneOf", []):
                    if not (isinstance(variant, dict) and "properties" in variant):
                        continue
                    title = variant.get("title", "")
                    # First mapping entry whose target matches this variant's
                    # title (exactly or as a suffix) wins.
                    matched = next(
                        (
                            disc_value
                            for disc_value, target in disc_map.items()
                            if title == target or title.endswith(target)
                        ),
                        None,
                    )
                    if matched is not None and disc_prop in variant["properties"]:
                        variant["properties"][disc_prop]["const"] = matched
        for key, child in node.items():
            if isinstance(child, dict):
                node[key] = _walk(child)
            elif isinstance(child, list):
                node[key] = [
                    _walk(entry) if isinstance(entry, dict) else entry
                    for entry in child
                ]
        return node

    # Work on a deep copy so the caller's schema is left untouched.
    return _walk(deepcopy(schema))
def convert_oneof_to_anyof(schema: dict[str, Any]) -> dict[str, Any]:
    """Recursively rename every ``oneOf`` key to ``anyOf`` in place.

    OpenAI's Structured Outputs handle anyOf more reliably than oneOf, so
    all occurrences are rewritten throughout the schema tree.

    Args:
        schema: JSON schema dictionary.

    Returns:
        Modified schema with anyOf instead of oneOf.
    """
    if not isinstance(schema, dict):
        return schema
    if "oneOf" in schema:
        schema["anyOf"] = schema.pop("oneOf")
    for child in schema.values():
        if isinstance(child, list):
            for entry in child:
                if isinstance(entry, dict):
                    convert_oneof_to_anyof(entry)
        elif isinstance(child, dict):
            convert_oneof_to_anyof(child)
    return schema
def ensure_all_properties_required(schema: dict[str, Any]) -> dict[str, Any]:
    """Mark every declared property as required, recursively.

    OpenAI strict structured outputs insist that the ``required`` array list
    every property of every object, so it is rebuilt from the property keys.

    Args:
        schema: JSON schema dictionary.

    Returns:
        Modified schema with all properties marked as required.
    """
    if not isinstance(schema, dict):
        return schema
    if schema.get("type") == "object" and "properties" in schema:
        declared = schema["properties"]
        if declared:
            schema["required"] = list(declared)
    for child in schema.values():
        if isinstance(child, list):
            for entry in child:
                if isinstance(entry, dict):
                    ensure_all_properties_required(entry)
        elif isinstance(child, dict):
            ensure_all_properties_required(child)
    return schema
def strip_null_from_types(schema: dict[str, Any]) -> dict[str, Any]:
    """Recursively remove the null alternative from anyOf and type arrays.

    Pydantic renders ``T | None`` as a nullable schema, but MCP-style tool
    consumers expect optional fields to be omitted rather than sent as null.
    A one-variant anyOf collapses into its parent; a multi-variant anyOf keeps
    only its non-null members.

    Args:
        schema: JSON schema dictionary.

    Returns:
        Modified schema with null types removed.
    """
    if not isinstance(schema, dict):
        return schema
    if "anyOf" in schema:
        survivors = [variant for variant in schema["anyOf"] if variant.get("type") != "null"]
        if len(survivors) == 1:
            # Single remaining variant: inline it into the parent schema.
            del schema["anyOf"]
            schema.update(survivors[0])
        elif survivors:
            schema["anyOf"] = survivors
    declared = schema.get("type")
    if isinstance(declared, list) and "null" in declared:
        remaining = [t for t in declared if t != "null"]
        if len(remaining) == 1:
            schema["type"] = remaining[0]
        elif remaining:
            schema["type"] = remaining
    for child in schema.values():
        if isinstance(child, list):
            for entry in child:
                if isinstance(entry, dict):
                    strip_null_from_types(entry)
        elif isinstance(child, dict):
            strip_null_from_types(child)
    return schema
def generate_model_description(
    model: type[BaseModel],
    *,
    strip_null_types: bool = True,
) -> ModelDescription:
    """Build an LLM-ready JSON schema description for a Pydantic model.

    The model's schema is run through a fixed pipeline of normalizations
    (strict-mode object constraints, format stripping, ref inlining,
    discriminator/oneOf fixes, all-required enforcement) before being wrapped
    in the provider response-format envelope.

    Args:
        model: A Pydantic model class.
        strip_null_types: When ``True`` (default), remove ``null`` from
            ``anyOf`` / ``type`` arrays. Set to ``False`` to allow sending
            ``null`` for optional fields.

    Returns:
        A ModelDescription with JSON schema representation of the model.
    """
    schema = model.model_json_schema(ref_template="#/$defs/{model}")
    # Pre-dereference passes: order matters (refs are inlined last so the
    # earlier passes also touch the definitions).
    for transform in (
        force_additional_properties_false,
        strip_unsupported_formats,
        ensure_type_in_schemas,
        resolve_refs,
    ):
        schema = transform(schema)
    # Definitions are fully inlined now; drop the leftover $defs table.
    schema.pop("$defs", None)
    for transform in (
        fix_discriminator_mappings,
        convert_oneof_to_anyof,
        ensure_all_properties_required,
    ):
        schema = transform(schema)
    if strip_null_types:
        schema = strip_null_from_types(schema)
    return {
        "type": "json_schema",
        "json_schema": {
            "name": model.__name__,
            "strict": True,
            "schema": schema,
        },
    }
# Maps JSON Schema `format` strings to the Python/Pydantic type used when
# rebuilding a model field from a schema (see _json_schema_to_pydantic_field).
FORMAT_TYPE_MAP: dict[str, type[Any]] = {
    "base64": Annotated[bytes, Field(json_schema_extra={"format": "base64"})],  # type: ignore[dict-item]
    "binary": StrictBytes,
    "date": datetime.date,
    "time": datetime.time,
    "date-time": datetime.datetime,
    "duration": datetime.timedelta,
    "directory-path": DirectoryPath,
    "email": EmailStr,  # falls back to plain str when email_validator is absent
    "file-path": FilePath,
    "ipv4": IPv4Address,
    "ipv6": IPv6Address,
    "ipvanyaddress": IPvAnyAddress,  # type: ignore[dict-item]
    "ipvanyinterface": IPvAnyInterface,  # type: ignore[dict-item]
    "ipvanynetwork": IPvAnyNetwork,  # type: ignore[dict-item]
    "json-string": Json,
    "multi-host-uri": PostgresDsn | MongoDsn,  # type: ignore[dict-item]
    "password": SecretStr,
    "path": NewPath,
    "uri": AnyUrl,
    "uuid": uuid.UUID,
    "uuid1": UUID1,
    "uuid3": UUID3,
    "uuid4": UUID4,
    "uuid5": UUID5,
}
def build_rich_field_description(prop_schema: dict[str, Any]) -> str:
    """Render a field description that folds schema constraints into prose.

    Format, enum, pattern, numeric bounds, length bounds and (up to three)
    examples are appended to the base description so an LLM can honor the
    constraints without reading the raw JSON Schema.

    Args:
        prop_schema: Property schema with description and constraints.

    Returns:
        Enhanced description with format, enum, and other constraints.
    """
    pieces: list[str] = []
    base = prop_schema.get("description", "")
    if base:
        pieces.append(base)
    fmt = prop_schema.get("format")
    if fmt:
        pieces.append(f"Format: {fmt}")
    allowed = prop_schema.get("enum")
    if allowed:
        rendered = ", ".join(repr(option) for option in allowed)
        pieces.append(f"Allowed values: [{rendered}]")
    regex = prop_schema.get("pattern")
    if regex:
        pieces.append(f"Pattern: {regex}")
    lower = prop_schema.get("minimum")
    if lower is not None:
        pieces.append(f"Minimum: {lower}")
    upper = prop_schema.get("maximum")
    if upper is not None:
        pieces.append(f"Maximum: {upper}")
    shortest = prop_schema.get("minLength")
    if shortest is not None:
        pieces.append(f"Min length: {shortest}")
    longest = prop_schema.get("maxLength")
    if longest is not None:
        pieces.append(f"Max length: {longest}")
    samples = prop_schema.get("examples")
    if samples:
        rendered_samples = ", ".join(repr(sample) for sample in samples[:3])
        pieces.append(f"Examples: {rendered_samples}")
    # Joining an empty list yields "", matching the no-constraint case.
    return ". ".join(pieces)
def create_model_from_schema(  # type: ignore[no-any-unimported]
    json_schema: dict[str, Any],
    *,
    root_schema: dict[str, Any] | None = None,
    model_name: str | None = None,
    enrich_descriptions: bool = False,
    __config__: ConfigDict | None = None,
    __base__: type[BaseModel] | None = None,
    __module__: str = __name__,
    __validators__: dict[str, AnyClassMethod] | None = None,
    __cls_kwargs__: dict[str, Any] | None = None,
) -> type[BaseModel]:
    """Create a Pydantic model from a JSON schema.

    This function takes a JSON schema as input and dynamically creates a Pydantic
    model class based on the schema. It supports various JSON schema features such
    as nested objects, referenced definitions ($ref), arrays with typed items,
    union types (anyOf/oneOf), and string formats.

    Args:
        json_schema: A dictionary representing the JSON schema.
        root_schema: The root schema containing $defs. If not provided, the
            current schema is treated as the root schema.
        model_name: Override for the model name. If not provided, the schema
            ``title`` field is used, falling back to ``"DynamicModel"``.
        enrich_descriptions: When True, augment field descriptions with
            constraint info (format, enum, pattern, min/max, examples) via
            :func:`build_rich_field_description`. Useful for LLM-facing tool
            schemas where constraints in the description help the model
            understand parameter requirements.
        __config__: Pydantic configuration for the generated model.
        __base__: Base class for the generated model. Defaults to BaseModel.
        __module__: Module name for the generated model class.
        __validators__: A dictionary of custom validators for the generated model.
        __cls_kwargs__: Additional keyword arguments for the generated model class.

    Returns:
        A dynamically created Pydantic model class based on the provided JSON schema.

    Example:
        >>> schema = {
        ...     "title": "Person",
        ...     "type": "object",
        ...     "properties": {
        ...         "name": {"type": "string"},
        ...         "age": {"type": "integer"},
        ...     },
        ...     "required": ["name"],
        ... }
        >>> Person = create_model_from_schema(schema)
        >>> person = Person(name="John", age=30)
        >>> person.name
        'John'
    """
    # Inline all $refs up front so field conversion never sees a reference.
    json_schema = dict(jsonref.replace_refs(json_schema, proxies=False))
    effective_root = root_schema or json_schema
    # Normalize both views for strict mode before any fields are derived.
    json_schema = force_additional_properties_false(json_schema)
    effective_root = force_additional_properties_false(effective_root)
    # A top-level allOf is flattened into one merged object schema.
    if "allOf" in json_schema:
        json_schema = _merge_all_of_schemas(json_schema["allOf"], effective_root)
    # Inherit the root's title when this sub-schema has none of its own.
    if "title" not in json_schema and "title" in (root_schema or {}):
        json_schema["title"] = (root_schema or {}).get("title")
    effective_name = model_name or json_schema.get("title") or "DynamicModel"
    # Each property becomes a (type, FieldInfo) pair for create_model.
    field_definitions = {
        name: _json_schema_to_pydantic_field(
            name,
            prop,
            json_schema.get("required", []),
            effective_root,
            enrich_descriptions=enrich_descriptions,
        )
        for name, prop in (json_schema.get("properties", {}) or {}).items()
    }
    # Default to forbidding extra attributes, mirroring strict-mode schemas.
    effective_config = __config__ or ConfigDict(extra="forbid")
    return create_model_base(
        effective_name,
        __config__=effective_config,
        __base__=__base__,
        __module__=__module__,
        __validators__=__validators__,
        __cls_kwargs__=__cls_kwargs__,
        **field_definitions,
    )
def _json_schema_to_pydantic_field(
    name: str,
    json_schema: dict[str, Any],
    required: list[str],
    root_schema: dict[str, Any],
    *,
    enrich_descriptions: bool = False,
) -> Any:
    """Convert a JSON schema property to a Pydantic field definition.

    Args:
        name: The field name.
        json_schema: The JSON schema for this field.
        required: List of required field names.
        root_schema: The root schema for resolving $ref.
        enrich_descriptions: When True, embed constraints in the description.

    Returns:
        A tuple of (type, Field) for use with create_model.
    """
    type_ = _json_schema_to_pydantic_type(
        json_schema, root_schema, name_=name.title(), enrich_descriptions=enrich_descriptions
    )
    is_required = name in required
    field_params: dict[str, Any] = {}
    schema_extra: dict[str, Any] = {}
    if enrich_descriptions:
        rich_desc = build_rich_field_description(json_schema)
        if rich_desc:
            field_params["description"] = rich_desc
    else:
        description = json_schema.get("description")
        if description:
            field_params["description"] = description
    examples = json_schema.get("examples")
    if examples:
        schema_extra["examples"] = examples
    # Required fields get Ellipsis (no default); optional ones default to None.
    default = ... if is_required else None
    # Numeric constraint keywords map directly onto Field validation params.
    if isinstance(type_, type) and issubclass(type_, (int, float)):
        if "minimum" in json_schema:
            field_params["ge"] = json_schema["minimum"]
        if "exclusiveMinimum" in json_schema:
            field_params["gt"] = json_schema["exclusiveMinimum"]
        if "maximum" in json_schema:
            field_params["le"] = json_schema["maximum"]
        if "exclusiveMaximum" in json_schema:
            field_params["lt"] = json_schema["exclusiveMaximum"]
        if "multipleOf" in json_schema:
            field_params["multiple_of"] = json_schema["multipleOf"]
    # A recognized `format` overrides the basic type with a richer one.
    format_ = json_schema.get("format")
    if format_ in FORMAT_TYPE_MAP:
        pydantic_type = FORMAT_TYPE_MAP[format_]
        if format_ == "password":
            # writeOnly passwords are treated as secret bytes, not secret str.
            if json_schema.get("writeOnly"):
                pydantic_type = SecretBytes
        elif format_ == "uri":
            # Nonstandard "scheme" hint narrows AnyUrl to a specific URL type.
            allowed_schemes = json_schema.get("scheme")
            if allowed_schemes:
                if len(allowed_schemes) == 1 and allowed_schemes[0] == "http":
                    pydantic_type = HttpUrl
                elif len(allowed_schemes) == 1 and allowed_schemes[0] == "file":
                    pydantic_type = FileUrl
        type_ = pydantic_type
    # String constraint keywords map onto Field length/pattern params.
    if isinstance(type_, type) and issubclass(type_, str):
        if "minLength" in json_schema:
            field_params["min_length"] = json_schema["minLength"]
        if "maxLength" in json_schema:
            field_params["max_length"] = json_schema["maxLength"]
        if "pattern" in json_schema:
            field_params["pattern"] = json_schema["pattern"]
    if not is_required:
        # Bug fix: use Union[...] instead of `type_ | None`. The `|` operator
        # raises TypeError when type_ is None (JSON type "null"), whereas
        # Union[None, None] collapses cleanly to NoneType.
        type_ = Union[type_, None]  # noqa: UP007
    if schema_extra:
        field_params["json_schema_extra"] = schema_extra
    return type_, Field(default, **field_params)
def _resolve_ref(ref: str, root_schema: dict[str, Any]) -> dict[str, Any]:
    """Follow a local JSON pointer to the schema it references.

    Args:
        ref: The $ref string (e.g., "#/$defs/MyType").
        root_schema: The root schema containing $defs.

    Returns:
        The resolved schema dict.
    """
    from typing import cast

    segments = ref.split("/")
    if ref.startswith("#/$defs/"):
        # Start inside the definitions table; skip the "#/$defs" segments.
        node: dict[str, Any] = root_schema["$defs"]
        remaining = segments[2:]
    else:
        # Any other local pointer walks from the root, skipping only "#".
        node = root_schema
        remaining = segments[1:]
    for segment in remaining:
        node = cast(dict[str, Any], node[segment])
    return node
def _merge_all_of_schemas(
    schemas: list[dict[str, Any]],
    root_schema: dict[str, Any],
) -> dict[str, Any]:
    """Flatten an allOf list into one object schema.

    Properties are merged (later schemas win on key clashes), required names
    are deduplicated in first-seen order, and the first title encountered is
    kept.

    Args:
        schemas: List of schemas to merge.
        root_schema: The root schema for resolving $ref.

    Returns:
        Merged schema with combined properties and required fields.
    """
    combined: dict[str, Any] = {"type": "object", "properties": {}, "required": []}
    for entry in schemas:
        resolved = _resolve_ref(entry["$ref"], root_schema) if "$ref" in entry else entry
        combined["properties"].update(resolved.get("properties", {}))
        for field_name in resolved.get("required", []):
            if field_name not in combined["required"]:
                combined["required"].append(field_name)
        if "title" in resolved and "title" not in combined:
            combined["title"] = resolved["title"]
    return combined
def _json_schema_to_pydantic_type(
    json_schema: dict[str, Any],
    root_schema: dict[str, Any],
    *,
    name_: str | None = None,
    enrich_descriptions: bool = False,
) -> Any:
    """Convert a JSON schema to a Python/Pydantic type.

    Args:
        json_schema: The JSON schema to convert.
        root_schema: The root schema for resolving $ref.
        name_: Optional name for nested models.
        enrich_descriptions: Propagated to nested model creation.

    Returns:
        A Python type corresponding to the JSON schema.

    Raises:
        ValueError: If the schema declares a type this converter cannot handle.
    """
    # $ref: resolve against the root schema and convert the target instead.
    ref = json_schema.get("$ref")
    if ref:
        ref_schema = _resolve_ref(ref, root_schema)
        return _json_schema_to_pydantic_type(
            ref_schema, root_schema, name_=name_, enrich_descriptions=enrich_descriptions
        )
    # enum / const both become Literal types.
    enum_values = json_schema.get("enum")
    if enum_values:
        return Literal[tuple(enum_values)]
    if "const" in json_schema:
        return Literal[json_schema["const"]]
    # anyOf and oneOf are treated identically: a Union of the variant types.
    any_of_schemas = []
    if "anyOf" in json_schema or "oneOf" in json_schema:
        any_of_schemas = json_schema.get("anyOf", []) + json_schema.get("oneOf", [])
    if any_of_schemas:
        any_of_types = [
            _json_schema_to_pydantic_type(
                schema,
                root_schema,
                name_=f"{name_ or 'Union'}Option{i}",
                enrich_descriptions=enrich_descriptions,
            )
            for i, schema in enumerate(any_of_schemas)
        ]
        return Union[tuple(any_of_types)]  # noqa: UP007
    # allOf: a single entry unwraps directly; multiple entries are merged first.
    all_of_schemas = json_schema.get("allOf")
    if all_of_schemas:
        if len(all_of_schemas) == 1:
            return _json_schema_to_pydantic_type(
                all_of_schemas[0], root_schema, name_=name_,
                enrich_descriptions=enrich_descriptions,
            )
        merged = _merge_all_of_schemas(all_of_schemas, root_schema)
        return _json_schema_to_pydantic_type(
            merged, root_schema, name_=name_,
            enrich_descriptions=enrich_descriptions,
        )
    # Primitive types map straight onto Python builtins.
    type_ = json_schema.get("type")
    if type_ == "string":
        return str
    if type_ == "integer":
        return int
    if type_ == "number":
        return float
    if type_ == "boolean":
        return bool
    if type_ == "array":
        items_schema = json_schema.get("items")
        if items_schema:
            item_type = _json_schema_to_pydantic_type(
                items_schema, root_schema, name_=name_,
                enrich_descriptions=enrich_descriptions,
            )
            return list[item_type]  # type: ignore[valid-type]
        return list
    if type_ == "object":
        properties = json_schema.get("properties")
        if properties:
            # Objects with declared properties become nested dynamic models;
            # copy before titling so the caller's schema is not mutated.
            json_schema_ = json_schema.copy()
            if json_schema_.get("title") is None:
                json_schema_["title"] = name_ or "DynamicModel"
            return create_model_from_schema(
                json_schema_, root_schema=root_schema,
                enrich_descriptions=enrich_descriptions,
            )
        return dict
    if type_ == "null":
        return None
    # A schema with no "type" at all accepts anything.
    if type_ is None:
        return Any
    raise ValueError(f"Unsupported JSON schema type: {type_} from {json_schema}")
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/utilities/pydantic_schema_utils.py",
"license": "MIT License",
"lines": 714,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai/tests/telemetry/test_execution_span_assignment.py | """Test that crew execution span is properly assigned during kickoff."""
import os
import threading
import pytest
from crewai import Agent, Crew, Task
from crewai.events.event_bus import crewai_event_bus
from crewai.events.event_listener import EventListener
from crewai.telemetry import Telemetry
@pytest.fixture(autouse=True)
def cleanup_singletons():
    """Reset singletons between tests and enable telemetry."""
    # Remember the caller's env so it can be restored exactly afterwards.
    original_telemetry = os.environ.get("CREWAI_DISABLE_TELEMETRY")
    original_otel = os.environ.get("OTEL_SDK_DISABLED")
    os.environ["CREWAI_DISABLE_TELEMETRY"] = "false"
    os.environ["OTEL_SDK_DISABLED"] = "false"
    # Drop every registered event handler under the bus write lock.
    with crewai_event_bus._rwlock.w_locked():
        crewai_event_bus._sync_handlers.clear()
        crewai_event_bus._async_handlers.clear()
    # Null the singletons so each test re-initializes with telemetry enabled.
    Telemetry._instance = None
    EventListener._instance = None
    if hasattr(Telemetry, "_lock"):
        Telemetry._lock = threading.Lock()
    yield
    # Teardown: clear handlers again and restore the original env vars.
    with crewai_event_bus._rwlock.w_locked():
        crewai_event_bus._sync_handlers.clear()
        crewai_event_bus._async_handlers.clear()
    if original_telemetry is not None:
        os.environ["CREWAI_DISABLE_TELEMETRY"] = original_telemetry
    else:
        os.environ.pop("CREWAI_DISABLE_TELEMETRY", None)
    if original_otel is not None:
        os.environ["OTEL_SDK_DISABLED"] = original_otel
    else:
        os.environ.pop("OTEL_SDK_DISABLED", None)
    Telemetry._instance = None
    EventListener._instance = None
    if hasattr(Telemetry, "_lock"):
        Telemetry._lock = threading.Lock()
@pytest.mark.vcr()
def test_crew_execution_span_assigned_on_kickoff():
    """Test that _execution_span is assigned to crew after kickoff.

    The bug: event_listener.py calls crew_execution_span() but doesn't assign
    the returned span to source._execution_span, causing end_crew() to fail
    when it tries to access crew._execution_span.
    """
    agent = Agent(
        role="test agent",
        goal="say hello",
        backstory="a friendly agent",
        llm="gpt-4o-mini",
    )
    task = Task(
        description="Say hello",
        expected_output="hello",
        agent=agent,
    )
    # share_crew=True is what makes crew_execution_span() return a real span.
    crew = Crew(
        agents=[agent],
        tasks=[task],
        share_crew=True,
    )
    crew.kickoff()
    # The critical check: verify the crew has _execution_span set
    # This is what end_crew() needs to properly close the span
    assert crew._execution_span is not None, (
        "crew._execution_span should be set after kickoff when share_crew=True. "
        "The event_listener.py must assign the return value of crew_execution_span() "
        "to source._execution_span."
    )
@pytest.mark.vcr()
def test_end_crew_receives_valid_execution_span():
    """Test that end_crew receives a valid execution span to close.

    This verifies the complete lifecycle: span creation, assignment, and closure
    without errors when end_crew() accesses crew._execution_span.
    """
    agent = Agent(
        role="test agent",
        goal="say hello",
        backstory="a friendly agent",
        llm="gpt-4o-mini",
    )
    task = Task(
        description="Say hello",
        expected_output="hello",
        agent=agent,
    )
    crew = Crew(
        agents=[agent],
        tasks=[task],
        share_crew=True,
    )
    # kickoff completing without raising implies end_crew closed the span cleanly.
    result = crew.kickoff()
    assert crew._execution_span is not None
    assert result is not None
@pytest.mark.vcr()
def test_crew_execution_span_not_set_when_share_crew_false():
    """Test that _execution_span is None when share_crew=False.

    When share_crew is False, crew_execution_span() returns None,
    so _execution_span should not be set.
    """
    agent = Agent(
        role="test agent",
        goal="say hello",
        backstory="a friendly agent",
        llm="gpt-4o-mini",
    )
    task = Task(
        description="Say hello",
        expected_output="hello",
        agent=agent,
    )
    crew = Crew(
        agents=[agent],
        tasks=[task],
        share_crew=False,
    )
    crew.kickoff()
    # Either the attribute was never created or it was explicitly left as None.
    assert (
        not hasattr(crew, "_execution_span") or crew._execution_span is None
    ), "crew._execution_span should be None when share_crew=False"
@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_crew_execution_span_assigned_on_kickoff_async():
    """Test that _execution_span is assigned during async kickoff.

    Verifies that the async execution path also properly assigns
    the execution span.
    """
    agent = Agent(
        role="test agent",
        goal="say hello",
        backstory="a friendly agent",
        llm="gpt-4o-mini",
    )
    task = Task(
        description="Say hello",
        expected_output="hello",
        agent=agent,
    )
    crew = Crew(
        agents=[agent],
        tasks=[task],
        share_crew=True,
    )
    # Same assertion as the sync test, but via the kickoff_async code path.
    await crew.kickoff_async()
    assert crew._execution_span is not None, (
        "crew._execution_span should be set after kickoff_async when share_crew=True"
    )
@pytest.mark.vcr()
def test_crew_execution_span_assigned_on_kickoff_for_each():
    """Test that _execution_span is assigned for each crew execution.

    Verifies that batch execution properly assigns execution spans
    for each input.
    """
    agent = Agent(
        role="test agent",
        goal="say hello",
        backstory="a friendly agent",
        llm="gpt-4o-mini",
    )
    task = Task(
        description="Say hello to {name}",
        expected_output="hello",
        agent=agent,
    )
    crew = Crew(
        agents=[agent],
        tasks=[task],
        share_crew=True,
    )
    inputs = [{"name": "Alice"}, {"name": "Bob"}]
    # NOTE(review): kickoff_for_each runs on internal crew copies, so only the
    # per-input results are asserted here; the copies' spans are not reachable
    # from this crew instance — confirm if span-per-copy coverage is needed.
    results = crew.kickoff_for_each(inputs)
    assert len(results) == 2
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/telemetry/test_execution_span_assignment.py",
"license": "MIT License",
"lines": 172,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/tests/telemetry/test_flow_crew_span_integration.py | """Test that crew execution spans work correctly when crews run inside flows.
Note: These tests use mocked LLM responses instead of VCR cassettes because
VCR's httpx async stubs have a known incompatibility with the OpenAI client
when running inside asyncio.run() (which Flow.kickoff() uses). The VCR
assertion `assert not hasattr(resp, "_decoder")` fails silently when the
OpenAI client reads responses before VCR can serialize them.
"""
import os
import threading
from unittest.mock import Mock
import pytest
from pydantic import BaseModel
from crewai import Agent, Crew, Task, LLM
from crewai.events.event_listener import EventListener
from crewai.flow.flow import Flow, listen, start
from crewai.telemetry import Telemetry
from crewai.types.usage_metrics import UsageMetrics
class SimpleState(BaseModel):
    """Simple state for flow testing."""

    # Populated with the crew's raw output by the flow methods under test.
    result: str = ""
def create_mock_llm() -> Mock:
    """Create a mock LLM that returns a simple response.

    The mock includes all attributes required by the telemetry system,
    particularly the 'model' attribute which is accessed during span creation.

    Returns:
        A Mock specced against LLM with canned call/usage responses.
    """
    mock_llm = Mock(spec=LLM)
    mock_llm.call.return_value = "Hello! This is a test response."
    mock_llm.stop = []
    mock_llm.model = "gpt-4o-mini"  # Required by telemetry
    mock_llm.supports_stop_words.return_value = True
    # Usage metrics are queried after execution; supply a plausible summary.
    mock_llm.get_token_usage_summary.return_value = UsageMetrics(
        total_tokens=100,
        prompt_tokens=50,
        completion_tokens=50,
        cached_prompt_tokens=0,
        successful_requests=1,
    )
    return mock_llm
@pytest.fixture(autouse=True)
def enable_telemetry_for_tests():
    """Enable telemetry for these tests and reset singletons."""
    from crewai.events.event_bus import crewai_event_bus

    # Save the current env so teardown can restore it exactly.
    original_telemetry = os.environ.get("CREWAI_DISABLE_TELEMETRY")
    original_otel = os.environ.get("OTEL_SDK_DISABLED")
    os.environ["CREWAI_DISABLE_TELEMETRY"] = "false"
    os.environ["OTEL_SDK_DISABLED"] = "false"
    # Clear handlers under the write lock, then reset the singletons so they
    # re-initialize with telemetry enabled.
    with crewai_event_bus._rwlock.w_locked():
        crewai_event_bus._sync_handlers.clear()
        crewai_event_bus._async_handlers.clear()
    Telemetry._instance = None
    EventListener._instance = None
    if hasattr(Telemetry, "_lock"):
        Telemetry._lock = threading.Lock()
    yield
    # Teardown mirrors setup: clear handlers, reset singletons, restore env.
    with crewai_event_bus._rwlock.w_locked():
        crewai_event_bus._sync_handlers.clear()
        crewai_event_bus._async_handlers.clear()
    Telemetry._instance = None
    EventListener._instance = None
    if hasattr(Telemetry, "_lock"):
        Telemetry._lock = threading.Lock()
    if original_telemetry is not None:
        os.environ["CREWAI_DISABLE_TELEMETRY"] = original_telemetry
    else:
        os.environ.pop("CREWAI_DISABLE_TELEMETRY", None)
    if original_otel is not None:
        os.environ["OTEL_SDK_DISABLED"] = original_otel
    else:
        os.environ.pop("OTEL_SDK_DISABLED", None)
def test_crew_execution_span_in_flow_with_share_crew():
    """Test that crew._execution_span is properly set when crew runs inside a flow.

    This verifies that when a crew is kicked off inside a flow method with
    share_crew=True, the execution span is properly assigned and closed without
    errors.
    """
    mock_llm = create_mock_llm()

    class SampleFlow(Flow[SimpleState]):
        @start()
        def run_crew(self):
            """Run a crew inside the flow."""
            agent = Agent(
                role="test agent",
                goal="say hello",
                backstory="a friendly agent",
                llm=mock_llm,
            )
            task = Task(
                description="Say hello",
                expected_output="hello",
                agent=agent,
            )
            crew = Crew(
                agents=[agent],
                tasks=[task],
                share_crew=True,
            )
            result = crew.kickoff()
            # The span must be assigned even though kickoff runs inside the
            # flow's own (asyncio-driven) execution context.
            assert crew._execution_span is not None, (
                "crew._execution_span should be set after kickoff even when "
                "crew runs inside a flow method"
            )
            self.state.result = str(result.raw)
            return self.state.result

    flow = SampleFlow()
    flow.kickoff()
    # The flow method ran and the mocked LLM was actually exercised.
    assert flow.state.result != ""
    mock_llm.call.assert_called()
def test_crew_execution_span_not_set_in_flow_without_share_crew():
    """Test that crew._execution_span is None when share_crew=False in flow.

    Verifies that when a crew runs inside a flow with share_crew=False,
    no execution span is created.
    """
    mock_llm = create_mock_llm()

    class SampleTestFlowNotSet(Flow[SimpleState]):
        @start()
        def run_crew(self):
            """Run a crew inside the flow without sharing."""
            agent = Agent(
                role="test agent",
                goal="say hello",
                backstory="a friendly agent",
                llm=mock_llm,
            )
            task = Task(
                description="Say hello",
                expected_output="hello",
                agent=agent,
            )
            crew = Crew(
                agents=[agent],
                tasks=[task],
                share_crew=False,
            )
            result = crew.kickoff()
            # Without share_crew the telemetry span should never be created;
            # accept either a missing attribute or an explicit None.
            assert (
                not hasattr(crew, "_execution_span") or crew._execution_span is None
            ), "crew._execution_span should be None when share_crew=False"
            self.state.result = str(result.raw)
            return self.state.result

    flow = SampleTestFlowNotSet()
    flow.kickoff()
    assert flow.state.result != ""
    mock_llm.call.assert_called()
def test_multiple_crews_in_flow_span_lifecycle():
    """Test that multiple crews in a flow each get proper execution spans.

    This ensures that when multiple crews are executed sequentially in different
    flow methods, each crew gets its own execution span properly assigned and closed.
    """
    # Distinct mock LLMs so each crew's activity can be asserted separately.
    mock_llm_1 = create_mock_llm()
    mock_llm_1.call.return_value = "First crew result"
    mock_llm_2 = create_mock_llm()
    mock_llm_2.call.return_value = "Second crew result"

    class SampleMultiCrewFlow(Flow[SimpleState]):
        @start()
        def first_crew(self):
            """Run first crew."""
            agent = Agent(
                role="first agent",
                goal="first task",
                backstory="first agent",
                llm=mock_llm_1,
            )
            task = Task(
                description="First task",
                expected_output="first result",
                agent=agent,
            )
            crew = Crew(
                agents=[agent],
                tasks=[task],
                share_crew=True,
            )
            result = crew.kickoff()
            # Each crew must get its own span, even within the same flow run.
            assert crew._execution_span is not None
            return str(result.raw)

        @listen(first_crew)
        def second_crew(self, first_result: str):
            """Run second crew."""
            agent = Agent(
                role="second agent",
                goal="second task",
                backstory="second agent",
                llm=mock_llm_2,
            )
            task = Task(
                description="Second task",
                expected_output="second result",
                agent=agent,
            )
            crew = Crew(
                agents=[agent],
                tasks=[task],
                share_crew=True,
            )
            result = crew.kickoff()
            assert crew._execution_span is not None
            self.state.result = f"{first_result} + {result.raw}"
            return self.state.result

    flow = SampleMultiCrewFlow()
    flow.kickoff()
    assert flow.state.result != ""
    # The second method combines both crew results with " + ".
    assert "+" in flow.state.result
    mock_llm_1.call.assert_called()
    mock_llm_2.call.assert_called()
@pytest.mark.asyncio
async def test_crew_execution_span_in_async_flow():
    """Test that crew execution spans work in async flow methods.

    Verifies that crews executed within async flow methods still properly
    assign and close execution spans.
    """
    mock_llm = create_mock_llm()

    class AsyncTestFlow(Flow[SimpleState]):
        @start()
        async def run_crew_async(self):
            """Run a crew inside an async flow method."""
            agent = Agent(
                role="test agent",
                goal="say hello",
                backstory="a friendly agent",
                llm=mock_llm,
            )
            task = Task(
                description="Say hello",
                expected_output="hello",
                agent=agent,
            )
            crew = Crew(
                agents=[agent],
                tasks=[task],
                share_crew=True,
            )
            # Synchronous kickoff from inside an async method must still
            # assign the span before returning.
            result = crew.kickoff()
            assert crew._execution_span is not None, (
                "crew._execution_span should be set in async flow method"
            )
            self.state.result = str(result.raw)
            return self.state.result

    flow = AsyncTestFlow()
    await flow.kickoff_async()
    assert flow.state.result != ""
    mock_llm.call.assert_called()
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/telemetry/test_flow_crew_span_integration.py",
"license": "MIT License",
"lines": 241,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/src/crewai/crews/utils.py | """Utility functions for crew operations."""
from __future__ import annotations
import asyncio
from collections.abc import Callable, Coroutine, Iterable, Mapping
from typing import TYPE_CHECKING, Any
from opentelemetry import baggage
from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.crews.crew_output import CrewOutput
from crewai.rag.embeddings.types import EmbedderConfig
from crewai.types.streaming import CrewStreamingOutput, FlowStreamingOutput
from crewai.utilities.file_store import store_files
from crewai.utilities.streaming import (
StreamingState,
TaskInfo,
create_streaming_state,
)
try:
from crewai_files import (
AudioFile,
ImageFile,
PDFFile,
TextFile,
VideoFile,
)
_FILE_TYPES: tuple[type, ...] = (AudioFile, ImageFile, PDFFile, TextFile, VideoFile)
except ImportError:
_FILE_TYPES = ()
if TYPE_CHECKING:
from crewai_files import FileInput
from crewai.crew import Crew
def enable_agent_streaming(agents: Iterable[BaseAgent]) -> None:
    """Turn on LLM response streaming for every agent that has an LLM.

    Agents without a configured LLM are left untouched.

    Args:
        agents: Agents whose LLMs should stream output.
    """
    for member in agents:
        llm = member.llm
        if llm is None:
            continue
        llm.stream = True
def setup_agents(
    crew: Crew,
    agents: Iterable[BaseAgent],
    embedder: EmbedderConfig | None,
    function_calling_llm: Any,
    step_callback: Callable[..., Any] | None,
) -> None:
    """Wire each agent to its crew and apply crew-level defaults.

    For every agent: attach the crew, initialize knowledge with the crew
    embedder, fall back to the crew's function-calling LLM and step callback
    when the agent does not define its own, and rebuild the agent executor.

    Args:
        crew: The crew instance the agents belong to.
        agents: Agents to configure.
        embedder: Embedder configuration forwarded to knowledge setup.
        function_calling_llm: Crew-level default LLM for tool calling.
        step_callback: Crew-level default per-step callback.
    """
    for member in agents:
        member.crew = crew
        member.set_knowledge(crew_embedder=embedder)
        # Crew defaults apply only when the agent has no value of its own.
        if not member.function_calling_llm:  # type: ignore[attr-defined]
            member.function_calling_llm = function_calling_llm  # type: ignore[attr-defined]
        if not member.step_callback:  # type: ignore[attr-defined]
            member.step_callback = step_callback  # type: ignore[attr-defined]
        member.create_agent_executor()
class TaskExecutionData:
    """Bundle of everything needed to run (or skip) a single task."""

    def __init__(
        self,
        agent: BaseAgent | None,
        tools: list[Any],
        should_skip: bool = False,
    ) -> None:
        """Store the prepared execution details for one task.

        Args:
            agent: Agent selected to run the task; None when skipping.
            tools: Tools resolved for this task.
            should_skip: True when replay logic says to skip the task.
        """
        # Plain attribute holder; no behavior beyond storage.
        self.agent = agent
        self.tools = tools
        self.should_skip = should_skip
def prepare_task_execution(
    crew: Crew,
    task: Any,
    task_index: int,
    start_index: int | None,
    task_outputs: list[Any],
    last_sync_output: Any | None,
) -> tuple[TaskExecutionData, list[Any], Any | None]:
    """Prepare a task for execution, handling replay skip logic and agent/tool setup.

    Args:
        crew: The crew instance.
        task: The task to prepare.
        task_index: Index of the current task.
        start_index: Index to start execution from (for replay).
        task_outputs: Current list of task outputs.
        last_sync_output: Last synchronous task output.

    Returns:
        A tuple of (TaskExecutionData or None if skipped, updated task_outputs, updated last_sync_output).
        If the task should be skipped, TaskExecutionData will have should_skip=True.

    Raises:
        ValueError: If no agent is available for the task.
    """
    # Handle replay skip: tasks before the replay start index reuse their
    # previously recorded output instead of executing again.
    if start_index is not None and task_index < start_index:
        if task.output:
            if task.async_execution:
                # Async task outputs accumulate alongside earlier ones.
                task_outputs.append(task.output)
            else:
                # A sync output supersedes the accumulated list.
                task_outputs = [task.output]
                last_sync_output = task.output
        return (
            TaskExecutionData(agent=None, tools=[], should_skip=True),
            task_outputs,
            last_sync_output,
        )
    agent_to_use = crew._get_agent_to_use(task)
    if agent_to_use is None:
        raise ValueError(
            f"No agent available for task: {task.description}. "
            f"Ensure that either the task has an assigned agent "
            f"or a manager agent is provided."
        )
    # Task-level tools take precedence over the agent's own tools.
    tools_for_task = task.tools or agent_to_use.tools or []
    tools_for_task = crew._prepare_tools(
        agent_to_use,
        task,
        tools_for_task,
    )
    crew._log_task_start(task, agent_to_use.role)
    return (
        TaskExecutionData(agent=agent_to_use, tools=tools_for_task),
        task_outputs,
        last_sync_output,
    )
def check_conditional_skip(
    crew: Crew,
    task: Any,
    task_outputs: list[Any],
    task_index: int,
    was_replayed: bool,
) -> Any | None:
    """Evaluate a conditional task and short-circuit it when its condition fails.

    Args:
        crew: The crew executing the task.
        task: The conditional task under evaluation.
        task_outputs: Outputs produced by earlier tasks.
        task_index: Position of the task in the crew's task list.
        was_replayed: True when this run is a replay of a stored execution.

    Returns:
        The placeholder output for the skipped task, or None when the task
        should run normally.
    """
    if not task_outputs:
        return None
    prior = task_outputs[-1]
    if prior is None or task.should_execute(prior):
        return None
    crew._logger.log(
        "debug",
        f"Skipping conditional task: {task.description}",
        color="yellow",
    )
    skipped = task.get_skipped_task_output()
    # Replays already have an execution log entry; only record fresh runs.
    if not was_replayed:
        crew._store_execution_log(task, skipped, task_index)
    return skipped
def _extract_files_from_inputs(inputs: dict[str, Any]) -> dict[str, Any]:
    """Pull file objects out of an inputs dict, mutating it in place.

    Any value that is an instance of a crewai-files type (ImageFile,
    TextFile, etc.) is removed from ``inputs`` and collected separately.

    Args:
        inputs: Kickoff inputs; matching entries are deleted from this dict.

    Returns:
        Mapping of input key to extracted file object. Empty when the
        optional crewai-files package is not installed.
    """
    if not _FILE_TYPES:
        return {}
    extracted = {
        key: value
        for key, value in inputs.items()
        if isinstance(value, _FILE_TYPES)
    }
    # Deletion happens after the scan so iteration never sees a mutation.
    for key in extracted:
        del inputs[key]
    return extracted
def prepare_kickoff(
    crew: Crew,
    inputs: dict[str, Any] | None,
    input_files: dict[str, FileInput] | None = None,
) -> dict[str, Any] | None:
    """Prepare crew for kickoff execution.

    Handles before callbacks, event emission, task handler reset, input
    interpolation, task callbacks, agent setup, and planning.

    Args:
        crew: The crew instance to prepare.
        inputs: Optional input dictionary to pass to the crew.
        input_files: Optional dict of named file inputs for the crew.

    Returns:
        The potentially modified inputs dictionary after before callbacks.

    Raises:
        TypeError: If ``inputs`` is neither None nor a Mapping.
    """
    from crewai.events.base_events import reset_emission_counter
    from crewai.events.event_bus import crewai_event_bus
    from crewai.events.event_context import get_current_parent_id, reset_last_event_id
    from crewai.events.types.crew_events import CrewKickoffStartedEvent

    # Only a top-level kickoff (no parent event in context) resets event
    # bookkeeping; nested crews inherit the parent's counters.
    if get_current_parent_id() is None:
        reset_emission_counter()
        reset_last_event_id()
    # Normalize inputs to dict[str, Any] for internal processing
    normalized: dict[str, Any] | None = None
    if inputs is not None:
        if not isinstance(inputs, Mapping):
            raise TypeError(
                f"inputs must be a dict or Mapping, got {type(inputs).__name__}"
            )
        normalized = dict(inputs)
    for before_callback in crew.before_kickoff_callbacks:
        # Callbacks always receive a dict, even when no inputs were given.
        if normalized is None:
            normalized = {}
        normalized = before_callback(normalized)
    started_event = CrewKickoffStartedEvent(crew_name=crew.name, inputs=normalized)
    crew._kickoff_event_id = started_event.event_id
    future = crewai_event_bus.emit(crew, started_event)
    if future is not None:
        try:
            # Wait for async handlers; handler failures must not abort kickoff.
            future.result()
        except Exception:  # noqa: S110
            pass
    crew._task_output_handler.reset()
    crew._logging_color = "bold_purple"
    # Check for flow input files in baggage context (inherited from parent Flow)
    _flow_files = baggage.get_baggage("flow_input_files")
    flow_files: dict[str, Any] = _flow_files if isinstance(_flow_files, dict) else {}
    if normalized is not None:
        # Extract file objects unpacked directly into inputs
        unpacked_files = _extract_files_from_inputs(normalized)
        # Merge files: flow_files < input_files < unpacked_files (later takes precedence)
        all_files = {**flow_files, **(input_files or {}), **unpacked_files}
        if all_files:
            store_files(crew.id, all_files)
        crew._inputs = normalized
        crew._interpolate_inputs(normalized)
    else:
        # No inputs dict provided
        all_files = {**flow_files, **(input_files or {})}
        if all_files:
            store_files(crew.id, all_files)
    crew._set_tasks_callbacks()
    crew._set_allow_crewai_trigger_context_for_first_task()
    setup_agents(
        crew,
        crew.agents,
        crew.embedder,
        crew.function_calling_llm,
        crew.step_callback,
    )
    if crew.planning:
        crew._handle_crew_planning()
    return normalized
class StreamingContext:
    """Container for streaming state and holders used during crew execution."""

    def __init__(self, use_async: bool = False) -> None:
        """Initialize streaming context.

        Args:
            use_async: Whether to use async streaming mode.
        """
        # Receives the final CrewOutput once execution completes.
        self.result_holder: list[CrewOutput] = []
        # Mutated in place as execution advances from task to task.
        self.current_task_info: TaskInfo = {
            "index": 0,
            "name": "",
            "id": "",
            "agent_role": "",
            "agent_id": "",
        }
        self.state: StreamingState = create_streaming_state(
            self.current_task_info, self.result_holder, use_async=use_async
        )
        # Collects the streaming wrapper(s) handed back to the caller.
        self.output_holder: list[CrewStreamingOutput | FlowStreamingOutput] = []
class ForEachStreamingContext:
    """Container for streaming state used in for_each crew execution methods."""

    def __init__(self) -> None:
        """Initialize for_each streaming context."""
        # Nested list: the inner list receives one CrewOutput per input.
        self.result_holder: list[list[CrewOutput]] = [[]]
        # Mutated in place as execution advances from task to task.
        self.current_task_info: TaskInfo = {
            "index": 0,
            "name": "",
            "id": "",
            "agent_role": "",
            "agent_id": "",
        }
        # for_each streaming always runs in async mode.
        self.state: StreamingState = create_streaming_state(
            self.current_task_info, self.result_holder, use_async=True
        )
        self.output_holder: list[CrewStreamingOutput | FlowStreamingOutput] = []
async def run_for_each_async(
    crew: Crew,
    inputs: list[dict[str, Any]],
    kickoff_fn: Callable[
        [Crew, dict[str, Any]], Coroutine[Any, Any, CrewOutput | CrewStreamingOutput]
    ],
) -> list[CrewOutput | CrewStreamingOutput] | CrewStreamingOutput:
    """Execute crew workflow for each input asynchronously.

    Args:
        crew: The crew instance to execute.
        inputs: List of input dictionaries for each execution.
        kickoff_fn: Async function to call for each crew copy (kickoff_async or akickoff).

    Returns:
        If streaming, a single CrewStreamingOutput that yields chunks from all crews.
        Otherwise, a list of CrewOutput results.
    """
    from crewai.types.usage_metrics import UsageMetrics
    from crewai.utilities.streaming import (
        create_async_chunk_generator,
        signal_end,
        signal_error,
    )

    # Each input runs against its own copy so per-run state never leaks.
    crew_copies = [crew.copy() for _ in inputs]
    if crew.stream:
        ctx = ForEachStreamingContext()

        async def run_all_crews() -> None:
            # Producer: kick off every crew, then funnel all of their chunks
            # into the single shared queue that the caller iterates.
            try:
                streaming_outputs: list[CrewStreamingOutput] = []
                for i, crew_copy in enumerate(crew_copies):
                    streaming = await kickoff_fn(crew_copy, inputs[i])
                    if isinstance(streaming, CrewStreamingOutput):
                        streaming_outputs.append(streaming)

                async def consume_stream(
                    stream_output: CrewStreamingOutput,
                ) -> CrewOutput:
                    # Forward each chunk onto the shared queue; call_soon_threadsafe
                    # because the chunk may originate on another thread's loop.
                    async for chunk in stream_output:
                        if (
                            ctx.state.async_queue is not None
                            and ctx.state.loop is not None
                        ):
                            ctx.state.loop.call_soon_threadsafe(
                                ctx.state.async_queue.put_nowait, chunk
                            )
                    return stream_output.result

                crew_results = await asyncio.gather(
                    *[consume_stream(s) for s in streaming_outputs]
                )
                ctx.result_holder[0] = list(crew_results)
            except Exception as e:
                signal_error(ctx.state, e, is_async=True)
            finally:
                # Always close the stream so consumers never hang.
                signal_end(ctx.state, is_async=True)

        streaming_output = CrewStreamingOutput(
            async_iterator=create_async_chunk_generator(
                ctx.state, run_all_crews, ctx.output_holder
            )
        )

        def set_results_wrapper(result: Any) -> None:
            streaming_output._set_results(result)

        # for_each produces a list of results, so route the single-result
        # setter to the plural variant.
        streaming_output._set_result = set_results_wrapper  # type: ignore[method-assign]
        ctx.output_holder.append(streaming_output)
        return streaming_output
    async_tasks: list[asyncio.Task[CrewOutput | CrewStreamingOutput]] = [
        asyncio.create_task(kickoff_fn(crew_copy, input_data))
        for crew_copy, input_data in zip(crew_copies, inputs, strict=True)
    ]
    results = await asyncio.gather(*async_tasks)
    # Aggregate token usage from every copy back onto the original crew.
    total_usage_metrics = UsageMetrics()
    for crew_copy in crew_copies:
        if crew_copy.usage_metrics:
            total_usage_metrics.add_usage_metrics(crew_copy.usage_metrics)
    crew.usage_metrics = total_usage_metrics
    crew._task_output_handler.reset()
    return list(results)
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/crews/utils.py",
"license": "MIT License",
"lines": 363,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai/tests/crew/test_async_crew.py | """Tests for async crew execution."""
import pytest
from unittest.mock import AsyncMock, MagicMock, patch
from crewai.agent import Agent
from crewai.crew import Crew
from crewai.task import Task
from crewai.crews.crew_output import CrewOutput
from crewai.tasks.task_output import TaskOutput
@pytest.fixture
def test_agent() -> Agent:
    """Create a test agent."""
    # The LLM string is never exercised: the tests in this module patch
    # Task.aexecute_sync, so no real model call happens.
    return Agent(
        role="Test Agent",
        goal="Test goal",
        backstory="Test backstory",
        llm="gpt-4o-mini",
        verbose=False,
    )
@pytest.fixture
def test_task(test_agent: Agent) -> Task:
    """Create a test task."""
    # Bound to the shared test_agent fixture.
    return Task(
        description="Test task description",
        expected_output="Test expected output",
        agent=test_agent,
    )
@pytest.fixture
def test_crew(test_agent: Agent, test_task: Task) -> Crew:
    """Create a test crew."""
    # Minimal single-agent, single-task crew used by most tests below.
    return Crew(
        agents=[test_agent],
        tasks=[test_task],
        verbose=False,
    )
class TestAsyncCrewKickoff:
    """Tests for async crew kickoff methods."""

    @pytest.mark.asyncio
    @patch("crewai.task.Task.aexecute_sync", new_callable=AsyncMock)
    async def test_akickoff_basic(
        self, mock_execute: AsyncMock, test_crew: Crew
    ) -> None:
        """Test basic async crew kickoff."""
        mock_output = TaskOutput(
            description="Test task description",
            raw="Task result",
            agent="Test Agent",
        )
        mock_execute.return_value = mock_output
        result = await test_crew.akickoff()
        assert result is not None
        assert isinstance(result, CrewOutput)
        # The crew's raw output is the (mocked) last task's raw output.
        assert result.raw == "Task result"
        mock_execute.assert_called_once()

    @pytest.mark.asyncio
    @patch("crewai.task.Task.aexecute_sync", new_callable=AsyncMock)
    async def test_akickoff_with_inputs(
        self, mock_execute: AsyncMock, test_agent: Agent
    ) -> None:
        """Test async crew kickoff with inputs."""
        # {topic} placeholders are interpolated from the inputs dict.
        task = Task(
            description="Test task for {topic}",
            expected_output="Expected output for {topic}",
            agent=test_agent,
        )
        crew = Crew(
            agents=[test_agent],
            tasks=[task],
            verbose=False,
        )
        mock_output = TaskOutput(
            description="Test task for AI",
            raw="Task result about AI",
            agent="Test Agent",
        )
        mock_execute.return_value = mock_output
        result = await crew.akickoff(inputs={"topic": "AI"})
        assert result is not None
        assert isinstance(result, CrewOutput)
        mock_execute.assert_called_once()

    @pytest.mark.asyncio
    @patch("crewai.task.Task.aexecute_sync", new_callable=AsyncMock)
    async def test_akickoff_multiple_tasks(
        self, mock_execute: AsyncMock, test_agent: Agent
    ) -> None:
        """Test async crew kickoff with multiple tasks."""
        task1 = Task(
            description="First task",
            expected_output="First output",
            agent=test_agent,
        )
        task2 = Task(
            description="Second task",
            expected_output="Second output",
            agent=test_agent,
        )
        crew = Crew(
            agents=[test_agent],
            tasks=[task1, task2],
            verbose=False,
        )
        mock_output1 = TaskOutput(
            description="First task",
            raw="First result",
            agent="Test Agent",
        )
        mock_output2 = TaskOutput(
            description="Second task",
            raw="Second result",
            agent="Test Agent",
        )
        # side_effect yields one output per task, in order.
        mock_execute.side_effect = [mock_output1, mock_output2]
        result = await crew.akickoff()
        assert result is not None
        assert isinstance(result, CrewOutput)
        # The crew's raw result is the final task's output.
        assert result.raw == "Second result"
        assert mock_execute.call_count == 2

    @pytest.mark.asyncio
    @patch("crewai.task.Task.aexecute_sync", new_callable=AsyncMock)
    async def test_akickoff_handles_exception(
        self, mock_execute: AsyncMock, test_crew: Crew
    ) -> None:
        """Test that async kickoff handles exceptions properly."""
        # Task failures should propagate out of akickoff unchanged.
        mock_execute.side_effect = RuntimeError("Test error")
        with pytest.raises(RuntimeError) as exc_info:
            await test_crew.akickoff()
        assert "Test error" in str(exc_info.value)

    @pytest.mark.asyncio
    @patch("crewai.task.Task.aexecute_sync", new_callable=AsyncMock)
    async def test_akickoff_calls_before_callbacks(
        self, mock_execute: AsyncMock, test_agent: Agent
    ) -> None:
        """Test that async kickoff calls before_kickoff_callbacks."""
        callback_called = False

        def before_callback(inputs: dict | None) -> dict:
            nonlocal callback_called
            callback_called = True
            return inputs or {}

        task = Task(
            description="Test task",
            expected_output="Test output",
            agent=test_agent,
        )
        crew = Crew(
            agents=[test_agent],
            tasks=[task],
            verbose=False,
            before_kickoff_callbacks=[before_callback],
        )
        mock_output = TaskOutput(
            description="Test task",
            raw="Task result",
            agent="Test Agent",
        )
        mock_execute.return_value = mock_output
        await crew.akickoff()
        assert callback_called

    @pytest.mark.asyncio
    @patch("crewai.task.Task.aexecute_sync", new_callable=AsyncMock)
    async def test_akickoff_calls_after_callbacks(
        self, mock_execute: AsyncMock, test_agent: Agent
    ) -> None:
        """Test that async kickoff calls after_kickoff_callbacks."""
        callback_called = False

        def after_callback(result: CrewOutput) -> CrewOutput:
            nonlocal callback_called
            callback_called = True
            return result

        task = Task(
            description="Test task",
            expected_output="Test output",
            agent=test_agent,
        )
        crew = Crew(
            agents=[test_agent],
            tasks=[task],
            verbose=False,
            after_kickoff_callbacks=[after_callback],
        )
        mock_output = TaskOutput(
            description="Test task",
            raw="Task result",
            agent="Test Agent",
        )
        mock_execute.return_value = mock_output
        await crew.akickoff()
        assert callback_called
class TestAsyncCrewKickoffForEach:
    """Tests for async crew kickoff_for_each methods."""

    @pytest.mark.asyncio
    @patch("crewai.task.Task.aexecute_sync", new_callable=AsyncMock)
    async def test_akickoff_for_each_basic(
        self, mock_execute: AsyncMock, test_agent: Agent
    ) -> None:
        """Test basic async kickoff_for_each."""
        task = Task(
            description="Test task for {topic}",
            expected_output="Expected output",
            agent=test_agent,
        )
        crew = Crew(
            agents=[test_agent],
            tasks=[task],
            verbose=False,
        )
        mock_output1 = TaskOutput(
            description="Test task for AI",
            raw="Result about AI",
            agent="Test Agent",
        )
        mock_output2 = TaskOutput(
            description="Test task for ML",
            raw="Result about ML",
            agent="Test Agent",
        )
        mock_execute.side_effect = [mock_output1, mock_output2]
        # One execution per input dict, each on its own crew copy.
        inputs = [{"topic": "AI"}, {"topic": "ML"}]
        results = await crew.akickoff_for_each(inputs)
        assert len(results) == 2
        assert all(isinstance(r, CrewOutput) for r in results)

    @pytest.mark.asyncio
    @patch("crewai.task.Task.aexecute_sync", new_callable=AsyncMock)
    async def test_akickoff_for_each_concurrent(
        self, mock_execute: AsyncMock, test_agent: Agent
    ) -> None:
        """Test that async kickoff_for_each runs concurrently."""
        task = Task(
            description="Test task for {topic}",
            expected_output="Expected output",
            agent=test_agent,
        )
        crew = Crew(
            agents=[test_agent],
            tasks=[task],
            verbose=False,
        )
        mock_output = TaskOutput(
            description="Test task",
            raw="Result",
            agent="Test Agent",
        )
        # A single return_value serves every concurrent execution.
        mock_execute.return_value = mock_output
        inputs = [{"topic": f"topic_{i}"} for i in range(3)]
        results = await crew.akickoff_for_each(inputs)
        assert len(results) == 3
class TestAsyncTaskExecution:
    """Tests for async task execution within crew."""

    @pytest.mark.asyncio
    @patch("crewai.task.Task.aexecute_sync", new_callable=AsyncMock)
    async def test_aexecute_tasks_sequential(
        self, mock_execute: AsyncMock, test_agent: Agent
    ) -> None:
        """Test async sequential task execution."""
        task1 = Task(
            description="First task",
            expected_output="First output",
            agent=test_agent,
        )
        task2 = Task(
            description="Second task",
            expected_output="Second output",
            agent=test_agent,
        )
        crew = Crew(
            agents=[test_agent],
            tasks=[task1, task2],
            verbose=False,
        )
        mock_output1 = TaskOutput(
            description="First task",
            raw="First result",
            agent="Test Agent",
        )
        mock_output2 = TaskOutput(
            description="Second task",
            raw="Second result",
            agent="Test Agent",
        )
        mock_execute.side_effect = [mock_output1, mock_output2]
        # Exercises the internal task loop directly rather than akickoff.
        result = await crew._aexecute_tasks(crew.tasks)
        assert result is not None
        assert result.raw == "Second result"
        # Both task outputs are collected on the crew output.
        assert len(result.tasks_output) == 2

    @pytest.mark.asyncio
    @patch("crewai.task.Task.aexecute_sync", new_callable=AsyncMock)
    async def test_aexecute_tasks_with_async_task(
        self, mock_execute: AsyncMock, test_agent: Agent
    ) -> None:
        """Test async execution with async_execution task flag."""
        # First task is marked async_execution; the following sync task
        # forces the crew to await it.
        task1 = Task(
            description="Async task",
            expected_output="Async output",
            agent=test_agent,
            async_execution=True,
        )
        task2 = Task(
            description="Sync task",
            expected_output="Sync output",
            agent=test_agent,
        )
        crew = Crew(
            agents=[test_agent],
            tasks=[task1, task2],
            verbose=False,
        )
        mock_output1 = TaskOutput(
            description="Async task",
            raw="Async result",
            agent="Test Agent",
        )
        mock_output2 = TaskOutput(
            description="Sync task",
            raw="Sync result",
            agent="Test Agent",
        )
        mock_execute.side_effect = [mock_output1, mock_output2]
        result = await crew._aexecute_tasks(crew.tasks)
        assert result is not None
        assert mock_execute.call_count == 2
class TestAsyncProcessAsyncTasks:
    """Tests for _aprocess_async_tasks method."""

    @pytest.mark.asyncio
    async def test_aprocess_async_tasks_empty(self, test_crew: Crew) -> None:
        """Test processing empty list of async tasks."""
        # An empty futures list should resolve to an empty output list.
        result = await test_crew._aprocess_async_tasks([])
        assert result == []
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/crew/test_async_crew.py",
"license": "MIT License",
"lines": 325,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/src/crewai/agent/utils.py | """Utility functions for agent task execution.
This module contains shared logic extracted from the Agent's execute_task
and aexecute_task methods to reduce code duplication.
"""
from __future__ import annotations
import json
from typing import TYPE_CHECKING, Any
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.knowledge_events import (
KnowledgeRetrievalCompletedEvent,
KnowledgeRetrievalStartedEvent,
KnowledgeSearchQueryFailedEvent,
)
from crewai.knowledge.utils.knowledge_utils import extract_knowledge_context
from crewai.utilities.pydantic_schema_utils import generate_model_description
from crewai.utilities.types import LLMMessage
if TYPE_CHECKING:
from crewai.agent.core import Agent
from crewai.task import Task
from crewai.tools.base_tool import BaseTool
from crewai.utilities.i18n import I18N
def handle_reasoning(agent: Agent, task: Task) -> None:
    """Run the optional pre-execution reasoning step for an agent.

    When the agent has reasoning enabled, a reasoning plan is generated and
    appended to the task description. Any failure is logged and swallowed so
    a broken reasoning step never blocks task execution.

    Args:
        agent: The agent performing the task.
        task: The task to execute.
    """
    if not agent.reasoning:
        return
    try:
        # Imported lazily: the reasoning handler is only needed on this path.
        from crewai.utilities.reasoning_handler import (
            AgentReasoning,
            AgentReasoningOutput,
        )

        handler = AgentReasoning(task=task, agent=agent)
        output: AgentReasoningOutput = handler.handle_agent_reasoning()
        task.description += f"\n\nReasoning Plan:\n{output.plan.plan}"
    except Exception as e:
        agent._logger.log("error", f"Error during reasoning process: {e!s}")
def build_task_prompt_with_schema(task: Task, task_prompt: str, i18n: I18N) -> str:
    """Build task prompt with JSON/Pydantic schema instructions if applicable.

    When the task requests structured output via ``output_json`` or
    ``output_pydantic`` (and no native ``response_model`` is set), the model's
    JSON schema is serialized and appended via the localized instruction slice.

    Args:
        task: The task being executed.
        task_prompt: The initial task prompt.
        i18n: Internationalization instance providing prompt slices.

    Returns:
        The task prompt potentially augmented with schema instructions.
    """
    # A native response_model makes prompt-level schema instructions redundant.
    if task.response_model:
        return task_prompt
    # The original if/elif branches for output_json and output_pydantic were
    # byte-for-byte duplicates; they differ only in which model supplies the
    # schema, with output_json taking precedence.
    output_model = task.output_json or task.output_pydantic
    if not output_model:
        return task_prompt
    schema_dict = generate_model_description(output_model)
    schema = json.dumps(schema_dict["json_schema"]["schema"], indent=2)
    return task_prompt + "\n" + i18n.slice("formatted_task_instructions").format(
        output_format=schema
    )
def format_task_with_context(task_prompt: str, context: str | None, i18n: I18N) -> str:
    """Merge optional context into the task prompt via the i18n template.

    Args:
        task_prompt: The base task prompt.
        context: Extra context; falsy values leave the prompt unchanged.
        i18n: Internationalization instance providing prompt slices.

    Returns:
        The combined prompt, or the original prompt when no context is given.
    """
    if not context:
        return task_prompt
    template = i18n.slice("task_with_context")
    return template.format(task=task_prompt, context=context)
def get_knowledge_config(agent: Agent) -> dict[str, Any]:
    """Serialize the agent's knowledge configuration to a plain dict.

    Args:
        agent: The agent whose configuration is read.

    Returns:
        The configuration as a dict; empty when no configuration is set.
    """
    config = agent.knowledge_config
    if not config:
        return {}
    return config.model_dump()
def handle_knowledge_retrieval(
    agent: Agent,
    task: Task,
    task_prompt: str,
    knowledge_config: dict[str, Any],
    query_func: Any,
    crew_query_func: Any,
) -> str:
    """Handle knowledge retrieval for task execution.

    This function handles both agent-specific and crew-specific knowledge queries.

    Args:
        agent: The agent performing the task.
        task: The task being executed.
        task_prompt: The current task prompt.
        knowledge_config: Knowledge configuration dictionary.
        query_func: Function to query agent knowledge (sync or async).
        crew_query_func: Function to query crew knowledge (sync or async).

    Returns:
        The task prompt potentially augmented with knowledge context.
    """
    # Nothing to retrieve when neither the agent nor its crew has knowledge.
    if not (agent.knowledge or (agent.crew and agent.crew.knowledge)):
        return task_prompt
    crewai_event_bus.emit(
        agent,
        event=KnowledgeRetrievalStartedEvent(
            from_task=task,
            from_agent=agent,
        ),
    )
    try:
        agent.knowledge_search_query = agent._get_knowledge_search_query(
            task_prompt, task
        )
        if agent.knowledge_search_query:
            # Agent-level knowledge is queried first and appended first.
            if agent.knowledge:
                agent_knowledge_snippets = query_func(
                    [agent.knowledge_search_query], **knowledge_config
                )
                if agent_knowledge_snippets:
                    agent.agent_knowledge_context = extract_knowledge_context(
                        agent_knowledge_snippets
                    )
                    if agent.agent_knowledge_context:
                        task_prompt += agent.agent_knowledge_context
            # Crew-level knowledge is appended after any agent-level context.
            knowledge_snippets = crew_query_func(
                [agent.knowledge_search_query], **knowledge_config
            )
            if knowledge_snippets:
                agent.crew_knowledge_context = extract_knowledge_context(
                    knowledge_snippets
                )
                if agent.crew_knowledge_context:
                    task_prompt += agent.crew_knowledge_context
        crewai_event_bus.emit(
            agent,
            event=KnowledgeRetrievalCompletedEvent(
                query=agent.knowledge_search_query,
                from_task=task,
                from_agent=agent,
                retrieved_knowledge=_combine_knowledge_context(agent),
            ),
        )
    except Exception as e:
        # Failures surface as an event; the prompt built so far is returned
        # rather than aborting task execution.
        crewai_event_bus.emit(
            agent,
            event=KnowledgeSearchQueryFailedEvent(
                query=agent.knowledge_search_query or "",
                error=str(e),
                from_task=task,
                from_agent=agent,
            ),
        )
    return task_prompt
def _combine_knowledge_context(agent: Agent) -> str:
"""Combine agent and crew knowledge contexts into a single string.
Args:
agent: The agent with knowledge contexts.
Returns:
Combined knowledge context string.
"""
agent_ctx = agent.agent_knowledge_context or ""
crew_ctx = agent.crew_knowledge_context or ""
separator = "\n" if agent_ctx and crew_ctx else ""
return agent_ctx + separator + crew_ctx
def apply_training_data(agent: Agent, task_prompt: str) -> str:
    """Inject training data into the task prompt.

    During a crew training run the live training handler is used; otherwise
    any previously trained data is applied.

    Args:
        agent: The agent performing the task.
        task_prompt: The task prompt.

    Returns:
        The task prompt with training data applied.
    """
    in_training_run = bool(agent.crew and agent.crew._train)
    handler = agent._training_handler if in_training_run else agent._use_trained_data
    return handler(task_prompt=task_prompt)
def process_tool_results(agent: Agent, result: Any) -> Any:
    """Let tools flagged ``result_as_answer`` override the agent's result.

    Args:
        agent: The agent whose accumulated tool results are inspected.
        result: The result produced by the agent loop.

    Returns:
        The last tool result flagged as the final answer, or the original
        result when no tool claims it.
    """
    final = result
    for entry in agent.tools_results:
        if entry.get("result_as_answer", False):
            # Later flagged tools win, matching execution order.
            final = entry["result"]
    return final
def save_last_messages(agent: Agent) -> None:
    """Capture a sanitized copy of the executor's message history.

    Only roles accepted by TaskOutput ('user', 'assistant', 'system', 'tool')
    are kept. Tool messages preserve ``tool_call_id``/``name`` and assistant
    messages preserve ``tool_calls`` so tool-call pairing survives the copy.

    Args:
        agent: The agent whose executor messages are captured.
    """
    executor = agent.agent_executor
    if not executor or not hasattr(executor, "messages"):
        agent._last_messages = []
        return
    allowed_roles = ("user", "assistant", "system", "tool")
    kept: list[LLMMessage] = []
    for message in executor.messages:
        role = message.get("role", "")
        if role not in allowed_roles:
            continue
        # Normalize None content to "" so consumers always get a string.
        content = message.get("content")
        if content is None:
            content = ""
        entry: LLMMessage = {"role": role, "content": content}
        if role == "tool":
            for extra_key in ("tool_call_id", "name"):
                if message.get(extra_key):
                    entry[extra_key] = message[extra_key]
        elif role == "assistant" and message.get("tool_calls"):
            entry["tool_calls"] = message["tool_calls"]
        kept.append(entry)
    agent._last_messages = kept
def prepare_tools(
    agent: Agent, tools: list[BaseTool] | None, task: Task
) -> list[BaseTool]:
    """Prepare tools for task execution and create the agent executor.

    Explicitly supplied tools win over the agent's own tools; an empty list
    is used when neither is set.

    Args:
        agent: The agent instance.
        tools: Optional list of tools.
        task: The task being executed.

    Returns:
        The list of tools to use.
    """
    selected = tools or agent.tools or []
    # The executor is (re)created here so it always sees the chosen tool set.
    agent.create_agent_executor(tools=selected, task=task)
    return selected
def validate_max_execution_time(max_execution_time: int | None) -> None:
    """Validate the max_execution_time parameter.

    Args:
        max_execution_time: The maximum execution time to validate; ``None``
            means no limit and is accepted as-is.

    Raises:
        ValueError: If max_execution_time is not a positive integer.
    """
    if max_execution_time is None:
        return
    if isinstance(max_execution_time, int) and max_execution_time > 0:
        return
    raise ValueError(
        "Max Execution time must be a positive integer greater than zero"
    )
async def ahandle_knowledge_retrieval(
    agent: Agent,
    task: Task,
    task_prompt: str,
    knowledge_config: dict[str, Any],
) -> str:
    """Handle async knowledge retrieval for task execution.

    Emits knowledge-retrieval lifecycle events, queries the agent's own
    knowledge and (when a crew is present) the crew's knowledge, and appends
    any retrieved context to the task prompt. Failures are reported via a
    KnowledgeSearchQueryFailedEvent and the prompt is returned unchanged
    beyond what was already appended.

    Args:
        agent: The agent performing the task.
        task: The task being executed.
        task_prompt: The current task prompt.
        knowledge_config: Knowledge configuration dictionary.

    Returns:
        The task prompt potentially augmented with knowledge context.
    """
    if not (agent.knowledge or (agent.crew and agent.crew.knowledge)):
        return task_prompt
    crewai_event_bus.emit(
        agent,
        event=KnowledgeRetrievalStartedEvent(
            from_task=task,
            from_agent=agent,
        ),
    )
    try:
        agent.knowledge_search_query = agent._get_knowledge_search_query(
            task_prompt, task
        )
        if agent.knowledge_search_query:
            if agent.knowledge:
                agent_knowledge_snippets = await agent.knowledge.aquery(
                    [agent.knowledge_search_query], **knowledge_config
                )
                if agent_knowledge_snippets:
                    agent.agent_knowledge_context = extract_knowledge_context(
                        agent_knowledge_snippets
                    )
                    if agent.agent_knowledge_context:
                        task_prompt += agent.agent_knowledge_context
            # Fix: only query crew knowledge when a crew exists. Previously an
            # agent with its own knowledge but no crew raised AttributeError on
            # None here, which the except below swallowed and misreported as a
            # failed knowledge search (and the completed event never fired).
            if agent.crew:
                knowledge_snippets = await agent.crew.aquery_knowledge(
                    [agent.knowledge_search_query], **knowledge_config
                )
                if knowledge_snippets:
                    agent.crew_knowledge_context = extract_knowledge_context(
                        knowledge_snippets
                    )
                    if agent.crew_knowledge_context:
                        task_prompt += agent.crew_knowledge_context
        crewai_event_bus.emit(
            agent,
            event=KnowledgeRetrievalCompletedEvent(
                query=agent.knowledge_search_query,
                from_task=task,
                from_agent=agent,
                retrieved_knowledge=_combine_knowledge_context(agent),
            ),
        )
    except Exception as e:
        crewai_event_bus.emit(
            agent,
            event=KnowledgeSearchQueryFailedEvent(
                query=agent.knowledge_search_query or "",
                error=str(e),
                from_task=task,
                from_agent=agent,
            ),
        )
    return task_prompt
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/agent/utils.py",
"license": "MIT License",
"lines": 318,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai/tests/task/test_async_task.py | """Tests for async task execution."""
import pytest
from unittest.mock import AsyncMock, MagicMock, patch
from crewai.agent import Agent
from crewai.task import Task
from crewai.tasks.task_output import TaskOutput
from crewai.tasks.output_format import OutputFormat
@pytest.fixture
def test_agent() -> Agent:
    """Provide a minimal, non-verbose agent for the async task tests."""
    agent = Agent(
        role="Test Agent",
        goal="Test goal",
        backstory="Test backstory",
        llm="gpt-4o-mini",
        verbose=False,
    )
    return agent
class TestAsyncTaskExecution:
    """Tests for async task execution methods.

    Every test patches ``crewai.Agent.aexecute_task`` with an ``AsyncMock``
    so no real LLM call is made; assertions cover TaskOutput construction,
    timing, callbacks, and error propagation of ``Task.aexecute_sync``.
    """
    @pytest.mark.asyncio
    @patch("crewai.Agent.aexecute_task", new_callable=AsyncMock)
    async def test_aexecute_sync_basic(
        self, mock_execute: AsyncMock, test_agent: Agent
    ) -> None:
        """Test basic async task execution."""
        mock_execute.return_value = "Async task result"
        task = Task(
            description="Test task description",
            expected_output="Test expected output",
            agent=test_agent,
        )
        result = await task.aexecute_sync()
        assert result is not None
        assert isinstance(result, TaskOutput)
        assert result.raw == "Async task result"
        assert result.agent == "Test Agent"
        mock_execute.assert_called_once()
    @pytest.mark.asyncio
    @patch("crewai.Agent.aexecute_task", new_callable=AsyncMock)
    async def test_aexecute_sync_with_context(
        self, mock_execute: AsyncMock, test_agent: Agent
    ) -> None:
        """Test async task execution with context."""
        mock_execute.return_value = "Async result"
        task = Task(
            description="Test task description",
            expected_output="Test expected output",
            agent=test_agent,
        )
        context = "Additional context for the task"
        result = await task.aexecute_sync(context=context)
        assert result is not None
        assert task.prompt_context == context
        mock_execute.assert_called_once()
        # call_args[1] is the kwargs dict of the single aexecute_task call.
        call_kwargs = mock_execute.call_args[1]
        assert call_kwargs["context"] == context
    @pytest.mark.asyncio
    @patch("crewai.Agent.aexecute_task", new_callable=AsyncMock)
    async def test_aexecute_sync_with_tools(
        self, mock_execute: AsyncMock, test_agent: Agent
    ) -> None:
        """Test async task execution with custom tools."""
        mock_execute.return_value = "Async result"
        task = Task(
            description="Test task description",
            expected_output="Test expected output",
            agent=test_agent,
        )
        mock_tool = MagicMock()
        mock_tool.name = "test_tool"
        result = await task.aexecute_sync(tools=[mock_tool])
        assert result is not None
        mock_execute.assert_called_once()
        # The tool passed to aexecute_sync must be forwarded to the agent.
        call_kwargs = mock_execute.call_args[1]
        assert mock_tool in call_kwargs["tools"]
    @pytest.mark.asyncio
    @patch("crewai.Agent.aexecute_task", new_callable=AsyncMock)
    async def test_aexecute_sync_sets_start_and_end_time(
        self, mock_execute: AsyncMock, test_agent: Agent
    ) -> None:
        """Test that async execution sets start and end times."""
        mock_execute.return_value = "Async result"
        task = Task(
            description="Test task description",
            expected_output="Test expected output",
            agent=test_agent,
        )
        # Both timestamps are unset before execution and ordered afterwards.
        assert task.start_time is None
        assert task.end_time is None
        await task.aexecute_sync()
        assert task.start_time is not None
        assert task.end_time is not None
        assert task.end_time >= task.start_time
    @pytest.mark.asyncio
    @patch("crewai.Agent.aexecute_task", new_callable=AsyncMock)
    async def test_aexecute_sync_stores_output(
        self, mock_execute: AsyncMock, test_agent: Agent
    ) -> None:
        """Test that async execution stores the output."""
        mock_execute.return_value = "Async task result"
        task = Task(
            description="Test task description",
            expected_output="Test expected output",
            agent=test_agent,
        )
        assert task.output is None
        await task.aexecute_sync()
        assert task.output is not None
        assert task.output.raw == "Async task result"
    @pytest.mark.asyncio
    @patch("crewai.Agent.aexecute_task", new_callable=AsyncMock)
    async def test_aexecute_sync_adds_agent_to_processed_by(
        self, mock_execute: AsyncMock, test_agent: Agent
    ) -> None:
        """Test that async execution adds agent to processed_by_agents."""
        mock_execute.return_value = "Async result"
        task = Task(
            description="Test task description",
            expected_output="Test expected output",
            agent=test_agent,
        )
        assert len(task.processed_by_agents) == 0
        await task.aexecute_sync()
        assert "Test Agent" in task.processed_by_agents
    @pytest.mark.asyncio
    @patch("crewai.Agent.aexecute_task", new_callable=AsyncMock)
    async def test_aexecute_sync_calls_callback(
        self, mock_execute: AsyncMock, test_agent: Agent
    ) -> None:
        """Test that async execution calls the callback."""
        mock_execute.return_value = "Async result"
        callback = MagicMock()
        task = Task(
            description="Test task description",
            expected_output="Test expected output",
            agent=test_agent,
            callback=callback,
        )
        await task.aexecute_sync()
        callback.assert_called_once()
        # The callback receives the TaskOutput as its first positional arg.
        assert isinstance(callback.call_args[0][0], TaskOutput)
    @pytest.mark.asyncio
    async def test_aexecute_sync_without_agent_raises(self) -> None:
        """Test that async execution without agent raises exception."""
        task = Task(
            description="Test task",
            expected_output="Test output",
        )
        with pytest.raises(Exception) as exc_info:
            await task.aexecute_sync()
        assert "has no agent assigned" in str(exc_info.value)
    @pytest.mark.asyncio
    @patch("crewai.Agent.aexecute_task", new_callable=AsyncMock)
    async def test_aexecute_sync_with_different_agent(
        self, mock_execute: AsyncMock, test_agent: Agent
    ) -> None:
        """Test async execution with a different agent than assigned."""
        mock_execute.return_value = "Other agent result"
        task = Task(
            description="Test task description",
            expected_output="Test expected output",
            agent=test_agent,
        )
        other_agent = Agent(
            role="Other Agent",
            goal="Other goal",
            backstory="Other backstory",
            llm="gpt-4o-mini",
            verbose=False,
        )
        # The agent passed at execution time overrides the assigned agent.
        result = await task.aexecute_sync(agent=other_agent)
        assert result.raw == "Other agent result"
        assert result.agent == "Other Agent"
        mock_execute.assert_called_once()
    @pytest.mark.asyncio
    @patch("crewai.Agent.aexecute_task", new_callable=AsyncMock)
    async def test_aexecute_sync_handles_exception(
        self, mock_execute: AsyncMock, test_agent: Agent
    ) -> None:
        """Test that async execution handles exceptions properly."""
        mock_execute.side_effect = RuntimeError("Test error")
        task = Task(
            description="Test task description",
            expected_output="Test expected output",
            agent=test_agent,
        )
        with pytest.raises(RuntimeError) as exc_info:
            await task.aexecute_sync()
        assert "Test error" in str(exc_info.value)
        # end_time must be recorded even on the failure path.
        assert task.end_time is not None
class TestAsyncGuardrails:
    """Tests for async guardrail invocation.

    Guardrails are callables returning ``(ok, payload)``; these tests cover
    the pass-through, retry-then-pass, retry-exhaustion, and multi-guardrail
    paths of ``Task.aexecute_sync``.
    """
    @pytest.mark.asyncio
    @patch("crewai.Agent.aexecute_task", new_callable=AsyncMock)
    async def test_ainvoke_guardrail_success(
        self, mock_execute: AsyncMock, test_agent: Agent
    ) -> None:
        """Test async guardrail invocation with successful validation."""
        mock_execute.return_value = "Async task result"
        def guardrail_fn(output: TaskOutput) -> tuple[bool, str]:
            return True, output.raw
        task = Task(
            description="Test task",
            expected_output="Test output",
            agent=test_agent,
            guardrail=guardrail_fn,
        )
        result = await task.aexecute_sync()
        assert result is not None
        assert result.raw == "Async task result"
    @pytest.mark.asyncio
    @patch("crewai.Agent.aexecute_task", new_callable=AsyncMock)
    async def test_ainvoke_guardrail_failure_then_success(
        self, mock_execute: AsyncMock, test_agent: Agent
    ) -> None:
        """Test async guardrail that fails then succeeds on retry."""
        # side_effect yields one value per call: first attempt, then retry.
        mock_execute.side_effect = ["First result", "Second result"]
        call_count = 0
        def guardrail_fn(output: TaskOutput) -> tuple[bool, str]:
            nonlocal call_count
            call_count += 1
            if call_count == 1:
                return False, "First attempt failed"
            return True, output.raw
        task = Task(
            description="Test task",
            expected_output="Test output",
            agent=test_agent,
            guardrail=guardrail_fn,
        )
        result = await task.aexecute_sync()
        assert result is not None
        # The guardrail must have been invoked once per attempt.
        assert call_count == 2
    @pytest.mark.asyncio
    @patch("crewai.Agent.aexecute_task", new_callable=AsyncMock)
    async def test_ainvoke_guardrail_max_retries_exceeded(
        self, mock_execute: AsyncMock, test_agent: Agent
    ) -> None:
        """Test async guardrail that exceeds max retries."""
        mock_execute.return_value = "Async result"
        def guardrail_fn(output: TaskOutput) -> tuple[bool, str]:
            return False, "Always fails"
        task = Task(
            description="Test task",
            expected_output="Test output",
            agent=test_agent,
            guardrail=guardrail_fn,
            guardrail_max_retries=2,
        )
        with pytest.raises(Exception) as exc_info:
            await task.aexecute_sync()
        assert "validation after" in str(exc_info.value)
        assert "2 retries" in str(exc_info.value)
    @pytest.mark.asyncio
    @patch("crewai.Agent.aexecute_task", new_callable=AsyncMock)
    async def test_ainvoke_multiple_guardrails(
        self, mock_execute: AsyncMock, test_agent: Agent
    ) -> None:
        """Test async execution with multiple guardrails."""
        mock_execute.return_value = "Async result"
        guardrail1_called = False
        guardrail2_called = False
        def guardrail1(output: TaskOutput) -> tuple[bool, str]:
            nonlocal guardrail1_called
            guardrail1_called = True
            return True, output.raw
        def guardrail2(output: TaskOutput) -> tuple[bool, str]:
            nonlocal guardrail2_called
            guardrail2_called = True
            return True, output.raw
        task = Task(
            description="Test task",
            expected_output="Test output",
            agent=test_agent,
            guardrails=[guardrail1, guardrail2],
        )
        await task.aexecute_sync()
        # Both entries of the guardrails list must run when all pass.
        assert guardrail1_called
        assert guardrail2_called
class TestAsyncTaskOutput:
    """Tests for async task output handling (format and attribute plumbing)."""
    @pytest.mark.asyncio
    @patch("crewai.Agent.aexecute_task", new_callable=AsyncMock)
    async def test_aexecute_sync_output_format_raw(
        self, mock_execute: AsyncMock, test_agent: Agent
    ) -> None:
        """Test async execution with raw output format."""
        # Even JSON-looking text stays RAW when no output schema is set.
        mock_execute.return_value = '{"key": "value"}'
        task = Task(
            description="Test task",
            expected_output="Test output",
            agent=test_agent,
        )
        result = await task.aexecute_sync()
        assert result.output_format == OutputFormat.RAW
    @pytest.mark.asyncio
    @patch("crewai.Agent.aexecute_task", new_callable=AsyncMock)
    async def test_aexecute_sync_task_output_attributes(
        self, mock_execute: AsyncMock, test_agent: Agent
    ) -> None:
        """Test that task output has correct attributes."""
        mock_execute.return_value = "Test result"
        task = Task(
            description="Test description",
            expected_output="Test expected",
            agent=test_agent,
            name="Test Task Name",
        )
        result = await task.aexecute_sync()
        # TaskOutput mirrors the Task's metadata plus the raw agent result.
        assert result.name == "Test Task Name"
        assert result.description == "Test description"
        assert result.expected_output == "Test expected"
        assert result.raw == "Test result"
        assert result.agent == "Test Agent"
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/task/test_async_task.py",
"license": "MIT License",
"lines": 314,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/tests/knowledge/test_async_knowledge.py | """Tests for async knowledge operations."""
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from crewai.knowledge.knowledge import Knowledge
from crewai.knowledge.source.string_knowledge_source import StringKnowledgeSource
from crewai.knowledge.storage.knowledge_storage import KnowledgeStorage
class TestAsyncKnowledgeStorage:
    """Tests for async KnowledgeStorage operations.

    Each test injects a mocked client into the storage's private ``_client``
    so no real vector store is touched.
    """
    @pytest.mark.asyncio
    async def test_asearch_returns_results(self):
        """Test that asearch returns search results."""
        mock_client = MagicMock()
        mock_client.asearch = AsyncMock(
            return_value=[{"content": "test result", "score": 0.9}]
        )
        storage = KnowledgeStorage(collection_name="test_collection")
        storage._client = mock_client
        results = await storage.asearch(["test query"])
        assert len(results) == 1
        assert results[0]["content"] == "test result"
        mock_client.asearch.assert_called_once()
    @pytest.mark.asyncio
    async def test_asearch_empty_query_raises_error(self):
        """Test that asearch handles empty query."""
        storage = KnowledgeStorage(collection_name="test_collection")
        # Empty query should not raise but return empty results due to error handling
        results = await storage.asearch([])
        assert results == []
    @pytest.mark.asyncio
    async def test_asave_calls_client_methods(self):
        """Test that asave calls the correct client methods."""
        mock_client = MagicMock()
        mock_client.aget_or_create_collection = AsyncMock()
        mock_client.aadd_documents = AsyncMock()
        storage = KnowledgeStorage(collection_name="test_collection")
        storage._client = mock_client
        await storage.asave(["document 1", "document 2"])
        # Collection names are prefixed with "knowledge_" by the storage.
        mock_client.aget_or_create_collection.assert_called_once_with(
            collection_name="knowledge_test_collection"
        )
        mock_client.aadd_documents.assert_called_once()
    @pytest.mark.asyncio
    async def test_areset_calls_client_delete(self):
        """Test that areset calls delete_collection on the client."""
        mock_client = MagicMock()
        mock_client.adelete_collection = AsyncMock()
        storage = KnowledgeStorage(collection_name="test_collection")
        storage._client = mock_client
        await storage.areset()
        mock_client.adelete_collection.assert_called_once_with(
            collection_name="knowledge_test_collection"
        )
class TestAsyncKnowledge:
    """Tests for async Knowledge operations (aquery/aadd_sources/areset)."""
    @pytest.mark.asyncio
    async def test_aquery_calls_storage_asearch(self):
        """Test that aquery calls storage.asearch."""
        mock_storage = MagicMock(spec=KnowledgeStorage)
        mock_storage.asearch = AsyncMock(
            return_value=[{"content": "result", "score": 0.8}]
        )
        knowledge = Knowledge(
            collection_name="test",
            sources=[],
            storage=mock_storage,
        )
        results = await knowledge.aquery(["test query"])
        assert len(results) == 1
        # Asserts Knowledge's default query parameters as forwarded to storage.
        mock_storage.asearch.assert_called_once_with(
            ["test query"],
            limit=5,
            score_threshold=0.6,
        )
    @pytest.mark.asyncio
    async def test_aquery_raises_when_storage_not_initialized(self):
        """Test that aquery raises ValueError when storage is None."""
        knowledge = Knowledge(
            collection_name="test",
            sources=[],
            storage=MagicMock(spec=KnowledgeStorage),
        )
        # Clear storage after construction to hit the runtime guard.
        knowledge.storage = None
        with pytest.raises(ValueError, match="Storage is not initialized"):
            await knowledge.aquery(["test query"])
    @pytest.mark.asyncio
    async def test_aadd_sources_calls_source_aadd(self):
        """Test that aadd_sources calls aadd on each source."""
        mock_storage = MagicMock(spec=KnowledgeStorage)
        mock_source = MagicMock()
        mock_source.aadd = AsyncMock()
        knowledge = Knowledge(
            collection_name="test",
            sources=[mock_source],
            storage=mock_storage,
        )
        await knowledge.aadd_sources()
        mock_source.aadd.assert_called_once()
        # aadd_sources must wire the shared storage into each source.
        assert mock_source.storage == mock_storage
    @pytest.mark.asyncio
    async def test_areset_calls_storage_areset(self):
        """Test that areset calls storage.areset."""
        mock_storage = MagicMock(spec=KnowledgeStorage)
        mock_storage.areset = AsyncMock()
        knowledge = Knowledge(
            collection_name="test",
            sources=[],
            storage=mock_storage,
        )
        await knowledge.areset()
        mock_storage.areset.assert_called_once()
    @pytest.mark.asyncio
    async def test_areset_raises_when_storage_not_initialized(self):
        """Test that areset raises ValueError when storage is None."""
        knowledge = Knowledge(
            collection_name="test",
            sources=[],
            storage=MagicMock(spec=KnowledgeStorage),
        )
        knowledge.storage = None
        with pytest.raises(ValueError, match="Storage is not initialized"):
            await knowledge.areset()
class TestAsyncStringKnowledgeSource:
    """Tests for async StringKnowledgeSource operations."""
    @pytest.mark.asyncio
    async def test_aadd_saves_documents_asynchronously(self):
        """Test that aadd chunks and saves documents asynchronously."""
        mock_storage = MagicMock(spec=KnowledgeStorage)
        mock_storage.asave = AsyncMock()
        source = StringKnowledgeSource(content="Test content for async processing")
        source.storage = mock_storage
        await source.aadd()
        mock_storage.asave.assert_called_once()
        # aadd must have populated the source's chunk list before saving.
        assert len(source.chunks) > 0
    @pytest.mark.asyncio
    async def test_aadd_raises_without_storage(self):
        """Test that aadd raises ValueError when storage is not set."""
        source = StringKnowledgeSource(content="Test content")
        source.storage = None
        with pytest.raises(ValueError, match="No storage found"):
            await source.aadd()
class TestAsyncBaseKnowledgeSource:
    """Tests for the async ``_asave_documents`` helper on knowledge sources."""
    @pytest.mark.asyncio
    async def test_asave_documents_calls_storage_asave(self):
        """Test that _asave_documents calls storage.asave."""
        mock_storage = MagicMock(spec=KnowledgeStorage)
        mock_storage.asave = AsyncMock()
        source = StringKnowledgeSource(content="Test")
        source.storage = mock_storage
        # Pre-set chunks directly; _asave_documents forwards them verbatim.
        source.chunks = ["chunk1", "chunk2"]
        await source._asave_documents()
        mock_storage.asave.assert_called_once_with(["chunk1", "chunk2"])
    @pytest.mark.asyncio
    async def test_asave_documents_raises_without_storage(self):
        """Test that _asave_documents raises ValueError when storage is None."""
        source = StringKnowledgeSource(content="Test")
        source.storage = None
        with pytest.raises(ValueError, match="No storage found"):
            await source._asave_documents()
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/knowledge/test_async_knowledge.py",
"license": "MIT License",
"lines": 159,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/tests/agents/test_async_agent_executor.py | """Tests for async agent executor functionality."""
import asyncio
from typing import Any
from unittest.mock import AsyncMock, MagicMock, Mock, patch
import pytest
from crewai.agents.crew_agent_executor import CrewAgentExecutor
from crewai.agents.parser import AgentAction, AgentFinish
from crewai.tools.tool_types import ToolResult
@pytest.fixture
def mock_llm() -> MagicMock:
    """Provide a stubbed LLM that supports stop words and has an empty stop list."""
    stub = MagicMock()
    stub.supports_stop_words.return_value = True
    stub.stop = []
    return stub
@pytest.fixture
def mock_agent() -> MagicMock:
    """Provide a stubbed, non-verbose agent with fixed role/key/id."""
    stub = MagicMock()
    stub.role = "Test Agent"
    stub.key = "test_agent_key"
    stub.verbose = False
    stub.id = "test_agent_id"
    return stub
@pytest.fixture
def mock_task() -> MagicMock:
    """Provide a stubbed task carrying only a description."""
    stub = MagicMock()
    stub.description = "Test task description"
    return stub
@pytest.fixture
def mock_crew() -> MagicMock:
    """Provide a stubbed crew that is neither verbose nor in training mode."""
    stub = MagicMock()
    stub.verbose = False
    stub._train = False
    return stub
@pytest.fixture
def mock_tools_handler() -> MagicMock:
    """Provide a bare stubbed tools handler."""
    handler = MagicMock()
    return handler
@pytest.fixture
def executor(
    mock_llm: MagicMock,
    mock_agent: MagicMock,
    mock_task: MagicMock,
    mock_crew: MagicMock,
    mock_tools_handler: MagicMock,
) -> CrewAgentExecutor:
    """Build a CrewAgentExecutor wired to the stub fixtures, with no tools."""
    kwargs = dict(
        llm=mock_llm,
        task=mock_task,
        crew=mock_crew,
        agent=mock_agent,
        prompt={"prompt": "Test prompt {input} {tool_names} {tools}"},
        max_iter=5,
        tools=[],
        tools_names="",
        stop_words=["Observation:"],
        tools_description="",
        tools_handler=mock_tools_handler,
    )
    return CrewAgentExecutor(**kwargs)
class TestAsyncAgentExecutor:
    """Tests for async agent executor methods.

    The LLM and tool layers are patched at the ``crewai.agents.
    crew_agent_executor`` module level, so only the executor's own async
    control flow (``ainvoke`` / ``_ainvoke_loop``) is exercised.
    """
    @pytest.mark.asyncio
    async def test_ainvoke_returns_output(self, executor: CrewAgentExecutor) -> None:
        """Test that ainvoke returns the expected output."""
        expected_output = "Final answer from agent"
        with patch.object(
            executor,
            "_ainvoke_loop",
            new_callable=AsyncMock,
            return_value=AgentFinish(
                thought="Done", output=expected_output, text="Final Answer: Done"
            ),
        ):
            with patch.object(executor, "_show_start_logs"):
                with patch.object(executor, "_save_to_memory"):
                    result = await executor.ainvoke(
                        {
                            "input": "test input",
                            "tool_names": "",
                            "tools": "",
                        }
                    )
        assert result == {"output": expected_output}
    @pytest.mark.asyncio
    async def test_ainvoke_loop_calls_aget_llm_response(
        self, executor: CrewAgentExecutor
    ) -> None:
        """Test that _ainvoke_loop calls aget_llm_response."""
        with patch(
            "crewai.agents.crew_agent_executor.aget_llm_response",
            new_callable=AsyncMock,
            return_value="Thought: I know the answer\nFinal Answer: Test result",
        ) as mock_aget_llm:
            with patch.object(executor, "_show_logs"):
                result = await executor._ainvoke_loop()
        mock_aget_llm.assert_called_once()
        assert isinstance(result, AgentFinish)
    @pytest.mark.asyncio
    async def test_ainvoke_loop_handles_tool_execution(
        self,
        executor: CrewAgentExecutor,
    ) -> None:
        """Test that _ainvoke_loop handles tool execution asynchronously."""
        call_count = 0
        # First LLM turn requests a tool; second turn produces the final answer.
        async def mock_llm_response(*args: Any, **kwargs: Any) -> str:
            nonlocal call_count
            call_count += 1
            if call_count == 1:
                return (
                    "Thought: I need to use a tool\n"
                    "Action: test_tool\n"
                    'Action Input: {"arg": "value"}'
                )
            return "Thought: I have the answer\nFinal Answer: Tool result processed"
        with patch(
            "crewai.agents.crew_agent_executor.aget_llm_response",
            new_callable=AsyncMock,
            side_effect=mock_llm_response,
        ):
            with patch(
                "crewai.agents.crew_agent_executor.aexecute_tool_and_check_finality",
                new_callable=AsyncMock,
                return_value=ToolResult(result="Tool executed", result_as_answer=False),
            ) as mock_tool_exec:
                with patch.object(executor, "_show_logs"):
                    with patch.object(executor, "_handle_agent_action") as mock_handle:
                        mock_handle.return_value = AgentAction(
                            text="Tool result",
                            tool="test_tool",
                            tool_input='{"arg": "value"}',
                            thought="Used tool",
                            result="Tool executed",
                        )
                        result = await executor._ainvoke_loop()
        assert mock_tool_exec.called
        assert isinstance(result, AgentFinish)
    @pytest.mark.asyncio
    async def test_ainvoke_loop_respects_max_iterations(
        self, executor: CrewAgentExecutor
    ) -> None:
        """Test that _ainvoke_loop respects max iterations."""
        executor.max_iter = 2
        # The LLM never finishes, so the loop must hit the max-iteration path.
        async def always_return_action(*args: Any, **kwargs: Any) -> str:
            return (
                "Thought: I need to think more\n"
                "Action: some_tool\n"
                "Action Input: {}"
            )
        with patch(
            "crewai.agents.crew_agent_executor.aget_llm_response",
            new_callable=AsyncMock,
            side_effect=always_return_action,
        ):
            with patch(
                "crewai.agents.crew_agent_executor.aexecute_tool_and_check_finality",
                new_callable=AsyncMock,
                return_value=ToolResult(result="Tool result", result_as_answer=False),
            ):
                with patch(
                    "crewai.agents.crew_agent_executor.handle_max_iterations_exceeded",
                    return_value=AgentFinish(
                        thought="Max iterations",
                        output="Forced answer",
                        text="Max iterations reached",
                    ),
                ) as mock_max_iter:
                    with patch.object(executor, "_show_logs"):
                        with patch.object(executor, "_handle_agent_action") as mock_ha:
                            mock_ha.return_value = AgentAction(
                                text="Action",
                                tool="some_tool",
                                tool_input="{}",
                                thought="Thinking",
                            )
                            result = await executor._ainvoke_loop()
        mock_max_iter.assert_called_once()
        assert isinstance(result, AgentFinish)
    @pytest.mark.asyncio
    async def test_ainvoke_handles_exceptions(
        self, executor: CrewAgentExecutor
    ) -> None:
        """Test that ainvoke properly propagates exceptions."""
        with patch.object(executor, "_show_start_logs"):
            with patch.object(
                executor,
                "_ainvoke_loop",
                new_callable=AsyncMock,
                side_effect=ValueError("Test error"),
            ):
                with pytest.raises(ValueError, match="Test error"):
                    await executor.ainvoke(
                        {"input": "test", "tool_names": "", "tools": ""}
                    )
    @pytest.mark.asyncio
    async def test_concurrent_ainvoke_calls(
        self, mock_llm: MagicMock, mock_agent: MagicMock, mock_task: MagicMock,
        mock_crew: MagicMock, mock_tools_handler: MagicMock
    ) -> None:
        """Test that multiple ainvoke calls can run concurrently."""
        # Track peak overlap: a sleep inside the stubbed LLM keeps each call
        # suspended long enough for the others to enter.
        max_concurrent = 0
        current_concurrent = 0
        lock = asyncio.Lock()
        async def create_and_run_executor(executor_id: int) -> dict[str, Any]:
            nonlocal max_concurrent, current_concurrent
            executor = CrewAgentExecutor(
                llm=mock_llm,
                task=mock_task,
                crew=mock_crew,
                agent=mock_agent,
                prompt={"prompt": "Test {input} {tool_names} {tools}"},
                max_iter=5,
                tools=[],
                tools_names="",
                stop_words=["Observation:"],
                tools_description="",
                tools_handler=mock_tools_handler,
            )
            async def delayed_response(*args: Any, **kwargs: Any) -> str:
                nonlocal max_concurrent, current_concurrent
                async with lock:
                    current_concurrent += 1
                    max_concurrent = max(max_concurrent, current_concurrent)
                await asyncio.sleep(0.01)
                async with lock:
                    current_concurrent -= 1
                return f"Thought: Done\nFinal Answer: Result from executor {executor_id}"
            with patch(
                "crewai.agents.crew_agent_executor.aget_llm_response",
                new_callable=AsyncMock,
                side_effect=delayed_response,
            ):
                with patch.object(executor, "_show_start_logs"):
                    with patch.object(executor, "_show_logs"):
                        with patch.object(executor, "_save_to_memory"):
                            return await executor.ainvoke(
                                {
                                    "input": f"test {executor_id}",
                                    "tool_names": "",
                                    "tools": "",
                                }
                            )
        results = await asyncio.gather(
            create_and_run_executor(1),
            create_and_run_executor(2),
            create_and_run_executor(3),
        )
        assert len(results) == 3
        assert all("output" in r for r in results)
        assert max_concurrent > 1, f"Expected concurrent execution, max concurrent was {max_concurrent}"
class TestInvokeStepCallback:
    """Tests for _invoke_step_callback with sync and async callbacks."""
    def test_invoke_step_callback_with_sync_callback(
        self, executor: CrewAgentExecutor
    ) -> None:
        """Test that a sync step callback is called normally."""
        callback = Mock()
        executor.step_callback = callback
        answer = AgentFinish(thought="thinking", output="test", text="final")
        executor._invoke_step_callback(answer)
        callback.assert_called_once_with(answer)
    def test_invoke_step_callback_with_async_callback(
        self, executor: CrewAgentExecutor
    ) -> None:
        """Test that an async step callback is awaited via asyncio.run."""
        async_callback = AsyncMock()
        executor.step_callback = async_callback
        answer = AgentFinish(thought="thinking", output="test", text="final")
        # Patch asyncio.run in the executor module so no event loop is started.
        with patch("crewai.agents.crew_agent_executor.asyncio.run") as mock_run:
            executor._invoke_step_callback(answer)
        async_callback.assert_called_once_with(answer)
        mock_run.assert_called_once()
    def test_invoke_step_callback_with_none(
        self, executor: CrewAgentExecutor
    ) -> None:
        """Test that no error is raised when step_callback is None."""
        executor.step_callback = None
        answer = AgentFinish(thought="thinking", output="test", text="final")
        # Should not raise
        executor._invoke_step_callback(answer)
class TestAsyncLLMResponseHelper:
    """Tests for the aget_llm_response helper function.

    The helper is imported inside each test (local import) to avoid paying
    the module import cost unless the test actually runs.
    """
    @pytest.mark.asyncio
    async def test_aget_llm_response_calls_acall(self) -> None:
        """Test that aget_llm_response calls llm.acall."""
        from crewai.utilities.agent_utils import aget_llm_response
        from crewai.utilities.printer import Printer
        mock_llm = MagicMock()
        mock_llm.acall = AsyncMock(return_value="LLM response")
        result = await aget_llm_response(
            llm=mock_llm,
            messages=[{"role": "user", "content": "test"}],
            callbacks=[],
            printer=Printer(),
        )
        mock_llm.acall.assert_called_once()
        assert result == "LLM response"
    @pytest.mark.asyncio
    async def test_aget_llm_response_raises_on_empty_response(self) -> None:
        """Test that aget_llm_response raises ValueError on empty response."""
        from crewai.utilities.agent_utils import aget_llm_response
        from crewai.utilities.printer import Printer
        mock_llm = MagicMock()
        mock_llm.acall = AsyncMock(return_value="")
        with pytest.raises(ValueError, match="Invalid response from LLM call"):
            await aget_llm_response(
                llm=mock_llm,
                messages=[{"role": "user", "content": "test"}],
                callbacks=[],
                printer=Printer(),
            )
    @pytest.mark.asyncio
    async def test_aget_llm_response_propagates_exceptions(self) -> None:
        """Test that aget_llm_response propagates LLM exceptions."""
        from crewai.utilities.agent_utils import aget_llm_response
        from crewai.utilities.printer import Printer
        mock_llm = MagicMock()
        mock_llm.acall = AsyncMock(side_effect=RuntimeError("LLM error"))
        with pytest.raises(RuntimeError, match="LLM error"):
            await aget_llm_response(
                llm=mock_llm,
                messages=[{"role": "user", "content": "test"}],
                callbacks=[],
                printer=Printer(),
            )
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/agents/test_async_agent_executor.py",
"license": "MIT License",
"lines": 328,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/src/crewai/a2a/extensions/base.py | """Base extension interface for CrewAI A2A wrapper processing hooks.
This module defines the protocol for extending CrewAI's A2A wrapper functionality
with custom logic for tool injection, prompt augmentation, and response processing.
Note: These are CrewAI-specific processing hooks, NOT A2A protocol extensions.
A2A protocol extensions are capability declarations using AgentExtension objects
in AgentCard.capabilities.extensions, activated via the A2A-Extensions HTTP header.
See: https://a2a-protocol.org/latest/topics/extensions/
"""
from __future__ import annotations
from collections.abc import Sequence
from typing import TYPE_CHECKING, Annotated, Any, Protocol, runtime_checkable
from pydantic import BeforeValidator
if TYPE_CHECKING:
from a2a.types import Message
from crewai.agent.core import Agent
def _validate_a2a_extension(v: Any) -> Any:
    """Ensure a value structurally satisfies the A2AExtension protocol.

    Used as a pydantic ``BeforeValidator``: the value is returned unchanged
    when it conforms, otherwise a ``ValueError`` is raised.
    """
    if isinstance(v, A2AExtension):
        return v
    raise ValueError(
        f"Value must implement A2AExtension protocol. "
        f"Got {type(v).__name__} which is missing required methods."
    )
ValidatedA2AExtension = Annotated[Any, BeforeValidator(_validate_a2a_extension)]
@runtime_checkable
class ConversationState(Protocol):
    """Protocol for extension-specific conversation state.

    Extensions can define their own state classes that implement this protocol
    to track conversation-specific data extracted from message history.
    Being ``runtime_checkable``, conforming instances can be detected with
    ``isinstance`` (structural check on method presence only).
    """
    def is_ready(self) -> bool:
        """Check if the state indicates readiness for some action.

        Returns:
            True if the state is ready, False otherwise.
        """
        ...
@runtime_checkable
class A2AExtension(Protocol):
    """Protocol for A2A wrapper extensions.

    Extensions can implement this protocol to inject custom logic into
    the A2A conversation flow at various integration points.

    Note:
        ``@runtime_checkable`` isinstance checks (used by
        ``_validate_a2a_extension``) only verify that the four hook methods
        exist — method signatures are not validated at runtime.

    Example:
        class MyExtension:
            def inject_tools(self, agent: Agent) -> None:
                # Add custom tools to the agent
                pass
            def extract_state_from_history(
                self, conversation_history: Sequence[Message]
            ) -> ConversationState | None:
                # Extract state from conversation
                return None
            def augment_prompt(
                self, base_prompt: str, conversation_state: ConversationState | None
            ) -> str:
                # Add custom instructions
                return base_prompt
            def process_response(
                self, agent_response: Any, conversation_state: ConversationState | None
            ) -> Any:
                # Modify response if needed
                return agent_response
    """

    def inject_tools(self, agent: Agent) -> None:
        """Inject extension-specific tools into the agent.

        Called when an agent is wrapped with A2A capabilities. Extensions
        can add tools that enable extension-specific functionality.

        Args:
            agent: The agent instance to inject tools into.
        """
        ...

    def extract_state_from_history(
        self, conversation_history: Sequence[Message]
    ) -> ConversationState | None:
        """Extract extension-specific state from conversation history.

        Called during prompt augmentation to allow extensions to analyze
        the conversation history and extract relevant state information.

        Args:
            conversation_history: The sequence of A2A messages exchanged.

        Returns:
            Extension-specific conversation state, or None if no relevant state.
        """
        ...

    def augment_prompt(
        self,
        base_prompt: str,
        conversation_state: ConversationState | None,
    ) -> str:
        """Augment the task prompt with extension-specific instructions.

        Called during prompt augmentation to allow extensions to add
        custom instructions based on conversation state.

        Args:
            base_prompt: The base prompt to augment.
            conversation_state: Extension-specific state from extract_state_from_history.

        Returns:
            The augmented prompt with extension-specific instructions.
        """
        ...

    def process_response(
        self,
        agent_response: Any,
        conversation_state: ConversationState | None,
    ) -> Any:
        """Process and potentially modify the agent response.

        Called after parsing the agent's response, allowing extensions to
        enhance or modify the response based on conversation state.

        Args:
            agent_response: The parsed agent response.
            conversation_state: Extension-specific state from extract_state_from_history.

        Returns:
            The processed agent response (may be modified or original).
        """
        ...
class ExtensionRegistry:
    """Registry for managing A2A extensions.

    Holds registered extensions and fans each lifecycle hook (tool
    injection, state extraction, prompt augmentation, response processing)
    out across all of them in registration order.
    """

    def __init__(self) -> None:
        """Initialize an empty registry."""
        self._extensions: list[A2AExtension] = []

    def register(self, extension: A2AExtension) -> None:
        """Add an extension to the registry.

        Args:
            extension: The extension to register.
        """
        self._extensions.append(extension)

    def inject_all_tools(self, agent: Agent) -> None:
        """Invoke the tool-injection hook of every registered extension.

        Args:
            agent: The agent instance to inject tools into.
        """
        for ext in self._extensions:
            ext.inject_tools(agent)

    def extract_all_states(
        self, conversation_history: Sequence[Message]
    ) -> dict[type[A2AExtension], ConversationState]:
        """Collect conversation state from every registered extension.

        Args:
            conversation_history: The sequence of A2A messages exchanged.

        Returns:
            Mapping of extension types to their conversation states;
            extensions reporting no state (None) are omitted.
        """
        return {
            type(ext): state
            for ext in self._extensions
            if (state := ext.extract_state_from_history(conversation_history))
            is not None
        }

    def augment_prompt_with_all(
        self,
        base_prompt: str,
        extension_states: dict[type[A2AExtension], ConversationState],
    ) -> str:
        """Thread the prompt through every extension's augment hook.

        Args:
            base_prompt: The base prompt to augment.
            extension_states: Mapping of extension types to conversation states.

        Returns:
            The fully augmented prompt.
        """
        prompt = base_prompt
        for ext in self._extensions:
            prompt = ext.augment_prompt(prompt, extension_states.get(type(ext)))
        return prompt

    def process_response_with_all(
        self,
        agent_response: Any,
        extension_states: dict[type[A2AExtension], ConversationState],
    ) -> Any:
        """Thread the response through every extension's processing hook.

        Args:
            agent_response: The parsed agent response.
            extension_states: Mapping of extension types to conversation states.

        Returns:
            The processed agent response.
        """
        response = agent_response
        for ext in self._extensions:
            response = ext.process_response(response, extension_states.get(type(ext)))
        return response
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/a2a/extensions/base.py",
"license": "MIT License",
"lines": 182,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
crewAIInc/crewAI:lib/crewai/src/crewai/a2a/extensions/registry.py | """A2A Protocol extension utilities.
This module provides utilities for working with A2A protocol extensions as
defined in the A2A specification. Extensions are capability declarations in
AgentCard.capabilities.extensions using AgentExtension objects, activated
via the X-A2A-Extensions HTTP header.
See: https://a2a-protocol.org/latest/topics/extensions/
"""
from __future__ import annotations
from typing import Any
from a2a.client.middleware import ClientCallContext, ClientCallInterceptor
from a2a.extensions.common import (
HTTP_EXTENSION_HEADER,
)
from a2a.types import AgentCard, AgentExtension
from crewai.a2a.config import A2AClientConfig, A2AConfig
from crewai.a2a.extensions.base import ExtensionRegistry
def get_extensions_from_config(
    a2a_config: list[A2AConfig | A2AClientConfig] | A2AConfig | A2AClientConfig,
) -> list[str]:
    """Extract extension URIs from A2A configuration.

    Args:
        a2a_config: A2A configuration (single or list).

    Returns:
        Deduplicated list of extension URIs from all configs, preserving
        first-seen order.
    """
    configs = a2a_config if isinstance(a2a_config, list) else [a2a_config]
    # A dict keeps insertion order, giving first-seen dedup in one structure.
    ordered: dict[str, None] = {}
    for config in configs:
        if isinstance(config, A2AClientConfig):
            for uri in config.extensions:
                ordered.setdefault(uri)
    return list(ordered)
class ExtensionsMiddleware(ClientCallInterceptor):
    """Middleware to add X-A2A-Extensions header to requests.

    Declares the client's supported A2A protocol extensions on every
    outgoing request by setting the extensions HTTP header.
    """

    def __init__(self, extensions: list[str]) -> None:
        """Initialize with extension URIs.

        Args:
            extensions: List of extension URIs the client supports.
        """
        self._extensions = extensions

    async def intercept(
        self,
        method_name: str,
        request_payload: dict[str, Any],
        http_kwargs: dict[str, Any],
        agent_card: AgentCard | None,
        context: ClientCallContext | None,
    ) -> tuple[dict[str, Any], dict[str, Any]]:
        """Add extensions header to the request.

        Args:
            method_name: The A2A method being called.
            request_payload: The JSON-RPC request payload.
            http_kwargs: HTTP request kwargs (headers, etc).
            agent_card: The target agent's card.
            context: Optional call context.

        Returns:
            Tuple of (request_payload, modified_http_kwargs).
        """
        # Nothing to declare — pass the call through untouched.
        if not self._extensions:
            return request_payload, http_kwargs
        http_kwargs.setdefault("headers", {})[HTTP_EXTENSION_HEADER] = ",".join(
            self._extensions
        )
        return request_payload, http_kwargs
def validate_required_extensions(
    agent_card: AgentCard,
    client_extensions: list[str] | None,
) -> list[AgentExtension]:
    """Validate that client supports all required extensions from agent.

    Args:
        agent_card: The agent's card with declared extensions.
        client_extensions: Extension URIs the client supports.

    Returns:
        List of required extensions the client does not support (empty when
        the client satisfies everything the agent requires). This function
        never raises; the caller decides how to handle unsupported entries.
    """
    # Fast path: nothing to validate when the agent declares no extensions.
    if not agent_card.capabilities or not agent_card.capabilities.extensions:
        return []
    # Set membership keeps the scan linear even for long extension lists.
    client_set = set(client_extensions or [])
    return [
        ext
        for ext in agent_card.capabilities.extensions
        if ext.required and ext.uri not in client_set
    ]
def create_extension_registry_from_config(
    a2a_config: list[A2AConfig | A2AClientConfig] | A2AConfig | A2AClientConfig,
) -> ExtensionRegistry:
    """Create an extension registry from A2A client configuration.

    Extracts client_extensions from each A2AClientConfig and registers them
    with the ExtensionRegistry. These extensions provide CrewAI-specific
    processing hooks (tool injection, prompt augmentation, response processing).

    Note: A2A protocol extensions (URI strings sent via X-A2A-Extensions header)
    are handled separately via get_extensions_from_config() and ExtensionsMiddleware.

    Args:
        a2a_config: A2A configuration (single or list).

    Returns:
        Extension registry with all client_extensions registered.

    Example:
        class LoggingExtension:
            def inject_tools(self, agent): pass
            def extract_state_from_history(self, history): return None
            def augment_prompt(self, prompt, state): return prompt
            def process_response(self, response, state):
                print(f"Response: {response}")
                return response
        config = A2AClientConfig(
            endpoint="https://agent.example.com",
            client_extensions=[LoggingExtension()],
        )
        registry = create_extension_registry_from_config(config)
    """
    registry = ExtensionRegistry()
    configs = a2a_config if isinstance(a2a_config, list) else [a2a_config]
    # Deduplicate by object identity so the same instance is registered once
    # even when it appears in several configs.
    registered_ids: set[int] = set()
    for config in configs:
        if not isinstance(config, (A2AConfig, A2AClientConfig)):
            continue
        for extension in getattr(config, "client_extensions", []):
            if id(extension) in registered_ids:
                continue
            registered_ids.add(id(extension))
            registry.register(extension)
    return registry
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/a2a/extensions/registry.py",
"license": "MIT License",
"lines": 131,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
crewAIInc/crewAI:lib/crewai/tests/tools/test_async_tools.py | """Tests for async tool functionality."""
import asyncio
import pytest
from crewai.tools import BaseTool, tool
class SyncTool(BaseTool):
    """Test tool with synchronous _run method."""

    name: str = "sync_tool"
    description: str = "A synchronous tool for testing"

    # Intentionally no _arun override: arun() is expected to raise
    # NotImplementedError for this tool.
    def _run(self, input_text: str) -> str:
        """Process input text synchronously."""
        return f"Sync processed: {input_text}"
class AsyncTool(BaseTool):
    """Test tool with both sync and async implementations."""

    name: str = "async_tool"
    description: str = "An asynchronous tool for testing"

    def _run(self, input_text: str) -> str:
        """Process input text synchronously."""
        return f"Sync processed: {input_text}"

    async def _arun(self, input_text: str) -> str:
        """Process input text asynchronously."""
        # Small sleep yields control to the event loop so concurrency
        # tests genuinely overlap.
        await asyncio.sleep(0.01)
        return f"Async processed: {input_text}"
class TestBaseTool:
    """Tests for BaseTool async functionality."""

    def test_sync_tool_run_returns_result(self) -> None:
        """Test that sync tool run() returns correct result."""
        sync_tool = SyncTool()
        assert sync_tool.run(input_text="hello") == "Sync processed: hello"

    def test_async_tool_run_returns_result(self) -> None:
        """Test that async tool run() works."""
        async_tool = AsyncTool()
        assert async_tool.run(input_text="hello") == "Sync processed: hello"

    @pytest.mark.asyncio
    async def test_sync_tool_arun_raises_not_implemented(self) -> None:
        """Test that sync tool arun() raises NotImplementedError."""
        sync_tool = SyncTool()
        with pytest.raises(NotImplementedError):
            await sync_tool.arun(input_text="hello")

    @pytest.mark.asyncio
    async def test_async_tool_arun_returns_result(self) -> None:
        """Test that async tool arun() awaits directly."""
        async_tool = AsyncTool()
        assert await async_tool.arun(input_text="hello") == "Async processed: hello"

    @pytest.mark.asyncio
    async def test_arun_increments_usage_count(self) -> None:
        """Test that arun increments the usage count."""
        async_tool = AsyncTool()
        assert async_tool.current_usage_count == 0
        await async_tool.arun(input_text="test")
        assert async_tool.current_usage_count == 1
        await async_tool.arun(input_text="test2")
        assert async_tool.current_usage_count == 2

    @pytest.mark.asyncio
    async def test_multiple_async_tools_run_concurrently(self) -> None:
        """Test that multiple async tools can run concurrently."""
        first, second = AsyncTool(), AsyncTool()
        results = await asyncio.gather(
            first.arun(input_text="first"),
            second.arun(input_text="second"),
        )
        assert results == ["Async processed: first", "Async processed: second"]
class TestToolDecorator:
    """Tests for @tool decorator with async functions."""

    def test_sync_decorated_tool_run(self) -> None:
        """Test sync decorated tool works with run()."""

        @tool("sync_decorated")
        def sync_fn(value: str) -> str:
            """A sync decorated tool."""
            return f"sync: {value}"

        assert sync_fn.run(value="test") == "sync: test"

    def test_async_decorated_tool_run(self) -> None:
        """Test async decorated tool works with run()."""

        @tool("async_decorated")
        async def async_fn(value: str) -> str:
            """An async decorated tool."""
            await asyncio.sleep(0.01)
            return f"async: {value}"

        assert async_fn.run(value="test") == "async: test"

    @pytest.mark.asyncio
    async def test_sync_decorated_tool_arun_raises(self) -> None:
        """Test sync decorated tool arun() raises NotImplementedError."""

        @tool("sync_decorated_arun")
        def sync_fn(value: str) -> str:
            """A sync decorated tool."""
            return f"sync: {value}"

        with pytest.raises(NotImplementedError):
            await sync_fn.arun(value="test")

    @pytest.mark.asyncio
    async def test_async_decorated_tool_arun(self) -> None:
        """Test async decorated tool works with arun()."""

        @tool("async_decorated_arun")
        async def async_fn(value: str) -> str:
            """An async decorated tool."""
            await asyncio.sleep(0.01)
            return f"async: {value}"

        assert await async_fn.arun(value="test") == "async: test"
class TestAsyncToolWithIO:
    """Tests for async tools with simulated I/O operations."""

    @pytest.mark.asyncio
    async def test_async_tool_simulated_io(self) -> None:
        """Test async tool with simulated I/O delay."""

        class SlowAsyncTool(BaseTool):
            name: str = "slow_async"
            description: str = "Simulates slow I/O"

            def _run(self, delay: float) -> str:
                return f"Completed after {delay}s"

            async def _arun(self, delay: float) -> str:
                await asyncio.sleep(delay)
                return f"Completed after {delay}s"

        slow_tool = SlowAsyncTool()
        assert await slow_tool.arun(delay=0.05) == "Completed after 0.05s"

    @pytest.mark.asyncio
    async def test_multiple_slow_tools_concurrent(self) -> None:
        """Test that slow async tools benefit from concurrency."""
        import time

        class SlowAsyncTool(BaseTool):
            name: str = "slow_async"
            description: str = "Simulates slow I/O"

            def _run(self, task_id: int, delay: float) -> str:
                return f"Task {task_id} done"

            async def _arun(self, task_id: int, delay: float) -> str:
                await asyncio.sleep(delay)
                return f"Task {task_id} done"

        slow_tool = SlowAsyncTool()
        t0 = time.time()
        results = await asyncio.gather(
            slow_tool.arun(task_id=1, delay=0.1),
            slow_tool.arun(task_id=2, delay=0.1),
            slow_tool.arun(task_id=3, delay=0.1),
        )
        # `elapsed` is asserted against the concurrency budget below.
        elapsed = time.time() - t0
        assert len(results) == 3
        assert all("done" in r for r in results)
assert elapsed < 0.25, f"Expected concurrent execution, took {elapsed}s" | {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/tools/test_async_tools.py",
"license": "MIT License",
"lines": 143,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/tests/llms/anthropic/test_anthropic_async.py | """Tests for Anthropic async completion functionality."""
import json
import logging
import pytest
import tiktoken
from pydantic import BaseModel
from crewai.llm import LLM
from crewai.llms.providers.anthropic.completion import AnthropicCompletion
@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_anthropic_async_basic_call():
    """Test basic async call with Anthropic."""
    llm = LLM(model="anthropic/claude-sonnet-4-0")
    response = await llm.acall("Say hello")
    assert response is not None
    assert isinstance(response, str)
    assert len(response) > 0
@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_anthropic_async_with_temperature():
    """Test async call with temperature parameter."""
    llm = LLM(model="anthropic/claude-sonnet-4-0", temperature=0.1)
    response = await llm.acall("Say the word 'test' once")
    assert isinstance(response, str)
    assert response is not None
@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_anthropic_async_with_max_tokens():
    """Test async call with max_tokens parameter."""
    llm = LLM(model="anthropic/claude-sonnet-4-0", max_tokens=10)
    result = await llm.acall("Write a very long story about a dragon.")
    assert result is not None
    assert isinstance(result, str)
    # NOTE(review): cl100k_base is OpenAI's tokenizer, not Anthropic's, so
    # this count only approximates Claude's tokenization — the <= 10 bound
    # could drift by a token when re-recording the cassette; verify then.
    encoder = tiktoken.get_encoding("cl100k_base")
    token_count = len(encoder.encode(result))
    assert token_count <= 10
@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_anthropic_async_with_system_message():
    """Test async call with system message."""
    llm = LLM(model="anthropic/claude-sonnet-4-0")
    chat = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "What is 2+2?"},
    ]
    response = await llm.acall(chat)
    assert isinstance(response, str)
    assert response is not None
@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_anthropic_async_conversation():
    """Test async call with conversation history."""
    llm = LLM(model="anthropic/claude-sonnet-4-0")
    chat = [
        {"role": "user", "content": "My name is Alice."},
        {"role": "assistant", "content": "Hello Alice! Nice to meet you."},
        {"role": "user", "content": "What is my name?"},
    ]
    response = await llm.acall(chat)
    assert isinstance(response, str)
    assert response is not None
@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_anthropic_async_stop_sequences():
    """Test async call with stop sequences."""
    llm = LLM(model="anthropic/claude-sonnet-4-0", stop_sequences=["END", "STOP"])
    response = await llm.acall("Count from 1 to 10")
    assert isinstance(response, str)
    assert response is not None
@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_anthropic_async_multiple_calls():
    """Test making multiple async calls in sequence."""
    llm = LLM(model="anthropic/claude-sonnet-4-0")
    first = await llm.acall("What is 1+1?")
    second = await llm.acall("What is 2+2?")
    for answer in (first, second):
        assert answer is not None
        assert isinstance(answer, str)
@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_anthropic_async_with_response_format_none():
    """Test async call with response_format set to None."""
    llm = LLM(model="anthropic/claude-sonnet-4-0", response_format=None)
    response = await llm.acall("Tell me a short fact")
    assert isinstance(response, str)
    assert response is not None
@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_anthropic_async_with_response_format_json():
    """Test async call with JSON response format."""
    llm = LLM(
        model="anthropic/claude-sonnet-4-0",
        response_format={"type": "json_object"},
    )
    raw = await llm.acall("Return a JSON object devoid of ```json{x}```, where x is the json object, with a 'greeting' field")
    assert isinstance(raw, str)
    parsed = json.loads(raw)
    assert isinstance(parsed, dict)
    assert isinstance(parsed["greeting"], str)
class GreetingResponse(BaseModel):
    """Response model for greeting test."""

    # The greeting text itself (e.g. "Bonjour").
    greeting: str
    # The language the greeting is in (e.g. "French").
    language: str
@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_anthropic_async_with_response_model():
    """Test async call with Pydantic response_model for structured output."""
    llm = LLM(model="anthropic/claude-sonnet-4-0")
    parsed = await llm.acall("Say hello in French", response_model=GreetingResponse)
    # With response_model, acall returns an already-parsed model instance.
    assert isinstance(parsed, GreetingResponse)
    assert isinstance(parsed.greeting, str)
    assert isinstance(parsed.language, str)
@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_anthropic_async_with_tools():
    """Test async call with tools."""
    llm = AnthropicCompletion(model="claude-sonnet-4-0")
    weather_schema = {
        "type": "object",
        "properties": {
            "location": {
                "type": "string",
                "description": "The city and state, e.g. San Francisco, CA"
            }
        },
        "required": ["location"]
    }
    tools = [
        {
            "type": "function",
            "function": {
                "name": "get_weather",
                "description": "Get the current weather for a location",
                "parameters": weather_schema,
            }
        }
    ]
    response = await llm.acall("What's the weather in San Francisco?", tools=tools)
    logging.debug("result: %s", response)
    assert response is not None
    # Native tool calling may yield a list of tool calls instead of text.
    assert isinstance(response, (str, list))
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/llms/anthropic/test_anthropic_async.py",
"license": "MIT License",
"lines": 150,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/tests/llms/bedrock/test_bedrock_async.py | """Tests for Bedrock async completion functionality.
Note: These tests are skipped in CI because VCR.py does not support
aiobotocore's HTTP session. The cassettes were recorded locally but
cannot be played back properly in CI.
"""
import pytest
import tiktoken
from crewai.llm import LLM
# Every test below is skipped: VCR.py cannot record/replay aiobotocore's
# async HTTP transport, so the cassettes only play back when recorded locally.
SKIP_REASON = "VCR does not support aiobotocore async HTTP client"
@pytest.mark.vcr()
@pytest.mark.asyncio
@pytest.mark.skip(reason=SKIP_REASON)
async def test_bedrock_async_basic_call():
    """Test basic async call with Bedrock."""
    llm = LLM(model="bedrock/us.anthropic.claude-3-5-sonnet-20241022-v2:0")
    response = await llm.acall("Say hello")
    assert response is not None
    assert isinstance(response, str)
    assert len(response) > 0
@pytest.mark.vcr()
@pytest.mark.asyncio
@pytest.mark.skip(reason=SKIP_REASON)
async def test_bedrock_async_with_temperature():
    """Test async call with temperature parameter."""
    llm = LLM(
        model="bedrock/us.anthropic.claude-3-5-sonnet-20241022-v2:0",
        temperature=0.1,
    )
    response = await llm.acall("Say the word 'test' once")
    assert isinstance(response, str)
    assert response is not None
@pytest.mark.vcr()
@pytest.mark.asyncio
@pytest.mark.skip(reason=SKIP_REASON)
async def test_bedrock_async_with_max_tokens():
    """Test async call with max_tokens parameter."""
    llm = LLM(model="bedrock/us.anthropic.claude-3-5-sonnet-20241022-v2:0", max_tokens=10)
    result = await llm.acall("Write a very long story about a dragon.")
    assert result is not None
    assert isinstance(result, str)
    # NOTE(review): cl100k_base is OpenAI's tokenizer, not Anthropic's — the
    # count only approximates the model's own tokenization; the <= 10 bound
    # may drift by a token if the cassette is ever re-recorded.
    encoder = tiktoken.get_encoding("cl100k_base")
    token_count = len(encoder.encode(result))
    assert token_count <= 10
@pytest.mark.vcr()
@pytest.mark.asyncio
@pytest.mark.skip(reason=SKIP_REASON)
async def test_bedrock_async_with_system_message():
    """Test async call with system message."""
    llm = LLM(model="bedrock/us.anthropic.claude-3-5-sonnet-20241022-v2:0")
    chat = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "What is 2+2?"},
    ]
    response = await llm.acall(chat)
    assert isinstance(response, str)
    assert response is not None
@pytest.mark.vcr()
@pytest.mark.asyncio
@pytest.mark.skip(reason=SKIP_REASON)
async def test_bedrock_async_conversation():
    """Test async call with conversation history."""
    llm = LLM(model="bedrock/us.anthropic.claude-3-5-sonnet-20241022-v2:0")
    chat = [
        {"role": "user", "content": "My name is Alice."},
        {"role": "assistant", "content": "Hello Alice! Nice to meet you."},
        {"role": "user", "content": "What is my name?"},
    ]
    response = await llm.acall(chat)
    assert isinstance(response, str)
    assert response is not None
@pytest.mark.vcr()
@pytest.mark.asyncio
@pytest.mark.skip(reason=SKIP_REASON)
async def test_bedrock_async_multiple_calls():
    """Test making multiple async calls in sequence."""
    llm = LLM(model="bedrock/us.anthropic.claude-3-5-sonnet-20241022-v2:0")
    first = await llm.acall("What is 1+1?")
    second = await llm.acall("What is 2+2?")
    for answer in (first, second):
        assert answer is not None
        assert isinstance(answer, str)
@pytest.mark.vcr()
@pytest.mark.asyncio
@pytest.mark.skip(reason=SKIP_REASON)
async def test_bedrock_async_with_parameters():
    """Test async call with multiple parameters."""
    llm = LLM(
        model="bedrock/us.anthropic.claude-3-5-sonnet-20241022-v2:0",
        temperature=0.7,
        max_tokens=100,
        top_p=0.9,
    )
    response = await llm.acall("Tell me a short fact")
    assert isinstance(response, str)
    assert response is not None
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/llms/bedrock/test_bedrock_async.py",
"license": "MIT License",
"lines": 93,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/src/crewai/events/types/system_events.py | """System signal event types for CrewAI.
This module contains event types for system-level signals like SIGTERM,
allowing listeners to perform cleanup operations before process termination.
"""
from collections.abc import Callable
from enum import IntEnum
import signal
from typing import Annotated, Literal, TypeVar
from pydantic import Field, TypeAdapter
from crewai.events.base_events import BaseEvent
class SignalType(IntEnum):
    """Enumeration of supported system signals."""

    SIGTERM = signal.SIGTERM
    SIGINT = signal.SIGINT
    # SIGHUP/SIGTSTP/SIGCONT do not exist on Windows; fall back to their
    # conventional POSIX numbers so the enum still imports there.
    SIGHUP = getattr(signal, "SIGHUP", 1)
    SIGTSTP = getattr(signal, "SIGTSTP", 20)
    SIGCONT = getattr(signal, "SIGCONT", 18)
class SigTermEvent(BaseEvent):
    """Event emitted when SIGTERM is received."""

    # Discriminator used by the SignalEvent tagged union.
    type: Literal["SIGTERM"] = "SIGTERM"
    signal_number: SignalType = SignalType.SIGTERM
    # Optional human-readable explanation supplied by the emitter.
    reason: str | None = None
class SigIntEvent(BaseEvent):
    """Event emitted when SIGINT is received."""

    # Discriminator used by the SignalEvent tagged union.
    type: Literal["SIGINT"] = "SIGINT"
    signal_number: SignalType = SignalType.SIGINT
    # Optional human-readable explanation supplied by the emitter.
    reason: str | None = None
class SigHupEvent(BaseEvent):
    """Event emitted when SIGHUP is received."""

    # Discriminator used by the SignalEvent tagged union.
    type: Literal["SIGHUP"] = "SIGHUP"
    signal_number: SignalType = SignalType.SIGHUP
    # Optional human-readable explanation supplied by the emitter.
    reason: str | None = None
class SigTStpEvent(BaseEvent):
    """Event emitted when SIGTSTP is received.

    Note: SIGSTOP cannot be caught - it immediately suspends the process.
    """

    # Discriminator used by the SignalEvent tagged union.
    type: Literal["SIGTSTP"] = "SIGTSTP"
    signal_number: SignalType = SignalType.SIGTSTP
    # Optional human-readable explanation supplied by the emitter.
    reason: str | None = None
class SigContEvent(BaseEvent):
    """Event emitted when SIGCONT is received."""

    # Discriminator used by the SignalEvent tagged union.
    type: Literal["SIGCONT"] = "SIGCONT"
    signal_number: SignalType = SignalType.SIGCONT
    # Optional human-readable explanation supplied by the emitter.
    reason: str | None = None
# Discriminated union of every signal event; Pydantic dispatches on the
# `type` field when validating payloads.
SignalEvent = Annotated[
    SigTermEvent | SigIntEvent | SigHupEvent | SigTStpEvent | SigContEvent,
    Field(discriminator="type"),
]
# Reusable adapter for parsing/serializing SignalEvent payloads.
signal_event_adapter: TypeAdapter[SignalEvent] = TypeAdapter(SignalEvent)
# The concrete event classes, iterated by on_signal() to register handlers.
SIGNAL_EVENT_TYPES: tuple[type[BaseEvent], ...] = (
    SigTermEvent,
    SigIntEvent,
    SigHupEvent,
    SigTStpEvent,
    SigContEvent,
)
# Handler signature accepted by on_signal: (source, event) -> None.
T = TypeVar("T", bound=Callable[[object, SignalEvent], None])
def on_signal(func: T) -> T:
    """Decorator to register a handler for all signal events.

    Args:
        func: Handler function that receives (source, event) arguments.

    Returns:
        The original function, registered for all signal event types.
    """
    # Imported lazily so importing this module does not pull in the event
    # bus at load time.
    from crewai.events.event_bus import crewai_event_bus

    for signal_event_cls in SIGNAL_EVENT_TYPES:
        crewai_event_bus.on(signal_event_cls)(func)
    return func
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/events/types/system_events.py",
"license": "MIT License",
"lines": 68,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai/tests/events/types/test_system_events.py | """Tests for system signal events."""
import signal
from unittest.mock import MagicMock, patch
import pytest
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.system_events import (
SIGNAL_EVENT_TYPES,
SignalEvent,
SignalType,
SigContEvent,
SigHupEvent,
SigIntEvent,
SigTermEvent,
SigTStpEvent,
on_signal,
signal_event_adapter,
)
class TestSignalType:
    """Tests for SignalType enum."""

    def test_signal_type_values(self) -> None:
        """Verify SignalType maps to correct signal numbers."""
        expected = {
            SignalType.SIGTERM: signal.SIGTERM,
            SignalType.SIGINT: signal.SIGINT,
            SignalType.SIGHUP: getattr(signal, "SIGHUP", 1),
            SignalType.SIGTSTP: getattr(signal, "SIGTSTP", 20),
            SignalType.SIGCONT: getattr(signal, "SIGCONT", 18),
        }
        for member, number in expected.items():
            assert member == number
class TestSignalEvents:
    """Tests for individual signal event classes."""

    def test_sigterm_event_defaults(self) -> None:
        """Test SigTermEvent has correct defaults."""
        evt = SigTermEvent()
        assert (evt.type, evt.signal_number, evt.reason) == (
            "SIGTERM",
            SignalType.SIGTERM,
            None,
        )

    def test_sigterm_event_with_reason(self) -> None:
        """Test SigTermEvent can be created with a reason."""
        assert SigTermEvent(reason="graceful shutdown").reason == "graceful shutdown"

    def test_sigint_event_defaults(self) -> None:
        """Test SigIntEvent has correct defaults."""
        evt = SigIntEvent()
        assert (evt.type, evt.signal_number) == ("SIGINT", SignalType.SIGINT)

    def test_sighup_event_defaults(self) -> None:
        """Test SigHupEvent has correct defaults."""
        evt = SigHupEvent()
        assert (evt.type, evt.signal_number) == ("SIGHUP", SignalType.SIGHUP)

    def test_sigtstp_event_defaults(self) -> None:
        """Test SigTStpEvent has correct defaults."""
        evt = SigTStpEvent()
        assert (evt.type, evt.signal_number) == ("SIGTSTP", SignalType.SIGTSTP)

    def test_sigcont_event_defaults(self) -> None:
        """Test SigContEvent has correct defaults."""
        evt = SigContEvent()
        assert (evt.type, evt.signal_number) == ("SIGCONT", SignalType.SIGCONT)
class TestSignalEventAdapter:
    """Tests for the Pydantic discriminated union adapter."""

    def test_adapter_parses_sigterm(self) -> None:
        """Test adapter correctly parses SIGTERM event."""
        parsed = signal_event_adapter.validate_python(
            {"type": "SIGTERM", "reason": "test"}
        )
        assert isinstance(parsed, SigTermEvent)
        assert parsed.reason == "test"

    def test_adapter_parses_sigint(self) -> None:
        """Test adapter correctly parses SIGINT event."""
        parsed = signal_event_adapter.validate_python({"type": "SIGINT"})
        assert isinstance(parsed, SigIntEvent)

    def test_adapter_parses_sighup(self) -> None:
        """Test adapter correctly parses SIGHUP event."""
        parsed = signal_event_adapter.validate_python({"type": "SIGHUP"})
        assert isinstance(parsed, SigHupEvent)

    def test_adapter_parses_sigtstp(self) -> None:
        """Test adapter correctly parses SIGTSTP event."""
        parsed = signal_event_adapter.validate_python({"type": "SIGTSTP"})
        assert isinstance(parsed, SigTStpEvent)

    def test_adapter_parses_sigcont(self) -> None:
        """Test adapter correctly parses SIGCONT event."""
        parsed = signal_event_adapter.validate_python({"type": "SIGCONT"})
        assert isinstance(parsed, SigContEvent)

    def test_adapter_rejects_invalid_type(self) -> None:
        """Test adapter rejects unknown signal type."""
        with pytest.raises(Exception):
            signal_event_adapter.validate_python({"type": "SIGKILL"})
class TestSignalEventTypes:
    """Tests for SIGNAL_EVENT_TYPES constant."""

    def test_contains_all_event_types(self) -> None:
        """Verify SIGNAL_EVENT_TYPES contains all signal events."""
        expected = {SigTermEvent, SigIntEvent, SigHupEvent, SigTStpEvent, SigContEvent}
        assert set(SIGNAL_EVENT_TYPES) == expected
        assert len(SIGNAL_EVENT_TYPES) == 5
class TestOnSignalDecorator:
    """Tests for the @on_signal decorator."""

    def test_decorator_registers_for_all_signals(self) -> None:
        """Test that @on_signal registers handler for all signal event types."""
        import threading
        received_types: set[str] = set()
        # Handlers may fire on a bus worker thread, so results are collected
        # under a condition variable rather than asserted right after emit.
        condition = threading.Condition()
        expected_count = len(SIGNAL_EVENT_TYPES)
        @on_signal
        def test_handler(source: object, event: SignalEvent) -> None:
            with condition:
                received_types.add(event.type)
                condition.notify_all()
        for event_class in SIGNAL_EVENT_TYPES:
            crewai_event_bus.emit(self, event_class())
        # Bounded wait: the timeout keeps the test from hanging forever if a
        # registration is missing; the asserts below then report which one.
        with condition:
            condition.wait_for(lambda: len(received_types) >= expected_count, timeout=5.0)
        assert "SIGTERM" in received_types
        assert "SIGINT" in received_types
        assert "SIGHUP" in received_types
        assert "SIGTSTP" in received_types
        assert "SIGCONT" in received_types

    def test_decorator_returns_original_function(self) -> None:
        """Test that @on_signal returns the original function."""
        def my_handler(source: object, event: SignalEvent) -> None:
            pass
        decorated = on_signal(my_handler)
        assert decorated is my_handler

    def test_decorator_preserves_function_name(self) -> None:
        """Test that @on_signal preserves function metadata."""
        @on_signal
        def my_named_handler(source: object, event: SignalEvent) -> None:
            """My docstring."""
            pass
        assert my_named_handler.__name__ == "my_named_handler"
        assert my_named_handler.__doc__ == "My docstring."
class TestSignalEventSerialization:
"""Tests for event serialization."""
def test_sigterm_to_dict(self) -> None:
    """Test SigTermEvent serializes correctly."""
    dumped = SigTermEvent(reason="test reason").model_dump()
    assert dumped["type"] == "SIGTERM"
    assert dumped["signal_number"] == signal.SIGTERM
    assert dumped["reason"] == "test reason"
def test_roundtrip_serialization(self) -> None:
"""Test events can be serialized and deserialized."""
original = SigTermEvent(reason="roundtrip test")
serialized = original.model_dump()
restored = signal_event_adapter.validate_python(serialized)
assert isinstance(restored, SigTermEvent)
assert restored.reason == original.reason
assert restored.type == original.type | {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/events/types/test_system_events.py",
"license": "MIT License",
"lines": 154,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai-tools/tests/tools/rag/test_rag_tool_add_data_type.py | """Tests for RagTool.add() method with various data_type values."""
from pathlib import Path
from tempfile import TemporaryDirectory
from unittest.mock import MagicMock, Mock, patch
import pytest
from crewai_tools.rag.data_types import DataType
from crewai_tools.tools.rag.rag_tool import RagTool
@pytest.fixture
def mock_rag_client() -> MagicMock:
    """Create a mock RAG client for testing."""
    client = MagicMock()
    # Methods the adapter is expected to call; search returns no hits.
    client.get_or_create_collection = MagicMock(return_value=None)
    client.add_documents = MagicMock(return_value=None)
    client.search = MagicMock(return_value=[])
    return client
@pytest.fixture
def rag_tool(mock_rag_client: MagicMock) -> RagTool:
    """Create a RagTool instance with mocked client."""
    get_client_patch = patch(
        "crewai_tools.adapters.crewai_rag_adapter.get_rag_client",
        return_value=mock_rag_client,
    )
    create_client_patch = patch(
        "crewai_tools.adapters.crewai_rag_adapter.create_client",
        return_value=mock_rag_client,
    )
    # Both client factories must be stubbed while the tool is constructed.
    with get_client_patch, create_client_patch:
        return RagTool()
class TestDataTypeFileAlias:
    """Tests for data_type='file' alias."""

    def test_file_alias_with_existing_file(
        self, rag_tool: RagTool, mock_rag_client: MagicMock
    ) -> None:
        """Test that data_type='file' works with existing files."""
        with TemporaryDirectory() as workdir:
            doc = Path(workdir) / "test.txt"
            doc.write_text("Test content for file alias.")
            rag_tool.add(path=str(doc), data_type="file")
            assert mock_rag_client.add_documents.called

    def test_file_alias_with_nonexistent_file_raises_error(
        self, rag_tool: RagTool
    ) -> None:
        """Test that data_type='file' raises FileNotFoundError for missing files."""
        missing = "nonexistent/path/to/file.pdf"
        with pytest.raises(FileNotFoundError, match="File does not exist"):
            rag_tool.add(path=missing, data_type="file")

    def test_file_alias_with_path_keyword(
        self, rag_tool: RagTool, mock_rag_client: MagicMock
    ) -> None:
        """Test that path keyword argument works with data_type='file'."""
        with TemporaryDirectory() as workdir:
            doc = Path(workdir) / "document.txt"
            doc.write_text("Content via path keyword.")
            rag_tool.add(data_type="file", path=str(doc))
            assert mock_rag_client.add_documents.called

    def test_file_alias_with_file_path_keyword(
        self, rag_tool: RagTool, mock_rag_client: MagicMock
    ) -> None:
        """Test that file_path keyword argument works with data_type='file'."""
        with TemporaryDirectory() as workdir:
            doc = Path(workdir) / "document.txt"
            doc.write_text("Content via file_path keyword.")
            rag_tool.add(data_type="file", file_path=str(doc))
            assert mock_rag_client.add_documents.called
class TestDataTypeStringValues:
    """Tests for data_type as string values matching DataType enum."""

    def test_pdf_file_string(
        self, rag_tool: RagTool, mock_rag_client: MagicMock
    ) -> None:
        """Test data_type='pdf_file' with existing PDF file."""
        with TemporaryDirectory() as tmpdir:
            # Create a minimal valid PDF file
            test_file = Path(tmpdir) / "test.pdf"
            test_file.write_bytes(
                b"%PDF-1.4\n1 0 obj\n<<\n/Type /Catalog\n>>\nendobj\ntrailer\n"
                b"<<\n/Root 1 0 R\n>>\n%%EOF"
            )
            # Mock the PDF loader to avoid actual PDF parsing
            with patch(
                "crewai_tools.adapters.crewai_rag_adapter.DataType.get_loader"
            ) as mock_loader:
                mock_loader_instance = MagicMock()
                mock_loader_instance.load.return_value = MagicMock(
                    content="PDF content", metadata={}, doc_id="test-id"
                )
                mock_loader.return_value = mock_loader_instance
                rag_tool.add(path=str(test_file), data_type="pdf_file")
                assert mock_rag_client.add_documents.called

    def test_text_file_string(
        self, rag_tool: RagTool, mock_rag_client: MagicMock
    ) -> None:
        """Test data_type='text_file' with existing text file."""
        with TemporaryDirectory() as tmpdir:
            test_file = Path(tmpdir) / "test.txt"
            test_file.write_text("Plain text content.")
            rag_tool.add(path=str(test_file), data_type="text_file")
            assert mock_rag_client.add_documents.called

    def test_csv_string(self, rag_tool: RagTool, mock_rag_client: MagicMock) -> None:
        """Test data_type='csv' with existing CSV file."""
        with TemporaryDirectory() as tmpdir:
            test_file = Path(tmpdir) / "test.csv"
            test_file.write_text("name,value\nfoo,1\nbar,2")
            rag_tool.add(path=str(test_file), data_type="csv")
            assert mock_rag_client.add_documents.called

    def test_json_string(self, rag_tool: RagTool, mock_rag_client: MagicMock) -> None:
        """Test data_type='json' with existing JSON file."""
        with TemporaryDirectory() as tmpdir:
            test_file = Path(tmpdir) / "test.json"
            test_file.write_text('{"key": "value", "items": [1, 2, 3]}')
            rag_tool.add(path=str(test_file), data_type="json")
            assert mock_rag_client.add_documents.called

    def test_xml_string(self, rag_tool: RagTool, mock_rag_client: MagicMock) -> None:
        """Test data_type='xml' with existing XML file."""
        with TemporaryDirectory() as tmpdir:
            test_file = Path(tmpdir) / "test.xml"
            test_file.write_text('<?xml version="1.0"?><root><item>value</item></root>')
            rag_tool.add(path=str(test_file), data_type="xml")
            assert mock_rag_client.add_documents.called

    def test_mdx_string(self, rag_tool: RagTool, mock_rag_client: MagicMock) -> None:
        """Test data_type='mdx' with existing MDX file."""
        with TemporaryDirectory() as tmpdir:
            test_file = Path(tmpdir) / "test.mdx"
            test_file.write_text("# Heading\n\nSome markdown content.")
            rag_tool.add(path=str(test_file), data_type="mdx")
            assert mock_rag_client.add_documents.called

    def test_text_string(self, rag_tool: RagTool, mock_rag_client: MagicMock) -> None:
        """Test data_type='text' with raw text content."""
        # Raw text needs no backing file, so no tmpdir here.
        rag_tool.add("This is raw text content.", data_type="text")
        assert mock_rag_client.add_documents.called

    def test_directory_string(
        self, rag_tool: RagTool, mock_rag_client: MagicMock
    ) -> None:
        """Test data_type='directory' with existing directory."""
        with TemporaryDirectory() as tmpdir:
            # Create some files in the directory
            (Path(tmpdir) / "file1.txt").write_text("Content 1")
            (Path(tmpdir) / "file2.txt").write_text("Content 2")
            rag_tool.add(path=tmpdir, data_type="directory")
            assert mock_rag_client.add_documents.called
class TestDataTypeEnumValues:
    """Tests for data_type as DataType enum values."""

    def test_datatype_file_enum_with_existing_file(
        self, rag_tool: RagTool, mock_rag_client: MagicMock
    ) -> None:
        """Test data_type=DataType.FILE with existing file (auto-detect)."""
        with TemporaryDirectory() as tmpdir:
            test_file = Path(tmpdir) / "test.txt"
            test_file.write_text("File enum auto-detect content.")
            rag_tool.add(str(test_file), data_type=DataType.FILE)
            assert mock_rag_client.add_documents.called

    def test_datatype_file_enum_with_nonexistent_file_raises_error(
        self, rag_tool: RagTool
    ) -> None:
        """Test data_type=DataType.FILE raises FileNotFoundError for missing files."""
        with pytest.raises(FileNotFoundError, match="File does not exist"):
            rag_tool.add("nonexistent/file.pdf", data_type=DataType.FILE)

    def test_datatype_pdf_file_enum(
        self, rag_tool: RagTool, mock_rag_client: MagicMock
    ) -> None:
        """Test data_type=DataType.PDF_FILE with existing file."""
        with TemporaryDirectory() as tmpdir:
            # Minimal byte payload that passes file-existence checks;
            # real parsing is stubbed out below.
            test_file = Path(tmpdir) / "test.pdf"
            test_file.write_bytes(
                b"%PDF-1.4\n1 0 obj\n<<\n/Type /Catalog\n>>\nendobj\ntrailer\n"
                b"<<\n/Root 1 0 R\n>>\n%%EOF"
            )
            with patch(
                "crewai_tools.adapters.crewai_rag_adapter.DataType.get_loader"
            ) as mock_loader:
                mock_loader_instance = MagicMock()
                mock_loader_instance.load.return_value = MagicMock(
                    content="PDF content", metadata={}, doc_id="test-id"
                )
                mock_loader.return_value = mock_loader_instance
                rag_tool.add(str(test_file), data_type=DataType.PDF_FILE)
                assert mock_rag_client.add_documents.called

    def test_datatype_text_file_enum(
        self, rag_tool: RagTool, mock_rag_client: MagicMock
    ) -> None:
        """Test data_type=DataType.TEXT_FILE with existing file."""
        with TemporaryDirectory() as tmpdir:
            test_file = Path(tmpdir) / "test.txt"
            test_file.write_text("Text file content.")
            rag_tool.add(str(test_file), data_type=DataType.TEXT_FILE)
            assert mock_rag_client.add_documents.called

    def test_datatype_text_enum(
        self, rag_tool: RagTool, mock_rag_client: MagicMock
    ) -> None:
        """Test data_type=DataType.TEXT with raw text."""
        rag_tool.add("Raw text using enum.", data_type=DataType.TEXT)
        assert mock_rag_client.add_documents.called

    def test_datatype_directory_enum(
        self, rag_tool: RagTool, mock_rag_client: MagicMock
    ) -> None:
        """Test data_type=DataType.DIRECTORY with existing directory."""
        with TemporaryDirectory() as tmpdir:
            (Path(tmpdir) / "file.txt").write_text("Directory file content.")
            rag_tool.add(tmpdir, data_type=DataType.DIRECTORY)
            assert mock_rag_client.add_documents.called
class TestInvalidDataType:
    """Tests for invalid data_type values."""

    def test_invalid_string_data_type_raises_error(self, rag_tool: RagTool) -> None:
        """Test that invalid string data_type raises ValueError."""
        with pytest.raises(ValueError, match="Invalid data_type"):
            rag_tool.add("some content", data_type="invalid_type")

    def test_invalid_data_type_error_message_contains_valid_values(
        self, rag_tool: RagTool
    ) -> None:
        """Test that error message lists valid data_type values."""
        with pytest.raises(ValueError) as exc_info:
            rag_tool.add("some content", data_type="not_a_type")
        message = str(exc_info.value)
        # The error should enumerate the accepted type names.
        for expected_name in ("file", "pdf_file", "text_file"):
            assert expected_name in message
class TestFileExistenceValidation:
    """Tests for file existence validation."""

    def test_pdf_file_not_found_raises_error(self, rag_tool: RagTool) -> None:
        """Test that non-existent PDF file raises FileNotFoundError."""
        missing = "nonexistent.pdf"
        with pytest.raises(FileNotFoundError, match="File does not exist"):
            rag_tool.add(path=missing, data_type="pdf_file")

    def test_text_file_not_found_raises_error(self, rag_tool: RagTool) -> None:
        """Test that non-existent text file raises FileNotFoundError."""
        missing = "nonexistent.txt"
        with pytest.raises(FileNotFoundError, match="File does not exist"):
            rag_tool.add(path=missing, data_type="text_file")

    def test_csv_file_not_found_raises_error(self, rag_tool: RagTool) -> None:
        """Test that non-existent CSV file raises FileNotFoundError."""
        missing = "nonexistent.csv"
        with pytest.raises(FileNotFoundError, match="File does not exist"):
            rag_tool.add(path=missing, data_type="csv")

    def test_json_file_not_found_raises_error(self, rag_tool: RagTool) -> None:
        """Test that non-existent JSON file raises FileNotFoundError."""
        missing = "nonexistent.json"
        with pytest.raises(FileNotFoundError, match="File does not exist"):
            rag_tool.add(path=missing, data_type="json")

    def test_xml_file_not_found_raises_error(self, rag_tool: RagTool) -> None:
        """Test that non-existent XML file raises FileNotFoundError."""
        missing = "nonexistent.xml"
        with pytest.raises(FileNotFoundError, match="File does not exist"):
            rag_tool.add(path=missing, data_type="xml")

    def test_docx_file_not_found_raises_error(self, rag_tool: RagTool) -> None:
        """Test that non-existent DOCX file raises FileNotFoundError."""
        missing = "nonexistent.docx"
        with pytest.raises(FileNotFoundError, match="File does not exist"):
            rag_tool.add(path=missing, data_type="docx")

    def test_mdx_file_not_found_raises_error(self, rag_tool: RagTool) -> None:
        """Test that non-existent MDX file raises FileNotFoundError."""
        missing = "nonexistent.mdx"
        with pytest.raises(FileNotFoundError, match="File does not exist"):
            rag_tool.add(path=missing, data_type="mdx")

    def test_directory_not_found_raises_error(self, rag_tool: RagTool) -> None:
        """Test that non-existent directory raises ValueError."""
        # Directories raise ValueError, unlike single files above.
        missing = "nonexistent/directory"
        with pytest.raises(ValueError, match="Directory does not exist"):
            rag_tool.add(path=missing, data_type="directory")
class TestKeywordArgumentVariants:
    """Tests for different keyword argument combinations."""

    def test_positional_argument_with_data_type(
        self, rag_tool: RagTool, mock_rag_client: MagicMock
    ) -> None:
        """Test positional argument with data_type."""
        with TemporaryDirectory() as workdir:
            doc = Path(workdir) / "test.txt"
            doc.write_text("Positional arg content.")
            rag_tool.add(str(doc), data_type="text_file")
            assert mock_rag_client.add_documents.called

    def test_path_keyword_with_data_type(
        self, rag_tool: RagTool, mock_rag_client: MagicMock
    ) -> None:
        """Test path keyword argument with data_type."""
        with TemporaryDirectory() as workdir:
            doc = Path(workdir) / "test.txt"
            doc.write_text("Path keyword content.")
            rag_tool.add(path=str(doc), data_type="text_file")
            assert mock_rag_client.add_documents.called

    def test_file_path_keyword_with_data_type(
        self, rag_tool: RagTool, mock_rag_client: MagicMock
    ) -> None:
        """Test file_path keyword argument with data_type."""
        with TemporaryDirectory() as workdir:
            doc = Path(workdir) / "test.txt"
            doc.write_text("File path keyword content.")
            rag_tool.add(file_path=str(doc), data_type="text_file")
            assert mock_rag_client.add_documents.called

    def test_directory_path_keyword(
        self, rag_tool: RagTool, mock_rag_client: MagicMock
    ) -> None:
        """Test directory_path keyword argument."""
        with TemporaryDirectory() as workdir:
            (Path(workdir) / "file.txt").write_text("Directory content.")
            rag_tool.add(directory_path=workdir)
            assert mock_rag_client.add_documents.called
class TestAutoDetection:
    """Tests for auto-detection of data type from content."""

    def test_auto_detect_nonexistent_file_raises_error(self, rag_tool: RagTool) -> None:
        """Test that auto-detection raises FileNotFoundError for missing files."""
        with pytest.raises(FileNotFoundError, match="File does not exist"):
            rag_tool.add("path/to/document.pdf")

    def test_auto_detect_txt_file(
        self, rag_tool: RagTool, mock_rag_client: MagicMock
    ) -> None:
        """Test auto-detection of .txt file type."""
        with TemporaryDirectory() as workdir:
            doc = Path(workdir) / "auto.txt"
            doc.write_text("Auto-detected text file.")
            # data_type is intentionally omitted to exercise auto-detection.
            rag_tool.add(str(doc))
            assert mock_rag_client.add_documents.called

    def test_auto_detect_csv_file(
        self, rag_tool: RagTool, mock_rag_client: MagicMock
    ) -> None:
        """Test auto-detection of .csv file type."""
        with TemporaryDirectory() as workdir:
            doc = Path(workdir) / "auto.csv"
            doc.write_text("col1,col2\nval1,val2")
            rag_tool.add(str(doc))
            assert mock_rag_client.add_documents.called

    def test_auto_detect_json_file(
        self, rag_tool: RagTool, mock_rag_client: MagicMock
    ) -> None:
        """Test auto-detection of .json file type."""
        with TemporaryDirectory() as workdir:
            doc = Path(workdir) / "auto.json"
            doc.write_text('{"auto": "detected"}')
            rag_tool.add(str(doc))
            assert mock_rag_client.add_documents.called

    def test_auto_detect_directory(
        self, rag_tool: RagTool, mock_rag_client: MagicMock
    ) -> None:
        """Test auto-detection of directory type."""
        with TemporaryDirectory() as workdir:
            (Path(workdir) / "file.txt").write_text("Auto-detected directory.")
            rag_tool.add(workdir)
            assert mock_rag_client.add_documents.called

    def test_auto_detect_raw_text(
        self, rag_tool: RagTool, mock_rag_client: MagicMock
    ) -> None:
        """Test auto-detection of raw text (non-file content)."""
        rag_tool.add("Just some raw text content")
        assert mock_rag_client.add_documents.called
class TestMetadataHandling:
"""Tests for metadata handling with data_type."""
def test_metadata_passed_to_documents(
self, rag_tool: RagTool, mock_rag_client: MagicMock
) -> None:
"""Test that metadata is properly passed to documents."""
with TemporaryDirectory() as tmpdir:
test_file = Path(tmpdir) / "test.txt"
test_file.write_text("Content with metadata.")
rag_tool.add(
path=str(test_file),
data_type="text_file",
metadata={"custom_key": "custom_value"},
)
assert mock_rag_client.add_documents.called
call_args = mock_rag_client.add_documents.call_args
documents = call_args.kwargs.get("documents", call_args.args[0] if call_args.args else [])
# Check that at least one document has the custom metadata
assert any(
doc.get("metadata", {}).get("custom_key") == "custom_value"
for doc in documents
) | {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/tests/tools/rag/test_rag_tool_add_data_type.py",
"license": "MIT License",
"lines": 356,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/tests/mcp/test_sse_transport.py | """Tests for SSE transport."""
import pytest
from crewai.mcp.transports.sse import SSETransport
@pytest.mark.asyncio
async def test_sse_transport_connect_does_not_pass_invalid_args():
"""Test that SSETransport.connect() doesn't pass invalid args to sse_client.
The sse_client function does not accept terminate_on_close parameter.
"""
transport = SSETransport(
url="http://localhost:9999/sse",
headers={"Authorization": "Bearer test"},
)
with pytest.raises(ConnectionError) as exc_info:
await transport.connect()
assert "unexpected keyword argument" not in str(exc_info.value) | {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/mcp/test_sse_transport.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/rag/types.py | """Type definitions for RAG tool configuration."""
from pathlib import Path
from typing import Any, Literal, TypeAlias
from crewai.rag.embeddings.types import ProviderSpec
from typing_extensions import TypedDict
from crewai_tools.rag.data_types import DataType
# String spellings accepted wherever a data type is passed by name; each value
# mirrors a DataType enum member (see crewai_tools.rag.data_types.DataType).
DataTypeStr: TypeAlias = Literal[
    "file",
    "pdf_file",
    "text_file",
    "csv",
    "json",
    "xml",
    "docx",
    "mdx",
    "mysql",
    "postgres",
    "github",
    "directory",
    "website",
    "docs_site",
    "youtube_video",
    "youtube_channel",
    "text",
]

# One piece of addable content: raw text, a filesystem path, or a mapping of
# pre-built document fields.
ContentItem: TypeAlias = str | Path | dict[str, Any]
class AddDocumentParams(TypedDict, total=False):
    """Parameters for adding documents to the RAG system.

    All keys are optional (``total=False``); callers supply whichever subset
    matches their content source.

    Attributes:
        data_type: Content type, as a DataType member or its string form.
        metadata: Arbitrary metadata to attach to the stored documents.
        path: Filesystem path to the content.
        file_path: Alternate keyword for a file path.
        website: Website URL source.
        url: Generic URL source.
        github_url: GitHub repository URL source.
        youtube_url: YouTube video URL source.
        directory_path: Path to a directory of content.
    """

    data_type: DataType | DataTypeStr
    metadata: dict[str, Any]
    path: str | Path
    file_path: str | Path
    website: str
    url: str
    github_url: str
    youtube_url: str
    directory_path: str | Path
class VectorDbConfig(TypedDict):
    """Configuration for vector database provider.

    Unlike the other TypedDicts in this module, both keys are required
    (no ``total=False``).

    Attributes:
        provider: RAG provider literal.
        config: RAG configuration options.
    """

    provider: Literal["chromadb", "qdrant"]
    config: dict[str, Any]
class RagToolConfig(TypedDict, total=False):
    """Configuration accepted by RAG tools.

    Supports embedding model and vector database configuration; both keys
    are optional (``total=False``).

    Attributes:
        embedding_model: Embedding model configuration accepted by RAG tools.
        vectordb: Vector database configuration accepted by RAG tools.
    """

    embedding_model: ProviderSpec
    vectordb: VectorDbConfig
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/rag/types.py",
"license": "MIT License",
"lines": 54,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai-tools/tests/tools/rag/test_rag_tool_validation.py | """Tests for improved RAG tool validation error messages."""
from unittest.mock import MagicMock, Mock, patch
import pytest
from pydantic import ValidationError
from crewai_tools.tools.rag.rag_tool import RagTool
@patch("crewai_tools.adapters.crewai_rag_adapter.create_client")
def test_azure_missing_deployment_id_gives_clear_error(mock_create_client: Mock) -> None:
    """Test that missing deployment_id for Azure gives a clear, focused error message."""
    fake_client = MagicMock()
    fake_client.get_or_create_collection = MagicMock(return_value=None)
    mock_create_client.return_value = fake_client

    class MyTool(RagTool):
        pass

    # deployment_id deliberately omitted from the Azure config.
    config = {
        "embedding_model": {
            "provider": "azure",
            "config": {
                "api_base": "http://localhost:4000/v1",
                "api_key": "test-key",
                "api_version": "2024-02-01",
            },
        }
    }

    with pytest.raises(ValueError) as exc_info:
        MyTool(config=config)

    error_text = str(exc_info.value).lower()
    assert "azure" in error_text
    assert "deployment_id" in error_text
    # The message must stay focused on Azure, not dump every provider's schema.
    for other_provider in ("bedrock", "cohere", "huggingface"):
        assert other_provider not in error_text
@patch("crewai_tools.adapters.crewai_rag_adapter.create_client")
def test_valid_azure_config_works(mock_create_client: Mock) -> None:
"""Test that valid Azure config works without errors."""
mock_client = MagicMock()
mock_client.get_or_create_collection = MagicMock(return_value=None)
mock_create_client.return_value = mock_client
class MyTool(RagTool):
pass
config = {
"embedding_model": {
"provider": "azure",
"config": {
"api_base": "http://localhost:4000/v1",
"api_key": "test-key",
"api_version": "2024-02-01",
"deployment_id": "text-embedding-3-small",
},
}
}
tool = MyTool(config=config)
assert tool is not None | {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/tests/tools/rag/test_rag_tool_validation.py",
"license": "MIT License",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai-tools/tests/tools/test_pdf_search_tool_config.py | from unittest.mock import MagicMock, Mock, patch
from crewai_tools.adapters.crewai_rag_adapter import CrewAIRagAdapter
from crewai_tools.tools.pdf_search_tool.pdf_search_tool import PDFSearchTool
@patch("crewai_tools.adapters.crewai_rag_adapter.create_client")
def test_pdf_search_tool_with_azure_config_without_env_vars(
    mock_create_client: Mock,
) -> None:
    """Test PDFSearchTool accepts Azure config without requiring env vars.

    This verifies the fix for the reported issue where PDFSearchTool would
    throw a validation error:
        pydantic_core._pydantic_core.ValidationError: 1 validation error for PDFSearchTool
        EMBEDDINGS_OPENAI_API_KEY
        Field required [type=missing, input_value={}, input_type=dict]
    """
    fake_embedder = MagicMock()
    fake_embedder.return_value = [[0.1] * 1536]
    fake_client = MagicMock()
    fake_client.get_or_create_collection = MagicMock(return_value=None)
    mock_create_client.return_value = fake_client

    # This is the exact config format from the bug report.
    config = {
        "embedding_model": {
            "provider": "azure",
            "config": {
                "model": "text-embedding-3-small",
                "api_key": "test-litellm-api-key",
                "api_base": "https://test.litellm.proxy/",
                "api_version": "2024-02-01",
                "api_type": "azure",
                "deployment_id": "test-deployment",
            },
        }
    }

    # Patch the embedding function builder to avoid actual API calls.
    embedder_patch = patch(
        "crewai_tools.tools.rag.rag_tool.build_embedder",
        return_value=fake_embedder,
    )
    with embedder_patch:
        # Construction must not raise a missing-env-var validation error.
        tool = PDFSearchTool(config=config)
        assert tool.adapter is not None
        assert isinstance(tool.adapter, CrewAIRagAdapter)
        assert tool.name == "Search a PDF's content"
@patch("crewai_tools.adapters.crewai_rag_adapter.create_client")
def test_pdf_search_tool_with_openai_config_without_env_vars(
    mock_create_client: Mock,
) -> None:
    """Test PDFSearchTool accepts OpenAI config without requiring env vars."""
    fake_embedder = MagicMock()
    fake_embedder.return_value = [[0.1] * 1536]
    fake_client = MagicMock()
    fake_client.get_or_create_collection = MagicMock(return_value=None)
    mock_create_client.return_value = fake_client

    config = {
        "embedding_model": {
            "provider": "openai",
            "config": {
                "model": "text-embedding-3-small",
                "api_key": "sk-test123",
            },
        }
    }

    with patch(
        "crewai_tools.tools.rag.rag_tool.build_embedder",
        return_value=fake_embedder,
    ):
        tool = PDFSearchTool(config=config)
        assert tool.adapter is not None
        assert isinstance(tool.adapter, CrewAIRagAdapter)
@patch("crewai_tools.adapters.crewai_rag_adapter.create_client")
def test_pdf_search_tool_with_vectordb_and_embedding_config(
mock_create_client: Mock,
) -> None:
"""Test PDFSearchTool with both vector DB and embedding config."""
mock_embedding_func = MagicMock()
mock_embedding_func.return_value = [[0.1] * 1536]
mock_client = MagicMock()
mock_client.get_or_create_collection = MagicMock(return_value=None)
mock_create_client.return_value = mock_client
with patch(
"crewai_tools.tools.rag.rag_tool.build_embedder",
return_value=mock_embedding_func,
):
config = {
"vectordb": {"provider": "chromadb", "config": {}},
"embedding_model": {
"provider": "openai",
"config": {
"model": "text-embedding-3-large",
"api_key": "sk-test-key",
},
},
}
tool = PDFSearchTool(config=config)
assert tool.adapter is not None
assert isinstance(tool.adapter, CrewAIRagAdapter) | {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/tests/tools/test_pdf_search_tool_config.py",
"license": "MIT License",
"lines": 96,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai-tools/tests/tools/test_txt_search_tool_config.py | from unittest.mock import MagicMock, Mock, patch
from crewai_tools.adapters.crewai_rag_adapter import CrewAIRagAdapter
from crewai_tools.tools.txt_search_tool.txt_search_tool import TXTSearchTool
@patch("crewai_tools.adapters.crewai_rag_adapter.create_client")
def test_txt_search_tool_with_azure_config_without_env_vars(
    mock_create_client: Mock,
) -> None:
    """Test TXTSearchTool accepts Azure config without requiring env vars."""
    fake_embedder = MagicMock()
    fake_embedder.return_value = [[0.1] * 1536]
    fake_client = MagicMock()
    fake_client.get_or_create_collection = MagicMock(return_value=None)
    mock_create_client.return_value = fake_client

    config = {
        "embedding_model": {
            "provider": "azure",
            "config": {
                "model": "text-embedding-3-small",
                "api_key": "test-api-key",
                "api_base": "https://test.openai.azure.com/",
                "api_version": "2024-02-01",
                "api_type": "azure",
                "deployment_id": "test-deployment",
            },
        }
    }

    with patch(
        "crewai_tools.tools.rag.rag_tool.build_embedder",
        return_value=fake_embedder,
    ):
        # Construction must not raise a missing-env-var validation error.
        tool = TXTSearchTool(config=config)
        assert tool.adapter is not None
        assert isinstance(tool.adapter, CrewAIRagAdapter)
        assert tool.name == "Search a txt's content"
@patch("crewai_tools.adapters.crewai_rag_adapter.create_client")
def test_txt_search_tool_with_openai_config_without_env_vars(
    mock_create_client: Mock,
) -> None:
    """Test TXTSearchTool accepts OpenAI config without requiring env vars."""
    fake_embedder = MagicMock()
    fake_embedder.return_value = [[0.1] * 1536]
    fake_client = MagicMock()
    fake_client.get_or_create_collection = MagicMock(return_value=None)
    mock_create_client.return_value = fake_client

    config = {
        "embedding_model": {
            "provider": "openai",
            "config": {
                "model": "text-embedding-3-small",
                "api_key": "sk-test123",
            },
        }
    }

    with patch(
        "crewai_tools.tools.rag.rag_tool.build_embedder",
        return_value=fake_embedder,
    ):
        tool = TXTSearchTool(config=config)
        assert tool.adapter is not None
        assert isinstance(tool.adapter, CrewAIRagAdapter)
@patch("crewai_tools.adapters.crewai_rag_adapter.create_client")
def test_txt_search_tool_with_cohere_config(mock_create_client: Mock) -> None:
"""Test TXTSearchTool with Cohere embedding provider."""
mock_embedding_func = MagicMock()
mock_embedding_func.return_value = [[0.1] * 1024]
mock_client = MagicMock()
mock_client.get_or_create_collection = MagicMock(return_value=None)
mock_create_client.return_value = mock_client
with patch(
"crewai_tools.tools.rag.rag_tool.build_embedder",
return_value=mock_embedding_func,
):
config = {
"embedding_model": {
"provider": "cohere",
"config": {
"model": "embed-english-v3.0",
"api_key": "test-cohere-key",
},
}
}
tool = TXTSearchTool(config=config)
assert tool.adapter is not None
assert isinstance(tool.adapter, CrewAIRagAdapter) | {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/tests/tools/test_txt_search_tool_config.py",
"license": "MIT License",
"lines": 85,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/tests/rag/embeddings/test_backward_compatibility.py | """Tests for backward compatibility of embedding provider configurations."""
from crewai.rag.embeddings.factory import build_embedder, PROVIDER_PATHS
from crewai.rag.embeddings.providers.openai.openai_provider import OpenAIProvider
from crewai.rag.embeddings.providers.cohere.cohere_provider import CohereProvider
from crewai.rag.embeddings.providers.google.generative_ai import GenerativeAiProvider
from crewai.rag.embeddings.providers.google.vertex import VertexAIProvider
from crewai.rag.embeddings.providers.microsoft.azure import AzureProvider
from crewai.rag.embeddings.providers.jina.jina_provider import JinaProvider
from crewai.rag.embeddings.providers.ollama.ollama_provider import OllamaProvider
from crewai.rag.embeddings.providers.aws.bedrock import BedrockProvider
from crewai.rag.embeddings.providers.text2vec.text2vec_provider import Text2VecProvider
from crewai.rag.embeddings.providers.sentence_transformer.sentence_transformer_provider import (
SentenceTransformerProvider,
)
from crewai.rag.embeddings.providers.instructor.instructor_provider import InstructorProvider
from crewai.rag.embeddings.providers.openclip.openclip_provider import OpenCLIPProvider
class TestGoogleProviderAlias:
    """Test that 'google' provider name alias works for backward compatibility."""

    def test_google_alias_in_provider_paths(self):
        """Verify 'google' is registered as an alias for google-generativeai."""
        alias, canonical = "google", "google-generativeai"
        assert alias in PROVIDER_PATHS
        assert canonical in PROVIDER_PATHS
        # Both names must route to the same provider implementation.
        assert PROVIDER_PATHS[alias] == PROVIDER_PATHS[canonical]
class TestModelKeyBackwardCompatibility:
"""Test that 'model' config key works as alias for 'model_name'."""
def test_openai_provider_accepts_model_key(self):
    """Verify OpenAIProvider maps the legacy 'model' key onto model_name."""
    expected = "text-embedding-3-small"
    provider = OpenAIProvider(api_key="test-key", model=expected)
    assert provider.model_name == expected
def test_openai_provider_model_name_takes_precedence(self):
    """Test that model_name takes precedence when both are provided.

    The original test passed only ``model_name``, so it never exercised
    the precedence its docstring claims; supplying both keys makes the
    test match its stated intent.
    """
    provider = OpenAIProvider(
        api_key="test-key",
        model="text-embedding-3-small",
        model_name="text-embedding-3-large",
    )
    # The canonical key wins over the legacy 'model' alias.
    assert provider.model_name == "text-embedding-3-large"
def test_cohere_provider_accepts_model_key(self):
"""Test Cohere provider accepts 'model' as alias for 'model_name'."""
provider = CohereProvider(
api_key="test-key",
model="embed-english-v3.0",
)
assert provider.model_name == "embed-english-v3.0"
def test_google_generativeai_provider_accepts_model_key(self):
"""Test Google Generative AI provider accepts 'model' as alias."""
provider = GenerativeAiProvider(
api_key="test-key",
model="gemini-embedding-001",
)
assert provider.model_name == "gemini-embedding-001"
def test_google_vertex_provider_accepts_model_key(self):
"""Test Google Vertex AI provider accepts 'model' as alias."""
provider = VertexAIProvider(
api_key="test-key",
model="text-embedding-004",
)
assert provider.model_name == "text-embedding-004"
def test_azure_provider_accepts_model_key(self):
"""Test Azure provider accepts 'model' as alias for 'model_name'."""
provider = AzureProvider(
api_key="test-key",
deployment_id="test-deployment",
model="text-embedding-ada-002",
)
assert provider.model_name == "text-embedding-ada-002"
def test_jina_provider_accepts_model_key(self):
"""Test Jina provider accepts 'model' as alias for 'model_name'."""
provider = JinaProvider(
api_key="test-key",
model="jina-embeddings-v3",
)
assert provider.model_name == "jina-embeddings-v3"
def test_ollama_provider_accepts_model_key(self):
"""Test Ollama provider accepts 'model' as alias for 'model_name'."""
provider = OllamaProvider(
model="nomic-embed-text",
)
assert provider.model_name == "nomic-embed-text"
def test_text2vec_provider_accepts_model_key(self):
"""Test Text2Vec provider accepts 'model' as alias for 'model_name'."""
provider = Text2VecProvider(
model="shibing624/text2vec-base-multilingual",
)
assert provider.model_name == "shibing624/text2vec-base-multilingual"
def test_sentence_transformer_provider_accepts_model_key(self):
"""Test SentenceTransformer provider accepts 'model' as alias."""
provider = SentenceTransformerProvider(
model="all-mpnet-base-v2",
)
assert provider.model_name == "all-mpnet-base-v2"
def test_instructor_provider_accepts_model_key(self):
"""Test Instructor provider accepts 'model' as alias for 'model_name'."""
provider = InstructorProvider(
model="hkunlp/instructor-xl",
)
assert provider.model_name == "hkunlp/instructor-xl"
def test_openclip_provider_accepts_model_key(self):
"""Test OpenCLIP provider accepts 'model' as alias for 'model_name'."""
provider = OpenCLIPProvider(
model="ViT-B-16",
)
assert provider.model_name == "ViT-B-16"
class TestTaskTypeConfiguration:
"""Test that task_type configuration works correctly."""
def test_google_provider_accepts_lowercase_task_type(self):
"""Test Google provider accepts lowercase task_type."""
provider = GenerativeAiProvider(
api_key="test-key",
task_type="retrieval_document",
)
assert provider.task_type == "retrieval_document"
def test_google_provider_accepts_uppercase_task_type(self):
"""Test Google provider accepts uppercase task_type."""
provider = GenerativeAiProvider(
api_key="test-key",
task_type="RETRIEVAL_QUERY",
)
assert provider.task_type == "RETRIEVAL_QUERY"
def test_google_provider_default_task_type(self):
"""Test Google provider has correct default task_type."""
provider = GenerativeAiProvider(
api_key="test-key",
)
assert provider.task_type == "RETRIEVAL_DOCUMENT"
class TestFactoryBackwardCompatibility:
"""Test factory function with backward compatible configurations."""
def test_factory_with_google_alias(self):
"""Test factory resolves 'google' to google-generativeai provider."""
config = {
"provider": "google",
"config": {
"api_key": "test-key",
"model": "gemini-embedding-001",
},
}
from unittest.mock import patch, MagicMock
with patch("crewai.rag.embeddings.factory.import_and_validate_definition") as mock_import:
mock_provider_class = MagicMock()
mock_provider_instance = MagicMock()
mock_import.return_value = mock_provider_class
mock_provider_class.return_value = mock_provider_instance
build_embedder(config)
mock_import.assert_called_once_with(
"crewai.rag.embeddings.providers.google.generative_ai.GenerativeAiProvider"
)
def test_factory_with_model_key_openai(self):
"""Test factory passes 'model' config to OpenAI provider."""
config = {
"provider": "openai",
"config": {
"api_key": "test-key",
"model": "text-embedding-3-small",
},
}
from unittest.mock import patch, MagicMock
with patch("crewai.rag.embeddings.factory.import_and_validate_definition") as mock_import:
mock_provider_class = MagicMock()
mock_provider_instance = MagicMock()
mock_import.return_value = mock_provider_class
mock_provider_class.return_value = mock_provider_instance
build_embedder(config)
call_kwargs = mock_provider_class.call_args.kwargs
assert call_kwargs["model"] == "text-embedding-3-small"
class TestDocumentationCodeSnippets:
"""Test code snippets from documentation work correctly."""
def test_memory_openai_config(self):
"""Test OpenAI config from memory.mdx documentation."""
provider = OpenAIProvider(
model_name="text-embedding-3-small",
)
assert provider.model_name == "text-embedding-3-small"
def test_memory_openai_config_with_options(self):
"""Test OpenAI config with all options from memory.mdx."""
provider = OpenAIProvider(
api_key="your-openai-api-key",
model_name="text-embedding-3-large",
dimensions=1536,
organization_id="your-org-id",
)
assert provider.model_name == "text-embedding-3-large"
assert provider.dimensions == 1536
def test_memory_azure_config(self):
"""Test Azure config from memory.mdx documentation."""
provider = AzureProvider(
api_key="your-azure-key",
api_base="https://your-resource.openai.azure.com/",
api_type="azure",
api_version="2023-05-15",
model_name="text-embedding-3-small",
deployment_id="your-deployment-name",
)
assert provider.model_name == "text-embedding-3-small"
assert provider.api_type == "azure"
def test_memory_google_generativeai_config(self):
"""Test Google Generative AI config from memory.mdx documentation."""
provider = GenerativeAiProvider(
api_key="your-google-api-key",
model_name="gemini-embedding-001",
)
assert provider.model_name == "gemini-embedding-001"
def test_memory_cohere_config(self):
"""Test Cohere config from memory.mdx documentation."""
provider = CohereProvider(
api_key="your-cohere-api-key",
model_name="embed-english-v3.0",
)
assert provider.model_name == "embed-english-v3.0"
def test_knowledge_agent_embedder_config(self):
"""Test agent embedder config from knowledge.mdx documentation."""
provider = GenerativeAiProvider(
model_name="gemini-embedding-001",
api_key="your-google-key",
)
assert provider.model_name == "gemini-embedding-001"
def test_ragtool_openai_config(self):
"""Test RagTool OpenAI config from ragtool.mdx documentation."""
provider = OpenAIProvider(
model_name="text-embedding-3-small",
)
assert provider.model_name == "text-embedding-3-small"
def test_ragtool_cohere_config(self):
"""Test RagTool Cohere config from ragtool.mdx documentation."""
provider = CohereProvider(
api_key="your-api-key",
model_name="embed-english-v3.0",
)
assert provider.model_name == "embed-english-v3.0"
def test_ragtool_ollama_config(self):
"""Test RagTool Ollama config from ragtool.mdx documentation."""
provider = OllamaProvider(
model_name="llama2",
url="http://localhost:11434/api/embeddings",
)
assert provider.model_name == "llama2"
def test_ragtool_azure_config(self):
"""Test RagTool Azure config from ragtool.mdx documentation."""
provider = AzureProvider(
deployment_id="your-deployment-id",
api_key="your-api-key",
api_base="https://your-resource.openai.azure.com",
api_version="2024-02-01",
model_name="text-embedding-ada-002",
api_type="azure",
)
assert provider.model_name == "text-embedding-ada-002"
assert provider.deployment_id == "your-deployment-id"
def test_ragtool_google_generativeai_config(self):
"""Test RagTool Google Generative AI config from ragtool.mdx."""
provider = GenerativeAiProvider(
api_key="your-api-key",
model_name="gemini-embedding-001",
task_type="RETRIEVAL_DOCUMENT",
)
assert provider.model_name == "gemini-embedding-001"
assert provider.task_type == "RETRIEVAL_DOCUMENT"
def test_ragtool_jina_config(self):
"""Test RagTool Jina config from ragtool.mdx documentation."""
provider = JinaProvider(
api_key="your-api-key",
model_name="jina-embeddings-v3",
)
assert provider.model_name == "jina-embeddings-v3"
def test_ragtool_sentence_transformer_config(self):
"""Test RagTool SentenceTransformer config from ragtool.mdx."""
provider = SentenceTransformerProvider(
model_name="all-mpnet-base-v2",
device="cuda",
normalize_embeddings=True,
)
assert provider.model_name == "all-mpnet-base-v2"
assert provider.device == "cuda"
assert provider.normalize_embeddings is True
class TestLegacyConfigurationFormats:
"""Test legacy configuration formats that should still work."""
def test_legacy_google_with_model_key(self):
"""Test legacy Google config using 'model' instead of 'model_name'."""
provider = GenerativeAiProvider(
api_key="test-key",
model="text-embedding-005",
task_type="retrieval_document",
)
assert provider.model_name == "text-embedding-005"
assert provider.task_type == "retrieval_document"
def test_legacy_openai_with_model_key(self):
"""Test legacy OpenAI config using 'model' instead of 'model_name'."""
provider = OpenAIProvider(
api_key="test-key",
model="text-embedding-ada-002",
)
assert provider.model_name == "text-embedding-ada-002"
def test_legacy_cohere_with_model_key(self):
"""Test legacy Cohere config using 'model' instead of 'model_name'."""
provider = CohereProvider(
api_key="test-key",
model="embed-multilingual-v3.0",
)
assert provider.model_name == "embed-multilingual-v3.0"
def test_legacy_azure_with_model_key(self):
"""Test legacy Azure config using 'model' instead of 'model_name'."""
provider = AzureProvider(
api_key="test-key",
deployment_id="test-deployment",
model="text-embedding-3-large",
)
assert provider.model_name == "text-embedding-3-large" | {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/rag/embeddings/test_backward_compatibility.py",
"license": "MIT License",
"lines": 308,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/src/crewai/types/streaming.py | """Streaming output types for crew and flow execution."""
from __future__ import annotations
from collections.abc import AsyncIterator, Iterator
from enum import Enum
from typing import TYPE_CHECKING, Any, Generic, TypeVar
from pydantic import BaseModel, Field
if TYPE_CHECKING:
from crewai.crews.crew_output import CrewOutput
T = TypeVar("T")
class StreamChunkType(Enum):
"""Type of streaming chunk."""
TEXT = "text"
TOOL_CALL = "tool_call"
class ToolCallChunk(BaseModel):
"""Tool call information in a streaming chunk.
Attributes:
tool_id: Unique identifier for the tool call
tool_name: Name of the tool being called
arguments: JSON string of tool arguments
index: Index of the tool call in the response
"""
tool_id: str | None = None
tool_name: str | None = None
arguments: str = ""
index: int = 0
class StreamChunk(BaseModel):
"""Base streaming chunk with full context.
Attributes:
content: The streaming content (text or partial content)
chunk_type: Type of the chunk (text, tool_call, etc.)
task_index: Index of the current task (0-based)
task_name: Name or description of the current task
task_id: Unique identifier of the task
agent_role: Role of the agent executing the task
agent_id: Unique identifier of the agent
tool_call: Tool call information if chunk_type is TOOL_CALL
"""
content: str = Field(description="The streaming content")
chunk_type: StreamChunkType = Field(
default=StreamChunkType.TEXT, description="Type of the chunk"
)
task_index: int = Field(default=0, description="Index of the current task")
task_name: str = Field(default="", description="Name of the current task")
task_id: str = Field(default="", description="Unique identifier of the task")
agent_role: str = Field(default="", description="Role of the agent")
agent_id: str = Field(default="", description="Unique identifier of the agent")
tool_call: ToolCallChunk | None = Field(
default=None, description="Tool call information"
)
def __str__(self) -> str:
"""Return the chunk content as a string."""
return self.content
class StreamingOutputBase(Generic[T]):
"""Base class for streaming output with result access.
Provides iteration over stream chunks and access to final result
via the .result property after streaming completes.
"""
def __init__(self) -> None:
"""Initialize streaming output base."""
self._result: T | None = None
self._completed: bool = False
self._chunks: list[StreamChunk] = []
self._error: Exception | None = None
@property
def result(self) -> T:
"""Get the final result after streaming completes.
Returns:
The final output (CrewOutput for crews, Any for flows).
Raises:
RuntimeError: If streaming has not completed yet.
Exception: If streaming failed with an error.
"""
if not self._completed:
raise RuntimeError(
"Streaming has not completed yet. "
"Iterate over all chunks before accessing result."
)
if self._error is not None:
raise self._error
if self._result is None:
raise RuntimeError("No result available")
return self._result
@property
def is_completed(self) -> bool:
"""Check if streaming has completed."""
return self._completed
@property
def chunks(self) -> list[StreamChunk]:
"""Get all collected chunks so far."""
return self._chunks.copy()
def get_full_text(self) -> str:
"""Get all streamed text content concatenated.
Returns:
All text chunks concatenated together.
"""
return "".join(
chunk.content
for chunk in self._chunks
if chunk.chunk_type == StreamChunkType.TEXT
)
class CrewStreamingOutput(StreamingOutputBase["CrewOutput"]):
"""Streaming output wrapper for crew execution.
Provides both sync and async iteration over stream chunks,
with access to the final CrewOutput via the .result property.
For kickoff_for_each_async with streaming, use .results to get list of outputs.
Example:
```python
# Single crew
streaming = crew.kickoff(inputs={"topic": "AI"})
for chunk in streaming:
print(chunk.content, end="", flush=True)
result = streaming.result
# Multiple crews (kickoff_for_each_async)
streaming = await crew.kickoff_for_each_async(
[{"topic": "AI"}, {"topic": "ML"}]
)
async for chunk in streaming:
print(chunk.content, end="", flush=True)
results = streaming.results # List of CrewOutput
```
"""
def __init__(
self,
sync_iterator: Iterator[StreamChunk] | None = None,
async_iterator: AsyncIterator[StreamChunk] | None = None,
) -> None:
"""Initialize crew streaming output.
Args:
sync_iterator: Synchronous iterator for chunks.
async_iterator: Asynchronous iterator for chunks.
"""
super().__init__()
self._sync_iterator = sync_iterator
self._async_iterator = async_iterator
self._results: list[CrewOutput] | None = None
@property
def results(self) -> list[CrewOutput]:
"""Get all results for kickoff_for_each_async.
Returns:
List of CrewOutput from all crews.
Raises:
RuntimeError: If streaming has not completed or results not available.
"""
if not self._completed:
raise RuntimeError(
"Streaming has not completed yet. "
"Iterate over all chunks before accessing results."
)
if self._error is not None:
raise self._error
if self._results is not None:
return self._results
if self._result is not None:
return [self._result]
raise RuntimeError("No results available")
def _set_results(self, results: list[CrewOutput]) -> None:
"""Set multiple results for kickoff_for_each_async.
Args:
results: List of CrewOutput from all crews.
"""
self._results = results
self._completed = True
def __iter__(self) -> Iterator[StreamChunk]:
"""Iterate over stream chunks synchronously.
Yields:
StreamChunk objects as they arrive.
Raises:
RuntimeError: If sync iterator not available.
"""
if self._sync_iterator is None:
raise RuntimeError("Sync iterator not available")
try:
for chunk in self._sync_iterator:
self._chunks.append(chunk)
yield chunk
except Exception as e:
self._error = e
raise
finally:
self._completed = True
def __aiter__(self) -> AsyncIterator[StreamChunk]:
"""Return async iterator for stream chunks.
Returns:
Async iterator for StreamChunk objects.
"""
return self._async_iterate()
async def _async_iterate(self) -> AsyncIterator[StreamChunk]:
"""Iterate over stream chunks asynchronously.
Yields:
StreamChunk objects as they arrive.
Raises:
RuntimeError: If async iterator not available.
"""
if self._async_iterator is None:
raise RuntimeError("Async iterator not available")
try:
async for chunk in self._async_iterator:
self._chunks.append(chunk)
yield chunk
except Exception as e:
self._error = e
raise
finally:
self._completed = True
def _set_result(self, result: CrewOutput) -> None:
"""Set the final result after streaming completes.
Args:
result: The final CrewOutput.
"""
self._result = result
self._completed = True
class FlowStreamingOutput(StreamingOutputBase[Any]):
"""Streaming output wrapper for flow execution.
Provides both sync and async iteration over stream chunks,
with access to the final flow output via the .result property.
Example:
```python
# Sync usage
streaming = flow.kickoff_streaming()
for chunk in streaming:
print(chunk.content, end="", flush=True)
result = streaming.result
# Async usage
streaming = await flow.kickoff_streaming_async()
async for chunk in streaming:
print(chunk.content, end="", flush=True)
result = streaming.result
```
"""
def __init__(
self,
sync_iterator: Iterator[StreamChunk] | None = None,
async_iterator: AsyncIterator[StreamChunk] | None = None,
) -> None:
"""Initialize flow streaming output.
Args:
sync_iterator: Synchronous iterator for chunks.
async_iterator: Asynchronous iterator for chunks.
"""
super().__init__()
self._sync_iterator = sync_iterator
self._async_iterator = async_iterator
def __iter__(self) -> Iterator[StreamChunk]:
"""Iterate over stream chunks synchronously.
Yields:
StreamChunk objects as they arrive.
Raises:
RuntimeError: If sync iterator not available.
"""
if self._sync_iterator is None:
raise RuntimeError("Sync iterator not available")
try:
for chunk in self._sync_iterator:
self._chunks.append(chunk)
yield chunk
except Exception as e:
self._error = e
raise
finally:
self._completed = True
def __aiter__(self) -> AsyncIterator[StreamChunk]:
"""Return async iterator for stream chunks.
Returns:
Async iterator for StreamChunk objects.
"""
return self._async_iterate()
async def _async_iterate(self) -> AsyncIterator[StreamChunk]:
"""Iterate over stream chunks asynchronously.
Yields:
StreamChunk objects as they arrive.
Raises:
RuntimeError: If async iterator not available.
"""
if self._async_iterator is None:
raise RuntimeError("Async iterator not available")
try:
async for chunk in self._async_iterator:
self._chunks.append(chunk)
yield chunk
except Exception as e:
self._error = e
raise
finally:
self._completed = True
def _set_result(self, result: Any) -> None:
"""Set the final result after streaming completes.
Args:
result: The final flow output.
"""
self._result = result
self._completed = True
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/types/streaming.py",
"license": "MIT License",
"lines": 291,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
crewAIInc/crewAI:lib/crewai/src/crewai/utilities/streaming.py | """Streaming utilities for crew and flow execution."""
import asyncio
from collections.abc import AsyncIterator, Callable, Iterator
import queue
import threading
from typing import Any, NamedTuple
from typing_extensions import TypedDict
from crewai.events.base_events import BaseEvent
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.llm_events import LLMStreamChunkEvent
from crewai.types.streaming import (
CrewStreamingOutput,
FlowStreamingOutput,
StreamChunk,
StreamChunkType,
ToolCallChunk,
)
from crewai.utilities.string_utils import sanitize_tool_name
class TaskInfo(TypedDict):
"""Task context information for streaming."""
index: int
name: str
id: str
agent_role: str
agent_id: str
class StreamingState(NamedTuple):
"""Immutable state for streaming execution."""
current_task_info: TaskInfo
result_holder: list[Any]
sync_queue: queue.Queue[StreamChunk | None | Exception]
async_queue: asyncio.Queue[StreamChunk | None | Exception] | None
loop: asyncio.AbstractEventLoop | None
handler: Callable[[Any, BaseEvent], None]
def _extract_tool_call_info(
event: LLMStreamChunkEvent,
) -> tuple[StreamChunkType, ToolCallChunk | None]:
"""Extract tool call information from an LLM stream chunk event.
Args:
event: The LLM stream chunk event to process.
Returns:
A tuple of (chunk_type, tool_call_chunk) where tool_call_chunk is None
if the event is not a tool call.
"""
if event.tool_call:
return (
StreamChunkType.TOOL_CALL,
ToolCallChunk(
tool_id=event.tool_call.id,
tool_name=sanitize_tool_name(event.tool_call.function.name),
arguments=event.tool_call.function.arguments,
index=event.tool_call.index,
),
)
return StreamChunkType.TEXT, None
def _create_stream_chunk(
event: LLMStreamChunkEvent,
current_task_info: TaskInfo,
) -> StreamChunk:
"""Create a StreamChunk from an LLM stream chunk event.
Args:
event: The LLM stream chunk event to process.
current_task_info: Task context info.
Returns:
A StreamChunk populated with event and task info.
"""
chunk_type, tool_call_chunk = _extract_tool_call_info(event)
return StreamChunk(
content=event.chunk,
chunk_type=chunk_type,
task_index=current_task_info["index"],
task_name=current_task_info["name"],
task_id=current_task_info["id"],
agent_role=event.agent_role or current_task_info["agent_role"],
agent_id=event.agent_id or current_task_info["agent_id"],
tool_call=tool_call_chunk,
)
def _create_stream_handler(
current_task_info: TaskInfo,
sync_queue: queue.Queue[StreamChunk | None | Exception],
async_queue: asyncio.Queue[StreamChunk | None | Exception] | None = None,
loop: asyncio.AbstractEventLoop | None = None,
) -> Callable[[Any, BaseEvent], None]:
"""Create a stream handler function.
Args:
current_task_info: Task context info.
sync_queue: Synchronous queue for chunks.
async_queue: Optional async queue for chunks.
loop: Optional event loop for async operations.
Returns:
Handler function that can be registered with the event bus.
"""
def stream_handler(_: Any, event: BaseEvent) -> None:
"""Handle LLM stream chunk events and enqueue them.
Args:
_: Event source (unused).
event: The event to process.
"""
if not isinstance(event, LLMStreamChunkEvent):
return
chunk = _create_stream_chunk(event, current_task_info)
if async_queue is not None and loop is not None:
loop.call_soon_threadsafe(async_queue.put_nowait, chunk)
else:
sync_queue.put(chunk)
return stream_handler
def _unregister_handler(handler: Callable[[Any, BaseEvent], None]) -> None:
"""Unregister a stream handler from the event bus.
Args:
handler: The handler function to unregister.
"""
with crewai_event_bus._rwlock.w_locked():
handlers: frozenset[Callable[[Any, BaseEvent], None]] = (
crewai_event_bus._sync_handlers.get(LLMStreamChunkEvent, frozenset())
)
crewai_event_bus._sync_handlers[LLMStreamChunkEvent] = handlers - {handler}
def _finalize_streaming(
state: StreamingState,
streaming_output: CrewStreamingOutput | FlowStreamingOutput,
) -> None:
"""Finalize streaming by unregistering handler and setting result.
Args:
state: The streaming state to finalize.
streaming_output: The streaming output to set the result on.
"""
_unregister_handler(state.handler)
if state.result_holder:
streaming_output._set_result(state.result_holder[0])
def create_streaming_state(
current_task_info: TaskInfo,
result_holder: list[Any],
use_async: bool = False,
) -> StreamingState:
"""Create and register streaming state.
Args:
current_task_info: Task context info.
result_holder: List to hold the final result.
use_async: Whether to use async queue.
Returns:
Initialized StreamingState with registered handler.
"""
sync_queue: queue.Queue[StreamChunk | None | Exception] = queue.Queue()
async_queue: asyncio.Queue[StreamChunk | None | Exception] | None = None
loop: asyncio.AbstractEventLoop | None = None
if use_async:
async_queue = asyncio.Queue()
loop = asyncio.get_event_loop()
handler = _create_stream_handler(current_task_info, sync_queue, async_queue, loop)
crewai_event_bus.register_handler(LLMStreamChunkEvent, handler)
return StreamingState(
current_task_info=current_task_info,
result_holder=result_holder,
sync_queue=sync_queue,
async_queue=async_queue,
loop=loop,
handler=handler,
)
def signal_end(state: StreamingState, is_async: bool = False) -> None:
"""Signal end of stream.
Args:
state: The streaming state.
is_async: Whether this is an async stream.
"""
if is_async and state.async_queue is not None and state.loop is not None:
state.loop.call_soon_threadsafe(state.async_queue.put_nowait, None)
else:
state.sync_queue.put(None)
def signal_error(
state: StreamingState, error: Exception, is_async: bool = False
) -> None:
"""Signal an error in the stream.
Args:
state: The streaming state.
error: The exception to signal.
is_async: Whether this is an async stream.
"""
if is_async and state.async_queue is not None and state.loop is not None:
state.loop.call_soon_threadsafe(state.async_queue.put_nowait, error)
else:
state.sync_queue.put(error)
def create_chunk_generator(
state: StreamingState,
run_func: Callable[[], None],
output_holder: list[CrewStreamingOutput | FlowStreamingOutput],
) -> Iterator[StreamChunk]:
"""Create a chunk generator that uses a holder to access streaming output.
Args:
state: The streaming state.
run_func: Function to run in a separate thread.
output_holder: Single-element list that will contain the streaming output.
Yields:
StreamChunk objects as they arrive.
"""
thread = threading.Thread(target=run_func, daemon=True)
thread.start()
try:
while True:
item = state.sync_queue.get()
if item is None:
break
if isinstance(item, Exception):
raise item
yield item
finally:
thread.join()
if output_holder:
_finalize_streaming(state, output_holder[0])
else:
_unregister_handler(state.handler)
async def create_async_chunk_generator(
state: StreamingState,
run_coro: Callable[[], Any],
output_holder: list[CrewStreamingOutput | FlowStreamingOutput],
) -> AsyncIterator[StreamChunk]:
"""Create an async chunk generator that uses a holder to access streaming output.
Args:
state: The streaming state.
run_coro: Coroutine function to run as a task.
output_holder: Single-element list that will contain the streaming output.
Yields:
StreamChunk objects as they arrive.
"""
if state.async_queue is None:
raise RuntimeError(
"Async queue not initialized. Use create_streaming_state(use_async=True)."
)
task = asyncio.create_task(run_coro())
try:
while True:
item = await state.async_queue.get()
if item is None:
break
if isinstance(item, Exception):
raise item
yield item
finally:
await task
if output_holder:
_finalize_streaming(state, output_holder[0])
else:
_unregister_handler(state.handler)
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/utilities/streaming.py",
"license": "MIT License",
"lines": 240,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai/tests/test_streaming.py | """Tests for streaming output functionality in crews and flows."""
import asyncio
from collections.abc import AsyncIterator, Generator
from typing import Any
from unittest.mock import MagicMock, patch
import pytest
from crewai import Agent, Crew, Task
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.llm_events import LLMStreamChunkEvent, ToolCall, FunctionCall
from crewai.flow.flow import Flow, start
from crewai.types.streaming import (
CrewStreamingOutput,
FlowStreamingOutput,
StreamChunk,
StreamChunkType,
ToolCallChunk,
)
@pytest.fixture
def researcher() -> Agent:
    """Provide an expert researcher agent shared by the streaming tests."""
    agent_config = {
        "role": "Researcher",
        "goal": "Research and analyze topics thoroughly",
        "backstory": "You are an expert researcher with deep analytical skills.",
        "allow_delegation": False,
    }
    return Agent(**agent_config)
@pytest.fixture
def simple_task(researcher: Agent) -> Task:
    """Provide a single AI-trends analysis task assigned to the researcher."""
    return Task(
        agent=researcher,
        description="Write a brief analysis of AI trends",
        expected_output="A concise analysis of current AI trends",
    )
@pytest.fixture
def simple_crew(researcher: Agent, simple_task: Task) -> Crew:
    """Provide a quiet, non-streaming crew with one agent and one task."""
    return Crew(tasks=[simple_task], agents=[researcher], verbose=False)
@pytest.fixture
def streaming_crew(researcher: Agent, simple_task: Task) -> Crew:
    """Provide a one-agent, one-task crew with streaming output enabled."""
    return Crew(
        stream=True,
        tasks=[simple_task],
        agents=[researcher],
        verbose=False,
    )
class TestStreamChunk:
    """Unit tests for the StreamChunk model."""

    def test_stream_chunk_creation(self) -> None:
        """A plain text chunk exposes its fields and stringifies to its content."""
        text_chunk = StreamChunk(
            content="Hello, world!",
            chunk_type=StreamChunkType.TEXT,
            task_index=0,
            task_name="Test Task",
            task_id="task-123",
            agent_role="Researcher",
            agent_id="agent-456",
        )
        assert text_chunk.content == "Hello, world!"
        assert text_chunk.chunk_type == StreamChunkType.TEXT
        assert text_chunk.task_index == 0
        assert text_chunk.task_name == "Test Task"
        # __str__ should simply return the raw content.
        assert str(text_chunk) == "Hello, world!"

    def test_stream_chunk_with_tool_call(self) -> None:
        """A tool-call chunk carries its nested ToolCallChunk payload."""
        call_info = ToolCallChunk(
            tool_id="call-123",
            tool_name="search",
            arguments='{"query": "AI trends"}',
            index=0,
        )
        tool_chunk = StreamChunk(
            content="",
            chunk_type=StreamChunkType.TOOL_CALL,
            tool_call=call_info,
        )
        assert tool_chunk.chunk_type == StreamChunkType.TOOL_CALL
        assert tool_chunk.tool_call is not None
        assert tool_chunk.tool_call.tool_name == "search"
class TestCrewStreamingOutput:
    """Unit tests for CrewStreamingOutput iteration and result access."""

    def test_result_before_iteration_raises_error(self) -> None:
        """Accessing .result before the stream is consumed raises RuntimeError."""
        def chunk_source() -> Generator[StreamChunk, None, None]:
            yield StreamChunk(content="test")

        output = CrewStreamingOutput(sync_iterator=chunk_source())
        with pytest.raises(RuntimeError, match="Streaming has not completed yet"):
            _ = output.result

    def test_is_completed_property(self) -> None:
        """is_completed flips from False to True once iteration finishes."""
        def chunk_source() -> Generator[StreamChunk, None, None]:
            yield StreamChunk(content="test")

        output = CrewStreamingOutput(sync_iterator=chunk_source())
        assert output.is_completed is False
        list(output)
        assert output.is_completed is True

    def test_get_full_text(self) -> None:
        """get_full_text concatenates TEXT chunks only, skipping tool calls."""
        def chunk_source() -> Generator[StreamChunk, None, None]:
            yield StreamChunk(content="Hello ")
            yield StreamChunk(content="World!")
            yield StreamChunk(content="", chunk_type=StreamChunkType.TOOL_CALL)

        output = CrewStreamingOutput(sync_iterator=chunk_source())
        list(output)
        assert output.get_full_text() == "Hello World!"

    def test_chunks_property(self) -> None:
        """chunks exposes every chunk collected during iteration, in order."""
        def chunk_source() -> Generator[StreamChunk, None, None]:
            yield StreamChunk(content="chunk1")
            yield StreamChunk(content="chunk2")

        output = CrewStreamingOutput(sync_iterator=chunk_source())
        list(output)
        assert len(output.chunks) == 2
        assert output.chunks[0].content == "chunk1"
class TestFlowStreamingOutput:
    """Unit tests for FlowStreamingOutput iteration and result access."""

    def test_result_before_iteration_raises_error(self) -> None:
        """Accessing .result before the stream is consumed raises RuntimeError."""
        def chunk_source() -> Generator[StreamChunk, None, None]:
            yield StreamChunk(content="test")

        output = FlowStreamingOutput(sync_iterator=chunk_source())
        with pytest.raises(RuntimeError, match="Streaming has not completed yet"):
            _ = output.result

    def test_is_completed_property(self) -> None:
        """is_completed flips from False to True once iteration finishes."""
        def chunk_source() -> Generator[StreamChunk, None, None]:
            yield StreamChunk(content="test")

        output = FlowStreamingOutput(sync_iterator=chunk_source())
        assert output.is_completed is False
        list(output)
        assert output.is_completed is True
class TestCrewKickoffStreaming:
    """Tests for Crew(stream=True).kickoff() method."""

    def test_kickoff_streaming_returns_streaming_output(self, streaming_crew: Crew) -> None:
        """Test that kickoff with stream=True returns CrewStreamingOutput."""
        with patch.object(Crew, "kickoff") as mock_kickoff:
            mock_output = MagicMock()
            mock_output.raw = "Test output"

            def side_effect(*args: Any, **kwargs: Any) -> Any:
                return mock_output

            mock_kickoff.side_effect = side_effect
            streaming = streaming_crew.kickoff()
            assert isinstance(streaming, CrewStreamingOutput)

    def test_kickoff_streaming_captures_chunks(self, researcher: Agent, simple_task: Task) -> None:
        """Test that streaming captures LLM chunks."""
        crew = Crew(
            agents=[researcher],
            tasks=[simple_task],
            verbose=False,
            stream=True,
        )
        mock_output = MagicMock()
        mock_output.raw = "Test output"
        original_kickoff = Crew.kickoff
        call_count = [0]

        # Call 1 delegates to the real kickoff (presumably re-entrant when
        # stream=True — TODO confirm); call 2 emits chunk events on the
        # global event bus and returns the mocked final output.
        def mock_kickoff_fn(self: Any, inputs: Any = None, **kwargs: Any) -> Any:
            call_count[0] += 1
            if call_count[0] == 1:
                return original_kickoff(self, inputs, **kwargs)
            else:
                crewai_event_bus.emit(
                    crew,
                    LLMStreamChunkEvent(
                        type="llm_stream_chunk",
                        chunk="Hello ",
                        call_id="test-call-id",
                    ),
                )
                crewai_event_bus.emit(
                    crew,
                    LLMStreamChunkEvent(
                        type="llm_stream_chunk",
                        chunk="World!",
                        call_id="test-call-id",
                    ),
                )
                return mock_output

        with patch.object(Crew, "kickoff", mock_kickoff_fn):
            streaming = crew.kickoff()
            assert isinstance(streaming, CrewStreamingOutput)
            # Draining the stream triggers the (patched) inner kickoff.
            chunks = list(streaming)
            assert len(chunks) >= 2
            contents = [c.content for c in chunks]
            assert "Hello " in contents
            assert "World!" in contents

    def test_kickoff_streaming_result_available_after_iteration(
        self, researcher: Agent, simple_task: Task
    ) -> None:
        """Test that result is available after iterating all chunks."""
        mock_output = MagicMock()
        mock_output.raw = "Final result"

        def gen() -> Generator[StreamChunk, None, None]:
            yield StreamChunk(content="test chunk")

        streaming = CrewStreamingOutput(sync_iterator=gen())
        # Iterate all chunks
        _ = list(streaming)
        # Simulate what _finalize_streaming does
        streaming._set_result(mock_output)
        result = streaming.result
        assert result.raw == "Final result"

    def test_kickoff_streaming_handles_tool_calls(self, researcher: Agent, simple_task: Task) -> None:
        """Test that streaming handles tool call chunks correctly."""
        crew = Crew(
            agents=[researcher],
            tasks=[simple_task],
            verbose=False,
            stream=True,
        )
        mock_output = MagicMock()
        mock_output.raw = "Test output"
        original_kickoff = Crew.kickoff
        call_count = [0]

        def mock_kickoff_fn(self: Any, inputs: Any = None, **kwargs: Any) -> Any:
            call_count[0] += 1
            if call_count[0] == 1:
                return original_kickoff(self, inputs, **kwargs)
            else:
                # Emit a chunk that carries a tool call instead of text.
                crewai_event_bus.emit(
                    crew,
                    LLMStreamChunkEvent(
                        type="llm_stream_chunk",
                        chunk="",
                        call_id="test-call-id",
                        tool_call=ToolCall(
                            id="call-123",
                            function=FunctionCall(
                                name="search",
                                arguments='{"query": "test"}',
                            ),
                            type="function",
                            index=0,
                        ),
                    ),
                )
                return mock_output

        with patch.object(Crew, "kickoff", mock_kickoff_fn):
            streaming = crew.kickoff()
            assert isinstance(streaming, CrewStreamingOutput)
            chunks = list(streaming)
            tool_chunks = [c for c in chunks if c.chunk_type == StreamChunkType.TOOL_CALL]
            assert len(tool_chunks) >= 1
            assert tool_chunks[0].tool_call is not None
            assert tool_chunks[0].tool_call.tool_name == "search"
class TestCrewKickoffStreamingAsync:
    """Tests for Crew(stream=True).kickoff_async() method."""

    @pytest.mark.asyncio
    async def test_kickoff_streaming_async_returns_streaming_output(
        self, researcher: Agent, simple_task: Task
    ) -> None:
        """Test that kickoff_async with stream=True returns CrewStreamingOutput."""
        crew = Crew(
            agents=[researcher],
            tasks=[simple_task],
            verbose=False,
            stream=True,
        )
        mock_output = MagicMock()
        mock_output.raw = "Test output"
        original_kickoff = Crew.kickoff
        call_count = [0]

        # Call 1 delegates to the real kickoff; the re-entrant second call
        # (presumably made internally by the streaming path — TODO confirm)
        # short-circuits with the mock output.
        def mock_kickoff_fn(self: Any, inputs: Any = None, **kwargs: Any) -> Any:
            call_count[0] += 1
            if call_count[0] == 1:
                return original_kickoff(self, inputs, **kwargs)
            else:
                return mock_output

        with patch.object(Crew, "kickoff", mock_kickoff_fn):
            streaming = await crew.kickoff_async()
            assert isinstance(streaming, CrewStreamingOutput)

    @pytest.mark.asyncio
    async def test_kickoff_streaming_async_captures_chunks(
        self, researcher: Agent, simple_task: Task
    ) -> None:
        """Test that async streaming captures LLM chunks."""
        crew = Crew(
            agents=[researcher],
            tasks=[simple_task],
            verbose=False,
            stream=True,
        )
        mock_output = MagicMock()
        mock_output.raw = "Test output"

        # Replacement kickoff: emit two stream-chunk events on the event
        # bus, then return the mocked crew output.
        def mock_kickoff_fn(
            self: Any, inputs: Any = None, input_files: Any = None, **kwargs: Any
        ) -> Any:
            crewai_event_bus.emit(
                crew,
                LLMStreamChunkEvent(
                    type="llm_stream_chunk",
                    chunk="Async ",
                    call_id="test-call-id",
                ),
            )
            crewai_event_bus.emit(
                crew,
                LLMStreamChunkEvent(
                    type="llm_stream_chunk",
                    chunk="Stream!",
                    call_id="test-call-id",
                ),
            )
            return mock_output

        with patch.object(Crew, "kickoff", mock_kickoff_fn):
            streaming = await crew.kickoff_async()
            assert isinstance(streaming, CrewStreamingOutput)
            chunks: list[StreamChunk] = []
            async for chunk in streaming:
                chunks.append(chunk)
            assert len(chunks) >= 2
            contents = [c.content for c in chunks]
            assert "Async " in contents
            assert "Stream!" in contents

    @pytest.mark.asyncio
    async def test_kickoff_streaming_async_result_available_after_iteration(
        self, researcher: Agent, simple_task: Task
    ) -> None:
        """Test that result is available after async iteration."""
        mock_output = MagicMock()
        mock_output.raw = "Async result"

        async def async_gen() -> AsyncIterator[StreamChunk]:
            yield StreamChunk(content="test chunk")

        streaming = CrewStreamingOutput(async_iterator=async_gen())
        # Iterate all chunks
        async for _ in streaming:
            pass
        # Simulate what _finalize_streaming does
        streaming._set_result(mock_output)
        result = streaming.result
        assert result.raw == "Async result"
class TestFlowKickoffStreaming:
    """Tests for Flow(stream=True).kickoff() method."""

    def test_kickoff_streaming_returns_streaming_output(self) -> None:
        """Test that flow kickoff with stream=True returns FlowStreamingOutput."""

        class SimpleFlow(Flow[dict[str, Any]]):
            @start()
            def generate(self) -> str:
                return "result"

        flow = SimpleFlow()
        flow.stream = True
        streaming = flow.kickoff()
        assert isinstance(streaming, FlowStreamingOutput)

    def test_flow_kickoff_streaming_captures_chunks(self) -> None:
        """Test that flow streaming captures LLM chunks from crew execution."""

        class TestFlow(Flow[dict[str, Any]]):
            @start()
            def run_crew(self) -> str:
                return "done"

        flow = TestFlow()
        flow.stream = True
        original_kickoff = Flow.kickoff
        call_count = [0]

        # Call 1 delegates to the real kickoff (presumably re-entrant when
        # stream=True — TODO confirm); call 2 emits chunk events and
        # returns the flow's final value.
        def mock_kickoff_fn(self: Any, inputs: Any = None, **kwargs: Any) -> Any:
            call_count[0] += 1
            if call_count[0] == 1:
                return original_kickoff(self, inputs, **kwargs)
            else:
                crewai_event_bus.emit(
                    flow,
                    LLMStreamChunkEvent(
                        type="llm_stream_chunk",
                        chunk="Flow ",
                        call_id="test-call-id",
                    ),
                )
                crewai_event_bus.emit(
                    flow,
                    LLMStreamChunkEvent(
                        type="llm_stream_chunk",
                        chunk="output!",
                        call_id="test-call-id",
                    ),
                )
                return "done"

        with patch.object(Flow, "kickoff", mock_kickoff_fn):
            streaming = flow.kickoff()
            assert isinstance(streaming, FlowStreamingOutput)
            chunks = list(streaming)
            assert len(chunks) >= 2
            contents = [c.content for c in chunks]
            assert "Flow " in contents
            assert "output!" in contents

    def test_flow_kickoff_streaming_result_available(self) -> None:
        """Test that flow result is available after iteration."""

        class TestFlow(Flow[dict[str, Any]]):
            @start()
            def generate(self) -> str:
                return "flow result"

        flow = TestFlow()
        flow.stream = True
        original_kickoff = Flow.kickoff
        call_count = [0]

        def mock_kickoff_fn(self: Any, inputs: Any = None, **kwargs: Any) -> Any:
            call_count[0] += 1
            if call_count[0] == 1:
                return original_kickoff(self, inputs, **kwargs)
            else:
                return "flow result"

        with patch.object(Flow, "kickoff", mock_kickoff_fn):
            streaming = flow.kickoff()
            assert isinstance(streaming, FlowStreamingOutput)
            _ = list(streaming)
            result = streaming.result
            assert result == "flow result"
class TestFlowKickoffStreamingAsync:
    """Tests for Flow(stream=True).kickoff_async() method."""

    @pytest.mark.asyncio
    async def test_kickoff_streaming_async_returns_streaming_output(self) -> None:
        """Test that flow kickoff_async with stream=True returns FlowStreamingOutput."""

        class SimpleFlow(Flow[dict[str, Any]]):
            @start()
            async def generate(self) -> str:
                return "async result"

        flow = SimpleFlow()
        flow.stream = True
        streaming = await flow.kickoff_async()
        assert isinstance(streaming, FlowStreamingOutput)

    @pytest.mark.asyncio
    async def test_flow_kickoff_streaming_async_captures_chunks(self) -> None:
        """Test that async flow streaming captures LLM chunks."""

        class TestFlow(Flow[dict[str, Any]]):
            @start()
            async def run_crew(self) -> str:
                return "done"

        flow = TestFlow()
        flow.stream = True
        original_kickoff = Flow.kickoff_async
        call_count = [0]

        # Call 1 delegates to the real kickoff_async; call 2 emits chunk
        # events with short sleeps between them (so the consumer coroutine
        # can interleave) and returns the final value.
        async def mock_kickoff_fn(self: Any, inputs: Any = None, **kwargs: Any) -> Any:
            call_count[0] += 1
            if call_count[0] == 1:
                return await original_kickoff(self, inputs, **kwargs)
            else:
                await asyncio.sleep(0.01)
                crewai_event_bus.emit(
                    flow,
                    LLMStreamChunkEvent(
                        type="llm_stream_chunk",
                        chunk="Async flow ",
                        call_id="test-call-id",
                    ),
                )
                await asyncio.sleep(0.01)
                crewai_event_bus.emit(
                    flow,
                    LLMStreamChunkEvent(
                        type="llm_stream_chunk",
                        chunk="stream!",
                        call_id="test-call-id",
                    ),
                )
                await asyncio.sleep(0.01)
                return "done"

        with patch.object(Flow, "kickoff_async", mock_kickoff_fn):
            streaming = await flow.kickoff_async()
            assert isinstance(streaming, FlowStreamingOutput)
            chunks: list[StreamChunk] = []
            async for chunk in streaming:
                chunks.append(chunk)
            assert len(chunks) >= 2
            contents = [c.content for c in chunks]
            assert "Async flow " in contents
            assert "stream!" in contents

    @pytest.mark.asyncio
    async def test_flow_kickoff_streaming_async_result_available(self) -> None:
        """Test that async flow result is available after iteration."""

        class TestFlow(Flow[dict[str, Any]]):
            @start()
            async def generate(self) -> str:
                return "async flow result"

        flow = TestFlow()
        flow.stream = True
        original_kickoff = Flow.kickoff_async
        call_count = [0]

        async def mock_kickoff_fn(self: Any, inputs: Any = None, **kwargs: Any) -> Any:
            call_count[0] += 1
            if call_count[0] == 1:
                return await original_kickoff(self, inputs, **kwargs)
            else:
                return "async flow result"

        with patch.object(Flow, "kickoff_async", mock_kickoff_fn):
            streaming = await flow.kickoff_async()
            assert isinstance(streaming, FlowStreamingOutput)
            async for _ in streaming:
                pass
            result = streaming.result
            assert result == "async flow result"
class TestStreamingEdgeCases:
    """Tests for edge cases in streaming functionality."""

    def test_streaming_handles_exceptions(self, researcher: Agent, simple_task: Task) -> None:
        """Test that streaming properly propagates exceptions."""
        crew = Crew(
            agents=[researcher],
            tasks=[simple_task],
            verbose=False,
            stream=True,
        )
        original_kickoff = Crew.kickoff
        call_count = [0]

        def mock_kickoff_fn(self: Any, inputs: Any = None, **kwargs: Any) -> Any:
            call_count[0] += 1
            if call_count[0] == 1:
                return original_kickoff(self, inputs, **kwargs)
            else:
                # The inner call fails; the error should surface to the
                # consumer iterating the stream.
                raise ValueError("Test error")

        with patch.object(Crew, "kickoff", mock_kickoff_fn):
            streaming = crew.kickoff()
            with pytest.raises(ValueError, match="Test error"):
                list(streaming)

    def test_streaming_with_empty_content_chunks(self) -> None:
        """Test streaming when LLM chunks have empty content."""
        mock_output = MagicMock()
        mock_output.raw = "No streaming"

        def gen() -> Generator[StreamChunk, None, None]:
            yield StreamChunk(content="")

        streaming = CrewStreamingOutput(sync_iterator=gen())
        chunks = list(streaming)
        assert streaming.is_completed
        assert len(chunks) == 1
        assert chunks[0].content == ""
        # Simulate what _finalize_streaming does
        streaming._set_result(mock_output)
        result = streaming.result
        assert result.raw == "No streaming"

    def test_streaming_with_multiple_tasks(self, researcher: Agent) -> None:
        """Test streaming with multiple tasks tracks task context."""
        task1 = Task(
            description="First task",
            expected_output="First output",
            agent=researcher,
        )
        task2 = Task(
            description="Second task",
            expected_output="Second output",
            agent=researcher,
        )
        crew = Crew(
            agents=[researcher],
            tasks=[task1, task2],
            verbose=False,
            stream=True,
        )
        mock_output = MagicMock()
        mock_output.raw = "Multi-task output"
        original_kickoff = Crew.kickoff
        call_count = [0]

        def mock_kickoff_fn(self: Any, inputs: Any = None, **kwargs: Any) -> Any:
            call_count[0] += 1
            if call_count[0] == 1:
                return original_kickoff(self, inputs, **kwargs)
            else:
                # Chunk carries task_name so task context can be asserted.
                crewai_event_bus.emit(
                    crew,
                    LLMStreamChunkEvent(
                        type="llm_stream_chunk",
                        chunk="Task 1",
                        task_name="First task",
                        call_id="test-call-id",
                    ),
                )
                return mock_output

        with patch.object(Crew, "kickoff", mock_kickoff_fn):
            streaming = crew.kickoff()
            assert isinstance(streaming, CrewStreamingOutput)
            chunks = list(streaming)
            assert len(chunks) >= 1
            assert streaming.is_completed
class TestStreamingImports:
    """Tests for correct imports of streaming types."""

    def test_streaming_types_importable_from_types_module(self) -> None:
        """All public streaming types resolve from crewai.types.streaming."""
        from crewai.types.streaming import (
            CrewStreamingOutput,
            FlowStreamingOutput,
            StreamChunk,
            StreamChunkType,
            ToolCallChunk,
        )

        exported = (
            CrewStreamingOutput,
            FlowStreamingOutput,
            StreamChunk,
            StreamChunkType,
            ToolCallChunk,
        )
        for symbol in exported:
            assert symbol is not None
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/test_streaming.py",
"license": "MIT License",
"lines": 588,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/tests/test_streaming_integration.py | """Integration tests for streaming with real LLM interactions using cassettes."""
import pytest
from crewai import Agent, Crew, Task
from crewai.flow.flow import Flow, start
from crewai.types.streaming import CrewStreamingOutput, FlowStreamingOutput
@pytest.fixture
def researcher() -> Agent:
    """Build the standard research-analyst agent used across these tests."""
    agent_spec = {
        "role": "Research Analyst",
        "goal": "Gather comprehensive information on topics",
        "backstory": "You are an experienced researcher with excellent analytical skills.",
        "allow_delegation": False,
    }
    return Agent(**agent_spec)
@pytest.fixture
def simple_task(researcher: Agent) -> Task:
    """Build a one-off research task (topic is filled in via kickoff inputs)."""
    task_spec = {
        "description": "Research the latest developments in {topic}",
        "expected_output": "A brief summary of recent developments",
        "agent": researcher,
    }
    return Task(**task_spec)
class TestStreamingCrewIntegration:
    """Integration tests for crew streaming that match documentation examples."""

    @pytest.mark.vcr()
    def test_basic_crew_streaming_from_docs(
        self, researcher: Agent, simple_task: Task
    ) -> None:
        """Test basic streaming example from documentation."""
        crew = Crew(
            agents=[researcher],
            tasks=[simple_task],
            stream=True,
            verbose=False,
        )
        streaming = crew.kickoff(inputs={"topic": "artificial intelligence"})
        assert isinstance(streaming, CrewStreamingOutput)
        chunks = []
        for chunk in streaming:
            chunks.append(chunk.content)
        assert len(chunks) > 0
        # .result is only accessed after the stream has been fully consumed.
        result = streaming.result
        assert result.raw is not None
        assert len(result.raw) > 0

    @pytest.mark.vcr()
    def test_streaming_with_chunk_context_from_docs(
        self, researcher: Agent, simple_task: Task
    ) -> None:
        """Test streaming with chunk context example from documentation."""
        crew = Crew(
            agents=[researcher],
            tasks=[simple_task],
            stream=True,
            verbose=False,
        )
        streaming = crew.kickoff(inputs={"topic": "AI"})
        chunk_contexts = []
        # Capture the per-chunk metadata exposed alongside the content.
        for chunk in streaming:
            chunk_contexts.append(
                {
                    "task_name": chunk.task_name,
                    "task_index": chunk.task_index,
                    "agent_role": chunk.agent_role,
                    "content": chunk.content,
                    "type": chunk.chunk_type,
                }
            )
        assert len(chunk_contexts) > 0
        assert all("agent_role" in ctx for ctx in chunk_contexts)
        result = streaming.result
        assert result is not None

    @pytest.mark.vcr()
    def test_streaming_properties_from_docs(
        self, researcher: Agent, simple_task: Task
    ) -> None:
        """Test streaming properties example from documentation."""
        crew = Crew(
            agents=[researcher],
            tasks=[simple_task],
            stream=True,
            verbose=False,
        )
        streaming = crew.kickoff(inputs={"topic": "AI"})
        for _ in streaming:
            pass
        assert streaming.is_completed is True
        full_text = streaming.get_full_text()
        assert len(full_text) > 0
        assert len(streaming.chunks) > 0
        result = streaming.result
        assert result.raw is not None

    @pytest.mark.vcr()
    @pytest.mark.asyncio
    async def test_async_streaming_from_docs(
        self, researcher: Agent, simple_task: Task
    ) -> None:
        """Test async streaming example from documentation."""
        crew = Crew(
            agents=[researcher],
            tasks=[simple_task],
            stream=True,
            verbose=False,
        )
        streaming = await crew.kickoff_async(inputs={"topic": "AI"})
        assert isinstance(streaming, CrewStreamingOutput)
        chunks = []
        async for chunk in streaming:
            chunks.append(chunk.content)
        assert len(chunks) > 0
        result = streaming.result
        assert result.raw is not None

    @pytest.mark.vcr()
    def test_kickoff_for_each_streaming_from_docs(
        self, researcher: Agent, simple_task: Task
    ) -> None:
        """Test kickoff_for_each streaming example from documentation."""
        crew = Crew(
            agents=[researcher],
            tasks=[simple_task],
            stream=True,
            verbose=False,
        )
        inputs_list = [{"topic": "AI in healthcare"}, {"topic": "AI in finance"}]
        streaming_outputs = crew.kickoff_for_each(inputs=inputs_list)
        assert len(streaming_outputs) == 2
        assert all(isinstance(s, CrewStreamingOutput) for s in streaming_outputs)
        results = []
        for streaming in streaming_outputs:
            # Drain each stream before touching its .result.
            for _ in streaming:
                pass
            result = streaming.result
            results.append(result)
        assert len(results) == 2
        assert all(r.raw is not None for r in results)
class TestStreamingFlowIntegration:
    """Integration tests for flow streaming that match documentation examples."""

    @pytest.mark.vcr()
    def test_basic_flow_streaming_from_docs(self) -> None:
        """Test basic flow streaming example from documentation."""

        class ResearchFlow(Flow):
            stream = True

            @start()
            def research_topic(self) -> str:
                researcher = Agent(
                    role="Research Analyst",
                    goal="Research topics thoroughly",
                    backstory="Expert researcher with analytical skills",
                    allow_delegation=False,
                )
                task = Task(
                    description="Research AI trends and provide insights",
                    expected_output="Detailed research findings",
                    agent=researcher,
                )
                crew = Crew(
                    agents=[researcher],
                    tasks=[task],
                    stream=True,
                    verbose=False,
                )
                streaming = crew.kickoff()
                # Drain the inner crew stream so its result is available.
                for _ in streaming:
                    pass
                return streaming.result.raw

        flow = ResearchFlow()
        streaming = flow.kickoff()
        assert isinstance(streaming, FlowStreamingOutput)
        chunks = []
        for chunk in streaming:
            chunks.append(chunk.content)
        assert len(chunks) > 0
        result = streaming.result
        assert result is not None

    @pytest.mark.vcr()
    def test_flow_streaming_properties_from_docs(self) -> None:
        """Test flow streaming properties example from documentation."""

        class SimpleFlow(Flow):
            stream = True

            @start()
            def execute(self) -> str:
                return "Flow result"

        flow = SimpleFlow()
        streaming = flow.kickoff()
        for _ in streaming:
            pass
        assert streaming.is_completed is True
        streaming.get_full_text()
        # A flow without LLM calls may legitimately produce zero chunks.
        assert len(streaming.chunks) >= 0
        result = streaming.result
        assert result is not None

    @pytest.mark.vcr()
    @pytest.mark.asyncio
    async def test_async_flow_streaming_from_docs(self) -> None:
        """Test async flow streaming example from documentation."""

        class AsyncResearchFlow(Flow):
            stream = True

            @start()
            def research(self) -> str:
                researcher = Agent(
                    role="Researcher",
                    goal="Research topics",
                    backstory="Expert researcher",
                    allow_delegation=False,
                )
                task = Task(
                    description="Research AI",
                    expected_output="Research findings",
                    agent=researcher,
                )
                crew = Crew(agents=[researcher], tasks=[task], stream=True, verbose=False)
                streaming = crew.kickoff()
                for _ in streaming:
                    pass
                return streaming.result.raw

        flow = AsyncResearchFlow()
        streaming = await flow.kickoff_async()
        assert isinstance(streaming, FlowStreamingOutput)
        chunks = []
        async for chunk in streaming:
            chunks.append(chunk.content)
        result = streaming.result
        assert result is not None
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/test_streaming_integration.py",
"license": "MIT License",
"lines": 222,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/src/crewai/cli/authentication/providers/entra_id.py | from typing import cast
from crewai.cli.authentication.providers.base_provider import BaseProvider
class EntraIdProvider(BaseProvider):
    """OAuth2 device-flow provider backed by Microsoft Entra ID.

    Every endpoint is derived from the tenant-scoped
    ``login.microsoftonline.com`` base URL; ``settings.domain`` holds the
    tenant identifier.
    """

    def get_authorize_url(self) -> str:
        """Return the v2.0 device-code authorization endpoint."""
        return f"{self._base_url()}/oauth2/v2.0/devicecode"

    def get_token_url(self) -> str:
        """Return the v2.0 token endpoint."""
        return f"{self._base_url()}/oauth2/v2.0/token"

    def get_jwks_url(self) -> str:
        """Return the JWKS key-discovery endpoint."""
        return f"{self._base_url()}/discovery/v2.0/keys"

    def get_issuer(self) -> str:
        """Return the expected token issuer for this tenant."""
        return f"{self._base_url()}/v2.0"

    def get_audience(self) -> str:
        """Return the configured audience, failing fast when unset."""
        audience = self.settings.audience
        if audience is None:
            raise ValueError(
                "Audience is required. Please set it in the configuration."
            )
        return audience

    def get_client_id(self) -> str:
        """Return the configured client ID, failing fast when unset."""
        client_id = self.settings.client_id
        if client_id is None:
            raise ValueError(
                "Client ID is required. Please set it in the configuration."
            )
        return client_id

    def get_oauth_scopes(self) -> list[str]:
        """Base scopes plus any space-separated scopes from ``extra['scope']``."""
        extra_scope = cast(str, self.settings.extra.get("scope", ""))
        return [*super().get_oauth_scopes(), *extra_scope.split()]

    def get_required_fields(self) -> list[str]:
        """Entra ID additionally requires a ``scope`` entry in ``extra``."""
        return ["scope"]

    def _base_url(self) -> str:
        """Tenant-scoped Microsoft login base URL."""
        return f"https://login.microsoftonline.com/{self.settings.domain}"
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/cli/authentication/providers/entra_id.py",
"license": "MIT License",
"lines": 32,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai/tests/cli/authentication/providers/test_entra_id.py | import pytest
from crewai.cli.authentication.main import Oauth2Settings
from crewai.cli.authentication.providers.entra_id import EntraIdProvider
class TestEntraIdProvider:
    """Unit tests for EntraIdProvider URL construction and settings validation."""

    @pytest.fixture(autouse=True)
    def setup_method(self):
        """Build a provider with representative tenant/client/scope settings."""
        self.valid_settings = Oauth2Settings(
            provider="entra_id",
            domain="tenant-id-abcdef123456",
            client_id="test-client-id",
            audience="test-audience",
            extra={
                "scope": "openid profile email api://crewai-cli-dev/read"
            }
        )
        self.provider = EntraIdProvider(self.valid_settings)

    def test_initialization_with_valid_settings(self):
        """The provider stores the settings object it was constructed with."""
        provider = EntraIdProvider(self.valid_settings)
        assert provider.settings == self.valid_settings
        assert provider.settings.provider == "entra_id"
        assert provider.settings.domain == "tenant-id-abcdef123456"
        assert provider.settings.client_id == "test-client-id"
        assert provider.settings.audience == "test-audience"

    def test_get_authorize_url(self):
        """Device-code URL embeds the tenant from settings.domain."""
        expected_url = "https://login.microsoftonline.com/tenant-id-abcdef123456/oauth2/v2.0/devicecode"
        assert self.provider.get_authorize_url() == expected_url

    def test_get_authorize_url_with_different_domain(self):
        # For EntraID, the domain is the tenant ID.
        settings = Oauth2Settings(
            provider="entra_id",
            domain="my-company.entra.id",
            client_id="test-client",
            audience="test-audience",
        )
        provider = EntraIdProvider(settings)
        expected_url = "https://login.microsoftonline.com/my-company.entra.id/oauth2/v2.0/devicecode"
        assert provider.get_authorize_url() == expected_url

    def test_get_token_url(self):
        """Token URL embeds the tenant from settings.domain."""
        expected_url = "https://login.microsoftonline.com/tenant-id-abcdef123456/oauth2/v2.0/token"
        assert self.provider.get_token_url() == expected_url

    def test_get_token_url_with_different_domain(self):
        # For EntraID, the domain is the tenant ID.
        settings = Oauth2Settings(
            provider="entra_id",
            domain="another-domain.entra.id",
            client_id="test-client",
            audience="test-audience",
        )
        provider = EntraIdProvider(settings)
        expected_url = "https://login.microsoftonline.com/another-domain.entra.id/oauth2/v2.0/token"
        assert provider.get_token_url() == expected_url

    def test_get_jwks_url(self):
        """JWKS URL embeds the tenant from settings.domain."""
        expected_url = "https://login.microsoftonline.com/tenant-id-abcdef123456/discovery/v2.0/keys"
        assert self.provider.get_jwks_url() == expected_url

    def test_get_jwks_url_with_different_domain(self):
        # For EntraID, the domain is the tenant ID.
        settings = Oauth2Settings(
            provider="entra_id",
            domain="dev.entra.id",
            client_id="test-client",
            audience="test-audience",
        )
        provider = EntraIdProvider(settings)
        expected_url = "https://login.microsoftonline.com/dev.entra.id/discovery/v2.0/keys"
        assert provider.get_jwks_url() == expected_url

    def test_get_issuer(self):
        """Issuer is the tenant base URL with a /v2.0 suffix."""
        expected_issuer = "https://login.microsoftonline.com/tenant-id-abcdef123456/v2.0"
        assert self.provider.get_issuer() == expected_issuer

    def test_get_issuer_with_different_domain(self):
        # For EntraID, the domain is the tenant ID.
        settings = Oauth2Settings(
            provider="entra_id",
            domain="other-tenant-id-xpto",
            client_id="test-client",
            audience="test-audience",
        )
        provider = EntraIdProvider(settings)
        expected_issuer = "https://login.microsoftonline.com/other-tenant-id-xpto/v2.0"
        assert provider.get_issuer() == expected_issuer

    def test_get_audience(self):
        """Audience is returned verbatim from settings."""
        assert self.provider.get_audience() == "test-audience"

    def test_get_audience_assertion_error_when_none(self):
        """A missing audience raises ValueError instead of returning None."""
        settings = Oauth2Settings(
            provider="entra_id",
            domain="test-tenant-id",
            client_id="test-client-id",
            audience=None,
        )
        provider = EntraIdProvider(settings)
        with pytest.raises(ValueError, match="Audience is required"):
            provider.get_audience()

    def test_get_client_id(self):
        """Client ID is returned verbatim from settings."""
        assert self.provider.get_client_id() == "test-client-id"

    def test_get_required_fields(self):
        """Entra ID requires only the 'scope' extra field."""
        assert set(self.provider.get_required_fields()) == set(["scope"])

    def test_get_oauth_scopes(self):
        """Custom scopes from extra['scope'] are appended to the base scopes."""
        settings = Oauth2Settings(
            provider="entra_id",
            domain="tenant-id-abcdef123456",
            client_id="test-client-id",
            audience="test-audience",
            extra={
                "scope": "api://crewai-cli-dev/read"
            }
        )
        provider = EntraIdProvider(settings)
        assert provider.get_oauth_scopes() == ["openid", "profile", "email", "api://crewai-cli-dev/read"]

    def test_get_oauth_scopes_with_multiple_custom_scopes(self):
        """Space-separated extra scopes are split into individual entries."""
        settings = Oauth2Settings(
            provider="entra_id",
            domain="tenant-id-abcdef123456",
            client_id="test-client-id",
            audience="test-audience",
            extra={
                "scope": "api://crewai-cli-dev/read api://crewai-cli-dev/write custom-scope1 custom-scope2"
            }
        )
        provider = EntraIdProvider(settings)
        assert provider.get_oauth_scopes() == ["openid", "profile", "email", "api://crewai-cli-dev/read", "api://crewai-cli-dev/write", "custom-scope1", "custom-scope2"]
def test_base_url(self):
assert self.provider._base_url() == "https://login.microsoftonline.com/tenant-id-abcdef123456" | {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/cli/authentication/providers/test_entra_id.py",
"license": "MIT License",
"lines": 121,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai-tools/src/crewai_tools/tools/merge_agent_handler_tool/merge_agent_handler_tool.py | """Merge Agent Handler tools wrapper for CrewAI."""
import json
import logging
from typing import Any
from uuid import uuid4
from crewai.tools import BaseTool, EnvVar
from pydantic import BaseModel, Field, create_model
import requests
import typing_extensions as te
logger = logging.getLogger(__name__)
class MergeAgentHandlerToolError(Exception):
    """Raised for any failure while invoking Merge Agent Handler tools."""
class MergeAgentHandlerTool(BaseTool):
    """
    Wrapper for Merge Agent Handler tools.
    This tool allows CrewAI agents to execute tools from Merge Agent Handler,
    which provides secure access to third-party integrations via the Model Context Protocol (MCP).
    Agent Handler manages authentication, permissions, and monitoring of all tool interactions.
    """

    # Identifies the Tool Pack and user context every MCP request runs under.
    tool_pack_id: str = Field(
        ..., description="UUID of the Agent Handler Tool Pack to use"
    )
    registered_user_id: str = Field(
        ..., description="UUID or origin_id of the registered user"
    )
    tool_name: str = Field(..., description="Name of the specific tool to execute")
    base_url: str = Field(
        default="https://ah-api.merge.dev",
        description="Base URL for Agent Handler API",
    )
    # Left as None by default; model_post_init mints a fresh UUID in that case.
    session_id: str | None = Field(
        default=None, description="MCP session ID (generated if not provided)"
    )
    # Declares the API key requirement so CrewAI can surface missing env vars.
    env_vars: list[EnvVar] = Field(
        default_factory=lambda: [
            EnvVar(
                name="AGENT_HANDLER_API_KEY",
                description="Production API key for Agent Handler services",
                required=True,
            ),
        ]
    )
def model_post_init(self, __context: Any) -> None:
    """After pydantic initialization, ensure an MCP session ID exists."""
    super().model_post_init(__context)
    if self.session_id is not None:
        return
    # No session supplied by the caller: mint a unique one per instance.
    self.session_id = str(uuid4())
def _get_api_key(self) -> str:
    """Read the Agent Handler API key from the environment.

    Raises:
        MergeAgentHandlerToolError: when AGENT_HANDLER_API_KEY is unset or empty.
    """
    import os

    key = os.environ.get("AGENT_HANDLER_API_KEY")
    if key:
        return key
    raise MergeAgentHandlerToolError(
        "AGENT_HANDLER_API_KEY environment variable is required. "
        "Set it with: export AGENT_HANDLER_API_KEY='your-key-here'"
    )
def _make_mcp_request(
    self, method: str, params: dict[str, Any] | None = None
) -> dict[str, Any]:
    """Make a JSON-RPC 2.0 MCP request to Agent Handler.

    Args:
        method: JSON-RPC method name (e.g. "tools/call", "tools/list").
        params: Optional JSON-RPC params object; omitted from the payload
            when falsy.

    Returns:
        The decoded JSON-RPC response body.

    Raises:
        MergeAgentHandlerToolError: on transport failures or when the
            response carries a JSON-RPC "error" member.
    """
    url = f"{self.base_url}/api/v1/tool-packs/{self.tool_pack_id}/registered-users/{self.registered_user_id}/mcp"
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {self._get_api_key()}",
        # Fall back to a throwaway session ID if none was initialized.
        "Mcp-Session-Id": self.session_id or str(uuid4()),
    }
    payload: dict[str, Any] = {
        "jsonrpc": "2.0",
        "method": method,
        "id": str(uuid4()),
    }
    if params:
        payload["params"] = params
    # Log the full payload for debugging
    logger.debug(f"MCP Request to {url}: {json.dumps(payload, indent=2)}")
    try:
        response = requests.post(url, json=payload, headers=headers, timeout=60)
        response.raise_for_status()
        result = response.json()
        # Handle JSON-RPC error responses
        if "error" in result:
            error_msg = result["error"].get("message", "Unknown error")
            error_code = result["error"].get("code", -1)
            logger.error(
                f"Agent Handler API error (code {error_code}): {error_msg}"
            )
            raise MergeAgentHandlerToolError(f"API Error: {error_msg}")
        return result
    except requests.exceptions.RequestException as e:
        logger.error(f"Failed to call Agent Handler API: {e!s}")
        raise MergeAgentHandlerToolError(
            f"Failed to communicate with Agent Handler API: {e!s}"
        ) from e
def _run(self, **kwargs: Any) -> Any:
    """Execute the Agent Handler tool with the given arguments.

    Returns the decoded JSON payload when the MCP response carries text
    content, the raw text when that content is not valid JSON, or the
    full JSON-RPC response otherwise.
    """
    try:
        # Log what we're about to send
        logger.info(f"Executing {self.tool_name} with arguments: {kwargs}")
        # Make the tool call via MCP
        result = self._make_mcp_request(
            method="tools/call",
            params={"name": self.tool_name, "arguments": kwargs},
        )
        # Extract the actual result from the MCP response
        if "result" in result and "content" in result["result"]:
            content = result["result"]["content"]
            if content and len(content) > 0:
                # Parse the text content (it's JSON-encoded)
                text_content = content[0].get("text", "")
                try:
                    return json.loads(text_content)
                except json.JSONDecodeError:
                    return text_content
        return result
    except MergeAgentHandlerToolError:
        # Already a well-formed tool error; propagate unchanged.
        raise
    except Exception as e:
        logger.error(f"Unexpected error executing tool {self.tool_name}: {e!s}")
        raise MergeAgentHandlerToolError(f"Tool execution failed: {e!s}") from e
@classmethod
def from_tool_name(
    cls,
    tool_name: str,
    tool_pack_id: str,
    registered_user_id: str,
    base_url: str = "https://ah-api.merge.dev",
    **kwargs: Any,
) -> te.Self:
    """
    Create a MergeAgentHandlerTool from a tool name.

    Args:
        tool_name: Name of the tool (e.g., "linear__create_issue")
        tool_pack_id: UUID of the Tool Pack
        registered_user_id: UUID of the registered user
        base_url: Base URL for Agent Handler API (defaults to production)
        **kwargs: Additional arguments to pass to the tool

    Returns:
        MergeAgentHandlerTool instance ready to use

    Example:
        >>> tool = MergeAgentHandlerTool.from_tool_name(
        ...     tool_name="linear__create_issue",
        ...     tool_pack_id="134e0111-0f67-44f6-98f0-597000290bb3",
        ...     registered_user_id="91b2b905-e866-40c8-8be2-efe53827a0aa"
        ... )
    """
    # Start with an empty args schema (a proper BaseModel subclass); it is
    # replaced below when the remote tool advertises a parameters schema.
    empty_args_schema = create_model(f"{tool_name.replace('__', '_').title()}Args")
    instance = cls(
        name=tool_name,
        description=f"Execute {tool_name} via Agent Handler",
        tool_pack_id=tool_pack_id,
        registered_user_id=registered_user_id,
        tool_name=tool_name,
        base_url=base_url,
        args_schema=empty_args_schema,
        **kwargs,
    )
    # Best-effort enrichment: fetch the real description and parameter schema
    # from the Agent Handler; any failure leaves the defaults in place.
    try:
        result = instance._make_mcp_request(method="tools/list")
        if "result" in result and "tools" in result["result"]:
            tools = result["result"]["tools"]
            tool_schema = next(
                (t for t in tools if t.get("name") == tool_name), None
            )
            if tool_schema:
                instance.description = tool_schema.get(
                    "description", instance.description
                )
                if "parameters" in tool_schema:
                    try:
                        args_schema = cls._build_args_schema(
                            tool_name, tool_schema["parameters"]
                        )
                        if args_schema is not None:
                            instance.args_schema = args_schema
                    except Exception as e:
                        logger.warning(
                            f"Failed to create args schema for {tool_name}: {e!s}"
                        )
    except Exception as e:
        logger.warning(
            f"Failed to fetch tool schema for {tool_name}, using defaults: {e!s}"
        )
    return instance

@staticmethod
def _python_type_for_json_type(json_type: str) -> Any:
    """Map a JSON-schema primitive type name to the matching Python type.

    Unknown type names fall back to ``Any``.
    """
    return {
        "string": str,
        "integer": int,
        "number": float,
        "boolean": bool,
        "array": list[Any],
        "object": dict[str, Any],
    }.get(json_type, Any)

@classmethod
def _build_args_schema(
    cls, tool_name: str, params: dict[str, Any]
) -> type[BaseModel] | None:
    """Build a Pydantic args-schema model from a JSON-schema ``parameters`` dict.

    Args:
        tool_name: Tool name used to derive the generated model's name.
        params: JSON-schema dict; expects ``type == "object"`` with a
            ``properties`` mapping, and an optional ``required`` name list.

    Returns:
        The generated Pydantic model, or None when the schema describes no
        usable fields (caller then keeps the existing schema).
    """
    if params.get("type") != "object" or "properties" not in params:
        return None
    required = params.get("required", [])
    fields: dict[str, Any] = {}
    for field_name, field_schema in params["properties"].items():
        field_type = cls._python_type_for_json_type(
            field_schema.get("type", "string")
        )
        field_default: Any = ...  # Ellipsis marks the field as required
        if field_name not in required:
            # Optional fields accept None and default to None.
            field_type = field_type | None
            field_default = None
        field_description = field_schema.get("description")
        if field_description:
            fields[field_name] = (
                field_type,
                Field(
                    default=field_default,
                    description=field_description,
                ),
            )
        else:
            fields[field_name] = (field_type, field_default)
    if not fields:
        return None
    return create_model(
        f"{tool_name.replace('__', '_').title()}Args",
        **fields,
    )
@classmethod
def from_tool_pack(
    cls,
    tool_pack_id: str,
    registered_user_id: str,
    tool_names: list[str] | None = None,
    base_url: str = "https://ah-api.merge.dev",
    **kwargs: Any,
) -> list[te.Self]:
    """
    Create multiple MergeAgentHandlerTool instances from a Tool Pack.

    Args:
        tool_pack_id: UUID of the Tool Pack
        registered_user_id: UUID or origin_id of the registered user
        tool_names: Optional list of specific tool names to load. If None, loads all tools.
        base_url: Base URL for Agent Handler API (defaults to production)
        **kwargs: Additional arguments to pass to each tool

    Returns:
        List of MergeAgentHandlerTool instances

    Example:
        >>> tools = MergeAgentHandlerTool.from_tool_pack(
        ...     tool_pack_id="134e0111-0f67-44f6-98f0-597000290bb3",
        ...     registered_user_id="91b2b905-e866-40c8-8be2-efe53827a0aa",
        ...     tool_names=["linear__create_issue", "linear__get_issues"]
        ... )
    """
    # Create a temporary instance to fetch the tool list.  It only exists for
    # its _make_mcp_request plumbing and is discarded afterwards; args_schema
    # is the bare BaseModel class since it is never used.
    temp_instance = cls(
        name="temp",
        description="temp",
        tool_pack_id=tool_pack_id,
        registered_user_id=registered_user_id,
        tool_name="temp",
        base_url=base_url,
        args_schema=BaseModel,
    )
    try:
        # Fetch available tools
        result = temp_instance._make_mcp_request(method="tools/list")
        if "result" not in result or "tools" not in result["result"]:
            raise MergeAgentHandlerToolError(
                "Failed to fetch tools from Agent Handler Tool Pack"
            )
        available_tools = result["result"]["tools"]
        # Filter tools if specific names were requested
        if tool_names:
            available_tools = [
                t for t in available_tools if t.get("name") in tool_names
            ]
            # Check if all requested tools were found; missing ones only warn,
            # they do not fail the whole load.
            found_names = {t.get("name") for t in available_tools}
            missing_names = set(tool_names) - found_names
            if missing_names:
                logger.warning(
                    f"The following tools were not found in the Tool Pack: {missing_names}"
                )
        # Create tool instances.
        # NOTE(review): from_tool_name re-fetches tools/list for every tool,
        # so loading N tools issues N+1 HTTP requests — consider reusing the
        # schema already fetched above if this becomes a bottleneck.
        tools = []
        for tool_schema in available_tools:
            tool_name = tool_schema.get("name")
            if not tool_name:
                # Skip malformed entries without a name.
                continue
            tool = cls.from_tool_name(
                tool_name=tool_name,
                tool_pack_id=tool_pack_id,
                registered_user_id=registered_user_id,
                base_url=base_url,
                **kwargs,
            )
            tools.append(tool)
        return tools
    except MergeAgentHandlerToolError:
        # Domain errors pass through unchanged.
        raise
    except Exception as e:
        logger.error(f"Failed to create tools from Tool Pack: {e!s}")
        raise MergeAgentHandlerToolError(f"Failed to load Tool Pack: {e!s}") from e
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/src/crewai_tools/tools/merge_agent_handler_tool/merge_agent_handler_tool.py",
"license": "MIT License",
"lines": 304,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai-tools/tests/tools/merge_agent_handler_tool_test.py | """Tests for MergeAgentHandlerTool."""
import os
from unittest.mock import Mock, patch
import pytest
from crewai_tools import MergeAgentHandlerTool
@pytest.fixture(autouse=True)
def mock_agent_handler_api_key():
    """Inject a fake AGENT_HANDLER_API_KEY into the environment for every test."""
    fake_env = {"AGENT_HANDLER_API_KEY": "test_key"}
    with patch.dict(os.environ, fake_env):
        yield
@pytest.fixture
def mock_tool_pack_response():
    """Canned JSON-RPC reply for a tools/list MCP request (two Linear tools)."""
    create_issue_params = {
        "type": "object",
        "properties": {
            "title": {
                "type": "string",
                "description": "The issue title",
            },
            "description": {
                "type": "string",
                "description": "The issue description",
            },
            "priority": {
                "type": "integer",
                "description": "Priority level (1-4)",
            },
        },
        "required": ["title"],
    }
    get_issues_params = {
        "type": "object",
        "properties": {
            "filter": {
                "type": "object",
                "description": "Filter criteria",
            }
        },
    }
    tool_entries = [
        {
            "name": "linear__create_issue",
            "description": "Creates a new issue in Linear",
            "parameters": create_issue_params,
        },
        {
            "name": "linear__get_issues",
            "description": "Get issues from Linear",
            "parameters": get_issues_params,
        },
    ]
    return {
        "jsonrpc": "2.0",
        "id": "test-id",
        "result": {"tools": tool_entries},
    }


@pytest.fixture
def mock_tool_execute_response():
    """Canned JSON-RPC reply for a tools/call MCP request with a JSON text payload."""
    payload_text = '{"success": true, "id": "ISS-123", "title": "Test Issue"}'
    return {
        "jsonrpc": "2.0",
        "id": "test-id",
        "result": {
            "content": [
                {
                    "type": "text",
                    "text": payload_text,
                }
            ]
        },
    }
def test_tool_initialization():
    """Test basic tool initialization."""
    tool = MergeAgentHandlerTool(
        name="test_tool",
        description="Test tool",
        tool_pack_id="test-pack-id",
        registered_user_id="test-user-id",
        tool_name="linear__create_issue",
    )
    assert tool.name == "test_tool"
    assert "Test tool" in tool.description  # Description gets formatted by BaseTool
    assert tool.tool_pack_id == "test-pack-id"
    assert tool.registered_user_id == "test-user-id"
    assert tool.tool_name == "linear__create_issue"
    # Defaults: production endpoint and an auto-generated MCP session id.
    assert tool.base_url == "https://ah-api.merge.dev"
    assert tool.session_id is not None


def test_tool_initialization_with_custom_base_url():
    """Test tool initialization with custom base URL."""
    tool = MergeAgentHandlerTool(
        name="test_tool",
        description="Test tool",
        tool_pack_id="test-pack-id",
        registered_user_id="test-user-id",
        tool_name="linear__create_issue",
        base_url="http://localhost:8000",
    )
    assert tool.base_url == "http://localhost:8000"


def test_missing_api_key():
    """Test that missing API key raises appropriate error."""
    # clear=True removes the AGENT_HANDLER_API_KEY injected by the autouse fixture.
    with patch.dict(os.environ, {}, clear=True):
        tool = MergeAgentHandlerTool(
            name="test_tool",
            description="Test tool",
            tool_pack_id="test-pack-id",
            registered_user_id="test-user-id",
            tool_name="linear__create_issue",
        )
        with pytest.raises(Exception) as exc_info:
            tool._get_api_key()
        # The error message should tell the user which env var is missing.
        assert "AGENT_HANDLER_API_KEY" in str(exc_info.value)
@patch("requests.post")
def test_mcp_request_success(mock_post, mock_tool_pack_response):
"""Test successful MCP request."""
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = mock_tool_pack_response
mock_post.return_value = mock_response
tool = MergeAgentHandlerTool(
name="test_tool",
description="Test tool",
tool_pack_id="test-pack-id",
registered_user_id="test-user-id",
tool_name="linear__create_issue",
)
result = tool._make_mcp_request(method="tools/list")
assert "result" in result
assert "tools" in result["result"]
assert len(result["result"]["tools"]) == 2
@patch("requests.post")
def test_mcp_request_error(mock_post):
"""Test MCP request with error response."""
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = {
"jsonrpc": "2.0",
"id": "test-id",
"error": {"code": -32601, "message": "Method not found"},
}
mock_post.return_value = mock_response
tool = MergeAgentHandlerTool(
name="test_tool",
description="Test tool",
tool_pack_id="test-pack-id",
registered_user_id="test-user-id",
tool_name="linear__create_issue",
)
with pytest.raises(Exception) as exc_info:
tool._make_mcp_request(method="invalid/method")
assert "Method not found" in str(exc_info.value)
@patch("requests.post")
def test_mcp_request_http_error(mock_post):
"""Test MCP request with HTTP error."""
mock_post.side_effect = Exception("Connection error")
tool = MergeAgentHandlerTool(
name="test_tool",
description="Test tool",
tool_pack_id="test-pack-id",
registered_user_id="test-user-id",
tool_name="linear__create_issue",
)
with pytest.raises(Exception) as exc_info:
tool._make_mcp_request(method="tools/list")
assert "Connection error" in str(exc_info.value)
@patch("requests.post")
def test_tool_execution(mock_post, mock_tool_execute_response):
"""Test tool execution via _run method."""
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = mock_tool_execute_response
mock_post.return_value = mock_response
tool = MergeAgentHandlerTool(
name="test_tool",
description="Test tool",
tool_pack_id="test-pack-id",
registered_user_id="test-user-id",
tool_name="linear__create_issue",
)
result = tool._run(title="Test Issue", description="Test description")
assert result["success"] is True
assert result["id"] == "ISS-123"
assert result["title"] == "Test Issue"
@patch("requests.post")
def test_from_tool_name(mock_post, mock_tool_pack_response):
"""Test creating tool from tool name."""
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = mock_tool_pack_response
mock_post.return_value = mock_response
tool = MergeAgentHandlerTool.from_tool_name(
tool_name="linear__create_issue",
tool_pack_id="test-pack-id",
registered_user_id="test-user-id",
)
assert tool.name == "linear__create_issue"
assert tool.description == "Creates a new issue in Linear"
assert tool.tool_name == "linear__create_issue"
@patch("requests.post")
def test_from_tool_name_with_custom_base_url(mock_post, mock_tool_pack_response):
"""Test creating tool from tool name with custom base URL."""
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = mock_tool_pack_response
mock_post.return_value = mock_response
tool = MergeAgentHandlerTool.from_tool_name(
tool_name="linear__create_issue",
tool_pack_id="test-pack-id",
registered_user_id="test-user-id",
base_url="http://localhost:8000",
)
assert tool.base_url == "http://localhost:8000"
@patch("requests.post")
def test_from_tool_pack_all_tools(mock_post, mock_tool_pack_response):
"""Test creating all tools from a Tool Pack."""
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = mock_tool_pack_response
mock_post.return_value = mock_response
tools = MergeAgentHandlerTool.from_tool_pack(
tool_pack_id="test-pack-id",
registered_user_id="test-user-id",
)
assert len(tools) == 2
assert tools[0].name == "linear__create_issue"
assert tools[1].name == "linear__get_issues"
@patch("requests.post")
def test_from_tool_pack_specific_tools(mock_post, mock_tool_pack_response):
"""Test creating specific tools from a Tool Pack."""
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = mock_tool_pack_response
mock_post.return_value = mock_response
tools = MergeAgentHandlerTool.from_tool_pack(
tool_pack_id="test-pack-id",
registered_user_id="test-user-id",
tool_names=["linear__create_issue"],
)
assert len(tools) == 1
assert tools[0].name == "linear__create_issue"
@patch("requests.post")
def test_from_tool_pack_with_custom_base_url(mock_post, mock_tool_pack_response):
"""Test creating tools from Tool Pack with custom base URL."""
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = mock_tool_pack_response
mock_post.return_value = mock_response
tools = MergeAgentHandlerTool.from_tool_pack(
tool_pack_id="test-pack-id",
registered_user_id="test-user-id",
base_url="http://localhost:8000",
)
assert len(tools) == 2
assert all(tool.base_url == "http://localhost:8000" for tool in tools)
@patch("requests.post")
def test_tool_execution_with_text_response(mock_post):
"""Test tool execution with plain text response."""
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = {
"jsonrpc": "2.0",
"id": "test-id",
"result": {"content": [{"type": "text", "text": "Plain text result"}]},
}
mock_post.return_value = mock_response
tool = MergeAgentHandlerTool(
name="test_tool",
description="Test tool",
tool_pack_id="test-pack-id",
registered_user_id="test-user-id",
tool_name="linear__create_issue",
)
result = tool._run(title="Test")
assert result == "Plain text result"
@patch("requests.post")
def test_mcp_request_builds_correct_url(mock_post, mock_tool_pack_response):
"""Test that MCP request builds correct URL."""
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = mock_tool_pack_response
mock_post.return_value = mock_response
tool = MergeAgentHandlerTool(
name="test_tool",
description="Test tool",
tool_pack_id="test-pack-123",
registered_user_id="user-456",
tool_name="linear__create_issue",
base_url="https://ah-api.merge.dev",
)
tool._make_mcp_request(method="tools/list")
expected_url = (
"https://ah-api.merge.dev/api/v1/tool-packs/"
"test-pack-123/registered-users/user-456/mcp"
)
mock_post.assert_called_once()
assert mock_post.call_args[0][0] == expected_url
@patch("requests.post")
def test_mcp_request_includes_correct_headers(mock_post, mock_tool_pack_response):
"""Test that MCP request includes correct headers."""
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = mock_tool_pack_response
mock_post.return_value = mock_response
tool = MergeAgentHandlerTool(
name="test_tool",
description="Test tool",
tool_pack_id="test-pack-id",
registered_user_id="test-user-id",
tool_name="linear__create_issue",
)
tool._make_mcp_request(method="tools/list")
mock_post.assert_called_once()
headers = mock_post.call_args.kwargs["headers"]
assert headers["Content-Type"] == "application/json"
assert headers["Authorization"] == "Bearer test_key"
assert "Mcp-Session-Id" in headers
@patch("requests.post")
def test_tool_parameters_are_passed_in_request(mock_post):
"""Test that tool parameters are correctly included in the MCP request."""
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = {
"jsonrpc": "2.0",
"id": "test-id",
"result": {"content": [{"type": "text", "text": '{"success": true}'}]},
}
mock_post.return_value = mock_response
tool = MergeAgentHandlerTool(
name="test_tool",
description="Test tool",
tool_pack_id="test-pack-id",
registered_user_id="test-user-id",
tool_name="linear__update_issue",
)
# Execute tool with specific parameters
tool._run(id="issue-123", title="New Title", priority=1)
# Verify the request was made
mock_post.assert_called_once()
# Get the JSON payload that was sent
payload = mock_post.call_args.kwargs["json"]
# Verify MCP structure
assert payload["jsonrpc"] == "2.0"
assert payload["method"] == "tools/call"
assert "id" in payload
# Verify parameters are in the request
assert "params" in payload
assert payload["params"]["name"] == "linear__update_issue"
assert "arguments" in payload["params"]
# Verify the actual arguments were passed
arguments = payload["params"]["arguments"]
assert arguments["id"] == "issue-123"
assert arguments["title"] == "New Title"
assert arguments["priority"] == 1
@patch("requests.post")
def test_tool_run_method_passes_parameters(mock_post, mock_tool_pack_response):
"""Test that parameters are passed when using the .run() method (how CrewAI calls it)."""
# Mock the tools/list response
mock_response = Mock()
mock_response.status_code = 200
# First call: tools/list
# Second call: tools/call
mock_response.json.side_effect = [
mock_tool_pack_response, # tools/list response
{
"jsonrpc": "2.0",
"id": "test-id",
"result": {"content": [{"type": "text", "text": '{"success": true, "id": "issue-123"}'}]},
}, # tools/call response
]
mock_post.return_value = mock_response
# Create tool using from_tool_name (which fetches schema)
tool = MergeAgentHandlerTool.from_tool_name(
tool_name="linear__create_issue",
tool_pack_id="test-pack-id",
registered_user_id="test-user-id",
)
# Call using .run() method (this is how CrewAI invokes tools)
result = tool.run(title="Test Issue", description="Test description", priority=2)
# Verify two calls were made: tools/list and tools/call
assert mock_post.call_count == 2
# Get the second call (tools/call)
second_call = mock_post.call_args_list[1]
payload = second_call.kwargs["json"]
# Verify it's a tools/call request
assert payload["method"] == "tools/call"
assert payload["params"]["name"] == "linear__create_issue"
# Verify parameters were passed
arguments = payload["params"]["arguments"]
assert arguments["title"] == "Test Issue"
assert arguments["description"] == "Test description"
assert arguments["priority"] == 2
# Verify result was returned
assert result["success"] is True
assert result["id"] == "issue-123"
if __name__ == "__main__":
pytest.main([__file__, "-v"])
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai-tools/tests/tools/merge_agent_handler_tool_test.py",
"license": "MIT License",
"lines": 395,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/tests/tracing/test_trace_enable_disable.py | """Tests to verify that traces are sent when enabled and not sent when disabled.
VCR will record HTTP interactions. Inspect cassettes to verify tracing behavior.
"""
import pytest
from crewai import Agent, Crew, Task
from tests.utils import wait_for_event_handlers
class TestTraceEnableDisable:
    """Test suite to verify trace sending behavior with VCR cassette recording.

    Every test runs the same minimal single-agent crew; only the tracing
    configuration (env vars and/or the ``tracing`` kwarg) differs between
    them.  Whether trace HTTP calls were made is verified by inspecting the
    recorded cassettes, so the tests themselves only assert a non-None result.
    """

    @staticmethod
    def _kickoff_hello_crew(**crew_kwargs):
        """Run a minimal one-agent/one-task crew and return the kickoff result.

        Extra keyword arguments (e.g. ``tracing=True``) are forwarded to the
        Crew constructor.  Waits for async event handlers to drain so any
        trace/telemetry HTTP traffic is captured before the test ends.
        """
        agent = Agent(
            role="Test Agent",
            goal="Test goal",
            backstory="Test backstory",
            llm="gpt-4o-mini",
        )
        task = Task(
            description="Say hello",
            expected_output="hello",
            agent=agent,
        )
        crew = Crew(agents=[agent], tasks=[task], verbose=False, **crew_kwargs)
        result = crew.kickoff()
        wait_for_event_handlers()
        return result

    @pytest.mark.vcr()
    def test_no_http_calls_when_disabled_via_env(self):
        """Test execution when tracing disabled via CREWAI_TRACING_ENABLED=false."""
        with pytest.MonkeyPatch.context() as mp:
            mp.setenv("CREWAI_TRACING_ENABLED", "false")
            mp.setenv("CREWAI_DISABLE_TELEMETRY", "false")
            assert self._kickoff_hello_crew() is not None

    @pytest.mark.vcr()
    def test_no_http_calls_when_disabled_via_tracing_false(self):
        """Test execution when tracing=False explicitly set."""
        with pytest.MonkeyPatch.context() as mp:
            mp.setenv("CREWAI_DISABLE_TELEMETRY", "false")
            assert self._kickoff_hello_crew(tracing=False) is not None

    @pytest.mark.vcr()
    def test_trace_calls_when_enabled_via_env(self):
        """Test execution when tracing enabled via CREWAI_TRACING_ENABLED=true."""
        with pytest.MonkeyPatch.context() as mp:
            mp.setenv("CREWAI_TRACING_ENABLED", "true")
            mp.setenv("CREWAI_DISABLE_TELEMETRY", "false")
            mp.setenv("OTEL_SDK_DISABLED", "false")
            assert self._kickoff_hello_crew() is not None

    @pytest.mark.vcr()
    def test_trace_calls_when_enabled_via_tracing_true(self):
        """Test execution when tracing=True explicitly set."""
        with pytest.MonkeyPatch.context() as mp:
            mp.setenv("CREWAI_DISABLE_TELEMETRY", "false")
            mp.setenv("OTEL_SDK_DISABLED", "false")
            assert self._kickoff_hello_crew(tracing=True) is not None
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/tracing/test_trace_enable_disable.py",
"license": "MIT License",
"lines": 92,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/tests/agents/test_a2a_trust_completion_status.py | """Test trust_remote_completion_status flag in A2A wrapper."""
from unittest.mock import MagicMock, patch
import pytest
from crewai.a2a.config import A2AConfig
try:
    # a2a-sdk is an optional dependency; the import also verifies that the
    # types used by the wrapper under test are importable.
    from a2a.types import Message, Role
    A2A_SDK_INSTALLED = True
except ImportError:
    # Tests below are skipped via pytest.mark.skipif when the SDK is absent.
    A2A_SDK_INSTALLED = False
def _create_mock_agent_card(name: str = "Test", url: str = "http://test-endpoint.com/"):
"""Create a mock agent card with proper model_dump behavior."""
mock_card = MagicMock()
mock_card.name = name
mock_card.url = url
mock_card.model_dump.return_value = {"name": name, "url": url}
mock_card.model_dump_json.return_value = f'{{"name": "{name}", "url": "{url}"}}'
return mock_card
@pytest.mark.skipif(not A2A_SDK_INSTALLED, reason="Requires a2a-sdk to be installed")
def test_trust_remote_completion_status_true_returns_directly():
    """When trust_remote_completion_status=True and A2A returns completed, return result directly."""
    # Imported lazily so collection succeeds without a2a-sdk installed.
    from crewai.a2a.wrapper import _delegate_to_a2a
    from crewai.a2a.types import AgentResponseProtocol
    from crewai import Agent, Task
    a2a_config = A2AConfig(
        endpoint="http://test-endpoint.com",
        trust_remote_completion_status=True,
    )
    agent = Agent(
        role="test manager",
        goal="coordinate",
        backstory="test",
        a2a=a2a_config,
    )
    task = Task(description="test", expected_output="test", agent=agent)

    # Minimal stand-in for the structured LLM response requesting delegation.
    class MockResponse:
        is_a2a = True
        message = "Please help"
        a2a_ids = ["http://test-endpoint.com/"]

    with (
        patch("crewai.a2a.wrapper.execute_a2a_delegation") as mock_execute,
        patch("crewai.a2a.wrapper._fetch_agent_cards_concurrently") as mock_fetch,
    ):
        mock_card = _create_mock_agent_card()
        mock_fetch.return_value = ({"http://test-endpoint.com/": mock_card}, {})
        # A2A returns completed
        mock_execute.return_value = {
            "status": "completed",
            "result": "Done by remote",
            "history": [],
        }
        # This should return directly without checking LLM response; the
        # original_fn fallback must never be consulted.
        result = _delegate_to_a2a(
            self=agent,
            agent_response=MockResponse(),
            task=task,
            original_fn=lambda *args, **kwargs: "fallback",
            context=None,
            tools=None,
            agent_cards={"http://test-endpoint.com/": mock_card},
            original_task_description="test",
        )
        assert result == "Done by remote"
        # Exactly one remote delegation, no follow-up round-trips.
        assert mock_execute.call_count == 1
@pytest.mark.skipif(not A2A_SDK_INSTALLED, reason="Requires a2a-sdk to be installed")
def test_trust_remote_completion_status_false_continues_conversation():
    """When trust_remote_completion_status=False and A2A returns completed, ask server agent."""
    from crewai.a2a.wrapper import _delegate_to_a2a
    from crewai import Agent, Task
    a2a_config = A2AConfig(
        endpoint="http://test-endpoint.com",
        trust_remote_completion_status=False,
    )
    agent = Agent(
        role="test manager",
        goal="coordinate",
        backstory="test",
        a2a=a2a_config,
    )
    task = Task(description="test", expected_output="test", agent=agent)

    # Minimal stand-in for the structured LLM response requesting delegation.
    class MockResponse:
        is_a2a = True
        message = "Please help"
        a2a_ids = ["http://test-endpoint.com/"]

    call_count = 0

    # Stand-in for the wrapped agent execution: on the follow-up round the
    # local LLM declines further delegation and produces the final answer.
    def mock_original_fn(self, task, context, tools):
        nonlocal call_count
        call_count += 1
        if call_count == 1:
            # Server decides to finish
            return '{"is_a2a": false, "message": "Server final answer", "a2a_ids": []}'
        return "unexpected"

    with (
        patch("crewai.a2a.wrapper.execute_a2a_delegation") as mock_execute,
        patch("crewai.a2a.wrapper._fetch_agent_cards_concurrently") as mock_fetch,
    ):
        mock_card = _create_mock_agent_card()
        mock_fetch.return_value = ({"http://test-endpoint.com/": mock_card}, {})
        # A2A returns completed
        mock_execute.return_value = {
            "status": "completed",
            "result": "Done by remote",
            "history": [],
        }
        result = _delegate_to_a2a(
            self=agent,
            agent_response=MockResponse(),
            task=task,
            original_fn=mock_original_fn,
            context=None,
            tools=None,
            agent_cards={"http://test-endpoint.com/": mock_card},
            original_task_description="test",
        )
        # Should call original_fn to get server response instead of trusting
        # the remote "completed" status blindly.
        assert call_count >= 1
        assert result == "Server final answer"
@pytest.mark.skipif(not A2A_SDK_INSTALLED, reason="Requires a2a-sdk to be installed")
def test_default_trust_remote_completion_status_is_false():
    """Verify that default value of trust_remote_completion_status is False."""
    # Only the endpoint is supplied, so the flag takes its declared default.
    a2a_config = A2AConfig(
        endpoint="http://test-endpoint.com",
    )
    assert a2a_config.trust_remote_completion_status is False | {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/agents/test_a2a_trust_completion_status.py",
"license": "MIT License",
"lines": 123,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/src/crewai/hooks/decorators.py | from __future__ import annotations
from collections.abc import Callable
from functools import wraps
import inspect
from typing import TYPE_CHECKING, Any, TypeVar, overload
if TYPE_CHECKING:
from crewai.hooks.llm_hooks import LLMCallHookContext
from crewai.hooks.tool_hooks import ToolCallHookContext
F = TypeVar("F", bound=Callable[..., Any])
def _create_hook_decorator(
    hook_type: str,
    register_function: Callable[..., Any],
    marker_attribute: str,
) -> Callable[..., Any]:
    """Create a hook decorator with filtering support.

    This factory function eliminates code duplication across the four hook
    decorators.

    Args:
        hook_type: Type of hook ("llm" or "tool").
            NOTE(review): currently unused in the body; kept for API symmetry.
        register_function: Function to call for registration (e.g., register_before_llm_call_hook)
        marker_attribute: Attribute name to mark functions (e.g., "is_before_llm_call_hook")

    Returns:
        A decorator function that supports filters and auto-registration
    """
    def decorator_factory(
        func: Callable[..., Any] | None = None,
        *,
        tools: list[str] | None = None,
        agents: list[str] | None = None,
    ) -> Callable[..., Any]:
        def decorator(f: Callable[..., Any]) -> Callable[..., Any]:
            # Mark the function so other machinery can recognize it as a hook.
            setattr(f, marker_attribute, True)
            # Heuristic: a first parameter named "self" (with at least one more
            # param) is treated as a class-scoped hook method; those are NOT
            # globally registered here — presumably collected later by the
            # owning class (TODO confirm against the hook collection code).
            sig = inspect.signature(f)
            params = list(sig.parameters.keys())
            is_method = len(params) >= 2 and params[0] == "self"
            # Stash the filters on the function itself so scoped consumers
            # can apply them.
            if tools:
                f._filter_tools = tools  # type: ignore[attr-defined]
            if agents:
                f._filter_agents = agents  # type: ignore[attr-defined]
            if tools or agents:
                @wraps(f)
                def filtered_hook(context: Any) -> Any:
                    # Run the hook only when the context matches the filters;
                    # hasattr guards let LLM contexts pass through tool filters
                    # untouched and vice versa.
                    if tools and hasattr(context, "tool_name"):
                        if context.tool_name not in tools:
                            return None
                    if agents and hasattr(context, "agent"):
                        if context.agent and context.agent.role not in agents:
                            return None
                    return f(context)
                if not is_method:
                    register_function(filtered_hook)
                # NOTE(review): the original (unfiltered) f is returned even
                # though the filtered wrapper is what gets registered, so
                # calling the decorated name directly bypasses the filters.
                return f
            if not is_method:
                register_function(f)
            return f
        # Support both bare (@hook) and parameterized (@hook(...)) usage.
        if func is None:
            return decorator
        return decorator(func)
    return decorator_factory
@overload
def before_llm_call(
    func: Callable[[LLMCallHookContext], None],
) -> Callable[[LLMCallHookContext], None]: ...
@overload
def before_llm_call(
    *,
    agents: list[str] | None = None,
) -> Callable[
    [Callable[[LLMCallHookContext], None]], Callable[[LLMCallHookContext], None]
]: ...
def before_llm_call(
    func: Callable[[LLMCallHookContext], None] | None = None,
    *,
    agents: list[str] | None = None,
) -> (
    Callable[[LLMCallHookContext], None]
    | Callable[
        [Callable[[LLMCallHookContext], None]], Callable[[LLMCallHookContext], None]
    ]
):
    """Register a function as a before_llm_call hook.

    Works bare (``@before_llm_call``) or parameterized with an agent-role
    filter (``@before_llm_call(agents=[...])``).

    Example:
        Simple usage::

            @before_llm_call
            def log_calls(context):
                print(f"LLM call by {context.agent.role}")

        With agent filter::

            @before_llm_call(agents=["Researcher", "Analyst"])
            def log_specific_agents(context):
                print(f"Filtered LLM call: {context.agent.role}")
    """
    from crewai.hooks.llm_hooks import register_before_llm_call_hook

    make_decorator = _create_hook_decorator(
        hook_type="llm",
        register_function=register_before_llm_call_hook,
        marker_attribute="is_before_llm_call_hook",
    )
    return make_decorator(func=func, agents=agents)  # type: ignore[return-value]
@overload
def after_llm_call(
    func: Callable[[LLMCallHookContext], str | None],
) -> Callable[[LLMCallHookContext], str | None]: ...
@overload
def after_llm_call(
    *,
    agents: list[str] | None = None,
) -> Callable[
    [Callable[[LLMCallHookContext], str | None]],
    Callable[[LLMCallHookContext], str | None],
]: ...
def after_llm_call(
    func: Callable[[LLMCallHookContext], str | None] | None = None,
    *,
    agents: list[str] | None = None,
) -> (
    Callable[[LLMCallHookContext], str | None]
    | Callable[
        [Callable[[LLMCallHookContext], str | None]],
        Callable[[LLMCallHookContext], str | None],
    ]
):
    """Register a function as an after_llm_call hook.

    The hook may return a replacement response string, or None to keep the
    original.  Works bare or with an agent-role filter.

    Example:
        Simple usage::

            @after_llm_call
            def sanitize(context):
                if "SECRET" in context.response:
                    return context.response.replace("SECRET", "[REDACTED]")
                return None

        With agent filter::

            @after_llm_call(agents=["Researcher"])
            def log_researcher_responses(context):
                print(f"Response length: {len(context.response)}")
                return None
    """
    from crewai.hooks.llm_hooks import register_after_llm_call_hook

    make_decorator = _create_hook_decorator(
        hook_type="llm",
        register_function=register_after_llm_call_hook,
        marker_attribute="is_after_llm_call_hook",
    )
    return make_decorator(func=func, agents=agents)  # type: ignore[return-value]
@overload
def before_tool_call(
    func: Callable[[ToolCallHookContext], bool | None],
) -> Callable[[ToolCallHookContext], bool | None]: ...
@overload
def before_tool_call(
    *,
    tools: list[str] | None = None,
    agents: list[str] | None = None,
) -> Callable[
    [Callable[[ToolCallHookContext], bool | None]],
    Callable[[ToolCallHookContext], bool | None],
]: ...
def before_tool_call(
    func: Callable[[ToolCallHookContext], bool | None] | None = None,
    *,
    tools: list[str] | None = None,
    agents: list[str] | None = None,
) -> (
    Callable[[ToolCallHookContext], bool | None]
    | Callable[
        [Callable[[ToolCallHookContext], bool | None]],
        Callable[[ToolCallHookContext], bool | None],
    ]
):
    """Register a function as a before_tool_call hook.

    Returning False from the hook blocks the tool call; None lets it proceed.
    Works bare, or parameterized with tool-name and/or agent-role filters.

    Example:
        Simple usage::

            @before_tool_call
            def log_all_tools(context):
                print(f"Tool: {context.tool_name}")
                return None

        With tool filter::

            @before_tool_call(tools=["delete_file", "execute_code"])
            def approve_dangerous(context):
                response = context.request_human_input(prompt="Approve?")
                return None if response == "yes" else False

        With combined filters::

            @before_tool_call(tools=["write_file"], agents=["Developer"])
            def approve_dev_writes(context):
                return None  # Only for Developer writing files
    """
    from crewai.hooks.tool_hooks import register_before_tool_call_hook

    make_decorator = _create_hook_decorator(
        hook_type="tool",
        register_function=register_before_tool_call_hook,
        marker_attribute="is_before_tool_call_hook",
    )
    return make_decorator(func=func, tools=tools, agents=agents)  # type: ignore[return-value]
@overload
def after_tool_call(
    func: Callable[[ToolCallHookContext], str | None],
) -> Callable[[ToolCallHookContext], str | None]: ...
@overload
def after_tool_call(
    *,
    tools: list[str] | None = None,
    agents: list[str] | None = None,
) -> Callable[
    [Callable[[ToolCallHookContext], str | None]],
    Callable[[ToolCallHookContext], str | None],
]: ...
def after_tool_call(
    func: Callable[[ToolCallHookContext], str | None] | None = None,
    *,
    tools: list[str] | None = None,
    agents: list[str] | None = None,
) -> (
    Callable[[ToolCallHookContext], str | None]
    | Callable[
        [Callable[[ToolCallHookContext], str | None]],
        Callable[[ToolCallHookContext], str | None],
    ]
):
    """Register a function as an after_tool_call hook.

    Works bare (``@after_tool_call``) or parameterized with ``tools``
    and/or ``agents`` filters. The hook may return a replacement tool
    result string, or None to keep the original result.

    Example:
        Simple usage::

            @after_tool_call
            def log_results(context):
                print(f"Result: {len(context.tool_result)} chars")
                return None

        With tool filter::

            @after_tool_call(tools=["web_search", "ExaSearchTool"])
            def sanitize_search_results(context):
                if "SECRET" in context.tool_result:
                    return context.tool_result.replace("SECRET", "[REDACTED]")
                return None
    """
    # Imported lazily to avoid a circular import at module load time.
    from crewai.hooks.tool_hooks import register_after_tool_call_hook

    decorator = _create_hook_decorator(
        hook_type="tool",
        register_function=register_after_tool_call_hook,
        marker_attribute="is_after_tool_call_hook",
    )
    return decorator(func=func, tools=tools, agents=agents)  # type: ignore[return-value]
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/hooks/decorators.py",
"license": "MIT License",
"lines": 232,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai/src/crewai/hooks/llm_hooks.py | from __future__ import annotations
from typing import TYPE_CHECKING, Any, cast
from crewai.events.event_listener import event_listener
from crewai.hooks.types import (
AfterLLMCallHookCallable,
AfterLLMCallHookType,
BeforeLLMCallHookCallable,
BeforeLLMCallHookType,
)
from crewai.utilities.printer import Printer
if TYPE_CHECKING:
from crewai.agents.crew_agent_executor import CrewAgentExecutor
from crewai.experimental.agent_executor import AgentExecutor
from crewai.lite_agent import LiteAgent
from crewai.llms.base_llm import BaseLLM
from crewai.utilities.types import LLMMessage
class LLMCallHookContext:
    """Context object passed to LLM call hooks.
    Provides hooks with complete access to the execution state, allowing
    modification of messages, responses, and executor attributes.
    Supports both executor-based calls (agents in crews/flows) and direct LLM calls.
    Attributes:
        executor: Reference to the executor (CrewAgentExecutor/LiteAgent) or None for direct calls
        messages: Direct reference to messages (mutable list).
            Can be modified in both before_llm_call and after_llm_call hooks.
            Modifications in after_llm_call hooks persist to the next iteration,
            allowing hooks to modify conversation history for subsequent LLM calls.
            IMPORTANT: Modify messages in-place (e.g., append, extend, remove items).
            Do NOT replace the list (e.g., context.messages = []), as this will break
            the executor. Use context.messages.append() or context.messages.extend()
            instead of assignment.
        agent: Reference to the agent executing the task (None for direct LLM calls)
        task: Reference to the task being executed (None for direct LLM calls or LiteAgent)
        crew: Reference to the crew instance (None for direct LLM calls or LiteAgent)
        llm: Reference to the LLM instance
        iterations: Current iteration count (0 for direct LLM calls)
        response: LLM response string (only set for after_llm_call hooks).
            Can be modified by returning a new string from after_llm_call hook.
    """
    # Class-level annotations documenting the attributes set in __init__.
    executor: CrewAgentExecutor | AgentExecutor | LiteAgent | None
    messages: list[LLMMessage]
    agent: Any
    task: Any
    crew: Any
    llm: BaseLLM | None | str | Any
    iterations: int
    response: str | None
    def __init__(
        self,
        executor: CrewAgentExecutor | AgentExecutor | LiteAgent | None = None,
        response: str | None = None,
        messages: list[LLMMessage] | None = None,
        llm: BaseLLM | str | Any | None = None,  # TODO: look into narrowing this union
        agent: Any | None = None,
        task: Any | None = None,
        crew: Any | None = None,
    ) -> None:
        """Initialize hook context with executor reference or direct parameters.
        Args:
            executor: The CrewAgentExecutor or LiteAgent instance (None for direct LLM calls)
            response: Optional response string (for after_llm_call hooks)
            messages: Optional messages list (for direct LLM calls when executor is None)
            llm: Optional LLM instance (for direct LLM calls when executor is None)
            agent: Optional agent reference (for direct LLM calls when executor is None)
            task: Optional task reference (for direct LLM calls when executor is None)
            crew: Optional crew reference (for direct LLM calls when executor is None)
        """
        if executor is not None:
            # Existing path: extract state from the executor; the remaining
            # keyword arguments (messages/llm/agent/task/crew) are ignored here.
            self.executor = executor
            # Shared by reference on purpose: hooks mutate the executor's list.
            self.messages = executor.messages
            self.llm = executor.llm
            self.iterations = executor.iterations
            # Handle CrewAgentExecutor vs LiteAgent differences
            if hasattr(executor, "agent"):
                self.agent = executor.agent
                self.task = cast("CrewAgentExecutor", executor).task
                self.crew = cast("CrewAgentExecutor", executor).crew
            else:
                # LiteAgent case - the executor is the agent itself and carries
                # no task/crew; prefer the wrapped original agent when present.
                self.agent = (
                    executor.original_agent
                    if hasattr(executor, "original_agent")
                    else executor
                )
                self.task = None
                self.crew = None
        else:
            # New path: direct LLM call with explicit parameters
            self.executor = None
            # NOTE: an explicitly passed empty list is replaced by a fresh one,
            # so the caller's list is not shared in that case.
            self.messages = messages or []
            self.llm = llm
            self.agent = agent
            self.task = task
            self.crew = crew
            self.iterations = 0
        self.response = response
    def request_human_input(
        self,
        prompt: str,
        default_message: str = "Press Enter to continue, or provide feedback:",
    ) -> str:
        """Request human input during LLM hook execution.
        This method pauses live console updates, displays a prompt to the user,
        waits for their input, and then resumes live updates. This is useful for
        approval gates, debugging, or getting human feedback during execution.
        Args:
            prompt: Custom message to display to the user
            default_message: Message shown after the prompt
        Returns:
            User's input as a string (empty string if just Enter pressed)
        Example:
            >>> def approval_hook(context: LLMCallHookContext) -> None:
            ...     if context.iterations > 5:
            ...         response = context.request_human_input(
            ...             prompt="Allow this LLM call?",
            ...             default_message="Type 'no' to skip, or press Enter:",
            ...         )
            ...         if response.lower() == "no":
            ...             print("LLM call skipped by user")
        """
        printer = Printer()
        # Suspend the live renderer so the prompt is not overwritten mid-input.
        event_listener.formatter.pause_live_updates()
        try:
            printer.print(content=f"\n{prompt}", color="bold_yellow")
            printer.print(content=default_message, color="cyan")
            response = input().strip()
            if response:
                printer.print(content="\nProcessing your input...", color="cyan")
            return response
        finally:
            # Always resume rendering, even if input() raised (e.g. EOFError).
            event_listener.formatter.resume_live_updates()
# Global hook registries
# Populated via register_* below and attached to every executor's LLM calls.
_before_llm_call_hooks: list[BeforeLLMCallHookType | BeforeLLMCallHookCallable] = []
_after_llm_call_hooks: list[AfterLLMCallHookType | AfterLLMCallHookCallable] = []
def register_before_llm_call_hook(
    hook: BeforeLLMCallHookType | BeforeLLMCallHookCallable,
) -> None:
    """Add a hook to the global before_llm_call registry.

    Registered hooks are attached to every executor automatically, so they
    run before each LLM invocation across all crews and flows.

    Args:
        hook: Callable receiving an LLMCallHookContext. It may mutate
            ``context.messages`` in-place, return False to block the LLM
            call, or return True/None to allow it.

    Note:
        Mutate the message list in-place (append/extend/remove). Rebinding
        ``context.messages`` to a new list breaks execution.

    Example:
        >>> def log_llm_calls(context: LLMCallHookContext) -> None:
        ...     print(f"LLM call by {context.agent.role}")
        ...     print(f"Messages: {len(context.messages)}")
        ...     return None  # Allow execution
        >>>
        >>> register_before_llm_call_hook(log_llm_calls)
        >>>
        >>> def block_excessive_iterations(context: LLMCallHookContext) -> bool | None:
        ...     if context.iterations > 10:
        ...         print("Blocked: Too many iterations")
        ...         return False  # Block execution
        ...     return None  # Allow execution
        >>>
        >>> register_before_llm_call_hook(block_excessive_iterations)
    """
    _before_llm_call_hooks.append(hook)
def register_after_llm_call_hook(
    hook: AfterLLMCallHookType | AfterLLMCallHookCallable,
) -> None:
    """Add a hook to the global after_llm_call registry.

    Registered hooks are attached to every executor automatically, so they
    run after each LLM response across all crews and flows.

    Args:
        hook: Callable receiving an LLMCallHookContext. It may return a
            replacement response string (None keeps the original) and may
            also mutate ``context.messages`` in-place; both can be combined.

    Note:
        Mutate the message list in-place (append/extend/remove). Rebinding
        ``context.messages`` to a new list breaks execution.

    Example:
        >>> def sanitize_response(context: LLMCallHookContext) -> str | None:
        ...     if context.response and "SECRET" in context.response:
        ...         return context.response.replace("SECRET", "[REDACTED]")
        ...     return None
        >>>
        >>> register_after_llm_call_hook(sanitize_response)
    """
    _after_llm_call_hooks.append(hook)
def get_before_llm_call_hooks() -> list[
    BeforeLLMCallHookType | BeforeLLMCallHookCallable
]:
    """Return a snapshot of the registered global before_llm_call hooks.

    Returns:
        A new list of the registered hooks; mutating it leaves the
        registry untouched.
    """
    return list(_before_llm_call_hooks)
def get_after_llm_call_hooks() -> list[AfterLLMCallHookType | AfterLLMCallHookCallable]:
    """Return a snapshot of the registered global after_llm_call hooks.

    Returns:
        A new list of the registered hooks; mutating it leaves the
        registry untouched.
    """
    return list(_after_llm_call_hooks)
def unregister_before_llm_call_hook(
    hook: BeforeLLMCallHookType | BeforeLLMCallHookCallable,
) -> bool:
    """Remove a previously registered global before_llm_call hook.

    Args:
        hook: The hook callable to remove from the registry.

    Returns:
        True if the hook was present and removed, False otherwise.

    Example:
        >>> def my_hook(context: LLMCallHookContext) -> None:
        ...     print("Before LLM call")
        >>>
        >>> register_before_llm_call_hook(my_hook)
        >>> unregister_before_llm_call_hook(my_hook)
        True
    """
    if hook in _before_llm_call_hooks:
        _before_llm_call_hooks.remove(hook)
        return True
    return False
def unregister_after_llm_call_hook(
    hook: AfterLLMCallHookType | AfterLLMCallHookCallable,
) -> bool:
    """Remove a previously registered global after_llm_call hook.

    Args:
        hook: The hook callable to remove from the registry.

    Returns:
        True if the hook was present and removed, False otherwise.

    Example:
        >>> def my_hook(context: LLMCallHookContext) -> str | None:
        ...     return None
        >>>
        >>> register_after_llm_call_hook(my_hook)
        >>> unregister_after_llm_call_hook(my_hook)
        True
    """
    if hook in _after_llm_call_hooks:
        _after_llm_call_hooks.remove(hook)
        return True
    return False
def clear_before_llm_call_hooks() -> int:
    """Empty the global before_llm_call registry.

    Returns:
        The number of hooks that were removed.

    Example:
        >>> register_before_llm_call_hook(hook1)
        >>> register_before_llm_call_hook(hook2)
        >>> clear_before_llm_call_hooks()
        2
    """
    removed = len(_before_llm_call_hooks)
    # In-place delete keeps external references to the registry valid.
    del _before_llm_call_hooks[:]
    return removed
def clear_after_llm_call_hooks() -> int:
    """Empty the global after_llm_call registry.

    Returns:
        The number of hooks that were removed.

    Example:
        >>> register_after_llm_call_hook(hook1)
        >>> register_after_llm_call_hook(hook2)
        >>> clear_after_llm_call_hooks()
        2
    """
    removed = len(_after_llm_call_hooks)
    # In-place delete keeps external references to the registry valid.
    del _after_llm_call_hooks[:]
    return removed
def clear_all_llm_call_hooks() -> tuple[int, int]:
    """Clear every global LLM call hook, both before and after.

    Returns:
        Tuple of (before_hooks_cleared, after_hooks_cleared).

    Example:
        >>> register_before_llm_call_hook(before_hook)
        >>> register_after_llm_call_hook(after_hook)
        >>> clear_all_llm_call_hooks()
        (1, 1)
    """
    # Tuple elements evaluate left to right: before hooks first, then after.
    return (
        clear_before_llm_call_hooks(),
        clear_after_llm_call_hooks(),
    )
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/hooks/llm_hooks.py",
"license": "MIT License",
"lines": 283,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
crewAIInc/crewAI:lib/crewai/src/crewai/hooks/tool_hooks.py | from __future__ import annotations
from typing import TYPE_CHECKING, Any
from crewai.events.event_listener import event_listener
from crewai.hooks.types import (
AfterToolCallHookCallable,
AfterToolCallHookType,
BeforeToolCallHookCallable,
BeforeToolCallHookType,
)
from crewai.utilities.printer import Printer
if TYPE_CHECKING:
from crewai.agent import Agent
from crewai.agents.agent_builder.base_agent import BaseAgent
from crewai.crew import Crew
from crewai.task import Task
from crewai.tools.structured_tool import CrewStructuredTool
class ToolCallHookContext:
    """Context object passed to tool call hooks.

    Carries the tool being invoked, its (mutable) input, the surrounding
    agent/task/crew references, and — for after hooks — the tool result.

    Attributes:
        tool_name: Name of the tool being called.
        tool_input: Tool input parameters (mutable dict). before_tool_call
            hooks may modify it in-place (context.tool_input['key'] = value).
            Replacing the dict itself (context.tool_input = {}) has no
            effect on the actual tool execution.
        tool: Reference to the CrewStructuredTool instance.
        agent: Agent executing the tool (may be None).
        task: Current task being executed (may be None).
        crew: Crew instance (may be None).
        tool_result: Tool execution result, set only for after_tool_call
            hooks. Replaced by returning a new string from the hook.
    """

    def __init__(
        self,
        tool_name: str,
        tool_input: dict[str, Any],
        tool: CrewStructuredTool,
        agent: Agent | BaseAgent | None = None,
        task: Task | None = None,
        crew: Crew | None = None,
        tool_result: str | None = None,
    ) -> None:
        """Capture the execution state for a single tool invocation.

        Args:
            tool_name: Name of the tool being called.
            tool_input: Tool input parameters (kept by reference, mutable).
            tool: Tool instance reference.
            agent: Optional agent executing the tool.
            task: Optional current task.
            crew: Optional crew instance.
            tool_result: Optional tool result (for after hooks).
        """
        self.tool = tool
        self.tool_name = tool_name
        # Held by reference so in-place hook edits reach the tool execution.
        self.tool_input = tool_input
        self.tool_result = tool_result
        self.agent = agent
        self.task = task
        self.crew = crew

    def request_human_input(
        self,
        prompt: str,
        default_message: str = "Press Enter to continue, or provide feedback:",
    ) -> str:
        """Request human input during tool hook execution.

        Pauses live console updates, shows the prompt, waits for a line of
        input, then resumes live updates. Useful for approval gates,
        reviewing tool results, or collecting feedback mid-execution.

        Args:
            prompt: Custom message to display to the user.
            default_message: Message shown after the prompt.

        Returns:
            The user's input (empty string if only Enter was pressed).

        Example:
            >>> def approval_hook(context: ToolCallHookContext) -> bool | None:
            ...     if context.tool_name == "delete_file":
            ...         response = context.request_human_input(
            ...             prompt="Allow file deletion?",
            ...             default_message="Type 'approve' to continue:",
            ...         )
            ...         if response.lower() != "approve":
            ...             return False  # Block execution
            ...     return None  # Allow execution
        """
        printer = Printer()
        # Suspend the live renderer so the prompt is not overwritten.
        event_listener.formatter.pause_live_updates()
        try:
            printer.print(content=f"\n{prompt}", color="bold_yellow")
            printer.print(content=default_message, color="cyan")
            reply = input().strip()
            if reply:
                printer.print(content="\nProcessing your input...", color="cyan")
            return reply
        finally:
            # Always restore rendering, even if input() raised.
            event_listener.formatter.resume_live_updates()
# Global hook registries
# Populated via register_* below and consulted for every tool execution.
_before_tool_call_hooks: list[BeforeToolCallHookType | BeforeToolCallHookCallable] = []
_after_tool_call_hooks: list[AfterToolCallHookType | AfterToolCallHookCallable] = []
def register_before_tool_call_hook(
    hook: BeforeToolCallHookType | BeforeToolCallHookCallable,
) -> None:
    """Add a hook to the global before_tool_call registry.

    Registered hooks are applied to all tool executions automatically,
    across every agent and crew.

    Args:
        hook: Callable receiving a ToolCallHookContext. It may mutate
            ``context.tool_input`` in-place, return False to block the
            tool execution, or return True/None to allow it.

    Note:
        Mutate ``tool_input`` in-place (context.tool_input['key'] = value).
        Rebinding the dict (context.tool_input = {}) does not affect the
        actual tool execution.

    Example:
        >>> def log_tool_usage(context: ToolCallHookContext) -> None:
        ...     print(f"Executing tool: {context.tool_name}")
        ...     print(f"Input: {context.tool_input}")
        ...     return None  # Allow execution
        >>>
        >>> register_before_tool_call_hook(log_tool_usage)
        >>> def block_dangerous_tools(context: ToolCallHookContext) -> bool | None:
        ...     if context.tool_name == "delete_database":
        ...         print("Blocked dangerous tool execution!")
        ...         return False  # Block execution
        ...     return None  # Allow execution
        >>>
        >>> register_before_tool_call_hook(block_dangerous_tools)
    """
    _before_tool_call_hooks.append(hook)
def register_after_tool_call_hook(
    hook: AfterToolCallHookType | AfterToolCallHookCallable,
) -> None:
    """Add a hook to the global after_tool_call registry.

    Registered hooks are applied to all tool executions automatically,
    across every agent and crew.

    Args:
        hook: Callable receiving a ToolCallHookContext whose
            ``tool_result`` holds the tool's output. Return a replacement
            result string, or None to keep the original.

    Example:
        >>> def sanitize_output(context: ToolCallHookContext) -> str | None:
        ...     if context.tool_result and "SECRET_KEY" in context.tool_result:
        ...         return context.tool_result.replace("SECRET_KEY=...", "[REDACTED]")
        ...     return None  # Keep original result
        >>>
        >>> register_after_tool_call_hook(sanitize_output)
        >>> def log_tool_results(context: ToolCallHookContext) -> None:
        ...     print(f"Tool {context.tool_name} returned: {context.tool_result[:100]}")
        ...     return None  # Keep original result
        >>>
        >>> register_after_tool_call_hook(log_tool_results)
    """
    _after_tool_call_hooks.append(hook)
def get_before_tool_call_hooks() -> list[
    BeforeToolCallHookType | BeforeToolCallHookCallable
]:
    """Return a snapshot of the registered global before_tool_call hooks.

    Returns:
        A new list of the registered hooks; mutating it leaves the
        registry untouched.
    """
    return list(_before_tool_call_hooks)
def get_after_tool_call_hooks() -> list[
    AfterToolCallHookType | AfterToolCallHookCallable
]:
    """Return a snapshot of the registered global after_tool_call hooks.

    Returns:
        A new list of the registered hooks; mutating it leaves the
        registry untouched.
    """
    return list(_after_tool_call_hooks)
def unregister_before_tool_call_hook(
    hook: BeforeToolCallHookType | BeforeToolCallHookCallable,
) -> bool:
    """Remove a previously registered global before_tool_call hook.

    Args:
        hook: The hook callable to remove from the registry.

    Returns:
        True if the hook was present and removed, False otherwise.

    Example:
        >>> def my_hook(context: ToolCallHookContext) -> None:
        ...     print("Before tool call")
        >>>
        >>> register_before_tool_call_hook(my_hook)
        >>> unregister_before_tool_call_hook(my_hook)
        True
    """
    if hook in _before_tool_call_hooks:
        _before_tool_call_hooks.remove(hook)
        return True
    return False
def unregister_after_tool_call_hook(
    hook: AfterToolCallHookType | AfterToolCallHookCallable,
) -> bool:
    """Remove a previously registered global after_tool_call hook.

    Args:
        hook: The hook callable to remove from the registry.

    Returns:
        True if the hook was present and removed, False otherwise.

    Example:
        >>> def my_hook(context: ToolCallHookContext) -> str | None:
        ...     return None
        >>>
        >>> register_after_tool_call_hook(my_hook)
        >>> unregister_after_tool_call_hook(my_hook)
        True
    """
    if hook in _after_tool_call_hooks:
        _after_tool_call_hooks.remove(hook)
        return True
    return False
def clear_before_tool_call_hooks() -> int:
    """Empty the global before_tool_call registry.

    Returns:
        The number of hooks that were removed.

    Example:
        >>> register_before_tool_call_hook(hook1)
        >>> register_before_tool_call_hook(hook2)
        >>> clear_before_tool_call_hooks()
        2
    """
    removed = len(_before_tool_call_hooks)
    # In-place delete keeps external references to the registry valid.
    del _before_tool_call_hooks[:]
    return removed
def clear_after_tool_call_hooks() -> int:
    """Empty the global after_tool_call registry.

    Returns:
        The number of hooks that were removed.

    Example:
        >>> register_after_tool_call_hook(hook1)
        >>> register_after_tool_call_hook(hook2)
        >>> clear_after_tool_call_hooks()
        2
    """
    removed = len(_after_tool_call_hooks)
    # In-place delete keeps external references to the registry valid.
    del _after_tool_call_hooks[:]
    return removed
def clear_all_tool_call_hooks() -> tuple[int, int]:
    """Clear every global tool call hook, both before and after.

    Returns:
        Tuple of (before_hooks_cleared, after_hooks_cleared).

    Example:
        >>> register_before_tool_call_hook(before_hook)
        >>> register_after_tool_call_hook(after_hook)
        >>> clear_all_tool_call_hooks()
        (1, 1)
    """
    # Tuple elements evaluate left to right: before hooks first, then after.
    return (
        clear_before_tool_call_hooks(),
        clear_after_tool_call_hooks(),
    )
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/hooks/tool_hooks.py",
"license": "MIT License",
"lines": 253,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
crewAIInc/crewAI:lib/crewai/src/crewai/hooks/types.py | from __future__ import annotations
from collections.abc import Callable
from typing import TYPE_CHECKING, Generic, Protocol, TypeVar, runtime_checkable
if TYPE_CHECKING:
from crewai.hooks.llm_hooks import LLMCallHookContext
from crewai.hooks.tool_hooks import ToolCallHookContext
# Contravariant: a hook accepting a base context type also accepts its subtypes.
ContextT = TypeVar("ContextT", contravariant=True)
# Covariant: a hook may return a more specific type than the one declared.
ReturnT = TypeVar("ReturnT", covariant=True)
@runtime_checkable
class Hook(Protocol, Generic[ContextT, ReturnT]):
    """Generic protocol for hook functions.
    This protocol defines the common interface for all hook types in CrewAI.
    Hooks receive a context object and optionally return a modified result.
    Type Parameters:
        ContextT: The context type (LLMCallHookContext or ToolCallHookContext)
        ReturnT: The return type (None, str | None, or bool | None)
    Example:
        >>> # Before LLM call hook: receives LLMCallHookContext, returns None
        >>> hook: Hook[LLMCallHookContext, None] = lambda ctx: print(ctx.iterations)
        >>>
        >>> # After LLM call hook: receives LLMCallHookContext, returns str | None
        >>> hook: Hook[LLMCallHookContext, str | None] = lambda ctx: ctx.response
    """
    # NOTE: @runtime_checkable isinstance() checks only verify that __call__
    # exists; they do not validate the context or return types.
    def __call__(self, context: ContextT) -> ReturnT:
        """Execute the hook with the given context.
        Args:
            context: Context object with relevant execution state
        Returns:
            Hook-specific return value (None, str | None, or bool | None)
        """
        ...
class BeforeLLMCallHook(Hook["LLMCallHookContext", bool | None], Protocol):
    """Protocol for before_llm_call hooks.
    These hooks are called before an LLM is invoked and can modify the messages
    that will be sent to the LLM or block the execution entirely.
    """
    # Interface-only body: implementations are plain callables matching this shape.
    def __call__(self, context: LLMCallHookContext) -> bool | None:
        """Execute the before LLM call hook.
        Args:
            context: Context object with executor, messages, agent, task, etc.
                Messages can be modified in-place.
        Returns:
            False to block LLM execution, True or None to allow execution
        """
        ...
class AfterLLMCallHook(Hook["LLMCallHookContext", str | None], Protocol):
    """Protocol for after_llm_call hooks.
    These hooks are called after an LLM returns a response and can modify
    the response or the message history.
    """
    # Interface-only body: implementations are plain callables matching this shape.
    def __call__(self, context: LLMCallHookContext) -> str | None:
        """Execute the after LLM call hook.
        Args:
            context: Context object with executor, messages, agent, task, response, etc.
                Messages can be modified in-place. Response is available in context.response.
        Returns:
            Modified response string, or None to keep the original response
        """
        ...
class BeforeToolCallHook(Hook["ToolCallHookContext", bool | None], Protocol):
    """Protocol for before_tool_call hooks.
    These hooks are called before a tool is executed and can modify the tool
    input or block the execution entirely.
    """
    # Interface-only body: implementations are plain callables matching this shape.
    def __call__(self, context: ToolCallHookContext) -> bool | None:
        """Execute the before tool call hook.
        Args:
            context: Context object with tool_name, tool_input, tool, agent, task, etc.
                Tool input can be modified in-place.
        Returns:
            False to block tool execution, True or None to allow execution
        """
        ...
class AfterToolCallHook(Hook["ToolCallHookContext", str | None], Protocol):
    """Protocol for after_tool_call hooks.
    These hooks are called after a tool executes and can modify the result.
    """
    # Interface-only body: implementations are plain callables matching this shape.
    def __call__(self, context: ToolCallHookContext) -> str | None:
        """Execute the after tool call hook.
        Args:
            context: Context object with tool_name, tool_input, tool_result, etc.
                Tool result is available in context.tool_result.
        Returns:
            Modified tool result string, or None to keep the original result
        """
        ...
# Canonical Protocol-based aliases, shared return conventions:
# - All before hooks: bool | None (False = block execution, True/None = allow)
# - All after hooks: str | None (str = modified result, None = keep original)
BeforeLLMCallHookType = Hook["LLMCallHookContext", bool | None]
AfterLLMCallHookType = Hook["LLMCallHookContext", str | None]
BeforeToolCallHookType = Hook["ToolCallHookContext", bool | None]
AfterToolCallHookType = Hook["ToolCallHookContext", str | None]
# Alternative Callable-based type aliases for compatibility with plain functions
BeforeLLMCallHookCallable = Callable[["LLMCallHookContext"], bool | None]
AfterLLMCallHookCallable = Callable[["LLMCallHookContext"], str | None]
BeforeToolCallHookCallable = Callable[["ToolCallHookContext"], bool | None]
AfterToolCallHookCallable = Callable[["ToolCallHookContext"], str | None]
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/hooks/types.py",
"license": "MIT License",
"lines": 97,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
crewAIInc/crewAI:lib/crewai/src/crewai/hooks/wrappers.py | from __future__ import annotations
from collections.abc import Callable
from typing import TYPE_CHECKING, Any, TypeVar
if TYPE_CHECKING:
from crewai.hooks.llm_hooks import LLMCallHookContext
from crewai.hooks.tool_hooks import ToolCallHookContext
# NOTE(review): these TypeVars appear unused in this module — confirm before removing.
P = TypeVar("P")
R = TypeVar("R")
def _copy_method_metadata(wrapper: Any, original: Callable[..., Any]) -> None:
"""Copy metadata from original function to wrapper.
Args:
wrapper: The wrapper object to copy metadata to
original: The original function to copy from
"""
wrapper.__name__ = original.__name__
wrapper.__doc__ = original.__doc__
wrapper.__module__ = original.__module__
wrapper.__qualname__ = original.__qualname__
wrapper.__annotations__ = original.__annotations__
class BeforeLLMCallHookMethod:
    """Marks a @CrewBase method as a crew-scoped before_llm_call hook.

    Instances carry the ``is_before_llm_call_hook`` marker so crew
    initialization can discover and register them, and implement the
    descriptor protocol so the wrapped method still binds to instances.
    """

    # Detection marker read during crew initialization.
    is_before_llm_call_hook: bool = True

    def __init__(
        self,
        meth: Callable[[Any, LLMCallHookContext], None],
        agents: list[str] | None = None,
    ) -> None:
        """Wrap *meth* and record the optional agent-role filter.

        Args:
            meth: The unbound method to wrap.
            agents: Agent roles this hook applies to; None means all agents.
        """
        self._meth = meth
        self.agents = agents
        # Mirror the wrapped method's metadata so introspection still works.
        for attr in ("__name__", "__doc__", "__module__", "__qualname__", "__annotations__"):
            setattr(self, attr, getattr(meth, attr))

    def __call__(self, *args: Any, **kwargs: Any) -> None:
        """Forward the call to the wrapped method unchanged.

        Args:
            *args: Positional arguments.
            **kwargs: Keyword arguments.
        """
        return self._meth(*args, **kwargs)

    def __get__(self, obj: Any, objtype: type[Any] | None = None) -> Any:
        """Descriptor protocol: bind the hook to an instance.

        Args:
            obj: The instance the attribute is accessed through (None for class access).
            objtype: The owning class.

        Returns:
            Self for class-level access; otherwise a callable forwarding the
            hook context to the wrapped method with *obj* as ``self``.
        """
        if obj is None:
            return self

        def _bound(context: LLMCallHookContext) -> None:
            return self._meth(obj, context)

        return _bound
class AfterLLMCallHookMethod:
    """Marks a @CrewBase method as a crew-scoped after_llm_call hook."""

    # Detection marker read during crew initialization.
    is_after_llm_call_hook: bool = True

    def __init__(
        self,
        meth: Callable[[Any, LLMCallHookContext], str | None],
        agents: list[str] | None = None,
    ) -> None:
        """Wrap *meth* and record the optional agent-role filter."""
        self._meth = meth
        self.agents = agents
        # Mirror the wrapped method's metadata so introspection still works.
        for attr in ("__name__", "__doc__", "__module__", "__qualname__", "__annotations__"):
            setattr(self, attr, getattr(meth, attr))

    def __call__(self, *args: Any, **kwargs: Any) -> str | None:
        """Forward the call to the wrapped method unchanged."""
        return self._meth(*args, **kwargs)

    def __get__(self, obj: Any, objtype: type[Any] | None = None) -> Any:
        """Descriptor protocol: return self for class access, a bound forwarder otherwise."""
        if obj is None:
            return self

        def _bound(context: LLMCallHookContext) -> str | None:
            return self._meth(obj, context)

        return _bound
class BeforeToolCallHookMethod:
    """Marks a @CrewBase method as a crew-scoped before_tool_call hook."""

    # Detection marker read during crew initialization.
    is_before_tool_call_hook: bool = True

    def __init__(
        self,
        meth: Callable[[Any, ToolCallHookContext], bool | None],
        tools: list[str] | None = None,
        agents: list[str] | None = None,
    ) -> None:
        """Wrap *meth* and record the optional tool-name/agent-role filters."""
        self._meth = meth
        self.tools = tools
        self.agents = agents
        # Mirror the wrapped method's metadata so introspection still works.
        for attr in ("__name__", "__doc__", "__module__", "__qualname__", "__annotations__"):
            setattr(self, attr, getattr(meth, attr))

    def __call__(self, *args: Any, **kwargs: Any) -> bool | None:
        """Forward the call to the wrapped method unchanged."""
        return self._meth(*args, **kwargs)

    def __get__(self, obj: Any, objtype: type[Any] | None = None) -> Any:
        """Descriptor protocol: return self for class access, a bound forwarder otherwise."""
        if obj is None:
            return self

        def _bound(context: ToolCallHookContext) -> bool | None:
            return self._meth(obj, context)

        return _bound
class AfterToolCallHookMethod:
    """Marks a @CrewBase method as a crew-scoped after_tool_call hook."""

    # Detection marker read during crew initialization.
    is_after_tool_call_hook: bool = True

    def __init__(
        self,
        meth: Callable[[Any, ToolCallHookContext], str | None],
        tools: list[str] | None = None,
        agents: list[str] | None = None,
    ) -> None:
        """Wrap *meth* and record the optional tool-name/agent-role filters."""
        self._meth = meth
        self.tools = tools
        self.agents = agents
        # Mirror the wrapped method's metadata so introspection still works.
        for attr in ("__name__", "__doc__", "__module__", "__qualname__", "__annotations__"):
            setattr(self, attr, getattr(meth, attr))

    def __call__(self, *args: Any, **kwargs: Any) -> str | None:
        """Forward the call to the wrapped method unchanged."""
        return self._meth(*args, **kwargs)

    def __get__(self, obj: Any, objtype: type[Any] | None = None) -> Any:
        """Descriptor protocol: return self for class access, a bound forwarder otherwise."""
        if obj is None:
            return self

        def _bound(context: ToolCallHookContext) -> str | None:
            return self._meth(obj, context)

        return _bound
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/hooks/wrappers.py",
"license": "MIT License",
"lines": 121,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai/tests/hooks/test_crew_scoped_hooks.py | """Tests for crew-scoped hooks within @CrewBase classes."""
from __future__ import annotations
from unittest.mock import Mock
import pytest
from crewai import Agent, Crew
from crewai.hooks import (
LLMCallHookContext,
ToolCallHookContext,
before_llm_call,
before_tool_call,
get_before_llm_call_hooks,
get_before_tool_call_hooks,
)
from crewai.project import CrewBase, agent, crew
@pytest.fixture(autouse=True)
def clear_hooks():
    """Snapshot, clear, and restore the global hook registries around each test."""
    from crewai.hooks import llm_hooks, tool_hooks

    # Snapshot the registries so other tests' hooks survive this test.
    saved_llm = list(llm_hooks._before_llm_call_hooks)
    saved_tool = list(tool_hooks._before_tool_call_hooks)
    # Start the test with empty registries.
    llm_hooks._before_llm_call_hooks.clear()
    tool_hooks._before_tool_call_hooks.clear()
    yield
    # Restore the original contents in-place (same list objects).
    llm_hooks._before_llm_call_hooks[:] = saved_llm
    tool_hooks._before_tool_call_hooks[:] = saved_tool
class TestCrewScopedHooks:
"""Test hooks defined as methods within @CrewBase classes."""
    def test_crew_scoped_hook_is_registered_on_instance_creation(self):
        """Test that crew-scoped hooks are registered when crew instance is created."""
        # Minimal crew whose method-level hook should auto-register on instantiation.
        @CrewBase
        class TestCrew:
            @before_llm_call
            def my_hook(self, context):
                pass
            @agent
            def researcher(self):
                return Agent(role="Researcher", goal="Research", backstory="Expert")
            @crew
            def crew(self):
                return Crew(agents=self.agents, tasks=[], verbose=False)
        # Check hooks before instance creation (defining the class alone must not register)
        hooks_before = get_before_llm_call_hooks()
        initial_count = len(hooks_before)
        # Create instance - should register the hook
        crew_instance = TestCrew()
        # Check hooks after instance creation
        hooks_after = get_before_llm_call_hooks()
        # Should have exactly one more hook registered than before instantiation
        assert len(hooks_after) == initial_count + 1
def test_crew_scoped_hook_has_access_to_self(self):
"""Test that crew-scoped hooks can access self and instance variables."""
execution_log = []
@CrewBase
class TestCrew:
def __init__(self):
self.crew_name = "TestCrew"
self.call_count = 0
@before_llm_call
def my_hook(self, context):
# Can access self
self.call_count += 1
execution_log.append(f"{self.crew_name}:{self.call_count}")
@agent
def researcher(self):
return Agent(role="Researcher", goal="Research", backstory="Expert")
@crew
def crew(self):
return Crew(agents=self.agents, tasks=[], verbose=False)
# Create instance
crew_instance = TestCrew()
# Get the registered hook
hooks = get_before_llm_call_hooks()
crew_hook = hooks[-1] # Last registered hook
# Create mock context
mock_executor = Mock()
mock_executor.messages = []
mock_executor.agent = Mock(role="Test")
mock_executor.task = Mock()
mock_executor.crew = Mock()
mock_executor.llm = Mock()
mock_executor.iterations = 0
context = LLMCallHookContext(executor=mock_executor)
# Execute hook multiple times
crew_hook(context)
crew_hook(context)
# Verify hook accessed self and modified instance state
assert len(execution_log) == 2
assert execution_log[0] == "TestCrew:1"
assert execution_log[1] == "TestCrew:2"
assert crew_instance.call_count == 2
def test_multiple_crews_have_isolated_hooks(self):
"""Test that different crew instances have isolated hooks."""
crew1_executions = []
crew2_executions = []
@CrewBase
class Crew1:
@before_llm_call
def crew1_hook(self, context):
crew1_executions.append("crew1")
@agent
def researcher(self):
return Agent(role="Researcher", goal="Research", backstory="Expert")
@crew
def crew(self):
return Crew(agents=self.agents, tasks=[], verbose=False)
@CrewBase
class Crew2:
@before_llm_call
def crew2_hook(self, context):
crew2_executions.append("crew2")
@agent
def analyst(self):
return Agent(role="Analyst", goal="Analyze", backstory="Expert")
@crew
def crew(self):
return Crew(agents=self.agents, tasks=[], verbose=False)
# Create both instances
instance1 = Crew1()
instance2 = Crew2()
# Both hooks should be registered
hooks = get_before_llm_call_hooks()
assert len(hooks) >= 2
# Create mock context
mock_executor = Mock()
mock_executor.messages = []
mock_executor.agent = Mock(role="Test")
mock_executor.task = Mock()
mock_executor.crew = Mock()
mock_executor.llm = Mock()
mock_executor.iterations = 0
context = LLMCallHookContext(executor=mock_executor)
# Execute all hooks
for hook in hooks:
hook(context)
# Both hooks should have executed
assert "crew1" in crew1_executions
assert "crew2" in crew2_executions
def test_crew_scoped_hook_with_filters(self):
"""Test that filtered crew-scoped hooks work correctly."""
execution_log = []
@CrewBase
class TestCrew:
@before_tool_call(tools=["delete_file"])
def filtered_hook(self, context):
execution_log.append(f"filtered:{context.tool_name}")
return None
@agent
def researcher(self):
return Agent(role="Researcher", goal="Research", backstory="Expert")
@crew
def crew(self):
return Crew(agents=self.agents, tasks=[], verbose=False)
# Create instance
crew_instance = TestCrew()
# Get registered hooks
hooks = get_before_tool_call_hooks()
crew_hook = hooks[-1] # Last registered
# Test with matching tool
mock_tool = Mock()
context1 = ToolCallHookContext(
tool_name="delete_file", tool_input={}, tool=mock_tool
)
crew_hook(context1)
assert len(execution_log) == 1
assert execution_log[0] == "filtered:delete_file"
# Test with non-matching tool
context2 = ToolCallHookContext(
tool_name="read_file", tool_input={}, tool=mock_tool
)
crew_hook(context2)
# Should still be 1 (filtered hook didn't run)
assert len(execution_log) == 1
def test_crew_scoped_hook_no_double_registration(self):
"""Test that crew-scoped hooks are not registered twice."""
@CrewBase
class TestCrew:
@before_llm_call
def my_hook(self, context):
pass
@agent
def researcher(self):
return Agent(role="Researcher", goal="Research", backstory="Expert")
@crew
def crew(self):
return Crew(agents=self.agents, tasks=[], verbose=False)
# Get initial hook count
initial_hooks = len(get_before_llm_call_hooks())
# Create first instance
instance1 = TestCrew()
# Should add 1 hook
hooks_after_first = get_before_llm_call_hooks()
assert len(hooks_after_first) == initial_hooks + 1
# Create second instance
instance2 = TestCrew()
# Should add another hook (one per instance)
hooks_after_second = get_before_llm_call_hooks()
assert len(hooks_after_second) == initial_hooks + 2
def test_crew_scoped_hook_method_signature(self):
"""Test that crew-scoped hooks have correct signature (self + context)."""
@CrewBase
class TestCrew:
def __init__(self):
self.test_value = "test"
@before_llm_call
def my_hook(self, context):
# Should be able to access both self and context
return f"{self.test_value}:{context.iterations}"
@agent
def researcher(self):
return Agent(role="Researcher", goal="Research", backstory="Expert")
@crew
def crew(self):
return Crew(agents=self.agents, tasks=[], verbose=False)
# Create instance
crew_instance = TestCrew()
# Verify the hook method has is_before_llm_call_hook marker
assert hasattr(crew_instance.my_hook, "__func__")
hook_func = crew_instance.my_hook.__func__
assert hasattr(hook_func, "is_before_llm_call_hook")
assert hook_func.is_before_llm_call_hook is True
def test_crew_scoped_with_agent_filter(self):
"""Test crew-scoped hooks with agent filters."""
execution_log = []
@CrewBase
class TestCrew:
@before_llm_call(agents=["Researcher"])
def filtered_hook(self, context):
execution_log.append(context.agent.role)
@agent
def researcher(self):
return Agent(role="Researcher", goal="Research", backstory="Expert")
@crew
def crew(self):
return Crew(agents=self.agents, tasks=[], verbose=False)
# Create instance
crew_instance = TestCrew()
# Get hooks
hooks = get_before_llm_call_hooks()
crew_hook = hooks[-1]
# Test with matching agent
mock_executor = Mock()
mock_executor.messages = []
mock_executor.agent = Mock(role="Researcher")
mock_executor.task = Mock()
mock_executor.crew = Mock()
mock_executor.llm = Mock()
mock_executor.iterations = 0
context1 = LLMCallHookContext(executor=mock_executor)
crew_hook(context1)
assert len(execution_log) == 1
assert execution_log[0] == "Researcher"
# Test with non-matching agent
mock_executor.agent.role = "Analyst"
context2 = LLMCallHookContext(executor=mock_executor)
crew_hook(context2)
# Should still be 1 (filtered out)
assert len(execution_log) == 1
class TestCrewScopedHookAttributes:
    """Test that crew-scoped hooks have correct attributes set.

    Uses ``TestCrew.__dict__[...]`` rather than attribute access so the
    raw decorated function is inspected, bypassing descriptor binding.
    """
    def test_hook_marker_attribute_is_set(self):
        """Test that decorator sets marker attribute on method."""
        @CrewBase
        class TestCrew:
            @before_llm_call
            def my_hook(self, context):
                pass
            @agent
            def researcher(self):
                return Agent(role="Researcher", goal="Research", backstory="Expert")
            @crew
            def crew(self):
                return Crew(agents=self.agents, tasks=[], verbose=False)
        # Check the unbound method has the marker
        assert hasattr(TestCrew.__dict__["my_hook"], "is_before_llm_call_hook")
        assert TestCrew.__dict__["my_hook"].is_before_llm_call_hook is True
    def test_filter_attributes_are_preserved(self):
        """Test that filter attributes are preserved on methods."""
        @CrewBase
        class TestCrew:
            @before_tool_call(tools=["delete_file"], agents=["Dev"])
            def filtered_hook(self, context):
                return None
            @agent
            def researcher(self):
                return Agent(role="Researcher", goal="Research", backstory="Expert")
            @crew
            def crew(self):
                return Crew(agents=self.agents, tasks=[], verbose=False)
        # Check filter attributes are set
        hook_method = TestCrew.__dict__["filtered_hook"]
        assert hasattr(hook_method, "is_before_tool_call_hook")
        assert hasattr(hook_method, "_filter_tools")
        assert hasattr(hook_method, "_filter_agents")
        assert hook_method._filter_tools == ["delete_file"]
        assert hook_method._filter_agents == ["Dev"]
    def test_registered_hooks_tracked_on_instance(self):
        """Test that registered hooks are tracked on the crew instance."""
        @CrewBase
        class TestCrew:
            @before_llm_call
            def llm_hook(self, context):
                pass
            @before_tool_call
            def tool_hook(self, context):
                return None
            @agent
            def researcher(self):
                return Agent(role="Researcher", goal="Research", backstory="Expert")
            @crew
            def crew(self):
                return Crew(agents=self.agents, tasks=[], verbose=False)
        # Create instance
        crew_instance = TestCrew()
        # Check that hooks are tracked
        assert hasattr(crew_instance, "_registered_hook_functions")
        assert isinstance(crew_instance._registered_hook_functions, list)
        assert len(crew_instance._registered_hook_functions) == 2
        # Check hook types
        # Entries are (hook_type, function) pairs; only the type is checked here.
        hook_types = [ht for ht, _ in crew_instance._registered_hook_functions]
        assert "before_llm_call" in hook_types
        assert "before_tool_call" in hook_types
class TestCrewScopedHookExecution:
    """Test execution behavior of crew-scoped hooks.

    Hooks are pulled off the tail of the global registry (``hooks[-1]``,
    ``hooks[-2]``), which is valid because the autouse ``clear_hooks``
    fixture empties the registries before each test.
    """
    def test_crew_hook_executes_with_bound_self(self):
        """Test that crew-scoped hook executes with self properly bound."""
        execution_log = []
        @CrewBase
        class TestCrew:
            def __init__(self):
                self.instance_id = id(self)
            @before_llm_call
            def my_hook(self, context):
                # Should have access to self
                execution_log.append(self.instance_id)
            @agent
            def researcher(self):
                return Agent(role="Researcher", goal="Research", backstory="Expert")
            @crew
            def crew(self):
                return Crew(agents=self.agents, tasks=[], verbose=False)
        # Create instance
        crew_instance = TestCrew()
        expected_id = crew_instance.instance_id
        # Get and execute hook
        hooks = get_before_llm_call_hooks()
        crew_hook = hooks[-1]
        mock_executor = Mock()
        mock_executor.messages = []
        mock_executor.agent = Mock(role="Test")
        mock_executor.task = Mock()
        mock_executor.crew = Mock()
        mock_executor.llm = Mock()
        mock_executor.iterations = 0
        context = LLMCallHookContext(executor=mock_executor)
        # Execute hook
        crew_hook(context)
        # Verify it had access to self
        assert len(execution_log) == 1
        assert execution_log[0] == expected_id
    def test_crew_hook_can_modify_instance_state(self):
        """Test that crew-scoped hooks can modify instance variables."""
        @CrewBase
        class TestCrew:
            def __init__(self):
                self.counter = 0
            @before_tool_call
            def increment_counter(self, context):
                self.counter += 1
                return None
            @agent
            def researcher(self):
                return Agent(role="Researcher", goal="Research", backstory="Expert")
            @crew
            def crew(self):
                return Crew(agents=self.agents, tasks=[], verbose=False)
        # Create instance
        crew_instance = TestCrew()
        assert crew_instance.counter == 0
        # Get and execute hook
        hooks = get_before_tool_call_hooks()
        crew_hook = hooks[-1]
        mock_tool = Mock()
        context = ToolCallHookContext(tool_name="test", tool_input={}, tool=mock_tool)
        # Execute hook 3 times
        crew_hook(context)
        crew_hook(context)
        crew_hook(context)
        # Verify counter was incremented
        assert crew_instance.counter == 3
    def test_multiple_instances_maintain_separate_state(self):
        """Test that multiple instances of the same crew maintain separate state."""
        @CrewBase
        class TestCrew:
            def __init__(self):
                self.call_count = 0
            @before_llm_call
            def count_calls(self, context):
                self.call_count += 1
            @agent
            def researcher(self):
                return Agent(role="Researcher", goal="Research", backstory="Expert")
            @crew
            def crew(self):
                return Crew(agents=self.agents, tasks=[], verbose=False)
        # Create two instances
        instance1 = TestCrew()
        instance2 = TestCrew()
        # Get all hooks (should include hooks from both instances)
        all_hooks = get_before_llm_call_hooks()
        # Find hooks for each instance (last 2 registered)
        hook1 = all_hooks[-2]
        hook2 = all_hooks[-1]
        # Create mock context
        mock_executor = Mock()
        mock_executor.messages = []
        mock_executor.agent = Mock(role="Test")
        mock_executor.task = Mock()
        mock_executor.crew = Mock()
        mock_executor.llm = Mock()
        mock_executor.iterations = 0
        context = LLMCallHookContext(executor=mock_executor)
        # Execute first hook twice
        hook1(context)
        hook1(context)
        # Execute second hook once
        hook2(context)
        # Each instance should have independent state
        # Note: We can't easily verify which hook belongs to which instance
        # in this test without more introspection, but the fact that it doesn't
        # crash and hooks can maintain state proves isolation works
class TestSignatureDetection:
    """Test that signature detection correctly identifies methods vs functions."""

    def test_method_signature_detected(self):
        """A hook declared inside a @CrewBase class keeps its 'self' parameter."""
        import inspect

        @CrewBase
        class TestCrew:
            @before_llm_call
            def method_hook(self, context):
                pass

            @agent
            def researcher(self):
                return Agent(role="Researcher", goal="Research", backstory="Expert")

            @crew
            def crew(self):
                return Crew(agents=self.agents, tasks=[], verbose=False)

        # __dict__ access bypasses descriptor binding, exposing the raw function.
        raw_hook = TestCrew.__dict__["method_hook"]
        parameter_names = list(inspect.signature(raw_hook).parameters)
        assert parameter_names[0] == "self"
        assert len(parameter_names) == 2  # self + context

    def test_standalone_function_signature_detected(self):
        """A module-level hook function has only the context parameter."""
        import inspect

        @before_llm_call
        def standalone_hook(context):
            pass

        parameter_names = list(inspect.signature(standalone_hook).parameters)
        assert "self" not in parameter_names
        assert len(parameter_names) == 1  # just context
        # Decorating alone must also have registered the hook globally.
        assert len(get_before_llm_call_hooks()) >= 1
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/hooks/test_crew_scoped_hooks.py",
"license": "MIT License",
"lines": 471,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/tests/hooks/test_decorators.py | """Tests for decorator-based hook registration."""
from __future__ import annotations
from unittest.mock import Mock
import pytest
from crewai.hooks import (
after_llm_call,
after_tool_call,
before_llm_call,
before_tool_call,
get_after_llm_call_hooks,
get_after_tool_call_hooks,
get_before_llm_call_hooks,
get_before_tool_call_hooks,
)
from crewai.hooks.llm_hooks import LLMCallHookContext
from crewai.hooks.tool_hooks import ToolCallHookContext
@pytest.fixture(autouse=True)
def clear_hooks():
    """Isolate each test from the global hook registries.

    Snapshots all four module-level hook lists, runs the test with them
    emptied, then restores the snapshots so other modules see no change.
    """
    from crewai.hooks import llm_hooks, tool_hooks

    registries = (
        llm_hooks._before_llm_call_hooks,
        llm_hooks._after_llm_call_hooks,
        tool_hooks._before_tool_call_hooks,
        tool_hooks._after_tool_call_hooks,
    )
    snapshots = [registry.copy() for registry in registries]
    for registry in registries:
        registry.clear()
    yield
    for registry, snapshot in zip(registries, snapshots):
        registry.clear()
        registry.extend(snapshot)
class TestLLMHookDecorators:
    """Test LLM hook decorators.

    The autouse ``clear_hooks`` fixture empties the global registries, so
    each test can assert exact registry lengths.
    """
    def test_before_llm_call_decorator_registers_hook(self):
        """Test that @before_llm_call decorator registers the hook."""
        @before_llm_call
        def test_hook(context):
            pass
        hooks = get_before_llm_call_hooks()
        assert len(hooks) == 1
    def test_after_llm_call_decorator_registers_hook(self):
        """Test that @after_llm_call decorator registers the hook."""
        @after_llm_call
        def test_hook(context):
            return None
        hooks = get_after_llm_call_hooks()
        assert len(hooks) == 1
    def test_decorated_hook_executes_correctly(self):
        """Test that decorated hook executes and modifies behavior."""
        execution_log = []
        @before_llm_call
        def test_hook(context):
            execution_log.append("executed")
        # Create mock context
        # Minimal executor stand-in with the attributes LLMCallHookContext reads.
        mock_executor = Mock()
        mock_executor.messages = []
        mock_executor.agent = Mock(role="Test")
        mock_executor.task = Mock()
        mock_executor.crew = Mock()
        mock_executor.llm = Mock()
        mock_executor.iterations = 0
        context = LLMCallHookContext(executor=mock_executor)
        # Execute the hook
        hooks = get_before_llm_call_hooks()
        hooks[0](context)
        assert len(execution_log) == 1
        assert execution_log[0] == "executed"
    def test_before_llm_call_with_agent_filter(self):
        """Test that agent filter works correctly."""
        execution_log = []
        @before_llm_call(agents=["Researcher"])
        def filtered_hook(context):
            execution_log.append(context.agent.role)
        hooks = get_before_llm_call_hooks()
        assert len(hooks) == 1
        # Test with matching agent
        mock_executor = Mock()
        mock_executor.messages = []
        mock_executor.agent = Mock(role="Researcher")
        mock_executor.task = Mock()
        mock_executor.crew = Mock()
        mock_executor.llm = Mock()
        mock_executor.iterations = 0
        context = LLMCallHookContext(executor=mock_executor)
        hooks[0](context)
        assert len(execution_log) == 1
        assert execution_log[0] == "Researcher"
        # Test with non-matching agent
        mock_executor.agent.role = "Analyst"
        context2 = LLMCallHookContext(executor=mock_executor)
        hooks[0](context2)
        # Should still be 1 (hook didn't execute)
        assert len(execution_log) == 1
class TestToolHookDecorators:
    """Test tool hook decorators.

    Relies on the autouse ``clear_hooks`` fixture for exact registry counts.
    """
    def test_before_tool_call_decorator_registers_hook(self):
        """Test that @before_tool_call decorator registers the hook."""
        @before_tool_call
        def test_hook(context):
            return None
        hooks = get_before_tool_call_hooks()
        assert len(hooks) == 1
    def test_after_tool_call_decorator_registers_hook(self):
        """Test that @after_tool_call decorator registers the hook."""
        @after_tool_call
        def test_hook(context):
            return None
        hooks = get_after_tool_call_hooks()
        assert len(hooks) == 1
    def test_before_tool_call_with_tool_filter(self):
        """Test that tool filter works correctly."""
        execution_log = []
        @before_tool_call(tools=["delete_file", "execute_code"])
        def filtered_hook(context):
            execution_log.append(context.tool_name)
            return None
        hooks = get_before_tool_call_hooks()
        assert len(hooks) == 1
        # Test with matching tool
        mock_tool = Mock()
        context = ToolCallHookContext(
            tool_name="delete_file",
            tool_input={},
            tool=mock_tool,
        )
        hooks[0](context)
        assert len(execution_log) == 1
        assert execution_log[0] == "delete_file"
        # Test with non-matching tool
        context2 = ToolCallHookContext(
            tool_name="read_file",
            tool_input={},
            tool=mock_tool,
        )
        hooks[0](context2)
        # Should still be 1 (hook didn't execute for read_file)
        assert len(execution_log) == 1
    def test_before_tool_call_with_combined_filters(self):
        """Test that combined tool and agent filters work."""
        execution_log = []
        @before_tool_call(tools=["write_file"], agents=["Developer"])
        def filtered_hook(context):
            execution_log.append(f"{context.tool_name}-{context.agent.role}")
            return None
        hooks = get_before_tool_call_hooks()
        mock_tool = Mock()
        mock_agent = Mock(role="Developer")
        # Test with both matching
        context = ToolCallHookContext(
            tool_name="write_file",
            tool_input={},
            tool=mock_tool,
            agent=mock_agent,
        )
        hooks[0](context)
        assert len(execution_log) == 1
        assert execution_log[0] == "write_file-Developer"
        # Test with tool matching but agent not
        # Both filters must match; only the agent role is changed here.
        mock_agent.role = "Researcher"
        context2 = ToolCallHookContext(
            tool_name="write_file",
            tool_input={},
            tool=mock_tool,
            agent=mock_agent,
        )
        hooks[0](context2)
        # Should still be 1 (hook didn't execute)
        assert len(execution_log) == 1
    def test_after_tool_call_with_filter(self):
        """Test that after_tool_call decorator with filter works."""
        @after_tool_call(tools=["web_search"])
        def filtered_hook(context):
            if context.tool_result:
                return context.tool_result.upper()
            return None
        hooks = get_after_tool_call_hooks()
        mock_tool = Mock()
        # Test with matching tool
        context = ToolCallHookContext(
            tool_name="web_search",
            tool_input={},
            tool=mock_tool,
            tool_result="result",
        )
        result = hooks[0](context)
        assert result == "RESULT"
        # Test with non-matching tool
        context2 = ToolCallHookContext(
            tool_name="other_tool",
            tool_input={},
            tool=mock_tool,
            tool_result="result",
        )
        result2 = hooks[0](context2)
        assert result2 is None  # Hook didn't run, returns None
class TestDecoratorAttributes:
    """Test that decorators set proper attributes on functions."""

    def test_before_llm_call_sets_attribute(self):
        """The bare decorator marks the function as a before-LLM hook."""
        @before_llm_call
        def test_hook(context):
            pass

        assert hasattr(test_hook, "is_before_llm_call_hook")
        assert test_hook.is_before_llm_call_hook is True

    def test_before_tool_call_sets_attributes_with_filters(self):
        """The parameterized decorator records both its filter arguments."""
        @before_tool_call(tools=["delete_file"], agents=["Dev"])
        def test_hook(context):
            return None

        assert hasattr(test_hook, "is_before_tool_call_hook")
        assert test_hook.is_before_tool_call_hook is True
        assert hasattr(test_hook, "_filter_tools")
        assert hasattr(test_hook, "_filter_agents")
        assert test_hook._filter_tools == ["delete_file"]
        assert test_hook._filter_agents == ["Dev"]
class TestMultipleDecorators:
    """Test using multiple decorators together."""

    def test_multiple_decorators_all_register(self):
        """Every decorated function lands in its respective registry."""
        @before_llm_call
        def first_before(context):
            pass

        @before_llm_call
        def second_before(context):
            pass

        @after_llm_call
        def only_after(context):
            return None

        assert len(get_before_llm_call_hooks()) == 2
        assert len(get_after_llm_call_hooks()) == 1

    def test_decorator_and_manual_registration_work_together(self):
        """Decorator-based and manual registration share one registry."""
        from crewai.hooks import register_before_tool_call_hook

        @before_tool_call
        def decorated(context):
            return None

        def registered_by_hand(context):
            return None

        register_before_tool_call_hook(registered_by_hand)
        assert len(get_before_tool_call_hooks()) == 2
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/hooks/test_decorators.py",
"license": "MIT License",
"lines": 252,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/tests/hooks/test_human_approval.py | """Tests for human approval functionality in hooks."""
from __future__ import annotations
from unittest.mock import Mock, patch
from crewai.hooks.llm_hooks import LLMCallHookContext
from crewai.hooks.tool_hooks import ToolCallHookContext
import pytest
@pytest.fixture
def mock_executor():
    """Mock crew-agent executor carrying the attributes LLMCallHookContext reads."""
    executor = Mock()
    executor.agent = Mock(role="Test Agent")
    executor.task = Mock(description="Test Task")
    executor.crew = Mock()
    executor.llm = Mock()
    executor.messages = [{"role": "system", "content": "Test message"}]
    executor.iterations = 0
    return executor
@pytest.fixture
def mock_tool():
    """Mock tool exposing the name/description attributes hooks inspect."""
    fake_tool = Mock()
    # Assigned after construction: Mock(name=...) is reserved for the
    # mock's own repr name and would not create a .name attribute.
    fake_tool.name = "test_tool"
    fake_tool.description = "Test tool description"
    return fake_tool
@pytest.fixture
def mock_agent():
    """Mock agent exposing the role attribute used by hook filters."""
    fake_agent = Mock()
    fake_agent.role = "Test Agent"
    return fake_agent
@pytest.fixture
def mock_task():
    """Mock task exposing a description attribute for hook contexts."""
    fake_task = Mock()
    fake_task.description = "Test task"
    return fake_task
class TestLLMHookHumanInput:
    """Test request_human_input() on LLMCallHookContext.

    ``event_listener`` is patched where it is looked up
    (``crewai.hooks.llm_hooks``), and ``builtins.input`` is patched to
    simulate the human response.
    """
    @patch("builtins.input", return_value="test response")
    @patch("crewai.hooks.llm_hooks.event_listener")
    def test_request_human_input_returns_user_response(
        self, mock_event_listener, mock_input, mock_executor
    ):
        """Test that request_human_input returns the user's input."""
        # Setup mock formatter
        mock_formatter = Mock()
        mock_event_listener.formatter = mock_formatter
        context = LLMCallHookContext(executor=mock_executor)
        response = context.request_human_input(
            prompt="Test prompt", default_message="Test default message"
        )
        assert response == "test response"
        mock_input.assert_called_once()
    @patch("builtins.input", return_value="")
    @patch("crewai.hooks.llm_hooks.event_listener")
    def test_request_human_input_returns_empty_string_on_enter(
        self, mock_event_listener, mock_input, mock_executor
    ):
        """Test that pressing Enter returns empty string."""
        mock_formatter = Mock()
        mock_event_listener.formatter = mock_formatter
        context = LLMCallHookContext(executor=mock_executor)
        response = context.request_human_input(prompt="Test")
        assert response == ""
        mock_input.assert_called_once()
    @patch("builtins.input", return_value="test")
    @patch("crewai.hooks.llm_hooks.event_listener")
    def test_request_human_input_pauses_and_resumes_live_updates(
        self, mock_event_listener, mock_input, mock_executor
    ):
        """Test that live updates are paused and resumed."""
        mock_formatter = Mock()
        mock_event_listener.formatter = mock_formatter
        context = LLMCallHookContext(executor=mock_executor)
        context.request_human_input(prompt="Test")
        # Verify pause was called
        mock_formatter.pause_live_updates.assert_called_once()
        # Verify resume was called
        mock_formatter.resume_live_updates.assert_called_once()
    @patch("builtins.input", side_effect=Exception("Input error"))
    @patch("crewai.hooks.llm_hooks.event_listener")
    def test_request_human_input_resumes_on_exception(
        self, mock_event_listener, mock_input, mock_executor
    ):
        """Test that live updates are resumed even if input raises exception."""
        mock_formatter = Mock()
        mock_event_listener.formatter = mock_formatter
        context = LLMCallHookContext(executor=mock_executor)
        with pytest.raises(Exception, match="Input error"):
            context.request_human_input(prompt="Test")
        # Verify resume was still called (in finally block)
        mock_formatter.resume_live_updates.assert_called_once()
    @patch("builtins.input", return_value="  test response  ")
    @patch("crewai.hooks.llm_hooks.event_listener")
    def test_request_human_input_strips_whitespace(
        self, mock_event_listener, mock_input, mock_executor
    ):
        """Test that user input is stripped of leading/trailing whitespace."""
        mock_formatter = Mock()
        mock_event_listener.formatter = mock_formatter
        context = LLMCallHookContext(executor=mock_executor)
        response = context.request_human_input(prompt="Test")
        assert response == "test response"  # Whitespace stripped
class TestToolHookHumanInput:
    """Test request_human_input() on ToolCallHookContext.

    Mirrors TestLLMHookHumanInput, but patches ``event_listener`` where
    the tool-hook module looks it up (``crewai.hooks.tool_hooks``).
    """
    @patch("builtins.input", return_value="approve")
    @patch("crewai.hooks.tool_hooks.event_listener")
    def test_request_human_input_returns_user_response(
        self, mock_event_listener, mock_input, mock_tool, mock_agent, mock_task
    ):
        """Test that request_human_input returns the user's input."""
        mock_formatter = Mock()
        mock_event_listener.formatter = mock_formatter
        context = ToolCallHookContext(
            tool_name="test_tool",
            tool_input={"arg": "value"},
            tool=mock_tool,
            agent=mock_agent,
            task=mock_task,
        )
        response = context.request_human_input(
            prompt="Approve this tool?", default_message="Type 'approve':"
        )
        assert response == "approve"
        mock_input.assert_called_once()
    @patch("builtins.input", return_value="")
    @patch("crewai.hooks.tool_hooks.event_listener")
    def test_request_human_input_handles_empty_input(
        self, mock_event_listener, mock_input, mock_tool
    ):
        """Test that empty input (Enter key) is handled correctly."""
        mock_formatter = Mock()
        mock_event_listener.formatter = mock_formatter
        context = ToolCallHookContext(
            tool_name="test_tool",
            tool_input={},
            tool=mock_tool,
        )
        response = context.request_human_input(prompt="Test")
        assert response == ""
    @patch("builtins.input", return_value="test")
    @patch("crewai.hooks.tool_hooks.event_listener")
    def test_request_human_input_pauses_and_resumes(
        self, mock_event_listener, mock_input, mock_tool
    ):
        """Test that live updates are properly paused and resumed."""
        mock_formatter = Mock()
        mock_event_listener.formatter = mock_formatter
        context = ToolCallHookContext(
            tool_name="test_tool",
            tool_input={},
            tool=mock_tool,
        )
        context.request_human_input(prompt="Test")
        mock_formatter.pause_live_updates.assert_called_once()
        mock_formatter.resume_live_updates.assert_called_once()
    @patch("builtins.input", side_effect=KeyboardInterrupt)
    @patch("crewai.hooks.tool_hooks.event_listener")
    def test_request_human_input_resumes_on_keyboard_interrupt(
        self, mock_event_listener, mock_input, mock_tool
    ):
        """Test that live updates are resumed even on keyboard interrupt."""
        mock_formatter = Mock()
        mock_event_listener.formatter = mock_formatter
        context = ToolCallHookContext(
            tool_name="test_tool",
            tool_input={},
            tool=mock_tool,
        )
        with pytest.raises(KeyboardInterrupt):
            context.request_human_input(prompt="Test")
        # Verify resume was still called (in finally block)
        mock_formatter.resume_live_updates.assert_called_once()
class TestApprovalHookIntegration:
    """Test integration scenarios with approval hooks.

    Hook return-value convention exercised here: ``None`` means "allow /
    keep original", ``False`` means "block", and a string from an
    after-hook replaces the tool result.
    """
    @patch("builtins.input", return_value="approve")
    @patch("crewai.hooks.tool_hooks.event_listener")
    def test_approval_hook_allows_execution(
        self, mock_event_listener, mock_input, mock_tool
    ):
        """Test that approval hook allows execution when approved."""
        mock_formatter = Mock()
        mock_event_listener.formatter = mock_formatter
        def approval_hook(context: ToolCallHookContext) -> bool | None:
            response = context.request_human_input(
                prompt="Approve?", default_message="Type 'approve':"
            )
            return None if response == "approve" else False
        context = ToolCallHookContext(
            tool_name="test_tool",
            tool_input={},
            tool=mock_tool,
        )
        result = approval_hook(context)
        assert result is None  # Allowed
        assert mock_input.called
    @patch("builtins.input", return_value="deny")
    @patch("crewai.hooks.tool_hooks.event_listener")
    def test_approval_hook_blocks_execution(
        self, mock_event_listener, mock_input, mock_tool
    ):
        """Test that approval hook blocks execution when denied."""
        mock_formatter = Mock()
        mock_event_listener.formatter = mock_formatter
        def approval_hook(context: ToolCallHookContext) -> bool | None:
            response = context.request_human_input(
                prompt="Approve?", default_message="Type 'approve':"
            )
            return None if response == "approve" else False
        context = ToolCallHookContext(
            tool_name="test_tool",
            tool_input={},
            tool=mock_tool,
        )
        result = approval_hook(context)
        assert result is False  # Blocked
        assert mock_input.called
    @patch("builtins.input", return_value="modified result")
    @patch("crewai.hooks.tool_hooks.event_listener")
    def test_review_hook_modifies_result(
        self, mock_event_listener, mock_input, mock_tool
    ):
        """Test that review hook can modify tool results."""
        mock_formatter = Mock()
        mock_event_listener.formatter = mock_formatter
        def review_hook(context: ToolCallHookContext) -> str | None:
            response = context.request_human_input(
                prompt="Review result",
                default_message="Press Enter to keep, or provide modified version:",
            )
            return response if response else None
        context = ToolCallHookContext(
            tool_name="test_tool",
            tool_input={},
            tool=mock_tool,
            tool_result="original result",
        )
        modified_result = review_hook(context)
        assert modified_result == "modified result"
        assert mock_input.called
    @patch("builtins.input", return_value="")
    @patch("crewai.hooks.tool_hooks.event_listener")
    def test_review_hook_keeps_original_on_enter(
        self, mock_event_listener, mock_input, mock_tool
    ):
        """Test that pressing Enter keeps original result."""
        mock_formatter = Mock()
        mock_event_listener.formatter = mock_formatter
        def review_hook(context: ToolCallHookContext) -> str | None:
            response = context.request_human_input(
                prompt="Review result", default_message="Press Enter to keep:"
            )
            return response if response else None
        context = ToolCallHookContext(
            tool_name="test_tool",
            tool_input={},
            tool=mock_tool,
            tool_result="original result",
        )
        modified_result = review_hook(context)
        assert modified_result is None  # Keep original
class TestCostControlApproval:
    """Test cost control approval hook scenarios.

    Demonstrates a hook that prompts the human only once the executor's
    iteration count exceeds a threshold.
    """
    @patch("builtins.input", return_value="yes")
    @patch("crewai.hooks.llm_hooks.event_listener")
    def test_cost_control_allows_when_approved(
        self, mock_event_listener, mock_input, mock_executor
    ):
        """Test that expensive calls are allowed when approved."""
        mock_formatter = Mock()
        mock_event_listener.formatter = mock_formatter
        # Set high iteration count
        # 10 > 5 ensures the hook's prompt branch is taken.
        mock_executor.iterations = 10
        def cost_control_hook(context: LLMCallHookContext) -> None:
            if context.iterations > 5:
                response = context.request_human_input(
                    prompt=f"Iteration {context.iterations} - expensive call",
                    default_message="Type 'yes' to continue:",
                )
                if response.lower() != "yes":
                    print("Call blocked")
        context = LLMCallHookContext(executor=mock_executor)
        # Should not raise exception and should call input
        cost_control_hook(context)
        assert mock_input.called
    @patch("builtins.input", return_value="no")
    @patch("crewai.hooks.llm_hooks.event_listener")
    def test_cost_control_logs_when_denied(
        self, mock_event_listener, mock_input, mock_executor
    ):
        """Test that denied calls are logged."""
        mock_formatter = Mock()
        mock_event_listener.formatter = mock_formatter
        mock_executor.iterations = 10
        messages_logged = []
        def cost_control_hook(context: LLMCallHookContext) -> None:
            if context.iterations > 5:
                response = context.request_human_input(
                    prompt=f"Iteration {context.iterations}",
                    default_message="Type 'yes' to continue:",
                )
                if response.lower() != "yes":
                    messages_logged.append("blocked")
        context = LLMCallHookContext(executor=mock_executor)
        cost_control_hook(context)
        assert len(messages_logged) == 1
        assert messages_logged[0] == "blocked"
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/hooks/test_human_approval.py",
"license": "MIT License",
"lines": 307,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/tests/hooks/test_llm_hooks.py | """Unit tests for LLM hooks functionality."""
from __future__ import annotations
from unittest.mock import Mock
from crewai.hooks import clear_all_llm_call_hooks, unregister_after_llm_call_hook, unregister_before_llm_call_hook
import pytest
from crewai.hooks.llm_hooks import (
LLMCallHookContext,
get_after_llm_call_hooks,
get_before_llm_call_hooks,
register_after_llm_call_hook,
register_before_llm_call_hook,
)
@pytest.fixture
def mock_executor():
    """Build a stub executor exposing the attributes hook contexts mirror."""
    stub = Mock()
    stub.agent = Mock(role="Test Agent")
    stub.task = Mock(description="Test Task")
    stub.crew = Mock()
    stub.llm = Mock()
    # One seed message so mutation tests can observe a second entry appearing.
    stub.messages = [{"role": "system", "content": "Test message"}]
    stub.iterations = 0
    return stub
@pytest.fixture(autouse=True)
def clear_hooks():
    """Isolate each test from the process-global LLM hook registries."""
    # The registries are private module-level lists, so reach into the module.
    from crewai.hooks import llm_hooks

    # Snapshot whatever was registered before this test ran.
    saved_before = llm_hooks._before_llm_call_hooks.copy()
    saved_after = llm_hooks._after_llm_call_hooks.copy()
    llm_hooks._before_llm_call_hooks.clear()
    llm_hooks._after_llm_call_hooks.clear()
    yield
    # Restore the snapshot in place so later tests see the original hooks.
    llm_hooks._before_llm_call_hooks[:] = saved_before
    llm_hooks._after_llm_call_hooks[:] = saved_after
class TestLLMCallHookContext:
    """Test LLMCallHookContext initialization and attributes."""

    def test_context_initialization(self, mock_executor):
        """Test that context is initialized correctly with executor."""
        ctx = LLMCallHookContext(executor=mock_executor)
        assert ctx.executor == mock_executor
        # Each mirrored attribute must come straight from the executor.
        for attr in ("messages", "agent", "task", "crew", "llm", "iterations"):
            assert getattr(ctx, attr) == getattr(mock_executor, attr)
        assert ctx.response is None

    def test_context_with_response(self, mock_executor):
        """Test that context includes response when provided."""
        ctx = LLMCallHookContext(executor=mock_executor, response="Test LLM response")
        assert ctx.response == "Test LLM response"

    def test_messages_are_mutable_reference(self, mock_executor):
        """Test that modifying context.messages modifies executor.messages."""
        ctx = LLMCallHookContext(executor=mock_executor)
        appended = {"role": "user", "content": "New message"}
        ctx.messages.append(appended)
        # The context holds the executor's list itself, not a copy.
        assert appended in mock_executor.messages
        assert len(mock_executor.messages) == 2
class TestBeforeLLMCallHooks:
    """Test before_llm_call hook registration and execution."""

    def test_register_before_hook(self):
        """Test that before hooks are registered correctly."""
        def sample_hook(context):
            pass

        register_before_llm_call_hook(sample_hook)
        # Registry held exactly the one hook we added.
        assert get_before_llm_call_hooks() == [sample_hook]

    def test_multiple_before_hooks(self):
        """Test that multiple before hooks can be registered."""
        def first(context):
            pass

        def second(context):
            pass

        for hook in (first, second):
            register_before_llm_call_hook(hook)
        registered = get_before_llm_call_hooks()
        assert len(registered) == 2
        assert first in registered and second in registered

    def test_before_hook_can_modify_messages(self, mock_executor):
        """Test that before hooks can modify messages in-place."""
        def add_message_hook(context):
            context.messages.append({"role": "system", "content": "Added by hook"})

        ctx = LLMCallHookContext(executor=mock_executor)
        add_message_hook(ctx)
        assert len(ctx.messages) == 2
        assert ctx.messages[1]["content"] == "Added by hook"

    def test_get_before_hooks_returns_copy(self):
        """Test that get_before_llm_call_hooks returns a copy."""
        def sample_hook(context):
            pass

        register_before_llm_call_hook(sample_hook)
        snapshot_a = get_before_llm_call_hooks()
        snapshot_b = get_before_llm_call_hooks()
        # Same contents, distinct list objects.
        assert snapshot_a == snapshot_b
        assert snapshot_a is not snapshot_b
class TestAfterLLMCallHooks:
    """Test after_llm_call hook registration and execution."""

    def test_register_after_hook(self):
        """Test that after hooks are registered correctly."""
        def sample_hook(context):
            return None

        register_after_llm_call_hook(sample_hook)
        assert get_after_llm_call_hooks() == [sample_hook]

    def test_multiple_after_hooks(self):
        """Test that multiple after hooks can be registered."""
        def first(context):
            return None

        def second(context):
            return None

        for hook in (first, second):
            register_after_llm_call_hook(hook)
        registered = get_after_llm_call_hooks()
        assert len(registered) == 2
        assert first in registered and second in registered

    def test_after_hook_can_modify_response(self, mock_executor):
        """Test that after hooks can modify the response."""
        def modify_response_hook(context):
            if not context.response:
                return None
            return context.response.replace("Original", "Modified")

        ctx = LLMCallHookContext(executor=mock_executor, response="Original response")
        assert modify_response_hook(ctx) == "Modified response"

    def test_after_hook_returns_none_keeps_original(self, mock_executor):
        """Test that returning None keeps the original response."""
        def no_change_hook(context):
            return None

        ctx = LLMCallHookContext(executor=mock_executor, response="Original response")
        # None means "no modification": the context keeps what it was given.
        assert no_change_hook(ctx) is None
        assert ctx.response == "Original response"

    def test_get_after_hooks_returns_copy(self):
        """Test that get_after_llm_call_hooks returns a copy."""
        def sample_hook(context):
            return None

        register_after_llm_call_hook(sample_hook)
        snapshot_a = get_after_llm_call_hooks()
        snapshot_b = get_after_llm_call_hooks()
        # Same contents, distinct list objects.
        assert snapshot_a == snapshot_b
        assert snapshot_a is not snapshot_b
class TestLLMHooksIntegration:
    """Test integration scenarios with multiple hooks.

    These tests mutate the module-level hook registries directly and rely on
    the autouse ``clear_hooks`` fixture to isolate registrations per test.
    """

    def test_multiple_before_hooks_execute_in_order(self, mock_executor):
        """Test that multiple before hooks execute in registration order."""
        execution_order = []

        def hook1(context):
            execution_order.append(1)

        def hook2(context):
            execution_order.append(2)

        def hook3(context):
            execution_order.append(3)

        register_before_llm_call_hook(hook1)
        register_before_llm_call_hook(hook2)
        register_before_llm_call_hook(hook3)
        context = LLMCallHookContext(executor=mock_executor)
        # Drive the hooks manually, the way the executor would.
        hooks = get_before_llm_call_hooks()
        for hook in hooks:
            hook(context)
        assert execution_order == [1, 2, 3]

    def test_multiple_after_hooks_chain_modifications(self, mock_executor):
        """Test that multiple after hooks can chain modifications."""

        def hook1(context):
            if context.response:
                return context.response + " [hook1]"
            return None

        def hook2(context):
            if context.response:
                return context.response + " [hook2]"
            return None

        register_after_llm_call_hook(hook1)
        register_after_llm_call_hook(hook2)
        context = LLMCallHookContext(executor=mock_executor, response="Original")
        hooks = get_after_llm_call_hooks()
        # Simulate chaining (how it would be used in practice)
        result = context.response
        for hook in hooks:
            # Update context for next hook
            context.response = result
            modified = hook(context)
            if modified is not None:
                result = modified
        assert result == "Original [hook1] [hook2]"

    def test_unregister_before_hook(self):
        """Test that before hooks can be unregistered."""

        def test_hook(context):
            pass

        register_before_llm_call_hook(test_hook)
        unregister_before_llm_call_hook(test_hook)
        hooks = get_before_llm_call_hooks()
        assert len(hooks) == 0

    def test_unregister_after_hook(self):
        """Test that after hooks can be unregistered."""

        def test_hook(context):
            return None

        register_after_llm_call_hook(test_hook)
        unregister_after_llm_call_hook(test_hook)
        hooks = get_after_llm_call_hooks()
        assert len(hooks) == 0

    def test_clear_all_llm_call_hooks(self):
        """Test that all llm call hooks can be cleared."""

        def test_hook(context):
            pass

        register_before_llm_call_hook(test_hook)
        register_after_llm_call_hook(test_hook)
        clear_all_llm_call_hooks()
        # NOTE(review): only the before-registry is asserted empty here; an
        # assertion on get_after_llm_call_hooks() would strengthen this test.
        hooks = get_before_llm_call_hooks()
        assert len(hooks) == 0

    @pytest.mark.vcr()
    def test_lite_agent_hooks_integration_with_real_llm(self):
        """Test that LiteAgent executes before/after LLM call hooks and prints messages correctly."""
        import os
        from crewai.lite_agent import LiteAgent
        # Skip if no API key available
        if not os.environ.get("OPENAI_API_KEY"):
            pytest.skip("OPENAI_API_KEY not set - skipping real LLM test")
        # Track hook invocations
        hook_calls = {"before": [], "after": []}

        def before_llm_call_hook(context: LLMCallHookContext) -> bool:
            """Log and verify before hook execution."""
            print(f"\n[BEFORE HOOK] Agent: {context.agent.role if context.agent else 'None'}")
            print(f"[BEFORE HOOK] Iterations: {context.iterations}")
            print(f"[BEFORE HOOK] Message count: {len(context.messages)}")
            print(f"[BEFORE HOOK] Messages: {context.messages}")
            # Track the call
            hook_calls["before"].append({
                "iterations": context.iterations,
                "message_count": len(context.messages),
                "has_task": context.task is not None,
                "has_crew": context.crew is not None,
            })
            return True  # Allow execution

        def after_llm_call_hook(context: LLMCallHookContext) -> str | None:
            """Log and verify after hook execution."""
            print(f"\n[AFTER HOOK] Agent: {context.agent.role if context.agent else 'None'}")
            print(f"[AFTER HOOK] Iterations: {context.iterations}")
            print(f"[AFTER HOOK] Response: {context.response[:100] if context.response else 'None'}...")
            print(f"[AFTER HOOK] Final message count: {len(context.messages)}")
            # Track the call
            hook_calls["after"].append({
                "iterations": context.iterations,
                "has_response": context.response is not None,
                "response_length": len(context.response) if context.response else 0,
            })
            # Optionally modify response
            if context.response:
                return f"[HOOKED] {context.response}"
            return None

        # Register hooks
        register_before_llm_call_hook(before_llm_call_hook)
        register_after_llm_call_hook(after_llm_call_hook)
        try:
            # Create LiteAgent
            lite_agent = LiteAgent(
                role="Test Assistant",
                goal="Answer questions briefly",
                backstory="You are a helpful test assistant",
                verbose=True,
            )
            # Verify hooks are loaded
            assert len(lite_agent.before_llm_call_hooks) > 0, "Before hooks not loaded"
            assert len(lite_agent.after_llm_call_hooks) > 0, "After hooks not loaded"
            # Execute with a simple prompt
            result = lite_agent.kickoff("Say 'Hello World' and nothing else")
            # Verify hooks were called
            assert len(hook_calls["before"]) > 0, "Before hook was never called"
            assert len(hook_calls["after"]) > 0, "After hook was never called"
            # Verify context had correct attributes for LiteAgent (used in flows)
            # LiteAgent doesn't have task/crew context, unlike agents in CrewBase
            before_call = hook_calls["before"][0]
            assert before_call["has_task"] is False, "Task should be None for LiteAgent in flows"
            assert before_call["has_crew"] is False, "Crew should be None for LiteAgent in flows"
            assert before_call["message_count"] > 0, "Should have messages"
            # Verify after hook received response
            after_call = hook_calls["after"][0]
            assert after_call["has_response"] is True, "After hook should have response"
            assert after_call["response_length"] > 0, "Response should not be empty"
            # Verify response was modified by after hook
            # Note: The hook modifies the raw LLM response, but LiteAgent then parses it
            # to extract the "Final Answer" portion. We check the messages to see the modification.
            assert len(result.messages) > 2, "Should have assistant message in messages"
            last_message = result.messages[-1]
            assert last_message["role"] == "assistant", "Last message should be from assistant"
            assert "[HOOKED]" in last_message["content"], "Hook should have modified the assistant message"
        finally:
            # Clean up hooks
            unregister_before_llm_call_hook(before_llm_call_hook)
            unregister_after_llm_call_hook(after_llm_call_hook)

    @pytest.mark.vcr()
    def test_direct_llm_call_hooks_integration(self):
        """Test that hooks work for direct llm.call() without agents."""
        import os
        from crewai.llm import LLM
        # Skip if no API key available
        if not os.environ.get("OPENAI_API_KEY"):
            pytest.skip("OPENAI_API_KEY not set - skipping real LLM test")
        # Track hook invocations
        hook_calls = {"before": [], "after": []}

        def before_hook(context: LLMCallHookContext) -> bool:
            """Log and verify before hook execution."""
            print(f"\n[BEFORE HOOK] Agent: {context.agent}")
            print(f"[BEFORE HOOK] Task: {context.task}")
            print(f"[BEFORE HOOK] Crew: {context.crew}")
            print(f"[BEFORE HOOK] LLM: {context.llm}")
            print(f"[BEFORE HOOK] Iterations: {context.iterations}")
            print(f"[BEFORE HOOK] Message count: {len(context.messages)}")
            # Track the call
            hook_calls["before"].append({
                "agent": context.agent,
                "task": context.task,
                "crew": context.crew,
                "llm": context.llm is not None,
                "message_count": len(context.messages),
            })
            return True  # Allow execution

        def after_hook(context: LLMCallHookContext) -> str | None:
            """Log and verify after hook execution."""
            print(f"\n[AFTER HOOK] Agent: {context.agent}")
            print(f"[AFTER HOOK] Response: {context.response[:100] if context.response else 'None'}...")
            # Track the call
            hook_calls["after"].append({
                "has_response": context.response is not None,
                "response_length": len(context.response) if context.response else 0,
            })
            # Modify response
            if context.response:
                return f"[HOOKED] {context.response}"
            return None

        # Register hooks
        register_before_llm_call_hook(before_hook)
        register_after_llm_call_hook(after_hook)
        try:
            # Create LLM and make direct call
            llm = LLM(model="gpt-4o-mini")
            result = llm.call([{"role": "user", "content": "Say hello"}])
            print(f"\n[TEST] Final result: {result}")
            # Verify hooks were called
            assert len(hook_calls["before"]) > 0, "Before hook was never called"
            assert len(hook_calls["after"]) > 0, "After hook was never called"
            # Verify context had correct attributes for direct LLM calls
            before_call = hook_calls["before"][0]
            assert before_call["agent"] is None, "Agent should be None for direct LLM calls"
            assert before_call["task"] is None, "Task should be None for direct LLM calls"
            assert before_call["crew"] is None, "Crew should be None for direct LLM calls"
            assert before_call["llm"] is True, "LLM should be present"
            assert before_call["message_count"] > 0, "Should have messages"
            # Verify after hook received response
            after_call = hook_calls["after"][0]
            assert after_call["has_response"] is True, "After hook should have response"
            assert after_call["response_length"] > 0, "Response should not be empty"
            # Verify response was modified by after hook
            assert "[HOOKED]" in result, "Response should be modified by after hook"
        finally:
            # Clean up hooks
            unregister_before_llm_call_hook(before_hook)
            unregister_after_llm_call_hook(after_hook)
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/hooks/test_llm_hooks.py",
"license": "MIT License",
"lines": 373,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/tests/hooks/test_tool_hooks.py | from __future__ import annotations
from unittest.mock import Mock
from crewai.hooks import clear_all_tool_call_hooks, unregister_after_tool_call_hook, unregister_before_tool_call_hook
import pytest
from crewai.hooks.tool_hooks import (
ToolCallHookContext,
get_after_tool_call_hooks,
get_before_tool_call_hooks,
register_after_tool_call_hook,
register_before_tool_call_hook,
)
@pytest.fixture
def mock_tool():
    """Stub tool for hook tests."""
    # `name` is special to the Mock constructor, so it must be set via
    # configure_mock() (or attribute assignment) to land as an attribute.
    stub = Mock()
    stub.configure_mock(name="test_tool", description="Test tool description")
    return stub
@pytest.fixture
def mock_agent():
    """Stub agent exposing only the `role` attribute the hooks read."""
    return Mock(role="Test Agent")
@pytest.fixture
def mock_task():
    """Stub task exposing only the `description` attribute."""
    return Mock(description="Test task")
@pytest.fixture
def mock_crew():
    """Bare stub crew; no specific attributes are read by these tests."""
    return Mock()
@pytest.fixture(autouse=True)
def clear_hooks():
    """Isolate each test from the process-global tool hook registries."""
    # The registries are private module-level lists, so reach into the module.
    from crewai.hooks import tool_hooks

    # Snapshot whatever was registered before this test ran.
    saved_before = tool_hooks._before_tool_call_hooks.copy()
    saved_after = tool_hooks._after_tool_call_hooks.copy()
    tool_hooks._before_tool_call_hooks.clear()
    tool_hooks._after_tool_call_hooks.clear()
    yield
    # Restore the snapshot in place so later tests see the original hooks.
    tool_hooks._before_tool_call_hooks[:] = saved_before
    tool_hooks._after_tool_call_hooks[:] = saved_after
class TestToolCallHookContext:
    """Test ToolCallHookContext initialization and attributes."""

    def test_context_initialization(self, mock_tool, mock_agent, mock_task, mock_crew):
        """Test that context is initialized correctly."""
        payload = {"arg1": "value1", "arg2": "value2"}
        ctx = ToolCallHookContext(
            tool_name="test_tool",
            tool_input=payload,
            tool=mock_tool,
            agent=mock_agent,
            task=mock_task,
            crew=mock_crew,
        )
        assert ctx.tool_name == "test_tool"
        assert ctx.tool_input == payload
        # Collaborator objects are stored as passed in.
        for attr, expected in (
            ("tool", mock_tool),
            ("agent", mock_agent),
            ("task", mock_task),
            ("crew", mock_crew),
        ):
            assert getattr(ctx, attr) == expected
        assert ctx.tool_result is None

    def test_context_with_result(self, mock_tool):
        """Test that context includes result when provided."""
        ctx = ToolCallHookContext(
            tool_name="test_tool",
            tool_input={"arg1": "value1"},
            tool=mock_tool,
            tool_result="Test tool result",
        )
        assert ctx.tool_result == "Test tool result"

    def test_tool_input_is_mutable_reference(self, mock_tool):
        """Test that modifying context.tool_input modifies the original dict."""
        payload = {"arg1": "value1"}
        ctx = ToolCallHookContext(
            tool_name="test_tool",
            tool_input=payload,
            tool=mock_tool,
        )
        # Mutate via the context; the caller's dict must see it too.
        ctx.tool_input["arg2"] = "value2"
        assert payload.get("arg2") == "value2"
class TestBeforeToolCallHooks:
    """Test before_tool_call hook registration and execution."""

    def test_register_before_hook(self):
        """Test that before hooks are registered correctly."""
        def sample_hook(context):
            return None

        register_before_tool_call_hook(sample_hook)
        assert get_before_tool_call_hooks() == [sample_hook]

    def test_multiple_before_hooks(self):
        """Test that multiple before hooks can be registered."""
        def first(context):
            return None

        def second(context):
            return None

        for hook in (first, second):
            register_before_tool_call_hook(hook)
        registered = get_before_tool_call_hooks()
        assert len(registered) == 2
        assert first in registered and second in registered

    def test_before_hook_can_block_execution(self, mock_tool):
        """Test that before hooks can block tool execution."""
        def block_hook(context):
            # False blocks execution; None allows it.
            return False if context.tool_name == "dangerous_tool" else None

        ctx = ToolCallHookContext(
            tool_name="dangerous_tool",
            tool_input={},
            tool=mock_tool,
        )
        assert block_hook(ctx) is False

    def test_before_hook_can_allow_execution(self, mock_tool):
        """Test that before hooks can explicitly allow execution."""
        def allow_hook(context):
            # None means "allow execution".
            return None

        ctx = ToolCallHookContext(
            tool_name="safe_tool",
            tool_input={},
            tool=mock_tool,
        )
        assert allow_hook(ctx) is None

    def test_before_hook_can_modify_input(self, mock_tool):
        """Test that before hooks can modify tool input in-place."""
        def tag_input_hook(context):
            context.tool_input["modified_by_hook"] = True
            return None

        ctx = ToolCallHookContext(
            tool_name="test_tool",
            tool_input={"arg1": "value1"},
            tool=mock_tool,
        )
        tag_input_hook(ctx)
        assert ctx.tool_input.get("modified_by_hook") is True

    def test_get_before_hooks_returns_copy(self):
        """Test that get_before_tool_call_hooks returns a copy."""
        def sample_hook(context):
            return None

        register_before_tool_call_hook(sample_hook)
        snapshot_a = get_before_tool_call_hooks()
        snapshot_b = get_before_tool_call_hooks()
        # Same contents, distinct list objects.
        assert snapshot_a == snapshot_b
        assert snapshot_a is not snapshot_b
class TestAfterToolCallHooks:
    """Test after_tool_call hook registration and execution."""

    def test_register_after_hook(self):
        """Test that after hooks are registered correctly."""
        def sample_hook(context):
            return None

        register_after_tool_call_hook(sample_hook)
        assert get_after_tool_call_hooks() == [sample_hook]

    def test_multiple_after_hooks(self):
        """Test that multiple after hooks can be registered."""
        def first(context):
            return None

        def second(context):
            return None

        for hook in (first, second):
            register_after_tool_call_hook(hook)
        registered = get_after_tool_call_hooks()
        assert len(registered) == 2
        assert first in registered and second in registered

    def test_after_hook_can_modify_result(self, mock_tool):
        """Test that after hooks can modify the tool result."""
        def modify_result_hook(context):
            if not context.tool_result:
                return None
            return context.tool_result.replace("Original", "Modified")

        ctx = ToolCallHookContext(
            tool_name="test_tool",
            tool_input={},
            tool=mock_tool,
            tool_result="Original result",
        )
        assert modify_result_hook(ctx) == "Modified result"

    def test_after_hook_returns_none_keeps_original(self, mock_tool):
        """Test that returning None keeps the original result."""
        def no_change_hook(context):
            return None

        ctx = ToolCallHookContext(
            tool_name="test_tool",
            tool_input={},
            tool=mock_tool,
            tool_result="Original result",
        )
        # None means "no modification": the context keeps what it was given.
        assert no_change_hook(ctx) is None
        assert ctx.tool_result == "Original result"

    def test_get_after_hooks_returns_copy(self):
        """Test that get_after_tool_call_hooks returns a copy."""
        def sample_hook(context):
            return None

        register_after_tool_call_hook(sample_hook)
        snapshot_a = get_after_tool_call_hooks()
        snapshot_b = get_after_tool_call_hooks()
        # Same contents, distinct list objects.
        assert snapshot_a == snapshot_b
        assert snapshot_a is not snapshot_b
class TestToolHooksIntegration:
    """Test integration scenarios with multiple hooks.

    These tests mutate the module-level hook registries directly and rely on
    the autouse ``clear_hooks`` fixture to isolate registrations per test.
    """

    def test_multiple_before_hooks_execute_in_order(self, mock_tool):
        """Test that multiple before hooks execute in registration order."""
        execution_order = []

        def hook1(context):
            execution_order.append(1)
            return None

        def hook2(context):
            execution_order.append(2)
            return None

        def hook3(context):
            execution_order.append(3)
            return None

        register_before_tool_call_hook(hook1)
        register_before_tool_call_hook(hook2)
        register_before_tool_call_hook(hook3)
        tool_input = {}
        context = ToolCallHookContext(
            tool_name="test_tool",
            tool_input=tool_input,
            tool=mock_tool,
        )
        # Drive the hooks manually, the way the tool runner would.
        hooks = get_before_tool_call_hooks()
        for hook in hooks:
            hook(context)
        assert execution_order == [1, 2, 3]

    def test_first_blocking_hook_stops_execution(self, mock_tool):
        """Test that first hook returning False blocks execution."""
        execution_order = []

        def hook1(context):
            execution_order.append(1)
            return None  # Allow

        def hook2(context):
            execution_order.append(2)
            return False  # Block

        def hook3(context):
            execution_order.append(3)
            return None  # This shouldn't run

        register_before_tool_call_hook(hook1)
        register_before_tool_call_hook(hook2)
        register_before_tool_call_hook(hook3)
        tool_input = {}
        context = ToolCallHookContext(
            tool_name="test_tool",
            tool_input=tool_input,
            tool=mock_tool,
        )
        hooks = get_before_tool_call_hooks()
        blocked = False
        # Short-circuit on the first False, mirroring the runner's contract.
        for hook in hooks:
            result = hook(context)
            if result is False:
                blocked = True
                break
        assert blocked is True
        assert execution_order == [1, 2]  # hook3 didn't run

    def test_multiple_after_hooks_chain_modifications(self, mock_tool):
        """Test that multiple after hooks can chain modifications."""

        def hook1(context):
            if context.tool_result:
                return context.tool_result + " [hook1]"
            return None

        def hook2(context):
            if context.tool_result:
                return context.tool_result + " [hook2]"
            return None

        register_after_tool_call_hook(hook1)
        register_after_tool_call_hook(hook2)
        tool_input = {}
        context = ToolCallHookContext(
            tool_name="test_tool",
            tool_input=tool_input,
            tool=mock_tool,
            tool_result="Original",
        )
        hooks = get_after_tool_call_hooks()
        # Simulate chaining (how it would be used in practice)
        result = context.tool_result
        for hook in hooks:
            # Update context for next hook
            context.tool_result = result
            modified = hook(context)
            if modified is not None:
                result = modified
        assert result == "Original [hook1] [hook2]"

    def test_hooks_with_validation_and_sanitization(self, mock_tool):
        """Test a realistic scenario with validation and sanitization hooks."""
        # Validation hook (before)
        def validate_file_path(context):
            if context.tool_name == "write_file":
                file_path = context.tool_input.get("file_path", "")
                if ".env" in file_path:
                    return False  # Block sensitive files
            return None

        # Sanitization hook (after)
        def sanitize_secrets(context):
            if context.tool_result and "SECRET_KEY" in context.tool_result:
                return context.tool_result.replace("SECRET_KEY=abc123", "SECRET_KEY=[REDACTED]")
            return None

        register_before_tool_call_hook(validate_file_path)
        register_after_tool_call_hook(sanitize_secrets)
        # Test blocking
        blocked_context = ToolCallHookContext(
            tool_name="write_file",
            tool_input={"file_path": ".env"},
            tool=mock_tool,
        )
        before_hooks = get_before_tool_call_hooks()
        blocked = False
        for hook in before_hooks:
            if hook(blocked_context) is False:
                blocked = True
                break
        assert blocked is True
        # Test sanitization
        sanitize_context = ToolCallHookContext(
            tool_name="read_file",
            tool_input={"file_path": "config.txt"},
            tool=mock_tool,
            tool_result="Content: SECRET_KEY=abc123",
        )
        after_hooks = get_after_tool_call_hooks()
        result = sanitize_context.tool_result
        for hook in after_hooks:
            sanitize_context.tool_result = result
            modified = hook(sanitize_context)
            if modified is not None:
                result = modified
        assert "SECRET_KEY=[REDACTED]" in result
        assert "abc123" not in result

    def test_unregister_before_hook(self):
        """Test that before hooks can be unregistered."""

        def test_hook(context):
            pass

        register_before_tool_call_hook(test_hook)
        unregister_before_tool_call_hook(test_hook)
        hooks = get_before_tool_call_hooks()
        assert len(hooks) == 0

    def test_unregister_after_hook(self):
        """Test that after hooks can be unregistered."""

        def test_hook(context):
            return None

        register_after_tool_call_hook(test_hook)
        unregister_after_tool_call_hook(test_hook)
        hooks = get_after_tool_call_hooks()
        assert len(hooks) == 0

    def test_clear_all_tool_call_hooks(self):
        """Test that all tool call hooks can be cleared."""

        def test_hook(context):
            pass

        register_before_tool_call_hook(test_hook)
        register_after_tool_call_hook(test_hook)
        clear_all_tool_call_hooks()
        # NOTE(review): only the before-registry is asserted empty here; an
        # assertion on get_after_tool_call_hooks() would strengthen this test.
        hooks = get_before_tool_call_hooks()
        assert len(hooks) == 0

    @pytest.mark.vcr()
    def test_lite_agent_hooks_integration_with_real_tool(self):
        """Test that LiteAgent executes before/after tool call hooks with real tool calls."""
        import os
        from crewai.lite_agent import LiteAgent
        from crewai.tools import tool
        # Skip if no API key available
        if not os.environ.get("OPENAI_API_KEY"):
            pytest.skip("OPENAI_API_KEY not set - skipping real tool test")
        # Track hook invocations
        hook_calls = {"before": [], "after": []}

        # Create a simple test tool
        @tool("calculate_sum")
        def calculate_sum(a: int, b: int) -> int:
            """Add two numbers together."""
            return a + b

        def before_tool_call_hook(context: ToolCallHookContext) -> bool:
            """Log and verify before hook execution."""
            print(f"\n[BEFORE HOOK] Tool: {context.tool_name}")
            print(f"[BEFORE HOOK] Tool input: {context.tool_input}")
            print(f"[BEFORE HOOK] Agent: {context.agent.role if context.agent else 'None'}")
            print(f"[BEFORE HOOK] Task: {context.task}")
            print(f"[BEFORE HOOK] Crew: {context.crew}")
            # Track the call
            hook_calls["before"].append({
                "tool_name": context.tool_name,
                "tool_input": context.tool_input,
                "has_agent": context.agent is not None,
                "has_task": context.task is not None,
                "has_crew": context.crew is not None,
            })
            return True  # Allow execution

        def after_tool_call_hook(context: ToolCallHookContext) -> str | None:
            """Log and verify after hook execution."""
            print(f"\n[AFTER HOOK] Tool: {context.tool_name}")
            print(f"[AFTER HOOK] Tool result: {context.tool_result}")
            print(f"[AFTER HOOK] Agent: {context.agent.role if context.agent else 'None'}")
            # Track the call
            hook_calls["after"].append({
                "tool_name": context.tool_name,
                "tool_result": context.tool_result,
                "has_result": context.tool_result is not None,
            })
            return None  # Don't modify result

        # Register hooks
        register_before_tool_call_hook(before_tool_call_hook)
        register_after_tool_call_hook(after_tool_call_hook)
        try:
            # Create LiteAgent with the tool
            lite_agent = LiteAgent(
                role="Calculator Assistant",
                goal="Help with math calculations",
                backstory="You are a helpful calculator assistant",
                tools=[calculate_sum],
                verbose=True,
            )
            # Execute with a prompt that should trigger tool usage
            result = lite_agent.kickoff("What is 5 + 3? Use the calculate_sum tool.")
            # Verify hooks were called
            assert len(hook_calls["before"]) > 0, "Before hook was never called"
            assert len(hook_calls["after"]) > 0, "After hook was never called"
            # Verify context had correct attributes for LiteAgent (used in flows)
            # LiteAgent doesn't have task/crew context, unlike agents in CrewBase
            before_call = hook_calls["before"][0]
            assert before_call["tool_name"] == "calculate_sum", "Tool name should be 'calculate_sum'"
            assert "a" in before_call["tool_input"], "Tool input should have 'a' parameter"
            assert "b" in before_call["tool_input"], "Tool input should have 'b' parameter"
            # Verify after hook received result
            after_call = hook_calls["after"][0]
            assert after_call["has_result"] is True, "After hook should have tool result"
            assert after_call["tool_name"] == "calculate_sum", "Tool name should match"
            # The result should contain the sum (8)
            assert "8" in str(after_call["tool_result"]), "Tool result should contain the sum"
        finally:
            # Clean up hooks
            unregister_before_tool_call_hook(before_tool_call_hook)
            unregister_after_tool_call_hook(after_tool_call_hook)
class TestNativeToolCallingHooksIntegration:
    """Integration tests for hooks with native function calling (Agent and Crew)."""

    @pytest.mark.vcr()
    def test_agent_native_tool_hooks_before_and_after(self):
        """Test that Agent with native tool calling executes before/after hooks."""
        import os

        from crewai import Agent
        from crewai.tools import tool

        # Per-test capture of hook invocations, keyed by hook phase.
        recorded = {"before": [], "after": []}

        @tool("multiply_numbers")
        def multiply_numbers(a: int, b: int) -> int:
            """Multiply two numbers together."""
            return a * b

        def capture_before(context: ToolCallHookContext) -> bool | None:
            # Record what the before hook observed; returning None lets the
            # tool call proceed unmodified.
            recorded["before"].append(
                {
                    "tool_name": context.tool_name,
                    "tool_input": dict(context.tool_input),
                    "has_agent": context.agent is not None,
                }
            )
            return None

        def capture_after(context: ToolCallHookContext) -> str | None:
            # Record the tool result; returning None keeps it unchanged.
            recorded["after"].append(
                {
                    "tool_name": context.tool_name,
                    "tool_result": context.tool_result,
                    "has_agent": context.agent is not None,
                }
            )
            return None

        register_before_tool_call_hook(capture_before)
        register_after_tool_call_hook(capture_after)
        try:
            calculator = Agent(
                role="Calculator",
                goal="Perform calculations",
                backstory="You are a calculator assistant",
                tools=[multiply_numbers],
                verbose=True,
            )
            calculator.kickoff(
                messages="What is 7 times 6? Use the multiply_numbers tool."
            )

            assert recorded["before"], "Before hook was never called"
            first_before = recorded["before"][0]
            assert first_before["tool_name"] == "multiply_numbers"
            assert "a" in first_before["tool_input"]
            assert "b" in first_before["tool_input"]
            assert first_before["has_agent"] is True

            assert recorded["after"], "After hook was never called"
            first_after = recorded["after"][0]
            assert first_after["tool_name"] == "multiply_numbers"
            assert "42" in str(first_after["tool_result"])
            assert first_after["has_agent"] is True
        finally:
            # Hooks are registered globally; always unregister so state does
            # not leak into other tests.
            unregister_before_tool_call_hook(capture_before)
            unregister_after_tool_call_hook(capture_after)

    @pytest.mark.vcr()
    def test_crew_native_tool_hooks_before_and_after(self):
        """Test that Crew with Agent executes before/after hooks with full context."""
        import os

        from crewai import Agent, Crew, Task
        from crewai.tools import tool

        recorded = {"before": [], "after": []}

        @tool("divide_numbers")
        def divide_numbers(a: int, b: int) -> float:
            """Divide first number by second number."""
            return a / b

        def capture_before(context: ToolCallHookContext) -> bool | None:
            recorded["before"].append(
                {
                    "tool_name": context.tool_name,
                    "tool_input": dict(context.tool_input),
                    "has_agent": context.agent is not None,
                    "has_task": context.task is not None,
                    "has_crew": context.crew is not None,
                    "agent_role": context.agent.role if context.agent else None,
                }
            )
            return None

        def capture_after(context: ToolCallHookContext) -> str | None:
            recorded["after"].append(
                {
                    "tool_name": context.tool_name,
                    "tool_result": context.tool_result,
                    "has_agent": context.agent is not None,
                    "has_task": context.task is not None,
                    "has_crew": context.crew is not None,
                }
            )
            return None

        register_before_tool_call_hook(capture_before)
        register_after_tool_call_hook(capture_after)
        try:
            math_agent = Agent(
                role="Math Assistant",
                goal="Perform division calculations accurately",
                backstory="You are a math assistant that helps with division",
                tools=[divide_numbers],
                verbose=True,
            )
            division_task = Task(
                description="Calculate 100 divided by 4 using the divide_numbers tool.",
                expected_output="The result of the division",
                agent=math_agent,
            )
            crew = Crew(
                agents=[math_agent],
                tasks=[division_task],
                verbose=True,
            )
            crew.kickoff()

            # Inside a Crew run the hook context should carry agent, task,
            # and crew references (unlike a bare LiteAgent run).
            assert recorded["before"], "Before hook was never called"
            first_before = recorded["before"][0]
            assert first_before["tool_name"] == "divide_numbers"
            assert "a" in first_before["tool_input"]
            assert "b" in first_before["tool_input"]
            assert first_before["has_agent"] is True
            assert first_before["has_task"] is True
            assert first_before["has_crew"] is True
            assert first_before["agent_role"] == "Math Assistant"

            assert recorded["after"], "After hook was never called"
            first_after = recorded["after"][0]
            assert first_after["tool_name"] == "divide_numbers"
            assert "25" in str(first_after["tool_result"])
            assert first_after["has_agent"] is True
            assert first_after["has_task"] is True
            assert first_after["has_crew"] is True
        finally:
            unregister_before_tool_call_hook(capture_before)
            unregister_after_tool_call_hook(capture_after)

    @pytest.mark.vcr()
    def test_before_hook_blocks_tool_execution_in_crew(self):
        """Test that returning False from before hook blocks tool execution."""
        import os

        from crewai import Agent, Crew, Task
        from crewai.tools import tool

        recorded = {"before": [], "after": [], "tool_executed": False}

        @tool("dangerous_operation")
        def dangerous_operation(action: str) -> str:
            """Perform a dangerous operation that should be blocked."""
            recorded["tool_executed"] = True
            return f"Executed: {action}"

        def deny_dangerous(context: ToolCallHookContext) -> bool | None:
            recorded["before"].append(
                {
                    "tool_name": context.tool_name,
                    "tool_input": dict(context.tool_input),
                }
            )
            # Returning False vetoes the call; None lets it through.
            return False if context.tool_name == "dangerous_operation" else None

        def capture_after(context: ToolCallHookContext) -> str | None:
            recorded["after"].append(
                {
                    "tool_name": context.tool_name,
                    "tool_result": context.tool_result,
                }
            )
            return None

        register_before_tool_call_hook(deny_dangerous)
        register_after_tool_call_hook(capture_after)
        try:
            test_agent = Agent(
                role="Test Agent",
                goal="Try to use the dangerous operation tool",
                backstory="You are a test agent",
                tools=[dangerous_operation],
                verbose=True,
            )
            blocked_task = Task(
                description="Use the dangerous_operation tool with action 'delete_all'.",
                expected_output="The result of the operation",
                agent=test_agent,
            )
            crew = Crew(
                agents=[test_agent],
                tasks=[blocked_task],
                verbose=True,
            )
            crew.kickoff()

            assert recorded["before"], "Before hook was never called"
            first_before = recorded["before"][0]
            assert first_before["tool_name"] == "dangerous_operation"

            # The tool body never ran, so its side effect is absent.
            assert recorded["tool_executed"] is False, "Tool should have been blocked"

            # The after hook still fires, observing the blocked-call message.
            assert recorded["after"], "After hook was never called"
            first_after = recorded["after"][0]
            assert "blocked" in first_after["tool_result"].lower()
        finally:
            unregister_before_tool_call_hook(deny_dangerous)
            unregister_after_tool_call_hook(capture_after)
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/hooks/test_tool_hooks.py",
"license": "MIT License",
"lines": 652,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/src/crewai/llms/constants.py | from typing import Literal, TypeAlias
OpenAIModels: TypeAlias = Literal[
"gpt-3.5-turbo",
"gpt-3.5-turbo-0125",
"gpt-3.5-turbo-0301",
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-1106",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-16k-0613",
"gpt-3.5-turbo-instruct",
"gpt-3.5-turbo-instruct-0914",
"gpt-4",
"gpt-4-0125-preview",
"gpt-4-0314",
"gpt-4-0613",
"gpt-4-1106-preview",
"gpt-4-32k",
"gpt-4-32k-0314",
"gpt-4-32k-0613",
"gpt-4-turbo",
"gpt-4-turbo-2024-04-09",
"gpt-4-turbo-preview",
"gpt-4-vision-preview",
"gpt-4.1",
"gpt-4.1-2025-04-14",
"gpt-4.1-mini",
"gpt-4.1-mini-2025-04-14",
"gpt-4.1-nano",
"gpt-4.1-nano-2025-04-14",
"gpt-4o",
"gpt-4o-2024-05-13",
"gpt-4o-2024-08-06",
"gpt-4o-2024-11-20",
"gpt-4o-audio-preview",
"gpt-4o-audio-preview-2024-10-01",
"gpt-4o-audio-preview-2024-12-17",
"gpt-4o-audio-preview-2025-06-03",
"gpt-4o-mini",
"gpt-4o-mini-2024-07-18",
"gpt-4o-mini-audio-preview",
"gpt-4o-mini-audio-preview-2024-12-17",
"gpt-4o-mini-realtime-preview",
"gpt-4o-mini-realtime-preview-2024-12-17",
"gpt-4o-mini-search-preview",
"gpt-4o-mini-search-preview-2025-03-11",
"gpt-4o-mini-transcribe",
"gpt-4o-mini-tts",
"gpt-4o-realtime-preview",
"gpt-4o-realtime-preview-2024-10-01",
"gpt-4o-realtime-preview-2024-12-17",
"gpt-4o-realtime-preview-2025-06-03",
"gpt-4o-search-preview",
"gpt-4o-search-preview-2025-03-11",
"gpt-4o-transcribe",
"gpt-4o-transcribe-diarize",
"gpt-5",
"gpt-5-2025-08-07",
"gpt-5-chat",
"gpt-5-chat-latest",
"gpt-5-codex",
"gpt-5-mini",
"gpt-5-mini-2025-08-07",
"gpt-5-nano",
"gpt-5-nano-2025-08-07",
"gpt-5-pro",
"gpt-5-pro-2025-10-06",
"gpt-5-search-api",
"gpt-5-search-api-2025-10-14",
"gpt-audio",
"gpt-audio-2025-08-28",
"gpt-audio-mini",
"gpt-audio-mini-2025-10-06",
"gpt-image-1",
"gpt-image-1-mini",
"gpt-realtime",
"gpt-realtime-2025-08-28",
"gpt-realtime-mini",
"gpt-realtime-mini-2025-10-06",
"o1",
"o1-preview",
"o1-2024-12-17",
"o1-mini",
"o1-mini-2024-09-12",
"o1-pro",
"o1-pro-2025-03-19",
"o3-mini",
"o3",
"o4-mini",
"whisper-1",
]
OPENAI_MODELS: list[OpenAIModels] = [
"gpt-3.5-turbo",
"gpt-3.5-turbo-0125",
"gpt-3.5-turbo-0301",
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-1106",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-16k-0613",
"gpt-3.5-turbo-instruct",
"gpt-3.5-turbo-instruct-0914",
"gpt-4",
"gpt-4-0125-preview",
"gpt-4-0314",
"gpt-4-0613",
"gpt-4-1106-preview",
"gpt-4-32k",
"gpt-4-32k-0314",
"gpt-4-32k-0613",
"gpt-4-turbo",
"gpt-4-turbo-2024-04-09",
"gpt-4-turbo-preview",
"gpt-4-vision-preview",
"gpt-4.1",
"gpt-4.1-2025-04-14",
"gpt-4.1-mini",
"gpt-4.1-mini-2025-04-14",
"gpt-4.1-nano",
"gpt-4.1-nano-2025-04-14",
"gpt-4o",
"gpt-4o-2024-05-13",
"gpt-4o-2024-08-06",
"gpt-4o-2024-11-20",
"gpt-4o-audio-preview",
"gpt-4o-audio-preview-2024-10-01",
"gpt-4o-audio-preview-2024-12-17",
"gpt-4o-audio-preview-2025-06-03",
"gpt-4o-mini",
"gpt-4o-mini-2024-07-18",
"gpt-4o-mini-audio-preview",
"gpt-4o-mini-audio-preview-2024-12-17",
"gpt-4o-mini-realtime-preview",
"gpt-4o-mini-realtime-preview-2024-12-17",
"gpt-4o-mini-search-preview",
"gpt-4o-mini-search-preview-2025-03-11",
"gpt-4o-mini-transcribe",
"gpt-4o-mini-tts",
"gpt-4o-realtime-preview",
"gpt-4o-realtime-preview-2024-10-01",
"gpt-4o-realtime-preview-2024-12-17",
"gpt-4o-realtime-preview-2025-06-03",
"gpt-4o-search-preview",
"gpt-4o-search-preview-2025-03-11",
"gpt-4o-transcribe",
"gpt-4o-transcribe-diarize",
"gpt-5",
"gpt-5-2025-08-07",
"gpt-5-chat",
"gpt-5-chat-latest",
"gpt-5-codex",
"gpt-5-mini",
"gpt-5-mini-2025-08-07",
"gpt-5-nano",
"gpt-5-nano-2025-08-07",
"gpt-5-pro",
"gpt-5-pro-2025-10-06",
"gpt-5-search-api",
"gpt-5-search-api-2025-10-14",
"gpt-audio",
"gpt-audio-2025-08-28",
"gpt-audio-mini",
"gpt-audio-mini-2025-10-06",
"gpt-image-1",
"gpt-image-1-mini",
"gpt-realtime",
"gpt-realtime-2025-08-28",
"gpt-realtime-mini",
"gpt-realtime-mini-2025-10-06",
"o1",
"o1-preview",
"o1-2024-12-17",
"o1-mini",
"o1-mini-2024-09-12",
"o1-pro",
"o1-pro-2025-03-19",
"o3-mini",
"o3",
"o4-mini",
"whisper-1",
]
AnthropicModels: TypeAlias = Literal[
"claude-opus-4-5-20251101",
"claude-opus-4-5",
"claude-3-7-sonnet-latest",
"claude-3-7-sonnet-20250219",
"claude-3-5-haiku-latest",
"claude-3-5-haiku-20241022",
"claude-haiku-4-5",
"claude-haiku-4-5-20251001",
"claude-sonnet-4-20250514",
"claude-sonnet-4-0",
"claude-4-sonnet-20250514",
"claude-sonnet-4-5",
"claude-sonnet-4-5-20250929",
"claude-3-5-sonnet-latest",
"claude-3-5-sonnet-20241022",
"claude-3-5-sonnet-20240620",
"claude-opus-4-0",
"claude-opus-4-20250514",
"claude-4-opus-20250514",
"claude-opus-4-1",
"claude-opus-4-1-20250805",
"claude-3-opus-latest",
"claude-3-opus-20240229",
"claude-3-sonnet-20240229",
"claude-3-haiku-latest",
"claude-3-haiku-20240307",
]
ANTHROPIC_MODELS: list[AnthropicModels] = [
"claude-opus-4-5-20251101",
"claude-opus-4-5",
"claude-3-7-sonnet-latest",
"claude-3-7-sonnet-20250219",
"claude-3-5-haiku-latest",
"claude-3-5-haiku-20241022",
"claude-haiku-4-5",
"claude-haiku-4-5-20251001",
"claude-sonnet-4-20250514",
"claude-sonnet-4-0",
"claude-4-sonnet-20250514",
"claude-sonnet-4-5",
"claude-sonnet-4-5-20250929",
"claude-3-5-sonnet-latest",
"claude-3-5-sonnet-20241022",
"claude-3-5-sonnet-20240620",
"claude-opus-4-0",
"claude-opus-4-20250514",
"claude-4-opus-20250514",
"claude-opus-4-1",
"claude-opus-4-1-20250805",
"claude-3-opus-latest",
"claude-3-opus-20240229",
"claude-3-sonnet-20240229",
"claude-3-haiku-latest",
"claude-3-haiku-20240307",
]
GeminiModels: TypeAlias = Literal[
"gemini-3-pro-preview",
"gemini-2.5-pro",
"gemini-2.5-pro-preview-03-25",
"gemini-2.5-pro-preview-05-06",
"gemini-2.5-pro-preview-06-05",
"gemini-2.5-flash",
"gemini-2.5-flash-preview-05-20",
"gemini-2.5-flash-preview-04-17",
"gemini-2.5-flash-image",
"gemini-2.5-flash-image-preview",
"gemini-2.5-flash-lite",
"gemini-2.5-flash-lite-preview-06-17",
"gemini-2.5-flash-preview-09-2025",
"gemini-2.5-flash-lite-preview-09-2025",
"gemini-2.5-flash-preview-tts",
"gemini-2.5-pro-preview-tts",
"gemini-2.5-computer-use-preview-10-2025",
"gemini-2.5-pro-exp-03-25",
"gemini-2.0-flash",
"gemini-2.0-flash-001",
"gemini-2.0-flash-exp",
"gemini-2.0-flash-exp-image-generation",
"gemini-2.0-flash-lite",
"gemini-2.0-flash-lite-001",
"gemini-2.0-flash-lite-preview",
"gemini-2.0-flash-lite-preview-02-05",
"gemini-2.0-flash-preview-image-generation",
"gemini-2.0-flash-thinking-exp",
"gemini-2.0-flash-thinking-exp-01-21",
"gemini-2.0-flash-thinking-exp-1219",
"gemini-2.0-pro-exp",
"gemini-2.0-pro-exp-02-05",
"gemini-exp-1206",
"gemini-1.5-pro",
"gemini-1.5-flash",
"gemini-1.5-flash-8b",
"gemini-flash-latest",
"gemini-flash-lite-latest",
"gemini-pro-latest",
"gemini-2.0-flash-live-001",
"gemini-live-2.5-flash-preview",
"gemini-2.5-flash-live-preview",
"gemini-robotics-er-1.5-preview",
"gemini-gemma-2-27b-it",
"gemini-gemma-2-9b-it",
"gemma-3-1b-it",
"gemma-3-4b-it",
"gemma-3-12b-it",
"gemma-3-27b-it",
"gemma-3n-e2b-it",
"gemma-3n-e4b-it",
"learnlm-2.0-flash-experimental",
]
GEMINI_MODELS: list[GeminiModels] = [
"gemini-3-pro-preview",
"gemini-2.5-pro",
"gemini-2.5-pro-preview-03-25",
"gemini-2.5-pro-preview-05-06",
"gemini-2.5-pro-preview-06-05",
"gemini-2.5-flash",
"gemini-2.5-flash-preview-05-20",
"gemini-2.5-flash-preview-04-17",
"gemini-2.5-flash-image",
"gemini-2.5-flash-image-preview",
"gemini-2.5-flash-lite",
"gemini-2.5-flash-lite-preview-06-17",
"gemini-2.5-flash-preview-09-2025",
"gemini-2.5-flash-lite-preview-09-2025",
"gemini-2.5-flash-preview-tts",
"gemini-2.5-pro-preview-tts",
"gemini-2.5-computer-use-preview-10-2025",
"gemini-2.5-pro-exp-03-25",
"gemini-2.0-flash",
"gemini-2.0-flash-001",
"gemini-2.0-flash-exp",
"gemini-2.0-flash-exp-image-generation",
"gemini-2.0-flash-lite",
"gemini-2.0-flash-lite-001",
"gemini-2.0-flash-lite-preview",
"gemini-2.0-flash-lite-preview-02-05",
"gemini-2.0-flash-preview-image-generation",
"gemini-2.0-flash-thinking-exp",
"gemini-2.0-flash-thinking-exp-01-21",
"gemini-2.0-flash-thinking-exp-1219",
"gemini-2.0-pro-exp",
"gemini-2.0-pro-exp-02-05",
"gemini-exp-1206",
"gemini-1.5-pro",
"gemini-1.5-flash",
"gemini-1.5-flash-8b",
"gemini-flash-latest",
"gemini-flash-lite-latest",
"gemini-pro-latest",
"gemini-2.0-flash-live-001",
"gemini-live-2.5-flash-preview",
"gemini-2.5-flash-live-preview",
"gemini-robotics-er-1.5-preview",
"gemini-gemma-2-27b-it",
"gemini-gemma-2-9b-it",
"gemma-3-1b-it",
"gemma-3-4b-it",
"gemma-3-12b-it",
"gemma-3-27b-it",
"gemma-3n-e2b-it",
"gemma-3n-e4b-it",
"learnlm-2.0-flash-experimental",
]
AzureModels: TypeAlias = Literal[
"gpt-3.5-turbo",
"gpt-3.5-turbo-0301",
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-16k-0613",
"gpt-35-turbo",
"gpt-35-turbo-0125",
"gpt-35-turbo-1106",
"gpt-35-turbo-16k-0613",
"gpt-35-turbo-instruct-0914",
"gpt-4",
"gpt-4-0314",
"gpt-4-0613",
"gpt-4-1106-preview",
"gpt-4-0125-preview",
"gpt-4-32k",
"gpt-4-32k-0314",
"gpt-4-32k-0613",
"gpt-4-turbo",
"gpt-4-turbo-2024-04-09",
"gpt-4-vision",
"gpt-4o",
"gpt-4o-2024-05-13",
"gpt-4o-2024-08-06",
"gpt-4o-2024-11-20",
"gpt-4o-mini",
"gpt-5",
"o1",
"o1-mini",
"o1-preview",
"o3-mini",
"o3",
"o4-mini",
]
AZURE_MODELS: list[AzureModels] = [
"gpt-3.5-turbo",
"gpt-3.5-turbo-0301",
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-16k-0613",
"gpt-35-turbo",
"gpt-35-turbo-0125",
"gpt-35-turbo-1106",
"gpt-35-turbo-16k-0613",
"gpt-35-turbo-instruct-0914",
"gpt-4",
"gpt-4-0314",
"gpt-4-0613",
"gpt-4-1106-preview",
"gpt-4-0125-preview",
"gpt-4-32k",
"gpt-4-32k-0314",
"gpt-4-32k-0613",
"gpt-4-turbo",
"gpt-4-turbo-2024-04-09",
"gpt-4-vision",
"gpt-4o",
"gpt-4o-2024-05-13",
"gpt-4o-2024-08-06",
"gpt-4o-2024-11-20",
"gpt-4o-mini",
"gpt-5",
"o1",
"o1-mini",
"o1-preview",
"o3-mini",
"o3",
"o4-mini",
]
BedrockModels: TypeAlias = Literal[
"ai21.jamba-1-5-large-v1:0",
"ai21.jamba-1-5-mini-v1:0",
"amazon.nova-lite-v1:0",
"amazon.nova-lite-v1:0:24k",
"amazon.nova-lite-v1:0:300k",
"amazon.nova-micro-v1:0",
"amazon.nova-micro-v1:0:128k",
"amazon.nova-micro-v1:0:24k",
"amazon.nova-premier-v1:0",
"amazon.nova-premier-v1:0:1000k",
"amazon.nova-premier-v1:0:20k",
"amazon.nova-premier-v1:0:8k",
"amazon.nova-premier-v1:0:mm",
"amazon.nova-pro-v1:0",
"amazon.nova-pro-v1:0:24k",
"amazon.nova-pro-v1:0:300k",
"amazon.titan-text-express-v1",
"amazon.titan-text-express-v1:0:8k",
"amazon.titan-text-lite-v1",
"amazon.titan-text-lite-v1:0:4k",
"amazon.titan-tg1-large",
"anthropic.claude-3-5-haiku-20241022-v1:0",
"anthropic.claude-3-5-sonnet-20240620-v1:0",
"anthropic.claude-3-5-sonnet-20241022-v2:0",
"anthropic.claude-3-7-sonnet-20250219-v1:0",
"anthropic.claude-3-haiku-20240307-v1:0",
"anthropic.claude-3-haiku-20240307-v1:0:200k",
"anthropic.claude-3-haiku-20240307-v1:0:48k",
"anthropic.claude-3-opus-20240229-v1:0",
"anthropic.claude-3-opus-20240229-v1:0:12k",
"anthropic.claude-3-opus-20240229-v1:0:200k",
"anthropic.claude-3-opus-20240229-v1:0:28k",
"anthropic.claude-3-sonnet-20240229-v1:0",
"anthropic.claude-3-sonnet-20240229-v1:0:200k",
"anthropic.claude-3-sonnet-20240229-v1:0:28k",
"anthropic.claude-haiku-4-5-20251001-v1:0",
"anthropic.claude-instant-v1:2:100k",
"anthropic.claude-opus-4-5-20251101-v1:0",
"anthropic.claude-opus-4-1-20250805-v1:0",
"anthropic.claude-opus-4-20250514-v1:0",
"anthropic.claude-sonnet-4-20250514-v1:0",
"anthropic.claude-sonnet-4-5-20250929-v1:0",
"anthropic.claude-v2:0:100k",
"anthropic.claude-v2:0:18k",
"anthropic.claude-v2:1:18k",
"anthropic.claude-v2:1:200k",
"cohere.command-r-plus-v1:0",
"cohere.command-r-v1:0",
"cohere.rerank-v3-5:0",
"deepseek.r1-v1:0",
"meta.llama3-1-70b-instruct-v1:0",
"meta.llama3-1-8b-instruct-v1:0",
"meta.llama3-2-11b-instruct-v1:0",
"meta.llama3-2-1b-instruct-v1:0",
"meta.llama3-2-3b-instruct-v1:0",
"meta.llama3-2-90b-instruct-v1:0",
"meta.llama3-3-70b-instruct-v1:0",
"meta.llama3-70b-instruct-v1:0",
"meta.llama3-8b-instruct-v1:0",
"meta.llama4-maverick-17b-instruct-v1:0",
"meta.llama4-scout-17b-instruct-v1:0",
"mistral.mistral-7b-instruct-v0:2",
"mistral.mistral-large-2402-v1:0",
"mistral.mistral-small-2402-v1:0",
"mistral.mixtral-8x7b-instruct-v0:1",
"mistral.pixtral-large-2502-v1:0",
"openai.gpt-oss-120b-1:0",
"openai.gpt-oss-20b-1:0",
"qwen.qwen3-32b-v1:0",
"qwen.qwen3-coder-30b-a3b-v1:0",
"twelvelabs.pegasus-1-2-v1:0",
]
BEDROCK_MODELS: list[BedrockModels] = [
"ai21.jamba-1-5-large-v1:0",
"ai21.jamba-1-5-mini-v1:0",
"amazon.nova-lite-v1:0",
"amazon.nova-lite-v1:0:24k",
"amazon.nova-lite-v1:0:300k",
"amazon.nova-micro-v1:0",
"amazon.nova-micro-v1:0:128k",
"amazon.nova-micro-v1:0:24k",
"amazon.nova-premier-v1:0",
"amazon.nova-premier-v1:0:1000k",
"amazon.nova-premier-v1:0:20k",
"amazon.nova-premier-v1:0:8k",
"amazon.nova-premier-v1:0:mm",
"amazon.nova-pro-v1:0",
"amazon.nova-pro-v1:0:24k",
"amazon.nova-pro-v1:0:300k",
"amazon.titan-text-express-v1",
"amazon.titan-text-express-v1:0:8k",
"amazon.titan-text-lite-v1",
"amazon.titan-text-lite-v1:0:4k",
"amazon.titan-tg1-large",
"anthropic.claude-3-5-haiku-20241022-v1:0",
"anthropic.claude-3-5-sonnet-20240620-v1:0",
"anthropic.claude-3-5-sonnet-20241022-v2:0",
"anthropic.claude-3-7-sonnet-20250219-v1:0",
"anthropic.claude-3-haiku-20240307-v1:0",
"anthropic.claude-3-haiku-20240307-v1:0:200k",
"anthropic.claude-3-haiku-20240307-v1:0:48k",
"anthropic.claude-3-opus-20240229-v1:0",
"anthropic.claude-3-opus-20240229-v1:0:12k",
"anthropic.claude-3-opus-20240229-v1:0:200k",
"anthropic.claude-3-opus-20240229-v1:0:28k",
"anthropic.claude-3-sonnet-20240229-v1:0",
"anthropic.claude-3-sonnet-20240229-v1:0:200k",
"anthropic.claude-3-sonnet-20240229-v1:0:28k",
"anthropic.claude-haiku-4-5-20251001-v1:0",
"anthropic.claude-instant-v1:2:100k",
"anthropic.claude-opus-4-5-20251101-v1:0",
"anthropic.claude-opus-4-1-20250805-v1:0",
"anthropic.claude-opus-4-20250514-v1:0",
"anthropic.claude-sonnet-4-20250514-v1:0",
"anthropic.claude-sonnet-4-5-20250929-v1:0",
"anthropic.claude-v2:0:100k",
"anthropic.claude-v2:0:18k",
"anthropic.claude-v2:1:18k",
"anthropic.claude-v2:1:200k",
"cohere.command-r-plus-v1:0",
"cohere.command-r-v1:0",
"cohere.rerank-v3-5:0",
"deepseek.r1-v1:0",
"meta.llama3-1-70b-instruct-v1:0",
"meta.llama3-1-8b-instruct-v1:0",
"meta.llama3-2-11b-instruct-v1:0",
"meta.llama3-2-1b-instruct-v1:0",
"meta.llama3-2-3b-instruct-v1:0",
"meta.llama3-2-90b-instruct-v1:0",
"meta.llama3-3-70b-instruct-v1:0",
"meta.llama3-70b-instruct-v1:0",
"meta.llama3-8b-instruct-v1:0",
"meta.llama4-maverick-17b-instruct-v1:0",
"meta.llama4-scout-17b-instruct-v1:0",
"mistral.mistral-7b-instruct-v0:2",
"mistral.mistral-large-2402-v1:0",
"mistral.mistral-small-2402-v1:0",
"mistral.mixtral-8x7b-instruct-v0:1",
"mistral.pixtral-large-2502-v1:0",
"openai.gpt-oss-120b-1:0",
"openai.gpt-oss-20b-1:0",
"qwen.qwen3-32b-v1:0",
"qwen.qwen3-coder-30b-a3b-v1:0",
"twelvelabs.pegasus-1-2-v1:0",
]
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/llms/constants.py",
"license": "MIT License",
"lines": 559,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai/src/crewai/events/types/mcp_events.py | from datetime import datetime
from typing import Any
from crewai.events.base_events import BaseEvent
class MCPEvent(BaseEvent):
    """Base event for MCP operations.

    Carries the server identity shared by all MCP events plus optional
    agent/task provenance populated at construction time.
    """

    server_name: str  # Human-readable server identifier (URL or stdio command line)
    server_url: str | None = None  # Set for http/sse transports; None for stdio
    transport_type: str | None = None  # "stdio", "http", "sse"
    agent_id: str | None = None
    agent_role: str | None = None
    from_agent: Any | None = None  # Originating agent object, if any
    from_task: Any | None = None  # Originating task object, if any

    def __init__(self, **data: Any) -> None:
        super().__init__(**data)
        # Derive agent/task fields from the raw payload via helpers
        # inherited from BaseEvent (defined elsewhere — presumably they
        # populate agent_id/agent_role/from_agent/from_task; confirm there).
        self._set_agent_params(data)
        self._set_task_params(data)
class MCPConnectionStartedEvent(MCPEvent):
    """Event emitted when starting to connect to an MCP server."""

    type: str = "mcp_connection_started"
    connect_timeout: int | None = None  # Connection timeout in seconds, if set
    is_reconnect: bool = (
        False  # True if this is a reconnection, False for first connection
    )
class MCPConnectionCompletedEvent(MCPEvent):
    """Event emitted when successfully connected to an MCP server."""

    type: str = "mcp_connection_completed"
    started_at: datetime | None = None  # When the connection attempt began
    completed_at: datetime | None = None  # When the session finished initializing
    connection_duration_ms: float | None = None  # completed_at - started_at, in ms
    is_reconnect: bool = (
        False  # True if this was a reconnection, False for first connection
    )
class MCPConnectionFailedEvent(MCPEvent):
    """Event emitted when connection to an MCP server fails."""

    type: str = "mcp_connection_failed"
    error: str  # Human-readable failure description
    error_type: str | None = None  # "timeout", "authentication", "network", etc.
    started_at: datetime | None = None  # When the connection attempt began
    failed_at: datetime | None = None  # When the failure was detected
class MCPToolExecutionStartedEvent(MCPEvent):
    """Event emitted when starting to execute an MCP tool."""

    type: str = "mcp_tool_execution_started"
    tool_name: str  # Name of the MCP tool being invoked
    tool_args: dict[str, Any] | None = None  # Arguments passed to the tool
class MCPToolExecutionCompletedEvent(MCPEvent):
    """Event emitted when MCP tool execution completes."""

    type: str = "mcp_tool_execution_completed"
    tool_name: str  # Name of the MCP tool that was invoked
    tool_args: dict[str, Any] | None = None  # Arguments passed to the tool
    result: Any | None = None  # Raw tool result, if any
    started_at: datetime | None = None  # When execution began
    completed_at: datetime | None = None  # When execution finished
    execution_duration_ms: float | None = None  # completed_at - started_at, in ms
class MCPToolExecutionFailedEvent(MCPEvent):
    """Event emitted when MCP tool execution fails."""

    type: str = "mcp_tool_execution_failed"
    tool_name: str  # Name of the MCP tool that was invoked
    tool_args: dict[str, Any] | None = None  # Arguments passed to the tool
    error: str  # Human-readable failure description
    error_type: str | None = None  # "timeout", "validation", "server_error", etc.
    started_at: datetime | None = None  # When execution began
    failed_at: datetime | None = None  # When the failure was detected
class MCPConfigFetchFailedEvent(BaseEvent):
    """Event emitted when fetching an AMP MCP server config fails.

    This covers cases where the slug is not connected, the API call
    failed, or native MCP resolution failed after config was fetched.
    """

    type: str = "mcp_config_fetch_failed"
    slug: str  # AMP server slug whose config fetch failed
    error: str  # Human-readable failure description
    error_type: str | None = None  # "not_connected", "api_error", "connection_failed"
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/events/types/mcp_events.py",
"license": "MIT License",
"lines": 71,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai/src/crewai/mcp/client.py | """MCP client with session management for CrewAI agents."""
import asyncio
from collections.abc import Callable
from contextlib import AsyncExitStack
from datetime import datetime
import logging
import time
from typing import Any, NamedTuple
from typing_extensions import Self
# BaseExceptionGroup is available in Python 3.11+
try:
from builtins import BaseExceptionGroup
except ImportError:
# Fallback for Python < 3.11 (shouldn't happen in practice)
BaseExceptionGroup = Exception
from crewai.events.event_bus import crewai_event_bus
from crewai.events.types.mcp_events import (
MCPConnectionCompletedEvent,
MCPConnectionFailedEvent,
MCPConnectionStartedEvent,
MCPToolExecutionCompletedEvent,
MCPToolExecutionFailedEvent,
MCPToolExecutionStartedEvent,
)
from crewai.mcp.transports.base import BaseTransport
from crewai.mcp.transports.http import HTTPTransport
from crewai.mcp.transports.sse import SSETransport
from crewai.mcp.transports.stdio import StdioTransport
from crewai.utilities.string_utils import sanitize_tool_name
class _MCPToolResult(NamedTuple):
    """Internal result from an MCP tool call, carrying the ``isError`` flag."""

    content: str  # Tool call output rendered as text
    is_error: bool  # Mirrors the MCP ``isError`` flag on the response
# MCP Connection timeout constants (in seconds)
MCP_CONNECTION_TIMEOUT = 30  # Increased for slow servers
MCP_TOOL_EXECUTION_TIMEOUT = 30
MCP_DISCOVERY_TIMEOUT = 30  # Increased for slow servers
MCP_MAX_RETRIES = 3

# Simple in-memory cache for MCP tool schemas (duration: 5 minutes).
# Maps a cache key to (schema, timestamp); presumably the timestamp is a
# time.time() value checked against _cache_ttl by the readers — confirm there.
_mcp_schema_cache: dict[str, tuple[dict[str, Any], float]] = {}
_cache_ttl = 300  # 5 minutes
class MCPClient:
"""MCP client with session management.
This client manages connections to MCP servers and provides a high-level
interface for interacting with MCP tools, prompts, and resources.
Example:
```python
transport = StdioTransport(command="python", args=["server.py"])
client = MCPClient(transport)
async with client:
tools = await client.list_tools()
result = await client.call_tool("tool_name", {"arg": "value"})
```
"""
def __init__(
self,
transport: BaseTransport,
connect_timeout: int = MCP_CONNECTION_TIMEOUT,
execution_timeout: int = MCP_TOOL_EXECUTION_TIMEOUT,
discovery_timeout: int = MCP_DISCOVERY_TIMEOUT,
max_retries: int = MCP_MAX_RETRIES,
cache_tools_list: bool = False,
logger: logging.Logger | None = None,
) -> None:
"""Initialize MCP client.
Args:
transport: Transport instance for MCP server connection.
connect_timeout: Connection timeout in seconds.
execution_timeout: Tool execution timeout in seconds.
discovery_timeout: Tool discovery timeout in seconds.
max_retries: Maximum retry attempts for operations.
cache_tools_list: Whether to cache tool list results.
logger: Optional logger instance.
"""
self.transport = transport
self.connect_timeout = connect_timeout
self.execution_timeout = execution_timeout
self.discovery_timeout = discovery_timeout
self.max_retries = max_retries
self.cache_tools_list = cache_tools_list
# self._logger = logger or logging.getLogger(__name__)
self._session: Any = None
self._initialized = False
self._exit_stack = AsyncExitStack()
self._was_connected = False
    @property
    def connected(self) -> bool:
        """Check if client is connected to server.

        True only when the transport reports a live connection AND the MCP
        initialize handshake has completed.
        """
        return self.transport.connected and self._initialized
@property
def session(self) -> Any:
"""Get the MCP session."""
if self._session is None:
raise RuntimeError("Client not connected. Call connect() first.")
return self._session
def _get_server_info(self) -> tuple[str, str | None, str | None]:
"""Get server information for events.
Returns:
Tuple of (server_name, server_url, transport_type).
"""
if isinstance(self.transport, StdioTransport):
server_name = f"{self.transport.command} {' '.join(self.transport.args)}"
server_url = None
transport_type = self.transport.transport_type.value
elif isinstance(self.transport, HTTPTransport):
server_name = self.transport.url
server_url = self.transport.url
transport_type = self.transport.transport_type.value
elif isinstance(self.transport, SSETransport):
server_name = self.transport.url
server_url = self.transport.url
transport_type = self.transport.transport_type.value
else:
server_name = "Unknown MCP Server"
server_url = None
transport_type = (
self.transport.transport_type.value
if hasattr(self.transport, "transport_type")
else None
)
return server_name, server_url, transport_type
async def connect(self) -> Self:
    """Connect to MCP server and initialize session.

    Emits MCPConnectionStartedEvent before attempting the connection and
    either MCPConnectionCompletedEvent or MCPConnectionFailedEvent after.
    The transport and the ClientSession are entered through a single
    AsyncExitStack so they share one async scope (prevents anyio
    cancel-scope errors on teardown). Every failure path runs
    _cleanup_on_error() before re-raising.

    Returns:
        Self for method chaining.

    Raises:
        ConnectionError: If connection or session initialization fails.
        ImportError: If MCP SDK not available.
    """
    if self.connected:
        return self
    # Get server info for events
    server_name, server_url, transport_type = self._get_server_info()
    is_reconnect = self._was_connected
    # Emit connection started event
    started_at = datetime.now()
    crewai_event_bus.emit(
        self,
        MCPConnectionStartedEvent(
            server_name=server_name,
            server_url=server_url,
            transport_type=transport_type,
            is_reconnect=is_reconnect,
            connect_timeout=self.connect_timeout,
        ),
    )
    try:
        from mcp import ClientSession

        # Use AsyncExitStack to manage transport and session contexts together.
        # This ensures they're in the same async scope and prevents cancel
        # scope errors. The transport handles the already-connected state.
        await self._exit_stack.enter_async_context(self.transport)
        # Create ClientSession with transport streams
        self._session = ClientSession(
            self.transport.read_stream,
            self.transport.write_stream,
        )
        # Enter the session's async context manager via exit stack
        await self._exit_stack.enter_async_context(self._session)
        # Initialize the session (required by MCP protocol)
        try:
            await asyncio.wait_for(
                self._session.initialize(),
                timeout=self.connect_timeout,
            )
        except asyncio.CancelledError:
            # If initialization was cancelled (e.g., event loop closing),
            # cleanup and re-raise - don't suppress cancellation
            await self._cleanup_on_error()
            raise
        except BaseExceptionGroup as eg:
            # Handle exception groups from anyio task groups.
            # Extract the actual meaningful error (not GeneratorExit).
            actual_error = None
            for exc in eg.exceptions:
                if isinstance(exc, Exception) and not isinstance(
                    exc, GeneratorExit
                ):
                    # Check if it's an HTTP error (like 401)
                    error_msg = str(exc).lower()
                    if "401" in error_msg or "unauthorized" in error_msg:
                        actual_error = exc
                        break
                    # Ignore internal cancel-scope/task errors; keep the
                    # first error that looks meaningful to a caller.
                    if "cancel scope" not in error_msg and "task" not in error_msg:
                        actual_error = exc
                        break
            await self._cleanup_on_error()
            if actual_error:
                raise ConnectionError(
                    f"Failed to connect to MCP server: {actual_error}"
                ) from actual_error
            raise ConnectionError(f"Failed to connect to MCP server: {eg}") from eg
        self._initialized = True
        self._was_connected = True
        completed_at = datetime.now()
        connection_duration_ms = (completed_at - started_at).total_seconds() * 1000
        crewai_event_bus.emit(
            self,
            MCPConnectionCompletedEvent(
                server_name=server_name,
                server_url=server_url,
                transport_type=transport_type,
                started_at=started_at,
                completed_at=completed_at,
                connection_duration_ms=connection_duration_ms,
                is_reconnect=is_reconnect,
            ),
        )
        return self
    except ImportError as e:
        await self._cleanup_on_error()
        error_msg = (
            "MCP library not available. Please install with: pip install mcp"
        )
        self._emit_connection_failed(
            server_name,
            server_url,
            transport_type,
            error_msg,
            "import_error",
            started_at,
        )
        raise ImportError(error_msg) from e
    except asyncio.TimeoutError as e:
        await self._cleanup_on_error()
        error_msg = f"MCP connection timed out after {self.connect_timeout} seconds. The server may be slow or unreachable."
        self._emit_connection_failed(
            server_name,
            server_url,
            transport_type,
            error_msg,
            "timeout",
            started_at,
        )
        raise ConnectionError(error_msg) from e
    except asyncio.CancelledError:
        # Re-raise cancellation - don't suppress it
        await self._cleanup_on_error()
        self._emit_connection_failed(
            server_name,
            server_url,
            transport_type,
            "Connection cancelled",
            "cancelled",
            started_at,
        )
        raise
    except BaseExceptionGroup as eg:
        # Handle exception groups from anyio task groups at outer level
        # (same extraction logic as the inner handler above).
        actual_error = None
        for exc in eg.exceptions:
            if isinstance(exc, Exception) and not isinstance(exc, GeneratorExit):
                error_msg = str(exc).lower()
                if "401" in error_msg or "unauthorized" in error_msg:
                    actual_error = exc
                    break
                if "cancel scope" not in error_msg and "task" not in error_msg:
                    actual_error = exc
                    break
        await self._cleanup_on_error()
        # Classify 401/unauthorized as authentication; everything else as network.
        error_type = (
            "authentication"
            if actual_error
            and (
                "401" in str(actual_error).lower()
                or "unauthorized" in str(actual_error).lower()
            )
            else "network"
        )
        error_msg = str(actual_error) if actual_error else str(eg)
        self._emit_connection_failed(
            server_name,
            server_url,
            transport_type,
            error_msg,
            error_type,
            started_at,
        )
        if actual_error:
            raise ConnectionError(
                f"Failed to connect to MCP server: {actual_error}"
            ) from actual_error
        raise ConnectionError(f"Failed to connect to MCP server: {eg}") from eg
    except Exception as e:
        await self._cleanup_on_error()
        error_type = (
            "authentication"
            if "401" in str(e).lower() or "unauthorized" in str(e).lower()
            else "network"
        )
        self._emit_connection_failed(
            server_name, server_url, transport_type, str(e), error_type, started_at
        )
        raise ConnectionError(f"Failed to connect to MCP server: {e}") from e
def _emit_connection_failed(
    self,
    server_name: str,
    server_url: str | None,
    transport_type: str | None,
    error: str,
    error_type: str,
    started_at: datetime,
) -> None:
    """Publish an MCPConnectionFailedEvent describing a failed attempt."""
    event = MCPConnectionFailedEvent(
        server_name=server_name,
        server_url=server_url,
        transport_type=transport_type,
        error=error,
        error_type=error_type,
        started_at=started_at,
        failed_at=datetime.now(),
    )
    crewai_event_bus.emit(self, event)
async def _cleanup_on_error(self) -> None:
"""Cleanup resources when an error occurs during connection."""
try:
await self._exit_stack.aclose()
except Exception as e:
# Best effort cleanup - ignore all other errors
raise RuntimeError(f"Error during MCP client cleanup: {e}") from e
finally:
self._session = None
self._initialized = False
self._exit_stack = AsyncExitStack()
async def disconnect(self) -> None:
    """Disconnect from the MCP server and release all held resources.

    Raises:
        RuntimeError: If closing the exit stack fails; state is reset
            regardless via the finally block.
    """
    if not self.connected:
        return
    try:
        await self._exit_stack.aclose()
    except Exception as exc:
        message = f"Error during MCP client disconnect: {exc}"
        raise RuntimeError(message) from exc
    finally:
        # Always return to a clean, reconnectable state.
        self._initialized = False
        self._session = None
        self._exit_stack = AsyncExitStack()
async def list_tools(self, use_cache: bool | None = None) -> list[dict[str, Any]]:
"""List available tools from MCP server.
Args:
use_cache: Whether to use cached results. If None, uses
client's cache_tools_list setting.
Returns:
List of tool definitions with name, description, and inputSchema.
"""
if not self.connected:
await self.connect()
# Check cache if enabled
use_cache = use_cache if use_cache is not None else self.cache_tools_list
if use_cache:
cache_key = self._get_cache_key("tools")
if cache_key in _mcp_schema_cache:
cached_data, cache_time = _mcp_schema_cache[cache_key]
if time.time() - cache_time < _cache_ttl:
# Logger removed - return cached data
return cached_data
# List tools with timeout and retries
tools = await self._retry_operation(
self._list_tools_impl,
timeout=self.discovery_timeout,
)
# Cache results if enabled
if use_cache:
cache_key = self._get_cache_key("tools")
_mcp_schema_cache[cache_key] = (tools, time.time())
return tools
async def _list_tools_impl(self) -> list[dict[str, Any]]:
    """Fetch the raw tool list from the session and normalize each entry."""
    listing = await asyncio.wait_for(
        self.session.list_tools(),
        timeout=self.discovery_timeout,
    )
    normalized: list[dict[str, Any]] = []
    for tool in listing.tools:
        normalized.append(
            {
                "name": sanitize_tool_name(tool.name),
                "original_name": tool.name,
                "description": getattr(tool, "description", ""),
                "inputSchema": getattr(tool, "inputSchema", {}),
            }
        )
    return normalized
async def call_tool(
    self, tool_name: str, arguments: dict[str, Any] | None = None
) -> Any:
    """Call a tool on the MCP server.

    Connects on demand, cleans the arguments (dropping None values and
    normalizing shorthand "sources" entries), and emits started /
    completed / failed tool-execution events around the call.

    Args:
        tool_name: Name of the tool to call.
        arguments: Tool arguments.

    Returns:
        Tool execution result content (also returned when the server
        reports a tool-level error; the error is surfaced via events).
    """
    if not self.connected:
        await self.connect()
    arguments = arguments or {}
    cleaned_arguments = self._clean_tool_arguments(arguments)
    # Get server info for events
    server_name, server_url, transport_type = self._get_server_info()
    # Emit tool execution started event
    started_at = datetime.now()
    crewai_event_bus.emit(
        self,
        MCPToolExecutionStartedEvent(
            server_name=server_name,
            server_url=server_url,
            transport_type=transport_type,
            tool_name=tool_name,
            tool_args=cleaned_arguments,
        ),
    )
    try:
        tool_result: _MCPToolResult = await self._retry_operation(
            lambda: self._call_tool_impl(tool_name, cleaned_arguments),
            timeout=self.execution_timeout,
        )
        finished_at = datetime.now()
        execution_duration_ms = (finished_at - started_at).total_seconds() * 1000
        if tool_result.is_error:
            # Server-side tool error: emit a failed event but still
            # return the content to the caller below.
            crewai_event_bus.emit(
                self,
                MCPToolExecutionFailedEvent(
                    server_name=server_name,
                    server_url=server_url,
                    transport_type=transport_type,
                    tool_name=tool_name,
                    tool_args=cleaned_arguments,
                    error=tool_result.content,
                    error_type="tool_error",
                    started_at=started_at,
                    failed_at=finished_at,
                ),
            )
        else:
            crewai_event_bus.emit(
                self,
                MCPToolExecutionCompletedEvent(
                    server_name=server_name,
                    server_url=server_url,
                    transport_type=transport_type,
                    tool_name=tool_name,
                    tool_args=cleaned_arguments,
                    result=tool_result.content,
                    started_at=started_at,
                    completed_at=finished_at,
                    execution_duration_ms=execution_duration_ms,
                ),
            )
        return tool_result.content
    except Exception as e:
        failed_at = datetime.now()
        # Distinguish timeouts from other server errors for listeners.
        error_type = (
            "timeout"
            if isinstance(e, (asyncio.TimeoutError, ConnectionError))
            and "timeout" in str(e).lower()
            else "server_error"
        )
        crewai_event_bus.emit(
            self,
            MCPToolExecutionFailedEvent(
                server_name=server_name,
                server_url=server_url,
                transport_type=transport_type,
                tool_name=tool_name,
                tool_args=cleaned_arguments,
                error=str(e),
                error_type=error_type,
                started_at=started_at,
                failed_at=failed_at,
            ),
        )
        raise
def _clean_tool_arguments(self, arguments: dict[str, Any]) -> dict[str, Any]:
"""Clean tool arguments by removing None values and fixing formats.
Args:
arguments: Raw tool arguments.
Returns:
Cleaned arguments ready for MCP server.
"""
cleaned = {}
for key, value in arguments.items():
# Skip None values
if value is None:
continue
# Fix sources array format: convert ["web"] to [{"type": "web"}]
if key == "sources" and isinstance(value, list):
fixed_sources = []
for item in value:
if isinstance(item, str):
# Convert string to object format
fixed_sources.append({"type": item})
elif isinstance(item, dict):
# Already in correct format
fixed_sources.append(item)
else:
# Keep as is if unknown format
fixed_sources.append(item)
if fixed_sources:
cleaned[key] = fixed_sources
continue
# Recursively clean nested dictionaries
if isinstance(value, dict):
nested_cleaned = self._clean_tool_arguments(value)
if nested_cleaned: # Only add if not empty
cleaned[key] = nested_cleaned
elif isinstance(value, list):
# Clean list items
cleaned_list = []
for item in value:
if isinstance(item, dict):
cleaned_item = self._clean_tool_arguments(item)
if cleaned_item:
cleaned_list.append(cleaned_item)
elif item is not None:
cleaned_list.append(item)
if cleaned_list:
cleaned[key] = cleaned_list
else:
# Keep primitive values
cleaned[key] = value
return cleaned
async def _call_tool_impl(
    self, tool_name: str, arguments: dict[str, Any]
) -> _MCPToolResult:
    """Invoke a tool via the session and normalize the raw SDK result."""
    raw = await asyncio.wait_for(
        self.session.call_tool(tool_name, arguments),
        timeout=self.execution_timeout,
    )
    # isError may be absent or None; coerce falsy values to False.
    is_error = getattr(raw, "isError", False) or False
    content = getattr(raw, "content", None)
    if not content:
        # No content attribute (or empty): stringify the whole result.
        return _MCPToolResult(str(raw), is_error)
    if isinstance(content, list) and len(content) > 0:
        first = content[0]
        # Prefer the text payload of the first content item when present.
        if hasattr(first, "text"):
            return _MCPToolResult(str(first.text), is_error)
        return _MCPToolResult(str(first), is_error)
    return _MCPToolResult(str(content), is_error)
async def list_prompts(self) -> list[dict[str, Any]]:
    """List the prompts exposed by the MCP server.

    Connects on demand; the request runs through _retry_operation with
    the discovery timeout.

    Returns:
        List of prompt definitions.
    """
    if not self.connected:
        await self.connect()
    prompts = await self._retry_operation(
        self._list_prompts_impl,
        timeout=self.discovery_timeout,
    )
    return prompts
async def _list_prompts_impl(self) -> list[dict[str, Any]]:
"""Internal implementation of list_prompts."""
prompts_result = await asyncio.wait_for(
self.session.list_prompts(),
timeout=self.discovery_timeout,
)
return [
{
"name": prompt.name,
"description": getattr(prompt, "description", ""),
"arguments": getattr(prompt, "arguments", []),
}
for prompt in prompts_result.prompts
]
async def get_prompt(
self, prompt_name: str, arguments: dict[str, Any] | None = None
) -> dict[str, Any]:
"""Get a prompt from the MCP server.
Args:
prompt_name: Name of the prompt to get.
arguments: Optional prompt arguments.
Returns:
Prompt content and metadata.
"""
if not self.connected:
await self.connect()
arguments = arguments or {}
return await self._retry_operation(
lambda: self._get_prompt_impl(prompt_name, arguments),
timeout=self.execution_timeout,
)
async def _get_prompt_impl(
self, prompt_name: str, arguments: dict[str, Any]
) -> dict[str, Any]:
"""Internal implementation of get_prompt."""
result = await asyncio.wait_for(
self.session.get_prompt(prompt_name, arguments),
timeout=self.execution_timeout,
)
return {
"name": prompt_name,
"messages": [
{
"role": msg.role,
"content": msg.content,
}
for msg in result.messages
],
"arguments": arguments,
}
async def _retry_operation(
self,
operation: Callable[[], Any],
timeout: int | None = None,
) -> Any:
"""Retry an operation with exponential backoff.
Args:
operation: Async operation to retry.
timeout: Operation timeout in seconds.
Returns:
Operation result.
"""
last_error = None
timeout = timeout or self.execution_timeout
for attempt in range(self.max_retries):
try:
if timeout:
return await asyncio.wait_for(operation(), timeout=timeout)
return await operation()
except asyncio.TimeoutError as e: # noqa: PERF203
last_error = f"Operation timed out after {timeout} seconds"
if attempt < self.max_retries - 1:
wait_time = 2**attempt
await asyncio.sleep(wait_time)
else:
raise ConnectionError(last_error) from e
except Exception as e:
error_str = str(e).lower()
# Classify errors as retryable or non-retryable
if "authentication" in error_str or "unauthorized" in error_str:
raise ConnectionError(f"Authentication failed: {e}") from e
if "not found" in error_str:
raise ValueError(f"Resource not found: {e}") from e
# Retryable errors
last_error = str(e)
if attempt < self.max_retries - 1:
wait_time = 2**attempt
await asyncio.sleep(wait_time)
else:
raise ConnectionError(
f"Operation failed after {self.max_retries} attempts: {last_error}"
) from e
raise ConnectionError(f"Operation failed: {last_error}")
def _get_cache_key(self, resource_type: str) -> str:
    """Build a cache key identifying this server and resource type.

    Args:
        resource_type: Type of resource (e.g., "tools", "prompts").

    Returns:
        Cache key string.
    """
    transport = self.transport
    # The server identity is derived from how the transport reaches it.
    if isinstance(transport, StdioTransport):
        identity = f"stdio:{transport.command}:{':'.join(transport.args)}"
    elif isinstance(transport, HTTPTransport):
        identity = f"http:{transport.url}"
    elif isinstance(transport, SSETransport):
        identity = f"sse:{transport.url}"
    else:
        identity = f"{transport.transport_type}:unknown"
    return f"mcp:{identity}:{resource_type}"
async def __aenter__(self) -> Self:
"""Async context manager entry."""
return await self.connect()
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: Any,
) -> None:
"""Async context manager exit."""
await self.disconnect()
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/mcp/client.py",
"license": "MIT License",
"lines": 672,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai/src/crewai/mcp/config.py | """MCP server configuration models for CrewAI agents.
This module provides Pydantic models for configuring MCP servers with
various transport types, similar to OpenAI's Agents SDK.
"""
from pydantic import BaseModel, Field
from crewai.mcp.filters import ToolFilter
class MCPServerStdio(BaseModel):
    """Stdio MCP server configuration.

    Used for local MCP servers launched as subprocesses that speak the
    protocol over standard input/output.

    Example:
        ```python
        mcp_server = MCPServerStdio(
            command="python",
            args=["path/to/server.py"],
            env={"API_KEY": "..."},
            tool_filter=create_static_tool_filter(
                allowed_tool_names=["read_file", "write_file"]
            ),
        )
        ```
    """

    # Executable used to launch the server process.
    command: str = Field(
        ...,
        description="Command to execute (e.g., 'python', 'node', 'npx', 'uvx').",
    )
    # Arguments appended to the command line.
    args: list[str] = Field(
        default_factory=list,
        description="Command arguments (e.g., ['server.py'] or ['-y', '@mcp/server']).",
    )
    # Extra environment for the child process; None inherits the parent env.
    env: dict[str, str] | None = Field(
        default=None,
        description="Environment variables to pass to the process.",
    )
    # Optional callable deciding which discovered tools are exposed.
    tool_filter: ToolFilter | None = Field(
        default=None,
        description="Optional tool filter for filtering available tools.",
    )
    # When True, the discovered tool list is cached for later lookups.
    cache_tools_list: bool = Field(
        default=False,
        description="Whether to cache the tool list for faster subsequent access.",
    )
class MCPServerHTTP(BaseModel):
    """HTTP/Streamable HTTP MCP server configuration.

    Used for remote MCP servers reached over HTTP/HTTPS, by default via
    the streamable HTTP transport.

    Example:
        ```python
        mcp_server = MCPServerHTTP(
            url="https://api.example.com/mcp",
            headers={"Authorization": "Bearer ..."},
            cache_tools_list=True,
        )
        ```
    """

    # Endpoint of the remote MCP server.
    url: str = Field(
        ..., description="Server URL (e.g., 'https://api.example.com/mcp')."
    )
    # Extra request headers (e.g., Authorization).
    headers: dict[str, str] | None = Field(
        default=None,
        description="Optional HTTP headers for authentication or other purposes.",
    )
    # False selects the plain HTTP transport instead of streamable HTTP.
    streamable: bool = Field(
        default=True,
        description="Whether to use streamable HTTP transport (default: True).",
    )
    # Optional callable deciding which discovered tools are exposed.
    tool_filter: ToolFilter | None = Field(
        default=None,
        description="Optional tool filter for filtering available tools.",
    )
    # When True, the discovered tool list is cached for later lookups.
    cache_tools_list: bool = Field(
        default=False,
        description="Whether to cache the tool list for faster subsequent access.",
    )
class MCPServerSSE(BaseModel):
    """Server-Sent Events (SSE) MCP server configuration.

    Used for remote MCP servers that stream responses over SSE.

    Example:
        ```python
        mcp_server = MCPServerSSE(
            url="https://api.example.com/mcp/sse",
            headers={"Authorization": "Bearer ..."},
        )
        ```
    """

    # Endpoint of the remote SSE MCP server.
    url: str = Field(
        ...,
        description="Server URL (e.g., 'https://api.example.com/mcp/sse').",
    )
    # Extra request headers (e.g., Authorization).
    headers: dict[str, str] | None = Field(
        default=None,
        description="Optional HTTP headers for authentication or other purposes.",
    )
    # Optional callable deciding which discovered tools are exposed.
    tool_filter: ToolFilter | None = Field(
        default=None,
        description="Optional tool filter for filtering available tools.",
    )
    # When True, the discovered tool list is cached for later lookups.
    cache_tools_list: bool = Field(
        default=False,
        description="Whether to cache the tool list for faster subsequent access.",
    )
# Union of all supported MCP server configurations. Accept this alias
# wherever any transport flavor (stdio, HTTP, SSE) is valid.
MCPServerConfig = MCPServerStdio | MCPServerHTTP | MCPServerSSE
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/mcp/config.py",
"license": "MIT License",
"lines": 104,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
crewAIInc/crewAI:lib/crewai/src/crewai/mcp/filters.py | """Tool filtering support for MCP servers.
This module provides utilities for filtering tools from MCP servers,
including static allow/block lists and dynamic context-aware filtering.
"""
from collections.abc import Callable
from typing import TYPE_CHECKING, Any
from pydantic import BaseModel, Field
# NOTE(review): empty TYPE_CHECKING guard — apparently a leftover; safe to
# delete once it's confirmed no type-only import is meant to live here.
if TYPE_CHECKING:
    pass
class ToolFilterContext(BaseModel):
    """Context for dynamic tool filtering.

    Passed to dynamic tool filters so they can make per-agent, per-server,
    or per-run decisions about which tools to expose.
    """

    # The agent requesting tools (typed Any; the concrete agent type is
    # not imported here).
    agent: Any = Field(..., description="The agent requesting tools.")
    # Human-readable name of the MCP server whose tools are being filtered.
    server_name: str = Field(..., description="Name of the MCP server.")
    # Optional extra state a filter may consult.
    run_context: dict[str, Any] | None = Field(
        default=None,
        description="Optional run context for additional filtering logic.",
    )
# Type alias for tool filter callables. Two shapes are accepted: a
# context-aware filter taking (ToolFilterContext, tool_dict), or a simple
# filter taking only the tool_dict. Both return True to keep the tool.
ToolFilter = (
    Callable[[ToolFilterContext, dict[str, Any]], bool]
    | Callable[[dict[str, Any]], bool]
)
class StaticToolFilter:
"""Static tool filter with allow/block lists.
This filter provides simple allow/block list filtering based on
tool names. Useful for restricting which tools are available
from an MCP server.
Example:
```python
filter = StaticToolFilter(
allowed_tool_names=["read_file", "write_file"],
blocked_tool_names=["delete_file"],
)
```
"""
def __init__(
self,
allowed_tool_names: list[str] | None = None,
blocked_tool_names: list[str] | None = None,
) -> None:
"""Initialize static tool filter.
Args:
allowed_tool_names: List of tool names to allow. If None,
all tools are allowed (unless blocked).
blocked_tool_names: List of tool names to block. Blocked tools
take precedence over allowed tools.
"""
self.allowed_tool_names = set(allowed_tool_names or [])
self.blocked_tool_names = set(blocked_tool_names or [])
def __call__(self, tool: dict[str, Any]) -> bool:
"""Filter tool based on allow/block lists.
Args:
tool: Tool definition dictionary with at least 'name' key.
Returns:
True if tool should be included, False otherwise.
"""
tool_name = tool.get("name", "")
# Blocked tools take precedence
if self.blocked_tool_names and tool_name in self.blocked_tool_names:
return False
# If allow list exists, tool must be in it
if self.allowed_tool_names:
return tool_name in self.allowed_tool_names
# No restrictions - allow all
return True
def create_static_tool_filter(
    allowed_tool_names: list[str] | None = None,
    blocked_tool_names: list[str] | None = None,
) -> Callable[[dict[str, Any]], bool]:
    """Build a static allow/block-list tool filter.

    Convenience wrapper around StaticToolFilter.

    Args:
        allowed_tool_names: Names to allow; None allows everything that
            is not blocked.
        blocked_tool_names: Names to block; takes precedence over the
            allow list.

    Returns:
        A callable returning True for tools that should be kept.

    Example:
        ```python
        filter_fn = create_static_tool_filter(
            allowed_tool_names=["read_file", "write_file"],
            blocked_tool_names=["delete_file"],
        )

        mcp_server = MCPServerStdio(
            command="npx",
            args=["-y", "@modelcontextprotocol/server-filesystem"],
            tool_filter=filter_fn,
        )
        ```
    """
    static_filter = StaticToolFilter(
        allowed_tool_names=allowed_tool_names,
        blocked_tool_names=blocked_tool_names,
    )
    return static_filter
def create_dynamic_tool_filter(
    filter_func: Callable[[ToolFilterContext, dict[str, Any]], bool],
) -> Callable[[ToolFilterContext, dict[str, Any]], bool]:
    """Mark a context-aware callable as a dynamic tool filter.

    Currently a pass-through: the callable is returned unchanged. The
    wrapper exists for API symmetry with create_static_tool_filter and
    as a future extension point.

    Args:
        filter_func: Function taking (context, tool) and returning True
            to keep the tool.

    Returns:
        The same callable, usable as a tool_filter in MCP server configs.

    Example:
        ```python
        async def context_aware_filter(
            context: ToolFilterContext, tool: dict[str, Any]
        ) -> bool:
            # Block dangerous tools for code reviewers
            if context.agent.role == "Code Reviewer":
                if tool["name"].startswith("danger_"):
                    return False
            return True

        filter_fn = create_dynamic_tool_filter(context_aware_filter)
        mcp_server = MCPServerStdio(
            command="python", args=["server.py"], tool_filter=filter_fn
        )
        ```
    """
    return filter_func
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/mcp/filters.py",
"license": "MIT License",
"lines": 127,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
crewAIInc/crewAI:lib/crewai/src/crewai/mcp/transports/base.py | """Base transport interface for MCP connections."""
from abc import ABC, abstractmethod
from enum import Enum
from typing import Any, Protocol
from typing_extensions import Self
class TransportType(str, Enum):
    """MCP transport types.

    Subclasses ``str`` so members compare equal to (and serialize as)
    their lowercase wire-format values.
    """

    STDIO = "stdio"
    HTTP = "http"
    STREAMABLE_HTTP = "streamable-http"
    SSE = "sse"
class ReadStream(Protocol):
    """Structural protocol for read streams supplied by a transport."""

    async def read(self, n: int = -1) -> bytes:
        """Read bytes from stream.

        Args:
            n: Maximum number of bytes to read; -1 presumably means
                "read everything available" — confirm against the
                concrete stream implementation.
        """
        ...
class WriteStream(Protocol):
    """Structural protocol for write streams supplied by a transport."""

    async def write(self, data: bytes) -> None:
        """Write bytes to stream."""
        ...
class BaseTransport(ABC):
    """Abstract base for MCP transport implementations.

    Subclasses own the low-level connection to an MCP server and expose a
    read/write stream pair once connected. The stream accessors raise
    until connect() has completed.
    """

    def __init__(self, **kwargs: Any) -> None:
        """Initialize disconnected state.

        Args:
            **kwargs: Transport-specific configuration options (unused
                here; consumed by subclasses).
        """
        self._read_stream: ReadStream | None = None
        self._write_stream: WriteStream | None = None
        self._connected = False

    @property
    @abstractmethod
    def transport_type(self) -> TransportType:
        """Return the transport type."""
        ...

    @property
    def connected(self) -> bool:
        """Whether connect() has completed and streams are available."""
        return self._connected

    @property
    def read_stream(self) -> ReadStream:
        """Return the read stream, raising if not connected."""
        stream = self._read_stream
        if stream is None:
            raise RuntimeError("Transport not connected. Call connect() first.")
        return stream

    @property
    def write_stream(self) -> WriteStream:
        """Return the write stream, raising if not connected."""
        stream = self._write_stream
        if stream is None:
            raise RuntimeError("Transport not connected. Call connect() first.")
        return stream

    @abstractmethod
    async def connect(self) -> Self:
        """Establish connection to MCP server.

        Returns:
            Self for method chaining.

        Raises:
            ConnectionError: If connection fails.
        """
        ...

    @abstractmethod
    async def disconnect(self) -> None:
        """Close connection to MCP server."""
        ...

    @abstractmethod
    async def __aenter__(self) -> Self:
        """Async context manager entry."""
        ...

    @abstractmethod
    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: Any,
    ) -> None:
        """Async context manager exit."""
        ...

    def _set_streams(self, read: ReadStream, write: WriteStream) -> None:
        """Record the connected stream pair and mark the transport connected.

        Args:
            read: Read stream.
            write: Write stream.
        """
        self._read_stream, self._write_stream = read, write
        self._connected = True

    def _clear_streams(self) -> None:
        """Forget both streams and mark the transport disconnected."""
        self._read_stream = self._write_stream = None
        self._connected = False
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/mcp/transports/base.py",
"license": "MIT License",
"lines": 96,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
crewAIInc/crewAI:lib/crewai/src/crewai/mcp/transports/http.py | """HTTP and Streamable HTTP transport for MCP servers."""
import asyncio
from typing import Any
from typing_extensions import Self
# BaseExceptionGroup is a builtin from Python 3.11 onward; anyio task
# groups raise it when multiple child tasks fail.
try:
    from builtins import BaseExceptionGroup
except ImportError:
    # Fallback for Python < 3.11 (shouldn't happen in practice): degrade
    # to plain Exception so the except clauses below remain valid.
    BaseExceptionGroup = Exception
from crewai.mcp.transports.base import BaseTransport, TransportType
class HTTPTransport(BaseTransport):
    """HTTP/Streamable HTTP transport for connecting to remote MCP servers.

    This transport connects to MCP servers over HTTP/HTTPS using the
    streamable HTTP client from the MCP SDK.

    Example:
        ```python
        transport = HTTPTransport(
            url="https://api.example.com/mcp",
            headers={"Authorization": "Bearer ..."}
        )
        async with transport:
            # Use transport...
        ```
    """

    def __init__(
        self,
        url: str,
        headers: dict[str, str] | None = None,
        streamable: bool = True,
        **kwargs: Any,
    ) -> None:
        """Initialize HTTP transport.

        Args:
            url: Server URL (e.g., "https://api.example.com/mcp").
            headers: Optional HTTP headers.
            streamable: Whether to use streamable HTTP (default: True).
            **kwargs: Additional transport options.
        """
        super().__init__(**kwargs)
        self.url = url
        self.headers = headers or {}
        self.streamable = streamable
        # Async context manager returned by streamablehttp_client(); held
        # so disconnect() can exit it later.
        # NOTE(review): disconnect() never resets this to None — looks
        # harmless since connect() overwrites it, but confirm.
        self._transport_context: Any = None

    @property
    def transport_type(self) -> TransportType:
        """Return the transport type (streamable vs plain HTTP)."""
        return TransportType.STREAMABLE_HTTP if self.streamable else TransportType.HTTP

    async def connect(self) -> Self:
        """Establish HTTP connection to MCP server.

        Returns:
            Self for method chaining.

        Raises:
            ConnectionError: If connection fails (inner ConnectionErrors
                are re-wrapped by the outer handler into
                "Failed to connect to MCP server: ...").
            ImportError: If MCP SDK not available.
        """
        if self._connected:
            return self
        try:
            from mcp.client.streamable_http import streamablehttp_client

            self._transport_context = streamablehttp_client(
                self.url,
                headers=self.headers if self.headers else None,
                terminate_on_close=True,
            )
            try:
                # The client yields (read, write, session_info); the third
                # value is unused here.
                read, write, _ = await asyncio.wait_for(
                    self._transport_context.__aenter__(), timeout=30.0
                )
            except asyncio.TimeoutError as e:
                self._transport_context = None
                raise ConnectionError(
                    "Transport context entry timed out after 30 seconds. "
                    "Server may be slow or unreachable."
                ) from e
            except Exception as e:
                self._transport_context = None
                raise ConnectionError(f"Failed to enter transport context: {e}") from e
            self._set_streams(read=read, write=write)
            return self
        except ImportError as e:
            raise ImportError(
                "MCP library not available. Please install with: pip install mcp"
            ) from e
        except Exception as e:
            self._clear_streams()
            if self._transport_context is not None:
                self._transport_context = None
            raise ConnectionError(f"Failed to connect to MCP server: {e}") from e

    async def disconnect(self) -> None:
        """Close HTTP connection.

        Cleanup is best effort: anyio cancel-scope/task errors raised
        while the event loop is shutting down are suppressed, and any
        remaining error is logged rather than raised.
        """
        if not self._connected:
            return
        try:
            # Clear streams first so the transport reads as disconnected
            # even if context teardown below misbehaves.
            self._clear_streams()
            # Exit transport context - this will clean up background tasks.
            if self._transport_context is not None:
                try:
                    # Give pending operations a brief moment to finish
                    # before tearing the context down.
                    await asyncio.sleep(0.1)
                    await self._transport_context.__aexit__(None, None, None)
                except (RuntimeError, asyncio.CancelledError) as e:
                    # Ignore "exit cancel scope in a different task" errors
                    # and cancellation: these happen when asyncio.run()
                    # closes the event loop while background tasks are
                    # still running.
                    error_msg = str(e).lower()
                    if "cancel scope" not in error_msg and "task" not in error_msg:
                        # Only suppress cancel scope/task errors; re-raise
                        # other RuntimeErrors. CancelledError is swallowed.
                        if isinstance(e, RuntimeError):
                            raise
                except BaseExceptionGroup as eg:
                    # Exception groups from anyio task groups: suppress
                    # only if they contain cancel scope/task errors.
                    should_suppress = False
                    for exc in eg.exceptions:
                        error_msg = str(exc).lower()
                        if "cancel scope" in error_msg or "task" in error_msg:
                            should_suppress = True
                            break
                    if not should_suppress:
                        raise
                except Exception as e:
                    # Wrapped here, then caught and logged by the outer
                    # handler below.
                    raise RuntimeError(
                        f"Error during HTTP transport disconnect: {e}"
                    ) from e
            self._connected = False
        except Exception as e:
            # Log but don't raise - cleanup should be best effort
            import logging

            logger = logging.getLogger(__name__)
            logger.warning(f"Error during HTTP transport disconnect: {e}")

    async def __aenter__(self) -> Self:
        """Async context manager entry."""
        return await self.connect()

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: Any,
    ) -> None:
        """Async context manager exit."""
        await self.disconnect()
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/mcp/transports/http.py",
"license": "MIT License",
"lines": 146,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai/src/crewai/mcp/transports/sse.py | """Server-Sent Events (SSE) transport for MCP servers."""
from typing import Any
from typing_extensions import Self
from crewai.mcp.transports.base import BaseTransport, TransportType
class SSETransport(BaseTransport):
    """SSE transport for connecting to remote MCP servers.

    This transport connects to MCP servers using Server-Sent Events (SSE)
    for real-time streaming communication.

    Example:
        ```python
        transport = SSETransport(
            url="https://api.example.com/mcp/sse",
            headers={"Authorization": "Bearer ..."}
        )
        async with transport:
            # Use transport...
        ```
    """

    def __init__(
        self,
        url: str,
        headers: dict[str, str] | None = None,
        **kwargs: Any,
    ) -> None:
        """Initialize SSE transport.

        Args:
            url: Server URL (e.g., "https://api.example.com/mcp/sse").
            headers: Optional HTTP headers.
            **kwargs: Additional transport options.
        """
        super().__init__(**kwargs)
        self.url = url
        self.headers = headers or {}
        # Async context manager returned by sse_client(); only held while
        # connecting/connected.
        self._transport_context: Any = None

    @property
    def transport_type(self) -> TransportType:
        """Return the transport type."""
        return TransportType.SSE

    async def connect(self) -> Self:
        """Establish SSE connection to MCP server.

        Returns:
            Self for method chaining.

        Raises:
            ConnectionError: If connection fails.
            ImportError: If MCP SDK not available.
        """
        if self._connected:
            return self
        try:
            from mcp.client.sse import sse_client

            self._transport_context = sse_client(
                self.url,
                headers=self.headers if self.headers else None,
            )
            read, write = await self._transport_context.__aenter__()
            self._set_streams(read=read, write=write)
            return self
        except ImportError as e:
            raise ImportError(
                "MCP library not available. Please install with: pip install mcp"
            ) from e
        except Exception as e:
            self._clear_streams()
            # Drop the stale, possibly half-entered context so a later
            # connect() starts from scratch (mirrors HTTPTransport's
            # failure handling; previously the dead context lingered).
            self._transport_context = None
            raise ConnectionError(f"Failed to connect to SSE MCP server: {e}") from e

    async def disconnect(self) -> None:
        """Close SSE connection (best effort; errors are logged, not raised)."""
        if not self._connected:
            return
        try:
            self._clear_streams()
            if self._transport_context is not None:
                await self._transport_context.__aexit__(None, None, None)
        except Exception as e:
            import logging

            logger = logging.getLogger(__name__)
            logger.warning(f"Error during SSE transport disconnect: {e}")
        finally:
            # An exited context manager cannot be re-entered; always
            # discard it so the next connect() builds a fresh one.
            self._transport_context = None

    async def __aenter__(self) -> Self:
        """Async context manager entry."""
        return await self.connect()

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: Any,
    ) -> None:
        """Async context manager exit."""
        await self.disconnect()
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/mcp/transports/sse.py",
"license": "MIT License",
"lines": 87,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai/src/crewai/mcp/transports/stdio.py | """Stdio transport for MCP servers running as local processes."""
import asyncio
import os
import subprocess
from typing import Any
from typing_extensions import Self
from crewai.mcp.transports.base import BaseTransport, TransportType
class StdioTransport(BaseTransport):
    """Stdio transport for connecting to local MCP servers.

    This transport connects to MCP servers running as local processes,
    communicating via standard input/output streams. Supports Python,
    Node.js, and other command-line servers.

    Example:
        ```python
        transport = StdioTransport(
            command="python",
            args=["path/to/server.py"],
            env={"API_KEY": "..."}
        )
        async with transport:
            # Use transport...
        ```
    """

    def __init__(
        self,
        command: str,
        args: list[str] | None = None,
        env: dict[str, str] | None = None,
        **kwargs: Any,
    ) -> None:
        """Initialize stdio transport.

        Args:
            command: Command to execute (e.g., "python", "node", "npx").
            args: Command arguments (e.g., ["server.py"] or ["-y", "@mcp/server"]).
            env: Environment variables to pass to the process.
            **kwargs: Additional transport options.
        """
        super().__init__(**kwargs)
        self.command = command
        self.args = args or []
        self.env = env or {}
        # NOTE(review): stdio_client manages the subprocess itself; _process is
        # only terminated here if some caller assigns it explicitly.
        self._process: subprocess.Popen[bytes] | None = None
        self._transport_context: Any = None

    @property
    def transport_type(self) -> TransportType:
        """Return the transport type."""
        return TransportType.STDIO

    async def connect(self) -> Self:
        """Start the MCP server process and establish connection.

        Returns:
            Self for method chaining.

        Raises:
            ConnectionError: If process fails to start.
            ImportError: If MCP SDK not available.
        """
        if self._connected:
            return self
        try:
            from mcp import StdioServerParameters
            from mcp.client.stdio import stdio_client

            # Inherit the parent environment, overlaid with caller-supplied vars.
            process_env = os.environ.copy()
            process_env.update(self.env)
            server_params = StdioServerParameters(
                command=self.command,
                args=self.args,
                env=process_env if process_env else None,
            )
            self._transport_context = stdio_client(server_params)
            try:
                read, write = await self._transport_context.__aenter__()
            except Exception as e:
                self._transport_context = None
                raise ConnectionError(
                    f"Failed to enter stdio transport context: {e}"
                ) from e
            self._set_streams(read=read, write=write)
            return self
        except ImportError as e:
            raise ImportError(
                "MCP library not available. Please install with: pip install mcp"
            ) from e
        except Exception as e:
            self._clear_streams()
            if self._transport_context is not None:
                self._transport_context = None
            raise ConnectionError(f"Failed to start MCP server process: {e}") from e

    async def disconnect(self) -> None:
        """Terminate the MCP server process and close connection."""
        if not self._connected:
            return
        try:
            self._clear_streams()
            if self._transport_context is not None:
                await self._transport_context.__aexit__(None, None, None)
            if self._process is not None:
                try:
                    self._process.terminate()
                    try:
                        # Popen.wait() blocks and is not awaitable, so run it on
                        # a worker thread and bound the wait with a timeout.
                        await asyncio.wait_for(
                            asyncio.to_thread(self._process.wait), timeout=5.0
                        )
                    except asyncio.TimeoutError:
                        # Graceful shutdown timed out; force-kill and reap.
                        self._process.kill()
                        await asyncio.to_thread(self._process.wait)
                finally:
                    self._process = None
        except Exception as e:
            # Log but don't raise - cleanup should be best effort
            import logging

            logger = logging.getLogger(__name__)
            logger.warning(f"Error during stdio transport disconnect: {e}")

    async def __aenter__(self) -> Self:
        """Async context manager entry."""
        return await self.connect()

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: Any,
    ) -> None:
        """Async context manager exit."""
        await self.disconnect()
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/mcp/transports/stdio.py",
"license": "MIT License",
"lines": 124,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai/src/crewai/tools/mcp_native_tool.py | """Native MCP tool wrapper for CrewAI agents.
This module provides a tool wrapper that reuses existing MCP client sessions
for better performance and connection management.
"""
import asyncio
from typing import Any
from crewai.tools import BaseTool
class MCPNativeTool(BaseTool):
    """Native MCP tool that reuses client sessions.

    This tool wrapper is used when agents connect to MCP servers using
    structured configurations. It reuses existing client sessions for
    better performance and proper connection lifecycle management.

    Unlike MCPToolWrapper which connects on-demand, this tool uses
    a shared MCP client instance that maintains a persistent connection.
    """

    def __init__(
        self,
        mcp_client: Any,
        tool_name: str,
        tool_schema: dict[str, Any],
        server_name: str,
        original_tool_name: str | None = None,
    ) -> None:
        """Initialize native MCP tool.

        Args:
            mcp_client: MCPClient instance with active session.
            tool_name: Name of the tool (may be prefixed).
            tool_schema: Schema information for the tool.
            server_name: Name of the MCP server for prefixing.
            original_tool_name: Original name of the tool on the MCP server.
        """
        # Prefix with the server name so identically-named tools from
        # different servers do not collide.
        prefixed_name = f"{server_name}_{tool_name}"

        kwargs: dict[str, Any] = {
            "name": prefixed_name,
            "description": tool_schema.get(
                "description", f"Tool {tool_name} from {server_name}"
            ),
        }
        # BaseTool expects a BaseModel subclass for args_schema; only pass
        # it through when the schema actually supplies one.
        args_schema = tool_schema.get("args_schema")
        if args_schema is not None:
            kwargs["args_schema"] = args_schema

        super().__init__(**kwargs)

        # Instance attributes must be set after super().__init__.
        self._mcp_client = mcp_client
        self._original_tool_name = original_tool_name or tool_name
        self._server_name = server_name

    @property
    def mcp_client(self) -> Any:
        """Get the MCP client instance."""
        return self._mcp_client

    @property
    def original_tool_name(self) -> str:
        """Get the original tool name."""
        return self._original_tool_name

    @property
    def server_name(self) -> str:
        """Get the server name."""
        return self._server_name

    def _run(self, **kwargs: Any) -> str:
        """Execute tool using the MCP client session.

        Args:
            **kwargs: Arguments to pass to the MCP tool.

        Returns:
            Result from the MCP tool execution.

        Raises:
            RuntimeError: If the MCP tool execution fails.
        """
        try:
            # Detect whether we are already inside a running event loop.
            # Only get_running_loop() should be guarded by this except: a
            # broader try would also catch RuntimeErrors from the execution
            # below and wrongly re-run the coroutine.
            try:
                asyncio.get_running_loop()
                in_event_loop = True
            except RuntimeError:
                in_event_loop = False

            if in_event_loop:
                # Can't call asyncio.run() inside a loop; drive the coroutine
                # on a worker thread with its own loop instead.
                import concurrent.futures

                with concurrent.futures.ThreadPoolExecutor() as executor:
                    coro = self._run_async(**kwargs)
                    future = executor.submit(asyncio.run, coro)
                    return future.result()
            return asyncio.run(self._run_async(**kwargs))
        except Exception as e:
            raise RuntimeError(
                f"Error executing MCP tool {self.original_tool_name}: {e!s}"
            ) from e

    async def _run_async(self, **kwargs: Any) -> str:
        """Async implementation of tool execution.

        Args:
            **kwargs: Arguments to pass to the MCP tool.

        Returns:
            Result from the MCP tool execution.
        """
        # asyncio.run() creates a fresh event loop per call, and the MCP
        # transport context managers (stdio, streamable HTTP, SSE) use anyio
        # task groups that cannot span event loops — so always reconnect here
        # and disconnect before returning.
        if self._mcp_client.connected:
            await self._mcp_client.disconnect()
        await self._mcp_client.connect()

        try:
            result = await self._mcp_client.call_tool(self.original_tool_name, kwargs)
        except Exception as e:
            error_str = str(e).lower()
            if (
                "not connected" in error_str
                or "connection" in error_str
                or "send" in error_str
            ):
                # Connection-flavored failure: reconnect once and retry.
                await self._mcp_client.disconnect()
                await self._mcp_client.connect()
                result = await self._mcp_client.call_tool(
                    self.original_tool_name, kwargs
                )
            else:
                raise
        finally:
            # Always disconnect after the call so each transport context
            # manager is exited in the same event loop it was entered in
            # (avoids "exit cancel scope in different task" errors).
            await self._mcp_client.disconnect()

        # Normalize the various MCP result shapes to a plain string.
        if isinstance(result, str):
            return result
        if hasattr(result, "content") and result.content:
            if isinstance(result.content, list) and len(result.content) > 0:
                content_item = result.content[0]
                if hasattr(content_item, "text"):
                    return str(content_item.text)
                return str(content_item)
            return str(result.content)
        return str(result)
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/tools/mcp_native_tool.py",
"license": "MIT License",
"lines": 131,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
crewAIInc/crewAI:lib/crewai/tests/mcp/test_mcp_config.py | import asyncio
from unittest.mock import AsyncMock, patch
import pytest
from crewai.agent.core import Agent
from crewai.mcp.config import MCPServerHTTP, MCPServerSSE, MCPServerStdio
from crewai.tools.base_tool import BaseTool
@pytest.fixture
def mock_tool_definitions():
    """Mock MCP tool definitions shaped like list_tools() output."""
    tool_with_schema = {
        "name": "test_tool_1",
        "description": "Test tool 1 description",
        "inputSchema": {
            "type": "object",
            "properties": {
                "query": {"type": "string", "description": "Search query"}
            },
            "required": ["query"],
        },
    }
    tool_without_schema = {
        "name": "test_tool_2",
        "description": "Test tool 2 description",
        "inputSchema": {},
    }
    return [tool_with_schema, tool_without_schema]
def test_agent_with_stdio_mcp_config(mock_tool_definitions):
    """Agent wired with an MCPServerStdio config resolves tools via MCPClient."""
    stdio_config = MCPServerStdio(
        command="python",
        args=["server.py"],
        env={"API_KEY": "test_key"},
    )
    agent = Agent(
        role="Test Agent",
        goal="Test goal",
        backstory="Test backstory",
        mcps=[stdio_config],
    )

    with patch("crewai.mcp.tool_resolver.MCPClient") as client_cls:
        fake_client = AsyncMock()
        fake_client.connected = False  # forces a connect before listing tools
        fake_client.connect = AsyncMock()
        fake_client.disconnect = AsyncMock()
        fake_client.list_tools = AsyncMock(return_value=mock_tool_definitions)
        client_cls.return_value = fake_client

        resolved = agent.get_mcp_tools([stdio_config])

    assert len(resolved) == 2
    assert all(isinstance(t, BaseTool) for t in resolved)

    # The client must have been built from the stdio config's transport.
    client_cls.assert_called_once()
    transport = client_cls.call_args.kwargs["transport"]
    assert transport.command == "python"
    assert transport.args == ["server.py"]
    assert transport.env == {"API_KEY": "test_key"}
def test_agent_with_http_mcp_config(mock_tool_definitions):
    """Agent wired with an MCPServerHTTP config resolves tools via MCPClient."""
    http_config = MCPServerHTTP(
        url="https://api.example.com/mcp",
        headers={"Authorization": "Bearer test_token"},
        streamable=True,
    )
    agent = Agent(
        role="Test Agent",
        goal="Test goal",
        backstory="Test backstory",
        mcps=[http_config],
    )

    with patch("crewai.mcp.tool_resolver.MCPClient") as client_cls:
        fake_client = AsyncMock()
        fake_client.connected = False  # forces a connect before listing tools
        fake_client.connect = AsyncMock()
        fake_client.disconnect = AsyncMock()
        fake_client.list_tools = AsyncMock(return_value=mock_tool_definitions)
        client_cls.return_value = fake_client

        resolved = agent.get_mcp_tools([http_config])

    assert len(resolved) == 2
    assert all(isinstance(t, BaseTool) for t in resolved)

    # The client must have been built from the HTTP config's transport.
    client_cls.assert_called_once()
    transport = client_cls.call_args.kwargs["transport"]
    assert transport.url == "https://api.example.com/mcp"
    assert transport.headers == {"Authorization": "Bearer test_token"}
    assert transport.streamable is True
def test_agent_with_sse_mcp_config(mock_tool_definitions):
    """Agent wired with an MCPServerSSE config resolves tools via MCPClient."""
    sse_config = MCPServerSSE(
        url="https://api.example.com/mcp/sse",
        headers={"Authorization": "Bearer test_token"},
    )
    agent = Agent(
        role="Test Agent",
        goal="Test goal",
        backstory="Test backstory",
        mcps=[sse_config],
    )

    with patch("crewai.mcp.tool_resolver.MCPClient") as client_cls:
        fake_client = AsyncMock()
        fake_client.connected = False  # forces a connect before listing tools
        fake_client.connect = AsyncMock()
        fake_client.disconnect = AsyncMock()
        fake_client.list_tools = AsyncMock(return_value=mock_tool_definitions)
        client_cls.return_value = fake_client

        resolved = agent.get_mcp_tools([sse_config])

    assert len(resolved) == 2
    assert all(isinstance(t, BaseTool) for t in resolved)

    # The client must have been built from the SSE config's transport.
    client_cls.assert_called_once()
    transport = client_cls.call_args.kwargs["transport"]
    assert transport.url == "https://api.example.com/mcp/sse"
    assert transport.headers == {"Authorization": "Bearer test_token"}
def test_mcp_tool_execution_in_sync_context(mock_tool_definitions):
    """MCPNativeTool executes in a synchronous context (normal crew execution)."""
    http_config = MCPServerHTTP(url="https://api.example.com/mcp")

    with patch("crewai.mcp.tool_resolver.MCPClient") as client_cls:
        fake_client = AsyncMock()
        fake_client.connected = False
        fake_client.connect = AsyncMock()
        fake_client.disconnect = AsyncMock()
        fake_client.list_tools = AsyncMock(return_value=mock_tool_definitions)
        fake_client.call_tool = AsyncMock(return_value="test result")
        client_cls.return_value = fake_client

        agent = Agent(
            role="Test Agent",
            goal="Test goal",
            backstory="Test backstory",
            mcps=[http_config],
        )
        resolved = agent.get_mcp_tools([http_config])
        assert len(resolved) == 2

        # Running the first tool must round-trip through call_tool.
        outcome = resolved[0].run(query="test query")

        assert outcome == "test result"
        fake_client.call_tool.assert_called()
@pytest.mark.asyncio
async def test_mcp_tool_execution_in_async_context(mock_tool_definitions):
    """MCPNativeTool executes inside a running event loop (e.g., from a Flow)."""
    http_config = MCPServerHTTP(url="https://api.example.com/mcp")

    with patch("crewai.mcp.tool_resolver.MCPClient") as client_cls:
        fake_client = AsyncMock()
        fake_client.connected = False
        fake_client.connect = AsyncMock()
        fake_client.disconnect = AsyncMock()
        fake_client.list_tools = AsyncMock(return_value=mock_tool_definitions)
        fake_client.call_tool = AsyncMock(return_value="test result")
        client_cls.return_value = fake_client

        agent = Agent(
            role="Test Agent",
            goal="Test goal",
            backstory="Test backstory",
            mcps=[http_config],
        )
        resolved = agent.get_mcp_tools([http_config])
        assert len(resolved) == 2

        # tool.run is sync; inside a loop it must hop to a worker thread.
        outcome = resolved[0].run(query="test query")

        assert outcome == "test result"
        fake_client.call_tool.assert_called()
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/mcp/test_mcp_config.py",
"license": "MIT License",
"lines": 159,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/src/crewai/llms/hooks/base.py | """Base classes for LLM transport interceptors.
This module provides abstract base classes for intercepting and modifying
outbound and inbound messages at the transport level.
"""
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Any, Generic, TypeVar
from pydantic_core import core_schema
if TYPE_CHECKING:
from pydantic import GetCoreSchemaHandler
from pydantic_core import CoreSchema
T = TypeVar("T")
U = TypeVar("U")
class BaseInterceptor(ABC, Generic[T, U]):
    """Abstract base class for intercepting transport-level messages.

    Provides hooks to intercept and modify outbound and inbound messages
    at the transport layer.

    Type parameters:
        T: Outbound message type (e.g., httpx.Request)
        U: Inbound message type (e.g., httpx.Response)

    Example:
        >>> import httpx
        >>> class CustomInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
        ...     def on_outbound(self, message: httpx.Request) -> httpx.Request:
        ...         message.headers["X-Custom-Header"] = "value"
        ...         return message
        ...
        ...     def on_inbound(self, message: httpx.Response) -> httpx.Response:
        ...         print(f"Status: {message.status_code}")
        ...         return message
    """

    @abstractmethod
    def on_outbound(self, message: T) -> T:
        """Intercept outbound message before sending.

        Args:
            message: Outbound message object.

        Returns:
            Modified message object.
        """
        ...

    @abstractmethod
    def on_inbound(self, message: U) -> U:
        """Intercept inbound message after receiving.

        Args:
            message: Inbound message object.

        Returns:
            Modified message object.
        """
        ...

    async def aon_outbound(self, message: T) -> T:
        """Async version of on_outbound.

        Defaults to delegating to the synchronous hook, so subclasses that
        only implement ``on_outbound`` also work with async transports
        (which await this method unconditionally). Override for genuinely
        asynchronous work.

        Args:
            message: Outbound message object.

        Returns:
            Modified message object.
        """
        return self.on_outbound(message)

    async def aon_inbound(self, message: U) -> U:
        """Async version of on_inbound.

        Defaults to delegating to the synchronous hook; override for
        genuinely asynchronous work.

        Args:
            message: Inbound message object.

        Returns:
            Modified message object.
        """
        return self.on_inbound(message)

    @classmethod
    def __get_pydantic_core_schema__(
        cls, _source_type: Any, _handler: GetCoreSchemaHandler
    ) -> CoreSchema:
        """Generate Pydantic core schema for BaseInterceptor.

        This allows the generic BaseInterceptor to be used in Pydantic models
        without requiring arbitrary_types_allowed=True. The schema validates
        that the value is an instance of BaseInterceptor.

        Args:
            _source_type: The source type being validated (unused).
            _handler: Handler for generating schemas (unused).

        Returns:
            A Pydantic core schema that validates BaseInterceptor instances.
        """
        return core_schema.no_info_plain_validator_function(
            _validate_interceptor,
            serialization=core_schema.plain_serializer_function_ser_schema(
                lambda x: x, return_schema=core_schema.any_schema()
            ),
        )
def _validate_interceptor(value: Any) -> BaseInterceptor[T, U]:
    """Ensure *value* is a BaseInterceptor instance.

    Args:
        value: The value to validate.

    Returns:
        The validated BaseInterceptor instance.

    Raises:
        ValueError: If the value is not a BaseInterceptor instance.
    """
    # Guard clause: accept interceptors, reject everything else.
    if isinstance(value, BaseInterceptor):
        return value
    raise ValueError(
        f"Expected BaseInterceptor instance, got {type(value).__name__}"
    )
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/llms/hooks/base.py",
"license": "MIT License",
"lines": 99,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
crewAIInc/crewAI:lib/crewai/src/crewai/llms/hooks/transport.py | """HTTP transport implementations for LLM request/response interception.
This module provides internal transport classes that integrate with BaseInterceptor
to enable request/response modification at the transport level.
"""
from __future__ import annotations
from collections.abc import Iterable
from typing import TYPE_CHECKING, TypedDict
from httpx import (
AsyncHTTPTransport as _AsyncHTTPTransport,
HTTPTransport as _HTTPTransport,
)
from typing_extensions import NotRequired, Unpack
if TYPE_CHECKING:
from ssl import SSLContext
from httpx import Limits, Request, Response
from httpx._types import CertTypes, ProxyTypes
from crewai.llms.hooks.base import BaseInterceptor
class HTTPTransportKwargs(TypedDict, total=False):
    """Typed dictionary for httpx.HTTPTransport initialization parameters.

    These parameters configure the underlying HTTP transport behavior including
    SSL verification, proxies, connection limits, and low-level socket options.

    All keys are optional (``total=False``) and are forwarded verbatim to
    ``httpx.HTTPTransport`` / ``httpx.AsyncHTTPTransport``.
    """

    # TLS verification: bool toggle, CA bundle path, or a prepared SSLContext.
    verify: bool | str | SSLContext
    # Client certificate for mutual TLS.
    cert: NotRequired[CertTypes]
    # Whether to honor proxy/SSL environment variables (HTTP_PROXY, etc.).
    trust_env: bool
    # Enable HTTP/1.1 and/or HTTP/2 on the connection pool.
    http1: bool
    http2: bool
    # Connection pool limits (httpx.Limits).
    limits: Limits
    # Proxy configuration.
    proxy: NotRequired[ProxyTypes]
    # Path to a Unix domain socket to connect through.
    uds: NotRequired[str]
    # Local address to bind outgoing connections to.
    local_address: NotRequired[str]
    # Number of connect retries performed by the transport.
    retries: int
    # Raw socket options as (level, optname, value[, optlen]) tuples.
    socket_options: NotRequired[
        Iterable[
            tuple[int, int, int]
            | tuple[int, int, bytes | bytearray]
            | tuple[int, int, None, int]
        ]
    ]
class HTTPTransport(_HTTPTransport):
    """Sync httpx transport wired through a BaseInterceptor.

    Created internally when a user supplies an interceptor to the LLM
    client; not intended for direct instantiation.
    """

    def __init__(
        self,
        interceptor: BaseInterceptor[Request, Response],
        **kwargs: Unpack[HTTPTransportKwargs],
    ) -> None:
        """Initialize transport with interceptor.

        Args:
            interceptor: HTTP interceptor for modifying raw request/response objects.
            **kwargs: HTTPTransport configuration parameters (verify, cert, proxy, etc.).
        """
        super().__init__(**kwargs)
        self.interceptor = interceptor

    def handle_request(self, request: Request) -> Response:
        """Run the request through the outbound hook, send it, then run the
        response through the inbound hook.

        Args:
            request: The HTTP request to handle.

        Returns:
            The HTTP response.
        """
        outbound = self.interceptor.on_outbound(request)
        raw_response = super().handle_request(outbound)
        return self.interceptor.on_inbound(raw_response)
class AsyncHTTPTransport(_AsyncHTTPTransport):
    """Async httpx transport wired through a BaseInterceptor.

    Created internally when a user supplies an interceptor to the LLM
    client; not intended for direct instantiation.
    """

    def __init__(
        self,
        interceptor: BaseInterceptor[Request, Response],
        **kwargs: Unpack[HTTPTransportKwargs],
    ) -> None:
        """Initialize async transport with interceptor.

        Args:
            interceptor: HTTP interceptor for modifying raw request/response objects.
            **kwargs: HTTPTransport configuration parameters (verify, cert, proxy, etc.).
        """
        super().__init__(**kwargs)
        self.interceptor = interceptor

    async def handle_async_request(self, request: Request) -> Response:
        """Run the request through the async outbound hook, send it, then run
        the response through the async inbound hook.

        Args:
            request: The HTTP request to handle.

        Returns:
            The HTTP response.
        """
        outbound = await self.interceptor.aon_outbound(request)
        raw_response = await super().handle_async_request(outbound)
        return await self.interceptor.aon_inbound(raw_response)
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/llms/hooks/transport.py",
"license": "MIT License",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
crewAIInc/crewAI:lib/crewai/tests/llms/hooks/test_anthropic_interceptor.py | """Tests for Anthropic provider with interceptor integration."""
import os
import httpx
import pytest
from crewai.llm import LLM
from crewai.llms.hooks.base import BaseInterceptor
@pytest.fixture(autouse=True)
def setup_anthropic_api_key(monkeypatch):
    """Inject a placeholder Anthropic key when no real key is configured."""
    if os.environ.get("ANTHROPIC_API_KEY") is None:
        monkeypatch.setenv("ANTHROPIC_API_KEY", "sk-ant-test-key-dummy")
class AnthropicTestInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
    """Interceptor that records traffic and stamps requests with test headers."""

    def __init__(self) -> None:
        """Set up tracking lists and the header value to inject."""
        self.outbound_calls: list[httpx.Request] = []
        self.inbound_calls: list[httpx.Response] = []
        self.custom_header_value = "anthropic-test-value"

    def on_outbound(self, message: httpx.Request) -> httpx.Request:
        """Record the request and stamp it with the test headers.

        Args:
            message: The outbound request.

        Returns:
            Modified request with custom headers.
        """
        self.outbound_calls.append(message)
        headers = message.headers
        headers["X-Anthropic-Interceptor"] = self.custom_header_value
        headers["X-Request-ID"] = "test-request-456"
        return message

    def on_inbound(self, message: httpx.Response) -> httpx.Response:
        """Record the response and mark it as tracked.

        Args:
            message: The inbound response.

        Returns:
            The response with tracking header.
        """
        self.inbound_calls.append(message)
        message.headers["X-Response-Tracked"] = "true"
        return message
class TestAnthropicInterceptorIntegration:
    """Integration tests for the Anthropic provider with an interceptor."""

    def test_anthropic_llm_accepts_interceptor(self) -> None:
        """The LLM stores the interceptor instance it was constructed with."""
        hook = AnthropicTestInterceptor()
        llm = LLM(model="anthropic/claude-3-5-sonnet-20241022", interceptor=hook)
        assert llm.interceptor is hook

    @pytest.mark.vcr()
    def test_anthropic_call_with_interceptor_tracks_requests(self) -> None:
        """The interceptor tags every request/response of a recorded call."""
        hook = AnthropicTestInterceptor()
        llm = LLM(model="anthropic/claude-3-5-haiku-20241022", interceptor=hook)

        result = llm.call(
            messages=[{"role": "user", "content": "Say 'Hello World' and nothing else"}]
        )

        # Every outbound request carries the injected headers.
        for req in hook.outbound_calls:
            assert "X-Anthropic-Interceptor" in req.headers
            assert req.headers["X-Anthropic-Interceptor"] == "anthropic-test-value"
            assert "X-Request-ID" in req.headers
            assert req.headers["X-Request-ID"] == "test-request-456"

        # Every inbound response was tagged by the interceptor.
        for resp in hook.inbound_calls:
            assert "X-Response-Tracked" in resp.headers
            assert resp.headers["X-Response-Tracked"] == "true"

        # The completion itself succeeded.
        assert result is not None
        assert isinstance(result, str)
        assert len(result) > 0

    def test_anthropic_without_interceptor_works(self) -> None:
        """An LLM built without an interceptor reports None."""
        llm = LLM(model="anthropic/claude-3-5-sonnet-20241022")
        assert llm.interceptor is None

    def test_multiple_anthropic_llms_different_interceptors(self) -> None:
        """Each LLM keeps its own independent interceptor instance."""
        opus_hook = AnthropicTestInterceptor()
        opus_hook.custom_header_value = "claude-opus-value"
        sonnet_hook = AnthropicTestInterceptor()
        sonnet_hook.custom_header_value = "claude-sonnet-value"

        llm1 = LLM(model="anthropic/claude-3-opus-20240229", interceptor=opus_hook)
        llm2 = LLM(model="anthropic/claude-3-5-sonnet-20241022", interceptor=sonnet_hook)

        assert llm1.interceptor is opus_hook
        assert llm2.interceptor is sonnet_hook
        assert llm1.interceptor.custom_header_value == "claude-opus-value"
        assert llm2.interceptor.custom_header_value == "claude-sonnet-value"
class AnthropicLoggingInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
    """Interceptor that records Anthropic request/response details."""

    def __init__(self) -> None:
        """Set up the capture lists."""
        self.request_urls: list[str] = []
        self.request_methods: list[str] = []
        self.response_status_codes: list[int] = []
        self.anthropic_version_headers: list[str] = []

    def on_outbound(self, message: httpx.Request) -> httpx.Request:
        """Capture URL, method, and anthropic-version header; pass through.

        Args:
            message: The outbound request.

        Returns:
            The request unchanged.
        """
        self.request_urls.append(str(message.url))
        self.request_methods.append(message.method)
        version = message.headers.get("anthropic-version")
        if version is not None:
            self.anthropic_version_headers.append(version)
        return message

    def on_inbound(self, message: httpx.Response) -> httpx.Response:
        """Capture the status code; pass the response through unchanged.

        Args:
            message: The inbound response.

        Returns:
            The response unchanged.
        """
        self.response_status_codes.append(message.status_code)
        return message
class TestAnthropicLoggingInterceptor:
    """Tests for the logging interceptor with Anthropic."""

    def test_logging_interceptor_instantiation(self) -> None:
        """The logging interceptor attaches cleanly to an Anthropic LLM."""
        hook = AnthropicLoggingInterceptor()
        llm = LLM(model="anthropic/claude-3-5-sonnet-20241022", interceptor=hook)
        assert llm.interceptor is hook
        assert isinstance(llm.interceptor, AnthropicLoggingInterceptor)

    @pytest.mark.vcr()
    def test_logging_interceptor_tracks_details(self) -> None:
        """The interceptor captures URL, method, and status of each exchange."""
        hook = AnthropicLoggingInterceptor()
        llm = LLM(model="anthropic/claude-3-5-haiku-20241022", interceptor=hook)

        result = llm.call(messages=[{"role": "user", "content": "Count from 1 to 3"}])

        # URLs should target the Anthropic API.
        assert all(
            "anthropic" in url.lower() or "api" in url.lower()
            for url in hook.request_urls
        )
        # The messages endpoint only accepts POST.
        assert all(method == "POST" for method in hook.request_methods)
        # Each recorded exchange completed successfully.
        assert all(200 <= code < 300 for code in hook.response_status_codes)
        assert result is not None
class AnthropicHeaderInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
    """Interceptor that adds Anthropic-specific metadata headers."""

    def __init__(self, workspace_id: str, user_id: str) -> None:
        """Store the identifiers to inject into each request.

        Args:
            workspace_id: The workspace ID to inject.
            user_id: The user ID to inject.
        """
        self.workspace_id = workspace_id
        self.user_id = user_id

    def on_outbound(self, message: httpx.Request) -> httpx.Request:
        """Stamp the request with workspace/user/client metadata headers.

        Args:
            message: The outbound request.

        Returns:
            Request with metadata headers.
        """
        metadata = {
            "X-Workspace-ID": self.workspace_id,
            "X-User-ID": self.user_id,
            "X-Custom-Client": "crewai-interceptor",
        }
        for header, value in metadata.items():
            message.headers[header] = value
        return message

    def on_inbound(self, message: httpx.Response) -> httpx.Response:
        """Return the response unchanged.

        Args:
            message: The inbound response.

        Returns:
            The response unchanged.
        """
        return message
class TestAnthropicHeaderInterceptor:
    """Tests for the header-injecting interceptor with Anthropic."""

    def test_header_interceptor_with_anthropic(self) -> None:
        """The interceptor and its identifiers are attached to the LLM."""
        hook = AnthropicHeaderInterceptor(workspace_id="ws-789", user_id="user-012")
        llm = LLM(model="anthropic/claude-3-5-sonnet-20241022", interceptor=hook)
        assert llm.interceptor is hook
        assert llm.interceptor.workspace_id == "ws-789"
        assert llm.interceptor.user_id == "user-012"

    def test_header_interceptor_adds_headers(self) -> None:
        """on_outbound stamps all three custom headers onto the request."""
        hook = AnthropicHeaderInterceptor(workspace_id="ws-123", user_id="u-456")
        request = httpx.Request("POST", "https://api.anthropic.com/v1/messages")

        tagged = hook.on_outbound(request)

        expected = {
            "X-Workspace-ID": "ws-123",
            "X-User-ID": "u-456",
            "X-Custom-Client": "crewai-interceptor",
        }
        for header, value in expected.items():
            assert header in tagged.headers
            assert tagged.headers[header] == value

    @pytest.mark.vcr()
    def test_header_interceptor_with_real_call(self) -> None:
        """A recorded API call still succeeds with the header interceptor."""
        hook = AnthropicHeaderInterceptor(workspace_id="ws-999", user_id="u-888")
        llm = LLM(model="anthropic/claude-3-5-haiku-20241022", interceptor=hook)

        result = llm.call(
            messages=[{"role": "user", "content": "Reply with just the word: SUCCESS"}]
        )

        assert result is not None
        assert len(result) > 0
        assert llm.interceptor is hook
class TestMixedProviderInterceptors:
    """Tests for sharing or separating interceptors across providers."""

    def test_openai_and_anthropic_different_interceptors(self) -> None:
        """Distinct providers can carry distinct interceptor instances."""
        openai_hook = AnthropicTestInterceptor()
        openai_hook.custom_header_value = "openai-specific"
        anthropic_hook = AnthropicTestInterceptor()
        anthropic_hook.custom_header_value = "anthropic-specific"

        openai_llm = LLM(model="gpt-4", interceptor=openai_hook)
        anthropic_llm = LLM(
            model="anthropic/claude-3-5-sonnet-20241022", interceptor=anthropic_hook
        )

        assert openai_llm.interceptor is openai_hook
        assert anthropic_llm.interceptor is anthropic_hook
        assert openai_llm.interceptor.custom_header_value == "openai-specific"
        assert anthropic_llm.interceptor.custom_header_value == "anthropic-specific"

    def test_same_interceptor_different_providers(self) -> None:
        """A single interceptor instance may back several providers at once."""
        shared_hook = AnthropicTestInterceptor()

        openai_llm = LLM(model="gpt-4", interceptor=shared_hook)
        anthropic_llm = LLM(
            model="anthropic/claude-3-5-sonnet-20241022", interceptor=shared_hook
        )

        assert openai_llm.interceptor is shared_hook
        assert anthropic_llm.interceptor is shared_hook
        assert openai_llm.interceptor is anthropic_llm.interceptor
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/llms/hooks/test_anthropic_interceptor.py",
"license": "MIT License",
"lines": 231,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/tests/llms/hooks/test_base_interceptor.py | """Tests for base interceptor functionality."""
import httpx
import pytest
from crewai.llms.hooks.base import BaseInterceptor
class SimpleInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
"""Simple test interceptor implementation."""
def __init__(self) -> None:
"""Initialize tracking lists."""
self.outbound_calls: list[httpx.Request] = []
self.inbound_calls: list[httpx.Response] = []
def on_outbound(self, message: httpx.Request) -> httpx.Request:
"""Track outbound calls.
Args:
message: The outbound request.
Returns:
The request unchanged.
"""
self.outbound_calls.append(message)
return message
def on_inbound(self, message: httpx.Response) -> httpx.Response:
"""Track inbound calls.
Args:
message: The inbound response.
Returns:
The response unchanged.
"""
self.inbound_calls.append(message)
return message
class ModifyingInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
"""Interceptor that modifies requests and responses."""
def on_outbound(self, message: httpx.Request) -> httpx.Request:
"""Add custom header to outbound request.
Args:
message: The outbound request.
Returns:
Modified request with custom header.
"""
message.headers["X-Custom-Header"] = "test-value"
message.headers["X-Intercepted"] = "true"
return message
def on_inbound(self, message: httpx.Response) -> httpx.Response:
"""Add custom header to inbound response.
Args:
message: The inbound response.
Returns:
Modified response with custom header.
"""
message.headers["X-Response-Intercepted"] = "true"
return message
class AsyncInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
"""Interceptor with async support."""
def __init__(self) -> None:
"""Initialize tracking lists."""
self.async_outbound_calls: list[httpx.Request] = []
self.async_inbound_calls: list[httpx.Response] = []
def on_outbound(self, message: httpx.Request) -> httpx.Request:
"""Handle sync outbound.
Args:
message: The outbound request.
Returns:
The request unchanged.
"""
return message
def on_inbound(self, message: httpx.Response) -> httpx.Response:
"""Handle sync inbound.
Args:
message: The inbound response.
Returns:
The response unchanged.
"""
return message
async def aon_outbound(self, message: httpx.Request) -> httpx.Request:
"""Handle async outbound.
Args:
message: The outbound request.
Returns:
Modified request with async header.
"""
self.async_outbound_calls.append(message)
message.headers["X-Async-Outbound"] = "true"
return message
async def aon_inbound(self, message: httpx.Response) -> httpx.Response:
"""Handle async inbound.
Args:
message: The inbound response.
Returns:
Modified response with async header.
"""
self.async_inbound_calls.append(message)
message.headers["X-Async-Inbound"] = "true"
return message
class TestBaseInterceptor:
"""Test suite for BaseInterceptor class."""
def test_interceptor_instantiation(self) -> None:
"""Test that interceptor can be instantiated."""
interceptor = SimpleInterceptor()
assert interceptor is not None
assert isinstance(interceptor, BaseInterceptor)
def test_on_outbound_called(self) -> None:
"""Test that on_outbound is called and tracks requests."""
interceptor = SimpleInterceptor()
request = httpx.Request("GET", "https://api.example.com/test")
result = interceptor.on_outbound(request)
assert len(interceptor.outbound_calls) == 1
assert interceptor.outbound_calls[0] is request
assert result is request
def test_on_inbound_called(self) -> None:
"""Test that on_inbound is called and tracks responses."""
interceptor = SimpleInterceptor()
response = httpx.Response(200, json={"status": "ok"})
result = interceptor.on_inbound(response)
assert len(interceptor.inbound_calls) == 1
assert interceptor.inbound_calls[0] is response
assert result is response
def test_multiple_outbound_calls(self) -> None:
"""Test that interceptor tracks multiple outbound calls."""
interceptor = SimpleInterceptor()
requests = [
httpx.Request("GET", "https://api.example.com/1"),
httpx.Request("POST", "https://api.example.com/2"),
httpx.Request("PUT", "https://api.example.com/3"),
]
for req in requests:
interceptor.on_outbound(req)
assert len(interceptor.outbound_calls) == 3
assert interceptor.outbound_calls == requests
def test_multiple_inbound_calls(self) -> None:
"""Test that interceptor tracks multiple inbound calls."""
interceptor = SimpleInterceptor()
responses = [
httpx.Response(200, json={"id": 1}),
httpx.Response(201, json={"id": 2}),
httpx.Response(404, json={"error": "not found"}),
]
for resp in responses:
interceptor.on_inbound(resp)
assert len(interceptor.inbound_calls) == 3
assert interceptor.inbound_calls == responses
class TestModifyingInterceptor:
"""Test suite for interceptor that modifies messages."""
def test_outbound_header_modification(self) -> None:
"""Test that interceptor can add headers to outbound requests."""
interceptor = ModifyingInterceptor()
request = httpx.Request("GET", "https://api.example.com/test")
result = interceptor.on_outbound(request)
assert result is request
assert "X-Custom-Header" in result.headers
assert result.headers["X-Custom-Header"] == "test-value"
assert "X-Intercepted" in result.headers
assert result.headers["X-Intercepted"] == "true"
def test_inbound_header_modification(self) -> None:
"""Test that interceptor can add headers to inbound responses."""
interceptor = ModifyingInterceptor()
response = httpx.Response(200, json={"status": "ok"})
result = interceptor.on_inbound(response)
assert result is response
assert "X-Response-Intercepted" in result.headers
assert result.headers["X-Response-Intercepted"] == "true"
def test_preserves_existing_headers(self) -> None:
"""Test that interceptor preserves existing headers."""
interceptor = ModifyingInterceptor()
request = httpx.Request(
"GET",
"https://api.example.com/test",
headers={"Authorization": "Bearer token123", "Content-Type": "application/json"},
)
result = interceptor.on_outbound(request)
assert result.headers["Authorization"] == "Bearer token123"
assert result.headers["Content-Type"] == "application/json"
assert result.headers["X-Custom-Header"] == "test-value"
class TestAsyncInterceptor:
"""Test suite for async interceptor functionality."""
def test_sync_methods_work(self) -> None:
"""Test that sync methods still work on async interceptor."""
interceptor = AsyncInterceptor()
request = httpx.Request("GET", "https://api.example.com/test")
response = httpx.Response(200)
req_result = interceptor.on_outbound(request)
resp_result = interceptor.on_inbound(response)
assert req_result is request
assert resp_result is response
@pytest.mark.asyncio
async def test_async_outbound(self) -> None:
"""Test async outbound hook."""
interceptor = AsyncInterceptor()
request = httpx.Request("GET", "https://api.example.com/test")
result = await interceptor.aon_outbound(request)
assert result is request
assert len(interceptor.async_outbound_calls) == 1
assert interceptor.async_outbound_calls[0] is request
assert "X-Async-Outbound" in result.headers
assert result.headers["X-Async-Outbound"] == "true"
@pytest.mark.asyncio
async def test_async_inbound(self) -> None:
"""Test async inbound hook."""
interceptor = AsyncInterceptor()
response = httpx.Response(200, json={"status": "ok"})
result = await interceptor.aon_inbound(response)
assert result is response
assert len(interceptor.async_inbound_calls) == 1
assert interceptor.async_inbound_calls[0] is response
assert "X-Async-Inbound" in result.headers
assert result.headers["X-Async-Inbound"] == "true"
@pytest.mark.asyncio
async def test_default_async_not_implemented(self) -> None:
"""Test that default async methods raise NotImplementedError."""
interceptor = SimpleInterceptor()
request = httpx.Request("GET", "https://api.example.com/test")
response = httpx.Response(200)
with pytest.raises(NotImplementedError):
await interceptor.aon_outbound(request)
with pytest.raises(NotImplementedError):
await interceptor.aon_inbound(response)
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/llms/hooks/test_base_interceptor.py",
"license": "MIT License",
"lines": 213,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/tests/llms/hooks/test_openai_interceptor.py | """Tests for OpenAI provider with interceptor integration."""
import httpx
import pytest
from crewai.llm import LLM
from crewai.llms.hooks.base import BaseInterceptor
class OpenAITestInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
"""Test interceptor for OpenAI provider."""
def __init__(self) -> None:
"""Initialize tracking and modification state."""
self.outbound_calls: list[httpx.Request] = []
self.inbound_calls: list[httpx.Response] = []
self.custom_header_value = "openai-test-value"
def on_outbound(self, message: httpx.Request) -> httpx.Request:
"""Track and modify outbound OpenAI requests.
Args:
message: The outbound request.
Returns:
Modified request with custom headers.
"""
self.outbound_calls.append(message)
message.headers["X-OpenAI-Interceptor"] = self.custom_header_value
message.headers["X-Request-ID"] = "test-request-123"
return message
def on_inbound(self, message: httpx.Response) -> httpx.Response:
"""Track inbound OpenAI responses.
Args:
message: The inbound response.
Returns:
The response with tracking header.
"""
self.inbound_calls.append(message)
message.headers["X-Response-Tracked"] = "true"
return message
class TestOpenAIInterceptorIntegration:
"""Test suite for OpenAI provider with interceptor."""
def test_openai_llm_accepts_interceptor(self) -> None:
"""Test that OpenAI LLM accepts interceptor parameter."""
interceptor = OpenAITestInterceptor()
llm = LLM(model="gpt-4", interceptor=interceptor)
assert llm.interceptor is interceptor
@pytest.mark.vcr()
def test_openai_call_with_interceptor_tracks_requests(self) -> None:
"""Test that interceptor tracks OpenAI API requests."""
interceptor = OpenAITestInterceptor()
llm = LLM(model="gpt-4o-mini", interceptor=interceptor)
# Make a simple completion call
result = llm.call(
messages=[{"role": "user", "content": "Say 'Hello World' and nothing else"}]
)
# Verify custom headers were added
for request in interceptor.outbound_calls:
assert "X-OpenAI-Interceptor" in request.headers
assert request.headers["X-OpenAI-Interceptor"] == "openai-test-value"
assert "X-Request-ID" in request.headers
assert request.headers["X-Request-ID"] == "test-request-123"
# Verify response was tracked
for response in interceptor.inbound_calls:
assert "X-Response-Tracked" in response.headers
assert response.headers["X-Response-Tracked"] == "true"
# Verify result is valid
assert result is not None
assert isinstance(result, str)
assert len(result) > 0
def test_openai_without_interceptor_works(self) -> None:
"""Test that OpenAI LLM works without interceptor."""
llm = LLM(model="gpt-4")
assert llm.interceptor is None
def test_multiple_openai_llms_different_interceptors(self) -> None:
"""Test that multiple OpenAI LLMs can have different interceptors."""
interceptor1 = OpenAITestInterceptor()
interceptor1.custom_header_value = "llm1-value"
interceptor2 = OpenAITestInterceptor()
interceptor2.custom_header_value = "llm2-value"
llm1 = LLM(model="gpt-4", interceptor=interceptor1)
llm2 = LLM(model="gpt-3.5-turbo", interceptor=interceptor2)
assert llm1.interceptor is interceptor1
assert llm2.interceptor is interceptor2
assert llm1.interceptor.custom_header_value == "llm1-value"
assert llm2.interceptor.custom_header_value == "llm2-value"
class LoggingInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
"""Interceptor that logs request/response details for testing."""
def __init__(self) -> None:
"""Initialize logging lists."""
self.request_urls: list[str] = []
self.request_methods: list[str] = []
self.response_status_codes: list[int] = []
def on_outbound(self, message: httpx.Request) -> httpx.Request:
"""Log outbound request details.
Args:
message: The outbound request.
Returns:
The request unchanged.
"""
self.request_urls.append(str(message.url))
self.request_methods.append(message.method)
return message
def on_inbound(self, message: httpx.Response) -> httpx.Response:
"""Log inbound response details.
Args:
message: The inbound response.
Returns:
The response unchanged.
"""
self.response_status_codes.append(message.status_code)
return message
class TestOpenAILoggingInterceptor:
"""Test suite for logging interceptor with OpenAI."""
def test_logging_interceptor_instantiation(self) -> None:
"""Test that logging interceptor can be created with OpenAI LLM."""
interceptor = LoggingInterceptor()
llm = LLM(model="gpt-4", interceptor=interceptor)
assert llm.interceptor is interceptor
assert isinstance(llm.interceptor, LoggingInterceptor)
@pytest.mark.vcr()
def test_logging_interceptor_tracks_details(self) -> None:
"""Test that logging interceptor tracks request/response details."""
interceptor = LoggingInterceptor()
llm = LLM(model="gpt-4o-mini", interceptor=interceptor)
# Make a completion call
result = llm.call(
messages=[{"role": "user", "content": "Count from 1 to 3"}]
)
# Verify URL points to OpenAI API
for url in interceptor.request_urls:
assert "openai" in url.lower() or "api" in url.lower()
# Verify methods are POST (chat completions use POST)
for method in interceptor.request_methods:
assert method == "POST"
# Verify successful status codes
for status_code in interceptor.response_status_codes:
assert 200 <= status_code < 300
# Verify result is valid
assert result is not None
class AuthInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
"""Interceptor that adds authentication headers."""
def __init__(self, api_key: str, org_id: str) -> None:
"""Initialize with auth credentials.
Args:
api_key: The API key to inject.
org_id: The organization ID to inject.
"""
self.api_key = api_key
self.org_id = org_id
def on_outbound(self, message: httpx.Request) -> httpx.Request:
"""Add authentication headers to request.
Args:
message: The outbound request.
Returns:
Request with auth headers.
"""
message.headers["X-Custom-API-Key"] = self.api_key
message.headers["X-Organization-ID"] = self.org_id
return message
def on_inbound(self, message: httpx.Response) -> httpx.Response:
"""Pass through inbound response.
Args:
message: The inbound response.
Returns:
The response unchanged.
"""
return message
class TestOpenAIAuthInterceptor:
"""Test suite for authentication interceptor with OpenAI."""
def test_auth_interceptor_with_openai(self) -> None:
"""Test that auth interceptor can be used with OpenAI LLM."""
interceptor = AuthInterceptor(api_key="custom-key-123", org_id="org-456")
llm = LLM(model="gpt-4", interceptor=interceptor)
assert llm.interceptor is interceptor
assert llm.interceptor.api_key == "custom-key-123"
assert llm.interceptor.org_id == "org-456"
def test_auth_interceptor_adds_headers(self) -> None:
"""Test that auth interceptor adds custom headers to requests."""
interceptor = AuthInterceptor(api_key="test-key", org_id="test-org")
request = httpx.Request("POST", "https://api.openai.com/v1/chat/completions")
modified_request = interceptor.on_outbound(request)
assert "X-Custom-API-Key" in modified_request.headers
assert modified_request.headers["X-Custom-API-Key"] == "test-key"
assert "X-Organization-ID" in modified_request.headers
assert modified_request.headers["X-Organization-ID"] == "test-org"
@pytest.mark.vcr()
def test_auth_interceptor_with_real_call(self) -> None:
"""Test that auth interceptor works with real OpenAI API call."""
interceptor = AuthInterceptor(api_key="custom-123", org_id="org-789")
llm = LLM(model="gpt-4o-mini", interceptor=interceptor)
# Make a simple call
result = llm.call(
messages=[{"role": "user", "content": "Reply with just the word: SUCCESS"}]
)
# Verify the call succeeded
assert result is not None
assert len(result) > 0
# Verify headers were added to outbound requests
# (We can't directly inspect the request sent to OpenAI in this test,
# but we verify the interceptor was configured and the call succeeded)
assert llm.interceptor is interceptor
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/llms/hooks/test_openai_interceptor.py",
"license": "MIT License",
"lines": 195,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/tests/llms/hooks/test_transport.py | """Tests for transport layer with interceptor integration."""
from unittest.mock import Mock
import httpx
import pytest
from crewai.llms.hooks.base import BaseInterceptor
from crewai.llms.hooks.transport import AsyncHTTPTransport, HTTPTransport
class TrackingInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
"""Test interceptor that tracks all calls."""
def __init__(self) -> None:
"""Initialize tracking lists."""
self.outbound_calls: list[httpx.Request] = []
self.inbound_calls: list[httpx.Response] = []
self.async_outbound_calls: list[httpx.Request] = []
self.async_inbound_calls: list[httpx.Response] = []
def on_outbound(self, message: httpx.Request) -> httpx.Request:
"""Track outbound calls and add header.
Args:
message: The outbound request.
Returns:
Modified request with tracking header.
"""
self.outbound_calls.append(message)
message.headers["X-Intercepted-Sync"] = "true"
return message
def on_inbound(self, message: httpx.Response) -> httpx.Response:
"""Track inbound calls.
Args:
message: The inbound response.
Returns:
The response with tracking header.
"""
self.inbound_calls.append(message)
message.headers["X-Response-Intercepted-Sync"] = "true"
return message
async def aon_outbound(self, message: httpx.Request) -> httpx.Request:
"""Track async outbound calls and add header.
Args:
message: The outbound request.
Returns:
Modified request with tracking header.
"""
self.async_outbound_calls.append(message)
message.headers["X-Intercepted-Async"] = "true"
return message
async def aon_inbound(self, message: httpx.Response) -> httpx.Response:
"""Track async inbound calls.
Args:
message: The inbound response.
Returns:
The response with tracking header.
"""
self.async_inbound_calls.append(message)
message.headers["X-Response-Intercepted-Async"] = "true"
return message
class TestHTTPTransport:
"""Test suite for sync HTTPTransport with interceptor."""
def test_transport_instantiation(self) -> None:
"""Test that transport can be instantiated with interceptor."""
interceptor = TrackingInterceptor()
transport = HTTPTransport(interceptor=interceptor)
assert transport.interceptor is interceptor
def test_transport_requires_interceptor(self) -> None:
"""Test that transport requires interceptor parameter."""
# HTTPTransport requires an interceptor parameter
with pytest.raises(TypeError):
HTTPTransport()
def test_interceptor_called_on_request(self) -> None:
"""Test that interceptor hooks are called during request handling."""
interceptor = TrackingInterceptor()
transport = HTTPTransport(interceptor=interceptor)
# Create a mock parent transport that returns a response
mock_response = httpx.Response(200, json={"success": True})
mock_parent_handle = Mock(return_value=mock_response)
# Monkey-patch the parent's handle_request
original_handle = httpx.HTTPTransport.handle_request
httpx.HTTPTransport.handle_request = mock_parent_handle
try:
request = httpx.Request("GET", "https://api.example.com/test")
response = transport.handle_request(request)
# Verify interceptor was called
assert len(interceptor.outbound_calls) == 1
assert len(interceptor.inbound_calls) == 1
assert interceptor.outbound_calls[0] is request
assert interceptor.inbound_calls[0] is response
# Verify headers were added
assert "X-Intercepted-Sync" in request.headers
assert request.headers["X-Intercepted-Sync"] == "true"
assert "X-Response-Intercepted-Sync" in response.headers
assert response.headers["X-Response-Intercepted-Sync"] == "true"
finally:
# Restore original method
httpx.HTTPTransport.handle_request = original_handle
class TestAsyncHTTPTransport:
"""Test suite for async AsyncHTTPransport with interceptor."""
def test_async_transport_instantiation(self) -> None:
"""Test that async transport can be instantiated with interceptor."""
interceptor = TrackingInterceptor()
transport = AsyncHTTPTransport(interceptor=interceptor)
assert transport.interceptor is interceptor
def test_async_transport_requires_interceptor(self) -> None:
"""Test that async transport requires interceptor parameter."""
# AsyncHTTPransport requires an interceptor parameter
with pytest.raises(TypeError):
AsyncHTTPTransport()
@pytest.mark.asyncio
async def test_async_interceptor_called_on_request(self) -> None:
"""Test that async interceptor hooks are called during request handling."""
interceptor = TrackingInterceptor()
transport = AsyncHTTPTransport(interceptor=interceptor)
# Create a mock parent transport that returns a response
mock_response = httpx.Response(200, json={"success": True})
async def mock_handle(*args, **kwargs):
return mock_response
mock_parent_handle = Mock(side_effect=mock_handle)
# Monkey-patch the parent's handle_async_request
original_handle = httpx.AsyncHTTPTransport.handle_async_request
httpx.AsyncHTTPTransport.handle_async_request = mock_parent_handle
try:
request = httpx.Request("GET", "https://api.example.com/test")
response = await transport.handle_async_request(request)
# Verify async interceptor was called
assert len(interceptor.async_outbound_calls) == 1
assert len(interceptor.async_inbound_calls) == 1
assert interceptor.async_outbound_calls[0] is request
assert interceptor.async_inbound_calls[0] is response
# Verify sync interceptor was NOT called
assert len(interceptor.outbound_calls) == 0
assert len(interceptor.inbound_calls) == 0
# Verify async headers were added
assert "X-Intercepted-Async" in request.headers
assert request.headers["X-Intercepted-Async"] == "true"
assert "X-Response-Intercepted-Async" in response.headers
assert response.headers["X-Response-Intercepted-Async"] == "true"
finally:
# Restore original method
httpx.AsyncHTTPTransport.handle_async_request = original_handle
class TestTransportIntegration:
"""Test suite for transport integration scenarios."""
def test_multiple_requests_same_interceptor(self) -> None:
"""Test that multiple requests through same interceptor are tracked."""
interceptor = TrackingInterceptor()
transport = HTTPTransport(interceptor=interceptor)
mock_response = httpx.Response(200)
mock_parent_handle = Mock(return_value=mock_response)
original_handle = httpx.HTTPTransport.handle_request
httpx.HTTPTransport.handle_request = mock_parent_handle
try:
# Make multiple requests
requests = [
httpx.Request("GET", "https://api.example.com/1"),
httpx.Request("POST", "https://api.example.com/2"),
httpx.Request("PUT", "https://api.example.com/3"),
]
for req in requests:
transport.handle_request(req)
# Verify all requests were intercepted
assert len(interceptor.outbound_calls) == 3
assert len(interceptor.inbound_calls) == 3
assert interceptor.outbound_calls == requests
finally:
httpx.HTTPTransport.handle_request = original_handle
@pytest.mark.asyncio
async def test_multiple_async_requests_same_interceptor(self) -> None:
"""Test that multiple async requests through same interceptor are tracked."""
interceptor = TrackingInterceptor()
transport = AsyncHTTPTransport(interceptor=interceptor)
mock_response = httpx.Response(200)
async def mock_handle(*args, **kwargs):
return mock_response
mock_parent_handle = Mock(side_effect=mock_handle)
original_handle = httpx.AsyncHTTPTransport.handle_async_request
httpx.AsyncHTTPTransport.handle_async_request = mock_parent_handle
try:
# Make multiple async requests
requests = [
httpx.Request("GET", "https://api.example.com/1"),
httpx.Request("POST", "https://api.example.com/2"),
httpx.Request("DELETE", "https://api.example.com/3"),
]
for req in requests:
await transport.handle_async_request(req)
# Verify all requests were intercepted
assert len(interceptor.async_outbound_calls) == 3
assert len(interceptor.async_inbound_calls) == 3
assert interceptor.async_outbound_calls == requests
finally:
httpx.AsyncHTTPTransport.handle_async_request = original_handle
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/llms/hooks/test_transport.py",
"license": "MIT License",
"lines": 187,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/tests/llms/hooks/test_unsupported_providers.py | """Tests for interceptor behavior with unsupported providers."""
import os
import httpx
import pytest
from crewai.llm import LLM
from crewai.llms.hooks.base import BaseInterceptor
@pytest.fixture(autouse=True)
def setup_provider_api_keys(monkeypatch):
"""Set dummy API keys for providers that require them."""
if "OPENAI_API_KEY" not in os.environ:
monkeypatch.setenv("OPENAI_API_KEY", "sk-test-key-dummy")
if "ANTHROPIC_API_KEY" not in os.environ:
monkeypatch.setenv("ANTHROPIC_API_KEY", "sk-ant-test-key-dummy")
if "GOOGLE_API_KEY" not in os.environ:
monkeypatch.setenv("GOOGLE_API_KEY", "test-google-key-dummy")
class DummyInterceptor(BaseInterceptor[httpx.Request, httpx.Response]):
"""Simple dummy interceptor for testing."""
def on_outbound(self, message: httpx.Request) -> httpx.Request:
"""Pass through outbound request.
Args:
message: The outbound request.
Returns:
The request unchanged.
"""
message.headers["X-Dummy"] = "true"
return message
def on_inbound(self, message: httpx.Response) -> httpx.Response:
"""Pass through inbound response.
Args:
message: The inbound response.
Returns:
The response unchanged.
"""
return message
class TestAzureProviderInterceptor:
"""Test suite for Azure provider with interceptor (unsupported)."""
def test_azure_llm_accepts_interceptor_parameter(self) -> None:
"""Test that Azure LLM raises NotImplementedError with interceptor."""
interceptor = DummyInterceptor()
# Azure provider should raise NotImplementedError
with pytest.raises(NotImplementedError) as exc_info:
LLM(
model="azure/gpt-4",
interceptor=interceptor,
api_key="test-key",
endpoint="https://test.openai.azure.com/openai/deployments/gpt-4",
)
assert "interceptor" in str(exc_info.value).lower()
def test_azure_raises_not_implemented_on_initialization(self) -> None:
"""Test that Azure raises NotImplementedError when interceptor is used."""
interceptor = DummyInterceptor()
with pytest.raises(NotImplementedError) as exc_info:
LLM(
model="azure/gpt-4",
interceptor=interceptor,
api_key="test-key",
endpoint="https://test.openai.azure.com/openai/deployments/gpt-4",
)
error_msg = str(exc_info.value).lower()
assert "interceptor" in error_msg
assert "azure" in error_msg
def test_azure_without_interceptor_works(self) -> None:
"""Test that Azure LLM works without interceptor."""
llm = LLM(
model="azure/gpt-4",
api_key="test-key",
endpoint="https://test.openai.azure.com/openai/deployments/gpt-4",
)
# Azure provider doesn't have interceptor attribute
assert not hasattr(llm, 'interceptor') or llm.interceptor is None
class TestBedrockProviderInterceptor:
"""Test suite for Bedrock provider with interceptor (unsupported)."""
def test_bedrock_llm_accepts_interceptor_parameter(self) -> None:
"""Test that Bedrock LLM raises NotImplementedError with interceptor."""
interceptor = DummyInterceptor()
# Bedrock provider should raise NotImplementedError
with pytest.raises(NotImplementedError) as exc_info:
LLM(
model="bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
interceptor=interceptor,
aws_access_key_id="test-access-key",
aws_secret_access_key="test-secret-key",
aws_region_name="us-east-1",
)
error_msg = str(exc_info.value).lower()
assert "interceptor" in error_msg
assert "bedrock" in error_msg
def test_bedrock_raises_not_implemented_on_initialization(self) -> None:
"""Test that Bedrock raises NotImplementedError when interceptor is used."""
interceptor = DummyInterceptor()
with pytest.raises(NotImplementedError) as exc_info:
LLM(
model="bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
interceptor=interceptor,
aws_access_key_id="test-access-key",
aws_secret_access_key="test-secret-key",
aws_region_name="us-east-1",
)
error_msg = str(exc_info.value).lower()
assert "interceptor" in error_msg
assert "bedrock" in error_msg
def test_bedrock_without_interceptor_works(self) -> None:
"""Test that Bedrock LLM works without interceptor."""
llm = LLM(
model="bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
aws_access_key_id="test-access-key",
aws_secret_access_key="test-secret-key",
aws_region_name="us-east-1",
)
# Bedrock provider doesn't have interceptor attribute
assert not hasattr(llm, 'interceptor') or llm.interceptor is None
class TestGeminiProviderInterceptor:
"""Test suite for Gemini provider with interceptor (unsupported)."""
def test_gemini_llm_accepts_interceptor_parameter(self) -> None:
"""Test that Gemini LLM raises NotImplementedError with interceptor."""
interceptor = DummyInterceptor()
# Gemini provider should raise NotImplementedError
with pytest.raises(NotImplementedError) as exc_info:
LLM(
model="gemini/gemini-2.5-pro",
interceptor=interceptor,
api_key="test-gemini-key",
)
error_msg = str(exc_info.value).lower()
assert "interceptor" in error_msg
assert "gemini" in error_msg
def test_gemini_raises_not_implemented_on_initialization(self) -> None:
"""Test that Gemini raises NotImplementedError when interceptor is used."""
interceptor = DummyInterceptor()
with pytest.raises(NotImplementedError) as exc_info:
LLM(
model="gemini/gemini-2.5-pro",
interceptor=interceptor,
api_key="test-gemini-key",
)
error_msg = str(exc_info.value).lower()
assert "interceptor" in error_msg
assert "gemini" in error_msg
def test_gemini_without_interceptor_works(self) -> None:
"""Test that Gemini LLM works without interceptor."""
llm = LLM(
model="gemini/gemini-2.5-pro",
api_key="test-gemini-key",
)
# Gemini provider doesn't have interceptor attribute
assert not hasattr(llm, 'interceptor') or llm.interceptor is None
class TestUnsupportedProviderMessages:
    """Test suite for error messages from unsupported providers."""

    @staticmethod
    def _rejection_text(**llm_kwargs: object) -> str:
        """Build an LLM expected to fail; return the lowercased error text."""
        with pytest.raises(NotImplementedError) as excinfo:
            LLM(**llm_kwargs)
        return str(excinfo.value).lower()

    def test_azure_error_message_is_clear(self) -> None:
        """Azure's rejection must name the provider and the missing feature."""
        text = self._rejection_text(
            model="azure/gpt-4",
            interceptor=DummyInterceptor(),
            api_key="test-key",
            endpoint="https://test.openai.azure.com/openai/deployments/gpt-4",
        )
        assert "azure" in text
        assert "interceptor" in text

    def test_bedrock_error_message_is_clear(self) -> None:
        """Bedrock's rejection must name the provider and the missing feature."""
        text = self._rejection_text(
            model="bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
            interceptor=DummyInterceptor(),
            aws_access_key_id="test-access-key",
            aws_secret_access_key="test-secret-key",
            aws_region_name="us-east-1",
        )
        assert "bedrock" in text
        assert "interceptor" in text

    def test_gemini_error_message_is_clear(self) -> None:
        """Gemini's rejection must name the provider and the missing feature."""
        text = self._rejection_text(
            model="gemini/gemini-2.5-pro",
            interceptor=DummyInterceptor(),
            api_key="test-gemini-key",
        )
        assert "gemini" in text
        assert "interceptor" in text
class TestProviderSupportMatrix:
    """Test suite to document which providers support interceptors."""

    def test_supported_providers_accept_interceptor(self) -> None:
        """OpenAI and Anthropic both accept and retain a passed interceptor."""
        shared = DummyInterceptor()
        # Both supported providers must hold the exact object they were given.
        for supported_model in ("gpt-4", "anthropic/claude-3-opus-20240229"):
            llm = LLM(model=supported_model, interceptor=shared)
            assert llm.interceptor is shared

    def test_unsupported_providers_raise_error(self) -> None:
        """Azure, Bedrock and Gemini all refuse interceptors at construction."""
        shared = DummyInterceptor()
        unsupported_configs = [
            # Azure - NOT SUPPORTED
            dict(
                model="azure/gpt-4",
                api_key="test",
                endpoint="https://test.openai.azure.com/openai/deployments/gpt-4",
            ),
            # Bedrock - NOT SUPPORTED
            dict(
                model="bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
                aws_access_key_id="test",
                aws_secret_access_key="test",
                aws_region_name="us-east-1",
            ),
            # Gemini - NOT SUPPORTED
            dict(model="gemini/gemini-2.5-pro", api_key="test"),
        ]
        for config in unsupported_configs:
            with pytest.raises(NotImplementedError):
                LLM(interceptor=shared, **config)

    def test_all_providers_work_without_interceptor(self) -> None:
        """Every provider constructs normally when no interceptor is passed."""
        # Providers with native interceptor support default the attribute to None.
        assert LLM(model="gpt-4").interceptor is None
        assert LLM(model="anthropic/claude-3-opus-20240229").interceptor is None
        # The remaining providers may not expose the attribute at all.
        attribute_free_llms = [
            LLM(
                model="azure/gpt-4",
                api_key="test",
                endpoint="https://test.openai.azure.com/openai/deployments/gpt-4",
            ),
            LLM(
                model="bedrock/anthropic.claude-3-sonnet-20240229-v1:0",
                aws_access_key_id="test",
                aws_secret_access_key="test",
                aws_region_name="us-east-1",
            ),
            LLM(model="gemini/gemini-2.5-pro", api_key="test"),
        ]
        for llm in attribute_free_llms:
            assert not hasattr(llm, 'interceptor') or llm.interceptor is None
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/llms/hooks/test_unsupported_providers.py",
"license": "MIT License",
"lines": 251,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/tests/project/test_callback_with_taskoutput.py | """Test callback decorator with TaskOutput arguments."""
from unittest.mock import MagicMock, patch
from crewai import Agent, Crew, Task
from crewai.project import CrewBase, callback, task
from crewai.tasks.output_format import OutputFormat
from crewai.tasks.task_output import TaskOutput
def test_callback_decorator_with_taskoutput() -> None:
    """Test that @callback decorator works with TaskOutput arguments."""

    @CrewBase
    class TestCrew:
        """Test crew with callback."""

        callback_called = False
        callback_output = None

        @callback
        def task_callback(self, output: TaskOutput) -> None:
            """Record that the callback fired and capture its payload."""
            self.callback_called = True
            self.callback_output = output

        @task
        def test_task(self) -> Task:
            """Build a task wired to the decorated callback."""
            return Task(
                description="Test task",
                expected_output="Test output",
                callback=self.task_callback,
            )

    crew_instance = TestCrew()
    built_task = crew_instance.test_task()

    sample_output = TaskOutput(
        description="Test task",
        agent="Test Agent",
        raw="test result",
        output_format=OutputFormat.RAW,
    )
    # Invoke the task's callback directly, bypassing actual task execution.
    built_task.callback(sample_output)

    assert crew_instance.callback_called
    assert crew_instance.callback_output == sample_output
def test_callback_decorator_with_taskoutput_integration() -> None:
    """Integration test for callback with actual task execution."""

    @CrewBase
    class TestCrew:
        """Test crew with callback integration."""

        callback_called = False
        received_output: TaskOutput | None = None

        @callback
        def task_callback(self, output: TaskOutput) -> None:
            """Record the TaskOutput delivered after task completion."""
            self.callback_called = True
            self.received_output = output

        @task
        def test_task(self) -> Task:
            """Build the task under test."""
            return Task(
                description="Test task",
                expected_output="Test output",
                callback=self.task_callback,
            )

    test_crew = TestCrew()
    executor = Agent(
        role="Test Agent",
        goal="Test goal",
        backstory="Test backstory",
    )
    running_task = test_crew.test_task()
    running_task.agent = executor

    # Stub out the LLM-backed execution so the test stays offline.
    with patch.object(Agent, "execute_task") as mock_execute:
        mock_execute.return_value = "test result"
        running_task.execute_sync()

    assert test_crew.callback_called
    assert test_crew.received_output is not None
assert test_crew.received_output.raw == "test result" | {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/tests/project/test_callback_with_taskoutput.py",
"license": "MIT License",
"lines": 71,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
crewAIInc/crewAI:lib/crewai/src/crewai/a2a/auth/schemas.py | """Deprecated: Authentication schemes for A2A protocol agents.
This module is deprecated. Import from crewai.a2a.auth instead:
- crewai.a2a.auth.ClientAuthScheme (replaces AuthScheme)
- crewai.a2a.auth.BearerTokenAuth
- crewai.a2a.auth.HTTPBasicAuth
- crewai.a2a.auth.HTTPDigestAuth
- crewai.a2a.auth.APIKeyAuth
- crewai.a2a.auth.OAuth2ClientCredentials
- crewai.a2a.auth.OAuth2AuthorizationCode
"""
from __future__ import annotations
from typing_extensions import deprecated
from crewai.a2a.auth.client_schemes import (
APIKeyAuth as _APIKeyAuth,
BearerTokenAuth as _BearerTokenAuth,
ClientAuthScheme as _ClientAuthScheme,
HTTPBasicAuth as _HTTPBasicAuth,
HTTPDigestAuth as _HTTPDigestAuth,
OAuth2AuthorizationCode as _OAuth2AuthorizationCode,
OAuth2ClientCredentials as _OAuth2ClientCredentials,
)
@deprecated("Use ClientAuthScheme from crewai.a2a.auth instead", category=FutureWarning)
class AuthScheme(_ClientAuthScheme):
    """Deprecated: Use ClientAuthScheme from crewai.a2a.auth instead.

    Backward-compatibility alias; inherits all behavior unchanged. The
    ``@deprecated`` decorator emits a ``FutureWarning`` on use.
    """
@deprecated("Import from crewai.a2a.auth instead", category=FutureWarning)
class BearerTokenAuth(_BearerTokenAuth):
    """Deprecated: Import from crewai.a2a.auth instead.

    Backward-compatibility alias; emits a ``FutureWarning`` on use.
    """
@deprecated("Import from crewai.a2a.auth instead", category=FutureWarning)
class HTTPBasicAuth(_HTTPBasicAuth):
    """Deprecated: Import from crewai.a2a.auth instead.

    Backward-compatibility alias; emits a ``FutureWarning`` on use.
    """
@deprecated("Import from crewai.a2a.auth instead", category=FutureWarning)
class HTTPDigestAuth(_HTTPDigestAuth):
    """Deprecated: Import from crewai.a2a.auth instead.

    Backward-compatibility alias; emits a ``FutureWarning`` on use.
    """
@deprecated("Import from crewai.a2a.auth instead", category=FutureWarning)
class APIKeyAuth(_APIKeyAuth):
    """Deprecated: Import from crewai.a2a.auth instead.

    Backward-compatibility alias; emits a ``FutureWarning`` on use.
    """
@deprecated("Import from crewai.a2a.auth instead", category=FutureWarning)
class OAuth2ClientCredentials(_OAuth2ClientCredentials):
    """Deprecated: Import from crewai.a2a.auth instead.

    Backward-compatibility alias; emits a ``FutureWarning`` on use.
    """
@deprecated("Import from crewai.a2a.auth instead", category=FutureWarning)
class OAuth2AuthorizationCode(_OAuth2AuthorizationCode):
    """Deprecated: Import from crewai.a2a.auth instead.

    Backward-compatibility alias; emits a ``FutureWarning`` on use.
    """
# Explicit public surface of this deprecated shim module; the names match the
# FutureWarning-wrapped aliases defined above so star-imports keep working.
__all__ = [
    "APIKeyAuth",
    "AuthScheme",
    "BearerTokenAuth",
    "HTTPBasicAuth",
    "HTTPDigestAuth",
    "OAuth2AuthorizationCode",
    "OAuth2ClientCredentials",
]
| {
"repo_id": "crewAIInc/crewAI",
"file_path": "lib/crewai/src/crewai/a2a/auth/schemas.py",
"license": "MIT License",
"lines": 51,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.