sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
agno-agi/agno:cookbook/90_models/anthropic/csv_input.py | """
Anthropic Csv Input
===================
Cookbook example for `anthropic/csv_input.py`.
"""
from pathlib import Path
from agno.agent import Agent
from agno.media import File
from agno.models.anthropic import Claude
from agno.utils.media import download_file
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
csv_path = Path(__file__).parent.joinpath("IMDB-Movie-Data.csv")
download_file(
"https://agno-public.s3.amazonaws.com/demo_data/IMDB-Movie-Data.csv", str(csv_path)
)
agent = Agent(
model=Claude(id="claude-sonnet-4-20250514"),
markdown=True,
)
agent.print_response(
"Analyze the top 10 highest-grossing movies in this dataset. Which genres perform best at the box office?",
files=[
File(
filepath=csv_path,
mime_type="text/csv",
),
],
)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/anthropic/csv_input.py",
"license": "Apache License 2.0",
"lines": 35,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/tests/unit/utils/test_claude.py | import base64
import pytest
from agno.media import File
from agno.utils.models.claude import _format_file_for_message
class TestFormatFileForMessage:
def test_filepath_text_csv_returns_text_source(self, tmp_path):
csv_content = "name,age\nAlice,30\nBob,25"
p = tmp_path / "data.csv"
p.write_text(csv_content)
result = _format_file_for_message(File(filepath=str(p), mime_type="text/csv"))
assert result["type"] == "document"
assert result["source"]["type"] == "text"
assert result["source"]["media_type"] == "text/plain"
assert result["source"]["data"] == csv_content
assert result["citations"] == {"enabled": True}
def test_filepath_pdf_returns_base64_source(self, tmp_path):
pdf_bytes = b"%PDF-1.4 fake content"
p = tmp_path / "doc.pdf"
p.write_bytes(pdf_bytes)
result = _format_file_for_message(File(filepath=str(p), mime_type="application/pdf"))
assert result["type"] == "document"
assert result["source"]["type"] == "base64"
assert result["source"]["media_type"] == "application/pdf"
assert base64.standard_b64decode(result["source"]["data"]) == pdf_bytes
def test_bytes_content_text_mime_returns_text_source(self):
raw = b"col1,col2\na,b"
result = _format_file_for_message(File(content=raw, mime_type="text/csv"))
assert result["source"]["type"] == "text"
assert result["source"]["media_type"] == "text/plain"
assert result["source"]["data"] == "col1,col2\na,b"
def test_bytes_content_pdf_returns_base64_source(self):
raw = b"fake-pdf-bytes"
result = _format_file_for_message(File(content=raw, mime_type="application/pdf"))
assert result["source"]["type"] == "base64"
assert result["source"]["media_type"] == "application/pdf"
assert base64.standard_b64decode(result["source"]["data"]) == raw
def test_filepath_no_mime_guesses_from_extension(self, tmp_path):
p = tmp_path / "report.csv"
p.write_text("x,y\n1,2")
result = _format_file_for_message(File(filepath=str(p)))
assert result["source"]["type"] == "text"
assert result["source"]["data"] == "x,y\n1,2"
def test_filepath_nonexistent_returns_none(self):
result = _format_file_for_message(File(filepath="/nonexistent/file.pdf", mime_type="application/pdf"))
assert result is None
@pytest.mark.parametrize(
"mime_type",
["text/plain", "text/html", "text/xml", "text/javascript", "application/json", "application/x-python"],
)
def test_all_text_mimes_route_to_text_source(self, mime_type):
raw = b"some text content"
result = _format_file_for_message(File(content=raw, mime_type=mime_type))
assert result["source"]["type"] == "text"
assert result["source"]["media_type"] == "text/plain"
def test_text_data_is_not_base64_encoded(self, tmp_path):
"""Regression: old code base64-encoded before checking MIME, sending gibberish as text."""
csv_content = "name,value\ntest,123"
p = tmp_path / "test.csv"
p.write_text(csv_content)
result = _format_file_for_message(File(filepath=str(p), mime_type="text/csv"))
assert result["source"]["data"] == csv_content
assert result["source"]["data"] != base64.standard_b64encode(csv_content.encode()).decode()
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/utils/test_claude.py",
"license": "Apache License 2.0",
"lines": 62,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:cookbook/07_knowledge/vector_db/lance_db/lance_db_cloud.py | """
LanceDB Cloud connection test.
Requires environment variables:
- LANCE_DB_URI: LanceDB Cloud database URI (e.g. db://your-database-id)
- LANCE_DB_API_KEY or LANCEDB_API_KEY: LanceDB Cloud API key
Run from repo root with env loaded (e.g. direnv):
.venvs/demo/bin/python cookbook/07_knowledge/vector_db/lance_db_cloud/lance_db_cloud.py
"""
import asyncio
import os
from agno.knowledge.knowledge import Knowledge
from agno.vectordb.lancedb import LanceDb
# ---------------------------------------------------------------------------
# Setup
# ---------------------------------------------------------------------------
TABLE_NAME = "agno_cloud_test"
URI = os.getenv("LANCE_DB_URI")
API_KEY = os.getenv("LANCE_DB_API_KEY") or os.getenv("LANCEDB_API_KEY")
# ---------------------------------------------------------------------------
# Create Knowledge Base
# ---------------------------------------------------------------------------
# The cloud vector DB and knowledge instance are created inside `main()`
# after validating required environment variables.
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
def main():
if not URI:
print("Set LANCE_DB_URI (e.g. db://your-database-id)")
return
# ---------------------------------------------------------------------------
# Create Knowledge Base
# ---------------------------------------------------------------------------
vector_db = LanceDb(
uri=URI,
table_name=TABLE_NAME,
api_key=API_KEY,
)
knowledge = Knowledge(
name="LanceDB Cloud Test",
description="Agno Knowledge with LanceDB Cloud",
vector_db=vector_db,
)
async def run():
print("Inserting test content...")
await knowledge.ainsert(
name="cloud_test_doc",
text_content="LanceDB Cloud is a hosted vector database. "
"Agno supports it via the LanceDb vector store with uri and api_key. "
"Use db:// URI and set LANCEDB_API_KEY for cloud connections.",
metadata={"source": "lance_db_cloud_cookbook"},
)
print("Searching for 'vector database'...")
results = knowledge.search("vector database", max_results=3)
print(f"Found {len(results)} document(s)")
for i, doc in enumerate(results):
print(f" [{i + 1}] {doc.name}: {doc.content[:80]}...")
print("Deleting test document...")
vector_db.delete_by_name("cloud_test_doc")
print("Done.")
asyncio.run(run())
if __name__ == "__main__":
main()
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/07_knowledge/vector_db/lance_db/lance_db_cloud.py",
"license": "Apache License 2.0",
"lines": 63,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/neosantara/basic.py | """
Neosantara Basic
================
Cookbook example for `neosantara/basic.py`.
"""
import asyncio
from agno.agent import Agent
from agno.models.neosantara import Neosantara
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
agent = Agent(
model=Neosantara(id="grok-4.1-fast-non-reasoning"),
markdown=True,
)
# Print the response in the terminal
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
# --- Sync ---
agent.print_response("Share a 2 sentence horror story")
# --- Sync + Streaming ---
agent.print_response("Share a 2 sentence horror story", stream=True)
# --- Async ---
asyncio.run(agent.aprint_response("Share a 2 sentence horror story"))
# --- Async + Streaming ---
asyncio.run(agent.aprint_response("Share a 2 sentence horror story", stream=True))
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/neosantara/basic.py",
"license": "Apache License 2.0",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/neosantara/structured_output.py | """
Neosantara Structured Output
============================
Cookbook example for `neosantara/structured_output.py`.
"""
from typing import List
from agno.agent import Agent
from agno.models.neosantara import Neosantara
from pydantic import BaseModel, Field
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
class MovieScript(BaseModel):
setting: str = Field(
..., description="Provide a nice setting for a blockbuster movie."
)
ending: str = Field(
...,
description="Ending of the movie. If not available, provide a happy ending.",
)
genre: str = Field(
...,
description="Genre of the movie. If not available, select action, thriller or romantic comedy.",
)
name: str = Field(..., description="Give a name to this movie")
characters: List[str] = Field(..., description="Name of characters for this movie.")
storyline: str = Field(
..., description="3 sentence storyline for the movie. Make it exciting!"
)
# Agent that uses structured outputs
agent = Agent(
model=Neosantara(id="grok-4.1-fast-non-reasoning"),
description="You write movie scripts. Respond ONLY with a valid JSON object matching the provided schema.",
output_schema=MovieScript,
use_json_mode=True,
)
# Print the response in the terminal
agent.print_response("New York")
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/neosantara/structured_output.py",
"license": "Apache License 2.0",
"lines": 43,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/neosantara/tool_use.py | """
Neosantara Tool Use
===================
Cookbook example for `neosantara/tool_use.py`.
"""
import asyncio
from agno.agent import Agent
from agno.models.neosantara import Neosantara
from agno.tools.websearch import WebSearchTools
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
agent = Agent(
model=Neosantara(id="grok-4.1-fast-non-reasoning"),
tools=[WebSearchTools()],
markdown=True,
)
# Print the response in the terminal
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
# --- Sync ---
agent.print_response(
"What is the current stock price of NVDA and what is its 52 week high?"
)
# --- Async + Streaming ---
asyncio.run(
agent.aprint_response("What is the current stock price of NVDA?", stream=True)
)
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/neosantara/tool_use.py",
"license": "Apache License 2.0",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/agno/models/neosantara/neosantara.py | from dataclasses import dataclass
from os import getenv
from typing import Any, Dict, Optional
from agno.exceptions import ModelAuthenticationError
from agno.models.openai.like import OpenAILike
@dataclass
class Neosantara(OpenAILike):
"""
A class for interacting with Neosantara API.
Attributes:
id (str): The id of the Neosantara model to use. Default is "grok-4.1-fast-non-reasoning".
name (str): The name of this chat model instance. Default is "Neosantara"
provider (str): The provider of the model. Default is "Neosantara".
api_key (str): The api key to authorize request to Neosantara.
base_url (str): The base url to which the requests are sent. Defaults to "https://api.neosantara.xyz/v1".
"""
id: str = "grok-4.1-fast-non-reasoning"
name: str = "Neosantara"
provider: str = "Neosantara"
api_key: Optional[str] = None
base_url: str = "https://api.neosantara.xyz/v1"
def _get_client_params(self) -> Dict[str, Any]:
"""
Returns client parameters for API requests, checking for NEOSANTARA_API_KEY.
Returns:
Dict[str, Any]: A dictionary of client parameters for API requests.
"""
if not self.api_key:
self.api_key = getenv("NEOSANTARA_API_KEY")
if not self.api_key:
raise ModelAuthenticationError(
message="NEOSANTARA_API_KEY not set. Please set the NEOSANTARA_API_KEY environment variable.",
model_name=self.name,
)
return super()._get_client_params()
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/models/neosantara/neosantara.py",
"license": "Apache License 2.0",
"lines": 35,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/tests/unit/models/neosantara/test_neosantara.py | import os
from unittest.mock import patch
import pytest
from agno.exceptions import ModelAuthenticationError
from agno.models.neosantara import Neosantara
def test_neosantara_initialization_with_api_key():
model = Neosantara(id="grok-4.1-fast-non-reasoning", api_key="test-api-key")
assert model.id == "grok-4.1-fast-non-reasoning"
assert model.api_key == "test-api-key"
assert model.base_url == "https://api.neosantara.xyz/v1"
def test_neosantara_initialization_without_api_key():
with patch.dict(os.environ, {}, clear=True):
model = Neosantara(id="grok-4.1-fast-non-reasoning")
client_params = None
with pytest.raises(ModelAuthenticationError):
client_params = model._get_client_params()
assert client_params is None
def test_neosantara_initialization_with_env_api_key():
with patch.dict(os.environ, {"NEOSANTARA_API_KEY": "env-api-key"}):
model = Neosantara(id="grok-4.1-fast-non-reasoning")
model._get_client_params()
assert model.api_key == "env-api-key"
def test_neosantara_client_params():
model = Neosantara(id="grok-4.1-fast-non-reasoning", api_key="test-api-key")
client_params = model._get_client_params()
assert client_params["api_key"] == "test-api-key"
assert client_params["base_url"] == "https://api.neosantara.xyz/v1"
def test_neosantara_default_values():
model = Neosantara(api_key="test-api-key")
assert model.id == "grok-4.1-fast-non-reasoning"
assert model.name == "Neosantara"
assert model.provider == "Neosantara"
def test_neosantara_custom_model_id():
model = Neosantara(id="custom-model", api_key="test-api-key")
assert model.id == "custom-model"
assert model.name == "Neosantara"
assert model.provider == "Neosantara"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/models/neosantara/test_neosantara.py",
"license": "Apache License 2.0",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:cookbook/93_components/workflows/save_conditional_steps.py | """
Save Conditional Workflow Steps
===============================
Demonstrates creating a workflow with conditional steps, saving it to the
database, and loading it back with a Registry.
"""
from agno.agent import Agent
from agno.db.postgres import PostgresDb
from agno.registry import Registry
from agno.tools.hackernews import HackerNewsTools
from agno.tools.websearch import WebSearchTools
from agno.workflow.condition import Condition
from agno.workflow.step import Step
from agno.workflow.types import StepInput
from agno.workflow.workflow import Workflow, get_workflow_by_id
# ---------------------------------------------------------------------------
# Setup
# ---------------------------------------------------------------------------
# Database
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
db = PostgresDb(db_url=db_url)
# ---------------------------------------------------------------------------
# Create Agents
# ---------------------------------------------------------------------------
# Agents
hackernews_agent = Agent(
name="HackerNews Researcher",
instructions="Research tech news and trends from Hacker News",
tools=[HackerNewsTools()],
)
web_agent = Agent(
name="Web Researcher",
instructions="Research general information from the web",
tools=[WebSearchTools()],
)
content_agent = Agent(
name="Content Creator",
instructions="Create well-structured content from research data",
)
# ---------------------------------------------------------------------------
# Create Registry Components
# ---------------------------------------------------------------------------
# Evaluator function (will be serialized by name and restored via registry)
def is_tech_topic(step_input: StepInput) -> bool:
"""Returns True to execute the conditional steps, False to skip."""
topic = step_input.input or step_input.previous_step_content or ""
tech_keywords = [
"ai",
"machine learning",
"programming",
"software",
"tech",
"startup",
"coding",
]
is_tech = any(keyword in topic.lower() for keyword in tech_keywords)
print(f"Condition: Topic is {'tech' if is_tech else 'not tech'}")
return is_tech
# Registry (required to restore the evaluator function when loading)
registry = Registry(
name="Condition Workflow Registry",
functions=[is_tech_topic],
)
# ---------------------------------------------------------------------------
# Create Workflow Steps
# ---------------------------------------------------------------------------
# Steps
research_hackernews_step = Step(
name="ResearchHackerNews",
description="Research tech news from Hacker News",
agent=hackernews_agent,
)
research_web_step = Step(
name="ResearchWeb",
description="Research general information from web",
agent=web_agent,
)
write_step = Step(
name="WriteContent",
description="Write the final content based on research",
agent=content_agent,
)
# ---------------------------------------------------------------------------
# Create Workflow
# ---------------------------------------------------------------------------
# Workflow
workflow = Workflow(
name="Conditional Research Workflow",
description="Conditionally research from HackerNews for tech topics",
steps=[
Condition(
name="TechTopicCondition",
description="Check if topic is tech-related for HackerNews research",
evaluator=is_tech_topic,
steps=[research_hackernews_step],
),
research_web_step,
write_step,
],
db=db,
)
# ---------------------------------------------------------------------------
# Run Workflow Example
# ---------------------------------------------------------------------------
if __name__ == "__main__":
# Save
print("Saving workflow...")
version = workflow.save(db=db)
print(f"Saved workflow as version {version}")
# Load
print("\nLoading workflow...")
loaded_workflow = get_workflow_by_id(
db=db,
id="conditional-research-workflow",
registry=registry,
)
if loaded_workflow:
print("Workflow loaded successfully!")
print(f" Name: {loaded_workflow.name}")
print(f" Steps: {len(loaded_workflow.steps) if loaded_workflow.steps else 0}")
# Uncomment to run the loaded workflow
# loaded_workflow.print_response(input="Latest AI developments in machine learning", stream=True)
else:
print("Workflow not found")
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/93_components/workflows/save_conditional_steps.py",
"license": "Apache License 2.0",
"lines": 124,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/93_components/workflows/save_custom_steps.py | """
Save Custom Executor Workflow Steps
===================================
Demonstrates creating a workflow with custom executor steps, saving it to the
database, and loading it back with a Registry.
"""
from agno.agent import Agent
from agno.db.postgres import PostgresDb
from agno.registry import Registry
from agno.workflow.step import Step
from agno.workflow.types import StepInput, StepOutput
from agno.workflow.workflow import Workflow, get_workflow_by_id
# ---------------------------------------------------------------------------
# Setup
# ---------------------------------------------------------------------------
# Database
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
db = PostgresDb(db_url=db_url)
# ---------------------------------------------------------------------------
# Create Agents
# ---------------------------------------------------------------------------
# Agents
content_agent = Agent(
name="Content Creator",
instructions="Create well-structured content from input data",
)
# ---------------------------------------------------------------------------
# Create Registry Components
# ---------------------------------------------------------------------------
# Custom executor function (will be serialized by name and restored via registry)
def transform_content(step_input: StepInput) -> StepOutput:
"""Custom executor function that transforms content."""
previous_content = step_input.previous_step_content or ""
transformed = f"[TRANSFORMED] {previous_content} [END]"
print("Transform: Applied transformation to content")
return StepOutput(
step_name="TransformContent",
content=transformed,
success=True,
)
# Registry (required to restore the executor function when loading)
registry = Registry(
name="Custom Steps Registry",
functions=[transform_content],
)
# ---------------------------------------------------------------------------
# Create Workflow Steps
# ---------------------------------------------------------------------------
# Steps
content_step = Step(
name="CreateContent",
description="Create initial content using the agent",
agent=content_agent,
)
transform_step = Step(
name="TransformContent",
description="Transform the content using custom function",
executor=transform_content,
)
# ---------------------------------------------------------------------------
# Create Workflow
# ---------------------------------------------------------------------------
# Workflow
workflow = Workflow(
name="Custom Executor Workflow",
description="Create content with agent, then transform with custom function",
steps=[
content_step,
transform_step,
],
db=db,
)
# ---------------------------------------------------------------------------
# Run Workflow Example
# ---------------------------------------------------------------------------
if __name__ == "__main__":
# Save
print("Saving workflow...")
version = workflow.save(db=db)
print(f"Saved workflow as version {version}")
# Load
print("\nLoading workflow...")
loaded_workflow = get_workflow_by_id(
db=db,
id="custom-executor-workflow",
registry=registry,
)
if loaded_workflow:
print("Workflow loaded successfully!")
print(f" Name: {loaded_workflow.name}")
print(f" Steps: {len(loaded_workflow.steps) if loaded_workflow.steps else 0}")
# Uncomment to run the loaded workflow
# loaded_workflow.print_response(input="Write about AI trends", stream=True)
else:
print("Workflow not found")
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/93_components/workflows/save_custom_steps.py",
"license": "Apache License 2.0",
"lines": 95,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/93_components/workflows/save_loop_steps.py | """
Save Loop Workflow Steps
========================
Demonstrates creating a workflow with loop steps, saving it to the database,
and loading it back with a Registry.
"""
from typing import List
from agno.agent import Agent
from agno.db.postgres import PostgresDb
from agno.registry import Registry
from agno.tools.hackernews import HackerNewsTools
from agno.tools.websearch import WebSearchTools
from agno.workflow.loop import Loop
from agno.workflow.step import Step
from agno.workflow.types import StepOutput
from agno.workflow.workflow import Workflow, get_workflow_by_id
# ---------------------------------------------------------------------------
# Setup
# ---------------------------------------------------------------------------
# Database
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
db = PostgresDb(db_url=db_url)
# ---------------------------------------------------------------------------
# Create Agents
# ---------------------------------------------------------------------------
# Agents
research_agent = Agent(
name="Research Agent",
instructions="Research the given topic thoroughly using available tools",
tools=[HackerNewsTools(), WebSearchTools()],
)
summary_agent = Agent(
name="Summary Agent",
instructions="Summarize the research findings into a concise report",
)
# ---------------------------------------------------------------------------
# Create Registry Components
# ---------------------------------------------------------------------------
# End condition function (will be serialized by name and restored via registry)
def check_research_complete(outputs: List[StepOutput]) -> bool:
"""Returns True to break the loop, False to continue."""
if not outputs:
return False
for output in outputs:
if output.content and len(output.content) > 500:
print(f"Loop: Research complete - found {len(output.content)} chars")
return True
print("Loop: Research incomplete - continuing")
return False
# Registry (required to restore the end_condition function when loading)
registry = Registry(
name="Loop Workflow Registry",
functions=[check_research_complete],
)
# ---------------------------------------------------------------------------
# Create Workflow Steps
# ---------------------------------------------------------------------------
# Steps
research_step = Step(
name="ResearchStep",
description="Research the topic using HackerNews and web search",
agent=research_agent,
)
summarize_step = Step(
name="SummarizeStep",
description="Summarize all research findings",
agent=summary_agent,
)
# ---------------------------------------------------------------------------
# Create Workflow
# ---------------------------------------------------------------------------
# Workflow
workflow = Workflow(
name="Loop Research Workflow",
description="Research a topic in a loop until sufficient content is gathered",
steps=[
Loop(
name="ResearchLoop",
description="Loop through research until end condition is met",
steps=[research_step],
end_condition=check_research_complete,
max_iterations=3,
),
summarize_step,
],
db=db,
)
# ---------------------------------------------------------------------------
# Run Workflow Example
# ---------------------------------------------------------------------------
if __name__ == "__main__":
# Save
print("Saving workflow...")
version = workflow.save(db=db)
print(f"Saved workflow as version {version}")
# Load
print("\nLoading workflow...")
loaded_workflow = get_workflow_by_id(
db=db,
id="loop-research-workflow",
registry=registry,
)
if loaded_workflow:
print("Workflow loaded successfully!")
print(f" Name: {loaded_workflow.name}")
print(f" Steps: {len(loaded_workflow.steps) if loaded_workflow.steps else 0}")
# Uncomment to run the loaded workflow
# loaded_workflow.print_response(input="Latest developments in AI agents", stream=True)
else:
print("Workflow not found")
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/93_components/workflows/save_loop_steps.py",
"license": "Apache License 2.0",
"lines": 110,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/93_components/workflows/save_parallel_steps.py | """
Save Parallel Workflow Steps
============================
Demonstrates creating a workflow with parallel steps, saving it to the
database, and loading it back.
"""
from agno.agent import Agent
from agno.db.postgres import PostgresDb
from agno.tools.hackernews import HackerNewsTools
from agno.tools.websearch import WebSearchTools
from agno.workflow.parallel import Parallel
from agno.workflow.step import Step
from agno.workflow.workflow import Workflow, get_workflow_by_id
# ---------------------------------------------------------------------------
# Setup
# ---------------------------------------------------------------------------
# Database
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
db = PostgresDb(db_url=db_url)
# ---------------------------------------------------------------------------
# Create Agents
# ---------------------------------------------------------------------------
# Agents
hackernews_researcher = Agent(
name="HackerNews Researcher",
instructions="Research tech news and trends from Hacker News",
tools=[HackerNewsTools()],
)
web_researcher = Agent(
name="Web Researcher",
instructions="Research general information from the web",
tools=[WebSearchTools()],
)
writer = Agent(
name="Content Writer",
instructions="Write well-structured content from research findings",
)
reviewer = Agent(
name="Content Reviewer",
instructions="Review and improve the written content",
)
# ---------------------------------------------------------------------------
# Create Workflow Steps
# ---------------------------------------------------------------------------
# Steps
research_hn_step = Step(
name="ResearchHackerNews",
description="Research tech news from Hacker News",
agent=hackernews_researcher,
)
research_web_step = Step(
name="ResearchWeb",
description="Research information from the web",
agent=web_researcher,
)
write_step = Step(
name="WriteArticle",
description="Write article from research findings",
agent=writer,
)
review_step = Step(
name="ReviewArticle",
description="Review and finalize the article",
agent=reviewer,
)
# ---------------------------------------------------------------------------
# Create Workflow
# ---------------------------------------------------------------------------
# Workflow
workflow = Workflow(
name="Parallel Research Pipeline",
description="Research from multiple sources in parallel, then write and review",
steps=[
Parallel(
research_hn_step,
research_web_step,
name="ParallelResearch",
description="Run HackerNews and Web research in parallel",
),
write_step,
review_step,
],
db=db,
)
# ---------------------------------------------------------------------------
# Run Workflow Example
# ---------------------------------------------------------------------------
if __name__ == "__main__":
# Save
print("Saving workflow...")
version = workflow.save(db=db)
print(f"Saved workflow as version {version}")
# Load
print("\nLoading workflow...")
loaded_workflow = get_workflow_by_id(db=db, id="parallel-research-pipeline")
if loaded_workflow:
print("Workflow loaded successfully!")
print(f" Name: {loaded_workflow.name}")
print(f" Steps: {len(loaded_workflow.steps) if loaded_workflow.steps else 0}")
# Uncomment to run the loaded workflow
# loaded_workflow.print_response(input="Latest developments in AI agents", stream=True)
else:
print("Workflow not found")
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/93_components/workflows/save_parallel_steps.py",
"license": "Apache License 2.0",
"lines": 103,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/93_components/workflows/save_router_steps.py | """
Save Router Workflow Steps
==========================
Demonstrates creating a workflow with router steps, saving it to the
database, and loading it back with a Registry.
"""
from typing import List
from agno.agent import Agent
from agno.db.postgres import PostgresDb
from agno.registry import Registry
from agno.tools.hackernews import HackerNewsTools
from agno.tools.websearch import WebSearchTools
from agno.workflow.router import Router
from agno.workflow.step import Step
from agno.workflow.types import StepInput
from agno.workflow.workflow import Workflow, get_workflow_by_id
# ---------------------------------------------------------------------------
# Setup
# ---------------------------------------------------------------------------
# Database
# NOTE(review): assumes the local Postgres started by the cookbook setup — confirm
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
db = PostgresDb(db_url=db_url)
# ---------------------------------------------------------------------------
# Create Agents
# ---------------------------------------------------------------------------
# Agents
# Each research agent owns one source; the summary agent has no tools and
# only condenses whatever research output it receives.
hackernews_agent = Agent(
    name="HackerNews Agent",
    instructions="Research tech news and trends from Hacker News",
    tools=[HackerNewsTools()],
)
web_agent = Agent(
    name="Web Agent",
    instructions="Research general information from the web",
    tools=[WebSearchTools()],
)
summary_agent = Agent(
    name="Summary Agent",
    instructions="Summarize the research findings into a concise report",
)
# ---------------------------------------------------------------------------
# Create Workflow Steps
# ---------------------------------------------------------------------------
# Steps
# The router chooses among the two research steps at runtime; the summary
# step always runs afterwards.
hackernews_step = Step(
    name="HackerNewsStep",
    description="Research using HackerNews for tech topics",
    agent=hackernews_agent,
)
web_step = Step(
    name="WebStep",
    description="Research using web search for general topics",
    agent=web_agent,
)
summary_step = Step(
    name="SummaryStep",
    description="Summarize the research",
    agent=summary_agent,
)
# ---------------------------------------------------------------------------
# Create Registry Components
# ---------------------------------------------------------------------------
# Selector function (will be serialized by name and restored via registry)
def select_research_step(step_input: StepInput) -> List[Step]:
    """Dynamically select which research step(s) to execute based on the input."""
    # Prefer the direct workflow input; fall back to the previous step's content.
    topic = step_input.input or step_input.previous_step_content or ""
    topic_lower = topic.lower()
    tech_keywords = (
        "ai",
        "machine learning",
        "programming",
        "software",
        "tech",
        "startup",
        "coding",
    )
    chosen: List[Step] = []
    if any(kw in topic_lower for kw in tech_keywords):
        print("Router: Selected HackerNews step for tech topic")
        chosen.append(hackernews_step)
    # Web research is the fallback, and is also added for news/general topics.
    if not chosen or "news" in topic_lower or "general" in topic_lower:
        print("Router: Selected Web step")
        chosen.append(web_step)
    return chosen
# Registry (required to restore the selector function when loading)
# The selector is serialized by function name; the registry maps that name
# back to the callable when the workflow is loaded from the database.
registry = Registry(
    name="Router Workflow Registry",
    functions=[select_research_step],
)
# ---------------------------------------------------------------------------
# Create Workflow
# ---------------------------------------------------------------------------
# Workflow
# The Router invokes select_research_step at runtime to pick from its
# choices; the summary step condenses whatever research ran.
workflow = Workflow(
    name="Router Research Workflow",
    description="Dynamically route to appropriate research steps based on topic",
    steps=[
        Router(
            name="ResearchRouter",
            description="Route to appropriate research agent based on topic",
            selector=select_research_step,
            choices=[hackernews_step, web_step],
        ),
        summary_step,
    ],
    db=db,
)
# ---------------------------------------------------------------------------
# Run Workflow Example
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # Save the workflow definition; save() returns a version number.
    print("Saving workflow...")
    version = workflow.save(db=db)
    print(f"Saved workflow as version {version}")
    # Load it back. The registry is required so the serialized selector name
    # can be resolved back to the select_research_step callable.
    # NOTE(review): the id looks like a slug derived from the workflow name — confirm
    print("\nLoading workflow...")
    loaded_workflow = get_workflow_by_id(
        db=db,
        id="router-research-workflow",
        registry=registry,
    )
    if loaded_workflow:
        print("Workflow loaded successfully!")
        print(f"  Name: {loaded_workflow.name}")
        print(f"  Steps: {len(loaded_workflow.steps) if loaded_workflow.steps else 0}")
        # Uncomment to run the loaded workflow
        # loaded_workflow.print_response(input="Latest developments in AI agents", stream=True)
    else:
        print("Workflow not found")
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/93_components/workflows/save_router_steps.py",
"license": "Apache License 2.0",
"lines": 130,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/agno/workflow/cel.py | """CEL (Common Expression Language) support for workflow steps.
CEL spec: https://github.com/google/cel-spec
"""
import json
import re
from typing import Any, Dict, List, Optional, Union
from agno.utils.log import logger
try:
    import celpy
    from celpy import celtypes
    CEL_AVAILABLE = True
    # Union of the CEL runtime types that _to_cel can produce.
    CelValue = Union[
        celtypes.BoolType,
        celtypes.IntType,
        celtypes.DoubleType,
        celtypes.StringType,
        celtypes.ListType,
        celtypes.MapType,
    ]
except ImportError:
    # cel-python is optional: evaluation entry points fail at call time
    # (raise or return False) instead of failing at import time.
    CEL_AVAILABLE = False
    celpy = None  # type: ignore
    celtypes = None  # type: ignore
    CelValue = Any  # type: ignore
# Type alias for Python values that can be converted to CEL
PythonValue = Union[None, bool, int, float, str, List[Any], Dict[str, Any]]
# Regex for simple Python identifiers (function names)
_IDENTIFIER_RE = re.compile(r"^[a-zA-Z_][a-zA-Z0-9_]*$")
# Characters/tokens that indicate a CEL expression rather than a function name
# NOTE: "true"/"false" only flag a CEL expression when embedded in a larger
# string; standalone they match _IDENTIFIER_RE first (see is_cel_expression).
_CEL_INDICATORS = [
    ".",
    "(",
    ")",
    "[",
    "]",
    "==",
    "!=",
    "<=",
    ">=",
    "<",
    ">",
    "&&",
    "||",
    "!",
    "+",
    "-",
    "*",
    "/",
    "%",
    "?",
    ":",
    '"',
    "'",
    "true",
    "false",
    " in ",
]
# ********** Public Functions **********
def validate_cel_expression(expression: str) -> bool:
    """Validate a CEL expression without evaluating it.

    Useful for UI validation before saving a workflow configuration.
    Returns False (with a warning) when cel-python is not installed.
    """
    if not CEL_AVAILABLE:
        logger.warning("cel-python is not installed. Install with: pip install cel-python")
        return False
    try:
        # Compilation alone is enough to catch syntax errors; nothing is run.
        celpy.Environment().compile(expression)
    except Exception as e:
        logger.debug(f"CEL expression validation failed: {e}")
        return False
    return True
def is_cel_expression(value: str) -> bool:
    """Determine if a string is a CEL expression vs a function name.

    Simple identifiers like ``my_evaluator`` return False.
    Anything containing operators, dots, parens, etc. returns True.
    """
    # Plain identifiers are treated as registered function names, never CEL.
    if _IDENTIFIER_RE.match(value):
        return False
    # Otherwise look for any token that only appears in real expressions.
    for indicator in _CEL_INDICATORS:
        if indicator in value:
            return True
    return False
def evaluate_cel_condition_evaluator(
    expression: str,
    step_input: "StepInput",  # type: ignore # noqa: F821
    session_state: Optional[Dict[str, Any]] = None,
) -> bool:
    """Evaluate a CEL expression for a Condition evaluator.

    Context variables:
    - input: The workflow input as a string
    - previous_step_content: Content from the previous step
    - previous_step_outputs: Map of step name to content string from all previous steps
    - additional_data: Map of additional data passed to the workflow
    - session_state: Map of session state values
    """
    context = _build_step_input_context(step_input, session_state)
    return _evaluate_cel(expression, context)
def evaluate_cel_loop_end_condition(
    expression: str,
    iteration_results: "List[StepOutput]",  # type: ignore # noqa: F821
    current_iteration: int = 0,
    max_iterations: int = 3,
) -> bool:
    """Evaluate a CEL expression as a Loop end condition.

    Context variables:
    - current_iteration: Current iteration number (1-indexed, after completion)
    - max_iterations: Maximum iterations configured for the loop
    - all_success: True if all steps in this iteration succeeded
    - last_step_content: Content string from the last step in this iteration
    - step_outputs: Map of step name to content string from the current iteration
    """
    context = _build_loop_step_output_context(iteration_results, current_iteration, max_iterations)
    return _evaluate_cel(expression, context)
def evaluate_cel_router_selector(
    expression: str,
    step_input: "StepInput",  # type: ignore # noqa: F821
    session_state: Optional[Dict[str, Any]] = None,
    step_choices: Optional[List[str]] = None,
) -> str:
    """Evaluate a CEL expression for a Router selector.

    Returns the name of the step to execute as a string.

    Context variables (same as Condition, plus step_choices):
    - input: The workflow input as a string
    - previous_step_content: Content from the previous step
    - previous_step_outputs: Map of step name to content string from all previous steps
    - additional_data: Map of additional data passed to the workflow
    - session_state: Map of session state values
    - step_choices: List of step names available to the selector
    """
    selector_context = _build_step_input_context(step_input, session_state)
    selector_context["step_choices"] = step_choices or []
    return _evaluate_cel_string(expression, selector_context)
# ********** Internal Functions **********
def _evaluate_cel_raw(expression: str, context: Dict[str, Any]) -> Any:
    """Core CEL evaluation: compile, run, and return the raw result.

    Raises RuntimeError if cel-python is missing, ValueError on any
    compile/evaluate failure (with the original exception chained).
    """
    if not CEL_AVAILABLE:
        raise RuntimeError("cel-python is not installed. Install with: pip install cel-python")
    try:
        environment = celpy.Environment()
        program = environment.program(environment.compile(expression))
        # Convert every context value to its CEL runtime equivalent.
        activation = {name: _to_cel(value) for name, value in context.items()}
        return program.evaluate(activation)
    except Exception as e:
        logger.error(f"CEL evaluation failed for '{expression}': {e}")
        raise ValueError(f"Failed to evaluate CEL expression '{expression}': {e}") from e
def _evaluate_cel(expression: str, context: Dict[str, Any]) -> bool:
    """CEL evaluation that coerces the result to bool."""
    outcome = _evaluate_cel_raw(expression, context)
    # Native bools and CEL BoolType pass through silently; anything else is
    # coerced with a warning so surprising truthiness is visible in logs.
    if not isinstance(outcome, (bool, celtypes.BoolType)):
        logger.warning(f"CEL expression '{expression}' returned {type(outcome).__name__}, converting to bool")
    return bool(outcome)
def _evaluate_cel_string(expression: str, context: Dict[str, Any]) -> str:
    """CEL evaluation that coerces the result to string (for Router selector)."""
    outcome = _evaluate_cel_raw(expression, context)
    # Strings and CEL StringType pass through; other results are stringified
    # with a warning so mismatched selector expressions are visible in logs.
    if not isinstance(outcome, (str, celtypes.StringType)):
        logger.warning(f"CEL expression '{expression}' returned {type(outcome).__name__}, converting to string")
    return str(outcome)
def _to_cel(value: PythonValue) -> Union["CelValue", None]:
    """Convert a Python value to a CEL-compatible type.

    Args:
        value: A Python value (None, bool, int, float, str, list, or dict)

    Returns:
        The corresponding CEL type, or None if input is None
    """
    if value is None:
        return None
    # Order matters: bool is a subclass of int, so it must be checked first.
    scalar_conversions = (
        (bool, celtypes.BoolType),
        (int, celtypes.IntType),
        (float, celtypes.DoubleType),
        (str, celtypes.StringType),
    )
    for python_type, cel_type in scalar_conversions:
        if isinstance(value, python_type):
            return cel_type(value)
    if isinstance(value, list):
        return celtypes.ListType([_to_cel(element) for element in value])
    if isinstance(value, dict):
        return celtypes.MapType({celtypes.StringType(key): _to_cel(item) for key, item in value.items()})
    # Fallback for any other type - convert to string
    return celtypes.StringType(str(value))
def _build_step_input_context(
step_input: "StepInput", # type: ignore # noqa: F821
session_state: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
"""Build context for CEL evaluation of step input.
Maps directly to StepInput fields:
- input: from step_input.input (as string)
- previous_step_content: from step_input.previous_step_content (as string)
- previous_step_outputs: from step_input.previous_step_outputs (map of step name -> content string)
- additional_data: from step_input.additional_data
- session_state: passed separately
"""
input_str = ""
if step_input.input is not None:
input_str = step_input.get_input_as_string() or ""
previous_content = ""
if step_input.previous_step_content is not None:
if hasattr(step_input.previous_step_content, "model_dump_json"):
previous_content = step_input.previous_step_content.model_dump_json()
elif isinstance(step_input.previous_step_content, dict):
previous_content = json.dumps(step_input.previous_step_content, default=str)
else:
previous_content = str(step_input.previous_step_content)
previous_step_outputs: Dict[str, str] = {}
if step_input.previous_step_outputs:
for name, output in step_input.previous_step_outputs.items():
previous_step_outputs[name] = str(output.content) if output.content else ""
return {
"input": input_str,
"previous_step_content": previous_content,
"previous_step_outputs": previous_step_outputs,
"additional_data": step_input.additional_data or {},
"session_state": session_state or {},
}
def _build_loop_step_output_context(
iteration_results: "List[StepOutput]", # type: ignore # noqa: F821
current_iteration: int = 0,
max_iterations: int = 3,
) -> Dict[str, Any]:
"""Build context for CEL evaluation of loop end condition from iteration results.
Maps to StepOutput fields:
- step_outputs: map of StepOutput.step_name -> str(StepOutput.content)
- all_success: derived from StepOutput.success
- last_step_content: content from the last StepOutput of the current loop iteration
"""
all_success = True
outputs: Dict[str, str] = {}
last_content = ""
for result in iteration_results:
content = str(result.content) if result.content else ""
name = result.step_name or f"step_{len(outputs)}"
outputs[name] = content
last_content = content
if not result.success:
all_success = False
return {
"current_iteration": current_iteration,
"max_iterations": max_iterations,
"all_success": all_success,
"last_step_content": last_content,
"step_outputs": outputs,
}
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/workflow/cel.py",
"license": "Apache License 2.0",
"lines": 245,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/tests/unit/workflow/test_cel.py | """Unit tests for CEL (Common Expression Language) support in workflows."""
import pytest
from agno.workflow.cel import (
CEL_AVAILABLE,
is_cel_expression,
validate_cel_expression,
)
class TestIsCelExpression:
    """Tests for is_cel_expression function.

    is_cel_expression only distinguishes a plain identifier (a registered
    function name) from an expression-looking string; it never parses CEL.
    Standalone identifiers - even 'true'/'false'/'all_success' - are treated
    as function names by design (see the reserved-word tests below).
    """
    # ============================================================================
    # SIMPLE IDENTIFIERS (Should return False - these are function names)
    # ============================================================================
    def test_simple_identifier(self):
        """Simple function names should return False."""
        assert is_cel_expression("my_evaluator") is False
    def test_simple_identifier_with_numbers(self):
        """Function names with numbers should return False."""
        assert is_cel_expression("evaluator_v2") is False
        assert is_cel_expression("step1") is False
    def test_simple_identifier_camelcase(self):
        """CamelCase function names should return False."""
        assert is_cel_expression("myEvaluator") is False
        assert is_cel_expression("checkCondition") is False
    def test_simple_identifier_underscore_prefix(self):
        """Private function names with underscore prefix should return False."""
        assert is_cel_expression("_private_func") is False
        assert is_cel_expression("__double_underscore") is False
    def test_simple_identifier_single_char(self):
        """Single character identifiers should return False."""
        assert is_cel_expression("x") is False
        assert is_cel_expression("_") is False
    # ============================================================================
    # DOT OPERATOR (Should return True - method calls, property access)
    # ============================================================================
    def test_dot_method_call(self):
        """Method calls with dot should return True."""
        assert is_cel_expression("input.contains('test')") is True
        assert is_cel_expression("input.size()") is True
        assert is_cel_expression("previous_step_content.startsWith('hello')") is True
    def test_dot_property_access(self):
        """Property access with dot should return True."""
        assert is_cel_expression("additional_data.route") is True
        assert is_cel_expression("session_state.user_type") is True
    def test_dot_chained_access(self):
        """Chained property/method access should return True."""
        assert is_cel_expression("additional_data.user.name") is True
        assert is_cel_expression("input.trim().size()") is True
    # ============================================================================
    # COMPARISON OPERATORS (Should return True)
    # ============================================================================
    def test_equality_operators(self):
        """Equality operators should return True."""
        assert is_cel_expression("x == 5") is True
        assert is_cel_expression("name != 'admin'") is True
        assert is_cel_expression("previous_step_name == 'step1'") is True
    def test_comparison_operators(self):
        """Comparison operators should return True."""
        assert is_cel_expression("size > 100") is True
        assert is_cel_expression("count < 10") is True
        assert is_cel_expression("age >= 18") is True
        assert is_cel_expression("score <= 100") is True
    # ============================================================================
    # LOGICAL OPERATORS (Should return True)
    # ============================================================================
    def test_and_operator(self):
        """AND operator should return True."""
        assert is_cel_expression("a && b") is True
        assert is_cel_expression("previous_step_content.size() > 0 && input.size() > 0") is True
    def test_or_operator(self):
        """OR operator should return True."""
        assert is_cel_expression("a || b") is True
        assert is_cel_expression("is_admin || is_moderator") is True
    def test_not_operator(self):
        """NOT operator should return True."""
        assert is_cel_expression("!is_empty") is True
        assert is_cel_expression("!all_success") is True
    def test_complex_logical(self):
        """Complex logical expressions should return True."""
        assert is_cel_expression("(a && b) || (c && d)") is True
        assert is_cel_expression("!empty && size > 0") is True
    # ============================================================================
    # ARITHMETIC OPERATORS (Should return True)
    # ============================================================================
    def test_arithmetic_operators(self):
        """Arithmetic operators should return True."""
        assert is_cel_expression("a + b") is True
        assert is_cel_expression("x - y") is True
        assert is_cel_expression("count * 2") is True
        assert is_cel_expression("total / 4") is True
        assert is_cel_expression("num % 2") is True
    # ============================================================================
    # TERNARY OPERATOR (Should return True)
    # ============================================================================
    def test_ternary_operator(self):
        """Ternary operator should return True."""
        assert is_cel_expression("condition ? 'yes' : 'no'") is True
        assert is_cel_expression("x > 0 ? x : -x") is True
        assert is_cel_expression('input.contains("video") ? "video_step" : "image_step"') is True
    # ============================================================================
    # PARENTHESES AND BRACKETS (Should return True)
    # ============================================================================
    def test_parentheses(self):
        """Parentheses should return True."""
        assert is_cel_expression("(a)") is True
        assert is_cel_expression("func()") is True
        assert is_cel_expression("(x + y) * z") is True
    def test_brackets(self):
        """Brackets (array/map access) should return True."""
        assert is_cel_expression("arr[0]") is True
        assert is_cel_expression("map['key']") is True
        assert is_cel_expression("additional_data['route']") is True
    # ============================================================================
    # STRING LITERALS (Should return True)
    # ============================================================================
    def test_double_quoted_strings(self):
        """Double-quoted strings should return True."""
        assert is_cel_expression('"hello"') is True
        assert is_cel_expression('input == "test"') is True
    def test_single_quoted_strings(self):
        """Single-quoted strings should return True."""
        assert is_cel_expression("'hello'") is True
        assert is_cel_expression("input == 'test'") is True
    # ============================================================================
    # BOOLEAN LITERALS
    # ============================================================================
    def test_true_literal_standalone(self):
        """Standalone 'true' matches identifier regex, so returns False.

        This is correct behavior - the function can't distinguish between
        a function named 'true' and the CEL boolean literal. Users should
        use selector_type='cel' explicitly if needed.
        """
        assert is_cel_expression("true") is False
    def test_true_literal_in_expression(self):
        """'true' in an expression context should return True."""
        assert is_cel_expression("has_content == true") is True
    def test_false_literal_standalone(self):
        """Standalone 'false' matches identifier regex, so returns False."""
        assert is_cel_expression("false") is False
    def test_false_literal_in_expression(self):
        """'false' in an expression context should return True."""
        assert is_cel_expression("is_empty == false") is True
    # ============================================================================
    # IN OPERATOR (Should return True)
    # ============================================================================
    def test_in_operator(self):
        """'in' operator should return True."""
        assert is_cel_expression("x in list") is True
        assert is_cel_expression("'admin' in roles") is True
        assert is_cel_expression("name in allowed_names") is True
    # ============================================================================
    # REAL-WORLD CEL EXPRESSIONS FROM COOKBOOKS
    # ============================================================================
    def test_cookbook_condition_examples(self):
        """Test CEL expressions from condition cookbooks."""
        # Basic condition
        assert is_cel_expression('input.contains("urgent")') is True
        # Previous step content check
        assert is_cel_expression("previous_step_content.size() > 500") is True
        # Additional data access
        assert is_cel_expression('additional_data.priority == "high"') is True
        # Session state
        assert is_cel_expression("session_state.request_count > 5") is True
        # Combined conditions
        assert is_cel_expression('previous_step_content.size() > 0 && previous_step_content.contains("error")') is True
    def test_cookbook_loop_examples(self):
        """Test CEL expressions from loop cookbooks."""
        # Iteration check
        assert is_cel_expression("current_iteration >= 3") is True
        # Max iterations
        assert is_cel_expression("current_iteration >= max_iterations - 1") is True
        # Success check - standalone identifier returns False (matches identifier regex)
        # In practice, this is used in compound expressions like "all_success && current_iteration >= 2"
        assert is_cel_expression("all_success") is False
        assert is_cel_expression("all_success && current_iteration >= 2") is True
        # Last step content
        assert is_cel_expression('last_step_content.contains("DONE")') is True
        # Step outputs map
        assert is_cel_expression("step_outputs.size() >= 2") is True
        assert is_cel_expression('step_outputs.Research.contains("DONE")') is True
    def test_cookbook_router_examples(self):
        """Test CEL expressions from router cookbooks."""
        # Route based on input
        assert is_cel_expression('input.contains("video") ? "video_step" : "image_step"') is True
        # Additional data routing
        assert is_cel_expression("additional_data.route") is True
        # Compound selector
        assert is_cel_expression('additional_data.priority == "high" ? "fast_step" : "normal_step"') is True
    # ============================================================================
    # EDGE CASES
    # ============================================================================
    def test_empty_string(self):
        """Empty string should return False (not a valid expression or function)."""
        assert is_cel_expression("") is False
    def test_whitespace_only(self):
        """Whitespace-only string should return False."""
        assert is_cel_expression(" ") is False
    def test_numbers_only(self):
        """Numbers are not valid identifiers, but also not CEL expressions."""
        # Numbers starting with digit don't match identifier regex
        # But they also don't contain CEL indicators
        assert is_cel_expression("123") is False
        assert is_cel_expression("3point14") is False
    def test_dotted_module_path_like(self):
        """Dotted strings that look like module paths should return True.

        Note: This is a tradeoff - 'my.evaluator' looks like a module path but
        will be detected as CEL due to the dot. This is acceptable because:
        1. Registry lookups use simple function names, not dotted paths
        2. If someone needs a dotted name, they can use selector_type='function'
        """
        assert is_cel_expression("my.evaluator") is True
        assert is_cel_expression("module.submodule.func") is True
    def test_reserved_words_standalone(self):
        """Standalone reserved words match identifier regex, so return False.

        This is a known limitation - 'true', 'false', 'all_success' etc.
        are valid Python identifiers. The function prioritizes avoiding
        false positives (treating function names as CEL) over catching
        these edge cases. Users can use selector_type='cel' explicitly.
        """
        assert is_cel_expression("true") is False
        assert is_cel_expression("false") is False
        # But when used in expressions, they're detected
        assert is_cel_expression("x == true") is True
        assert is_cel_expression("y == false") is True
    def test_reserved_words_in_identifier(self):
        """Identifiers containing reserved words should return False if valid identifier."""
        # 'true' and 'false' as substrings in valid identifiers
        assert is_cel_expression("is_true") is False
        assert is_cel_expression("check_false") is False
        assert is_cel_expression("trueValue") is False
        assert is_cel_expression("falseFlag") is False
class TestValidateCelExpression:
    """Tests for validate_cel_expression function.

    Every test is skipped when the optional cel-python dependency is absent,
    since validation then unconditionally returns False.
    """

    @pytest.mark.skipif(not CEL_AVAILABLE, reason="cel-python not installed")
    def test_valid_simple_expression(self):
        """Valid simple expressions should validate."""
        for expression in ("true", "false", "1 + 2"):
            assert validate_cel_expression(expression) is True

    @pytest.mark.skipif(not CEL_AVAILABLE, reason="cel-python not installed")
    def test_valid_comparison(self):
        """Valid comparison expressions should validate."""
        for expression in ("x > 5", "name == 'test'"):
            assert validate_cel_expression(expression) is True

    @pytest.mark.skipif(not CEL_AVAILABLE, reason="cel-python not installed")
    def test_valid_logical(self):
        """Valid logical expressions should validate."""
        for expression in ("a && b", "a || b", "!flag"):
            assert validate_cel_expression(expression) is True

    @pytest.mark.skipif(not CEL_AVAILABLE, reason="cel-python not installed")
    def test_valid_ternary(self):
        """Valid ternary expressions should validate."""
        assert validate_cel_expression("x ? 'yes' : 'no'") is True

    @pytest.mark.skipif(not CEL_AVAILABLE, reason="cel-python not installed")
    def test_valid_method_call(self):
        """Valid method call expressions should validate."""
        for expression in ("'hello'.size()", "'test'.contains('es')"):
            assert validate_cel_expression(expression) is True

    @pytest.mark.skipif(not CEL_AVAILABLE, reason="cel-python not installed")
    def test_invalid_syntax(self):
        """Invalid syntax should not validate."""
        for expression in ("x +", "((())", "a && "):
            assert validate_cel_expression(expression) is False

    @pytest.mark.skipif(not CEL_AVAILABLE, reason="cel-python not installed")
    def test_empty_expression(self):
        """Empty expression should not validate."""
        assert validate_cel_expression("") is False
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/workflow/test_cel.py",
"license": "Apache License 2.0",
"lines": 268,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/workflow/test_condition_serialization.py | """
Unit tests for Condition serialization and deserialization.
Tests cover:
- to_dict(): Serialization of Condition to dictionary
- from_dict(): Deserialization of Condition from dictionary
- Roundtrip serialization (no data loss)
- evaluator callable serialization
- Boolean evaluator serialization
- Nested step serialization
"""
from typing import List
import pytest
from agno.registry import Registry
from agno.workflow.condition import Condition
from agno.workflow.step import Step
from agno.workflow.types import StepInput
# =============================================================================
# Sample executor and evaluator functions for testing
# =============================================================================
def condition_executor_1(step_input: StepInput) -> str:
    """Sample executor returning a fixed marker string."""
    return "result_1"


def condition_executor_2(step_input: StepInput) -> str:
    """Second sample executor returning a fixed marker string."""
    return "result_2"


def always_true_evaluator(step_input: StepInput) -> bool:
    """Evaluator stub that unconditionally passes."""
    return True


def always_false_evaluator(step_input: StepInput) -> bool:
    """Evaluator stub that unconditionally fails."""
    return False


def content_based_evaluator(step_input: StepInput) -> bool:
    """Pass only when the workflow input mentions 'proceed' (case-insensitive)."""
    return "proceed" in str(step_input.input).lower()


def complex_evaluator(step_input: StepInput) -> bool:
    """Pass when the previous step's content mentions 'success'."""
    previous = step_input.previous_step_content
    if previous:
        return "success" in str(previous).lower()
    return False
# =============================================================================
# Fixtures
# =============================================================================
@pytest.fixture
def registry_with_functions():
    """Create a registry with sample functions registered."""
    # All executors and evaluators used across these tests, so from_dict
    # can resolve them back from their serialized names.
    sample_functions = [
        condition_executor_1,
        condition_executor_2,
        always_true_evaluator,
        always_false_evaluator,
        content_based_evaluator,
        complex_evaluator,
    ]
    return Registry(functions=sample_functions)
@pytest.fixture
def simple_steps():
    """Create simple steps for testing."""
    specs = [
        ("condition-step-1", condition_executor_1),
        ("condition-step-2", condition_executor_2),
    ]
    return [Step(name=step_name, executor=fn) for step_name, fn in specs]
# =============================================================================
# to_dict() Tests
# =============================================================================
class TestConditionToDict:
    """Tests for Condition.to_dict() method.

    Verifies that the serialized dictionary carries the type tag, metadata,
    the evaluator (function name or raw bool), and fully-detailed nested steps.
    """
    def test_to_dict_basic(self, simple_steps):
        """Test to_dict with basic Condition configuration."""
        condition = Condition(
            name="basic-condition",
            description="Basic condition step",
            evaluator=always_true_evaluator,
            steps=simple_steps,
        )
        result = condition.to_dict()
        assert result["type"] == "Condition"
        assert result["name"] == "basic-condition"
        assert result["description"] == "Basic condition step"
        assert len(result["steps"]) == 2
    def test_to_dict_serializes_callable_evaluator(self, simple_steps):
        """Test to_dict serializes callable evaluator by name."""
        condition = Condition(
            name="callable-condition",
            evaluator=content_based_evaluator,
            steps=simple_steps,
        )
        result = condition.to_dict()
        # Callables are stored by __name__ so a Registry can restore them.
        assert result["evaluator"] == "content_based_evaluator"
    def test_to_dict_serializes_boolean_evaluator(self, simple_steps):
        """Test to_dict serializes boolean evaluator directly."""
        # Test with True
        condition_true = Condition(
            name="true-condition",
            evaluator=True,
            steps=simple_steps,
        )
        result_true = condition_true.to_dict()
        assert result_true["evaluator"] is True
        # Test with False
        condition_false = Condition(
            name="false-condition",
            evaluator=False,
            steps=simple_steps,
        )
        result_false = condition_false.to_dict()
        assert result_false["evaluator"] is False
    def test_to_dict_serializes_nested_steps(self, simple_steps):
        """Test to_dict serializes nested steps correctly."""
        condition = Condition(name="nested-condition", evaluator=always_true_evaluator, steps=simple_steps)
        result = condition.to_dict()
        assert len(result["steps"]) == 2
        assert result["steps"][0]["name"] == "condition-step-1"
        assert result["steps"][0]["type"] == "Step"
        assert result["steps"][1]["name"] == "condition-step-2"
        assert result["steps"][1]["type"] == "Step"
    def test_to_dict_preserves_step_details(self):
        """Test to_dict preserves all step configuration details."""
        step = Step(
            name="detailed-step",
            executor=condition_executor_1,
            description="Detailed description",
            max_retries=5,
            skip_on_failure=True,
        )
        condition = Condition(name="detail-condition", evaluator=always_true_evaluator, steps=[step])
        result = condition.to_dict()
        step_data = result["steps"][0]
        assert step_data["name"] == "detailed-step"
        assert step_data["description"] == "Detailed description"
        assert step_data["max_retries"] == 5
        assert step_data["skip_on_failure"] is True
# =============================================================================
# from_dict() Tests
# =============================================================================
class TestConditionFromDict:
    """Unit tests covering Condition.from_dict() deserialization."""

    @staticmethod
    def _step_payload(name, executor_ref):
        """Build the serialized form of a basic Step with default options."""
        return {
            "type": "Step",
            "name": name,
            "executor_ref": executor_ref,
            "max_retries": 3,
            "skip_on_failure": False,
            "strict_input_validation": False,
        }

    def test_from_dict_basic(self, registry_with_functions):
        """A basic payload deserializes into a matching Condition."""
        payload = {
            "type": "Condition",
            "name": "basic-condition",
            "description": "Basic condition step",
            "evaluator": "always_true_evaluator",
            "steps": [self._step_payload("condition-step-1", "condition_executor_1")],
        }
        restored = Condition.from_dict(payload, registry=registry_with_functions)
        assert restored.name == "basic-condition"
        assert restored.description == "Basic condition step"
        assert restored.evaluator == always_true_evaluator
        assert len(restored.steps) == 1

    def test_from_dict_restores_callable_evaluator(self, registry_with_functions):
        """A named evaluator is resolved back to its function via the registry."""
        payload = {
            "type": "Condition",
            "name": "callable-condition",
            "description": None,
            "evaluator": "content_based_evaluator",
            "steps": [self._step_payload("step", "condition_executor_1")],
        }
        restored = Condition.from_dict(payload, registry=registry_with_functions)
        assert restored.evaluator == content_based_evaluator
        assert callable(restored.evaluator)

    def test_from_dict_restores_boolean_evaluator(self, registry_with_functions):
        """Boolean evaluators are restored verbatim (True/False)."""
        for flag, label in ((True, "true-condition"), (False, "false-condition")):
            payload = {
                "type": "Condition",
                "name": label,
                "description": None,
                "evaluator": flag,
                "steps": [],
            }
            restored = Condition.from_dict(payload, registry=registry_with_functions)
            assert restored.evaluator is flag

    def test_from_dict_raises_without_registry_for_callable_evaluator(self):
        """Resolving a named evaluator without a registry raises ValueError."""
        payload = {
            "type": "Condition",
            "name": "callable-condition",
            "description": None,
            "evaluator": "always_true_evaluator",
            "steps": [],
        }
        with pytest.raises(ValueError, match="Registry required"):
            Condition.from_dict(payload, registry=None)

    def test_from_dict_raises_for_unknown_evaluator(self, registry_with_functions):
        """An evaluator name absent from the registry raises ValueError."""
        payload = {
            "type": "Condition",
            "name": "unknown-evaluator-condition",
            "description": None,
            "evaluator": "unknown_evaluator",
            "steps": [],
        }
        with pytest.raises(ValueError, match="not found in registry"):
            Condition.from_dict(payload, registry=registry_with_functions)

    def test_from_dict_with_multiple_steps(self, registry_with_functions):
        """All nested step payloads are deserialized in order."""
        payload = {
            "type": "Condition",
            "name": "multi-condition",
            "description": None,
            "evaluator": "always_true_evaluator",
            "steps": [
                self._step_payload("condition-step-1", "condition_executor_1"),
                self._step_payload("condition-step-2", "condition_executor_2"),
            ],
        }
        restored = Condition.from_dict(payload, registry=registry_with_functions)
        assert len(restored.steps) == 2
        assert restored.steps[0].name == "condition-step-1"
        assert restored.steps[1].name == "condition-step-2"
# =============================================================================
# Roundtrip Tests
# =============================================================================
class TestConditionSerializationRoundtrip:
    """Tests for Condition serialization roundtrip (to_dict -> from_dict)."""
    # Each test serializes a Condition with to_dict() and rebuilds it with
    # from_dict(), then asserts that no information was lost in the process.
    def test_roundtrip_basic(self, registry_with_functions):
        """Test roundtrip preserves basic Condition configuration."""
        step_1 = Step(name="step-1", executor=condition_executor_1)
        step_2 = Step(name="step-2", executor=condition_executor_2)
        original = Condition(
            name="roundtrip-condition",
            description="Test description",
            evaluator=always_true_evaluator,
            steps=[step_1, step_2],
        )
        # Serialize
        data = original.to_dict()
        # Deserialize
        restored = Condition.from_dict(data, registry=registry_with_functions)
        # Verify no data loss
        assert restored.name == original.name
        assert restored.description == original.description
        assert len(restored.steps) == len(original.steps)
    def test_roundtrip_preserves_type_field(self, registry_with_functions):
        """Test roundtrip preserves type field for proper deserialization dispatch."""
        step = Step(name="typed-step", executor=condition_executor_1)
        condition = Condition(name="typed-condition", evaluator=always_true_evaluator, steps=[step])
        data = condition.to_dict()
        assert data["type"] == "Condition"
        restored = Condition.from_dict(data, registry=registry_with_functions)
        assert restored.name == "typed-condition"
    def test_roundtrip_preserves_callable_evaluator(self, registry_with_functions):
        """Test roundtrip preserves callable evaluator function reference."""
        step = Step(name="evaluator-step", executor=condition_executor_1)
        original = Condition(
            name="evaluator-condition",
            evaluator=complex_evaluator,
            steps=[step],
        )
        data = original.to_dict()
        restored = Condition.from_dict(data, registry=registry_with_functions)
        # Function == is identity in Python, so this checks the registry handed
        # back the very same function object.
        assert restored.evaluator == complex_evaluator
        assert callable(restored.evaluator)
    def test_roundtrip_preserves_boolean_evaluator(self, registry_with_functions):
        """Test roundtrip preserves boolean evaluator."""
        step = Step(name="bool-step", executor=condition_executor_1)
        # Test with True
        original_true = Condition(name="true-condition", evaluator=True, steps=[step])
        data_true = original_true.to_dict()
        restored_true = Condition.from_dict(data_true, registry=registry_with_functions)
        assert restored_true.evaluator is True
        # Test with False
        original_false = Condition(name="false-condition", evaluator=False, steps=[step])
        data_false = original_false.to_dict()
        restored_false = Condition.from_dict(data_false, registry=registry_with_functions)
        assert restored_false.evaluator is False
    def test_roundtrip_preserves_nested_step_names(self, registry_with_functions):
        """Test roundtrip preserves all nested step names."""
        steps = [
            Step(name="first", executor=condition_executor_1),
            Step(name="second", executor=condition_executor_2),
        ]
        original = Condition(name="multi-step-condition", evaluator=always_true_evaluator, steps=steps)
        data = original.to_dict()
        restored = Condition.from_dict(data, registry=registry_with_functions)
        # Order matters: steps must come back in their original sequence.
        assert len(restored.steps) == 2
        assert restored.steps[0].name == "first"
        assert restored.steps[1].name == "second"
    def test_roundtrip_preserves_step_executors(self, registry_with_functions):
        """Test roundtrip preserves executor function references."""
        step_1 = Step(name="step-1", executor=condition_executor_1)
        step_2 = Step(name="step-2", executor=condition_executor_2)
        original = Condition(name="executor-condition", evaluator=always_true_evaluator, steps=[step_1, step_2])
        data = original.to_dict()
        restored = Condition.from_dict(data, registry=registry_with_functions)
        assert restored.steps[0].executor == condition_executor_1
        assert restored.steps[1].executor == condition_executor_2
    def test_roundtrip_preserves_step_configuration(self, registry_with_functions):
        """Test roundtrip preserves all step configuration fields."""
        # Exercise every optional Step field at a non-default value so any
        # dropped field shows up as an assertion failure below.
        step = Step(
            name="configured-step",
            executor=condition_executor_1,
            description="Step description",
            max_retries=5,
            skip_on_failure=True,
            strict_input_validation=True,
            add_workflow_history=True,
            num_history_runs=8,
        )
        original = Condition(name="config-condition", evaluator=always_true_evaluator, steps=[step])
        data = original.to_dict()
        restored = Condition.from_dict(data, registry=registry_with_functions)
        restored_step = restored.steps[0]
        assert restored_step.name == "configured-step"
        assert restored_step.description == "Step description"
        assert restored_step.max_retries == 5
        assert restored_step.skip_on_failure is True
        assert restored_step.strict_input_validation is True
        assert restored_step.add_workflow_history is True
        assert restored_step.num_history_runs == 8
    def test_roundtrip_with_different_evaluators(self, registry_with_functions):
        """Test roundtrip with different evaluator functions."""
        # All four evaluators are registered in the registry_with_functions
        # fixture, so each should resolve back to the same function object.
        for evaluator in [always_true_evaluator, always_false_evaluator, content_based_evaluator, complex_evaluator]:
            step = Step(name="test-step", executor=condition_executor_1)
            original = Condition(
                name=f"condition-with-{evaluator.__name__}",
                evaluator=evaluator,
                steps=[step],
            )
            data = original.to_dict()
            restored = Condition.from_dict(data, registry=registry_with_functions)
            assert restored.evaluator == evaluator
# =============================================================================
# Nested Container Tests
# =============================================================================
class TestConditionNestedContainerSerialization:
    """Tests for Condition with nested container steps (Parallel, Steps, Loop, etc.)."""
    # Container types are imported locally inside each test to keep the
    # module import graph minimal when only a subset of tests runs.
    def test_roundtrip_with_nested_condition(self, registry_with_functions):
        """Test roundtrip with nested Condition inside Condition."""
        inner_step = Step(name="inner-step", executor=condition_executor_1)
        inner_condition = Condition(name="inner-condition", evaluator=always_true_evaluator, steps=[inner_step])
        outer_condition = Condition(name="outer-condition", evaluator=always_false_evaluator, steps=[inner_condition])
        data = outer_condition.to_dict()
        restored = Condition.from_dict(data, registry=registry_with_functions)
        assert restored.name == "outer-condition"
        assert len(restored.steps) == 1
        # The nested entry must be rebuilt as a Condition, not a plain Step.
        assert restored.steps[0].name == "inner-condition"
        assert isinstance(restored.steps[0], Condition)
    def test_roundtrip_with_nested_parallel(self, registry_with_functions):
        """Test roundtrip with nested Parallel container inside Condition."""
        from agno.workflow.parallel import Parallel
        step_1 = Step(name="step-1", executor=condition_executor_1)
        step_2 = Step(name="step-2", executor=condition_executor_2)
        parallel = Parallel(step_1, step_2, name="parallel-container")
        condition = Condition(name="condition-with-parallel", evaluator=always_true_evaluator, steps=[parallel])
        data = condition.to_dict()
        restored = Condition.from_dict(data, registry=registry_with_functions)
        assert restored.name == "condition-with-parallel"
        assert len(restored.steps) == 1
        assert restored.steps[0].name == "parallel-container"
        assert isinstance(restored.steps[0], Parallel)
        assert len(restored.steps[0].steps) == 2
    def test_roundtrip_with_nested_loop(self, registry_with_functions):
        """Test roundtrip with nested Loop container inside Condition."""
        from agno.workflow.loop import Loop
        step = Step(name="loop-step", executor=condition_executor_1)
        loop = Loop(name="loop-container", steps=[step], max_iterations=3)
        condition = Condition(name="condition-with-loop", evaluator=always_true_evaluator, steps=[loop])
        data = condition.to_dict()
        restored = Condition.from_dict(data, registry=registry_with_functions)
        assert restored.name == "condition-with-loop"
        assert len(restored.steps) == 1
        assert restored.steps[0].name == "loop-container"
        assert isinstance(restored.steps[0], Loop)
        # Loop-specific configuration must survive alongside the container type.
        assert restored.steps[0].max_iterations == 3
    def test_roundtrip_with_nested_steps_container(self, registry_with_functions):
        """Test roundtrip with nested Steps container inside Condition."""
        from agno.workflow.steps import Steps
        step_1 = Step(name="step-1", executor=condition_executor_1)
        step_2 = Step(name="step-2", executor=condition_executor_2)
        steps_container = Steps(name="steps-container", steps=[step_1, step_2])
        condition = Condition(name="condition-with-steps", evaluator=always_true_evaluator, steps=[steps_container])
        data = condition.to_dict()
        restored = Condition.from_dict(data, registry=registry_with_functions)
        assert restored.name == "condition-with-steps"
        assert len(restored.steps) == 1
        assert restored.steps[0].name == "steps-container"
        assert isinstance(restored.steps[0], Steps)
        assert len(restored.steps[0].steps) == 2
    def test_roundtrip_with_nested_router(self, registry_with_functions):
        """Test roundtrip with nested Router container inside Condition."""
        # NOTE(review): the registry_with_functions fixture is unused here;
        # a local registry including test_selector is built instead.
        from agno.workflow.router import Router
        # Add a selector function to the registry
        def test_selector(step_input: StepInput) -> List[Step]:
            return []
        # Create new registry with selector
        registry = Registry(
            functions=[
                condition_executor_1,
                condition_executor_2,
                always_true_evaluator,
                test_selector,
            ]
        )
        choice = Step(name="router-choice", executor=condition_executor_1)
        router = Router(name="router-container", selector=test_selector, choices=[choice])
        condition = Condition(name="condition-with-router", evaluator=always_true_evaluator, steps=[router])
        data = condition.to_dict()
        restored = Condition.from_dict(data, registry=registry)
        assert restored.name == "condition-with-router"
        assert len(restored.steps) == 1
        assert restored.steps[0].name == "router-container"
        assert isinstance(restored.steps[0], Router)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/workflow/test_condition_serialization.py",
"license": "Apache License 2.0",
"lines": 448,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/workflow/test_loop_serialization.py | """
Unit tests for Loop serialization and deserialization.
Tests cover:
- to_dict(): Serialization of Loop to dictionary
- from_dict(): Deserialization of Loop from dictionary
- Roundtrip serialization (no data loss)
- end_condition callable serialization
- Nested step serialization
"""
from typing import List
import pytest
from agno.registry import Registry
from agno.workflow.loop import Loop
from agno.workflow.step import Step
from agno.workflow.types import StepInput, StepOutput
# =============================================================================
# Sample executor and end_condition functions for testing
# =============================================================================
def loop_executor_1(step_input: "StepInput") -> str:
    """First loop executor; ignores its input and returns a fixed marker."""
    return "result_1"
def loop_executor_2(step_input: "StepInput") -> str:
    """Second loop executor; ignores its input and returns a fixed marker."""
    return "result_2"
def simple_end_condition(results: "List[StepOutput]") -> bool:
    """Never-terminating end condition: always signals 'keep looping'."""
    return False
def count_end_condition(results: "List[StepOutput]") -> bool:
    """Stop looping once three or more step results have accumulated."""
    return len(results) >= 3
def content_end_condition(results: "List[StepOutput]") -> bool:
    """Stop looping when the latest result's content mentions 'done' (case-insensitive)."""
    if not results:
        return False
    latest = str(results[-1].content).lower()
    return "done" in latest
# =============================================================================
# Fixtures
# =============================================================================
@pytest.fixture
def registry_with_functions():
    """Registry pre-populated with every executor and end condition used in these tests."""
    registered = [
        loop_executor_1,
        loop_executor_2,
        simple_end_condition,
        count_end_condition,
        content_end_condition,
    ]
    return Registry(functions=registered)
@pytest.fixture
def simple_steps():
    """Two plain Steps wired to the module-level loop executors."""
    first = Step(name="loop-step-1", executor=loop_executor_1)
    second = Step(name="loop-step-2", executor=loop_executor_2)
    return [first, second]
# =============================================================================
# to_dict() Tests
# =============================================================================
class TestLoopToDict:
    """Unit tests covering the dictionary produced by Loop.to_dict()."""

    def test_to_dict_basic(self, simple_steps):
        """Serializing a configured Loop yields every top-level field."""
        loop = Loop(
            name="basic-loop",
            description="Basic loop step",
            steps=simple_steps,
            max_iterations=5,
        )
        serialized = loop.to_dict()
        assert serialized["type"] == "Loop"
        assert serialized["name"] == "basic-loop"
        assert serialized["description"] == "Basic loop step"
        assert serialized["max_iterations"] == 5
        assert len(serialized["steps"]) == 2

    def test_to_dict_serializes_end_condition(self, simple_steps):
        """A callable end_condition is serialized by its function name."""
        loop = Loop(
            name="condition-loop",
            steps=simple_steps,
            max_iterations=10,
            end_condition=simple_end_condition,
        )
        assert loop.to_dict()["end_condition"] == "simple_end_condition"

    def test_to_dict_with_none_end_condition(self, simple_steps):
        """A missing end_condition serializes as None."""
        loop = Loop(
            name="no-condition-loop",
            steps=simple_steps,
            max_iterations=3,
            end_condition=None,
        )
        assert loop.to_dict()["end_condition"] is None

    def test_to_dict_serializes_nested_steps(self, simple_steps):
        """Nested steps appear in order with their names and types."""
        serialized = Loop(name="nested-loop", steps=simple_steps, max_iterations=3).to_dict()
        assert len(serialized["steps"]) == 2
        for idx, expected_name in enumerate(["loop-step-1", "loop-step-2"]):
            assert serialized["steps"][idx]["name"] == expected_name
            assert serialized["steps"][idx]["type"] == "Step"

    def test_to_dict_preserves_step_details(self):
        """Every step configuration field survives serialization."""
        detailed = Step(
            name="detailed-step",
            executor=loop_executor_1,
            description="Detailed description",
            max_retries=5,
            skip_on_failure=True,
        )
        entry = Loop(name="detail-loop", steps=[detailed], max_iterations=2).to_dict()["steps"][0]
        assert entry["name"] == "detailed-step"
        assert entry["description"] == "Detailed description"
        assert entry["max_retries"] == 5
        assert entry["skip_on_failure"] is True

    def test_to_dict_default_max_iterations(self, simple_steps):
        """When unspecified, max_iterations serializes to its default of 3."""
        serialized = Loop(name="default-loop", steps=simple_steps).to_dict()
        assert serialized["max_iterations"] == 3
# =============================================================================
# from_dict() Tests
# =============================================================================
class TestLoopFromDict:
    """Unit tests covering Loop.from_dict() deserialization."""

    @staticmethod
    def _step_payload(name, executor_ref):
        """Build the serialized form of a basic Step with default options."""
        return {
            "type": "Step",
            "name": name,
            "executor_ref": executor_ref,
            "max_retries": 3,
            "skip_on_failure": False,
            "strict_input_validation": False,
        }

    def test_from_dict_basic(self, registry_with_functions):
        """A basic payload deserializes into a matching Loop."""
        payload = {
            "type": "Loop",
            "name": "basic-loop",
            "description": "Basic loop step",
            "max_iterations": 5,
            "end_condition": None,
            "steps": [self._step_payload("loop-step-1", "loop_executor_1")],
        }
        restored = Loop.from_dict(payload, registry=registry_with_functions)
        assert restored.name == "basic-loop"
        assert restored.description == "Basic loop step"
        assert restored.max_iterations == 5
        assert restored.end_condition is None
        assert len(restored.steps) == 1

    def test_from_dict_restores_end_condition(self, registry_with_functions):
        """A named end_condition is resolved back to its function via the registry."""
        payload = {
            "type": "Loop",
            "name": "condition-loop",
            "description": None,
            "max_iterations": 10,
            "end_condition": "simple_end_condition",
            "steps": [self._step_payload("loop-step", "loop_executor_1")],
        }
        restored = Loop.from_dict(payload, registry=registry_with_functions)
        assert restored.end_condition == simple_end_condition
        assert callable(restored.end_condition)

    def test_from_dict_raises_without_registry_for_end_condition(self):
        """Resolving a named end_condition without a registry raises ValueError."""
        payload = {
            "type": "Loop",
            "name": "condition-loop",
            "description": None,
            "max_iterations": 10,
            "end_condition": "simple_end_condition",
            "steps": [],
        }
        with pytest.raises(ValueError, match="Registry required"):
            Loop.from_dict(payload, registry=None)

    def test_from_dict_raises_for_unknown_end_condition(self, registry_with_functions):
        """An end_condition name absent from the registry raises ValueError."""
        payload = {
            "type": "Loop",
            "name": "unknown-condition-loop",
            "description": None,
            "max_iterations": 5,
            "end_condition": "unknown_function",
            "steps": [],
        }
        with pytest.raises(ValueError, match="not found in registry"):
            Loop.from_dict(payload, registry=registry_with_functions)

    def test_from_dict_with_multiple_steps(self, registry_with_functions):
        """All nested step payloads are deserialized in order."""
        payload = {
            "type": "Loop",
            "name": "multi-loop",
            "description": None,
            "max_iterations": 3,
            "end_condition": None,
            "steps": [
                self._step_payload("loop-step-1", "loop_executor_1"),
                self._step_payload("loop-step-2", "loop_executor_2"),
            ],
        }
        restored = Loop.from_dict(payload, registry=registry_with_functions)
        assert len(restored.steps) == 2
        assert restored.steps[0].name == "loop-step-1"
        assert restored.steps[1].name == "loop-step-2"
# =============================================================================
# Roundtrip Tests
# =============================================================================
class TestLoopSerializationRoundtrip:
    """Tests for Loop serialization roundtrip (to_dict -> from_dict)."""
    # Each test serializes a Loop with to_dict() and rebuilds it with
    # from_dict(), then asserts that no information was lost in the process.
    def test_roundtrip_basic(self, registry_with_functions):
        """Test roundtrip preserves basic Loop configuration."""
        step_1 = Step(name="step-1", executor=loop_executor_1)
        step_2 = Step(name="step-2", executor=loop_executor_2)
        original = Loop(
            name="roundtrip-loop",
            description="Test description",
            steps=[step_1, step_2],
            max_iterations=7,
        )
        # Serialize
        data = original.to_dict()
        # Deserialize
        restored = Loop.from_dict(data, registry=registry_with_functions)
        # Verify no data loss
        assert restored.name == original.name
        assert restored.description == original.description
        assert restored.max_iterations == original.max_iterations
        assert len(restored.steps) == len(original.steps)
    def test_roundtrip_preserves_type_field(self, registry_with_functions):
        """Test roundtrip preserves type field for proper deserialization dispatch."""
        step = Step(name="typed-step", executor=loop_executor_1)
        loop = Loop(name="typed-loop", steps=[step], max_iterations=3)
        data = loop.to_dict()
        assert data["type"] == "Loop"
        restored = Loop.from_dict(data, registry=registry_with_functions)
        assert restored.name == "typed-loop"
    def test_roundtrip_preserves_end_condition(self, registry_with_functions):
        """Test roundtrip preserves end_condition function reference."""
        step = Step(name="condition-step", executor=loop_executor_1)
        original = Loop(
            name="condition-loop",
            steps=[step],
            max_iterations=10,
            end_condition=count_end_condition,
        )
        data = original.to_dict()
        restored = Loop.from_dict(data, registry=registry_with_functions)
        # Function == is identity in Python, so this checks the registry handed
        # back the very same function object.
        assert restored.end_condition == count_end_condition
        assert callable(restored.end_condition)
    def test_roundtrip_preserves_none_end_condition(self, registry_with_functions):
        """Test roundtrip preserves None end_condition."""
        step = Step(name="no-condition-step", executor=loop_executor_1)
        original = Loop(
            name="no-condition-loop",
            steps=[step],
            max_iterations=5,
            end_condition=None,
        )
        data = original.to_dict()
        restored = Loop.from_dict(data, registry=registry_with_functions)
        assert restored.end_condition is None
    def test_roundtrip_preserves_nested_step_names(self, registry_with_functions):
        """Test roundtrip preserves all nested step names."""
        steps = [
            Step(name="first", executor=loop_executor_1),
            Step(name="second", executor=loop_executor_2),
        ]
        original = Loop(name="multi-step-loop", steps=steps, max_iterations=3)
        data = original.to_dict()
        restored = Loop.from_dict(data, registry=registry_with_functions)
        # Order matters: steps must come back in their original sequence.
        assert len(restored.steps) == 2
        assert restored.steps[0].name == "first"
        assert restored.steps[1].name == "second"
    def test_roundtrip_preserves_step_executors(self, registry_with_functions):
        """Test roundtrip preserves executor function references."""
        step_1 = Step(name="step-1", executor=loop_executor_1)
        step_2 = Step(name="step-2", executor=loop_executor_2)
        original = Loop(name="executor-loop", steps=[step_1, step_2], max_iterations=3)
        data = original.to_dict()
        restored = Loop.from_dict(data, registry=registry_with_functions)
        assert restored.steps[0].executor == loop_executor_1
        assert restored.steps[1].executor == loop_executor_2
    def test_roundtrip_preserves_step_configuration(self, registry_with_functions):
        """Test roundtrip preserves all step configuration fields."""
        # Exercise every optional Step field at a non-default value so any
        # dropped field shows up as an assertion failure below.
        step = Step(
            name="configured-step",
            executor=loop_executor_1,
            description="Step description",
            max_retries=5,
            skip_on_failure=True,
            strict_input_validation=True,
            add_workflow_history=True,
            num_history_runs=8,
        )
        original = Loop(name="config-loop", steps=[step], max_iterations=4)
        data = original.to_dict()
        restored = Loop.from_dict(data, registry=registry_with_functions)
        restored_step = restored.steps[0]
        assert restored_step.name == "configured-step"
        assert restored_step.description == "Step description"
        assert restored_step.max_retries == 5
        assert restored_step.skip_on_failure is True
        assert restored_step.strict_input_validation is True
        assert restored_step.add_workflow_history is True
        assert restored_step.num_history_runs == 8
    def test_roundtrip_with_different_end_conditions(self, registry_with_functions):
        """Test roundtrip with different end_condition functions."""
        # All three end conditions are registered in the registry_with_functions
        # fixture, so each should resolve back to the same function object.
        for end_cond in [simple_end_condition, count_end_condition, content_end_condition]:
            step = Step(name="test-step", executor=loop_executor_1)
            original = Loop(
                name=f"loop-with-{end_cond.__name__}",
                steps=[step],
                max_iterations=5,
                end_condition=end_cond,
            )
            data = original.to_dict()
            restored = Loop.from_dict(data, registry=registry_with_functions)
            assert restored.end_condition == end_cond
# =============================================================================
# Nested Container Tests
# =============================================================================
class TestLoopNestedContainerSerialization:
    """Roundtrip tests for Loop containing other container step types."""

    def test_roundtrip_with_nested_loop(self, registry_with_functions):
        """A Loop nested inside another Loop survives a roundtrip."""
        inner = Loop(
            name="inner-loop",
            steps=[Step(name="inner-step", executor=loop_executor_1)],
            max_iterations=2,
        )
        outer = Loop(name="outer-loop", steps=[inner], max_iterations=3)
        restored = Loop.from_dict(outer.to_dict(), registry=registry_with_functions)
        assert restored.name == "outer-loop"
        assert len(restored.steps) == 1
        nested = restored.steps[0]
        assert nested.name == "inner-loop"
        assert isinstance(nested, Loop)
        assert nested.max_iterations == 2

    def test_roundtrip_with_nested_parallel(self, registry_with_functions):
        """A Parallel container nested inside a Loop survives a roundtrip."""
        from agno.workflow.parallel import Parallel

        parallel = Parallel(
            Step(name="step-1", executor=loop_executor_1),
            Step(name="step-2", executor=loop_executor_2),
            name="parallel-container",
        )
        loop = Loop(name="loop-with-parallel", steps=[parallel], max_iterations=3)
        restored = Loop.from_dict(loop.to_dict(), registry=registry_with_functions)
        assert restored.name == "loop-with-parallel"
        assert len(restored.steps) == 1
        nested = restored.steps[0]
        assert nested.name == "parallel-container"
        assert isinstance(nested, Parallel)
        assert len(nested.steps) == 2

    def test_roundtrip_with_nested_steps_container(self, registry_with_functions):
        """A Steps container nested inside a Loop survives a roundtrip."""
        from agno.workflow.steps import Steps

        container = Steps(
            name="steps-container",
            steps=[
                Step(name="step-1", executor=loop_executor_1),
                Step(name="step-2", executor=loop_executor_2),
            ],
        )
        loop = Loop(name="loop-with-steps", steps=[container], max_iterations=3)
        restored = Loop.from_dict(loop.to_dict(), registry=registry_with_functions)
        assert restored.name == "loop-with-steps"
        assert len(restored.steps) == 1
        nested = restored.steps[0]
        assert nested.name == "steps-container"
        assert isinstance(nested, Steps)
        assert len(nested.steps) == 2
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/workflow/test_loop_serialization.py",
"license": "Apache License 2.0",
"lines": 392,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/workflow/test_parallel_serialization.py | """
Unit tests for Parallel serialization and deserialization.
Tests cover:
- to_dict(): Serialization of Parallel to dictionary
- from_dict(): Deserialization of Parallel from dictionary
- Roundtrip serialization (no data loss)
- Nested step serialization
"""
import pytest
from agno.registry import Registry
from agno.workflow.parallel import Parallel
from agno.workflow.step import Step
from agno.workflow.types import StepInput
# =============================================================================
# Sample executor functions for testing
# =============================================================================
def executor_a(step_input: "StepInput") -> str:
    """Sample executor A; ignores its input and returns a fixed marker."""
    return "result_a"
def executor_b(step_input: "StepInput") -> str:
    """Sample executor B; ignores its input and returns a fixed marker."""
    return "result_b"
def executor_c(step_input: "StepInput") -> str:
    """Sample executor C; ignores its input and returns a fixed marker."""
    return "result_c"
# =============================================================================
# Fixtures
# =============================================================================
@pytest.fixture
def registry_with_functions():
    """Registry exposing the three sample executors by name."""
    sample_executors = [executor_a, executor_b, executor_c]
    return Registry(functions=sample_executors)
@pytest.fixture
def simple_steps():
    """Two basic Steps used by the Parallel serialization tests.

    The previous version declared a ``registry_with_functions`` fixture
    dependency that it never used; it has been removed so the registry is
    not instantiated needlessly for every test requesting this fixture.
    """
    return [
        Step(name="step-a", executor=executor_a),
        Step(name="step-b", executor=executor_b),
    ]
# =============================================================================
# to_dict() Tests
# =============================================================================
class TestParallelToDict:
    """Unit tests covering the dictionary produced by Parallel.to_dict()."""

    def test_to_dict_basic(self, simple_steps):
        """Serializing a named Parallel yields every top-level field."""
        parallel = Parallel(*simple_steps, name="basic-parallel", description="Basic parallel step")
        serialized = parallel.to_dict()
        assert serialized["type"] == "Parallel"
        assert serialized["name"] == "basic-parallel"
        assert serialized["description"] == "Basic parallel step"
        assert len(serialized["steps"]) == 2

    def test_to_dict_serializes_nested_steps(self, simple_steps):
        """Nested steps appear in order with their names and types."""
        serialized = Parallel(*simple_steps, name="nested-parallel").to_dict()
        assert len(serialized["steps"]) == 2
        for idx, expected_name in enumerate(["step-a", "step-b"]):
            assert serialized["steps"][idx]["name"] == expected_name
            assert serialized["steps"][idx]["type"] == "Step"

    def test_to_dict_preserves_step_details(self):
        """Every step configuration field survives serialization."""
        detailed = Step(
            name="detailed-step",
            executor=executor_a,
            description="Detailed description",
            max_retries=5,
            skip_on_failure=True,
        )
        entry = Parallel(detailed, name="detail-parallel").to_dict()["steps"][0]
        assert entry["name"] == "detailed-step"
        assert entry["description"] == "Detailed description"
        assert entry["max_retries"] == 5
        assert entry["skip_on_failure"] is True

    def test_to_dict_with_none_values(self):
        """An unnamed Parallel serializes name and description as None."""
        serialized = Parallel(Step(name="simple-step", executor=executor_a)).to_dict()
        assert serialized["name"] is None
        assert serialized["description"] is None
        assert serialized["type"] == "Parallel"
# =============================================================================
# from_dict() Tests
# =============================================================================
class TestParallelFromDict:
    """Tests for Parallel.from_dict() method.

    The dict payloads below mirror the shape produced by Parallel.to_dict():
    nested steps reference their executor by name via "executor_ref", which
    from_dict resolves against the supplied Registry.
    """
    def test_from_dict_basic(self, registry_with_functions):
        """Test from_dict creates parallel with basic config."""
        data = {
            "type": "Parallel",
            "name": "basic-parallel",
            "description": "Basic parallel step",
            "steps": [
                {
                    "type": "Step",
                    "name": "step-a",
                    # Resolved to the executor_a function via the registry fixture.
                    "executor_ref": "executor_a",
                    "max_retries": 3,
                    "skip_on_failure": False,
                    "strict_input_validation": False,
                },
            ],
        }
        parallel = Parallel.from_dict(data, registry=registry_with_functions)
        assert parallel.name == "basic-parallel"
        assert parallel.description == "Basic parallel step"
        assert len(parallel.steps) == 1
    def test_from_dict_with_multiple_steps(self, registry_with_functions):
        """Test from_dict with multiple nested steps."""
        data = {
            "type": "Parallel",
            "name": "multi-parallel",
            "description": None,
            "steps": [
                {
                    "type": "Step",
                    "name": "step-a",
                    "executor_ref": "executor_a",
                    "max_retries": 3,
                    "skip_on_failure": False,
                    "strict_input_validation": False,
                },
                {
                    "type": "Step",
                    "name": "step-b",
                    "executor_ref": "executor_b",
                    "max_retries": 3,
                    "skip_on_failure": False,
                    "strict_input_validation": False,
                },
            ],
        }
        parallel = Parallel.from_dict(data, registry=registry_with_functions)
        # Payload order must be preserved on the restored object.
        assert len(parallel.steps) == 2
        assert parallel.steps[0].name == "step-a"
        assert parallel.steps[1].name == "step-b"
    def test_from_dict_preserves_step_configuration(self, registry_with_functions):
        """Test from_dict preserves nested step configuration."""
        data = {
            "type": "Parallel",
            "name": "config-parallel",
            "description": None,
            "steps": [
                {
                    "type": "Step",
                    "name": "configured-step",
                    "executor_ref": "executor_a",
                    "description": "Step description",
                    # Non-default values to prove each field is actually read.
                    "max_retries": 5,
                    "skip_on_failure": True,
                    "strict_input_validation": True,
                    "num_history_runs": 10,
                },
            ],
        }
        parallel = Parallel.from_dict(data, registry=registry_with_functions)
        step = parallel.steps[0]
        assert step.name == "configured-step"
        assert step.description == "Step description"
        assert step.max_retries == 5
        assert step.skip_on_failure is True
        assert step.strict_input_validation is True
        assert step.num_history_runs == 10
# =============================================================================
# Roundtrip Tests
# =============================================================================
class TestParallelSerializationRoundtrip:
    """Tests for Parallel serialization roundtrip (to_dict -> from_dict).

    Each test serializes a Parallel to a dict, deserializes it with the
    Registry fixture, and asserts that no configuration was lost.
    """
    def test_roundtrip_basic(self, registry_with_functions):
        """Test roundtrip preserves basic parallel configuration."""
        step_a = Step(name="step-a", executor=executor_a)
        step_b = Step(name="step-b", executor=executor_b)
        original = Parallel(step_a, step_b, name="roundtrip-parallel", description="Test description")
        # Serialize
        data = original.to_dict()
        # Deserialize
        restored = Parallel.from_dict(data, registry=registry_with_functions)
        # Verify no data loss
        assert restored.name == original.name
        assert restored.description == original.description
        assert len(restored.steps) == len(original.steps)
    def test_roundtrip_preserves_type_field(self, registry_with_functions):
        """Test roundtrip preserves type field for proper deserialization dispatch."""
        step = Step(name="typed-step", executor=executor_a)
        parallel = Parallel(step, name="typed-parallel")
        data = parallel.to_dict()
        assert data["type"] == "Parallel"
        restored = Parallel.from_dict(data, registry=registry_with_functions)
        assert restored.name == "typed-parallel"
    def test_roundtrip_preserves_nested_step_names(self, registry_with_functions):
        """Test roundtrip preserves all nested step names."""
        steps = [
            Step(name="first", executor=executor_a),
            Step(name="second", executor=executor_b),
            Step(name="third", executor=executor_c),
        ]
        original = Parallel(*steps, name="multi-step-parallel")
        data = original.to_dict()
        restored = Parallel.from_dict(data, registry=registry_with_functions)
        assert len(restored.steps) == 3
        assert restored.steps[0].name == "first"
        assert restored.steps[1].name == "second"
        assert restored.steps[2].name == "third"
    def test_roundtrip_preserves_step_executors(self, registry_with_functions):
        """Test roundtrip preserves executor function references."""
        step_a = Step(name="step-a", executor=executor_a)
        step_b = Step(name="step-b", executor=executor_b)
        original = Parallel(step_a, step_b, name="executor-parallel")
        data = original.to_dict()
        restored = Parallel.from_dict(data, registry=registry_with_functions)
        # Executors serialize by name only; the registry maps each name back
        # to the exact function object.
        assert restored.steps[0].executor == executor_a
        assert restored.steps[1].executor == executor_b
    def test_roundtrip_preserves_step_configuration(self, registry_with_functions):
        """Test roundtrip preserves all step configuration fields."""
        step = Step(
            name="configured-step",
            executor=executor_a,
            description="Step description",
            # Non-default values on every field so a dropped field would fail.
            max_retries=5,
            skip_on_failure=True,
            strict_input_validation=True,
            add_workflow_history=True,
            num_history_runs=8,
        )
        original = Parallel(step, name="config-parallel")
        data = original.to_dict()
        restored = Parallel.from_dict(data, registry=registry_with_functions)
        restored_step = restored.steps[0]
        assert restored_step.name == "configured-step"
        assert restored_step.description == "Step description"
        assert restored_step.max_retries == 5
        assert restored_step.skip_on_failure is True
        assert restored_step.strict_input_validation is True
        assert restored_step.add_workflow_history is True
        assert restored_step.num_history_runs == 8
    def test_roundtrip_with_none_values(self, registry_with_functions):
        """Test roundtrip handles None values correctly."""
        step = Step(name="simple-step", executor=executor_a)
        original = Parallel(step)  # name and description are None
        data = original.to_dict()
        restored = Parallel.from_dict(data, registry=registry_with_functions)
        assert restored.name is None
        assert restored.description is None
# =============================================================================
# Nested Container Tests
# =============================================================================
class TestParallelNestedContainerSerialization:
    """Roundtrip tests where Parallel contains other container step types."""
    def test_roundtrip_with_nested_parallel(self, registry_with_functions):
        """A Parallel nested inside another Parallel survives a to_dict/from_dict cycle."""
        nested = Parallel(Step(name="inner-step", executor=executor_a), name="inner-parallel")
        outer = Parallel(nested, name="outer-parallel")
        restored = Parallel.from_dict(outer.to_dict(), registry=registry_with_functions)
        assert restored.name == "outer-parallel"
        assert len(restored.steps) == 1
        inner = restored.steps[0]
        assert inner.name == "inner-parallel"
        assert isinstance(inner, Parallel)
    def test_roundtrip_with_nested_steps_container(self, registry_with_functions):
        """A Steps container nested inside Parallel survives a to_dict/from_dict cycle."""
        from agno.workflow.steps import Steps
        children = [
            Step(name="step-a", executor=executor_a),
            Step(name="step-b", executor=executor_b),
        ]
        wrapper = Parallel(Steps(name="steps-container", steps=children), name="parallel-with-steps")
        restored = Parallel.from_dict(wrapper.to_dict(), registry=registry_with_functions)
        assert restored.name == "parallel-with-steps"
        assert len(restored.steps) == 1
        container = restored.steps[0]
        assert container.name == "steps-container"
        assert isinstance(container, Steps)
        assert len(container.steps) == 2
# =============================================================================
# Dataclass Field Tests
# =============================================================================
class TestParallelStepsField:
    """Dataclass-field behavior of Parallel.steps (default_factory semantics)."""
    def test_no_args_gives_empty_steps(self):
        """Parallel() starts with an empty steps list and no name/description."""
        empty = Parallel()
        assert empty.steps == []
        assert empty.name is None
        assert empty.description is None
    def test_instances_do_not_share_steps(self):
        """Appending to one instance's steps must not leak into another instance."""
        first = Parallel()
        second = Parallel()
        first.steps.append(Step(name="only-for-p1", executor=executor_a))
        assert len(first.steps) == 1
        assert len(second.steps) == 0
    def test_steps_set_via_init_args(self):
        """Positional step arguments are stored in order, by identity."""
        a = Step(name="a", executor=executor_a)
        b = Step(name="b", executor=executor_b)
        container = Parallel(a, b)
        assert len(container.steps) == 2
        assert container.steps[0] is a
        assert container.steps[1] is b
    def test_steps_field_is_dataclass_field(self):
        """steps, name and description are all declared dataclass fields."""
        import dataclasses
        declared = {f.name for f in dataclasses.fields(Parallel)}
        assert "steps" in declared
        assert "name" in declared
        assert "description" in declared
    def test_steps_field_default_factory(self):
        """steps has no plain default; it defaults via the list factory."""
        import dataclasses
        field = next(f for f in dataclasses.fields(Parallel) if f.name == "steps")
        assert field.default is dataclasses.MISSING
        assert field.default_factory is list
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/workflow/test_parallel_serialization.py",
"license": "Apache License 2.0",
"lines": 317,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/workflow/test_router_serialization.py | """
Unit tests for Router serialization and deserialization.
Tests cover:
- to_dict(): Serialization of Router to dictionary
- from_dict(): Deserialization of Router from dictionary
- Roundtrip serialization (no data loss)
- selector callable serialization
- Nested choice serialization
"""
from typing import List
import pytest
from agno.registry import Registry
from agno.workflow.router import Router
from agno.workflow.step import Step
from agno.workflow.types import StepInput
# =============================================================================
# Sample executor and selector functions for testing
# =============================================================================
def router_executor_1(step_input: StepInput) -> str:
    """Executor backing the first routing choice; always produces "result_1"."""
    outcome = "result_1"
    return outcome
def router_executor_2(step_input: StepInput) -> str:
    """Executor backing the second routing choice; always produces "result_2"."""
    outcome = "result_2"
    return outcome
def router_executor_3(step_input: StepInput) -> str:
    """Executor backing the third routing choice; always produces "result_3"."""
    outcome = "result_3"
    return outcome
def first_choice_selector(step_input: StepInput) -> List[Step]:
    """Stub selector standing in for 'pick the first choice'; selects no steps."""
    chosen: List[Step] = []
    return chosen
def content_based_selector(step_input: StepInput) -> List[Step]:
    """Stub selector standing in for content-based routing; selects no steps."""
    chosen: List[Step] = []
    return chosen
def multi_choice_selector(step_input: StepInput) -> List[Step]:
    """Stub selector standing in for multi-choice routing; selects no steps."""
    chosen: List[Step] = []
    return chosen
# =============================================================================
# Fixtures
# =============================================================================
@pytest.fixture
def registry_with_functions():
    """Registry holding every executor and selector used by the Router tests."""
    registered = [
        router_executor_1,
        router_executor_2,
        router_executor_3,
        first_choice_selector,
        content_based_selector,
        multi_choice_selector,
    ]
    return Registry(functions=registered)
@pytest.fixture
def simple_choices():
    """Two plain Step choices (choice-1 / choice-2) for Router tests."""
    specs = [("choice-1", router_executor_1), ("choice-2", router_executor_2)]
    return [Step(name=choice_name, executor=fn) for choice_name, fn in specs]
# =============================================================================
# to_dict() Tests
# =============================================================================
class TestRouterToDict:
    """Serialization tests covering Router.to_dict()."""
    def test_to_dict_basic(self, simple_choices):
        """A named, described Router serializes its type tag, metadata and choices."""
        data = Router(
            name="basic-router",
            description="Basic router step",
            selector=first_choice_selector,
            choices=simple_choices,
        ).to_dict()
        assert data["type"] == "Router"
        assert data["name"] == "basic-router"
        assert data["description"] == "Basic router step"
        assert len(data["choices"]) == 2
    def test_to_dict_serializes_selector(self, simple_choices):
        """The selector callable is serialized by its function name."""
        data = Router(
            name="selector-router",
            selector=content_based_selector,
            choices=simple_choices,
        ).to_dict()
        assert data["selector"] == "content_based_selector"
    def test_to_dict_serializes_choices(self, simple_choices):
        """Every choice appears in order with its own name and a Step type tag."""
        data = Router(
            name="choices-router",
            selector=first_choice_selector,
            choices=simple_choices,
        ).to_dict()
        entries = data["choices"]
        assert len(entries) == 2
        for entry, expected_name in zip(entries, ("choice-1", "choice-2")):
            assert entry["name"] == expected_name
            assert entry["type"] == "Step"
    def test_to_dict_preserves_choice_details(self):
        """Per-choice configuration (description, retries, skip flag) is retained."""
        configured = Step(
            name="detailed-choice",
            executor=router_executor_1,
            description="Detailed description",
            max_retries=5,
            skip_on_failure=True,
        )
        data = Router(name="detail-router", selector=first_choice_selector, choices=[configured]).to_dict()
        entry = data["choices"][0]
        assert entry["name"] == "detailed-choice"
        assert entry["description"] == "Detailed description"
        assert entry["max_retries"] == 5
        assert entry["skip_on_failure"] is True
# =============================================================================
# from_dict() Tests
# =============================================================================
class TestRouterFromDict:
    """Tests for Router.from_dict() method.

    Payloads mirror the shape produced by Router.to_dict(): the selector is a
    function name resolved against the Registry, and each choice carries an
    "executor_ref" resolved the same way.
    """
    def test_from_dict_basic(self, registry_with_functions):
        """Test from_dict creates Router with basic config."""
        data = {
            "type": "Router",
            "name": "basic-router",
            "description": "Basic router step",
            "selector": "first_choice_selector",
            "choices": [
                {
                    "type": "Step",
                    "name": "choice-1",
                    "executor_ref": "router_executor_1",
                    "max_retries": 3,
                    "skip_on_failure": False,
                    "strict_input_validation": False,
                },
            ],
        }
        router = Router.from_dict(data, registry=registry_with_functions)
        assert router.name == "basic-router"
        assert router.description == "Basic router step"
        # The selector name must resolve back to the exact function object.
        assert router.selector == first_choice_selector
        assert len(router.choices) == 1
    def test_from_dict_restores_selector(self, registry_with_functions):
        """Test from_dict restores selector function from registry."""
        data = {
            "type": "Router",
            "name": "selector-router",
            "description": None,
            "selector": "content_based_selector",
            "choices": [
                {
                    "type": "Step",
                    "name": "choice",
                    "executor_ref": "router_executor_1",
                    "max_retries": 3,
                    "skip_on_failure": False,
                    "strict_input_validation": False,
                },
            ],
        }
        router = Router.from_dict(data, registry=registry_with_functions)
        assert router.selector == content_based_selector
        assert callable(router.selector)
    def test_from_dict_raises_without_registry_for_selector(self):
        """Test from_dict raises error when selector needs registry but none provided."""
        data = {
            "type": "Router",
            "name": "selector-router",
            "description": None,
            "selector": "first_choice_selector",
            "choices": [],
        }
        # A selector name cannot be resolved without a registry to look it up in.
        with pytest.raises(ValueError, match="Registry required"):
            Router.from_dict(data, registry=None)
    def test_from_dict_raises_for_unknown_selector(self, registry_with_functions):
        """Test from_dict raises error for unknown selector function."""
        data = {
            "type": "Router",
            "name": "unknown-selector-router",
            "description": None,
            "selector": "unknown_selector",
            "choices": [],
        }
        with pytest.raises(ValueError, match="not found in registry"):
            Router.from_dict(data, registry=registry_with_functions)
    def test_from_dict_without_selector_uses_hitl(self, registry_with_functions):
        """Test from_dict allows no selector when using HITL."""
        # With HITL, selector is optional
        data = {
            "type": "Router",
            "name": "hitl-router",
            "description": None,
            "selector": None,
            "requires_user_input": True,
            "choices": [],
        }
        router = Router.from_dict(data, registry=registry_with_functions)
        assert router.name == "hitl-router"
        assert router.selector is None
        assert router.requires_user_input is True
    def test_from_dict_without_selector_and_without_hitl(self, registry_with_functions):
        """Test from_dict allows no selector even without HITL (but won't function)."""
        # Router without selector and without HITL will be created but won't work at runtime
        data = {
            "type": "Router",
            "name": "no-selector-router",
            "description": None,
            "selector": None,
            "choices": [],
        }
        # This now succeeds at deserialization time - validation happens at execution time
        router = Router.from_dict(data, registry=registry_with_functions)
        assert router.name == "no-selector-router"
        assert router.selector is None
        assert router.requires_user_input is False
    def test_from_dict_with_multiple_choices(self, registry_with_functions):
        """Test from_dict with multiple choice steps."""
        data = {
            "type": "Router",
            "name": "multi-router",
            "description": None,
            "selector": "multi_choice_selector",
            "choices": [
                {
                    "type": "Step",
                    "name": "choice-1",
                    "executor_ref": "router_executor_1",
                    "max_retries": 3,
                    "skip_on_failure": False,
                    "strict_input_validation": False,
                },
                {
                    "type": "Step",
                    "name": "choice-2",
                    "executor_ref": "router_executor_2",
                    "max_retries": 3,
                    "skip_on_failure": False,
                    "strict_input_validation": False,
                },
                {
                    "type": "Step",
                    "name": "choice-3",
                    "executor_ref": "router_executor_3",
                    "max_retries": 3,
                    "skip_on_failure": False,
                    "strict_input_validation": False,
                },
            ],
        }
        router = Router.from_dict(data, registry=registry_with_functions)
        # Choice order in the payload must be preserved.
        assert len(router.choices) == 3
        assert router.choices[0].name == "choice-1"
        assert router.choices[1].name == "choice-2"
        assert router.choices[2].name == "choice-3"
# =============================================================================
# Roundtrip Tests
# =============================================================================
class TestRouterSerializationRoundtrip:
    """Tests for Router serialization roundtrip (to_dict -> from_dict).

    Each test serializes a Router to a dict, deserializes it with the
    Registry fixture, and asserts that no configuration was lost.
    """
    def test_roundtrip_basic(self, registry_with_functions):
        """Test roundtrip preserves basic Router configuration."""
        choice_1 = Step(name="choice-1", executor=router_executor_1)
        choice_2 = Step(name="choice-2", executor=router_executor_2)
        original = Router(
            name="roundtrip-router",
            description="Test description",
            selector=first_choice_selector,
            choices=[choice_1, choice_2],
        )
        # Serialize
        data = original.to_dict()
        # Deserialize
        restored = Router.from_dict(data, registry=registry_with_functions)
        # Verify no data loss
        assert restored.name == original.name
        assert restored.description == original.description
        assert len(restored.choices) == len(original.choices)
    def test_roundtrip_preserves_type_field(self, registry_with_functions):
        """Test roundtrip preserves type field for proper deserialization dispatch."""
        choice = Step(name="typed-choice", executor=router_executor_1)
        router = Router(name="typed-router", selector=first_choice_selector, choices=[choice])
        data = router.to_dict()
        assert data["type"] == "Router"
        restored = Router.from_dict(data, registry=registry_with_functions)
        assert restored.name == "typed-router"
    def test_roundtrip_preserves_selector(self, registry_with_functions):
        """Test roundtrip preserves selector function reference."""
        choice = Step(name="selector-choice", executor=router_executor_1)
        original = Router(
            name="selector-router",
            selector=content_based_selector,
            choices=[choice],
        )
        data = original.to_dict()
        restored = Router.from_dict(data, registry=registry_with_functions)
        # The selector serializes by name only; the registry maps it back to
        # the exact function object.
        assert restored.selector == content_based_selector
        assert callable(restored.selector)
    def test_roundtrip_preserves_choice_names(self, registry_with_functions):
        """Test roundtrip preserves all choice step names."""
        choices = [
            Step(name="first", executor=router_executor_1),
            Step(name="second", executor=router_executor_2),
            Step(name="third", executor=router_executor_3),
        ]
        original = Router(name="multi-choice-router", selector=multi_choice_selector, choices=choices)
        data = original.to_dict()
        restored = Router.from_dict(data, registry=registry_with_functions)
        assert len(restored.choices) == 3
        assert restored.choices[0].name == "first"
        assert restored.choices[1].name == "second"
        assert restored.choices[2].name == "third"
    def test_roundtrip_preserves_choice_executors(self, registry_with_functions):
        """Test roundtrip preserves executor function references in choices."""
        choice_1 = Step(name="choice-1", executor=router_executor_1)
        choice_2 = Step(name="choice-2", executor=router_executor_2)
        original = Router(name="executor-router", selector=first_choice_selector, choices=[choice_1, choice_2])
        data = original.to_dict()
        restored = Router.from_dict(data, registry=registry_with_functions)
        assert restored.choices[0].executor == router_executor_1
        assert restored.choices[1].executor == router_executor_2
    def test_roundtrip_preserves_choice_configuration(self, registry_with_functions):
        """Test roundtrip preserves all choice configuration fields."""
        choice = Step(
            name="configured-choice",
            executor=router_executor_1,
            description="Choice description",
            # Non-default values on every field so a dropped field would fail.
            max_retries=5,
            skip_on_failure=True,
            strict_input_validation=True,
            add_workflow_history=True,
            num_history_runs=8,
        )
        original = Router(name="config-router", selector=first_choice_selector, choices=[choice])
        data = original.to_dict()
        restored = Router.from_dict(data, registry=registry_with_functions)
        restored_choice = restored.choices[0]
        assert restored_choice.name == "configured-choice"
        assert restored_choice.description == "Choice description"
        assert restored_choice.max_retries == 5
        assert restored_choice.skip_on_failure is True
        assert restored_choice.strict_input_validation is True
        assert restored_choice.add_workflow_history is True
        assert restored_choice.num_history_runs == 8
    def test_roundtrip_with_different_selectors(self, registry_with_functions):
        """Test roundtrip with different selector functions."""
        # Exercise every registered selector to prove resolution is not
        # specific to one function name.
        for selector in [first_choice_selector, content_based_selector, multi_choice_selector]:
            choice = Step(name="test-choice", executor=router_executor_1)
            original = Router(
                name=f"router-with-{selector.__name__}",
                selector=selector,
                choices=[choice],
            )
            data = original.to_dict()
            restored = Router.from_dict(data, registry=registry_with_functions)
            assert restored.selector == selector
# =============================================================================
# Nested Container Tests
# =============================================================================
class TestRouterNestedContainerSerialization:
    """Tests for Router with nested container choices (Parallel, Steps, Loop, etc.).

    Each container class is imported locally inside its test to keep the
    module-level imports minimal.
    """
    def test_roundtrip_with_nested_router(self, registry_with_functions):
        """Test roundtrip with nested Router inside Router."""
        inner_choice = Step(name="inner-choice", executor=router_executor_1)
        inner_router = Router(name="inner-router", selector=first_choice_selector, choices=[inner_choice])
        outer_router = Router(name="outer-router", selector=content_based_selector, choices=[inner_router])
        data = outer_router.to_dict()
        restored = Router.from_dict(data, registry=registry_with_functions)
        assert restored.name == "outer-router"
        assert len(restored.choices) == 1
        assert restored.choices[0].name == "inner-router"
        # The nested choice must come back as a Router, not a plain Step.
        assert isinstance(restored.choices[0], Router)
    def test_roundtrip_with_nested_parallel(self, registry_with_functions):
        """Test roundtrip with nested Parallel container inside Router."""
        from agno.workflow.parallel import Parallel
        step_1 = Step(name="step-1", executor=router_executor_1)
        step_2 = Step(name="step-2", executor=router_executor_2)
        parallel = Parallel(step_1, step_2, name="parallel-choice")
        router = Router(name="router-with-parallel", selector=first_choice_selector, choices=[parallel])
        data = router.to_dict()
        restored = Router.from_dict(data, registry=registry_with_functions)
        assert restored.name == "router-with-parallel"
        assert len(restored.choices) == 1
        assert restored.choices[0].name == "parallel-choice"
        assert isinstance(restored.choices[0], Parallel)
        assert len(restored.choices[0].steps) == 2
    def test_roundtrip_with_nested_loop(self, registry_with_functions):
        """Test roundtrip with nested Loop container inside Router."""
        from agno.workflow.loop import Loop
        step = Step(name="loop-step", executor=router_executor_1)
        loop = Loop(name="loop-choice", steps=[step], max_iterations=3)
        router = Router(name="router-with-loop", selector=first_choice_selector, choices=[loop])
        data = router.to_dict()
        restored = Router.from_dict(data, registry=registry_with_functions)
        assert restored.name == "router-with-loop"
        assert len(restored.choices) == 1
        assert restored.choices[0].name == "loop-choice"
        assert isinstance(restored.choices[0], Loop)
        # Loop-specific configuration must also survive the roundtrip.
        assert restored.choices[0].max_iterations == 3
    def test_roundtrip_with_nested_steps_container(self, registry_with_functions):
        """Test roundtrip with nested Steps container inside Router."""
        from agno.workflow.steps import Steps
        step_1 = Step(name="step-1", executor=router_executor_1)
        step_2 = Step(name="step-2", executor=router_executor_2)
        steps_container = Steps(name="steps-choice", steps=[step_1, step_2])
        router = Router(name="router-with-steps", selector=first_choice_selector, choices=[steps_container])
        data = router.to_dict()
        restored = Router.from_dict(data, registry=registry_with_functions)
        assert restored.name == "router-with-steps"
        assert len(restored.choices) == 1
        assert restored.choices[0].name == "steps-choice"
        assert isinstance(restored.choices[0], Steps)
        assert len(restored.choices[0].steps) == 2
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/workflow/test_router_serialization.py",
"license": "Apache License 2.0",
"lines": 413,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/workflow/test_step_serialization.py | """
Unit tests for Step serialization and deserialization.
Tests cover:
- to_dict(): Serialization of Step to dictionary
- from_dict(): Deserialization of Step from dictionary
- Roundtrip serialization (no data loss)
"""
from unittest.mock import MagicMock, patch
import pytest
from agno.registry import Registry
from agno.workflow.step import Step
from agno.workflow.types import StepInput
# =============================================================================
# Sample executor functions for testing
# =============================================================================
def sample_executor(step_input: StepInput) -> str:
    """Sample executor used as a serializable function reference; returns "executed"."""
    outcome = "executed"
    return outcome
def another_executor(step_input: StepInput) -> str:
    """Second sample executor; returns "another result"."""
    outcome = "another result"
    return outcome
# =============================================================================
# Fixtures
# =============================================================================
@pytest.fixture
def mock_agent():
    """Mock agent exposing the id/name/description attributes Step serialization reads."""
    fake = MagicMock()
    fake.id = "test-agent-id"
    fake.name = "Test Agent"
    fake.description = "A test agent"
    return fake
@pytest.fixture
def mock_team():
    """Mock team exposing the id/name/description attributes Step serialization reads."""
    fake = MagicMock()
    fake.id = "test-team-id"
    fake.name = "Test Team"
    fake.description = "A test team"
    return fake
@pytest.fixture
def registry_with_functions():
    """Registry pre-loaded with both sample executor functions."""
    executors = [sample_executor, another_executor]
    return Registry(functions=executors)
# =============================================================================
# to_dict() Tests
# =============================================================================
class TestStepToDict:
    """Tests for Step.to_dict() method.

    A Step serializes exactly one executor reference: "agent_id" for agents,
    "team_id" for teams, or "executor_ref" (function name) for callables;
    the other two keys must be absent from the payload.
    """
    def test_to_dict_with_agent(self, mock_agent):
        """Test to_dict serializes agent reference."""
        step = Step(name="agent-step", agent=mock_agent, description="Step with agent")
        result = step.to_dict()
        assert result["type"] == "Step"
        assert result["name"] == "agent-step"
        assert result["description"] == "Step with agent"
        assert result["agent_id"] == "test-agent-id"
        # Only the agent reference key may be present.
        assert "team_id" not in result
        assert "executor_ref" not in result
    def test_to_dict_with_team(self, mock_team):
        """Test to_dict serializes team reference."""
        step = Step(name="team-step", team=mock_team, description="Step with team")
        result = step.to_dict()
        assert result["type"] == "Step"
        assert result["name"] == "team-step"
        assert result["description"] == "Step with team"
        assert result["team_id"] == "test-team-id"
        assert "agent_id" not in result
        assert "executor_ref" not in result
    def test_to_dict_with_executor(self):
        """Test to_dict serializes executor function reference."""
        step = Step(name="executor-step", executor=sample_executor, description="Step with executor")
        result = step.to_dict()
        assert result["type"] == "Step"
        assert result["name"] == "executor-step"
        assert result["description"] == "Step with executor"
        # Callables serialize by function name only.
        assert result["executor_ref"] == "sample_executor"
        assert "agent_id" not in result
        assert "team_id" not in result
    def test_to_dict_preserves_all_fields(self, mock_agent):
        """Test to_dict preserves all step configuration fields."""
        step = Step(
            name="full-step",
            agent=mock_agent,
            step_id="custom-step-id",
            description="Full step description",
            max_retries=5,
            skip_on_failure=True,
            strict_input_validation=True,
            add_workflow_history=True,
            num_history_runs=10,
        )
        result = step.to_dict()
        assert result["name"] == "full-step"
        assert result["step_id"] == "custom-step-id"
        assert result["description"] == "Full step description"
        assert result["max_retries"] == 5
        assert result["skip_on_failure"] is True
        assert result["strict_input_validation"] is True
        assert result["add_workflow_history"] is True
        assert result["num_history_runs"] == 10
    def test_to_dict_default_values(self, mock_agent):
        """Test to_dict includes default values."""
        step = Step(name="default-step", agent=mock_agent)
        result = step.to_dict()
        # Defaults as serialized by an unconfigured Step.
        assert result["max_retries"] == 3
        assert result["skip_on_failure"] is False
        assert result["strict_input_validation"] is False
        assert result["add_workflow_history"] is None
        assert result["num_history_runs"] == 3
# =============================================================================
# from_dict() Tests
# =============================================================================
class TestStepFromDict:
"""Tests for Step.from_dict() method."""
def test_from_dict_basic(self):
"""Test from_dict creates step with basic config."""
_data = { # noqa: F841
"type": "Step",
"name": "basic-step",
"description": "A basic step",
"max_retries": 3,
"skip_on_failure": False,
"strict_input_validation": False,
"add_workflow_history": None,
"num_history_runs": 3,
}
# Need to provide an executor since Step requires one
with patch("agno.workflow.step.Step.__init__", return_value=None):
# Skip actual initialization for this basic test
pass
    def test_from_dict_with_agent(self):
        """Test from_dict reconstructs step with agent."""
        mock_agent = MagicMock()
        mock_agent.id = "loaded-agent-id"
        data = {
            "type": "Step",
            "name": "agent-step",
            "description": "Step with agent",
            "agent_id": "loaded-agent-id",
            "max_retries": 3,
            "skip_on_failure": False,
            "strict_input_validation": False,
        }
        # No registry is supplied, so the agent must be loaded through the
        # DB lookup path (agno.agent.agent.get_agent_by_id), patched here.
        with patch("agno.agent.agent.get_agent_by_id") as mock_get_agent:
            mock_get_agent.return_value = mock_agent
            mock_db = MagicMock()
            step = Step.from_dict(data, db=mock_db)
            mock_get_agent.assert_called_once()
            assert step.agent == mock_agent
            assert step.name == "agent-step"
def test_from_dict_agent_resolved_from_registry(self):
"""Test from_dict resolves agent from registry before DB."""
mock_agent = MagicMock()
mock_agent.id = "registry-agent"
mock_copy = MagicMock()
mock_copy.id = "registry-agent"
mock_agent.deep_copy.return_value = mock_copy
registry = Registry(agents=[mock_agent])
data = {
"type": "Step",
"name": "registry-step",
"agent_id": "registry-agent",
}
with patch("agno.agent.agent.get_agent_by_id") as mock_get_agent:
step = Step.from_dict(data, registry=registry)
# DB fallback should NOT be called
mock_get_agent.assert_not_called()
assert step.agent is mock_copy
mock_agent.deep_copy.assert_called_once()
def test_from_dict_agent_falls_back_to_db(self):
"""Test from_dict falls back to DB when agent not in registry."""
registry = Registry(agents=[]) # empty
mock_db_agent = MagicMock()
mock_db_agent.id = "db-agent"
data = {
"type": "Step",
"name": "db-step",
"agent_id": "db-agent",
}
with patch("agno.agent.agent.get_agent_by_id") as mock_get_agent:
mock_get_agent.return_value = mock_db_agent
mock_db = MagicMock()
step = Step.from_dict(data, registry=registry, db=mock_db)
mock_get_agent.assert_called_once_with(db=mock_db, id="db-agent", registry=registry)
assert step.agent is mock_db_agent
def test_from_dict_team_resolved_from_registry(self):
"""Test from_dict resolves team from registry before DB."""
mock_team = MagicMock()
mock_team.id = "registry-team"
mock_copy = MagicMock()
mock_copy.id = "registry-team"
mock_team.deep_copy.return_value = mock_copy
registry = Registry(teams=[mock_team])
data = {
"type": "Step",
"name": "registry-team-step",
"team_id": "registry-team",
}
with patch("agno.team.team.get_team_by_id") as mock_get_team:
step = Step.from_dict(data, registry=registry)
mock_get_team.assert_not_called()
assert step.team is mock_copy
mock_team.deep_copy.assert_called_once()
def test_from_dict_unresolvable_agent_raises(self):
"""Test from_dict raises ValueError when agent can't be resolved."""
data = {
"type": "Step",
"name": "broken-step",
"agent_id": "nonexistent-agent",
}
with pytest.raises(ValueError, match="must have one executor"):
Step.from_dict(data)
def test_from_dict_unresolvable_team_raises(self):
"""Test from_dict raises ValueError when team can't be resolved."""
data = {
"type": "Step",
"name": "broken-step",
"team_id": "nonexistent-team",
}
with pytest.raises(ValueError, match="must have one executor"):
Step.from_dict(data)
def test_from_dict_with_executor(self, registry_with_functions):
"""Test from_dict reconstructs step with executor function."""
data = {
"type": "Step",
"name": "executor-step",
"description": "Step with executor",
"executor_ref": "sample_executor",
"max_retries": 3,
"skip_on_failure": False,
"strict_input_validation": False,
}
step = Step.from_dict(data, registry=registry_with_functions)
assert step.executor == sample_executor
assert step.name == "executor-step"
def test_from_dict_preserves_all_fields(self, registry_with_functions):
"""Test from_dict preserves all configuration fields."""
data = {
"type": "Step",
"name": "full-step",
"step_id": "custom-step-id",
"description": "Full description",
"executor_ref": "sample_executor",
"max_retries": 5,
"skip_on_failure": True,
"strict_input_validation": True,
"add_workflow_history": True,
"num_history_runs": 10,
}
step = Step.from_dict(data, registry=registry_with_functions)
assert step.name == "full-step"
assert step.step_id == "custom-step-id"
assert step.description == "Full description"
assert step.max_retries == 5
assert step.skip_on_failure is True
assert step.strict_input_validation is True
assert step.add_workflow_history is True
assert step.num_history_runs == 10
# =============================================================================
# Roundtrip Tests
# =============================================================================
class TestStepSerializationRoundtrip:
    """Round-trip behavior for Step: to_dict followed by from_dict loses nothing."""

    def test_roundtrip_with_executor(self, registry_with_functions):
        """A fully-configured executor step survives a serialize/deserialize cycle."""
        source = Step(
            name="roundtrip-step",
            executor=sample_executor,
            description="Roundtrip test step",
            max_retries=4,
            skip_on_failure=True,
            strict_input_validation=True,
            add_workflow_history=True,
            num_history_runs=7,
        )
        revived = Step.from_dict(source.to_dict(), registry=registry_with_functions)
        # Compare every round-tripped attribute against the source object.
        for attr in (
            "name",
            "description",
            "max_retries",
            "skip_on_failure",
            "strict_input_validation",
            "add_workflow_history",
            "num_history_runs",
            "executor",
        ):
            assert getattr(revived, attr) == getattr(source, attr)

    def test_roundtrip_preserves_type_field(self, registry_with_functions):
        """The payload carries type=Step so deserialization dispatch works."""
        payload = Step(name="typed-step", executor=sample_executor).to_dict()
        assert payload["type"] == "Step"
        assert Step.from_dict(payload, registry=registry_with_functions).name == "typed-step"

    def test_roundtrip_with_default_values(self, registry_with_functions):
        """Default configuration values come back unchanged."""
        revived = Step.from_dict(
            Step(name="defaults-step", executor=sample_executor).to_dict(),
            registry=registry_with_functions,
        )
        assert revived.max_retries == 3
        assert revived.skip_on_failure is False
        assert revived.strict_input_validation is False
        assert revived.num_history_runs == 3

    def test_roundtrip_step_id_preserved(self, registry_with_functions):
        """A caller-supplied step_id is kept through the round trip."""
        payload = Step(
            name="id-step",
            executor=sample_executor,
            step_id="my-custom-step-id",
        ).to_dict()
        assert Step.from_dict(payload, registry=registry_with_functions).step_id == "my-custom-step-id"

    def test_roundtrip_none_values(self, registry_with_functions):
        """Explicit None values round-trip as None rather than being defaulted."""
        payload = Step(
            name="none-step",
            executor=sample_executor,
            description=None,
            add_workflow_history=None,
        ).to_dict()
        revived = Step.from_dict(payload, registry=registry_with_functions)
        assert revived.description is None
        assert revived.add_workflow_history is None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/workflow/test_step_serialization.py",
"license": "Apache License 2.0",
"lines": 328,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/workflow/test_steps_serialization.py | """
Unit tests for Steps serialization and deserialization.
Tests cover:
- to_dict(): Serialization of Steps to dictionary
- from_dict(): Deserialization of Steps from dictionary
- Roundtrip serialization (no data loss)
- Nested step serialization
"""
import pytest
from agno.registry import Registry
from agno.workflow.step import Step
from agno.workflow.steps import Steps
from agno.workflow.types import StepInput
# =============================================================================
# Sample executor functions for testing
# =============================================================================
def step_executor_1(step_input: StepInput) -> str:
    """First step executor; ignores its input and returns a fixed marker."""
    return "result_1"
def step_executor_2(step_input: StepInput) -> str:
    """Second step executor; ignores its input and returns a fixed marker."""
    return "result_2"
def step_executor_3(step_input: StepInput) -> str:
    """Third step executor; ignores its input and returns a fixed marker."""
    return "result_3"
# =============================================================================
# Fixtures
# =============================================================================
@pytest.fixture
def registry_with_functions():
    """Create a registry with sample functions registered.

    All three executors must be registered so that from_dict can resolve
    their executor_ref names back to callables.
    """
    return Registry(functions=[step_executor_1, step_executor_2, step_executor_3])
@pytest.fixture
def simple_steps():
    """Two minimal Step objects backed by the sample executors."""
    first = Step(name="step-1", executor=step_executor_1)
    second = Step(name="step-2", executor=step_executor_2)
    return [first, second]
# =============================================================================
# to_dict() Tests
# =============================================================================
class TestStepsToDict:
    """Serialization (to_dict) behavior for the Steps container."""

    def test_to_dict_basic(self, simple_steps):
        """Basic container fields show up in the serialized payload."""
        payload = Steps(name="basic-steps", description="Basic steps container", steps=simple_steps).to_dict()
        assert payload["type"] == "Steps"
        assert payload["name"] == "basic-steps"
        assert payload["description"] == "Basic steps container"
        assert len(payload["steps"]) == 2

    def test_to_dict_serializes_nested_steps(self, simple_steps):
        """Each nested step is serialized with its own name and type tag."""
        children = Steps(name="nested-steps", steps=simple_steps).to_dict()["steps"]
        assert len(children) == 2
        for child, expected_name in zip(children, ("step-1", "step-2")):
            assert child["name"] == expected_name
            assert child["type"] == "Step"

    def test_to_dict_preserves_step_details(self):
        """All per-step configuration fields make it into the payload."""
        detailed = Step(
            name="detailed-step",
            executor=step_executor_1,
            description="Detailed description",
            max_retries=5,
            skip_on_failure=True,
        )
        child = Steps(name="detail-steps", steps=[detailed]).to_dict()["steps"][0]
        assert child["name"] == "detailed-step"
        assert child["description"] == "Detailed description"
        assert child["max_retries"] == 5
        assert child["skip_on_failure"] is True

    def test_to_dict_empty_steps(self):
        """An empty container serializes with an empty steps list."""
        payload = Steps(name="empty-steps", steps=[]).to_dict()
        assert payload["type"] == "Steps"
        assert payload["name"] == "empty-steps"
        assert payload["steps"] == []
# =============================================================================
# from_dict() Tests
# =============================================================================
class TestStepsFromDict:
    """Deserialization (from_dict) behavior for the Steps container."""

    @staticmethod
    def _step_payload(name, executor_ref, **extra):
        """Build a serialized Step payload carrying the common required fields."""
        payload = {
            "type": "Step",
            "name": name,
            "executor_ref": executor_ref,
            "max_retries": 3,
            "skip_on_failure": False,
            "strict_input_validation": False,
        }
        payload.update(extra)
        return payload

    def test_from_dict_basic(self, registry_with_functions):
        """A minimal payload yields a named container with one step."""
        payload = {
            "type": "Steps",
            "name": "basic-steps",
            "description": "Basic steps container",
            "steps": [self._step_payload("step-1", "step_executor_1")],
        }
        container = Steps.from_dict(payload, registry=registry_with_functions)
        assert container.name == "basic-steps"
        assert container.description == "Basic steps container"
        assert len(container.steps) == 1

    def test_from_dict_with_multiple_steps(self, registry_with_functions):
        """Multiple serialized steps are restored in their original order."""
        payload = {
            "type": "Steps",
            "name": "multi-steps",
            "description": None,
            "steps": [
                self._step_payload("step-1", "step_executor_1"),
                self._step_payload("step-2", "step_executor_2"),
            ],
        }
        container = Steps.from_dict(payload, registry=registry_with_functions)
        assert len(container.steps) == 2
        assert [child.name for child in container.steps] == ["step-1", "step-2"]

    def test_from_dict_preserves_step_configuration(self, registry_with_functions):
        """Per-step configuration fields are restored onto the Step object."""
        payload = {
            "type": "Steps",
            "name": "config-steps",
            "description": None,
            "steps": [
                self._step_payload(
                    "configured-step",
                    "step_executor_1",
                    description="Step description",
                    max_retries=5,
                    skip_on_failure=True,
                    strict_input_validation=True,
                    num_history_runs=10,
                )
            ],
        }
        child = Steps.from_dict(payload, registry=registry_with_functions).steps[0]
        assert child.name == "configured-step"
        assert child.description == "Step description"
        assert child.max_retries == 5
        assert child.skip_on_failure is True
        assert child.strict_input_validation is True
        assert child.num_history_runs == 10
# =============================================================================
# Roundtrip Tests
# =============================================================================
class TestStepsSerializationRoundtrip:
    """Round-trip behavior for Steps: to_dict followed by from_dict loses nothing."""

    def test_roundtrip_basic(self, registry_with_functions):
        """Name, description and child count survive a round trip."""
        source = Steps(
            name="roundtrip-steps",
            description="Test description",
            steps=[
                Step(name="step-1", executor=step_executor_1),
                Step(name="step-2", executor=step_executor_2),
            ],
        )
        revived = Steps.from_dict(source.to_dict(), registry=registry_with_functions)
        assert revived.name == source.name
        assert revived.description == source.description
        assert len(revived.steps) == len(source.steps)

    def test_roundtrip_preserves_type_field(self, registry_with_functions):
        """The payload is tagged type=Steps so deserialization dispatch works."""
        payload = Steps(
            name="typed-steps",
            steps=[Step(name="typed-step", executor=step_executor_1)],
        ).to_dict()
        assert payload["type"] == "Steps"
        assert Steps.from_dict(payload, registry=registry_with_functions).name == "typed-steps"

    def test_roundtrip_preserves_nested_step_names(self, registry_with_functions):
        """Child step names come back in their original order."""
        source = Steps(
            name="multi-step-steps",
            steps=[
                Step(name="first", executor=step_executor_1),
                Step(name="second", executor=step_executor_2),
                Step(name="third", executor=step_executor_3),
            ],
        )
        revived = Steps.from_dict(source.to_dict(), registry=registry_with_functions)
        assert [child.name for child in revived.steps] == ["first", "second", "third"]

    def test_roundtrip_preserves_step_executors(self, registry_with_functions):
        """Executor references resolve back to the registered callables."""
        source = Steps(
            name="executor-steps",
            steps=[
                Step(name="step-1", executor=step_executor_1),
                Step(name="step-2", executor=step_executor_2),
            ],
        )
        revived = Steps.from_dict(source.to_dict(), registry=registry_with_functions)
        assert revived.steps[0].executor == step_executor_1
        assert revived.steps[1].executor == step_executor_2

    def test_roundtrip_preserves_step_configuration(self, registry_with_functions):
        """Every per-step configuration field survives the round trip."""
        source = Steps(
            name="config-steps",
            steps=[
                Step(
                    name="configured-step",
                    executor=step_executor_1,
                    description="Step description",
                    max_retries=5,
                    skip_on_failure=True,
                    strict_input_validation=True,
                    add_workflow_history=True,
                    num_history_runs=8,
                )
            ],
        )
        revived_child = Steps.from_dict(source.to_dict(), registry=registry_with_functions).steps[0]
        assert revived_child.name == "configured-step"
        assert revived_child.description == "Step description"
        assert revived_child.max_retries == 5
        assert revived_child.skip_on_failure is True
        assert revived_child.strict_input_validation is True
        assert revived_child.add_workflow_history is True
        assert revived_child.num_history_runs == 8

    def test_roundtrip_empty_steps(self, registry_with_functions):
        """An empty container round-trips to an empty container."""
        revived = Steps.from_dict(
            Steps(name="empty-steps", steps=[]).to_dict(),
            registry=registry_with_functions,
        )
        assert revived.name == "empty-steps"
        assert revived.steps == []
# =============================================================================
# Nested Container Tests
# =============================================================================
class TestStepsNestedContainerSerialization:
    """Round trips for Steps that hold nested containers (Steps, Parallel, ...)."""

    def test_roundtrip_with_nested_steps(self, registry_with_functions):
        """A Steps container nested inside another Steps is restored as Steps."""
        inner = Steps(name="inner-steps", steps=[Step(name="inner-step", executor=step_executor_1)])
        revived = Steps.from_dict(
            Steps(name="outer-steps", steps=[inner]).to_dict(),
            registry=registry_with_functions,
        )
        assert revived.name == "outer-steps"
        assert len(revived.steps) == 1
        nested = revived.steps[0]
        assert nested.name == "inner-steps"
        assert isinstance(nested, Steps)

    def test_roundtrip_with_nested_parallel(self, registry_with_functions):
        """A nested Parallel container keeps its type and both children."""
        from agno.workflow.parallel import Parallel

        block = Parallel(
            Step(name="step-1", executor=step_executor_1),
            Step(name="step-2", executor=step_executor_2),
            name="parallel-container",
        )
        revived = Steps.from_dict(
            Steps(name="steps-with-parallel", steps=[block]).to_dict(),
            registry=registry_with_functions,
        )
        assert revived.name == "steps-with-parallel"
        assert len(revived.steps) == 1
        nested = revived.steps[0]
        assert nested.name == "parallel-container"
        assert isinstance(nested, Parallel)
        assert len(nested.steps) == 2

    def test_roundtrip_mixed_step_types(self, registry_with_functions):
        """Step, Parallel and nested Steps children all restore with correct types."""
        from agno.workflow.parallel import Parallel

        source = Steps(
            name="mixed-steps",
            steps=[
                Step(name="regular-step", executor=step_executor_1),
                Parallel(
                    Step(name="parallel-step-1", executor=step_executor_2),
                    Step(name="parallel-step-2", executor=step_executor_3),
                    name="parallel-block",
                ),
                Steps(name="nested-steps", steps=[Step(name="nested-inner", executor=step_executor_1)]),
            ],
        )
        revived = Steps.from_dict(source.to_dict(), registry=registry_with_functions)
        assert len(revived.steps) == 3
        expected = [(Step, "regular-step"), (Parallel, "parallel-block"), (Steps, "nested-steps")]
        for child, (expected_cls, expected_name) in zip(revived.steps, expected):
            assert isinstance(child, expected_cls)
            assert child.name == expected_name
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/workflow/test_steps_serialization.py",
"license": "Apache License 2.0",
"lines": 295,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/agno/knowledge/loaders/azure_blob.py | """Azure Blob Storage content loader for Knowledge.
Provides methods for loading content from Azure Blob Storage.
"""
# mypy: disable-error-code="attr-defined"
from io import BytesIO
from typing import Any, Dict, List, Optional, cast
from agno.knowledge.content import Content, ContentStatus
from agno.knowledge.loaders.base import BaseLoader
from agno.knowledge.reader import Reader
from agno.knowledge.remote_content.azure_blob import AzureBlobConfig
from agno.knowledge.remote_content.base import BaseStorageConfig
from agno.knowledge.remote_content.remote_content import AzureBlobContent
from agno.utils.log import log_debug, log_error, log_info, log_warning
from agno.utils.string import generate_id
class AzureBlobLoader(BaseLoader):
    """Loader for Azure Blob Storage content.

    Shared helpers sit at the top; the sync (`_load_from_azure_blob`) and
    async (`_aload_from_azure_blob`) loaders below are deliberate mirrors
    of each other and should be kept in lockstep when edited.
    """

    # ==========================================
    # AZURE BLOB HELPERS (shared between sync/async)
    # ==========================================
    def _validate_azure_config(
        self,
        content: Content,
        config: Optional[BaseStorageConfig],
    ) -> Optional[AzureBlobConfig]:
        """Validate and extract Azure Blob config.

        Returns:
            AzureBlobConfig if valid, None otherwise
        """
        remote_content: AzureBlobContent = cast(AzureBlobContent, content.remote_content)
        # Only an isinstance check narrows config; any other config type is
        # treated as "not found" and logged.
        azure_config = cast(AzureBlobConfig, config) if isinstance(config, AzureBlobConfig) else None
        if azure_config is None:
            log_error(f"Azure Blob config not found for config_id: {remote_content.config_id}")
            return None
        return azure_config

    def _get_azure_blob_client(self, azure_config: AzureBlobConfig):
        """Get a sync Azure Blob Service Client using client credentials flow.

        Requires the `azure-identity` and `azure-storage-blob` packages.
        """
        # Imports live inside the method so the Azure SDK stays an optional
        # dependency for users who never touch Azure Blob content.
        try:
            from azure.identity import ClientSecretCredential  # type: ignore
            from azure.storage.blob import BlobServiceClient  # type: ignore
        except ImportError:
            raise ImportError(
                "The `azure-identity` and `azure-storage-blob` packages are not installed. "
                "Please install them via `pip install azure-identity azure-storage-blob`."
            )
        credential = ClientSecretCredential(
            tenant_id=azure_config.tenant_id,
            client_id=azure_config.client_id,
            client_secret=azure_config.client_secret,
        )
        blob_service = BlobServiceClient(
            account_url=f"https://{azure_config.storage_account}.blob.core.windows.net",
            credential=credential,
        )
        return blob_service

    def _get_azure_blob_client_async(self, azure_config: AzureBlobConfig):
        """Get an async Azure Blob Service Client using client credentials flow.

        Requires the `azure-identity` and `azure-storage-blob` packages.
        Uses the async versions from azure.storage.blob.aio and azure.identity.aio.
        """
        try:
            from azure.identity.aio import ClientSecretCredential  # type: ignore
            from azure.storage.blob.aio import BlobServiceClient  # type: ignore
        except ImportError:
            raise ImportError(
                "The `azure-identity` and `azure-storage-blob` packages are not installed. "
                "Please install them via `pip install azure-identity azure-storage-blob`."
            )
        credential = ClientSecretCredential(
            tenant_id=azure_config.tenant_id,
            client_id=azure_config.client_id,
            client_secret=azure_config.client_secret,
        )
        blob_service = BlobServiceClient(
            account_url=f"https://{azure_config.storage_account}.blob.core.windows.net",
            credential=credential,
        )
        return blob_service

    def _build_azure_metadata(
        self,
        azure_config: AzureBlobConfig,
        blob_name: str,
        file_name: str,
    ) -> Dict[str, str]:
        """Build Azure Blob-specific metadata dictionary."""
        return {
            "source_type": "azure_blob",
            "source_config_id": azure_config.id,
            "source_config_name": azure_config.name,
            "azure_storage_account": azure_config.storage_account,
            "azure_container": azure_config.container,
            "azure_blob_name": blob_name,
            "azure_filename": file_name,
        }

    def _build_azure_virtual_path(
        self,
        storage_account: str,
        container: str,
        blob_name: str,
    ) -> str:
        """Build virtual path for Azure Blob content (used for hashing/identity)."""
        return f"azure://{storage_account}/{container}/{blob_name}"

    def _get_azure_root_path(self, remote_content: AzureBlobContent) -> str:
        """Get the root path for computing relative paths."""
        return remote_content.prefix or ""

    # ==========================================
    # AZURE BLOB LOADERS
    # ==========================================
    async def _aload_from_azure_blob(
        self,
        content: Content,
        upsert: bool,
        skip_if_exists: bool,
        config: Optional[BaseStorageConfig] = None,
    ):
        """Load content from Azure Blob Storage (async).

        Requires the AzureBlobConfig to contain tenant_id, client_id, client_secret,
        storage_account, and container.

        Uses the async Azure SDK to avoid blocking the event loop.
        """
        remote_content: AzureBlobContent = cast(AzureBlobContent, content.remote_content)
        azure_config = self._validate_azure_config(content, config)
        if azure_config is None:
            return
        # Get async blob service client
        try:
            blob_service = self._get_azure_blob_client_async(azure_config)
        except ImportError as e:
            log_error(str(e))
            return
        except Exception as e:
            log_error(f"Error creating Azure Blob client: {e}")
            return
        # Use async context manager for proper resource cleanup
        async with blob_service:
            container_client = blob_service.get_container_client(azure_config.container)

            # Helper to list blobs with a given prefix (async)
            async def list_blobs_with_prefix(prefix: str) -> List[Dict[str, Any]]:
                """List all blobs under a given prefix (folder)."""
                results: List[Dict[str, Any]] = []
                # Normalize so "folder" and "folder/" list the same contents.
                normalized_prefix = prefix.rstrip("/") + "/" if not prefix.endswith("/") else prefix
                async for blob in container_client.list_blobs(name_starts_with=normalized_prefix):
                    # Names ending in "/" are folder placeholders, not files.
                    if not blob.name.endswith("/"):
                        results.append(
                            {
                                "name": blob.name,
                                "size": blob.size,
                                "content_type": blob.content_settings.content_type if blob.content_settings else None,
                            }
                        )
                return results

            # Identify blobs to process
            blobs_to_process: List[Dict[str, Any]] = []
            try:
                if remote_content.blob_name:
                    blob_client = container_client.get_blob_client(remote_content.blob_name)
                    try:
                        props = await blob_client.get_blob_properties()
                        blobs_to_process.append(
                            {
                                "name": remote_content.blob_name,
                                "size": props.size,
                                "content_type": props.content_settings.content_type if props.content_settings else None,
                            }
                        )
                    except Exception:
                        # A failed property fetch may mean blob_name is actually
                        # a folder path — fall back to a prefix listing.
                        log_debug(f"Blob {remote_content.blob_name} not found, checking if it's a folder...")
                        blobs_to_process = await list_blobs_with_prefix(remote_content.blob_name)
                        if not blobs_to_process:
                            log_error(
                                f"No blob or folder found at path: {remote_content.blob_name}. "
                                "If this is a folder, ensure files exist inside it."
                            )
                            return
                elif remote_content.prefix:
                    blobs_to_process = await list_blobs_with_prefix(remote_content.prefix)
            except Exception as e:
                log_error(f"Error listing Azure blobs: {e}")
                return
            if not blobs_to_process:
                log_warning(f"No blobs found in Azure container: {azure_config.container}")
                return
            log_info(f"Processing {len(blobs_to_process)} file(s) from Azure Blob Storage")
            # More than one blob means folder semantics: names become relative paths.
            is_folder_upload = len(blobs_to_process) > 1
            root_path = self._get_azure_root_path(remote_content)
            for blob_info in blobs_to_process:
                # Only "name" is consumed below; size/content_type are recorded
                # during listing but not used in this method.
                blob_name = blob_info["name"]
                file_name = blob_name.split("/")[-1]
                # Build metadata and virtual path using helpers
                virtual_path = self._build_azure_virtual_path(
                    azure_config.storage_account, azure_config.container, blob_name
                )
                azure_metadata = self._build_azure_metadata(azure_config, blob_name, file_name)
                merged_metadata = self._merge_metadata(azure_metadata, content.metadata)
                # Compute content name using base helper
                content_name = self._compute_content_name(
                    blob_name, file_name, content.name, root_path, is_folder_upload
                )
                # Create content entry using base helper
                content_entry = self._create_content_entry(
                    content, content_name, virtual_path, merged_metadata, "azure_blob", is_folder_upload
                )
                await self._ainsert_contents_db(content_entry)
                # Dedupe: already-known content hash is marked completed and skipped.
                if self._should_skip(content_entry.content_hash, skip_if_exists):
                    content_entry.status = ContentStatus.COMPLETED
                    await self._aupdate_content(content_entry)
                    continue
                # Download blob (async)
                try:
                    blob_client = container_client.get_blob_client(blob_name)
                    download_stream = await blob_client.download_blob()
                    blob_data = await download_stream.readall()
                    file_content = BytesIO(blob_data)
                except Exception as e:
                    # Per-blob failure: record it on the entry and keep going.
                    log_error(f"Error downloading Azure blob {blob_name}: {e}")
                    content_entry.status = ContentStatus.FAILED
                    content_entry.status_message = str(e)
                    await self._aupdate_content(content_entry)
                    continue
                # Select reader and read content
                reader = self._select_reader_by_uri(file_name, content.reader)
                if reader is None:
                    log_warning(f"No reader found for file: {file_name}")
                    content_entry.status = ContentStatus.FAILED
                    content_entry.status_message = "No suitable reader found"
                    await self._aupdate_content(content_entry)
                    continue
                reader = cast(Reader, reader)
                read_documents = await reader.async_read(file_content, name=file_name)
                # Prepare and insert into vector database
                if not content_entry.id:
                    content_entry.id = generate_id(content_entry.content_hash or "")
                self._prepare_documents_for_insert(read_documents, content_entry.id)
                await self._ahandle_vector_db_insert(content_entry, read_documents, upsert)

    def _load_from_azure_blob(
        self,
        content: Content,
        upsert: bool,
        skip_if_exists: bool,
        config: Optional[BaseStorageConfig] = None,
    ):
        """Load content from Azure Blob Storage (sync).

        Requires the AzureBlobConfig to contain tenant_id, client_id, client_secret,
        storage_account, and container.

        Mirrors `_aload_from_azure_blob` step for step with blocking SDK calls.
        """
        remote_content: AzureBlobContent = cast(AzureBlobContent, content.remote_content)
        azure_config = self._validate_azure_config(content, config)
        if azure_config is None:
            return
        # Get blob service client
        try:
            blob_service = self._get_azure_blob_client(azure_config)
        except ImportError as e:
            log_error(str(e))
            return
        except Exception as e:
            log_error(f"Error creating Azure Blob client: {e}")
            return
        # Use context manager for proper resource cleanup
        with blob_service:
            container_client = blob_service.get_container_client(azure_config.container)

            # Helper to list blobs with a given prefix
            def list_blobs_with_prefix(prefix: str) -> List[Dict[str, Any]]:
                """List all blobs under a given prefix (folder)."""
                results: List[Dict[str, Any]] = []
                # Normalize so "folder" and "folder/" list the same contents.
                normalized_prefix = prefix.rstrip("/") + "/" if not prefix.endswith("/") else prefix
                blobs = container_client.list_blobs(name_starts_with=normalized_prefix)
                for blob in blobs:
                    # Names ending in "/" are folder placeholders, not files.
                    if not blob.name.endswith("/"):
                        results.append(
                            {
                                "name": blob.name,
                                "size": blob.size,
                                "content_type": blob.content_settings.content_type if blob.content_settings else None,
                            }
                        )
                return results

            # Identify blobs to process
            blobs_to_process: List[Dict[str, Any]] = []
            try:
                if remote_content.blob_name:
                    blob_client = container_client.get_blob_client(remote_content.blob_name)
                    try:
                        props = blob_client.get_blob_properties()
                        blobs_to_process.append(
                            {
                                "name": remote_content.blob_name,
                                "size": props.size,
                                "content_type": props.content_settings.content_type if props.content_settings else None,
                            }
                        )
                    except Exception:
                        # A failed property fetch may mean blob_name is actually
                        # a folder path — fall back to a prefix listing.
                        log_debug(f"Blob {remote_content.blob_name} not found, checking if it's a folder...")
                        blobs_to_process = list_blobs_with_prefix(remote_content.blob_name)
                        if not blobs_to_process:
                            log_error(
                                f"No blob or folder found at path: {remote_content.blob_name}. "
                                "If this is a folder, ensure files exist inside it."
                            )
                            return
                elif remote_content.prefix:
                    blobs_to_process = list_blobs_with_prefix(remote_content.prefix)
            except Exception as e:
                log_error(f"Error listing Azure blobs: {e}")
                return
            if not blobs_to_process:
                log_warning(f"No blobs found in Azure container: {azure_config.container}")
                return
            log_info(f"Processing {len(blobs_to_process)} file(s) from Azure Blob Storage")
            # More than one blob means folder semantics: names become relative paths.
            is_folder_upload = len(blobs_to_process) > 1
            root_path = self._get_azure_root_path(remote_content)
            for blob_info in blobs_to_process:
                # Only "name" is consumed below; size/content_type are recorded
                # during listing but not used in this method.
                blob_name = blob_info["name"]
                file_name = blob_name.split("/")[-1]
                # Build metadata and virtual path using helpers
                virtual_path = self._build_azure_virtual_path(
                    azure_config.storage_account, azure_config.container, blob_name
                )
                azure_metadata = self._build_azure_metadata(azure_config, blob_name, file_name)
                merged_metadata = self._merge_metadata(azure_metadata, content.metadata)
                # Compute content name using base helper
                content_name = self._compute_content_name(
                    blob_name, file_name, content.name, root_path, is_folder_upload
                )
                # Create content entry using base helper
                content_entry = self._create_content_entry(
                    content, content_name, virtual_path, merged_metadata, "azure_blob", is_folder_upload
                )
                self._insert_contents_db(content_entry)
                # Dedupe: already-known content hash is marked completed and skipped.
                if self._should_skip(content_entry.content_hash, skip_if_exists):
                    content_entry.status = ContentStatus.COMPLETED
                    self._update_content(content_entry)
                    continue
                # Download blob
                try:
                    blob_client = container_client.get_blob_client(blob_name)
                    download_stream = blob_client.download_blob()
                    file_content = BytesIO(download_stream.readall())
                except Exception as e:
                    # Per-blob failure: record it on the entry and keep going.
                    log_error(f"Error downloading Azure blob {blob_name}: {e}")
                    content_entry.status = ContentStatus.FAILED
                    content_entry.status_message = str(e)
                    self._update_content(content_entry)
                    continue
                # Select reader and read content
                reader = self._select_reader_by_uri(file_name, content.reader)
                if reader is None:
                    log_warning(f"No reader found for file: {file_name}")
                    content_entry.status = ContentStatus.FAILED
                    content_entry.status_message = "No suitable reader found"
                    self._update_content(content_entry)
                    continue
                reader = cast(Reader, reader)
                read_documents = reader.read(file_content, name=file_name)
                # Prepare and insert into vector database
                if not content_entry.id:
                    content_entry.id = generate_id(content_entry.content_hash or "")
                self._prepare_documents_for_insert(read_documents, content_entry.id)
                self._handle_vector_db_insert(content_entry, read_documents, upsert)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/knowledge/loaders/azure_blob.py",
"license": "Apache License 2.0",
"lines": 360,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/knowledge/loaders/base.py | """Base loader class with shared utilities for all content loaders.
Provides common helpers for:
- Computing content names for files
- Creating Content entries
- Building metadata dictionaries
"""
from dataclasses import dataclass
from typing import Any, Dict, List, Optional
from agno.knowledge.content import Content, ContentStatus
from agno.knowledge.utils import RESERVED_AGNO_KEY, strip_agno_metadata
from agno.utils.string import generate_id
@dataclass
class FileToProcess:
    """Represents a file identified for processing."""

    path: str  # full path/key of the file within its source (e.g. blob or object name)
    name: str  # bare file name; used for reader selection and display
    size: Optional[int] = None  # file size as reported by the provider, if known
    content_type: Optional[str] = None  # MIME type as reported by the provider, if known
class BaseLoader:
    """Shared utilities mixed into all content loaders.

    Collects the helpers every loader needs — deriving content names,
    creating or updating ``Content`` entries, merging provider metadata
    with user metadata — so sync and async implementations don't
    duplicate them.

    Methods that call self._build_content_hash() assume they are mixed into
    a class that provides this method (e.g., Knowledge via RemoteKnowledge).
    """

    # Key under which framework-managed provider metadata is stored.
    RESERVED_METADATA_KEY = RESERVED_AGNO_KEY

    def _compute_content_name(
        self,
        file_path: str,
        file_name: str,
        base_name: Optional[str],
        root_path: str,
        is_folder_upload: bool,
    ) -> str:
        """Derive the display name for a file's Content entry.

        Single-file uploads use the user-provided base name (or the bare
        file name). Folder uploads use the path relative to the upload
        root, prefixed with the base name when one was given.

        Args:
            file_path: Full path to the file
            file_name: Name of the file
            base_name: User-provided base name for the content
            root_path: Root path of the upload (for computing relative paths)
            is_folder_upload: Whether this is part of a folder upload

        Returns:
            The computed content name
        """
        if not is_folder_upload:
            return base_name or file_name
        prefix = f"{root_path}/" if root_path else ""
        if prefix and file_path.startswith(prefix):
            relative = file_path[len(prefix):]
        else:
            relative = file_path
        return f"{base_name}/{relative}" if base_name else file_path

    def _create_content_entry_for_folder(
        self,
        content: Content,
        content_name: str,
        virtual_path: str,
        metadata: Dict[str, Any],
        file_type: str,
    ) -> Content:
        """Create a fresh Content entry for one file of a folder upload.

        The original ``content`` only contributes its description; the new
        entry gets its own name, path, metadata, hash, and id.

        Args:
            content: Original content object (used for description)
            content_name: Name for the new content entry
            virtual_path: Virtual path for hashing
            metadata: Metadata dictionary
            file_type: Type of file (e.g., 'github', 'azure_blob')

        Returns:
            New Content entry with hash and ID set
        """
        new_entry = Content(
            name=content_name,
            description=content.description,
            path=virtual_path,
            status=ContentStatus.PROCESSING,
            metadata=metadata,
            file_type=file_type,
        )
        # Hash first; the stable id is derived from the hash.
        new_entry.content_hash = self._build_content_hash(new_entry)  # type: ignore[attr-defined]
        new_entry.id = generate_id(new_entry.content_hash)
        return new_entry

    def _update_content_entry_for_single_file(
        self,
        content: Content,
        virtual_path: str,
        metadata: Dict[str, Any],
        file_type: str,
    ) -> Content:
        """Update the caller's Content in place for a single-file upload.

        Sets path/status/metadata/file_type, and fills in the hash and id
        only when they are not already present.

        Args:
            content: Original content object to update
            virtual_path: Virtual path for hashing
            metadata: Metadata dictionary
            file_type: Type of file (e.g., 'github', 'azure_blob')

        Returns:
            Updated Content entry with hash and ID set if not already present
        """
        content.status = ContentStatus.PROCESSING
        content.path = virtual_path
        content.file_type = file_type
        content.metadata = metadata
        if not content.content_hash:
            content.content_hash = self._build_content_hash(content)  # type: ignore[attr-defined]
        if not content.id:
            content.id = generate_id(content.content_hash)
        return content

    def _create_content_entry(
        self,
        content: Content,
        content_name: str,
        virtual_path: str,
        metadata: Dict[str, Any],
        file_type: str,
        is_folder_upload: bool,
    ) -> Content:
        """Create (folder upload) or update (single file) a Content entry.

        Args:
            content: Original content object
            content_name: Name for the content entry
            virtual_path: Virtual path for hashing
            metadata: Metadata dictionary
            file_type: Type of file (e.g., 'github', 'azure_blob')
            is_folder_upload: Whether this is part of a folder upload

        Returns:
            Content entry with hash and ID set
        """
        if not is_folder_upload:
            return self._update_content_entry_for_single_file(content, virtual_path, metadata, file_type)
        return self._create_content_entry_for_folder(content, content_name, virtual_path, metadata, file_type)

    def _merge_metadata(
        self,
        provider_metadata: Dict[str, str],
        user_metadata: Optional[Dict[str, Any]],
    ) -> Dict[str, Any]:
        """Merge provider metadata with user-provided metadata.

        Provider metadata (source_type, bucket info, etc.) lives under the
        reserved ``_agno`` key so that user PATCH updates cannot overwrite
        it; user metadata stays at the top level.

        Args:
            provider_metadata: Metadata from the provider (e.g., GitHub, Azure)
            user_metadata: User-provided metadata

        Returns:
            Merged metadata dictionary with provider fields under ``_agno``
        """
        # Any user-supplied _agno is stripped — the key is framework-reserved.
        sanitized = strip_agno_metadata(user_metadata)
        combined: Dict[str, Any] = sanitized if sanitized else {}
        combined[RESERVED_AGNO_KEY] = dict(provider_metadata)
        return combined

    def _files_to_dict_list(self, files: List[FileToProcess]) -> List[Dict[str, Any]]:
        """Convert FileToProcess objects to a list of plain dicts.

        Args:
            files: List of FileToProcess objects

        Returns:
            List of dictionaries with file info
        """
        fields = ("path", "name", "size", "content_type")
        return [{key: getattr(entry, key) for key in fields} for entry in files]
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/knowledge/loaders/base.py",
"license": "Apache License 2.0",
"lines": 165,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
agno-agi/agno:libs/agno/agno/knowledge/loaders/gcs.py | """GCS content loader for Knowledge.
Provides methods for loading content from Google Cloud Storage.
"""
# mypy: disable-error-code="attr-defined"
from io import BytesIO
from typing import Any, Dict, Optional, cast
from agno.knowledge.content import Content, ContentStatus
from agno.knowledge.loaders.base import BaseLoader
from agno.knowledge.reader import Reader
from agno.knowledge.remote_content.base import BaseStorageConfig
from agno.knowledge.remote_content.gcs import GcsConfig
from agno.knowledge.remote_content.remote_content import GCSContent
from agno.utils.log import log_error, log_info, log_warning
from agno.utils.string import generate_id
class GCSLoader(BaseLoader):
    """Loader for Google Cloud Storage content.

    Lists blobs from a bucket (a single blob, a prefix, or the whole
    bucket), downloads each one, reads it with a suitable reader, and
    inserts the resulting documents into the vector database.

    Note: both the sync and async entry points use the synchronous
    ``google-cloud-storage`` client, which has no async API.
    """

    # ==========================================
    # GCS HELPERS (shared between sync/async)
    # ==========================================
    @staticmethod
    def _ensure_gcs_installed() -> None:
        """Raise a helpful ImportError when google-cloud-storage is missing."""
        try:
            from google.cloud import storage  # type: ignore # noqa: F401
        except ImportError:
            raise ImportError(
                "The `google-cloud-storage` package is not installed. "
                "Please install it via `pip install google-cloud-storage`."
            )

    def _validate_gcs_config(
        self,
        content: Content,
        config: Optional[BaseStorageConfig],
    ) -> Optional[GcsConfig]:
        """Validate and extract GCS config.

        Args:
            content: Content being loaded (unused; kept for signature parity
                with the other loaders' validators).
            config: Storage config that may be a GcsConfig.

        Returns:
            GcsConfig if valid, None otherwise (GCS can work without explicit config)
        """
        return cast(GcsConfig, config) if isinstance(config, GcsConfig) else None

    def _get_gcs_client(self, gcs_config: Optional[GcsConfig]):
        """Get a GCS client.

        Credential resolution order: explicit service-account file, explicit
        project (ambient credentials), then a fully ambient default client.

        Requires the `google-cloud-storage` package.
        """
        try:
            from google.cloud import storage  # type: ignore
        except ImportError:
            raise ImportError(
                "The `google-cloud-storage` package is not installed. "
                "Please install it via `pip install google-cloud-storage`."
            )
        if gcs_config and gcs_config.credentials_path:
            return storage.Client.from_service_account_json(gcs_config.credentials_path)
        elif gcs_config and gcs_config.project:
            return storage.Client(project=gcs_config.project)
        else:
            return storage.Client()

    def _build_gcs_metadata(
        self,
        gcs_config: Optional[GcsConfig],
        bucket_name: str,
        blob_name: str,
    ) -> Dict[str, str]:
        """Build GCS-specific metadata dictionary."""
        metadata: Dict[str, str] = {
            "source_type": "gcs",
            "gcs_bucket": bucket_name,
            "gcs_blob_name": blob_name,
        }
        if gcs_config:
            metadata["source_config_id"] = gcs_config.id
            metadata["source_config_name"] = gcs_config.name
        return metadata

    def _build_gcs_virtual_path(self, bucket_name: str, blob_name: str) -> str:
        """Build virtual path for GCS content."""
        return f"gcs://{bucket_name}/{blob_name}"

    def _resolve_gcs_bucket(self, remote_content: GCSContent, gcs_config: Optional[GcsConfig]):
        """Return the bucket handle to read from.

        Prefers an explicit bucket object on the remote content; otherwise
        builds one from the configured bucket name.
        """
        bucket = remote_content.bucket
        if bucket is None and remote_content.bucket_name:
            client = self._get_gcs_client(gcs_config)
            bucket = client.bucket(remote_content.bucket_name)
        return bucket

    def _list_gcs_objects(self, remote_content: GCSContent, bucket) -> list:
        """List the blob objects to process: one blob, a prefix, or the whole bucket."""
        if remote_content.blob_name is not None:
            return [bucket.blob(remote_content.blob_name)]  # type: ignore
        if remote_content.prefix is not None:
            return list(bucket.list_blobs(prefix=remote_content.prefix))  # type: ignore
        return list(bucket.list_blobs())  # type: ignore

    def _build_gcs_content_entry(
        self,
        content: Content,
        gcs_config: Optional[GcsConfig],
        bucket_name: str,
        blob_name: str,
        root_path: str,
        is_folder_upload: bool,
    ) -> Content:
        """Build a hashed, id-assigned Content entry for a single blob."""
        file_name = blob_name.split("/")[-1]
        virtual_path = self._build_gcs_virtual_path(bucket_name, blob_name)
        gcs_metadata = self._build_gcs_metadata(gcs_config, bucket_name, blob_name)
        merged_metadata: Dict[str, Any] = self._merge_metadata(gcs_metadata, content.metadata)
        content_name = self._compute_content_name(blob_name, file_name, content.name, root_path, is_folder_upload)
        content_entry = Content(
            name=content_name,
            description=content.description,
            path=virtual_path,
            status=ContentStatus.PROCESSING,
            metadata=merged_metadata,
            file_type="gcs",
        )
        content_entry.content_hash = self._build_content_hash(content_entry)
        content_entry.id = generate_id(content_entry.content_hash)
        return content_entry

    # ==========================================
    # GCS LOADERS
    # ==========================================
    async def _aload_from_gcs(
        self,
        content: Content,
        upsert: bool,
        skip_if_exists: bool,
        config: Optional[BaseStorageConfig] = None,
    ):
        """Load content from Google Cloud Storage (async).

        Note: Uses sync google-cloud-storage calls as it doesn't have an async API.

        Args:
            content: Content describing what to load (bucket/blob/prefix).
            upsert: Whether to upsert documents into the vector database.
            skip_if_exists: Skip blobs whose content hash is already stored.
            config: Optional GcsConfig with credentials/project info.
        """
        self._ensure_gcs_installed()
        log_warning(
            "GCS content loading has limited features. "
            "Recursive folder traversal, rich metadata, and improved naming are coming in a future release."
        )
        remote_content: GCSContent = cast(GCSContent, content.remote_content)
        gcs_config = self._validate_gcs_config(content, config)
        bucket = self._resolve_gcs_bucket(remote_content, gcs_config)
        objects_to_read = self._list_gcs_objects(remote_content, bucket)
        if objects_to_read:
            log_info(f"Processing {len(objects_to_read)} file(s) from GCS")
        bucket_name = remote_content.bucket_name or (bucket.name if bucket else "unknown")
        # Multi-blob (folder-style) uploads get path-derived content names.
        is_folder_upload = len(objects_to_read) > 1
        root_path = remote_content.prefix or ""
        for gcs_object in objects_to_read:
            blob_name = gcs_object.name
            file_name = blob_name.split("/")[-1]
            content_entry = self._build_gcs_content_entry(
                content, gcs_config, bucket_name, blob_name, root_path, is_folder_upload
            )
            await self._ainsert_contents_db(content_entry)
            if self._should_skip(content_entry.content_hash, skip_if_exists):
                content_entry.status = ContentStatus.COMPLETED
                await self._aupdate_content(content_entry)
                continue
            # Fail just this entry (instead of crashing the whole batch)
            # when no reader matches the file — mirrors the other loaders.
            reader = self._select_reader_by_uri(gcs_object.name, content.reader)
            if reader is None:
                log_warning(f"No reader found for file: {file_name}")
                content_entry.status = ContentStatus.FAILED
                content_entry.status_message = "No suitable reader found"
                await self._aupdate_content(content_entry)
                continue
            reader = cast(Reader, reader)
            # Download the blob; mark the entry FAILED on errors so the
            # remaining blobs are still processed.
            try:
                readable_content = BytesIO(gcs_object.download_as_bytes())
            except Exception as e:
                log_error(f"Error downloading GCS blob {blob_name}: {e}")
                content_entry.status = ContentStatus.FAILED
                content_entry.status_message = str(e)
                await self._aupdate_content(content_entry)
                continue
            # Read the content
            read_documents = await reader.async_read(readable_content, name=file_name)
            # Prepare and insert the content in the vector database
            self._prepare_documents_for_insert(read_documents, content_entry.id)
            await self._ahandle_vector_db_insert(content_entry, read_documents, upsert)

    def _load_from_gcs(
        self,
        content: Content,
        upsert: bool,
        skip_if_exists: bool,
        config: Optional[BaseStorageConfig] = None,
    ):
        """Load content from Google Cloud Storage (sync).

        Args:
            content: Content describing what to load (bucket/blob/prefix).
            upsert: Whether to upsert documents into the vector database.
            skip_if_exists: Skip blobs whose content hash is already stored.
            config: Optional GcsConfig with credentials/project info.
        """
        self._ensure_gcs_installed()
        log_warning(
            "GCS content loading has limited features. "
            "Recursive folder traversal, rich metadata, and improved naming are coming in a future release."
        )
        remote_content: GCSContent = cast(GCSContent, content.remote_content)
        gcs_config = self._validate_gcs_config(content, config)
        bucket = self._resolve_gcs_bucket(remote_content, gcs_config)
        objects_to_read = self._list_gcs_objects(remote_content, bucket)
        if objects_to_read:
            log_info(f"Processing {len(objects_to_read)} file(s) from GCS")
        bucket_name = remote_content.bucket_name or (bucket.name if bucket else "unknown")
        # Multi-blob (folder-style) uploads get path-derived content names.
        is_folder_upload = len(objects_to_read) > 1
        root_path = remote_content.prefix or ""
        for gcs_object in objects_to_read:
            blob_name = gcs_object.name
            file_name = blob_name.split("/")[-1]
            content_entry = self._build_gcs_content_entry(
                content, gcs_config, bucket_name, blob_name, root_path, is_folder_upload
            )
            self._insert_contents_db(content_entry)
            if self._should_skip(content_entry.content_hash, skip_if_exists):
                content_entry.status = ContentStatus.COMPLETED
                self._update_content(content_entry)
                continue
            # Fail just this entry (instead of crashing the whole batch)
            # when no reader matches the file — mirrors the other loaders.
            reader = self._select_reader_by_uri(gcs_object.name, content.reader)
            if reader is None:
                log_warning(f"No reader found for file: {file_name}")
                content_entry.status = ContentStatus.FAILED
                content_entry.status_message = "No suitable reader found"
                self._update_content(content_entry)
                continue
            reader = cast(Reader, reader)
            # Download the blob; mark the entry FAILED on errors so the
            # remaining blobs are still processed.
            try:
                readable_content = BytesIO(gcs_object.download_as_bytes())
            except Exception as e:
                log_error(f"Error downloading GCS blob {blob_name}: {e}")
                content_entry.status = ContentStatus.FAILED
                content_entry.status_message = str(e)
                self._update_content(content_entry)
                continue
            # Read the content
            read_documents = reader.read(readable_content, name=file_name)
            # Prepare and insert the content in the vector database
            self._prepare_documents_for_insert(read_documents, content_entry.id)
            self._handle_vector_db_insert(content_entry, read_documents, upsert)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/knowledge/loaders/gcs.py",
"license": "Apache License 2.0",
"lines": 218,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/knowledge/loaders/github.py | """GitHub content loader for Knowledge.
Provides methods for loading content from GitHub repositories.
"""
# mypy: disable-error-code="attr-defined"
import asyncio
import threading
import time
from io import BytesIO
from typing import Any, Dict, List, Optional, Tuple, cast
import httpx
from httpx import AsyncClient
from agno.knowledge.content import Content, ContentStatus
from agno.knowledge.loaders.base import BaseLoader
from agno.knowledge.reader import Reader
from agno.knowledge.remote_content.base import BaseStorageConfig
from agno.knowledge.remote_content.github import GitHubConfig
from agno.knowledge.remote_content.remote_content import GitHubContent
from agno.utils.log import log_error, log_info, log_warning
from agno.utils.string import generate_id
class GitHubLoader(BaseLoader):
    """Loader for GitHub content."""

    # Cache for GitHub App installation tokens: {cache_key: (token, expires_at_timestamp)}
    # Uses double-checked locking: lock-free fast path for cache hits,
    # lock only on cache miss to coordinate token refresh.
    # NOTE: class-level, so the cache and both locks are shared by all instances.
    _github_app_token_cache: Dict[str, tuple] = {}
    # Guards sync token refresh and the lazy creation of the async lock below.
    _token_cache_lock = threading.Lock()
    # Created lazily on first async refresh; see _aget_github_app_token.
    _async_token_cache_lock: Optional[asyncio.Lock] = None
# ==========================================
# GITHUB HELPERS (shared between sync/async)
# ==========================================
@staticmethod
def _check_cached_token(cache: Dict[str, tuple], cache_key: str) -> Optional[str]:
"""Return a cached token if it is still valid (60s buffer), else None."""
cached = cache.get(cache_key)
if cached is not None:
token, expires_at = cached
if time.time() < expires_at - 60:
return token
return None
@staticmethod
def _build_jwt_and_url(gh_config: GitHubConfig) -> Tuple[str, Dict[str, str]]:
"""Build a signed JWT and return (exchange_url, headers).
Raises ImportError if PyJWT is not installed.
"""
try:
import jwt
except ImportError:
raise ImportError(
"GitHub App authentication requires PyJWT with cryptography. "
"Install via: pip install PyJWT cryptography"
)
now = int(time.time())
payload = {
"iat": now - 60,
"exp": now + 600,
"iss": str(gh_config.app_id),
}
private_key = gh_config.private_key
if private_key is None:
raise ValueError("private_key is required for GitHub App authentication")
app_jwt = jwt.encode(payload, private_key, algorithm="RS256")
url = f"https://api.github.com/app/installations/{gh_config.installation_id}/access_tokens"
jwt_headers = {
"Authorization": f"Bearer {app_jwt}",
"Accept": "application/vnd.github.v3+json",
"User-Agent": "Agno-Knowledge",
}
return url, jwt_headers
@staticmethod
def _parse_token_response(data: Dict[str, Any]) -> Tuple[str, float]:
"""Extract the installation token and expiry timestamp from the API response."""
installation_token: str = data["token"]
expires_at_str = data.get("expires_at", "")
now = int(time.time())
if expires_at_str:
from datetime import datetime
try:
expires_at_ts = datetime.fromisoformat(expires_at_str.replace("Z", "+00:00")).timestamp()
except (ValueError, AttributeError):
expires_at_ts = float(now + 3600)
else:
expires_at_ts = float(now + 3600)
return installation_token, expires_at_ts
    def _get_github_app_token(self, gh_config: GitHubConfig) -> str:
        """Generate or retrieve a cached installation access token for GitHub App auth.

        Creates a JWT signed with the app's private key, then exchanges it for
        an installation access token via the GitHub API. Tokens are cached
        until 60 seconds before expiry.

        Uses double-checked locking: the cache is read lock-free first (safe
        under the GIL since dict.get and tuple reads are atomic). On a cache
        miss the lock is acquired for the full token exchange and cache write,
        preventing duplicate HTTP requests for the same installation.

        Requires ``PyJWT[crypto]``: ``pip install PyJWT cryptography``

        Args:
            gh_config: GitHub App configuration (app_id, installation_id, private_key).

        Returns:
            A valid installation access token.

        Raises:
            ImportError: If PyJWT is not installed.
            httpx.HTTPError: If the token exchange request fails.
        """
        # One cache slot per (app, installation) pair.
        cache_key = f"{gh_config.app_id}:{gh_config.installation_id}"
        # Fast path: lock-free cache read
        cached = self._check_cached_token(self._github_app_token_cache, cache_key)
        if cached is not None:
            return cached
        # Slow path: acquire lock, re-check, then fetch + cache write
        with self._token_cache_lock:
            # Re-check: another thread may have refreshed while we waited.
            cached = self._check_cached_token(self._github_app_token_cache, cache_key)
            if cached is not None:
                return cached
            url, jwt_headers = self._build_jwt_and_url(gh_config)
            try:
                with httpx.Client() as client:
                    response = client.post(url, headers=jwt_headers, timeout=30.0)
                    response.raise_for_status()
                    data = response.json()
            except httpx.HTTPStatusError as e:
                log_error(f"GitHub App token exchange failed: {e.response.status_code} {e.response.text}")
                raise
            except httpx.HTTPError as e:
                log_error(f"GitHub App token exchange request failed: {e}")
                raise
            installation_token, expires_at_ts = self._parse_token_response(data)
            self._github_app_token_cache[cache_key] = (installation_token, expires_at_ts)
            return installation_token
    async def _aget_github_app_token(self, gh_config: GitHubConfig) -> str:
        """Generate or retrieve a cached installation access token for GitHub App auth (async).

        Async variant of ``_get_github_app_token``. Uses ``httpx.AsyncClient``
        so the event loop is not blocked during the token exchange.

        Uses double-checked locking: the cache is read without the async lock
        first (safe because no ``await`` is involved, so no coroutine can
        interleave). On a cache miss the lock is held for the full token
        exchange and cache write, preventing duplicate HTTP requests.

        Requires ``PyJWT[crypto]``: ``pip install PyJWT cryptography``

        Args:
            gh_config: GitHub App configuration (app_id, installation_id, private_key).

        Returns:
            A valid installation access token.

        Raises:
            ImportError: If PyJWT is not installed.
            httpx.HTTPError: If the token exchange request fails.
        """
        # One cache slot per (app, installation) pair.
        cache_key = f"{gh_config.app_id}:{gh_config.installation_id}"
        # Fast path: lock-free cache read (no await, so no interleaving)
        cached = self._check_cached_token(self._github_app_token_cache, cache_key)
        if cached is not None:
            return cached
        # Ensure the async lock exists (sync lock guards initialization)
        with self._token_cache_lock:
            if self._async_token_cache_lock is None:
                # Stored on the class so all instances share one async lock.
                self.__class__._async_token_cache_lock = asyncio.Lock()
        lock = self._async_token_cache_lock
        assert lock is not None
        # Slow path: acquire async lock, re-check, then fetch + cache write
        async with lock:
            # Re-check: another coroutine may have refreshed while we waited.
            cached = self._check_cached_token(self._github_app_token_cache, cache_key)
            if cached is not None:
                return cached
            url, jwt_headers = self._build_jwt_and_url(gh_config)
            try:
                async with AsyncClient() as client:
                    response = await client.post(url, headers=jwt_headers, timeout=30.0)
                    response.raise_for_status()
                    data = response.json()
            except httpx.HTTPStatusError as e:
                log_error(f"GitHub App token exchange failed: {e.response.status_code} {e.response.text}")
                raise
            except httpx.HTTPError as e:
                log_error(f"GitHub App token exchange request failed: {e}")
                raise
            installation_token, expires_at_ts = self._parse_token_response(data)
            self._github_app_token_cache[cache_key] = (installation_token, expires_at_ts)
            return installation_token
def _validate_github_config(
self,
content: Content,
config: Optional[BaseStorageConfig],
) -> Optional[GitHubConfig]:
"""Validate and extract GitHub config.
Returns:
GitHubConfig if valid, None otherwise
"""
remote_content: GitHubContent = cast(GitHubContent, content.remote_content)
gh_config = cast(GitHubConfig, config) if isinstance(config, GitHubConfig) else None
if gh_config is None:
log_error(f"GitHub config not found for config_id: {remote_content.config_id}")
return None
return gh_config
def _build_github_headers(self, gh_config: GitHubConfig) -> Dict[str, str]:
"""Build headers for GitHub API requests.
Uses GitHub App authentication when ``app_id`` is configured,
otherwise falls back to the personal access token.
"""
headers: Dict[str, str] = {
"Accept": "application/vnd.github.v3+json",
"User-Agent": "Agno-Knowledge",
}
if gh_config.app_id is not None:
token = self._get_github_app_token(gh_config)
headers["Authorization"] = f"Bearer {token}"
elif gh_config.token:
headers["Authorization"] = f"Bearer {gh_config.token}"
return headers
async def _abuild_github_headers(self, gh_config: GitHubConfig) -> Dict[str, str]:
"""Build headers for GitHub API requests (async).
Async variant of ``_build_github_headers``. Uses the async token
exchange so the event loop is not blocked.
"""
headers: Dict[str, str] = {
"Accept": "application/vnd.github.v3+json",
"User-Agent": "Agno-Knowledge",
}
if gh_config.app_id is not None:
token = await self._aget_github_app_token(gh_config)
headers["Authorization"] = f"Bearer {token}"
elif gh_config.token:
headers["Authorization"] = f"Bearer {gh_config.token}"
return headers
def _build_github_metadata(
self,
gh_config: GitHubConfig,
branch: str,
file_path: str,
file_name: str,
) -> Dict[str, str]:
"""Build GitHub-specific metadata dictionary."""
return {
"source_type": "github",
"source_config_id": gh_config.id,
"source_config_name": gh_config.name,
"github_repo": gh_config.repo,
"github_branch": branch,
"github_path": file_path,
"github_filename": file_name,
}
def _build_github_virtual_path(self, repo: str, branch: str, file_path: str) -> str:
"""Build virtual path for GitHub content."""
return f"github://{repo}/{branch}/{file_path}"
def _get_github_branch(self, remote_content: GitHubContent, gh_config: GitHubConfig) -> str:
"""Get the branch to use for GitHub operations."""
return remote_content.branch or gh_config.branch or "main"
def _get_github_path_to_process(self, remote_content: GitHubContent) -> str:
"""Get the path to process from remote content."""
return (remote_content.file_path or remote_content.folder_path or "").rstrip("/")
def _process_github_file_content(
self,
file_data: dict,
client: httpx.Client,
headers: Dict[str, str],
) -> bytes:
"""Process GitHub API response and return file content (sync)."""
if file_data.get("encoding") == "base64":
import base64
return base64.b64decode(file_data["content"])
else:
download_url = file_data.get("download_url")
if download_url:
dl_response = client.get(download_url, headers=headers, timeout=30.0)
dl_response.raise_for_status()
return dl_response.content
else:
raise ValueError("No content or download_url in response")
async def _aprocess_github_file_content(
self,
file_data: dict,
client: AsyncClient,
headers: Dict[str, str],
) -> bytes:
"""Process GitHub API response and return file content (async)."""
if file_data.get("encoding") == "base64":
import base64
return base64.b64decode(file_data["content"])
else:
download_url = file_data.get("download_url")
if download_url:
dl_response = await client.get(download_url, headers=headers, timeout=30.0)
dl_response.raise_for_status()
return dl_response.content
else:
raise ValueError("No content or download_url in response")
# ==========================================
# GITHUB LOADERS
# ==========================================
    async def _aload_from_github(
        self,
        content: Content,
        upsert: bool,
        skip_if_exists: bool,
        config: Optional[BaseStorageConfig] = None,
    ):
        """Load content from GitHub (async).

        Requires the GitHub config to contain repo and optionally token for private repos.
        Uses the GitHub API to fetch file contents.

        Args:
            content: Content describing the repo path (file or folder) to load.
            upsert: Whether to upsert documents into the vector database.
            skip_if_exists: Skip files whose content hash is already stored.
            config: GitHubConfig with repo/auth details; load aborts if missing.
        """
        remote_content: GitHubContent = cast(GitHubContent, content.remote_content)
        gh_config = self._validate_github_config(content, config)
        if gh_config is None:
            return
        headers = await self._abuild_github_headers(gh_config)
        branch = self._get_github_branch(remote_content, gh_config)
        path_to_process = self._get_github_path_to_process(remote_content)
        files_to_process: List[Dict[str, str]] = []
        async with AsyncClient() as client:
            # Helper function to recursively list all files in a folder
            async def list_files_recursive(folder: str) -> List[Dict[str, str]]:
                """Recursively list all files in a GitHub folder."""
                files: List[Dict[str, str]] = []
                api_url = f"https://api.github.com/repos/{gh_config.repo}/contents/{folder}"
                if branch:
                    api_url += f"?ref={branch}"
                try:
                    response = await client.get(api_url, headers=headers, timeout=30.0)
                    response.raise_for_status()
                    items = response.json()
                    # The contents API returns a dict for a single file;
                    # normalize to a list so the loop below handles both.
                    if not isinstance(items, list):
                        items = [items]
                    for item in items:
                        if item.get("type") == "file":
                            files.append({"path": item["path"], "name": item["name"]})
                        elif item.get("type") == "dir":
                            subdir_files = await list_files_recursive(item["path"])
                            files.extend(subdir_files)
                except Exception as e:
                    # Best-effort: a failed folder listing is logged and skipped.
                    log_error(f"Error listing GitHub folder {folder}: {e}")
                return files

            if path_to_process:
                api_url = f"https://api.github.com/repos/{gh_config.repo}/contents/{path_to_process}"
                if branch:
                    api_url += f"?ref={branch}"
                try:
                    response = await client.get(api_url, headers=headers, timeout=30.0)
                    response.raise_for_status()
                    path_data = response.json()
                    # A list response means the path is a folder; a dict means a single file.
                    if isinstance(path_data, list):
                        for item in path_data:
                            if item.get("type") == "file":
                                files_to_process.append({"path": item["path"], "name": item["name"]})
                            elif item.get("type") == "dir":
                                subdir_files = await list_files_recursive(item["path"])
                                files_to_process.extend(subdir_files)
                    else:
                        files_to_process.append({"path": path_data["path"], "name": path_data["name"]})
                except Exception as e:
                    log_error(f"Error fetching GitHub path {path_to_process}: {e}")
                    return
            if not files_to_process:
                log_warning(f"No files found at GitHub path: {path_to_process}")
                return
            log_info(f"Processing {len(files_to_process)} file(s) from GitHub")
            is_folder_upload = len(files_to_process) > 1
            for file_info in files_to_process:
                file_path = file_info["path"]
                file_name = file_info["name"]
                # Build metadata and virtual path using helpers
                virtual_path = self._build_github_virtual_path(gh_config.repo, branch, file_path)
                github_metadata = self._build_github_metadata(gh_config, branch, file_path, file_name)
                merged_metadata = self._merge_metadata(github_metadata, content.metadata)
                # Compute content name using base helper
                content_name = self._compute_content_name(
                    file_path, file_name, content.name, path_to_process, is_folder_upload
                )
                # Create content entry using base helper
                content_entry = self._create_content_entry(
                    content, content_name, virtual_path, merged_metadata, "github", is_folder_upload
                )
                await self._ainsert_contents_db(content_entry)
                if self._should_skip(content_entry.content_hash, skip_if_exists):
                    # Already ingested: mark completed without re-reading.
                    content_entry.status = ContentStatus.COMPLETED
                    await self._aupdate_content(content_entry)
                    continue
                # Fetch file content
                api_url = f"https://api.github.com/repos/{gh_config.repo}/contents/{file_path}"
                if branch:
                    api_url += f"?ref={branch}"
                try:
                    response = await client.get(api_url, headers=headers, timeout=30.0)
                    response.raise_for_status()
                    file_data = response.json()
                    file_content = await self._aprocess_github_file_content(file_data, client, headers)
                except Exception as e:
                    # Per-file failure: record it and keep processing the rest.
                    log_error(f"Error fetching GitHub file {file_path}: {e}")
                    content_entry.status = ContentStatus.FAILED
                    content_entry.status_message = str(e)
                    await self._aupdate_content(content_entry)
                    continue
                # Select reader and read content
                reader = self._select_reader_by_uri(file_name, content.reader)
                if reader is None:
                    log_warning(f"No reader found for file: {file_name}")
                    content_entry.status = ContentStatus.FAILED
                    content_entry.status_message = "No suitable reader found"
                    await self._aupdate_content(content_entry)
                    continue
                reader = cast(Reader, reader)
                readable_content = BytesIO(file_content)
                read_documents = await reader.async_read(readable_content, name=file_name)
                # Prepare and insert into vector database
                if not content_entry.id:
                    content_entry.id = generate_id(content_entry.content_hash or "")
                self._prepare_documents_for_insert(read_documents, content_entry.id)
                await self._ahandle_vector_db_insert(content_entry, read_documents, upsert)
    def _load_from_github(
        self,
        content: Content,
        upsert: bool,
        skip_if_exists: bool,
        config: Optional[BaseStorageConfig] = None,
    ):
        """Load content from GitHub (sync).

        Resolves the configured repo path to one or more files via the GitHub
        contents API (recursing into folders), reads each file with a suitable
        reader, and inserts the resulting documents into the vector database.

        Requires the GitHub config to contain repo and optionally token for private repos.
        Uses the GitHub API to fetch file contents.

        Args:
            content: Content whose ``remote_content`` is a ``GitHubContent``.
            upsert: If True, upsert documents into the vector database.
            skip_if_exists: If True, skip files whose content hash is already stored.
            config: Optional GitHub storage config; validation failure aborts the load.
        """
        remote_content: GitHubContent = cast(GitHubContent, content.remote_content)
        gh_config = self._validate_github_config(content, config)
        if gh_config is None:
            return
        headers = self._build_github_headers(gh_config)
        branch = self._get_github_branch(remote_content, gh_config)
        path_to_process = self._get_github_path_to_process(remote_content)
        files_to_process: List[Dict[str, str]] = []
        with httpx.Client() as client:
            # Helper function to recursively list all files in a folder
            def list_files_recursive(folder: str) -> List[Dict[str, str]]:
                """Recursively list all files in a GitHub folder."""
                files: List[Dict[str, str]] = []
                api_url = f"https://api.github.com/repos/{gh_config.repo}/contents/{folder}"
                if branch:
                    api_url += f"?ref={branch}"
                try:
                    response = client.get(api_url, headers=headers, timeout=30.0)
                    response.raise_for_status()
                    items = response.json()
                    # The contents API returns a dict for a single file; normalize to a list.
                    if not isinstance(items, list):
                        items = [items]
                    for item in items:
                        if item.get("type") == "file":
                            files.append({"path": item["path"], "name": item["name"]})
                        elif item.get("type") == "dir":
                            subdir_files = list_files_recursive(item["path"])
                            files.extend(subdir_files)
                except Exception as e:
                    # Best-effort: a failed folder listing is logged and its files skipped.
                    log_error(f"Error listing GitHub folder {folder}: {e}")
                return files
            if path_to_process:
                api_url = f"https://api.github.com/repos/{gh_config.repo}/contents/{path_to_process}"
                if branch:
                    api_url += f"?ref={branch}"
                try:
                    response = client.get(api_url, headers=headers, timeout=30.0)
                    response.raise_for_status()
                    path_data = response.json()
                    if isinstance(path_data, list):
                        # Path is a folder: collect its files, recursing into subfolders.
                        for item in path_data:
                            if item.get("type") == "file":
                                files_to_process.append({"path": item["path"], "name": item["name"]})
                            elif item.get("type") == "dir":
                                subdir_files = list_files_recursive(item["path"])
                                files_to_process.extend(subdir_files)
                    else:
                        # Path is a single file.
                        files_to_process.append({"path": path_data["path"], "name": path_data["name"]})
                except Exception as e:
                    log_error(f"Error fetching GitHub path {path_to_process}: {e}")
                    return
            if not files_to_process:
                log_warning(f"No files found at GitHub path: {path_to_process}")
                return
            log_info(f"Processing {len(files_to_process)} file(s) from GitHub")
            is_folder_upload = len(files_to_process) > 1
            for file_info in files_to_process:
                file_path = file_info["path"]
                file_name = file_info["name"]
                # Build metadata and virtual path using helpers
                virtual_path = self._build_github_virtual_path(gh_config.repo, branch, file_path)
                github_metadata = self._build_github_metadata(gh_config, branch, file_path, file_name)
                merged_metadata = self._merge_metadata(github_metadata, content.metadata)
                # Compute content name using base helper
                content_name = self._compute_content_name(
                    file_path, file_name, content.name, path_to_process, is_folder_upload
                )
                # Create content entry using base helper
                content_entry = self._create_content_entry(
                    content, content_name, virtual_path, merged_metadata, "github", is_folder_upload
                )
                self._insert_contents_db(content_entry)
                if self._should_skip(content_entry.content_hash, skip_if_exists):
                    # Already ingested: mark completed without fetching or re-reading.
                    content_entry.status = ContentStatus.COMPLETED
                    self._update_content(content_entry)
                    continue
                # Fetch file content
                api_url = f"https://api.github.com/repos/{gh_config.repo}/contents/{file_path}"
                if branch:
                    api_url += f"?ref={branch}"
                try:
                    response = client.get(api_url, headers=headers, timeout=30.0)
                    response.raise_for_status()
                    file_data = response.json()
                    file_content = self._process_github_file_content(file_data, client, headers)
                except Exception as e:
                    # Per-file failure is recorded on the entry; remaining files continue.
                    log_error(f"Error fetching GitHub file {file_path}: {e}")
                    content_entry.status = ContentStatus.FAILED
                    content_entry.status_message = str(e)
                    self._update_content(content_entry)
                    continue
                # Select reader and read content
                reader = self._select_reader_by_uri(file_name, content.reader)
                if reader is None:
                    log_warning(f"No reader found for file: {file_name}")
                    content_entry.status = ContentStatus.FAILED
                    content_entry.status_message = "No suitable reader found"
                    self._update_content(content_entry)
                    continue
                reader = cast(Reader, reader)
                readable_content = BytesIO(file_content)
                read_documents = reader.read(readable_content, name=file_name)
                # Prepare and insert into vector database
                if not content_entry.id:
                    content_entry.id = generate_id(content_entry.content_hash or "")
                self._prepare_documents_for_insert(read_documents, content_entry.id)
                self._handle_vector_db_insert(content_entry, read_documents, upsert)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/knowledge/loaders/github.py",
"license": "Apache License 2.0",
"lines": 506,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/knowledge/loaders/s3.py | """S3 content loader for Knowledge.
Provides methods for loading content from AWS S3.
"""
# mypy: disable-error-code="attr-defined"
from io import BytesIO
from pathlib import Path
from typing import Any, Dict, List, Optional, Union, cast
from agno.knowledge.content import Content, ContentStatus
from agno.knowledge.loaders.base import BaseLoader
from agno.knowledge.reader import Reader
from agno.knowledge.remote_content.base import BaseStorageConfig
from agno.knowledge.remote_content.remote_content import S3Content
from agno.knowledge.remote_content.s3 import S3Config
from agno.utils.log import log_error, log_info
from agno.utils.string import generate_id
class S3Loader(BaseLoader):
    """Loader for S3 content.

    Resolves an ``S3Content`` descriptor (bucket + key/object/prefix) to a set
    of S3 objects, reads each through the Knowledge reader pipeline, and writes
    the resulting documents to the contents and vector databases.
    """
    # ==========================================
    # S3 HELPERS (shared between sync/async)
    # ==========================================
    def _validate_s3_config(
        self,
        content: Content,
        config: Optional[BaseStorageConfig],
    ) -> Optional[S3Config]:
        """Validate and extract S3 config.

        Returns:
            S3Config if valid, None otherwise (S3 can work without explicit config)
        """
        return cast(S3Config, config) if isinstance(config, S3Config) else None
    def _build_s3_metadata(
        self,
        s3_config: Optional[S3Config],
        bucket_name: str,
        object_name: str,
    ) -> Dict[str, str]:
        """Build S3-specific metadata dictionary.

        Config-derived keys (id, name, region) are only added when an explicit
        config was supplied; region only when the config sets one.
        """
        metadata: Dict[str, str] = {
            "source_type": "s3",
            "s3_bucket": bucket_name,
            "s3_object_name": object_name,
        }
        if s3_config:
            metadata["source_config_id"] = s3_config.id
            metadata["source_config_name"] = s3_config.name
            if s3_config.region:
                metadata["s3_region"] = s3_config.region
        return metadata
    def _build_s3_virtual_path(self, bucket_name: str, object_name: str) -> str:
        """Build virtual path (``s3://bucket/key``) for S3 content."""
        return f"s3://{bucket_name}/{object_name}"
    # ==========================================
    # S3 LOADERS
    # ==========================================
    async def _aload_from_s3(
        self,
        content: Content,
        upsert: bool,
        skip_if_exists: bool,
        config: Optional[BaseStorageConfig] = None,
    ):
        """Load content from AWS S3 (async).

        Note: Uses sync boto3 calls as boto3 doesn't have an async API.

        Args:
            content: Content whose ``remote_content`` is an ``S3Content``.
            upsert: If True, upsert documents into the vector database.
            skip_if_exists: If True, skip objects whose content hash already exists.
            config: Optional S3 storage config supplying region and credentials.
        """
        from agno.cloud.aws.s3.bucket import S3Bucket
        from agno.cloud.aws.s3.object import S3Object
        remote_content: S3Content = cast(S3Content, content.remote_content)
        s3_config = self._validate_s3_config(content, config)
        # Get or create bucket with credentials from config
        bucket = remote_content.bucket
        try:
            if bucket is None and remote_content.bucket_name:
                bucket = S3Bucket(
                    name=remote_content.bucket_name,
                    region=s3_config.region if s3_config else None,
                    aws_access_key_id=s3_config.aws_access_key_id if s3_config else None,
                    aws_secret_access_key=s3_config.aws_secret_access_key if s3_config else None,
                )
        except Exception as e:
            # Bucket creation failure is logged; bucket stays None and nothing is read.
            log_error(f"Error getting bucket: {e}")
        # Identify objects to read.
        # Precedence: explicit key > explicit object > prefix listing > whole bucket.
        objects_to_read: List[S3Object] = []
        if bucket is not None:
            if remote_content.key is not None:
                _object = S3Object(bucket_name=bucket.name, name=remote_content.key)
                objects_to_read.append(_object)
            elif remote_content.object is not None:
                objects_to_read.append(remote_content.object)
            elif remote_content.prefix is not None:
                objects_to_read.extend(bucket.get_objects(prefix=remote_content.prefix))
            else:
                objects_to_read.extend(bucket.get_objects())
        if objects_to_read:
            log_info(f"Processing {len(objects_to_read)} file(s) from S3")
        bucket_name = bucket.name if bucket else "unknown"
        is_folder_upload = len(objects_to_read) > 1
        root_path = remote_content.prefix or ""
        for s3_object in objects_to_read:
            object_name = s3_object.name or ""
            file_name = object_name.split("/")[-1]
            # Build metadata and virtual path using helpers
            virtual_path = self._build_s3_virtual_path(bucket_name, object_name)
            s3_metadata = self._build_s3_metadata(s3_config, bucket_name, object_name)
            merged_metadata: Dict[str, Any] = self._merge_metadata(s3_metadata, content.metadata)
            # Compute content name using base helper
            content_name = self._compute_content_name(object_name, file_name, content.name, root_path, is_folder_upload)
            # Create content entry
            content_entry = Content(
                name=content_name,
                description=content.description,
                path=virtual_path,
                status=ContentStatus.PROCESSING,
                metadata=merged_metadata,
                file_type="s3",
            )
            content_entry.content_hash = self._build_content_hash(content_entry)
            content_entry.id = generate_id(content_entry.content_hash)
            await self._ainsert_contents_db(content_entry)
            if self._should_skip(content_entry.content_hash, skip_if_exists):
                # Already ingested: mark completed without downloading.
                content_entry.status = ContentStatus.COMPLETED
                await self._aupdate_content(content_entry)
                continue
            # Select reader
            reader = self._select_reader_by_uri(s3_object.uri, content.reader)
            reader = cast(Reader, reader)
            # Fetch and load the content
            temporary_file = None
            readable_content: Optional[Union[BytesIO, Path]] = None
            if s3_object.uri.endswith(".pdf"):
                # PDFs are streamed fully into memory.
                readable_content = BytesIO(s3_object.get_resource().get()["Body"].read())
            else:
                # Other files are downloaded to a local file under "storage/".
                # NOTE(review): assumes the "storage" directory already exists — TODO confirm.
                temporary_file = Path("storage").joinpath(file_name)
                readable_content = temporary_file
                s3_object.download(readable_content)  # type: ignore
            # Read the content
            read_documents = await reader.async_read(readable_content, name=file_name)
            # Prepare and insert the content in the vector database
            self._prepare_documents_for_insert(read_documents, content_entry.id)
            await self._ahandle_vector_db_insert(content_entry, read_documents, upsert)
            # Remove temporary file if needed
            # NOTE(review): unlink only runs on the success path; a failed read leaks the temp file.
            if temporary_file:
                temporary_file.unlink()
    def _load_from_s3(
        self,
        content: Content,
        upsert: bool,
        skip_if_exists: bool,
        config: Optional[BaseStorageConfig] = None,
    ):
        """Load content from AWS S3 (sync).

        Mirrors :meth:`_aload_from_s3`; see it for parameter semantics.
        """
        from agno.cloud.aws.s3.bucket import S3Bucket
        from agno.cloud.aws.s3.object import S3Object
        remote_content: S3Content = cast(S3Content, content.remote_content)
        s3_config = self._validate_s3_config(content, config)
        # Get or create bucket with credentials from config
        # NOTE(review): unlike the async variant, bucket creation here is not wrapped in
        # try/except, so a failure propagates — confirm whether that asymmetry is intended.
        bucket = remote_content.bucket
        if bucket is None and remote_content.bucket_name:
            bucket = S3Bucket(
                name=remote_content.bucket_name,
                region=s3_config.region if s3_config else None,
                aws_access_key_id=s3_config.aws_access_key_id if s3_config else None,
                aws_secret_access_key=s3_config.aws_secret_access_key if s3_config else None,
            )
        # Identify objects to read.
        # Precedence: explicit key > explicit object > prefix listing > whole bucket.
        objects_to_read: List[S3Object] = []
        if bucket is not None:
            if remote_content.key is not None:
                _object = S3Object(bucket_name=bucket.name, name=remote_content.key)
                objects_to_read.append(_object)
            elif remote_content.object is not None:
                objects_to_read.append(remote_content.object)
            elif remote_content.prefix is not None:
                objects_to_read.extend(bucket.get_objects(prefix=remote_content.prefix))
            else:
                objects_to_read.extend(bucket.get_objects())
        if objects_to_read:
            log_info(f"Processing {len(objects_to_read)} file(s) from S3")
        bucket_name = bucket.name if bucket else "unknown"
        is_folder_upload = len(objects_to_read) > 1
        root_path = remote_content.prefix or ""
        for s3_object in objects_to_read:
            object_name = s3_object.name or ""
            file_name = object_name.split("/")[-1]
            # Build metadata and virtual path using helpers
            virtual_path = self._build_s3_virtual_path(bucket_name, object_name)
            s3_metadata = self._build_s3_metadata(s3_config, bucket_name, object_name)
            merged_metadata: Dict[str, Any] = self._merge_metadata(s3_metadata, content.metadata)
            # Compute content name using base helper
            content_name = self._compute_content_name(object_name, file_name, content.name, root_path, is_folder_upload)
            # Create content entry
            content_entry = Content(
                name=content_name,
                description=content.description,
                path=virtual_path,
                status=ContentStatus.PROCESSING,
                metadata=merged_metadata,
                file_type="s3",
            )
            content_entry.content_hash = self._build_content_hash(content_entry)
            content_entry.id = generate_id(content_entry.content_hash)
            self._insert_contents_db(content_entry)
            if self._should_skip(content_entry.content_hash, skip_if_exists):
                # Already ingested: mark completed without downloading.
                content_entry.status = ContentStatus.COMPLETED
                self._update_content(content_entry)
                continue
            # Select reader
            reader = self._select_reader_by_uri(s3_object.uri, content.reader)
            reader = cast(Reader, reader)
            # Fetch and load the content
            temporary_file = None
            readable_content: Optional[Union[BytesIO, Path]] = None
            if s3_object.uri.endswith(".pdf"):
                # PDFs are streamed fully into memory.
                readable_content = BytesIO(s3_object.get_resource().get()["Body"].read())
            else:
                # Other files are downloaded to a local file under "storage/".
                # NOTE(review): assumes the "storage" directory already exists — TODO confirm.
                temporary_file = Path("storage").joinpath(file_name)
                readable_content = temporary_file
                s3_object.download(readable_content)  # type: ignore
            # Read the content
            read_documents = reader.read(readable_content, name=file_name)
            # Prepare and insert the content in the vector database
            self._prepare_documents_for_insert(read_documents, content_entry.id)
            self._handle_vector_db_insert(content_entry, read_documents, upsert)
            # Remove temporary file if needed
            # NOTE(review): unlink only runs on the success path; a failed read leaks the temp file.
            if temporary_file:
                temporary_file.unlink()
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/knowledge/loaders/s3.py",
"license": "Apache License 2.0",
"lines": 225,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/knowledge/loaders/sharepoint.py | """SharePoint content loader for Knowledge.
Provides methods for loading content from Microsoft SharePoint.
"""
# mypy: disable-error-code="attr-defined"
from io import BytesIO
from typing import Dict, List, Optional, cast
import httpx
from httpx import AsyncClient
from agno.knowledge.content import Content, ContentStatus
from agno.knowledge.loaders.base import BaseLoader
from agno.knowledge.reader import Reader
from agno.knowledge.remote_content.base import BaseStorageConfig
from agno.knowledge.remote_content.remote_content import SharePointContent
from agno.knowledge.remote_content.sharepoint import SharePointConfig
from agno.utils.log import log_error, log_info, log_warning
class SharePointLoader(BaseLoader):
    """Loader for SharePoint content.

    Authenticates against Microsoft Graph with client credentials (via msal),
    resolves the target site, lists and downloads files, and feeds them to the
    Knowledge pipeline (reader selection, contents DB, vector DB).
    """
    # ==========================================
    # SHAREPOINT HELPERS (shared between sync/async)
    # ==========================================
    def _validate_sharepoint_config(
        self,
        content: Content,
        config: Optional[BaseStorageConfig],
    ) -> Optional[SharePointConfig]:
        """Validate and extract SharePoint config.

        Returns:
            SharePointConfig if valid, None otherwise
        """
        remote_content: SharePointContent = cast(SharePointContent, content.remote_content)
        sp_config = cast(SharePointConfig, config) if isinstance(config, SharePointConfig) else None
        if sp_config is None:
            log_error(f"SharePoint config not found for config_id: {remote_content.config_id}")
            return None
        return sp_config
    def _get_sharepoint_access_token(self, sp_config: SharePointConfig) -> Optional[str]:
        """Get an access token for Microsoft Graph API using client credentials flow.

        Requires the `msal` package: pip install msal

        Returns:
            The bearer token string, or None when token acquisition fails
            (the failure reason is logged).
        """
        try:
            from msal import ConfidentialClientApplication  # type: ignore
        except ImportError:
            raise ImportError("The `msal` package is not installed. Please install it via `pip install msal`.")
        authority = f"https://login.microsoftonline.com/{sp_config.tenant_id}"
        app = ConfidentialClientApplication(
            sp_config.client_id,
            authority=authority,
            client_credential=sp_config.client_secret,
        )
        # ".default" requests the app's statically-consented Graph permissions.
        scopes = ["https://graph.microsoft.com/.default"]
        result = app.acquire_token_for_client(scopes=scopes)
        if "access_token" in result:
            return result["access_token"]
        else:
            log_error(f"Failed to acquire SharePoint token: {result.get('error_description', result.get('error'))}")
            return None
    def _get_sharepoint_site_id(self, hostname: str, site_path: Optional[str], access_token: str) -> Optional[str]:
        """Get the SharePoint site ID using Microsoft Graph API (sync)."""
        # With a site path Graph uses the "hostname:/path" addressing form.
        if site_path:
            url = f"https://graph.microsoft.com/v1.0/sites/{hostname}:/{site_path}"
        else:
            url = f"https://graph.microsoft.com/v1.0/sites/{hostname}"
        headers = {"Authorization": f"Bearer {access_token}"}
        try:
            response = httpx.get(url, headers=headers)
            response.raise_for_status()
            return response.json().get("id")
        except httpx.HTTPStatusError as e:
            log_error(f"Failed to get SharePoint site ID: {e.response.status_code} - {e.response.text}")
            return None
    async def _aget_sharepoint_site_id(
        self, hostname: str, site_path: Optional[str], access_token: str
    ) -> Optional[str]:
        """Get the SharePoint site ID using Microsoft Graph API (async)."""
        if site_path:
            url = f"https://graph.microsoft.com/v1.0/sites/{hostname}:/{site_path}"
        else:
            url = f"https://graph.microsoft.com/v1.0/sites/{hostname}"
        headers = {"Authorization": f"Bearer {access_token}"}
        try:
            async with httpx.AsyncClient() as client:
                response = await client.get(url, headers=headers)
                response.raise_for_status()
                return response.json().get("id")
        except httpx.HTTPStatusError as e:
            log_error(f"Failed to get SharePoint site ID: {e.response.status_code} - {e.response.text}")
            return None
    def _list_sharepoint_folder_items(self, site_id: str, folder_path: str, access_token: str) -> List[dict]:
        """List all items in a SharePoint folder (sync).

        Follows Graph's ``@odata.nextLink`` pagination until exhausted.
        On an HTTP error, returns whatever was collected so far.
        """
        folder_path = folder_path.lstrip("/")
        url: Optional[str] = f"https://graph.microsoft.com/v1.0/sites/{site_id}/drive/root:/{folder_path}:/children"
        headers = {"Authorization": f"Bearer {access_token}"}
        items: List[dict] = []
        try:
            while url:
                response = httpx.get(url, headers=headers)
                response.raise_for_status()
                data = response.json()
                items.extend(data.get("value", []))
                url = data.get("@odata.nextLink")
        except httpx.HTTPStatusError as e:
            log_error(f"Failed to list SharePoint folder: {e.response.status_code} - {e.response.text}")
        return items
    async def _alist_sharepoint_folder_items(self, site_id: str, folder_path: str, access_token: str) -> List[dict]:
        """List all items in a SharePoint folder (async).

        Follows Graph's ``@odata.nextLink`` pagination until exhausted.
        On an HTTP error, returns whatever was collected so far.
        """
        folder_path = folder_path.lstrip("/")
        url: Optional[str] = f"https://graph.microsoft.com/v1.0/sites/{site_id}/drive/root:/{folder_path}:/children"
        headers = {"Authorization": f"Bearer {access_token}"}
        items: List[dict] = []
        try:
            async with httpx.AsyncClient() as client:
                while url:
                    response = await client.get(url, headers=headers)
                    response.raise_for_status()
                    data = response.json()
                    items.extend(data.get("value", []))
                    url = data.get("@odata.nextLink")
        except httpx.HTTPStatusError as e:
            log_error(f"Failed to list SharePoint folder: {e.response.status_code} - {e.response.text}")
        return items
    def _download_sharepoint_file(self, site_id: str, file_path: str, access_token: str) -> Optional[BytesIO]:
        """Download a file from SharePoint (sync).

        Returns the file content as BytesIO, or None on HTTP error.
        Redirects are followed because Graph's /content endpoint redirects
        to a download URL.
        """
        file_path = file_path.lstrip("/")
        url = f"https://graph.microsoft.com/v1.0/sites/{site_id}/drive/root:/{file_path}:/content"
        headers = {"Authorization": f"Bearer {access_token}"}
        try:
            response = httpx.get(url, headers=headers, follow_redirects=True)
            response.raise_for_status()
            return BytesIO(response.content)
        except httpx.HTTPStatusError as e:
            log_error(f"Failed to download SharePoint file {file_path}: {e.response.status_code} - {e.response.text}")
            return None
    async def _adownload_sharepoint_file(self, site_id: str, file_path: str, access_token: str) -> Optional[BytesIO]:
        """Download a file from SharePoint (async).

        Returns the file content as BytesIO, or None on HTTP error.
        """
        file_path = file_path.lstrip("/")
        url = f"https://graph.microsoft.com/v1.0/sites/{site_id}/drive/root:/{file_path}:/content"
        headers = {"Authorization": f"Bearer {access_token}"}
        try:
            async with httpx.AsyncClient() as client:
                response = await client.get(url, headers=headers, follow_redirects=True)
                response.raise_for_status()
                return BytesIO(response.content)
        except httpx.HTTPStatusError as e:
            log_error(f"Failed to download SharePoint file {file_path}: {e.response.status_code} - {e.response.text}")
            return None
    def _build_sharepoint_metadata(
        self,
        sp_config: SharePointConfig,
        site_id: str,
        file_path: str,
        file_name: str,
    ) -> Dict[str, str]:
        """Build SharePoint-specific metadata dictionary."""
        return {
            "source_type": "sharepoint",
            "source_config_id": sp_config.id,
            "source_config_name": sp_config.name,
            "sharepoint_hostname": sp_config.hostname,
            "sharepoint_site_id": site_id,
            "sharepoint_path": file_path,
            "sharepoint_filename": file_name,
        }
    def _build_sharepoint_virtual_path(self, hostname: str, site_id: str, file_path: str) -> str:
        """Build virtual path (``sharepoint://host/site/path``) for SharePoint content."""
        return f"sharepoint://{hostname}/{site_id}/{file_path}"
    def _get_sharepoint_path_to_process(self, remote_content: SharePointContent) -> str:
        """Get the path to process from remote content (file path wins over folder path)."""
        return (remote_content.file_path or remote_content.folder_path or "").strip("/")
    # ==========================================
    # SHAREPOINT LOADERS
    # ==========================================
    async def _aload_from_sharepoint(
        self,
        content: Content,
        upsert: bool,
        skip_if_exists: bool,
        config: Optional[BaseStorageConfig] = None,
    ):
        """Load content from SharePoint (async).

        Requires the SharePoint config to contain tenant_id, client_id, client_secret, and hostname.

        Flow: acquire token -> resolve site id -> resolve the configured path to
        files (recursing into folders) -> per file: record entry, download, read,
        insert into the vector database.

        Args:
            content: Content whose ``remote_content`` is a ``SharePointContent``.
            upsert: If True, upsert documents into the vector database.
            skip_if_exists: If True, skip files whose content hash already exists.
            config: SharePoint storage config; validation failure aborts the load.
        """
        remote_content: SharePointContent = cast(SharePointContent, content.remote_content)
        sp_config = self._validate_sharepoint_config(content, config)
        if sp_config is None:
            return
        # Get access token
        access_token = self._get_sharepoint_access_token(sp_config)
        if not access_token:
            return
        # Get site ID (explicit config value wins; otherwise resolve via Graph)
        site_id: Optional[str] = sp_config.site_id
        if not site_id:
            site_path = remote_content.site_path or sp_config.site_path
            site_id = await self._aget_sharepoint_site_id(sp_config.hostname, site_path, access_token)
            if not site_id:
                log_error(f"Failed to get SharePoint site ID for {sp_config.hostname}/{site_path}")
                return
        # Identify files to download
        files_to_process: List[tuple] = []
        path_to_process = self._get_sharepoint_path_to_process(remote_content)
        # Helper function to recursively list all files in a folder
        async def list_files_recursive(folder: str) -> List[tuple]:
            """Recursively list all files in a SharePoint folder as (path, name) tuples."""
            files: List[tuple] = []
            items = await self._alist_sharepoint_folder_items(site_id, folder, access_token)  # type: ignore
            for item in items:
                # Graph drive items carry a "file" or "folder" facet key.
                if "file" in item:
                    item_path = f"{folder}/{item['name']}"
                    files.append((item_path, item["name"]))
                elif "folder" in item:
                    subdir_path = f"{folder}/{item['name']}"
                    subdir_files = await list_files_recursive(subdir_path)
                    files.extend(subdir_files)
            return files
        if path_to_process:
            try:
                async with AsyncClient() as client:
                    url = f"https://graph.microsoft.com/v1.0/sites/{site_id}/drive/root:/{path_to_process}"
                    headers = {"Authorization": f"Bearer {access_token}"}
                    response = await client.get(url, headers=headers, timeout=30.0)
                    response.raise_for_status()
                    item_data = response.json()
                    if "folder" in item_data:
                        files_to_process = await list_files_recursive(path_to_process)
                    elif "file" in item_data:
                        files_to_process.append((path_to_process, item_data["name"]))
                    else:
                        log_warning(f"SharePoint path {path_to_process} is neither file nor folder")
                        return
            except Exception as e:
                log_error(f"Error checking SharePoint path {path_to_process}: {e}")
                return
        if not files_to_process:
            log_warning(f"No files found at SharePoint path: {path_to_process}")
            return
        log_info(f"Processing {len(files_to_process)} file(s) from SharePoint")
        is_folder_upload = len(files_to_process) > 1
        for file_path, file_name in files_to_process:
            # Build metadata and virtual path using helpers
            virtual_path = self._build_sharepoint_virtual_path(sp_config.hostname, site_id, file_path)
            sharepoint_metadata = self._build_sharepoint_metadata(sp_config, site_id, file_path, file_name)
            merged_metadata = self._merge_metadata(sharepoint_metadata, content.metadata)
            # Compute content name using base helper
            content_name = self._compute_content_name(
                file_path, file_name, content.name, path_to_process, is_folder_upload
            )
            # Create content entry using base helper
            content_entry = self._create_content_entry(
                content, content_name, virtual_path, merged_metadata, "sharepoint", is_folder_upload
            )
            await self._ainsert_contents_db(content_entry)
            if self._should_skip(content_entry.content_hash, skip_if_exists):
                # Already ingested: mark completed without downloading.
                content_entry.status = ContentStatus.COMPLETED
                await self._aupdate_content(content_entry)
                continue
            # Select reader and download file
            reader = self._select_reader_by_uri(file_name, content.reader)
            reader = cast(Reader, reader)
            file_content = await self._adownload_sharepoint_file(site_id, file_path, access_token)
            if not file_content:
                content_entry.status = ContentStatus.FAILED
                await self._aupdate_content(content_entry)
                continue
            # Read the content
            read_documents = await reader.async_read(file_content, name=file_name)
            # Prepare and insert to vector database
            # NOTE(review): assumes _create_content_entry populated content_entry.id — TODO confirm
            # (the GitHub loader regenerates the id here when missing).
            self._prepare_documents_for_insert(read_documents, content_entry.id)
            await self._ahandle_vector_db_insert(content_entry, read_documents, upsert)
    def _load_from_sharepoint(
        self,
        content: Content,
        upsert: bool,
        skip_if_exists: bool,
        config: Optional[BaseStorageConfig] = None,
    ):
        """Load content from SharePoint (sync).

        Requires the SharePoint config to contain tenant_id, client_id, client_secret, and hostname.

        Mirrors :meth:`_aload_from_sharepoint`; see it for the full flow.
        """
        remote_content: SharePointContent = cast(SharePointContent, content.remote_content)
        sp_config = self._validate_sharepoint_config(content, config)
        if sp_config is None:
            return
        # Get access token
        access_token = self._get_sharepoint_access_token(sp_config)
        if not access_token:
            return
        # Get site ID (explicit config value wins; otherwise resolve via Graph)
        site_id: Optional[str] = sp_config.site_id
        if not site_id:
            site_path = remote_content.site_path or sp_config.site_path
            site_id = self._get_sharepoint_site_id(sp_config.hostname, site_path, access_token)
            if not site_id:
                log_error(f"Failed to get SharePoint site ID for {sp_config.hostname}/{site_path}")
                return
        # Identify files to download
        files_to_process: List[tuple] = []
        path_to_process = self._get_sharepoint_path_to_process(remote_content)
        # Helper function to recursively list all files in a folder
        def list_files_recursive(folder: str) -> List[tuple]:
            """Recursively list all files in a SharePoint folder as (path, name) tuples."""
            files: List[tuple] = []
            items = self._list_sharepoint_folder_items(site_id, folder, access_token)  # type: ignore
            for item in items:
                # Graph drive items carry a "file" or "folder" facet key.
                if "file" in item:
                    item_path = f"{folder}/{item['name']}"
                    files.append((item_path, item["name"]))
                elif "folder" in item:
                    subdir_path = f"{folder}/{item['name']}"
                    subdir_files = list_files_recursive(subdir_path)
                    files.extend(subdir_files)
            return files
        if path_to_process:
            try:
                with httpx.Client() as client:
                    url = f"https://graph.microsoft.com/v1.0/sites/{site_id}/drive/root:/{path_to_process}"
                    headers = {"Authorization": f"Bearer {access_token}"}
                    response = client.get(url, headers=headers, timeout=30.0)
                    response.raise_for_status()
                    item_data = response.json()
                    if "folder" in item_data:
                        files_to_process = list_files_recursive(path_to_process)
                    elif "file" in item_data:
                        files_to_process.append((path_to_process, item_data["name"]))
                    else:
                        log_warning(f"SharePoint path {path_to_process} is neither file nor folder")
                        return
            except Exception as e:
                log_error(f"Error checking SharePoint path {path_to_process}: {e}")
                return
        if not files_to_process:
            log_warning(f"No files found at SharePoint path: {path_to_process}")
            return
        log_info(f"Processing {len(files_to_process)} file(s) from SharePoint")
        is_folder_upload = len(files_to_process) > 1
        for file_path, file_name in files_to_process:
            # Build metadata and virtual path using helpers
            virtual_path = self._build_sharepoint_virtual_path(sp_config.hostname, site_id, file_path)
            sharepoint_metadata = self._build_sharepoint_metadata(sp_config, site_id, file_path, file_name)
            merged_metadata = self._merge_metadata(sharepoint_metadata, content.metadata)
            # Compute content name using base helper
            content_name = self._compute_content_name(
                file_path, file_name, content.name, path_to_process, is_folder_upload
            )
            # Create content entry using base helper
            content_entry = self._create_content_entry(
                content, content_name, virtual_path, merged_metadata, "sharepoint", is_folder_upload
            )
            self._insert_contents_db(content_entry)
            if self._should_skip(content_entry.content_hash, skip_if_exists):
                # Already ingested: mark completed without downloading.
                content_entry.status = ContentStatus.COMPLETED
                self._update_content(content_entry)
                continue
            # Select reader and download file
            reader = self._select_reader_by_uri(file_name, content.reader)
            reader = cast(Reader, reader)
            file_content = self._download_sharepoint_file(site_id, file_path, access_token)
            if not file_content:
                content_entry.status = ContentStatus.FAILED
                self._update_content(content_entry)
                continue
            # Read the content
            read_documents = reader.read(file_content, name=file_name)
            # Prepare and insert to vector database
            # NOTE(review): assumes _create_content_entry populated content_entry.id — TODO confirm
            # (the GitHub loader regenerates the id here when missing).
            self._prepare_documents_for_insert(read_documents, content_entry.id)
            self._handle_vector_db_insert(content_entry, read_documents, upsert)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/knowledge/loaders/sharepoint.py",
"license": "Apache License 2.0",
"lines": 365,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/knowledge/remote_knowledge.py | """Remote content loading for Knowledge.
Provides methods for loading content from cloud storage providers:
- S3, GCS, SharePoint, GitHub, Azure Blob Storage
This module contains the RemoteKnowledge class which combines all loader
capabilities through inheritance. The Knowledge class inherits from this
to gain remote content loading capabilities.
"""
from typing import List, Optional
from agno.knowledge.content import Content
from agno.knowledge.loaders.azure_blob import AzureBlobLoader
from agno.knowledge.loaders.gcs import GCSLoader
from agno.knowledge.loaders.github import GitHubLoader
from agno.knowledge.loaders.s3 import S3Loader
from agno.knowledge.loaders.sharepoint import SharePointLoader
from agno.knowledge.remote_content.base import BaseStorageConfig
from agno.knowledge.remote_content.remote_content import (
AzureBlobContent,
GCSContent,
GitHubContent,
S3Content,
SharePointContent,
)
from agno.utils.log import log_warning
class RemoteKnowledge(S3Loader, GCSLoader, SharePointLoader, GitHubLoader, AzureBlobLoader):
    """Mixin that equips Knowledge with remote content loading.

    Combines the provider-specific loaders (S3, GCS, SharePoint, GitHub,
    Azure Blob) and dispatches each piece of remote content to the matching
    loader based on its ``remote_content`` type.

    The concrete Knowledge subclass supplies:
    - ``content_sources``: the configured ``BaseStorageConfig`` list
    - ``vector_db`` / ``contents_db`` attributes
    - the shared helper methods the loaders rely on
      (``_should_skip``, ``_select_reader_by_uri``,
      ``_prepare_documents_for_insert``, the vector-DB insert helpers,
      the contents-DB insert/update helpers, and ``_build_content_hash``)
    """

    # Supplied by the Knowledge subclass.
    content_sources: Optional[List[BaseStorageConfig]]

    # ==========================================
    # REMOTE CONTENT DISPATCHERS
    # ==========================================
    async def _aload_from_remote_content(
        self,
        content: Content,
        upsert: bool,
        skip_if_exists: bool,
    ):
        """Async dispatcher: route ``content`` to the matching provider loader."""
        remote = content.remote_content
        if remote is None:
            log_warning("No remote content provided for content")
            return
        config = self._resolve_remote_config(remote)
        if isinstance(remote, S3Content):
            await self._aload_from_s3(content, upsert, skip_if_exists, config)
        elif isinstance(remote, GCSContent):
            await self._aload_from_gcs(content, upsert, skip_if_exists, config)
        elif isinstance(remote, SharePointContent):
            await self._aload_from_sharepoint(content, upsert, skip_if_exists, config)
        elif isinstance(remote, GitHubContent):
            await self._aload_from_github(content, upsert, skip_if_exists, config)
        elif isinstance(remote, AzureBlobContent):
            await self._aload_from_azure_blob(content, upsert, skip_if_exists, config)
        else:
            log_warning(f"Unsupported remote content type: {type(remote)}")

    def _load_from_remote_content(
        self,
        content: Content,
        upsert: bool,
        skip_if_exists: bool,
    ):
        """Sync dispatcher: route ``content`` to the matching provider loader."""
        remote = content.remote_content
        if remote is None:
            log_warning("No remote content provided for content")
            return
        config = self._resolve_remote_config(remote)
        if isinstance(remote, S3Content):
            self._load_from_s3(content, upsert, skip_if_exists, config)
        elif isinstance(remote, GCSContent):
            self._load_from_gcs(content, upsert, skip_if_exists, config)
        elif isinstance(remote, SharePointContent):
            self._load_from_sharepoint(content, upsert, skip_if_exists, config)
        elif isinstance(remote, GitHubContent):
            self._load_from_github(content, upsert, skip_if_exists, config)
        elif isinstance(remote, AzureBlobContent):
            self._load_from_azure_blob(content, upsert, skip_if_exists, config)
        else:
            log_warning(f"Unsupported remote content type: {type(remote)}")

    # ==========================================
    # REMOTE CONFIG HELPERS
    # ==========================================
    def _resolve_remote_config(self, remote_content) -> Optional[BaseStorageConfig]:
        """Look up the storage config a remote content item references, if any.

        Logs a warning (and returns None) when a config_id is set but no
        matching config is registered.
        """
        if not (hasattr(remote_content, "config_id") and remote_content.config_id):
            return None
        config = self._get_remote_config_by_id(remote_content.config_id)
        if config is None:
            log_warning(f"No config found for config_id: {remote_content.config_id}")
        return config

    def _get_remote_configs(self) -> List[BaseStorageConfig]:
        """Return configured remote content sources."""
        sources = self.content_sources
        return sources if sources else []

    def _get_remote_config_by_id(self, config_id: str) -> Optional[BaseStorageConfig]:
        """Get a remote content config by its ID, or None if not registered."""
        for candidate in self.content_sources or []:
            if candidate.id == config_id:
                return candidate
        return None
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/knowledge/remote_knowledge.py",
"license": "Apache License 2.0",
"lines": 118,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:cookbook/07_knowledge/embedders/aws_bedrock_embedder_v4.py | """
AWS Bedrock Embedder v4
=======================
Demonstrates Cohere v4 embeddings on AWS Bedrock with configurable dimensions.
Requirements:
- AWS credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
- AWS region configured (AWS_REGION)
- boto3 installed: pip install boto3
"""
from agno.knowledge.embedder.aws_bedrock import AwsBedrockEmbedder
from agno.knowledge.knowledge import Knowledge
from agno.vectordb.pgvector import PgVector
# ---------------------------------------------------------------------------
# Setup
# ---------------------------------------------------------------------------
# Query-side embedder: Cohere Embed v4 with a 1024-dim output
# (the v4 default is 1536; see the integration tests).
embedder_v4 = AwsBedrockEmbedder(
    id="cohere.embed-v4:0",
    output_dimension=1024,
    input_type="search_query",  # embed short search queries
)
# ---------------------------------------------------------------------------
# Create Knowledge Base
# ---------------------------------------------------------------------------
# Document-side embedder uses input_type="search_document" and the SAME
# output_dimension as the query embedder so stored and query vectors match.
knowledge = Knowledge(
    vector_db=PgVector(
        table_name="ml_knowledge",
        db_url="postgresql+psycopg://ai:ai@localhost:5532/ai",  # local pgvector (cookbook default)
        embedder=AwsBedrockEmbedder(
            id="cohere.embed-v4:0",
            output_dimension=1024,
            input_type="search_document",
        ),
    ),
)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
def main() -> None:
    """Embed a sample query with the v4 embedder, then show each supported output size."""
    sample = "What is machine learning?"
    embeddings = embedder_v4.get_embedding(sample)
    print(f"Model: {embedder_v4.id}")
    print(f"Embeddings (first 5): {embeddings[:5]}")
    print(f"Dimensions: {len(embeddings)}")

    print("\n--- Testing different dimensions ---")
    for dim in (256, 512, 1024, 1536):
        result = AwsBedrockEmbedder(id="cohere.embed-v4:0", output_dimension=dim).get_embedding("Test text")
        print(f"Dimension {dim}: Got {len(result)} dimensional vector")

    # Keep the module-level knowledge base referenced (cookbook illustration only).
    _ = knowledge
# Script entry point — run the demo only when executed directly, not on import.
if __name__ == "__main__":
    main()
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/07_knowledge/embedders/aws_bedrock_embedder_v4.py",
"license": "Apache License 2.0",
"lines": 51,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/07_knowledge/vector_db/pgvector/pgvector_with_bedrock_reranker.py | """
AWS Bedrock Reranker Example with PgVector
==========================================
Demonstrates AWS Bedrock rerankers with PgVector for retrieval augmented generation.
Requirements:
- AWS credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
- AWS region configured (AWS_REGION)
- boto3 installed: pip install boto3
- PostgreSQL with pgvector running
"""
from agno.agent import Agent
from agno.knowledge.embedder.aws_bedrock import AwsBedrockEmbedder
from agno.knowledge.knowledge import Knowledge
from agno.knowledge.reranker.aws_bedrock import (
AmazonReranker,
AwsBedrockReranker,
CohereBedrockReranker,
)
from agno.models.aws.bedrock import AwsBedrock
from agno.vectordb.pgvector import PgVector
# ---------------------------------------------------------------------------
# Create Knowledge Base
# ---------------------------------------------------------------------------
# Variant 1: explicit model ID — Cohere Rerank 3.5 via the generic reranker class.
knowledge_cohere = Knowledge(
    vector_db=PgVector(
        table_name="bedrock_rag_demo",
        db_url="postgresql+psycopg://ai:ai@localhost:5532/ai",
        embedder=AwsBedrockEmbedder(
            id="cohere.embed-multilingual-v3",
            input_type="search_document",  # embed stored documents, not queries
        ),
        reranker=AwsBedrockReranker(
            model="cohere.rerank-v3-5:0",
            top_n=5,  # keep only the 5 most relevant chunks after reranking
        ),
    ),
)
# Variant 2: convenience class with the Cohere model preset; v4 embedder with
# an explicit 1024-dim output.
knowledge_convenience = Knowledge(
    vector_db=PgVector(
        table_name="bedrock_rag_demo_v2",
        db_url="postgresql+psycopg://ai:ai@localhost:5532/ai",
        embedder=AwsBedrockEmbedder(
            id="cohere.embed-v4:0",
            output_dimension=1024,
            input_type="search_document",
        ),
        reranker=CohereBedrockReranker(top_n=5),
    ),
)
# Variant 3: Amazon Rerank 1.0. NOTE: this model is not available in us-east-1,
# hence the explicit aws_region.
knowledge_amazon = Knowledge(
    vector_db=PgVector(
        table_name="bedrock_rag_amazon",
        db_url="postgresql+psycopg://ai:ai@localhost:5532/ai",
        embedder=AwsBedrockEmbedder(
            id="cohere.embed-multilingual-v3",
            input_type="search_document",
        ),
        reranker=AmazonReranker(
            top_n=5,
            aws_region="us-west-2",
        ),
    ),
)
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# The agent answers from the Cohere-reranked knowledge base defined above.
agent = Agent(
    model=AwsBedrock(id="anthropic.claude-sonnet-4-20250514-v1:0"),
    knowledge=knowledge_cohere,
    markdown=True,
)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
def main() -> None:
    """Load the Agno docs into the Cohere-reranked knowledge base, then query it."""
    knowledge_cohere.insert(name="Agno Docs", url="https://docs.agno.com/introduction.md")
    # Keep the alternative knowledge-base configurations referenced.
    _ = knowledge_convenience
    _ = knowledge_amazon
    agent.print_response("What are the key features?")
# Script entry point — run the demo only when executed directly, not on import.
if __name__ == "__main__":
    main()
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/07_knowledge/vector_db/pgvector/pgvector_with_bedrock_reranker.py",
"license": "Apache License 2.0",
"lines": 83,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/agno/knowledge/reranker/aws_bedrock.py | from os import getenv
from typing import Any, Dict, List, Literal, Optional
from pydantic import ConfigDict, Field
from agno.knowledge.document import Document
from agno.knowledge.reranker.base import Reranker
from agno.utils.log import logger
try:
from boto3 import client as AwsClient
from boto3.session import Session
from botocore.exceptions import ClientError
except ImportError:
raise ImportError("`boto3` not installed. Please install it via `pip install boto3`.")
# Bedrock foundation-model IDs for the two supported rerank models.
AMAZON_RERANK_V1 = "amazon.rerank-v1:0"
COHERE_RERANK_V3_5 = "cohere.rerank-v3-5:0"
# Alias constraining `model` to the supported reranker model IDs.
RerankerModel = Literal["amazon.rerank-v1:0", "cohere.rerank-v3-5:0"]
class AwsBedrockReranker(Reranker):
    """
    AWS Bedrock reranker supporting Amazon Rerank 1.0 and Cohere Rerank 3.5 models.

    This reranker uses the unified Bedrock Rerank API (bedrock-agent-runtime)
    which provides a consistent interface for both model providers.

    To use this reranker, you need to either:
    1. Set the following environment variables:
       - AWS_ACCESS_KEY_ID
       - AWS_SECRET_ACCESS_KEY
       - AWS_REGION
    2. Or provide a boto3 Session object

    Args:
        model (str): The model ID to use. Options:
            - 'amazon.rerank-v1:0' (Amazon Rerank 1.0)
            - 'cohere.rerank-v3-5:0' (Cohere Rerank 3.5)
            Default is 'cohere.rerank-v3-5:0'.
        top_n (Optional[int]): Number of top results to return after reranking.
            If None, returns all documents reranked.
        aws_region (Optional[str]): The AWS region to use.
        aws_access_key_id (Optional[str]): The AWS access key ID to use.
        aws_secret_access_key (Optional[str]): The AWS secret access key to use.
        session (Optional[Session]): A boto3 Session object for authentication.
        additional_model_request_fields (Optional[Dict]): Additional model-specific
            parameters to pass in the request (e.g., Cohere-specific options).

    Example:
        ```python
        from agno.knowledge.reranker.aws_bedrock import AwsBedrockReranker

        # Using Cohere Rerank 3.5 (default)
        reranker = AwsBedrockReranker(
            model="cohere.rerank-v3-5:0",
            top_n=5,
            aws_region="us-west-2",
        )

        # Using Amazon Rerank 1.0
        reranker = AwsBedrockReranker(
            model="amazon.rerank-v1:0",
            top_n=10,
            aws_region="us-west-2",
        )

        # Rerank documents
        reranked_docs = reranker.rerank(query="What is machine learning?", documents=docs)
        ```

    Note:
        - Amazon Rerank 1.0 is NOT available in us-east-1 (N. Virginia).
          Use Cohere Rerank 3.5 in that region.
        - Maximum 1000 documents per request.
    """

    model_config = ConfigDict(arbitrary_types_allowed=True, populate_by_name=True)

    model: str = Field(default=COHERE_RERANK_V3_5, description="Reranker model ID")
    top_n: Optional[int] = Field(default=None, description="Number of top results to return")
    aws_region: Optional[str] = Field(default=None, description="AWS region")
    aws_access_key_id: Optional[str] = Field(default=None, description="AWS access key ID")
    aws_secret_access_key: Optional[str] = Field(default=None, description="AWS secret access key")
    session: Optional[Session] = Field(default=None, description="Boto3 session", exclude=True)
    additional_model_request_fields: Optional[Dict[str, Any]] = Field(
        default=None,
        description="Additional model-specific request parameters",
    )

    # Lazily-created bedrock-agent-runtime client, cached after first use.
    _client: Optional[AwsClient] = None

    @property
    def client(self) -> AwsClient:
        """
        Returns a cached bedrock-agent-runtime client for the Rerank API.

        Credential resolution order: explicit session > explicit keys /
        environment variables > boto3 default credential chain.

        Returns:
            AwsClient: An instance of the bedrock-agent-runtime client.
        """
        if self._client is not None:
            return self._client
        if self.session:
            # A session carries its own credentials and region.
            self._client = self.session.client("bedrock-agent-runtime")
            return self._client
        aws_access_key_id = self.aws_access_key_id or getenv("AWS_ACCESS_KEY_ID")
        aws_secret_access_key = self.aws_secret_access_key or getenv("AWS_SECRET_ACCESS_KEY")
        aws_region = self.aws_region or getenv("AWS_REGION")
        if not aws_access_key_id or not aws_secret_access_key:
            # Fall back to the boto3 default credential chain (profile, SSO, IAM role, ...)
            self._client = AwsClient(
                service_name="bedrock-agent-runtime",
                region_name=aws_region,
            )
        else:
            self._client = AwsClient(
                service_name="bedrock-agent-runtime",
                region_name=aws_region,
                aws_access_key_id=aws_access_key_id,
                aws_secret_access_key=aws_secret_access_key,
            )
        return self._client

    def _get_model_arn(self) -> str:
        """
        Constructs the full model ARN for the reranker model.

        The region is resolved in the same order the client uses:
        explicit ``aws_region`` > AWS_REGION env var > the provided session's
        region > 'us-west-2'. Previously a provided session's region was
        ignored, which could produce an ARN pointing at a different region
        than the client itself.

        Returns:
            str: The model ARN.
        """
        region = (
            self.aws_region
            or getenv("AWS_REGION")
            or (self.session.region_name if self.session else None)
            or "us-west-2"
        )
        return f"arn:aws:bedrock:{region}::foundation-model/{self.model}"

    def _build_sources(self, documents: List[Document]) -> List[Dict[str, Any]]:
        """
        Convert Document objects to Bedrock Rerank API source format.

        Args:
            documents: List of Document objects to convert.

        Returns:
            List of RerankSource objects for the API.
        """
        sources = []
        for doc in documents:
            # Each document is sent inline as plain text.
            source = {
                "type": "INLINE",
                "inlineDocumentSource": {
                    "type": "TEXT",
                    "textDocument": {
                        "text": doc.content,
                    },
                },
            }
            sources.append(source)
        return sources

    def _rerank(self, query: str, documents: List[Document]) -> List[Document]:
        """
        Internal method to perform reranking via Bedrock Rerank API.

        Args:
            query: The query string to rank documents against.
            documents: List of Document objects to rerank.

        Returns:
            List of Document objects sorted by relevance score.
        """
        if not documents:
            return []

        # Validate top_n; a non-positive value means "return everything".
        top_n = self.top_n
        if top_n is not None and top_n <= 0:
            logger.warning(f"top_n should be a positive integer, got {self.top_n}, setting top_n to None")
            top_n = None

        # Build the request in the unified Rerank API shape.
        rerank_request: Dict[str, Any] = {
            "queries": [
                {
                    "type": "TEXT",
                    "textQuery": {
                        "text": query,
                    },
                }
            ],
            "sources": self._build_sources(documents),
            "rerankingConfiguration": {
                "type": "BEDROCK_RERANKING_MODEL",
                "bedrockRerankingConfiguration": {
                    "modelConfiguration": {
                        "modelArn": self._get_model_arn(),
                    },
                },
            },
        }
        # Add numberOfResults if top_n is specified
        if top_n is not None:
            rerank_request["rerankingConfiguration"]["bedrockRerankingConfiguration"]["numberOfResults"] = top_n
        # Add additional model request fields if provided
        if self.additional_model_request_fields:
            rerank_request["rerankingConfiguration"]["bedrockRerankingConfiguration"]["modelConfiguration"][
                "additionalModelRequestFields"
            ] = self.additional_model_request_fields

        # Call the Rerank API
        response = self.client.rerank(**rerank_request)

        # Each result references the original document by index and carries a score.
        reranked_docs: List[Document] = []
        results = response.get("results", [])
        for result in results:
            index = result.get("index")
            relevance_score = result.get("relevanceScore")
            if index is not None and index < len(documents):
                doc = documents[index]
                doc.reranking_score = relevance_score
                reranked_docs.append(doc)

        # Results from API are already sorted by relevance, but ensure sorting
        # (missing scores sink to the bottom).
        reranked_docs.sort(
            key=lambda x: x.reranking_score if x.reranking_score is not None else float("-inf"),
            reverse=True,
        )
        return reranked_docs

    def rerank(self, query: str, documents: List[Document]) -> List[Document]:
        """
        Rerank documents based on their relevance to a query.

        Falls back to returning the input documents unchanged on any API or
        unexpected error (best-effort behavior; errors are logged).

        Args:
            query: The query string to rank documents against.
            documents: List of Document objects to rerank.

        Returns:
            List of Document objects sorted by relevance score (highest first).
            Each document will have its `reranking_score` attribute set.
        """
        try:
            return self._rerank(query=query, documents=documents)
        except ClientError as e:
            error_code = e.response.get("Error", {}).get("Code", "Unknown")
            error_message = e.response.get("Error", {}).get("Message", str(e))
            logger.error(f"AWS Bedrock Rerank API error ({error_code}): {error_message}. Returning original documents.")
            return documents
        except Exception as e:
            logger.error(f"Error reranking documents: {e}. Returning original documents.")
            return documents
class CohereBedrockReranker(AwsBedrockReranker):
    """
    Convenience class for Cohere Rerank 3.5 on AWS Bedrock.

    This is a pre-configured AwsBedrockReranker using the Cohere Rerank 3.5 model;
    only the `model` default differs from the base class.

    Example:
        ```python
        reranker = CohereBedrockReranker(top_n=5, aws_region="us-west-2")
        reranked_docs = reranker.rerank(query="What is AI?", documents=docs)
        ```
    """

    # Pin the model to Cohere Rerank 3.5; all other fields inherit from the base.
    model: str = Field(default=COHERE_RERANK_V3_5)
class AmazonReranker(AwsBedrockReranker):
    """
    Convenience class for Amazon Rerank 1.0 on AWS Bedrock.

    This is a pre-configured AwsBedrockReranker using the Amazon Rerank 1.0 model;
    only the `model` default differs from the base class.

    Note: Amazon Rerank 1.0 is NOT available in us-east-1 (N. Virginia).

    Example:
        ```python
        reranker = AmazonReranker(top_n=5, aws_region="us-west-2")
        reranked_docs = reranker.rerank(query="What is AI?", documents=docs)
        ```
    """

    # Pin the model to Amazon Rerank 1.0; all other fields inherit from the base.
    model: str = Field(default=AMAZON_RERANK_V1)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/knowledge/reranker/aws_bedrock.py",
"license": "Apache License 2.0",
"lines": 240,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
agno-agi/agno:libs/agno/tests/integration/embedder/test_aws_bedrock_embedder.py | """
Integration tests for AWS Bedrock Embedder.
These tests require valid AWS credentials with access to Bedrock.
Credentials can be provided via:
- Environment variables (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
- AWS credentials file (~/.aws/credentials)
- AWS SSO session
- IAM role (when running on AWS infrastructure)
To run these tests:
pytest libs/agno/tests/integration/embedder/test_aws_bedrock_embedder.py -v
"""
import os
import pytest
from agno.knowledge.embedder.aws_bedrock import AwsBedrockEmbedder
def _has_aws_credentials() -> bool:
    """Return True when boto3 can resolve credentials from any configured source."""
    try:
        import boto3

        return boto3.Session().get_credentials() is not None
    except Exception:
        # Missing boto3 or any resolution failure counts as "no credentials".
        return False
# Module-level marker: skip every test here when no AWS credentials resolve —
# these are live-service integration tests.
pytestmark = pytest.mark.skipif(
    not _has_aws_credentials(),
    reason="AWS credentials not configured",
)
class TestAwsBedrockEmbedderV3:
    """Tests for Cohere Embed v3 models on Bedrock.

    All tests call the live Bedrock API. v3 models return fixed
    1024-dimensional float vectors.
    """

    @pytest.fixture
    def embedder(self):
        # us-west-2 is the fallback region where all tested models are offered.
        return AwsBedrockEmbedder(
            id="cohere.embed-multilingual-v3",
            aws_region=os.getenv("AWS_REGION", "us-west-2"),
        )

    def test_embedder_initialization(self, embedder):
        """Test that the embedder initializes correctly (defaults, no API call needed)."""
        assert embedder is not None
        assert embedder.id == "cohere.embed-multilingual-v3"
        assert embedder.dimensions == 1024
        assert embedder.input_type == "search_query"

    def test_get_embedding(self, embedder):
        """Test that we can get embeddings for a simple text."""
        text = "The quick brown fox jumps over the lazy dog."
        embeddings = embedder.get_embedding(text)
        assert isinstance(embeddings, list)
        assert len(embeddings) > 0
        assert all(isinstance(x, float) for x in embeddings)
        assert len(embeddings) == embedder.dimensions

    def test_get_embedding_and_usage(self, embedder):
        """Test that we can get embeddings with usage information."""
        text = "Test embedding with usage information."
        # NOTE(review): `usage` is unpacked but not asserted on — consider
        # checking its shape once the API's usage payload is confirmed.
        embedding, usage = embedder.get_embedding_and_usage(text)
        assert isinstance(embedding, list)
        assert len(embedding) > 0
        assert all(isinstance(x, float) for x in embedding)
        assert len(embedding) == embedder.dimensions

    def test_special_characters(self, embedder):
        """Test that special characters are handled correctly."""
        text = "Hello, world! 123 @#$%"
        embeddings = embedder.get_embedding(text)
        assert isinstance(embeddings, list)
        assert len(embeddings) > 0
        assert len(embeddings) == embedder.dimensions

    def test_embedding_consistency(self, embedder):
        """Test that embeddings for the same text are consistent."""
        text = "Consistency test"
        embeddings1 = embedder.get_embedding(text)
        embeddings2 = embedder.get_embedding(text)
        assert len(embeddings1) == len(embeddings2)
        # Allow small floating point differences
        assert all(abs(a - b) < 1e-3 for a, b in zip(embeddings1, embeddings2))

    def test_input_type_search_document(self):
        """Test embedder with search_document input type."""
        embedder = AwsBedrockEmbedder(
            id="cohere.embed-multilingual-v3",
            input_type="search_document",
            aws_region=os.getenv("AWS_REGION", "us-west-2"),
        )
        text = "This is a document to be indexed for search."
        embeddings = embedder.get_embedding(text)
        assert isinstance(embeddings, list)
        assert len(embeddings) == 1024

    def test_truncate_option(self):
        """Test embedder with truncate option."""
        embedder = AwsBedrockEmbedder(
            id="cohere.embed-multilingual-v3",
            truncate="END",
            aws_region=os.getenv("AWS_REGION", "us-west-2"),
        )
        # Create a moderately long text (within API limits but tests truncate param is accepted)
        long_text = " ".join(["word"] * 200)
        embeddings = embedder.get_embedding(long_text)
        assert isinstance(embeddings, list)
        assert len(embeddings) == 1024
class TestAwsBedrockEmbedderV4:
    """Tests for Cohere Embed v4 model on Bedrock.

    v4 defaults to 1536 dimensions and supports configurable output sizes
    (256/512/1024/1536); all tests call the live Bedrock API.
    """

    @pytest.fixture
    def embedder(self):
        return AwsBedrockEmbedder(
            id="us.cohere.embed-v4:0",
            aws_region=os.getenv("AWS_REGION", "us-west-2"),
        )

    def test_embedder_initialization(self, embedder):
        """Test that the v4 embedder initializes correctly."""
        assert embedder is not None
        assert embedder.id == "us.cohere.embed-v4:0"
        assert embedder.dimensions == 1536  # v4 default
        assert embedder._is_v4_model()

    def test_get_embedding(self, embedder):
        """Test that we can get embeddings for a simple text."""
        text = "The quick brown fox jumps over the lazy dog."
        embeddings = embedder.get_embedding(text)
        assert isinstance(embeddings, list)
        assert len(embeddings) > 0
        assert all(isinstance(x, float) for x in embeddings)
        assert len(embeddings) == embedder.dimensions

    def test_custom_output_dimension(self):
        """Test v4 embedder with custom output dimension."""
        embedder = AwsBedrockEmbedder(
            id="us.cohere.embed-v4:0",
            output_dimension=1024,
            aws_region=os.getenv("AWS_REGION", "us-west-2"),
        )
        # output_dimension should override the 1536 default end-to-end.
        assert embedder.dimensions == 1024
        text = "Test with custom dimension"
        embeddings = embedder.get_embedding(text)
        assert isinstance(embeddings, list)
        assert len(embeddings) == 1024

    def test_all_output_dimensions(self):
        """Test all supported output dimensions for v4."""
        dimensions_to_test = [256, 512, 1024, 1536]
        text = "Test dimensions"
        for dim in dimensions_to_test:
            embedder = AwsBedrockEmbedder(
                id="us.cohere.embed-v4:0",
                output_dimension=dim,
                aws_region=os.getenv("AWS_REGION", "us-west-2"),
            )
            embeddings = embedder.get_embedding(text)
            assert len(embeddings) == dim, f"Expected {dim} dimensions, got {len(embeddings)}"

    def test_v4_truncate_options(self):
        """Test v4 truncate options (LEFT/RIGHT)."""
        embedder = AwsBedrockEmbedder(
            id="us.cohere.embed-v4:0",
            truncate="RIGHT",
            aws_region=os.getenv("AWS_REGION", "us-west-2"),
        )
        long_text = " ".join(["word"] * 1000)
        embeddings = embedder.get_embedding(long_text)
        assert isinstance(embeddings, list)
        assert len(embeddings) == 1536
class TestAwsBedrockEmbedderV4Multimodal:
    """Tests for Cohere Embed v4 multimodal features.

    Only checks that v3 models reject multimodal calls; no image payloads
    are actually embedded here.
    """

    @pytest.fixture
    def embedder(self):
        # v4 embedder with reduced output size; currently unused by the tests below.
        return AwsBedrockEmbedder(
            id="us.cohere.embed-v4:0",
            output_dimension=1024,
            aws_region=os.getenv("AWS_REGION", "us-west-2"),
        )

    def test_image_embedding_requires_v4(self):
        """Test that image embedding raises error for v3 models."""
        v3_embedder = AwsBedrockEmbedder(
            id="cohere.embed-multilingual-v3",
            aws_region=os.getenv("AWS_REGION", "us-west-2"),
        )
        with pytest.raises(Exception) as exc_info:
            v3_embedder.get_image_embedding("data:image/png;base64,...")
        # Loose match: the error should mention v4 or lack of support.
        assert "v4" in str(exc_info.value).lower() or "supported" in str(exc_info.value).lower()

    def test_multimodal_embedding_requires_v4(self):
        """Test that multimodal embedding raises error for v3 models."""
        v3_embedder = AwsBedrockEmbedder(
            id="cohere.embed-multilingual-v3",
            aws_region=os.getenv("AWS_REGION", "us-west-2"),
        )
        with pytest.raises(Exception) as exc_info:
            v3_embedder.get_multimodal_embedding([{"type": "text", "text": "test"}])
        assert "v4" in str(exc_info.value).lower() or "supported" in str(exc_info.value).lower()
class TestAwsBedrockEmbedderAsync:
    """Tests for async methods of AWS Bedrock Embedder.

    Mirrors the sync v3 tests through the async_* API surface.
    """

    @pytest.fixture
    def embedder(self):
        return AwsBedrockEmbedder(
            id="cohere.embed-multilingual-v3",
            aws_region=os.getenv("AWS_REGION", "us-west-2"),
        )

    @pytest.mark.asyncio
    async def test_async_get_embedding(self, embedder):
        """Test async embedding retrieval."""
        text = "Async embedding test"
        embeddings = await embedder.async_get_embedding(text)
        assert isinstance(embeddings, list)
        assert len(embeddings) > 0
        assert len(embeddings) == embedder.dimensions

    @pytest.mark.asyncio
    async def test_async_get_embedding_and_usage(self, embedder):
        """Test async embedding with usage retrieval."""
        text = "Async embedding with usage test"
        # NOTE(review): `usage` is unpacked but not asserted on (same as sync test).
        embedding, usage = await embedder.async_get_embedding_and_usage(text)
        assert isinstance(embedding, list)
        assert len(embedding) > 0
        assert len(embedding) == embedder.dimensions
class TestAwsBedrockEmbedderConfiguration:
    """Tests for AWS Bedrock Embedder configuration options."""

    def test_english_model(self):
        """Test with English-only model."""
        embedder = AwsBedrockEmbedder(
            id="cohere.embed-english-v3",
            aws_region=os.getenv("AWS_REGION", "us-west-2"),
        )
        text = "English text for embedding"
        embeddings = embedder.get_embedding(text)
        assert isinstance(embeddings, list)
        assert len(embeddings) == 1024

    def test_batch_disabled_warning(self, caplog):
        """Test that batch mode is properly disabled."""
        # NOTE(review): caplog is unused — the test checks only the flag,
        # not that a warning was actually logged. Consider asserting on
        # caplog records or dropping the fixture.
        embedder = AwsBedrockEmbedder(
            id="cohere.embed-multilingual-v3",
            enable_batch=True,
            aws_region=os.getenv("AWS_REGION", "us-west-2"),
        )
        # enable_batch=True is expected to be forced back to False by the embedder.
        assert embedder.enable_batch is False

    def test_embedding_types_parameter(self):
        """Test with explicit embedding types parameter."""
        embedder = AwsBedrockEmbedder(
            id="cohere.embed-multilingual-v3",
            embedding_types=["float"],
            aws_region=os.getenv("AWS_REGION", "us-west-2"),
        )
        text = "Test with embedding types"
        embeddings = embedder.get_embedding(text)
        assert isinstance(embeddings, list)
        assert len(embeddings) == 1024
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/embedder/test_aws_bedrock_embedder.py",
"license": "Apache License 2.0",
"lines": 235,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/reranker/test_aws_bedrock_reranker.py | """
Integration tests for AWS Bedrock Reranker.
These tests require valid AWS credentials with access to Bedrock.
Credentials can be provided via:
- Environment variables (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
- AWS credentials file (~/.aws/credentials)
- AWS SSO session
- IAM role (when running on AWS infrastructure)
To run these tests:
pytest libs/agno/tests/integration/reranker/test_aws_bedrock_reranker.py -v
Note:
- Amazon Rerank 1.0 is NOT available in us-east-1 (N. Virginia).
- Use us-west-2 or another supported region.
"""
import os
import pytest
from agno.knowledge.document import Document
from agno.knowledge.reranker.aws_bedrock import (
AMAZON_RERANK_V1,
COHERE_RERANK_V3_5,
AmazonReranker,
AwsBedrockReranker,
CohereBedrockReranker,
)
def _has_aws_credentials() -> bool:
    """Return True when boto3 can resolve credentials from any configured source."""
    try:
        import boto3

        return boto3.Session().get_credentials() is not None
    except Exception:
        # Missing boto3 or any resolution failure counts as "no credentials".
        return False
# Module-level marker: skip every test here when no AWS credentials resolve —
# these are live-service integration tests.
pytestmark = pytest.mark.skipif(
    not _has_aws_credentials(),
    reason="AWS credentials not configured",
)
@pytest.fixture
def sample_documents():
    """Five short documents: three ML/AI-related and two off-topic, in a fixed order."""
    contents = [
        "Machine learning is a subset of artificial intelligence.",
        "The weather in Paris is typically mild in spring.",
        "Deep learning uses neural networks with many layers.",
        "Python is a popular programming language for data science.",
        "Transformers revolutionized natural language processing.",
    ]
    return [Document(content=text) for text in contents]
@pytest.fixture
def ml_query():
    """A query about machine learning, used to rank ML documents above the rest."""
    return "What is machine learning and how does it relate to AI?"
class TestAwsBedrockRerankerCohere:
    """Tests for Cohere Rerank 3.5 on Bedrock.

    All tests call the live Bedrock Rerank API (module skipped without credentials).
    """

    @pytest.fixture
    def reranker(self):
        return AwsBedrockReranker(
            model=COHERE_RERANK_V3_5,
            aws_region=os.getenv("AWS_REGION", "us-west-2"),
        )

    def test_reranker_initialization(self, reranker):
        """Test that the reranker initializes correctly."""
        assert reranker is not None
        assert reranker.model == COHERE_RERANK_V3_5
        assert reranker.top_n is None

    def test_rerank_documents(self, reranker, sample_documents, ml_query):
        """Test basic document reranking."""
        reranked = reranker.rerank(query=ml_query, documents=sample_documents)
        assert isinstance(reranked, list)
        # With top_n unset, all documents come back.
        assert len(reranked) == len(sample_documents)
        # Check that all documents have reranking scores
        for doc in reranked:
            assert doc.reranking_score is not None
            assert isinstance(doc.reranking_score, float)
        # Check that results are sorted by relevance (descending)
        scores = [doc.reranking_score for doc in reranked]
        assert scores == sorted(scores, reverse=True)

    def test_rerank_with_top_n(self, sample_documents, ml_query):
        """Test reranking with top_n limit."""
        reranker = AwsBedrockReranker(
            model=COHERE_RERANK_V3_5,
            top_n=3,
            aws_region=os.getenv("AWS_REGION", "us-west-2"),
        )
        reranked = reranker.rerank(query=ml_query, documents=sample_documents)
        assert len(reranked) == 3
        # Top results should be ML-related
        assert all(doc.reranking_score is not None for doc in reranked)

    def test_rerank_empty_documents(self, reranker):
        """Test reranking with empty document list."""
        reranked = reranker.rerank(query="Any query", documents=[])
        assert reranked == []

    def test_rerank_single_document(self, reranker, ml_query):
        """Test reranking with a single document."""
        single_doc = [Document(content="Machine learning is amazing.")]
        reranked = reranker.rerank(query=ml_query, documents=single_doc)
        assert len(reranked) == 1
        assert reranked[0].reranking_score is not None

    def test_ml_documents_ranked_higher(self, reranker, sample_documents, ml_query):
        """Test that ML-related documents are ranked higher for ML query."""
        reranked = reranker.rerank(query=ml_query, documents=sample_documents)
        # The top results should be about ML/AI/deep learning
        top_contents = [doc.content.lower() for doc in reranked[:3]]
        ml_related_count = sum(
            1
            for content in top_contents
            if any(term in content for term in ["machine learning", "ai", "deep learning", "neural"])
        )
        # At least 2 of the top 3 should be ML-related (loose bound: model output
        # is not fully deterministic across API versions).
        assert ml_related_count >= 2
class TestAwsBedrockRerankerAmazon:
    """Tests for Amazon Rerank 1.0 on Bedrock."""

    @pytest.fixture
    def reranker(self):
        # Note: Amazon Rerank 1.0 is NOT available in us-east-1
        return AwsBedrockReranker(
            model=AMAZON_RERANK_V1,
            aws_region=os.getenv("AWS_REGION", "us-west-2"),
        )

    def test_reranker_initialization(self, reranker):
        """Test that the Amazon reranker initializes correctly."""
        assert reranker is not None
        assert reranker.model == AMAZON_RERANK_V1

    def test_rerank_documents(self, reranker, sample_documents, ml_query):
        """Test document reranking with Amazon model."""
        reranked = reranker.rerank(query=ml_query, documents=sample_documents)
        assert isinstance(reranked, list)
        assert len(reranked) == len(sample_documents)
        for doc in reranked:
            assert doc.reranking_score is not None

    def test_rerank_with_top_n(self, sample_documents, ml_query):
        """Test Amazon reranker with top_n limit."""
        reranker = AwsBedrockReranker(
            model=AMAZON_RERANK_V1,
            top_n=2,
            aws_region=os.getenv("AWS_REGION", "us-west-2"),
        )
        reranked = reranker.rerank(query=ml_query, documents=sample_documents)
        assert len(reranked) == 2
class TestConvenienceClasses:
    """Tests for convenience reranker classes (pre-pinned model defaults)."""

    def test_cohere_bedrock_reranker(self, sample_documents, ml_query):
        """Test CohereBedrockReranker convenience class."""
        reranker = CohereBedrockReranker(
            top_n=3,
            aws_region=os.getenv("AWS_REGION", "us-west-2"),
        )
        # The subclass must default to the Cohere model without it being passed.
        assert reranker.model == COHERE_RERANK_V3_5
        reranked = reranker.rerank(query=ml_query, documents=sample_documents)
        assert len(reranked) == 3

    def test_amazon_reranker(self, sample_documents, ml_query):
        """Test AmazonReranker convenience class."""
        reranker = AmazonReranker(
            top_n=3,
            aws_region=os.getenv("AWS_REGION", "us-west-2"),
        )
        assert reranker.model == AMAZON_RERANK_V1
        reranked = reranker.rerank(query=ml_query, documents=sample_documents)
        assert len(reranked) == 3
class TestRerankerEdgeCases:
    """Tests for edge cases and error handling."""

    @pytest.fixture
    def reranker(self):
        return AwsBedrockReranker(
            model=COHERE_RERANK_V3_5,
            aws_region=os.getenv("AWS_REGION", "us-west-2"),
        )

    def test_long_document(self, reranker):
        """Test reranking with a long document."""
        long_content = "Machine learning is powerful. " * 500
        documents = [
            Document(content=long_content),
            Document(content="Short document about weather."),
        ]
        reranked = reranker.rerank(query="What is machine learning?", documents=documents)
        assert len(reranked) == 2
        for doc in reranked:
            assert doc.reranking_score is not None

    def test_special_characters_in_query(self, reranker, sample_documents):
        """Test reranking with special characters in query."""
        query = "What's ML & AI? @#$%"
        reranked = reranker.rerank(query=query, documents=sample_documents)
        assert len(reranked) == len(sample_documents)

    def test_special_characters_in_documents(self, reranker):
        """Test reranking with special characters in documents."""
        documents = [
            Document(content="Machine learning (ML) & artificial intelligence (AI) are related!"),
            Document(content="Hello @world #test $special %chars"),
        ]
        reranked = reranker.rerank(query="What is ML?", documents=documents)
        assert len(reranked) == 2

    def test_invalid_top_n_ignored(self, sample_documents, ml_query):
        """Test that invalid top_n values are handled gracefully."""
        reranker = AwsBedrockReranker(
            model=COHERE_RERANK_V3_5,
            top_n=-1,  # Invalid value
            aws_region=os.getenv("AWS_REGION", "us-west-2"),
        )
        # Should still work, treating invalid top_n as None (all documents returned).
        reranked = reranker.rerank(query=ml_query, documents=sample_documents)
        assert len(reranked) == len(sample_documents)

    def test_unicode_content(self, reranker):
        """Test reranking with Unicode content."""
        documents = [
            Document(content="Machine learning is powerful."),
            Document(content="Aprendizaje automatico es importante."),
        ]
        reranked = reranker.rerank(query="What is machine learning?", documents=documents)
        assert len(reranked) == 2
class TestRerankerWithAdditionalFields:
    """Covers the additional_model_request_fields pass-through."""

    def test_additional_model_request_fields(self, sample_documents, ml_query):
        """An empty extra-fields dict must not break reranking."""
        configured = AwsBedrockReranker(
            model=COHERE_RERANK_V3_5,
            top_n=3,
            aws_region=os.getenv("AWS_REGION", "us-west-2"),
            additional_model_request_fields={},  # Empty dict should work
        )
        results = configured.rerank(query=ml_query, documents=sample_documents)
        assert len(results) == 3
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/reranker/test_aws_bedrock_reranker.py",
"license": "Apache License 2.0",
"lines": 221,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:cookbook/08_learning/00_quickstart/01_always_learn.py | """
Learning Machines
=================
Set learning=True to turn an agent into a learning machine.
The agent automatically captures:
- User profile: name, role, preferences
- User memory: observations, context, patterns
No explicit tool calls needed. Extraction runs in parallel.
"""
from agno.agent import Agent
from agno.db.sqlite import SqliteDb
from agno.models.openai import OpenAIResponses
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# SQLite-backed storage for sessions and learned state.
db = SqliteDb(db_file="tmp/agents.db")
# learning=True enables ALWAYS mode: user profile and memories are
# extracted automatically after each run (see module docstring).
agent = Agent(
    model=OpenAIResponses(id="gpt-5.2"),
    db=db,
    learning=True,
    markdown=True,
)
# ---------------------------------------------------------------------------
# Run Demo
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # A stable user id keys the learned profile/memories across sessions.
    user_id = "alice1@example.com"
    # Session 1: Share information naturally
    print("\n--- Session 1: Extraction happens automatically ---\n")
    agent.print_response(
        "Hi! I'm Alice. I work at Anthropic as a research scientist. "
        "I prefer concise responses without too much explanation.",
        user_id=user_id,
        session_id="session_1",
        stream=True,
    )
    # Inspect what the learning machine captured for this user.
    lm = agent.learning_machine
    lm.user_profile_store.print(user_id=user_id)
    lm.user_memory_store.print(user_id=user_id)
    # Session 2: New session - agent remembers
    print("\n--- Session 2: Agent remembers across sessions ---\n")
    agent.print_response(
        "What do you know about me?",
        user_id=user_id,
        session_id="session_2",
        stream=True,
    )
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/08_learning/00_quickstart/01_always_learn.py",
"license": "Apache License 2.0",
"lines": 47,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/08_learning/00_quickstart/02_agentic_learn.py | """
Learning Machines: Agentic Mode
===============================
In AGENTIC mode, the agent receives tools to explicitly manage learning.
It decides when to save profiles and memories based on conversation context.
Compare with learning=True (ALWAYS mode) where extraction happens automatically.
"""
from agno.agent import Agent
from agno.db.sqlite import SqliteDb
from agno.learn import (
LearningMachine,
LearningMode,
UserMemoryConfig,
UserProfileConfig,
)
from agno.models.openai import OpenAIResponses
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# SQLite-backed storage for sessions and learned state.
db = SqliteDb(db_file="tmp/agents.db")
# AGENTIC mode: instead of automatic extraction, the agent is given
# explicit tools and decides itself when to save profile/memory entries
# (see module docstring).
agent = Agent(
    model=OpenAIResponses(id="gpt-5.2"),
    db=db,
    learning=LearningMachine(
        user_profile=UserProfileConfig(mode=LearningMode.AGENTIC),
        user_memory=UserMemoryConfig(mode=LearningMode.AGENTIC),
    ),
    markdown=True,
)
# ---------------------------------------------------------------------------
# Run Demo
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # A stable user id keys the learned profile/memories across sessions.
    user_id = "alice2@example.com"
    # Session 1: Agent decides what to save via tool calls
    print("\n--- Session 1: Agent uses tools to save profile and memories ---\n")
    agent.print_response(
        "Hi! I'm Alice. I work at Anthropic as a research scientist. "
        "I prefer concise responses without too much explanation.",
        user_id=user_id,
        session_id="session_1",
        stream=True,
    )
    # Inspect what the agent chose to persist for this user.
    lm = agent.learning_machine
    lm.user_profile_store.print(user_id=user_id)
    lm.user_memory_store.print(user_id=user_id)
    # Session 2: New session - agent remembers
    print("\n--- Session 2: Agent remembers across sessions ---\n")
    agent.print_response(
        "What do you know about me?",
        user_id=user_id,
        session_id="session_2",
        stream=True,
    )
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/08_learning/00_quickstart/02_agentic_learn.py",
"license": "Apache License 2.0",
"lines": 54,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/08_learning/00_quickstart/03_learned_knowledge.py | """
Learning Machines: Learned Knowledge
====================================
Learned Knowledge stores insights that transfer across users.
One person teaches the agent something. Another person benefits.
In AGENTIC mode, the agent receives tools to:
- search_learnings: Find relevant past knowledge
- save_learning: Store a new insight
The agent decides when to save and apply learnings.
"""
from agno.agent import Agent
from agno.db.sqlite import SqliteDb
from agno.knowledge import Knowledge
from agno.knowledge.embedder.openai import OpenAIEmbedder
from agno.learn import LearnedKnowledgeConfig, LearningMachine, LearningMode
from agno.models.openai import OpenAIResponses
from agno.vectordb.chroma import ChromaDb, SearchType
# ---------------------------------------------------------------------------
# Create Knowledge and Agent
# ---------------------------------------------------------------------------
# SQLite-backed storage for sessions and learned state.
db = SqliteDb(db_file="tmp/agents.db")
# Vector store where cross-user learnings are persisted and searched.
knowledge = Knowledge(
    name="Agent Learnings",
    vector_db=ChromaDb(
        name="learnings",
        path="tmp/chromadb",
        persistent_client=True,
        search_type=SearchType.hybrid,
        embedder=OpenAIEmbedder(id="text-embedding-3-small"),
    ),
)
# AGENTIC learned-knowledge mode: the agent gets search_learnings /
# save_learning tools and decides when to use them (see module docstring).
agent = Agent(
    model=OpenAIResponses(id="gpt-5.2"),
    db=db,
    learning=LearningMachine(
        knowledge=knowledge,
        learned_knowledge=LearnedKnowledgeConfig(mode=LearningMode.AGENTIC),
    ),
    markdown=True,
)
# ---------------------------------------------------------------------------
# Run Demo
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # Session 1: User 1 teaches the agent
    print("\n--- Session 1: User 1 saves a learning ---\n")
    agent.print_response(
        "We're trying to reduce our cloud egress costs. Remember this.",
        user_id="engineer_1@example.com",
        session_id="session_1",
        stream=True,
    )
    # Show what was stored; learnings are shared, not per-user.
    lm = agent.learning_machine
    lm.learned_knowledge_store.print(query="cloud")
    # Session 2: User 2 benefits from the learning
    print("\n--- Session 2: User 2 asks a related question ---\n")
    agent.print_response(
        "I'm picking a cloud provider for a data pipeline. Give me 2 key considerations.",
        user_id="engineer_2@example.com",
        session_id="session_2",
        stream=True,
    )
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/08_learning/00_quickstart/03_learned_knowledge.py",
"license": "Apache License 2.0",
"lines": 62,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/tests/unit/knowledge/chunking/test_chunk_id_generation.py | from agno.knowledge.chunking.document import DocumentChunking
from agno.knowledge.chunking.fixed import FixedSizeChunking
from agno.knowledge.chunking.row import RowChunking
from agno.knowledge.document.base import Document
# --- Fallback chain tests ---
def test_fallback_priority_1_uses_document_id():
    """Priority 1: an explicit document id wins over the name."""
    document = Document(id="doc123", name="test.txt", content="Some content.")
    pieces = FixedSizeChunking(chunk_size=100).chunk(document)
    assert pieces[0].id == "doc123_1"


def test_fallback_priority_2_uses_document_name():
    """Priority 2: with no id, the document name seeds the chunk ID."""
    document = Document(name="report.pdf", content="Some content.")
    pieces = FixedSizeChunking(chunk_size=100).chunk(document)
    assert pieces[0].id == "report.pdf_1"


def test_fallback_priority_3_uses_content_hash():
    """Priority 3: with neither id nor name, a content hash is used."""
    document = Document(content="Content for hashing.")
    pieces = FixedSizeChunking(chunk_size=100).chunk(document)
    chunk_id = pieces[0].id
    assert chunk_id is not None
    assert chunk_id.startswith("chunk_")
    # Expected shape: chunk_{12-char-hash}_{chunk_number}
    segments = chunk_id.split("_")
    assert len(segments) == 3
    assert segments[0] == "chunk"
    assert len(segments[1]) == 12
    assert segments[2] == "1"
# --- Determinism tests ---
def test_same_content_produces_same_hash():
    """Hashing is deterministic: equal content gives equal chunk IDs."""
    text = "Deterministic content."
    first = FixedSizeChunking(chunk_size=100).chunk(Document(content=text))
    second = FixedSizeChunking(chunk_size=100).chunk(Document(content=text))
    assert first[0].id == second[0].id


def test_different_content_produces_different_hash():
    """Distinct content must not collide on chunk IDs."""
    first = FixedSizeChunking(chunk_size=100).chunk(Document(content="Content A"))
    second = FixedSizeChunking(chunk_size=100).chunk(Document(content="Content B"))
    assert first[0].id != second[0].id


def test_multiple_chunks_have_unique_ids():
    """Chunks split from one document each get their own ID."""
    document = Document(content="A" * 100 + "B" * 100 + "C" * 100)
    pieces = FixedSizeChunking(chunk_size=100).chunk(document)
    seen = [piece.id for piece in pieces]
    assert len(seen) == len(set(seen)), "Chunk IDs should be unique"
# --- RowChunking prefix tests ---
def test_row_chunking_id_format_with_document_id():
    """RowChunking IDs use the document id plus a _row_ counter."""
    document = Document(id="data", content="row1\nrow2\nrow3")
    rows = RowChunking().chunk(document)
    assert [row.id for row in rows] == ["data_row_1", "data_row_2", "data_row_3"]


def test_row_chunking_id_format_with_name():
    """Without an id, RowChunking falls back to the document name."""
    document = Document(name="data.csv", content="row1\nrow2")
    rows = RowChunking().chunk(document)
    assert [row.id for row in rows] == ["data.csv_row_1", "data.csv_row_2"]


def test_row_chunking_id_format_with_hash():
    """Without id or name, RowChunking hashes content and keeps _row_."""
    document = Document(content="row1\nrow2")
    rows = RowChunking().chunk(document)
    first_id = rows[0].id
    assert first_id.startswith("chunk_")
    assert "_row_" in first_id
    assert first_id.endswith("_row_1")
# --- Edge cases ---
def test_empty_content_returns_no_chunks():
    """Empty content should return empty list (no ID generation needed)."""
    doc = Document(content="")
    chunks = FixedSizeChunking(chunk_size=100).chunk(doc)
    assert len(chunks) == 0


def test_unicode_content_produces_valid_id():
    """Accented (non-ASCII) content should hash into a valid chunk ID."""
    # Bug fix: the previous content was plain ASCII ("Hello"), so this
    # test never actually exercised Unicode hashing.
    doc = Document(content="Héllo wörld — café naïve")
    chunks = FixedSizeChunking(chunk_size=100).chunk(doc)
    assert chunks[0].id is not None
    assert chunks[0].id.startswith("chunk_")


def test_emoji_content_produces_valid_id():
    """Emoji (astral-plane) content should hash into a valid chunk ID."""
    # Bug fix: the previous content contained no emoji at all.
    doc = Document(content="Hello 🌍 world 🚀")
    chunks = FixedSizeChunking(chunk_size=100).chunk(doc)
    assert chunks[0].id is not None
    assert chunks[0].id.startswith("chunk_")


def test_document_chunking_uses_fallback():
    """DocumentChunking should also use the fallback chain."""
    doc = Document(content="Para one.\n\nPara two.\n\nPara three.")
    chunks = DocumentChunking(chunk_size=20, overlap=0).chunk(doc)
    assert len(chunks) >= 2
    for chunk in chunks:
        assert chunk.id is not None
        assert chunk.id.startswith("chunk_")
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/knowledge/chunking/test_chunk_id_generation.py",
"license": "Apache License 2.0",
"lines": 92,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/db/test_surrealdb_models.py | from datetime import date, datetime, timezone
from agno.db.surrealdb.models import desurrealize_dates, surrealize_dates
def test_surrealize_int_timestamp_converts_to_correct_utc():
    """An epoch int becomes the equivalent timezone-aware UTC datetime."""
    expected = datetime(2024, 1, 15, 12, 0, 0, tzinfo=timezone.utc)
    converted = surrealize_dates({"created_at": int(expected.timestamp())})
    assert converted["created_at"] == expected


def test_surrealize_result_has_utc_tzinfo():
    """Converted datetimes are timezone-aware and pinned to UTC."""
    converted = surrealize_dates({"created_at": 1705320000})
    stamp = converted["created_at"]
    assert stamp.tzinfo is not None
    assert stamp.tzinfo == timezone.utc


def test_surrealize_does_not_mutate_original():
    """surrealize_dates must leave the input record untouched."""
    original = {"created_at": 1705320000}
    surrealize_dates(original)
    assert original["created_at"] == 1705320000
def test_surrealize_date_converts_to_midnight_utc():
    """A plain date maps to midnight UTC on that day."""
    converted = surrealize_dates({"some_date": date(2024, 3, 15)})
    assert converted["some_date"] == datetime(2024, 3, 15, 0, 0, 0, tzinfo=timezone.utc)


def test_surrealize_non_date_fields_unchanged():
    """Fields that are not dates or epochs pass through untouched."""
    converted = surrealize_dates({"created_at": 1705320000, "name": "test", "count": 42})
    assert converted["name"] == "test"
    assert converted["count"] == 42


def test_epoch_round_trip_preserves_value():
    """surrealize then desurrealize returns the original epoch ints."""
    stamp = 1718476200
    round_tripped = desurrealize_dates(surrealize_dates({"created_at": stamp, "updated_at": stamp}))
    assert round_tripped["created_at"] == stamp
    assert round_tripped["updated_at"] == stamp
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/db/test_surrealdb_models.py",
"license": "Apache License 2.0",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/knowledge/test_knowledge_filter_validation.py | from typing import List, Set
import pytest
from agno.filters import AND, EQ, GT, IN, LT, OR
from agno.knowledge.document import Document
from agno.knowledge.knowledge import Knowledge
from agno.vectordb.base import VectorDb
class MockVectorDb(VectorDb):
    """No-op VectorDb test double.

    Stubs every abstract method of the VectorDb interface so a Knowledge
    instance can be built without a real backend: existence checks say
    "not found", searches return nothing, and mutations succeed silently.
    """

    # --- collection lifecycle: no-ops ---
    def create(self) -> None:
        pass

    async def async_create(self) -> None:
        pass

    # --- existence checks: always report "not found" ---
    def name_exists(self, name: str) -> bool:
        return False

    async def async_name_exists(self, name: str) -> bool:
        return False

    def id_exists(self, id: str) -> bool:
        return False

    def content_hash_exists(self, content_hash: str) -> bool:
        return False

    # --- writes: accepted and discarded ---
    def insert(self, content_hash: str, documents: List[Document], filters=None) -> None:
        pass

    async def async_insert(self, content_hash: str, documents: List[Document], filters=None) -> None:
        pass

    def upsert(self, content_hash: str, documents: List[Document], filters=None) -> None:
        pass

    async def async_upsert(self, content_hash: str, documents: List[Document], filters=None) -> None:
        pass

    # --- reads: always empty ---
    def search(self, query: str, limit: int = 5, filters=None) -> List[Document]:
        return []

    async def async_search(self, query: str, limit: int = 5, filters=None) -> List[Document]:
        return []

    # --- teardown / deletion: report success ---
    def drop(self) -> None:
        pass

    async def async_drop(self) -> None:
        pass

    def exists(self) -> bool:
        return True

    async def async_exists(self) -> bool:
        return True

    def delete(self) -> bool:
        return True

    def delete_by_id(self, id: str) -> bool:
        return True

    def delete_by_name(self, name: str) -> bool:
        return True

    def delete_by_metadata(self, metadata) -> bool:
        return True

    def update_metadata(self, content_id: str, metadata) -> None:
        pass

    def delete_by_content_id(self, content_id: str) -> bool:
        return True

    def get_supported_search_types(self) -> List[str]:
        return ["vector"]
@pytest.fixture
def knowledge():
    """Knowledge instance wired to the no-op vector store."""
    return Knowledge(vector_db=MockVectorDb())


def test_validate_filters_removes_invalid_dict_keys(knowledge):
    """Dict filters: unknown keys are dropped and reported as invalid."""
    candidate = {"region": "us", "invalid_key": "value"}
    allowed: Set[str] = {"region", "year"}
    valid, invalid = knowledge._validate_filters(candidate, allowed)
    assert "region" in valid
    assert "invalid_key" not in valid
    assert "invalid_key" in invalid


def test_validate_filters_removes_invalid_list_items(knowledge):
    """List filters: conditions on unknown keys are dropped and reported."""
    candidate = [EQ("region", "us"), EQ("invalid_key", "value")]
    allowed: Set[str] = {"region", "year"}
    valid, invalid = knowledge._validate_filters(candidate, allowed)
    kept_keys = [condition.key for condition in valid]
    assert "region" in kept_keys
    assert "invalid_key" not in kept_keys
    assert "invalid_key" in invalid


def test_validate_filters_keeps_complex_filters(knowledge):
    """Composite AND/OR filters survive validation intact."""
    candidate = [AND(EQ("region", "us"), EQ("year", 2024)), OR(EQ("region", "eu"))]
    allowed: Set[str] = {"region", "year"}
    valid, invalid = knowledge._validate_filters(candidate, allowed)
    assert len(valid) == 2
    assert len(invalid) == 0
def test_validate_filters_with_gt_lt_in(knowledge):
    """GT/LT/IN conditions validate by key just like EQ does."""
    candidate = [
        GT("price", 100),
        LT("date", "2024-01-01"),
        IN("category", ["tech", "science"]),
        GT("invalid_key", 50),
    ]
    allowed: Set[str] = {"price", "date", "category"}
    valid, invalid = knowledge._validate_filters(candidate, allowed)
    kept_keys = [condition.key for condition in valid]
    for expected_key in ("price", "date", "category"):
        assert expected_key in kept_keys
    assert len(valid) == 3
    assert "invalid_key" in invalid


def test_validate_filters_with_prefixed_keys(knowledge):
    """Keys prefixed with meta_data. are validated against the bare key."""
    candidate = {"meta_data.region": "us", "meta_data.invalid": "value"}
    allowed: Set[str] = {"region", "year"}
    valid, invalid = knowledge._validate_filters(candidate, allowed)
    assert "meta_data.region" in valid
    assert "meta_data.invalid" not in valid
    assert "meta_data.invalid" in invalid


def test_validate_filters_empty_metadata(knowledge):
    """With no known metadata keys, every filter passes through."""
    candidate = [EQ("region", "us")]
    valid, invalid = knowledge._validate_filters(candidate, set())
    assert valid == candidate
    assert invalid == []
def test_validate_filters_mixed_valid_invalid_list(knowledge):
    """Valid and invalid conditions separate cleanly from a mixed list."""
    candidate = [
        EQ("region", "us"),
        EQ("invalid1", "value"),
        EQ("year", 2024),
        EQ("invalid2", "value"),
    ]
    allowed: Set[str] = {"region", "year"}
    valid, invalid = knowledge._validate_filters(candidate, allowed)
    assert len(valid) == 2
    assert len(invalid) == 2
    kept_keys = [condition.key for condition in valid]
    assert "region" in kept_keys
    assert "year" in kept_keys
    assert "invalid1" in invalid
    assert "invalid2" in invalid


def test_filter_merge_raises_on_type_mismatch():
    """Mixing dict-style filters with list-style filters is a hard error."""
    from agno.utils.knowledge import get_agentic_or_user_search_filters

    with pytest.raises(ValueError):
        get_agentic_or_user_search_filters({"region": "us"}, [EQ("year", 2024)])
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/knowledge/test_knowledge_filter_validation.py",
"license": "Apache License 2.0",
"lines": 124,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/knowledge/test_knowledge_search_tools.py | from typing import List
from unittest.mock import AsyncMock, MagicMock
import pytest
from agno.knowledge.document import Document
from agno.knowledge.knowledge import Knowledge
from agno.vectordb.base import VectorDb
class MockVectorDb(VectorDb):
    """No-op VectorDb test double for exercising the search-tool wrappers.

    Every abstract method of the VectorDb interface is stubbed: existence
    checks report "not found", searches return nothing, mutations succeed
    silently. Tests patch Knowledge.search/asearch directly, so this class
    only needs to let Knowledge construct.
    """

    # --- collection lifecycle: no-ops ---
    def create(self) -> None:
        pass

    async def async_create(self) -> None:
        pass

    # --- existence checks: always report "not found" ---
    def name_exists(self, name: str) -> bool:
        return False

    async def async_name_exists(self, name: str) -> bool:
        return False

    def id_exists(self, id: str) -> bool:
        return False

    def content_hash_exists(self, content_hash: str) -> bool:
        return False

    # --- writes: accepted and discarded ---
    def insert(self, content_hash: str, documents: List[Document], filters=None) -> None:
        pass

    async def async_insert(self, content_hash: str, documents: List[Document], filters=None) -> None:
        pass

    def upsert(self, content_hash: str, documents: List[Document], filters=None) -> None:
        pass

    async def async_upsert(self, content_hash: str, documents: List[Document], filters=None) -> None:
        pass

    # --- reads: always empty ---
    def search(self, query: str, limit: int = 5, filters=None) -> List[Document]:
        return []

    async def async_search(self, query: str, limit: int = 5, filters=None) -> List[Document]:
        return []

    # --- teardown / deletion: report success ---
    def drop(self) -> None:
        pass

    async def async_drop(self) -> None:
        pass

    def exists(self) -> bool:
        return True

    async def async_exists(self) -> bool:
        return True

    def delete(self) -> bool:
        return True

    def delete_by_id(self, id: str) -> bool:
        return True

    def delete_by_name(self, name: str) -> bool:
        return True

    def delete_by_metadata(self, metadata) -> bool:
        return True

    def update_metadata(self, content_id: str, metadata) -> None:
        pass

    def delete_by_content_id(self, content_id: str) -> bool:
        return True

    def get_supported_search_types(self) -> List[str]:
        return ["vector"]
@pytest.fixture
def knowledge():
    """Knowledge instance backed by the no-op vector store."""
    return Knowledge(vector_db=MockVectorDb())


def test_search_tool_catches_exceptions(knowledge):
    """A failing search surfaces as an error string, not a raised exception."""
    knowledge.search = MagicMock(side_effect=Exception("Connection refused"))
    search_tool = knowledge._create_search_tool(async_mode=False)
    outcome = search_tool.entrypoint(query="test")
    assert isinstance(outcome, str)
    assert "Error searching knowledge base" in outcome
    assert "Exception" in outcome


def test_search_tool_with_filters_catches_exceptions(knowledge):
    """The filtered search tool also reports failures as a string."""
    knowledge.search = MagicMock(side_effect=Exception("DB timeout"))
    search_tool = knowledge._create_search_tool_with_filters(async_mode=False)
    outcome = search_tool.entrypoint(query="test")
    assert isinstance(outcome, str)
    assert "Error searching knowledge base" in outcome
@pytest.mark.asyncio
async def test_async_search_tool_catches_exceptions(knowledge):
    """Async search failures also come back as an error string."""
    knowledge.asearch = AsyncMock(side_effect=Exception("Network error"))
    search_tool = knowledge._create_search_tool(async_mode=True)
    outcome = await search_tool.entrypoint(query="test")
    assert isinstance(outcome, str)
    assert "Error searching knowledge base" in outcome


@pytest.mark.asyncio
async def test_async_search_tool_with_filters_catches_exceptions(knowledge):
    """The async filtered tool reports failures as a string too."""
    knowledge.asearch = AsyncMock(side_effect=Exception("Connection timeout"))
    search_tool = knowledge._create_search_tool_with_filters(async_mode=True)
    outcome = await search_tool.entrypoint(query="test")
    assert isinstance(outcome, str)
    assert "Error searching knowledge base" in outcome
def test_search_tool_does_not_leak_sensitive_info(knowledge):
    """Error strings must name the exception type but hide its message."""
    leaky = Exception("Connection failed: postgres://user:password@host:5432/db")
    knowledge.search = MagicMock(side_effect=leaky)
    search_tool = knowledge._create_search_tool(async_mode=False)
    outcome = search_tool.entrypoint(query="test")
    assert "Exception" in outcome
    assert "password" not in outcome
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/knowledge/test_knowledge_search_tools.py",
"license": "Apache License 2.0",
"lines": 89,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/knowledge/test_knowledge_topic_loading.py | from typing import List
from unittest.mock import AsyncMock, MagicMock
import pytest
from agno.knowledge.content import Content
from agno.knowledge.document import Document
from agno.knowledge.knowledge import Knowledge
from agno.vectordb.base import VectorDb
class MockVectorDb(VectorDb):
    """VectorDb test double that records inserted documents.

    Unlike a pure no-op stub, this double carries two pieces of state:
    - content_exists: the fixed answer content_hash_exists() returns
    - inserted_documents: every document passed to any insert/upsert
    """

    def __init__(self, content_exists: bool = False):
        # Fixed return value for content_hash_exists().
        self.content_exists = content_exists
        # Accumulates documents from all insert/upsert variants.
        self.inserted_documents: List[Document] = []

    def create(self) -> None:
        pass

    async def async_create(self) -> None:
        pass

    def name_exists(self, name: str) -> bool:
        return False

    async def async_name_exists(self, name: str) -> bool:
        return False

    def id_exists(self, id: str) -> bool:
        return False

    def content_hash_exists(self, content_hash: str) -> bool:
        return self.content_exists

    # --- writes: recorded so tests can assert on them ---
    def insert(self, content_hash: str, documents: List[Document], filters=None) -> None:
        self.inserted_documents.extend(documents)

    async def async_insert(self, content_hash: str, documents: List[Document], filters=None) -> None:
        self.inserted_documents.extend(documents)

    def upsert(self, content_hash: str, documents: List[Document], filters=None) -> None:
        self.inserted_documents.extend(documents)

    async def async_upsert(self, content_hash: str, documents: List[Document], filters=None) -> None:
        self.inserted_documents.extend(documents)

    # --- reads: always empty ---
    def search(self, query: str, limit: int = 5, filters=None) -> List[Document]:
        return []

    async def async_search(self, query: str, limit: int = 5, filters=None) -> List[Document]:
        return []

    # --- teardown / deletion: report success ---
    def drop(self) -> None:
        pass

    async def async_drop(self) -> None:
        pass

    def exists(self) -> bool:
        return True

    async def async_exists(self) -> bool:
        return True

    def delete(self) -> bool:
        return True

    def delete_by_id(self, id: str) -> bool:
        return True

    def delete_by_name(self, name: str) -> bool:
        return True

    def delete_by_metadata(self, metadata) -> bool:
        return True

    def update_metadata(self, content_id: str, metadata) -> None:
        pass

    def delete_by_content_id(self, content_id: str) -> bool:
        return True

    def get_supported_search_types(self) -> List[str]:
        return ["vector"]
class MockReader:
    """Reader double that records every topic it is asked to read."""

    def __init__(self):
        self.processed_topics: List[str] = []

    def _consume(self, topic) -> List[Document]:
        """Record the topic and fabricate a single document for it."""
        self.processed_topics.append(topic)
        return [Document(name=topic, content=f"Content for {topic}")]

    def read(self, topic) -> List[Document]:
        return self._consume(topic)

    async def async_read(self, topic) -> List[Document]:
        return self._consume(topic)
@pytest.fixture
def mock_reader():
    """A fresh MockReader per test."""
    return MockReader()


def test_load_from_topics_continues_after_skip(mock_reader):
    """A skipped topic must not stop later topics from being read."""
    knowledge = Knowledge(vector_db=MockVectorDb())
    # Skip decisions cycle: A -> skip, B -> load, C -> load.
    decisions = [True, False, False]
    cursor = [0]

    def fake_should_skip(content_hash, skip_if_exists):
        decision = decisions[cursor[0] % len(decisions)]
        cursor[0] += 1
        return decision

    knowledge._should_skip = fake_should_skip
    knowledge._insert_contents_db = MagicMock()
    knowledge._update_content = MagicMock()
    knowledge._handle_vector_db_insert = MagicMock()
    knowledge._build_content_hash = MagicMock(return_value="hash")
    knowledge._prepare_documents_for_insert = MagicMock()
    content = Content(topics=["A", "B", "C"], reader=mock_reader)
    knowledge._load_from_topics(content, upsert=False, skip_if_exists=True)
    assert "B" in mock_reader.processed_topics
    assert "C" in mock_reader.processed_topics
@pytest.mark.asyncio
async def test_aload_from_topics_continues_after_skip():
    """Async loading must keep going after a skipped topic."""
    knowledge = Knowledge(vector_db=MockVectorDb())
    seen_topics = []
    # Skip decisions cycle: A -> skip, B -> load, C -> load.
    decisions = [True, False, False]
    cursor = [0]

    def fake_should_skip(content_hash, skip_if_exists):
        decision = decisions[cursor[0] % len(decisions)]
        cursor[0] += 1
        return decision

    async def fake_async_read(topic):
        seen_topics.append(topic)
        return [Document(name=topic, content=f"Content for {topic}")]

    knowledge._should_skip = fake_should_skip
    knowledge._ainsert_contents_db = AsyncMock()
    knowledge._aupdate_content = AsyncMock()
    knowledge._ahandle_vector_db_insert = AsyncMock()
    knowledge._build_content_hash = MagicMock(return_value="hash")
    knowledge._prepare_documents_for_insert = MagicMock()
    stub_reader = MagicMock()
    stub_reader.async_read = fake_async_read
    content = Content(topics=["A", "B", "C"], reader=stub_reader)
    await knowledge._aload_from_topics(content, upsert=False, skip_if_exists=True)
    assert "B" in seen_topics
    assert "C" in seen_topics
def test_load_from_topics_multiple_skips():
    """Only the non-skipped topics (C and E) are read, in order."""
    knowledge = Knowledge(vector_db=MockVectorDb())
    recorder = MockReader()
    # Per-topic skip decisions: A, B, D skipped; C, E loaded.
    decisions = [True, True, False, True, False]
    cursor = [0]

    def fake_should_skip(content_hash, skip_if_exists):
        decision = decisions[cursor[0] % len(decisions)]
        cursor[0] += 1
        return decision

    knowledge._should_skip = fake_should_skip
    knowledge._insert_contents_db = MagicMock()
    knowledge._update_content = MagicMock()
    knowledge._handle_vector_db_insert = MagicMock()
    knowledge._build_content_hash = MagicMock(return_value="hash")
    knowledge._prepare_documents_for_insert = MagicMock()
    content = Content(topics=["A", "B", "C", "D", "E"], reader=recorder)
    knowledge._load_from_topics(content, upsert=False, skip_if_exists=True)
    assert recorder.processed_topics == ["C", "E"]
def test_load_from_topics_all_skipped():
    """When every topic is skipped, nothing is read and each is updated."""
    knowledge = Knowledge(vector_db=MockVectorDb())
    recorder = MockReader()
    knowledge._should_skip = MagicMock(return_value=True)
    knowledge._insert_contents_db = MagicMock()
    knowledge._update_content = MagicMock()
    knowledge._build_content_hash = MagicMock(return_value="hash")
    content = Content(topics=["A", "B", "C"], reader=recorder)
    knowledge._load_from_topics(content, upsert=False, skip_if_exists=True)
    assert recorder.processed_topics == []
    assert knowledge._update_content.call_count == 3
def test_load_from_topics_lightrag_continues():
    """The LightRag code path processes every topic."""
    knowledge = Knowledge(vector_db=MockVectorDb())
    # Bug fix: renaming the class object leaks into every later test —
    # __class__ is the shared MockVectorDb type, not a per-instance copy —
    # so the original name must be restored when this test finishes.
    original_name = knowledge.vector_db.__class__.__name__
    knowledge.vector_db.__class__.__name__ = "LightRag"
    try:
        processed_topics = []
        knowledge._process_lightrag_content = MagicMock(
            side_effect=lambda content, origin: processed_topics.append(content.name)
        )
        knowledge._build_content_hash = MagicMock(return_value="hash")
        knowledge._insert_contents_db = MagicMock()
        mock_reader = MagicMock()
        content = Content(topics=["A", "B", "C"], reader=mock_reader)
        knowledge._load_from_topics(content, upsert=False, skip_if_exists=False)
        assert len(processed_topics) == 3
        assert "A" in processed_topics
        assert "B" in processed_topics
        assert "C" in processed_topics
    finally:
        knowledge.vector_db.__class__.__name__ = original_name
@pytest.mark.asyncio
async def test_aload_from_topics_lightrag_continues():
    """The async LightRag code path processes every topic."""
    knowledge = Knowledge(vector_db=MockVectorDb())
    # Bug fix: restore the shared class name after the test (renaming
    # __class__.__name__ would otherwise leak into every later test).
    original_name = knowledge.vector_db.__class__.__name__
    knowledge.vector_db.__class__.__name__ = "LightRag"
    try:
        processed_topics = []

        async def mock_process_lightrag(content, origin):
            processed_topics.append(content.name)

        knowledge._aprocess_lightrag_content = mock_process_lightrag
        knowledge._build_content_hash = MagicMock(return_value="hash")
        knowledge._ainsert_contents_db = AsyncMock()
        mock_reader = MagicMock()
        content = Content(topics=["A", "B", "C"], reader=mock_reader)
        await knowledge._aload_from_topics(content, upsert=False, skip_if_exists=False)
        assert len(processed_topics) == 3
    finally:
        knowledge.vector_db.__class__.__name__ = original_name
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/knowledge/test_knowledge_topic_loading.py",
"license": "Apache License 2.0",
"lines": 171,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:cookbook/07_knowledge/quickstart.py | from agno.agent import Agent
from agno.knowledge.embedder.google import GeminiEmbedder
from agno.knowledge.knowledge import Knowledge
from agno.models.google import Gemini
from agno.vectordb.chroma import ChromaDb
from agno.vectordb.search import SearchType
# Create a knowledge base with ChromaDB
knowledge = Knowledge(
vector_db=ChromaDb(
collection="docs",
path="tmp/chromadb",
persistent_client=True,
search_type=SearchType.hybrid,
embedder=GeminiEmbedder(id="gemini-embedding-001"),
),
)
# Load content into the knowledge base
knowledge.insert(url="https://docs.agno.com/introduction.md", skip_if_exists=True)
# Create an agent that searches the knowledge base
agent = Agent(
model=Gemini(id="gemini-3-flash-preview"),
knowledge=knowledge,
search_knowledge=True,
markdown=True,
)
agent.print_response("What is Agno?", stream=True)
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/07_knowledge/quickstart.py",
"license": "Apache License 2.0",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/groq/research_agent_seltz.py | """Run `pip install groq seltz agno` to install dependencies."""
from pathlib import Path
from textwrap import dedent
from agno.agent import Agent
from agno.models.groq import Groq
from agno.tools.seltz import SeltzTools
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
cwd = Path(__file__).parent.resolve()
tmp = cwd.joinpath("tmp")
if not tmp.exists():
tmp.mkdir(exist_ok=True, parents=True)
agent = Agent(
model=Groq(id="llama-3.3-70b-versatile"),
tools=[SeltzTools(max_documents=10, show_results=True)],
description="You are an advanced AI researcher writing a report on a topic.",
instructions=[
"For the provided topic, run 3 different searches.",
"Read the results carefully and prepare a report.",
"Focus on facts and make sure to provide references.",
],
expected_output=dedent(
"""\
An engaging, informative, and well-structured report in markdown format:
## Engaging Report Title
### Overview
{give a brief introduction of the report and why the user should read this report}
{make this section engaging and create a hook for the reader}
### Section 1
{break the report into sections}
{provide details/facts/processes in this section}
... more sections as necessary...
### Takeaways
{provide key takeaways from the article}
### References
- [Reference 1](link)
- [Reference 2](link)
- [Reference 3](link)
### About the Author
{write a made up for yourself, give yourself a cyberpunk name and a title}
- published on {date} in dd/mm/yyyy
"""
),
markdown=True,
add_datetime_to_context=True,
save_response_to_file=str(tmp.joinpath("{message}.md")),
)
agent.print_response("Recent advances in AI safety", stream=True)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/groq/research_agent_seltz.py",
"license": "Apache License 2.0",
"lines": 54,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/91_tools/seltz_tools.py | """Seltz Tools Example.
Run `pip install seltz agno openai python-dotenv` to install dependencies.
"""
from agno.agent import Agent
from agno.models.openai import OpenAIResponses
from agno.tools.seltz import SeltzTools
from dotenv import load_dotenv
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
load_dotenv()
agent = Agent(
model=OpenAIResponses(id="gpt-5.2"),
tools=[SeltzTools(show_results=True)],
markdown=True,
)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
agent.print_response("Search for current AI safety reports", markdown=True)
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/91_tools/seltz_tools.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/agno/tools/seltz.py | import json
from os import getenv
from typing import Any, List, Optional
from agno.tools import Toolkit
from agno.utils.log import log_info, logger
try:
from seltz import Includes, Seltz
from seltz.exceptions import (
SeltzAPIError,
SeltzAuthenticationError,
SeltzConfigurationError,
SeltzConnectionError,
SeltzError,
SeltzRateLimitError,
SeltzTimeoutError,
)
except ImportError as exc:
raise ImportError("`seltz` not installed. Please install using `pip install seltz`") from exc
class SeltzTools(Toolkit):
    """Toolkit for interacting with the Seltz AI-powered search API.

    Args:
        api_key: Seltz API key. If not provided, uses the `SELTZ_API_KEY` env var.
        endpoint: Optional Seltz gRPC endpoint. If not provided, uses SDK default.
        insecure: Use an insecure gRPC channel. Defaults to False.
        max_documents: Default maximum number of documents to return per search.
        context: Default context to improve search quality (e.g., "user is looking for Python docs").
        profile: Default search profile to use for ranking.
        show_results: Log search results for debugging.
        enable_search: Enable search tool functionality. Defaults to True.
        all: Enable all tools. Overrides individual flags when True. Defaults to False.
    """

    def __init__(
        self,
        api_key: Optional[str] = None,
        endpoint: Optional[str] = None,
        insecure: bool = False,
        max_documents: int = 10,
        context: Optional[str] = None,
        profile: Optional[str] = None,
        show_results: bool = False,
        enable_search: bool = True,
        all: bool = False,
        **kwargs: Any,
    ):
        # Fail fast on an invalid default limit; per-call limits are re-checked
        # in search_seltz.
        if max_documents <= 0:
            raise ValueError("max_documents must be greater than 0")
        self.api_key = api_key or getenv("SELTZ_API_KEY")
        if not self.api_key:
            # Log instead of raising so the toolkit can still be constructed;
            # search_seltz returns an error string when no client exists.
            logger.error("SELTZ_API_KEY not set. Please set the SELTZ_API_KEY environment variable.")
        self.endpoint = endpoint
        self.insecure = insecure
        self.max_documents = max_documents
        self.context = context
        self.profile = profile
        self.show_results = show_results
        # Client stays None without an API key; search_seltz guards on this.
        self.client: Optional[Seltz] = None
        if self.api_key:
            # Only forward endpoint/insecure when explicitly set so the SDK's
            # own defaults apply otherwise.
            client_kwargs: dict[str, Any] = {"api_key": self.api_key}
            if self.endpoint:
                client_kwargs["endpoint"] = self.endpoint
            if self.insecure:
                client_kwargs["insecure"] = self.insecure
            self.client = Seltz(**client_kwargs)
        tools: List[Any] = []
        # `all` overrides the individual enable flag.
        if all or enable_search:
            tools.append(self.search_seltz)
        super().__init__(name="seltz", tools=tools, **kwargs)

    def _parse_documents(self, documents: Any) -> str:
        """Convert Seltz documents into JSON for the agent."""
        parsed: List[dict[str, Any]] = []
        for doc in documents or []:
            # New SDK documents have a to_dict() method
            if hasattr(doc, "to_dict"):
                doc_dict = doc.to_dict()
            else:
                # Fallback for compatibility
                # NOTE(review): this path drops empty-string content (truthy
                # check) while keeping url whenever it is not None — confirm
                # the asymmetry is intended.
                doc_dict = {}
                url = getattr(doc, "url", None)
                content = getattr(doc, "content", None)
                if url is not None:
                    doc_dict["url"] = url
                if content:
                    doc_dict["content"] = content
            # Documents that yielded no fields are skipped entirely.
            if doc_dict:
                parsed.append(doc_dict)
        return json.dumps(parsed, indent=4, ensure_ascii=False)

    def search_seltz(
        self,
        query: str,
        max_documents: Optional[int] = None,
        context: Optional[str] = None,
    ) -> str:
        """Use this function to search Seltz for a query.

        Args:
            query: The query to search for.
            max_documents: Maximum number of documents to return. Defaults to toolkit `max_documents`.
            context: Additional context to improve search quality. Defaults to toolkit `context`.

        Returns:
            str: Search results in JSON format.
        """
        # All failure modes return error strings (not raises) so the agent can
        # read and react to them.
        if not query:
            return "Error: Please provide a query to search for."
        if not self.client:
            return "Error: SELTZ_API_KEY not set. Please set the SELTZ_API_KEY environment variable."
        # Per-call overrides fall back to the toolkit-level defaults.
        limit = max_documents if max_documents is not None else self.max_documents
        if limit <= 0:
            return "Error: max_documents must be greater than 0."
        search_context = context if context is not None else self.context
        try:
            if self.show_results:
                log_info(f"Searching Seltz for: {query}")
            includes = Includes(max_documents=limit)
            response = self.client.search(
                query=query,
                includes=includes,
                context=search_context,
                profile=self.profile,
            )
            result = self._parse_documents(response.documents)
            if self.show_results:
                log_info(result)
            return result
        # Known SDK errors are listed explicitly; SeltzError last as catch-all.
        except (
            SeltzConfigurationError,
            SeltzAuthenticationError,
            SeltzConnectionError,
            SeltzTimeoutError,
            SeltzRateLimitError,
            SeltzAPIError,
            SeltzError,
        ) as exc:
            logger.error(f"Seltz error: {exc}")
            return f"Error: {exc}"
        except Exception as exc:
            logger.error(f"Failed to search Seltz: {exc}")
            return f"Error: {exc}"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/tools/seltz.py",
"license": "Apache License 2.0",
"lines": 136,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/tests/unit/tools/test_seltz.py | """Unit tests for SeltzTools class."""
import json
from unittest.mock import Mock, patch
import pytest
pytest.importorskip("seltz")
from seltz.exceptions import (
SeltzAPIError,
SeltzAuthenticationError,
SeltzConnectionError,
SeltzRateLimitError,
SeltzTimeoutError,
)
from agno.tools.seltz import SeltzTools
@pytest.fixture
def mock_seltz_client():
    """Yield a mocked Seltz client with the SDK constructor patched out."""
    with patch("agno.tools.seltz.Seltz") as seltz_cls, patch("agno.tools.seltz.Includes"):
        client = Mock()
        seltz_cls.return_value = client
        return client
@pytest.fixture
def seltz_tools(mock_seltz_client):
    """SeltzTools wired to the mocked client and a fake API key."""
    with patch("agno.tools.seltz.Includes"), patch.dict("os.environ", {"SELTZ_API_KEY": "test_key"}):
        toolkit = SeltzTools()
        toolkit.client = mock_seltz_client
        return toolkit
def create_mock_document(url: str, content: str | None = None):
"""Helper function to create mock document that mimics SDK Document."""
doc = Mock()
doc.url = url
doc.content = content
# Mock to_dict() method from new SDK
def to_dict():
result = {}
if url is not None:
result["url"] = url
if content is not None:
result["content"] = content
return result
doc.to_dict = to_dict
return doc
def test_init_with_api_key():
    """The env-provided key is forwarded to the Seltz client constructor."""
    with (
        patch("agno.tools.seltz.Seltz") as seltz_cls,
        patch("agno.tools.seltz.Includes"),
        patch.dict("os.environ", {"SELTZ_API_KEY": "test_key"}),
    ):
        SeltzTools()
        seltz_cls.assert_called_once_with(api_key="test_key")
def test_init_with_search_disabled():
    """Disabling search keeps search_seltz out of the registered tools."""
    with patch("agno.tools.seltz.Includes"), patch.dict("os.environ", {"SELTZ_API_KEY": "test_key"}):
        toolkit = SeltzTools(enable_search=False)
    registered = [func.name for func in toolkit.functions.values()]
    assert "search_seltz" not in registered
def test_init_with_custom_endpoint():
    """Custom endpoint and insecure flag are forwarded to the client."""
    with (
        patch("agno.tools.seltz.Seltz") as seltz_cls,
        patch("agno.tools.seltz.Includes"),
        patch.dict("os.environ", {"SELTZ_API_KEY": "test_key"}),
    ):
        SeltzTools(endpoint="custom.endpoint.ai", insecure=True)
        seltz_cls.assert_called_once_with(api_key="test_key", endpoint="custom.endpoint.ai", insecure=True)
def test_search_success(seltz_tools, mock_seltz_client):
    """A successful search returns parsed documents and forwards all params."""
    response = Mock()
    response.documents = [create_mock_document(url="https://example.com", content="Example content")]
    mock_seltz_client.search.return_value = response

    with patch("agno.tools.seltz.Includes") as includes_cls:
        includes_obj = Mock()
        includes_cls.return_value = includes_obj
        parsed = json.loads(seltz_tools.search_seltz("test query", max_documents=3))

    assert parsed == [{"url": "https://example.com", "content": "Example content"}]
    includes_cls.assert_called_with(max_documents=3)
    mock_seltz_client.search.assert_called_with(
        query="test query",
        includes=includes_obj,
        context=None,
        profile=None,
    )
def test_search_default_limit(seltz_tools, mock_seltz_client):
    """Omitting max_documents falls back to the toolkit default of 10."""
    response = Mock()
    response.documents = [create_mock_document(url="https://example.com")]
    mock_seltz_client.search.return_value = response

    with patch("agno.tools.seltz.Includes") as includes_cls:
        includes_obj = Mock()
        includes_cls.return_value = includes_obj
        parsed = json.loads(seltz_tools.search_seltz("test query"))

    assert parsed == [{"url": "https://example.com"}]
    includes_cls.assert_called_with(max_documents=10)
    mock_seltz_client.search.assert_called_with(
        query="test query",
        includes=includes_obj,
        context=None,
        profile=None,
    )
def test_parse_documents_with_missing_fields(seltz_tools):
    """Documents without content serialize with only their url."""
    docs = [create_mock_document(url="https://example.com")]
    parsed = json.loads(seltz_tools._parse_documents(docs))
    assert parsed == [{"url": "https://example.com"}]
def test_search_empty_query(seltz_tools):
    """An empty query is rejected with an error string."""
    message = seltz_tools.search_seltz("")
    assert "Error" in message
    assert "provide a query" in message
def test_search_without_api_key():
    """search_seltz reports a missing key instead of raising.

    The previous version patched ``__init__`` out entirely and hand-assembled
    the instance attributes, which silently skipped the constructor's real
    no-key path. Constructing the toolkit with an empty SELTZ_API_KEY
    exercises the actual code (no client is created) with the same assertion.
    """
    with (
        patch("agno.tools.seltz.Seltz"),
        patch("agno.tools.seltz.Includes"),
        patch.dict("os.environ", {"SELTZ_API_KEY": ""}, clear=False),
    ):
        tools = SeltzTools()
    # An empty key means no client is built.
    assert tools.client is None
    result = tools.search_seltz("test query")
    assert "SELTZ_API_KEY not set" in result
def test_init_invalid_max_documents():
    """A non-positive max_documents is rejected at construction time."""
    with patch("agno.tools.seltz.Includes"), patch.dict("os.environ", {"SELTZ_API_KEY": "test_key"}):
        with pytest.raises(ValueError, match="max_documents must be greater than 0"):
            SeltzTools(max_documents=0)
def test_search_invalid_max_documents(seltz_tools):
    """A non-positive per-call max_documents yields an error string."""
    message = seltz_tools.search_seltz("test query", max_documents=0)
    assert "Error" in message
    assert "max_documents must be greater than 0" in message
def test_parse_documents_skips_empty(seltz_tools):
    """Documents whose to_dict() is empty are dropped from the output."""
    blank = Mock()
    blank.url = None
    blank.content = None
    # New-SDK shape: to_dict() exists but yields nothing.
    blank.to_dict = Mock(return_value={})
    assert json.loads(seltz_tools._parse_documents([blank])) == []
def test_parse_documents_fallback_without_to_dict(seltz_tools):
    """Old-SDK documents (no to_dict) fall back to url/content attributes."""
    legacy = Mock(spec=["url", "content"])
    legacy.url = "https://example.com"
    legacy.content = "Test content"
    parsed = json.loads(seltz_tools._parse_documents([legacy]))
    assert parsed == [{"url": "https://example.com", "content": "Test content"}]
def test_init_with_all_flag():
    """all=True registers search_seltz even when enable_search is False."""
    with (
        patch("agno.tools.seltz.Seltz"),
        patch("agno.tools.seltz.Includes"),
        patch.dict("os.environ", {"SELTZ_API_KEY": "test_key"}),
    ):
        toolkit = SeltzTools(all=True, enable_search=False)
    assert "search_seltz" in [func.name for func in toolkit.functions.values()]
@pytest.mark.parametrize(
    "exception",
    [
        Exception("Unknown error"),
        SeltzAuthenticationError("Invalid API key"),
        SeltzConnectionError("Connection failed"),
        SeltzTimeoutError("Request timed out"),
        SeltzRateLimitError("Rate limit exceeded"),
        SeltzAPIError("API error"),
    ],
)
def test_error_handling(seltz_tools, mock_seltz_client, exception):
    """Every SDK exception (and unknown errors) surfaces as an error string."""
    mock_seltz_client.search.side_effect = exception
    with patch("agno.tools.seltz.Includes"):
        assert "Error" in seltz_tools.search_seltz("test query")
def test_search_with_context(seltz_tools, mock_seltz_client):
    """A per-call context is forwarded to the SDK search call."""
    response = Mock()
    response.documents = [create_mock_document(url="https://example.com", content="Example content")]
    mock_seltz_client.search.return_value = response

    with patch("agno.tools.seltz.Includes") as includes_cls:
        includes_obj = Mock()
        includes_cls.return_value = includes_obj
        parsed = json.loads(seltz_tools.search_seltz("test query", context="looking for Python tutorials"))

    assert len(parsed) == 1
    mock_seltz_client.search.assert_called_with(
        query="test query",
        includes=includes_obj,
        context="looking for Python tutorials",
        profile=None,
    )
def test_init_with_context_and_profile():
    """Default context and profile are stored on the toolkit."""
    with (
        patch("agno.tools.seltz.Seltz"),
        patch("agno.tools.seltz.Includes"),
        patch.dict("os.environ", {"SELTZ_API_KEY": "test_key"}),
    ):
        toolkit = SeltzTools(context="default context", profile="test_profile")
    assert toolkit.context == "default context"
    assert toolkit.profile == "test_profile"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/tools/test_seltz.py",
"license": "Apache License 2.0",
"lines": 212,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/integration/db/surrealdb/test_surrealdb_traces.py | # Run SurrealDB in a container before running this script
#
# ```
# docker run --rm --pull always -p 8000:8000 surrealdb/surrealdb:latest start --user root --pass root
# ```
#
# or with
#
# ```
# surreal start -u root -p root
# ```
#
# Then, run this test like this:
#
# ```
# pytest libs/agno/tests/integration/db/surrealdb/test_surrealdb_traces.py
# ```
import time
from datetime import datetime, timezone
import pytest
from surrealdb import RecordID
from agno.db.surrealdb import SurrealDb
from agno.debug import enable_debug_mode
from agno.tracing.schemas import Span, Trace
# Enable agno debug mode so failures in the DB calls below are easier to trace.
enable_debug_mode()
# SurrealDB connection parameters (must match the docker/surreal commands above)
SURREALDB_URL = "ws://localhost:8000"
SURREALDB_USER = "root"
SURREALDB_PASSWORD = "root"
SURREALDB_NAMESPACE = "test"
SURREALDB_DATABASE = "test"
@pytest.fixture
def db() -> SurrealDb:
    """Connect to the locally running SurrealDB instance for this test run."""
    credentials = {"username": SURREALDB_USER, "password": SURREALDB_PASSWORD}
    return SurrealDb(None, SURREALDB_URL, credentials, SURREALDB_NAMESPACE, SURREALDB_DATABASE)
def test_crud_traces(db: SurrealDb):
    """A trace can be upserted and read back by id."""
    now = datetime.now(timezone.utc)
    trace_kwargs = dict(
        trace_id="1",
        name="test_trace",
        status="OK",
        start_time=now,
        end_time=now,
        duration_ms=0,
        total_spans=1,
        error_count=0,
        run_id="1",
        session_id="1",
        user_id=None,
        agent_id=None,
        team_id=None,
        workflow_id=None,
        created_at=now,
    )
    db.upsert_trace(Trace(**trace_kwargs))

    stored = db.get_trace("1")
    assert stored is not None
    assert stored.trace_id == "1"
def test_trace_created_at_preserved_on_update(db: SurrealDb):
    """Upserting an existing trace must not change its stored created_at."""
    now = datetime.now(timezone.utc)
    trace = Trace(
        trace_id="2",
        name="test_trace",
        status="OK",
        start_time=now,
        end_time=now,
        duration_ms=0,
        total_spans=1,
        error_count=0,
        run_id="1",
        session_id="1",
        user_id=None,
        agent_id=None,
        team_id=None,
        workflow_id=None,
        created_at=now,
    )
    db.upsert_trace(trace)

    record_id = RecordID(db._get_table("traces"), "2")

    def stored_created_at():
        # Read the raw row so we see exactly what is persisted.
        row = db._query_one("SELECT * FROM ONLY $record_id", {"record_id": record_id}, dict)
        assert row is not None
        return row.get("created_at")

    first_created_at = stored_created_at()
    # Ensure enough wall-clock time passes for a timestamp change to be visible.
    time.sleep(1.1)

    trace.status = "ERROR"
    trace.end_time = datetime.now(timezone.utc)
    db.upsert_trace(trace)

    # created_at should not change on update
    assert stored_created_at() == first_created_at
def test_crud_spans(db: SurrealDb):
    """A span can be created under a trace and read back by id."""
    now = datetime.now(timezone.utc)
    # Spans require a parent trace, so create that first.
    db.upsert_trace(
        Trace(
            trace_id="3",
            name="test_trace",
            status="OK",
            start_time=now,
            end_time=now,
            duration_ms=0,
            total_spans=1,
            error_count=0,
            run_id="1",
            session_id="1",
            user_id=None,
            agent_id=None,
            team_id=None,
            workflow_id=None,
            created_at=now,
        )
    )
    db.create_span(
        Span(
            span_id="1",
            trace_id="3",
            parent_span_id=None,
            name="test_span",
            span_kind="INTERNAL",
            status_code="OK",
            status_message=None,
            start_time=now,
            end_time=now,
            duration_ms=0,
            attributes={},
            created_at=now,
        )
    )

    stored = db.get_span("1")
    assert stored is not None
    assert stored.span_id == "1"
def test_span_created_at_preserved_on_update(db: SurrealDb):
    """Re-creating an existing span must not change its stored created_at."""
    now = datetime.now(timezone.utc)
    # Spans require a parent trace, so create that first.
    db.upsert_trace(
        Trace(
            trace_id="4",
            name="test_trace",
            status="OK",
            start_time=now,
            end_time=now,
            duration_ms=0,
            total_spans=1,
            error_count=0,
            run_id="1",
            session_id="1",
            user_id=None,
            agent_id=None,
            team_id=None,
            workflow_id=None,
            created_at=now,
        )
    )
    span = Span(
        span_id="2",
        trace_id="4",
        parent_span_id=None,
        name="test_span",
        span_kind="INTERNAL",
        status_code="OK",
        status_message=None,
        start_time=now,
        end_time=now,
        duration_ms=0,
        attributes={},
        created_at=now,
    )
    db.create_span(span)

    record_id = RecordID(db._get_table("spans"), "2")

    def stored_created_at():
        # Read the raw row so we see exactly what is persisted.
        row = db._query_one("SELECT * FROM ONLY $record_id", {"record_id": record_id}, dict)
        assert row is not None
        return row.get("created_at")

    first_created_at = stored_created_at()
    # Ensure enough wall-clock time passes for a timestamp change to be visible.
    time.sleep(1.1)

    span.status_code = "ERROR"
    span.end_time = datetime.now(timezone.utc)
    db.create_span(span)

    # created_at should not change on update
    assert stored_created_at() == first_created_at
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/db/surrealdb/test_surrealdb_traces.py",
"license": "Apache License 2.0",
"lines": 188,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:cookbook/90_models/moonshot/basic.py | """
Moonshot Basic
==============
Cookbook example for `moonshot/basic.py`.
"""
from agno.agent import Agent
from agno.models.moonshot import MoonShot
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
agent = Agent(model=MoonShot(id="kimi-k2-thinking"), markdown=True)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
# --- Sync ---
agent.print_response("Share a 2 sentence horror story.")
# --- Sync + Streaming ---
agent.print_response("Share a 2 sentence horror story.", stream=True)
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/moonshot/basic.py",
"license": "Apache License 2.0",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/moonshot/tool_use.py | """
Moonshot Tool Use
=================
Cookbook example for `moonshot/tool_use.py`.
"""
from agno.agent import Agent
from agno.models.moonshot import MoonShot
from agno.tools.websearch import WebSearchTools
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
agent = Agent(
model=MoonShot(id="kimi-k2-thinking"),
markdown=True,
tools=[WebSearchTools()],
)
agent.print_response("What is happening in France?", stream=True)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/moonshot/tool_use.py",
"license": "Apache License 2.0",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/agno/models/moonshot/moonshot.py | from dataclasses import dataclass, field
from os import getenv
from typing import Any, Dict, Optional
from agno.exceptions import ModelAuthenticationError
from agno.models.openai.like import OpenAILike
@dataclass
class MoonShot(OpenAILike):
    """
    A class for interacting with MoonShot models.

    Attributes:
        id (str): The model id. Defaults to "kimi-k2-thinking".
        name (str): The model name. Defaults to "Moonshot".
        provider (str): The provider name. Defaults to "Moonshot".
        api_key (Optional[str]): The API key.
        base_url (str): The base URL. Defaults to "https://api.moonshot.ai/v1".
    """

    id: str = "kimi-k2-thinking"
    name: str = "Moonshot"
    provider: str = "Moonshot"
    api_key: Optional[str] = field(default_factory=lambda: getenv("MOONSHOT_API_KEY"))
    base_url: str = "https://api.moonshot.ai/v1"

    def _get_client_params(self) -> Dict[str, Any]:
        """Assemble keyword arguments for the OpenAI-compatible client.

        Raises:
            ModelAuthenticationError: If no API key is configured.
        """
        # Resolve the key lazily so an env var set after construction still works.
        self.api_key = self.api_key or getenv("MOONSHOT_API_KEY")
        if not self.api_key:
            raise ModelAuthenticationError(
                message="MOONSHOT_API_KEY not set. Please set the MOONSHOT_API_KEY environment variable.",
                model_name=self.name,
            )

        candidate_params: Dict[str, Any] = {
            "api_key": self.api_key,
            "organization": self.organization,
            "base_url": self.base_url,
            "timeout": self.timeout,
            "max_retries": self.max_retries,
            "default_headers": self.default_headers,
            "default_query": self.default_query,
        }
        # Drop unset values so the client library applies its own defaults.
        client_params: Dict[str, Any] = {key: value for key, value in candidate_params.items() if value is not None}
        # Caller-supplied overrides win over everything above.
        if self.client_params:
            client_params.update(self.client_params)
        return client_params
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/models/moonshot/moonshot.py",
"license": "Apache License 2.0",
"lines": 47,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/tests/unit/models/moonshot/test_moonshot.py | import os
from unittest.mock import patch
import pytest
from agno.exceptions import ModelAuthenticationError
from agno.models.moonshot import MoonShot
def test_moonshot_initialization_with_api_key():
    """An explicit api_key is stored alongside the default base_url."""
    moonshot = MoonShot(id="kimi-k2-thinking", api_key="test-api-key")
    assert moonshot.id == "kimi-k2-thinking"
    assert moonshot.api_key == "test-api-key"
    assert moonshot.base_url == "https://api.moonshot.ai/v1"
def test_moonshot_initialization_without_api_key():
    """With no key anywhere, building client params must raise."""
    with patch.dict(os.environ, {}, clear=True):
        moonshot = MoonShot(id="kimi-k2-thinking")
        with pytest.raises(ModelAuthenticationError):
            moonshot._get_client_params()
def test_moonshot_initialization_with_env_api_key():
    """The MOONSHOT_API_KEY env var seeds the api_key field."""
    with patch.dict(os.environ, {"MOONSHOT_API_KEY": "env-api-key"}):
        assert MoonShot(id="kimi-k2-thinking").api_key == "env-api-key"
def test_moonshot_client_params():
    """Client params carry the api_key and the Moonshot base_url."""
    params = MoonShot(id="kimi-k2-thinking", api_key="test-api-key")._get_client_params()
    assert params["api_key"] == "test-api-key"
    assert params["base_url"] == "https://api.moonshot.ai/v1"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/models/moonshot/test_moonshot.py",
"license": "Apache License 2.0",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:cookbook/91_tools/unsplash_tools.py | """Unsplash Tools Example
This example demonstrates how to use the UnsplashTools toolkit with an AI agent
to search for and retrieve high-quality, royalty-free images from Unsplash.
UnsplashTools provides:
- search_photos: Search photos by keyword with filters (orientation, color)
- get_photo: Get detailed info about a specific photo
- get_random_photo: Get random photo(s) with optional query
- download_photo: Track downloads for Unsplash API compliance
Setup:
1. Get a free API key from https://unsplash.com/developers
2. Set the environment variable: export UNSPLASH_ACCESS_KEY="your_access_key"
3. Install dependencies: pip install openai agno
Usage:
python unsplash_tools.py
"""
from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.tools.unsplash import UnsplashTools
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# Example 1: Basic usage with default tools
# By default, search_photos, get_photo, and get_random_photo are enabled
agent = Agent(
model=OpenAIChat(id="gpt-4o"),
tools=[UnsplashTools()],
instructions=[
"You are a helpful assistant that can search for high-quality images.",
"When presenting image results, include the image URL, author name, and description.",
"Always credit the photographer by including their name and Unsplash profile link.",
],
markdown=True,
)
# Example 2: Enable all tools including download tracking
# Use this when you need to comply with Unsplash's download tracking requirement
agent_with_download = Agent(
model=OpenAIChat(id="gpt-4o"),
tools=[UnsplashTools(enable_download_photo=True)],
instructions=[
"You are a helpful assistant that can search for high-quality images.",
"When a user wants to use/download an image, use the download_photo tool to track it.",
],
markdown=True,
)
# Example 3: Enable only specific tools
agent_search_only = Agent(
model=OpenAIChat(id="gpt-4o"),
tools=[
UnsplashTools(
enable_search_photos=True,
enable_get_photo=False,
enable_get_random_photo=False,
)
],
markdown=True,
)
# Run examples
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
# Search for photos
print("=" * 60)
print("Example 1: Searching for nature photos")
print("=" * 60)
agent.print_response(
"Find me 3 beautiful landscape photos of mountains",
stream=True,
)
# Get a random photo
print("\n" + "=" * 60)
print("Example 2: Getting a random photo")
print("=" * 60)
agent.print_response(
"Get me a random photo of a coffee shop",
stream=True,
)
# Search with filters
print("\n" + "=" * 60)
print("Example 3: Search with orientation filter")
print("=" * 60)
agent.print_response(
"Find 2 portrait-oriented photos of city skylines at night",
stream=True,
)
# --- Download Compliance Note ---
#
# The download_photo tool exists for Unsplash API compliance.
# According to Unsplash API guidelines, you must trigger the download endpoint
# when a photo is actually downloaded or used in your application.
#
# What download_photo does:
# - Calls /photos/{id}/download to increment the photographer's download count
# - Returns a time-limited download URL
# - Does NOT download the image file itself
#
# This is required for proper attribution tracking and is part of Unsplash's
# terms of service. The tool is disabled by default (enable_download_photo=False)
# since it's only needed when actually using/downloading images.
#
# See: https://unsplash.com/documentation#track-a-photo-download
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/91_tools/unsplash_tools.py",
"license": "Apache License 2.0",
"lines": 101,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/tools/unsplash.py | """Unsplash Tools for searching and retrieving high-quality, royalty-free images.
This toolkit provides AI agents with the ability to search for and retrieve images
from Unsplash, a popular platform with over 4.3 million high-quality photos.
Get your free API key at: https://unsplash.com/developers
"""
import json
from os import getenv
from typing import Any, Dict, List, Optional
from urllib.parse import urlencode
from urllib.request import Request, urlopen
from agno.tools import Toolkit
from agno.utils.log import log_debug, logger
class UnsplashTools(Toolkit):
    """A toolkit for searching and retrieving images from Unsplash.

    Unsplash provides access to over 4.3 million high-quality, royalty-free images
    that can be used for various purposes. This toolkit enables AI agents to:

    - Search for photos by keywords
    - Get detailed information about specific photos
    - Retrieve random photos with optional filters
    - Track downloads (required by Unsplash API guidelines)

    Example:
        ```python
        from agno.agent import Agent
        from agno.models.openai import OpenAIChat
        from agno.tools.unsplash import UnsplashTools

        agent = Agent(
            model=OpenAIChat(id="gpt-4o"),
            tools=[UnsplashTools()],
        )
        agent.print_response("Find me 3 photos of mountains at sunset")
        ```
    """

    # Filter values accepted by the Unsplash API. Invalid values are silently
    # dropped (the request is sent without the filter) rather than raising.
    VALID_ORIENTATIONS = ("landscape", "portrait", "squarish")
    VALID_COLORS = (
        "black_and_white",
        "black",
        "white",
        "yellow",
        "orange",
        "red",
        "purple",
        "magenta",
        "green",
        "teal",
        "blue",
    )

    def __init__(
        self,
        access_key: Optional[str] = None,
        enable_search_photos: bool = True,
        enable_get_photo: bool = True,
        enable_get_random_photo: bool = True,
        enable_download_photo: bool = False,
        all: bool = False,  # noqa: A002 - shadows builtin, kept for API compatibility
        timeout: float = 30.0,
        **kwargs: Any,
    ):
        """Initialize the Unsplash toolkit.

        Args:
            access_key: Unsplash API access key. If not provided, will look for
                UNSPLASH_ACCESS_KEY environment variable.
            enable_search_photos: Enable the search_photos tool. Default: True.
            enable_get_photo: Enable the get_photo tool. Default: True.
            enable_get_random_photo: Enable the get_random_photo tool. Default: True.
            enable_download_photo: Enable the download_photo tool. Default: False.
            all: Enable all tools. Default: False.
            timeout: Socket timeout in seconds for Unsplash API requests.
                Default: 30.0. Prevents a stalled connection from hanging the agent.
            **kwargs: Additional arguments passed to the Toolkit base class.
        """
        self.access_key = access_key or getenv("UNSPLASH_ACCESS_KEY")
        if not self.access_key:
            logger.warning("No Unsplash API key provided. Set UNSPLASH_ACCESS_KEY environment variable.")
        self.base_url = "https://api.unsplash.com"
        self.timeout = timeout
        tools: List[Any] = []
        if all or enable_search_photos:
            tools.append(self.search_photos)
        if all or enable_get_photo:
            tools.append(self.get_photo)
        if all or enable_get_random_photo:
            tools.append(self.get_random_photo)
        if all or enable_download_photo:
            tools.append(self.download_photo)
        super().__init__(name="unsplash_tools", tools=tools, **kwargs)

    def _make_request(self, endpoint: str, params: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        """Make an authenticated request to the Unsplash API.

        Args:
            endpoint: API endpoint path (e.g., "/search/photos").
            params: Optional query parameters.

        Returns:
            JSON response as a dictionary.

        Raises:
            Exception: If the API request fails or times out.
        """
        url = f"{self.base_url}{endpoint}"
        if params:
            url = f"{url}?{urlencode(params)}"
        headers = {
            "Authorization": f"Client-ID {self.access_key}",
            "Accept-Version": "v1",
        }
        request = Request(url, headers=headers)
        # An explicit timeout ensures a stalled connection raises instead of
        # blocking the agent indefinitely.
        with urlopen(request, timeout=self.timeout) as response:
            return json.loads(response.read().decode())

    def _format_photo(self, photo: Dict[str, Any]) -> Dict[str, Any]:
        """Format photo data into a clean, consistent structure.

        Args:
            photo: Raw photo data from Unsplash API.

        Returns:
            Formatted photo dictionary with essential fields.
        """
        # Hoist nested objects once instead of re-fetching per field.
        urls = photo.get("urls", {})
        user = photo.get("user", {})
        links = photo.get("links", {})
        return {
            "id": photo.get("id"),
            # Fall back to alt_description when the author left description empty.
            "description": photo.get("description") or photo.get("alt_description"),
            "width": photo.get("width"),
            "height": photo.get("height"),
            "color": photo.get("color"),
            "created_at": photo.get("created_at"),
            "urls": {
                "raw": urls.get("raw"),
                "full": urls.get("full"),
                "regular": urls.get("regular"),
                "small": urls.get("small"),
                "thumb": urls.get("thumb"),
            },
            "author": {
                "name": user.get("name"),
                "username": user.get("username"),
                "profile_url": user.get("links", {}).get("html"),
            },
            "links": {
                "html": links.get("html"),
                "download": links.get("download"),
            },
            "likes": photo.get("likes"),
            # Only the first 5 named tags, to keep tool output compact.
            "tags": [tag.get("title") for tag in photo.get("tags", [])[:5] if tag.get("title")],
        }

    def search_photos(
        self,
        query: str,
        per_page: int = 10,
        page: int = 1,
        orientation: Optional[str] = None,
        color: Optional[str] = None,
    ) -> str:
        """Search for photos on Unsplash by keyword.

        Args:
            query: The search query string (e.g., "mountain sunset", "office workspace").
            per_page: Number of results per page (1-30). Default: 10.
            page: Page number to retrieve. Default: 1.
            orientation: Filter by orientation: "landscape", "portrait", or "squarish".
            color: Filter by color: "black_and_white", "black", "white", "yellow",
                "orange", "red", "purple", "magenta", "green", "teal", "blue".

        Returns:
            JSON string containing search results with photo details including
            URLs, author information, and metadata.
        """
        if not self.access_key:
            return "Error: No Unsplash API key provided. Set UNSPLASH_ACCESS_KEY environment variable."
        if not query:
            return "Error: Please provide a search query."
        log_debug(f"Searching Unsplash for: {query}")
        try:
            params: Dict[str, Any] = {
                "query": query,
                # Clamp to the API's allowed range rather than erroring out.
                "per_page": min(max(1, per_page), 30),
                "page": max(1, page),
            }
            if orientation and orientation in self.VALID_ORIENTATIONS:
                params["orientation"] = orientation
            if color and color in self.VALID_COLORS:
                params["color"] = color
            response = self._make_request("/search/photos", params)
            results = {
                "total": response.get("total", 0),
                "total_pages": response.get("total_pages", 0),
                "photos": [self._format_photo(photo) for photo in response.get("results", [])],
            }
            return json.dumps(results, indent=2)
        except Exception as e:
            return f"Error searching Unsplash: {e}"

    def get_photo(self, photo_id: str) -> str:
        """Get detailed information about a specific photo.

        Args:
            photo_id: The unique identifier of the photo.

        Returns:
            JSON string containing detailed photo information including
            URLs, author, metadata, EXIF data, and location if available.
        """
        if not self.access_key:
            return "Error: No Unsplash API key provided. Set UNSPLASH_ACCESS_KEY environment variable."
        if not photo_id:
            return "Error: Please provide a photo ID."
        log_debug(f"Getting Unsplash photo: {photo_id}")
        try:
            photo = self._make_request(f"/photos/{photo_id}")
            result = self._format_photo(photo)
            # Add extra details only available on single-photo requests.
            if photo.get("exif"):
                result["exif"] = {
                    "make": photo["exif"].get("make"),
                    "model": photo["exif"].get("model"),
                    "aperture": photo["exif"].get("aperture"),
                    "exposure_time": photo["exif"].get("exposure_time"),
                    "focal_length": photo["exif"].get("focal_length"),
                    "iso": photo["exif"].get("iso"),
                }
            if photo.get("location"):
                result["location"] = {
                    "name": photo["location"].get("name"),
                    "city": photo["location"].get("city"),
                    "country": photo["location"].get("country"),
                }
            result["views"] = photo.get("views")
            result["downloads"] = photo.get("downloads")
            return json.dumps(result, indent=2)
        except Exception as e:
            return f"Error getting photo: {e}"

    def get_random_photo(
        self,
        query: Optional[str] = None,
        orientation: Optional[str] = None,
        count: int = 1,
    ) -> str:
        """Get random photo(s) from Unsplash.

        Args:
            query: Optional search query to filter random photos.
            orientation: Filter by orientation: "landscape", "portrait", or "squarish".
            count: Number of random photos to return (1-30). Default: 1.

        Returns:
            JSON string containing random photo(s) data.
        """
        if not self.access_key:
            return "Error: No Unsplash API key provided. Set UNSPLASH_ACCESS_KEY environment variable."
        log_debug(f"Getting random Unsplash photo (query={query})")
        try:
            params: Dict[str, Any] = {
                "count": min(max(1, count), 30),
            }
            if query:
                params["query"] = query
            if orientation and orientation in self.VALID_ORIENTATIONS:
                params["orientation"] = orientation
            response = self._make_request("/photos/random", params)
            # The endpoint may return a list (count parameter) or a single object;
            # normalize both shapes to a list.
            if isinstance(response, list):
                photos = [self._format_photo(photo) for photo in response]
            else:
                photos = [self._format_photo(response)]
            return json.dumps({"photos": photos}, indent=2)
        except Exception as e:
            return f"Error getting random photo: {e}"

    def download_photo(self, photo_id: str) -> str:
        """Trigger a download event for a photo.

        This is required by the Unsplash API guidelines when a photo is downloaded
        or used. It helps photographers track the usage of their work. It does NOT
        download the image bytes itself; it returns a time-limited download URL.

        Args:
            photo_id: The unique identifier of the photo being downloaded.

        Returns:
            JSON string with the download URL.
        """
        if not self.access_key:
            return "Error: No Unsplash API key provided. Set UNSPLASH_ACCESS_KEY environment variable."
        if not photo_id:
            return "Error: Please provide a photo ID."
        log_debug(f"Tracking download for Unsplash photo: {photo_id}")
        try:
            response = self._make_request(f"/photos/{photo_id}/download")
            return json.dumps(
                {
                    "photo_id": photo_id,
                    "download_url": response.get("url"),
                },
                indent=2,
            )
        except Exception as e:
            return f"Error tracking download: {e}"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/tools/unsplash.py",
"license": "Apache License 2.0",
"lines": 274,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/tests/unit/tools/test_unsplash.py | """Unit tests for UnsplashTools class."""
import json
from unittest.mock import MagicMock, Mock, patch
import pytest
from agno.tools.unsplash import UnsplashTools
@pytest.fixture
def mock_urlopen():
    """Create a mock urlopen."""
    # Patch the name as imported into agno.tools.unsplash (not urllib.request),
    # so the toolkit's own calls are the ones intercepted.
    with patch("agno.tools.unsplash.urlopen") as mock:
        yield mock
@pytest.fixture
def unsplash_tools():
    """Create UnsplashTools instance with mocked API key."""
    # patch.dict restores the environment after the fixture, so the fake key
    # never leaks into other tests.
    with patch.dict("os.environ", {"UNSPLASH_ACCESS_KEY": "test_key"}):
        return UnsplashTools()
@pytest.fixture
def unsplash_tools_all():
    """Create UnsplashTools instance with all tools enabled."""
    # all=True also registers download_photo, which is off by default.
    with patch.dict("os.environ", {"UNSPLASH_ACCESS_KEY": "test_key"}):
        return UnsplashTools(all=True)
def create_mock_photo(
    photo_id: str = "abc123",
    description: str = "A beautiful sunset",
    alt_description: str = "Orange sky over mountains",
    width: int = 4000,
    height: int = 3000,
    color: str = "#FF5733",
    created_at: str = "2024-01-15T12:00:00Z",
    likes: int = 150,
):
    """Build a dict shaped like a single photo object from the Unsplash API."""
    photo_page = f"https://unsplash.com/photos/{photo_id}"
    size_variants = ("raw", "full", "regular", "small", "thumb")
    return {
        "id": photo_id,
        "description": description,
        "alt_description": alt_description,
        "width": width,
        "height": height,
        "color": color,
        "created_at": created_at,
        "likes": likes,
        # One image URL per size variant, keyed by variant name.
        "urls": {variant: f"https://images.unsplash.com/photo-{photo_id}?format={variant}" for variant in size_variants},
        "user": {
            "name": "John Photographer",
            "username": "johnphoto",
            "links": {"html": "https://unsplash.com/@johnphoto"},
        },
        "links": {
            "html": photo_page,
            "download": f"{photo_page}/download",
        },
        "tags": [{"title": label} for label in ("sunset", "mountain", "nature")],
    }
def create_mock_response(data):
    """Build a context-manager mock mimicking the object returned by urlopen."""
    response = MagicMock()
    # The toolkit calls response.read().decode() inside a `with` block.
    response.read.return_value = json.dumps(data).encode()
    response.__enter__ = Mock(return_value=response)
    response.__exit__ = Mock(return_value=False)
    return response
class TestUnsplashToolsInit:
    """Tests for UnsplashTools initialization."""
    def test_init_with_env_api_key(self):
        """Test initialization with API key from environment."""
        with patch.dict("os.environ", {"UNSPLASH_ACCESS_KEY": "env_test_key"}):
            tools = UnsplashTools()
            assert tools.access_key == "env_test_key"
    def test_init_with_provided_api_key(self):
        """Test initialization with provided API key."""
        # An explicit access_key takes precedence over the environment.
        tools = UnsplashTools(access_key="provided_key")
        assert tools.access_key == "provided_key"
    def test_init_without_api_key_logs_warning(self):
        """Test initialization without API key logs a warning."""
        with patch.dict("os.environ", {}, clear=True):
            with patch("agno.tools.unsplash.logger") as mock_logger:
                # Remove UNSPLASH_ACCESS_KEY if present
                # NOTE(review): patch.dict(..., clear=True) already empties
                # os.environ for the duration of the block, so this manual
                # pop/restore looks redundant — confirm before simplifying.
                import os
                old_key = os.environ.pop("UNSPLASH_ACCESS_KEY", None)
                try:
                    tools = UnsplashTools()
                    mock_logger.warning.assert_called_once()
                    assert tools.access_key is None
                finally:
                    if old_key:
                        os.environ["UNSPLASH_ACCESS_KEY"] = old_key
    def test_init_with_default_tools(self):
        """Test initialization with default tools."""
        with patch.dict("os.environ", {"UNSPLASH_ACCESS_KEY": "test_key"}):
            tools = UnsplashTools()
            function_names = [func.name for func in tools.functions.values()]
            assert "search_photos" in function_names
            assert "get_photo" in function_names
            assert "get_random_photo" in function_names
            # download_photo is disabled by default
            assert "download_photo" not in function_names
    def test_init_with_all_tools(self):
        """Test initialization with all tools enabled."""
        with patch.dict("os.environ", {"UNSPLASH_ACCESS_KEY": "test_key"}):
            tools = UnsplashTools(all=True)
            function_names = [func.name for func in tools.functions.values()]
            assert "search_photos" in function_names
            assert "get_photo" in function_names
            assert "get_random_photo" in function_names
            assert "download_photo" in function_names
    def test_init_with_selective_tools(self):
        """Test initialization with only selected tools."""
        with patch.dict("os.environ", {"UNSPLASH_ACCESS_KEY": "test_key"}):
            tools = UnsplashTools(
                enable_search_photos=True,
                enable_get_photo=False,
                enable_get_random_photo=True,
                enable_download_photo=False,
            )
            function_names = [func.name for func in tools.functions.values()]
            assert "search_photos" in function_names
            assert "get_photo" not in function_names
            assert "get_random_photo" in function_names
            assert "download_photo" not in function_names
class TestSearchPhotos:
    """Tests for search_photos method."""
    def test_search_photos_success(self, unsplash_tools, mock_urlopen):
        """Test successful photo search."""
        mock_response_data = {
            "total": 100,
            "total_pages": 10,
            "results": [create_mock_photo()],
        }
        mock_urlopen.return_value = create_mock_response(mock_response_data)
        result = unsplash_tools.search_photos("sunset")
        result_data = json.loads(result)
        assert result_data["total"] == 100
        assert result_data["total_pages"] == 10
        assert len(result_data["photos"]) == 1
        assert result_data["photos"][0]["id"] == "abc123"
        assert result_data["photos"][0]["description"] == "A beautiful sunset"
    def test_search_photos_with_filters(self, unsplash_tools, mock_urlopen):
        """Test photo search with orientation and color filters."""
        mock_response_data = {
            "total": 50,
            "total_pages": 5,
            "results": [create_mock_photo()],
        }
        mock_urlopen.return_value = create_mock_response(mock_response_data)
        result = unsplash_tools.search_photos(
            "nature",
            per_page=5,
            orientation="landscape",
            color="green",
        )
        result_data = json.loads(result)
        assert result_data["total"] == 50
        assert len(result_data["photos"]) == 1
    def test_search_photos_without_api_key(self):
        """Test search_photos without API key."""
        with patch.dict("os.environ", {}, clear=True):
            import os
            old_key = os.environ.pop("UNSPLASH_ACCESS_KEY", None)
            try:
                tools = UnsplashTools()
                result = tools.search_photos("test")
                assert "Error: No Unsplash API key provided" in result
            finally:
                if old_key:
                    os.environ["UNSPLASH_ACCESS_KEY"] = old_key
    def test_search_photos_empty_query(self, unsplash_tools):
        """Test search_photos with empty query."""
        result = unsplash_tools.search_photos("")
        assert "Error: Please provide a search query" in result
    def test_search_photos_api_error(self, unsplash_tools, mock_urlopen):
        """Test search_photos with API error."""
        # The toolkit returns error strings instead of raising.
        mock_urlopen.side_effect = Exception("API Connection Error")
        result = unsplash_tools.search_photos("sunset")
        assert "Error searching Unsplash: API Connection Error" in result
    def test_search_photos_invalid_orientation_ignored(self, unsplash_tools, mock_urlopen):
        """Test that invalid orientation is ignored."""
        mock_response_data = {
            "total": 10,
            "total_pages": 1,
            "results": [create_mock_photo()],
        }
        mock_urlopen.return_value = create_mock_response(mock_response_data)
        # Invalid orientation should be ignored
        result = unsplash_tools.search_photos("test", orientation="invalid")
        result_data = json.loads(result)
        assert result_data["total"] == 10
    def test_search_photos_per_page_bounds(self, unsplash_tools, mock_urlopen):
        """Test per_page parameter is bounded correctly."""
        mock_response_data = {
            "total": 10,
            "total_pages": 1,
            "results": [],
        }
        mock_urlopen.return_value = create_mock_response(mock_response_data)
        # No assertions here: clamping happens inside the toolkit; the test
        # passes as long as out-of-range values do not raise.
        # Test with value too high (should be capped at 30)
        unsplash_tools.search_photos("test", per_page=100)
        # Test with value too low (should be at least 1)
        unsplash_tools.search_photos("test", per_page=0)
class TestGetPhoto:
    """Tests for get_photo method."""
    def test_get_photo_success(self, unsplash_tools, mock_urlopen):
        """Test successful photo retrieval."""
        photo_data = create_mock_photo()
        # Single-photo responses carry extra fields not present in search results.
        photo_data["exif"] = {
            "make": "Canon",
            "model": "EOS 5D",
            "aperture": "f/2.8",
            "exposure_time": "1/500",
            "focal_length": "50mm",
            "iso": 400,
        }
        photo_data["location"] = {
            "name": "Yosemite Valley",
            "city": "Yosemite",
            "country": "USA",
        }
        photo_data["views"] = 10000
        photo_data["downloads"] = 500
        mock_urlopen.return_value = create_mock_response(photo_data)
        result = unsplash_tools.get_photo("abc123")
        result_data = json.loads(result)
        assert result_data["id"] == "abc123"
        assert result_data["exif"]["make"] == "Canon"
        assert result_data["location"]["country"] == "USA"
        assert result_data["views"] == 10000
        assert result_data["downloads"] == 500
    def test_get_photo_without_api_key(self):
        """Test get_photo without API key."""
        with patch.dict("os.environ", {}, clear=True):
            import os
            old_key = os.environ.pop("UNSPLASH_ACCESS_KEY", None)
            try:
                tools = UnsplashTools()
                result = tools.get_photo("abc123")
                assert "Error: No Unsplash API key provided" in result
            finally:
                if old_key:
                    os.environ["UNSPLASH_ACCESS_KEY"] = old_key
    def test_get_photo_empty_id(self, unsplash_tools):
        """Test get_photo with empty photo ID."""
        result = unsplash_tools.get_photo("")
        assert "Error: Please provide a photo ID" in result
    def test_get_photo_api_error(self, unsplash_tools, mock_urlopen):
        """Test get_photo with API error."""
        mock_urlopen.side_effect = Exception("Photo not found")
        result = unsplash_tools.get_photo("nonexistent")
        assert "Error getting photo: Photo not found" in result
class TestGetRandomPhoto:
    """Tests for get_random_photo method.

    The toolkit normalizes both response shapes (a list of photos, or a single
    photo object) into a {"photos": [...]} payload; both branches are covered
    below.
    """

    def test_get_random_photo_success_single(self, unsplash_tools, mock_urlopen):
        """Test successful single random photo retrieval (list response)."""
        # The toolkit always sends a `count` parameter, in which case the API
        # returns a one-element list.
        mock_urlopen.return_value = create_mock_response([create_mock_photo()])
        result = unsplash_tools.get_random_photo()
        result_data = json.loads(result)
        assert "photos" in result_data
        assert len(result_data["photos"]) == 1
        assert result_data["photos"][0]["id"] == "abc123"

    def test_get_random_photo_single_object_response(self, unsplash_tools, mock_urlopen):
        """A bare (non-list) photo object is wrapped into a one-element list."""
        # Exercises the toolkit's `else` branch, which the list-based test
        # above never reaches.
        mock_urlopen.return_value = create_mock_response(create_mock_photo())
        result = unsplash_tools.get_random_photo()
        result_data = json.loads(result)
        assert len(result_data["photos"]) == 1
        assert result_data["photos"][0]["id"] == "abc123"

    def test_get_random_photo_success_multiple(self, unsplash_tools, mock_urlopen):
        """Test successful multiple random photos retrieval."""
        mock_photos = [
            create_mock_photo(photo_id="photo1"),
            create_mock_photo(photo_id="photo2"),
            create_mock_photo(photo_id="photo3"),
        ]
        mock_urlopen.return_value = create_mock_response(mock_photos)
        result = unsplash_tools.get_random_photo(count=3)
        result_data = json.loads(result)
        assert len(result_data["photos"]) == 3
        assert result_data["photos"][0]["id"] == "photo1"
        assert result_data["photos"][1]["id"] == "photo2"

    def test_get_random_photo_with_query(self, unsplash_tools, mock_urlopen):
        """Test random photo with query filter."""
        mock_urlopen.return_value = create_mock_response([create_mock_photo()])
        result = unsplash_tools.get_random_photo(query="nature")
        result_data = json.loads(result)
        assert len(result_data["photos"]) == 1

    def test_get_random_photo_with_orientation(self, unsplash_tools, mock_urlopen):
        """Test random photo with orientation filter."""
        mock_urlopen.return_value = create_mock_response([create_mock_photo()])
        result = unsplash_tools.get_random_photo(orientation="portrait")
        result_data = json.loads(result)
        assert len(result_data["photos"]) == 1

    def test_get_random_photo_without_api_key(self):
        """Test get_random_photo without API key."""
        with patch.dict("os.environ", {}, clear=True):
            import os
            old_key = os.environ.pop("UNSPLASH_ACCESS_KEY", None)
            try:
                tools = UnsplashTools()
                result = tools.get_random_photo()
                assert "Error: No Unsplash API key provided" in result
            finally:
                if old_key:
                    os.environ["UNSPLASH_ACCESS_KEY"] = old_key

    def test_get_random_photo_api_error(self, unsplash_tools, mock_urlopen):
        """Test get_random_photo with API error."""
        mock_urlopen.side_effect = Exception("API Error")
        result = unsplash_tools.get_random_photo()
        assert "Error getting random photo: API Error" in result

    def test_get_random_photo_count_bounds(self, unsplash_tools, mock_urlopen):
        """Test count parameter is bounded correctly."""
        mock_urlopen.return_value = create_mock_response([create_mock_photo()])
        # No assertions: clamping happens inside the toolkit; the test passes
        # as long as out-of-range values do not raise.
        # Test with value too high (should be capped at 30)
        unsplash_tools.get_random_photo(count=100)
        # Test with value too low (should be at least 1)
        unsplash_tools.get_random_photo(count=0)
class TestDownloadPhoto:
    """Tests for download_photo method."""
    # download_photo is disabled by default, so these tests use the
    # unsplash_tools_all fixture (all=True).
    def test_download_photo_success(self, unsplash_tools_all, mock_urlopen):
        """Test successful download tracking."""
        mock_response_data = {"url": "https://images.unsplash.com/photo-abc123?download=true"}
        mock_urlopen.return_value = create_mock_response(mock_response_data)
        result = unsplash_tools_all.download_photo("abc123")
        result_data = json.loads(result)
        assert result_data["photo_id"] == "abc123"
        assert "download" in result_data["download_url"]
    def test_download_photo_without_api_key(self):
        """Test download_photo without API key."""
        with patch.dict("os.environ", {}, clear=True):
            import os
            old_key = os.environ.pop("UNSPLASH_ACCESS_KEY", None)
            try:
                tools = UnsplashTools(enable_download_photo=True)
                result = tools.download_photo("abc123")
                assert "Error: No Unsplash API key provided" in result
            finally:
                if old_key:
                    os.environ["UNSPLASH_ACCESS_KEY"] = old_key
    def test_download_photo_empty_id(self, unsplash_tools_all):
        """Test download_photo with empty photo ID."""
        result = unsplash_tools_all.download_photo("")
        assert "Error: Please provide a photo ID" in result
    def test_download_photo_api_error(self, unsplash_tools_all, mock_urlopen):
        """Test download_photo with API error."""
        mock_urlopen.side_effect = Exception("Download tracking failed")
        result = unsplash_tools_all.download_photo("abc123")
        assert "Error tracking download: Download tracking failed" in result
class TestFormatPhoto:
    """Tests for _format_photo helper method."""
    def test_format_photo_with_all_fields(self, unsplash_tools):
        """Test photo formatting with all fields present."""
        photo = create_mock_photo()
        result = unsplash_tools._format_photo(photo)
        assert result["id"] == "abc123"
        assert result["description"] == "A beautiful sunset"
        assert result["width"] == 4000
        assert result["height"] == 3000
        assert result["color"] == "#FF5733"
        assert "regular" in result["urls"]
        assert result["author"]["name"] == "John Photographer"
        assert result["likes"] == 150
        assert "sunset" in result["tags"]
    def test_format_photo_with_missing_description(self, unsplash_tools):
        """Test photo formatting falls back to alt_description."""
        photo = create_mock_photo()
        photo["description"] = None
        result = unsplash_tools._format_photo(photo)
        assert result["description"] == "Orange sky over mountains"
    def test_format_photo_with_empty_tags(self, unsplash_tools):
        """Test photo formatting with no tags."""
        photo = create_mock_photo()
        photo["tags"] = []
        result = unsplash_tools._format_photo(photo)
        assert result["tags"] == []
    def test_format_photo_limits_tags_to_five(self, unsplash_tools):
        """Test that only first 5 tags are included."""
        photo = create_mock_photo()
        photo["tags"] = [{"title": f"tag{i}"} for i in range(10)]
        result = unsplash_tools._format_photo(photo)
        assert len(result["tags"]) == 5
class TestMakeRequest:
    """Tests for _make_request helper method."""
    def test_make_request_with_params(self, unsplash_tools, mock_urlopen):
        """Test request includes query parameters."""
        mock_urlopen.return_value = create_mock_response({"test": "data"})
        unsplash_tools._make_request("/test", {"param1": "value1", "param2": "value2"})
        # Verify the URL was constructed correctly
        call_args = mock_urlopen.call_args[0][0]
        assert "/test?" in call_args.full_url
        assert "param1=value1" in call_args.full_url
        assert "param2=value2" in call_args.full_url
    def test_make_request_includes_auth_header(self, unsplash_tools, mock_urlopen):
        """Test request includes authorization header."""
        mock_urlopen.return_value = create_mock_response({"test": "data"})
        unsplash_tools._make_request("/test")
        call_args = mock_urlopen.call_args[0][0]
        assert call_args.headers["Authorization"] == "Client-ID test_key"
        # urllib.request.Request stores header keys via str.capitalize(),
        # hence "Accept-version" rather than the "Accept-Version" the toolkit sets.
        assert call_args.headers["Accept-version"] == "v1"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/tools/test_unsplash.py",
"license": "Apache License 2.0",
"lines": 397,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:cookbook/05_agent_os/knowledge/agentos_excel_analyst.py | """
Agentos Excel Analyst
=====================
Demonstrates agentos excel analyst.
"""
from pathlib import Path
from agno.agent import Agent
from agno.db.postgres import PostgresDb
from agno.knowledge.knowledge import Knowledge
from agno.knowledge.reader.excel_reader import ExcelReader
from agno.models.openai import OpenAIChat
from agno.os import AgentOS
from agno.vectordb.pgvector import PgVector
# ---------------------------------------------------------------------------
# Create Example
# ---------------------------------------------------------------------------
# Assumes the cookbook's local Postgres/pgvector instance on port 5532 — see
# the cookbook setup instructions.
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
db = PostgresDb(db_url=db_url)
excel_knowledge = Knowledge(
    name="Excel Products",
    contents_db=db,  # Required for UI to show knowledge
    vector_db=PgVector(
        db_url=db_url,
        table_name="agentos_excel_knowledge",
    ),
)
excel_agent = Agent(
    name="Excel Data Agent",
    model=OpenAIChat(id="gpt-4o-mini"),
    db=db,  # For session storage
    knowledge=excel_knowledge,
    search_knowledge=True,
    markdown=True,
    instructions=[
        "You are a data analyst assistant with access to Excel spreadsheet data.",
        "Search the knowledge base to answer questions about the data.",
        "Provide specific numbers and details when available.",
    ],
)
# Create AgentOS app
agent_os = AgentOS(
    description="Excel Knowledge API - Query Excel data via REST",
    agents=[excel_agent],
)
# Module-level `app` so the server can be launched via "agentos_excel_analyst:app".
app = agent_os.get_app()
# ---------------------------------------------------------------------------
# Run Example
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # Walk up from cookbook/05_agent_os/knowledge/ to the repository root.
    repo_root = Path(__file__).parent.parent.parent.parent
    sample_file = (
        repo_root / "cookbook/07_knowledge/testing_resources/sample_products.xlsx"
    )
    if sample_file.exists():
        print("Loading sample products data...")
        excel_knowledge.insert(
            path=str(sample_file),
            reader=ExcelReader(),
            skip_if_exists=True,
        )
    print("\nStarting AgentOS server...")
    print("Test at: http://localhost:7777/")
    print("\nExample queries:")
    print(" - What electronics products are in stock?")
    print(" - What is the price of the Bluetooth speaker?")
    agent_os.serve(app="agentos_excel_analyst:app", reload=True)
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/05_agent_os/knowledge/agentos_excel_analyst.py",
"license": "Apache License 2.0",
"lines": 66,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/07_knowledge/readers/excel_legacy_xls.py | from pathlib import Path
from agno.agent import Agent
from agno.knowledge.knowledge import Knowledge
from agno.knowledge.reader.excel_reader import ExcelReader
from agno.models.openai import OpenAIChat
from agno.vectordb.pgvector import PgVector
# Assumes the cookbook's local Postgres/pgvector instance on port 5532.
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
# ExcelReader automatically uses xlrd for .xls files
# Date values are converted from Excel serial numbers to ISO format
# Boolean values are converted from 0/1 to True/False
reader = ExcelReader()
knowledge_base = Knowledge(
    vector_db=PgVector(
        table_name="excel_legacy_xls",
        db_url=db_url,
    ),
)
data_path = Path(__file__).parent.parent / "testing_resources" / "legacy_data.xls"
# NOTE: this insert runs at import time, so the vector DB must be reachable
# even when the module is only imported.
knowledge_base.insert(
    path=str(data_path),
    reader=reader,
)
agent = Agent(
    model=OpenAIChat(id="gpt-4o-mini"),
    knowledge=knowledge_base,
    search_knowledge=True,
    instructions=[
        "You are a data assistant for legacy Excel files.",
        "The workbook has two sheets: Sales Data and Inventory.",
        "Sales Data contains: Date, Product, Quantity, Price, Total.",
        "Inventory contains: Item, Available (True/False).",
        "Dates are in ISO format (YYYY-MM-DD).",
    ],
)
if __name__ == "__main__":
    print("=" * 60)
    print("Excel Legacy XLS - .xls Format Compatibility")
    print("=" * 60)
    print("\n--- Query 1: Sales records ---\n")
    agent.print_response(
        "What products were sold? Include the dates and quantities.",
        markdown=True,
        stream=True,
    )
    print("\n--- Query 2: Inventory status ---\n")
    agent.print_response(
        "Which items are currently available in inventory?",
        markdown=True,
        stream=True,
    )
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/07_knowledge/readers/excel_legacy_xls.py",
"license": "Apache License 2.0",
"lines": 50,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/agno/knowledge/reader/excel_reader.py | import asyncio
import io
from pathlib import Path
from typing import IO, Any, Iterable, List, Optional, Sequence, Tuple, Union
from agno.knowledge.chunking.row import RowChunking
from agno.knowledge.chunking.strategy import ChunkingStrategy, ChunkingStrategyType
from agno.knowledge.document.base import Document
from agno.knowledge.reader.base import Reader
from agno.knowledge.reader.utils import (
convert_xls_cell_value,
excel_rows_to_documents,
get_workbook_name,
infer_file_extension,
)
from agno.knowledge.types import ContentType
from agno.utils.log import log_debug, log_error
class ExcelReader(Reader):
"""Reader for Excel files (.xlsx and .xls)."""
    def __init__(
        self,
        sheets: Optional[List[Union[str, int]]] = None,
        chunking_strategy: Optional[ChunkingStrategy] = RowChunking(),
        **kwargs,
    ):
        """Initialize the Excel reader.

        Args:
            sheets: Optional sheet filters — names (case-insensitive) and/or
                1-based indices. None or an empty list means all sheets.
            chunking_strategy: Strategy used to split sheet content; defaults
                to row-based chunking.
            **kwargs: Forwarded to the Reader base class.
        """
        # NOTE(review): the RowChunking() default is evaluated once at class
        # definition time, so every ExcelReader constructed without an explicit
        # strategy shares the same instance — confirm RowChunking is stateless.
        super().__init__(chunking_strategy=chunking_strategy, **kwargs)
        self.sheets = sheets
    @classmethod
    def get_supported_chunking_strategies(cls) -> List[ChunkingStrategyType]:
        """Get the list of supported chunking strategies for Excel readers."""
        # ROW_CHUNKER is listed first; it matches the default set in __init__.
        return [
            ChunkingStrategyType.ROW_CHUNKER,
            ChunkingStrategyType.CODE_CHUNKER,
            ChunkingStrategyType.FIXED_SIZE_CHUNKER,
            ChunkingStrategyType.AGENTIC_CHUNKER,
            ChunkingStrategyType.DOCUMENT_CHUNKER,
            ChunkingStrategyType.RECURSIVE_CHUNKER,
        ]
    @classmethod
    def get_supported_content_types(cls) -> List[ContentType]:
        """Get the list of supported content types."""
        # Modern .xlsx (openpyxl) and legacy .xls (xlrd) formats.
        return [ContentType.XLSX, ContentType.XLS]
def _should_include_sheet(
self,
sheet_name: str,
sheet_index: int,
) -> bool:
"""Check if sheet passes the configured filters.
Args:
sheet_name: Name of the sheet
sheet_index: 1-based index of the sheet (matches document metadata)
Returns:
True if sheet should be included, False otherwise.
Note:
- Index filtering is 1-based to match sheet_index in document metadata
- Name filtering is case-insensitive
- Empty list or None means include all sheets
"""
# None or empty list = include all sheets
if not self.sheets:
return True
for sheet_filter in self.sheets:
if isinstance(sheet_filter, int):
# 1-based indexing to match metadata
if sheet_index == sheet_filter:
return True
elif isinstance(sheet_filter, str):
# Case-insensitive name matching
if sheet_name.lower() == sheet_filter.lower():
return True
return False
def _read_xlsx(self, file: Union[Path, IO[Any]], *, workbook_name: str) -> List[Document]:
"""Read .xlsx file using openpyxl."""
try:
import openpyxl
except ImportError as e:
raise ImportError("`openpyxl` not installed. Please install it via `pip install openpyxl`.") from e
if isinstance(file, Path):
workbook = openpyxl.load_workbook(filename=str(file), read_only=True, data_only=True)
else:
file.seek(0)
raw = file.read()
if isinstance(raw, str):
raw = raw.encode("utf-8", errors="replace")
workbook = openpyxl.load_workbook(filename=io.BytesIO(raw), read_only=True, data_only=True)
try:
sheets: List[Tuple[str, int, Iterable[Sequence[Any]]]] = []
for sheet_index, worksheet in enumerate(workbook.worksheets):
# Pass 1-based index to match metadata (sheet_index + 1)
if not self._should_include_sheet(worksheet.title, sheet_index + 1):
log_debug(f"Skipping sheet '{worksheet.title}' (filtered out)")
continue
sheets.append((worksheet.title, sheet_index + 1, worksheet.iter_rows(values_only=True)))
return excel_rows_to_documents(workbook_name=workbook_name, sheets=sheets)
finally:
workbook.close()
def _read_xls(self, file: Union[Path, IO[Any]], *, workbook_name: str) -> List[Document]:
"""Read .xls file using xlrd."""
try:
import xlrd
except ImportError as e:
raise ImportError("`xlrd` not installed. Please install it via `pip install xlrd`.") from e
if isinstance(file, Path):
workbook = xlrd.open_workbook(filename=str(file), encoding_override=self.encoding)
else:
file.seek(0)
raw = file.read()
if isinstance(raw, str):
raw = raw.encode("utf-8", errors="replace")
workbook = xlrd.open_workbook(file_contents=raw, encoding_override=self.encoding)
sheets: List[Tuple[str, int, Iterable[Sequence[Any]]]] = []
for sheet_index in range(workbook.nsheets):
sheet = workbook.sheet_by_index(sheet_index)
# Pass 1-based index to match metadata (sheet_index + 1)
if not self._should_include_sheet(sheet.name, sheet_index + 1):
log_debug(f"Skipping sheet '{sheet.name}' (filtered out)")
continue
def _iter_sheet_rows(_sheet: Any = sheet, _datemode: int = workbook.datemode) -> Iterable[Sequence[Any]]:
for row_index in range(_sheet.nrows):
yield [
convert_xls_cell_value(
_sheet.cell_value(row_index, col_index),
_sheet.cell_type(row_index, col_index),
_datemode,
)
for col_index in range(_sheet.ncols)
]
sheets.append((sheet.name, sheet_index + 1, _iter_sheet_rows()))
return excel_rows_to_documents(workbook_name=workbook_name, sheets=sheets)
def read(
self,
file: Union[Path, IO[Any]],
name: Optional[str] = None,
) -> List[Document]:
"""Read an Excel file and return documents (one per sheet)."""
try:
file_extension = infer_file_extension(file, name)
workbook_name = get_workbook_name(file, name)
if isinstance(file, Path) and not file.exists():
raise FileNotFoundError(f"Could not find file: {file}")
file_desc = str(file) if isinstance(file, Path) else getattr(file, "name", "BytesIO")
log_debug(f"Reading Excel file: {file_desc}")
if file_extension == ContentType.XLSX or file_extension == ".xlsx":
documents = self._read_xlsx(file, workbook_name=workbook_name)
elif file_extension == ContentType.XLS or file_extension == ".xls":
documents = self._read_xls(file, workbook_name=workbook_name)
else:
raise ValueError(f"Unsupported file extension: '{file_extension}'. Expected .xlsx or .xls")
if self.chunk:
chunked_documents = []
for document in documents:
chunked_documents.extend(self.chunk_document(document))
return chunked_documents
return documents
except (FileNotFoundError, ImportError, ValueError):
raise
except Exception as e:
file_desc = getattr(file, "name", str(file)) if isinstance(file, IO) else file
log_error(f"Error reading {file_desc}: {e}")
return []
async def async_read(
self,
file: Union[Path, IO[Any]],
name: Optional[str] = None,
) -> List[Document]:
"""Async version of read()."""
try:
file_extension = infer_file_extension(file, name)
workbook_name = get_workbook_name(file, name)
if isinstance(file, Path) and not file.exists():
raise FileNotFoundError(f"Could not find file: {file}")
file_desc = str(file) if isinstance(file, Path) else getattr(file, "name", "BytesIO")
log_debug(f"Reading Excel file async: {file_desc}")
if file_extension == ContentType.XLSX or file_extension == ".xlsx":
documents = await asyncio.to_thread(self._read_xlsx, file, workbook_name=workbook_name)
elif file_extension == ContentType.XLS or file_extension == ".xls":
documents = await asyncio.to_thread(self._read_xls, file, workbook_name=workbook_name)
else:
raise ValueError(f"Unsupported file extension: '{file_extension}'. Expected .xlsx or .xls")
if self.chunk:
documents = await self.chunk_documents_async(documents)
return documents
except (FileNotFoundError, ImportError, ValueError):
raise
except Exception as e:
file_desc = getattr(file, "name", str(file)) if isinstance(file, IO) else file
log_error(f"Error reading {file_desc}: {e}")
return []
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/knowledge/reader/excel_reader.py",
"license": "Apache License 2.0",
"lines": 187,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/knowledge/reader/utils/spreadsheet.py | from datetime import date, datetime
from pathlib import Path
from typing import IO, Any, Iterable, List, Optional, Sequence, Tuple, Union
from uuid import uuid4
from agno.knowledge.document.base import Document
from agno.utils.log import log_debug
def stringify_cell_value(value: Any) -> str:
    """Render a single spreadsheet cell as text.

    ``None`` becomes an empty string, dates/datetimes are ISO-formatted,
    whole-number floats lose their trailing ``.0``, and embedded line
    endings are flattened to single spaces so one cell cannot span rows
    in the CSV-like output.
    """
    if value is None:
        return ""
    # datetime is a subclass of date; one check covers both, and
    # isoformat() dispatches to the right implementation.
    if isinstance(value, (datetime, date)):
        return value.isoformat()
    if isinstance(value, float) and value.is_integer():
        return str(int(value))
    # Replace CRLF before lone CR/LF so Windows endings become one space.
    text = str(value)
    for ending in ("\r\n", "\r", "\n"):
        text = text.replace(ending, " ")
    return text
def get_workbook_name(file: Union[Path, IO[Any]], name: Optional[str]) -> str:
    """Derive a human-friendly workbook name.

    Preference order: the explicit ``name`` argument, then the ``Path``
    stem, then the stream's ``name`` attribute; falls back to "workbook".
    """
    if name:
        return Path(name).stem
    if isinstance(file, Path):
        return file.stem
    # Streams may expose a falsy or absent ``name``; only trust truthy values.
    stream_name = getattr(file, "name", None)
    return Path(stream_name).stem if stream_name else "workbook"
def infer_file_extension(file: Union[Path, IO[Any]], name: Optional[str]) -> str:
    """Return the lower-cased file extension (with leading dot), or ""."""
    if isinstance(file, Path):
        return file.suffix.lower()
    # Streams opened from disk usually carry a string ``name`` attribute,
    # which takes priority over the explicit ``name`` argument.
    stream_name = getattr(file, "name", None)
    if isinstance(stream_name, str) and stream_name:
        return Path(stream_name).suffix.lower()
    return Path(name).suffix.lower() if name else ""
def convert_xls_cell_value(cell_value: Any, cell_type: int, datemode: int) -> Any:
    """Map a raw xlrd cell to a native Python value.

    Only date and boolean cells need translation; every other cell type
    (and any environment without xlrd installed) passes through unchanged.
    """
    try:
        import xlrd
    except ImportError:
        # Without xlrd we cannot interpret the type codes; return as-is.
        return cell_value
    if cell_type == xlrd.XL_CELL_BOOLEAN:
        return bool(cell_value)
    if cell_type == xlrd.XL_CELL_DATE:
        try:
            return datetime(*xlrd.xldate_as_tuple(cell_value, datemode))
        except Exception:
            # Malformed date serials fall back to the raw number.
            return cell_value
    return cell_value
def row_to_csv_line(row_values: Sequence[Any]) -> str:
    """Join a row's cells with ", ", dropping trailing empty cells."""
    cells = [stringify_cell_value(cell) for cell in row_values]
    # Trim trailing blanks so empty right-hand columns add no noise.
    end = len(cells)
    while end and cells[end - 1] == "":
        end -= 1
    return ", ".join(cells[:end])
def excel_rows_to_documents(
    *,
    workbook_name: str,
    sheets: Iterable[Tuple[str, int, Iterable[Sequence[Any]]]],
) -> List[Document]:
    """Build one Document per non-empty sheet.

    Each row is rendered as a CSV-like line; sheets whose rows all render
    to empty strings are skipped entirely.
    """
    documents: List[Document] = []
    for sheet_name, sheet_index, rows in sheets:
        # Render every row, discarding rows that stringify to nothing.
        lines = [text for text in (row_to_csv_line(row) for row in rows) if text]
        if not lines:
            log_debug(f"Sheet '{sheet_name}' is empty, skipping")
            continue
        documents.append(
            Document(
                name=workbook_name,
                id=str(uuid4()),
                meta_data={"sheet_name": sheet_name, "sheet_index": sheet_index},
                content="\n".join(lines),
            )
        )
    return documents
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/knowledge/reader/utils/spreadsheet.py",
"license": "Apache License 2.0",
"lines": 90,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:cookbook/90_models/ollama/responses/basic.py | """Basic example using Ollama with the OpenAI Responses API.
This uses Ollama's OpenAI-compatible /v1/responses endpoint, which was added
in Ollama v0.13.3. It provides an alternative to the native Ollama API.
Requirements:
- Ollama v0.13.3 or later running locally
- Run: ollama pull gpt-oss:20b
"""
import asyncio

from agno.agent import Agent
from agno.models.ollama import OllamaResponses

# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# OllamaResponses targets the local Ollama daemon's /v1/responses endpoint.
agent = Agent(
    model=OllamaResponses(id="gpt-oss:20b"),
    markdown=True,
)
# Print the response in the terminal
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # --- Sync ---
    agent.print_response("Share a 2 sentence horror story")
    # --- Sync + Streaming ---
    agent.print_response("Write a short poem about the moon", stream=True)
    # --- Async ---
    asyncio.run(agent.aprint_response("Share a 2 sentence horror story"))
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/ollama/responses/basic.py",
"license": "Apache License 2.0",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/ollama/responses/structured_output.py | """Structured output example using Ollama with the OpenAI Responses API.
This demonstrates using Pydantic models for structured output with Ollama's
Responses API endpoint.
Requirements:
- Ollama v0.13.3 or later running locally
- Run: ollama pull gpt-oss:20b
"""
from typing import List
from agno.agent import Agent
from agno.models.ollama import OllamaResponses
from pydantic import BaseModel, Field
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
class MovieScript(BaseModel):
    """Structured schema the agent must populate: one movie pitch."""

    name: str = Field(..., description="Give a name to this movie")
    setting: str = Field(
        ..., description="Provide a nice setting for a blockbuster movie."
    )
    ending: str = Field(
        ...,
        description="Ending of the movie. If not available, provide a happy ending.",
    )
    genre: str = Field(
        ...,
        description="Genre of the movie. If not available, select action, thriller or romantic comedy.",
    )
    characters: List[str] = Field(..., description="Name of characters for this movie.")
    storyline: str = Field(
        ..., description="3 sentence storyline for the movie. Make it exciting!"
    )
# output_schema instructs the agent to return a MovieScript instance.
agent = Agent(
    model=OllamaResponses(id="gpt-oss:20b"),
    description="You write movie scripts.",
    output_schema=MovieScript,
)
# NOTE: this call runs at import time, before the __main__ guard below.
agent.print_response("New York")
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/ollama/responses/structured_output.py",
"license": "Apache License 2.0",
"lines": 42,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/ollama/responses/tool_use.py | """Tool use example using Ollama with the OpenAI Responses API.
This demonstrates using tools with Ollama's Responses API endpoint.
Requirements:
- Ollama v0.13.3 or later running locally
- Run: ollama pull gpt-oss:20b
"""
from agno.agent import Agent
from agno.models.ollama import OllamaResponses
from agno.tools.duckduckgo import DuckDuckGoTools

# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# DuckDuckGoTools gives the model a web-search tool it can call.
agent = Agent(
    model=OllamaResponses(id="gpt-oss:20b"),
    tools=[DuckDuckGoTools()],
    markdown=True,
)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # --- Sync ---
    agent.print_response("What is the latest news about AI?")
    # --- Sync + Streaming ---
    agent.print_response("What is the latest news about AI?", stream=True)
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/ollama/responses/tool_use.py",
"license": "Apache License 2.0",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/openrouter/responses/basic.py | """Basic example using OpenRouter with the Responses API.
OpenRouter's Responses API (beta) provides OpenAI-compatible access to multiple
AI models through a unified interface.
Requirements:
- Set OPENROUTER_API_KEY environment variable
"""
import asyncio

from agno.agent import Agent
from agno.models.openrouter import OpenRouterResponses

# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# reasoning={"enabled": True} opts into OpenRouter's reasoning support
# (see the OpenRouterResponses class documentation).
agent = Agent(
    model=OpenRouterResponses(id="openai/gpt-oss-20b", reasoning={"enabled": True}),
    markdown=True,
)
# Print the response in the terminal
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # --- Sync ---
    agent.print_response("Share a 2 sentence horror story")
    # --- Async ---
    asyncio.run(agent.aprint_response("Share a 2 sentence horror story"))
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/openrouter/responses/basic.py",
"license": "Apache License 2.0",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/openrouter/responses/fallback.py | """Model fallback example using OpenRouter with the Responses API.
This demonstrates using fallback models with OpenRouter's dynamic model routing.
If the primary model fails due to rate limits, timeouts, or unavailability,
OpenRouter will automatically try the fallback models in order.
Requirements:
- Set OPENROUTER_API_KEY environment variable
"""
from agno.agent import Agent
from agno.models.openrouter import OpenRouterResponses

# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
agent = Agent(
    model=OpenRouterResponses(
        id="openai/gpt-oss-20b",
        # Fallback models if primary fails
        # NOTE(review): the first fallback repeats the primary id — confirm
        # this is intentional (it effectively retries the same model first).
        models=[
            "openai/gpt-oss-20b",
            "openai/gpt-4o",
        ],
    ),
    markdown=True,
)
# NOTE: this call runs at import time, before the __main__ guard below.
agent.print_response("Write a haiku about coding", stream=True)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/openrouter/responses/fallback.py",
"license": "Apache License 2.0",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/openrouter/responses/stream.py | """Streaming example using OpenRouter with the Responses API.
Requirements:
- Set OPENROUTER_API_KEY environment variable
"""
from agno.agent import Agent
from agno.models.openrouter import OpenRouterResponses

# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
agent = Agent(
    model=OpenRouterResponses(id="openai/gpt-oss-20b", reasoning={"enabled": True}),
    markdown=True,
)
# Stream the response
# NOTE: this call runs at import time, before the __main__ guard below.
agent.print_response("Write a short poem about the moon", stream=True)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/openrouter/responses/stream.py",
"license": "Apache License 2.0",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/openrouter/responses/structured_output.py | """Structured output example using OpenRouter with the Responses API.
This demonstrates using Pydantic models for structured output with OpenRouter's
Responses API endpoint.
Requirements:
- Set OPENROUTER_API_KEY environment variable
"""
from typing import List
from agno.agent import Agent
from agno.models.openrouter import OpenRouterResponses
from pydantic import BaseModel, Field
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
class MovieScript(BaseModel):
    """Structured schema the agent must populate: one movie pitch."""

    name: str = Field(..., description="Give a name to this movie")
    setting: str = Field(
        ..., description="Provide a nice setting for a blockbuster movie."
    )
    ending: str = Field(
        ...,
        description="Ending of the movie. If not available, provide a happy ending.",
    )
    genre: str = Field(
        ...,
        description="Genre of the movie. If not available, select action, thriller or romantic comedy.",
    )
    characters: List[str] = Field(..., description="Name of characters for this movie.")
    storyline: str = Field(
        ..., description="3 sentence storyline for the movie. Make it exciting!"
    )
# output_schema instructs the agent to return a MovieScript instance.
agent = Agent(
    model=OpenRouterResponses(id="openai/gpt-oss-20b", reasoning={"enabled": True}),
    description="You write movie scripts.",
    output_schema=MovieScript,
)
# NOTE: this call runs at import time, before the __main__ guard below.
agent.print_response("New York")
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/openrouter/responses/structured_output.py",
"license": "Apache License 2.0",
"lines": 41,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/openrouter/responses/tool_use.py | """Tool use example using OpenRouter with the Responses API.
This demonstrates using tools with OpenRouter's Responses API endpoint.
Requirements:
- Set OPENROUTER_API_KEY environment variable
"""
from agno.agent import Agent
from agno.models.openrouter import OpenRouterResponses
from agno.tools.duckduckgo import DuckDuckGoTools

# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# DuckDuckGoTools gives the model a web-search tool it can call.
agent = Agent(
    model=OpenRouterResponses(id="openai/gpt-oss-20b", reasoning={"enabled": True}),
    tools=[DuckDuckGoTools()],
    markdown=True,
)
# NOTE: this call runs at import time, before the __main__ guard below.
agent.print_response("What is the latest news about AI?", stream=True)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/openrouter/responses/tool_use.py",
"license": "Apache License 2.0",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/agno/models/ollama/responses.py | from dataclasses import dataclass, field
from os import getenv
from typing import Any, Dict, Optional
from agno.models.openai.open_responses import OpenResponses
from agno.utils.log import log_debug
@dataclass
class OllamaResponses(OpenResponses):
    """
    A class for interacting with Ollama models using the OpenAI Responses API.

    This uses Ollama's OpenAI-compatible `/v1/responses` endpoint, which was added
    in Ollama v0.13.3. It allows using Ollama models with the Responses API format.

    Note: Ollama's Responses API is stateless - it does not support `previous_response_id`
    or conversation chaining. Each request is independent.

    Requirements:
    - Ollama v0.13.3 or later
    - For local usage: Ollama server running at http://localhost:11434
    - For Ollama Cloud: Set OLLAMA_API_KEY environment variable

    For more information, see: https://docs.ollama.com/api/openai-compatibility

    Attributes:
        id (str): The model id. Defaults to "gpt-oss:20b".
        name (str): The model name. Defaults to "OllamaResponses".
        provider (str): The provider name. Defaults to "Ollama".
        host (Optional[str]): The Ollama server host. Defaults to "http://localhost:11434".
        api_key (Optional[str]): The API key for Ollama Cloud. Not required for local usage.
    """

    id: str = "gpt-oss:20b"
    name: str = "OllamaResponses"
    provider: str = "Ollama"
    # Ollama server host - defaults to local instance
    host: Optional[str] = None
    # API key for Ollama Cloud (not required for local)
    api_key: Optional[str] = field(default_factory=lambda: getenv("OLLAMA_API_KEY"))
    # Ollama's Responses API is stateless
    store: Optional[bool] = False

    def _get_client_params(self) -> Dict[str, Any]:
        """
        Get client parameters for API requests.

        Endpoint precedence: explicit `host` > Ollama Cloud (when an API key
        is set) > the local default at http://localhost:11434.

        Returns:
            Dict[str, Any]: Client parameters including base_url and optional api_key.
        """
        # Determine the base URL
        if self.host:
            base_url = self.host.rstrip("/")
            # Append /v1 unless the caller already included it.
            if not base_url.endswith("/v1"):
                base_url = f"{base_url}/v1"
        elif self.api_key:
            # Ollama Cloud
            base_url = "https://ollama.com/v1"
            log_debug(f"Using Ollama Cloud endpoint: {base_url}")
        else:
            # Local Ollama instance
            base_url = "http://localhost:11434/v1"
        # Build client params
        base_params: Dict[str, Any] = {
            "base_url": base_url,
            "timeout": self.timeout,
            "max_retries": self.max_retries,
            "default_headers": self.default_headers,
            "default_query": self.default_query,
        }
        # Add API key if provided (required for Ollama Cloud, ignored for local)
        if self.api_key:
            base_params["api_key"] = self.api_key
        else:
            # OpenAI client requires an api_key, but Ollama ignores it locally
            base_params["api_key"] = "ollama"
        # Filter out None values
        client_params = {k: v for k, v in base_params.items() if v is not None}
        # Add additional client params if provided
        if self.client_params:
            client_params.update(self.client_params)
        return client_params

    def _using_reasoning_model(self) -> bool:
        """
        Ollama doesn't have native reasoning models like OpenAI's o-series.

        Some models may support thinking/reasoning through their architecture
        (like DeepSeek-R1), but they don't use OpenAI's reasoning API format.
        """
        return False
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/models/ollama/responses.py",
"license": "Apache License 2.0",
"lines": 79,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/models/openai/open_responses.py | from dataclasses import dataclass
from typing import Optional
from agno.models.openai.responses import OpenAIResponses
@dataclass
class OpenResponses(OpenAIResponses):
    """
    A base class for interacting with any provider using the Open Responses API specification.

    Open Responses is an open-source specification for building multi-provider, interoperable
    LLM interfaces based on the OpenAI Responses API. This class provides a foundation for
    providers that implement the spec (e.g., Ollama, OpenRouter).

    For more information, see: https://openresponses.org

    Key differences from OpenAIResponses:
    - Configurable base_url for pointing to different API endpoints
    - Stateless by default (no previous_response_id chaining)
    - Flexible api_key handling for providers that don't require authentication

    Args:
        id (str): The model id. Defaults to "not-provided".
        name (str): The model name. Defaults to "OpenResponses".
        api_key (Optional[str]): The API key. Defaults to "not-provided".
    """

    # Placeholder defaults: concrete subclasses override these.
    id: str = "not-provided"
    name: str = "OpenResponses"
    provider: str = "OpenResponses"
    # Placeholder key for providers that don't authenticate (the underlying
    # OpenAI client requires some value to be present).
    api_key: Optional[str] = "not-provided"
    # Disable stateful features by default for compatible providers
    # Most OpenAI-compatible providers don't support previous_response_id chaining
    store: Optional[bool] = False

    def _using_reasoning_model(self) -> bool:
        """
        Override to disable reasoning model detection for compatible providers.

        Most compatible providers don't support OpenAI's reasoning models,
        so we disable the special handling by default. Subclasses can override
        this if they support specific reasoning models.
        """
        return False
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/models/openai/open_responses.py",
"license": "Apache License 2.0",
"lines": 35,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
agno-agi/agno:libs/agno/agno/models/openrouter/responses.py | from dataclasses import dataclass
from os import getenv
from typing import Any, Dict, List, Optional, Type, Union
from pydantic import BaseModel
from agno.exceptions import ModelAuthenticationError
from agno.models.message import Message
from agno.models.openai.open_responses import OpenResponses
@dataclass
class OpenRouterResponses(OpenResponses):
    """
    A class for interacting with OpenRouter models using the OpenAI Responses API.

    OpenRouter's Responses API (currently in beta) provides OpenAI-compatible access
    to multiple AI models through a unified interface. It supports tools, reasoning,
    streaming, and plugins.

    Note: OpenRouter's Responses API is stateless - each request is independent and
    no server-side state is persisted.

    For more information, see: https://openrouter.ai/docs/api/reference/responses/overview

    Attributes:
        id (str): The model id. Defaults to "openai/gpt-oss-20b".
        name (str): The model name. Defaults to "OpenRouterResponses".
        provider (str): The provider name. Defaults to "OpenRouter".
        api_key (Optional[str]): The API key. Uses OPENROUTER_API_KEY env var if not set.
        base_url (str): The base URL. Defaults to "https://openrouter.ai/api/v1".
        models (Optional[List[str]]): List of fallback model IDs to use if the primary model
            fails due to rate limits, timeouts, or unavailability. OpenRouter will automatically
            try these models in order. Example: ["anthropic/claude-sonnet-4", "deepseek/deepseek-r1"]

    Example:
        ```python
        from agno.agent import Agent
        from agno.models.openrouter import OpenRouterResponses

        agent = Agent(
            model=OpenRouterResponses(id="anthropic/claude-sonnet-4"),
            markdown=True,
        )
        agent.print_response("Write a haiku about coding")
        ```
    """

    id: str = "openai/gpt-oss-20b"
    name: str = "OpenRouterResponses"
    provider: str = "OpenRouter"
    api_key: Optional[str] = None
    base_url: str = "https://openrouter.ai/api/v1"
    # Dynamic model routing - fallback models if primary fails
    # https://openrouter.ai/docs/features/model-routing
    models: Optional[List[str]] = None
    # OpenRouter's Responses API is stateless
    store: Optional[bool] = False

    def _get_client_params(self) -> Dict[str, Any]:
        """
        Returns client parameters for API requests, checking for OPENROUTER_API_KEY.

        Returns:
            Dict[str, Any]: A dictionary of client parameters for API requests.

        Raises:
            ModelAuthenticationError: If OPENROUTER_API_KEY is not set.
        """
        # Fetch API key from env if not already set
        if not self.api_key:
            self.api_key = getenv("OPENROUTER_API_KEY")
        if not self.api_key:
            raise ModelAuthenticationError(
                message="OPENROUTER_API_KEY not set. Please set the OPENROUTER_API_KEY environment variable.",
                model_name=self.name,
            )
        # Build client params
        base_params: Dict[str, Any] = {
            "api_key": self.api_key,
            "base_url": self.base_url,
            "organization": self.organization,
            "timeout": self.timeout,
            "max_retries": self.max_retries,
            "default_headers": self.default_headers,
            "default_query": self.default_query,
        }
        # Filter out None values
        client_params = {k: v for k, v in base_params.items() if v is not None}
        # Add additional client params if provided
        if self.client_params:
            client_params.update(self.client_params)
        return client_params

    def get_request_params(
        self,
        messages: Optional[List[Message]] = None,
        response_format: Optional[Union[Dict, Type[BaseModel]]] = None,
        tools: Optional[List[Dict[str, Any]]] = None,
        tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
    ) -> Dict[str, Any]:
        """
        Returns keyword arguments for API requests, including fallback models configuration.

        Returns:
            Dict[str, Any]: A dictionary of keyword arguments for API requests.
        """
        # Get base request params from parent class
        request_params = super().get_request_params(
            messages=messages,
            response_format=response_format,
            tools=tools,
            tool_choice=tool_choice,
        )
        # Add fallback models to extra_body if specified
        if self.models:
            # Get existing extra_body or create new dict
            extra_body = request_params.get("extra_body") or {}
            # Merge fallback models into extra_body; "models" is OpenRouter's
            # routing key and is not part of the OpenAI request schema.
            extra_body["models"] = self.models
            # Update request params
            request_params["extra_body"] = extra_body
        return request_params

    def _using_reasoning_model(self) -> bool:
        """
        Check if the model is a reasoning model that requires special handling.

        OpenRouter hosts various reasoning models, but they may not all use
        OpenAI's reasoning API format. We check for known reasoning model patterns.
        """
        # Check for OpenAI reasoning models hosted on OpenRouter
        # NOTE(review): only o3/o4 prefixes are flagged — confirm whether
        # openai/o1* models also need OpenAI reasoning handling.
        if self.id.startswith("openai/o3") or self.id.startswith("openai/o4"):
            return True
        return False
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/models/openrouter/responses.py",
"license": "Apache License 2.0",
"lines": 117,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/tests/unit/models/ollama_model/test_ollama_responses.py | from unittest.mock import patch
from agno.models.ollama import OllamaResponses
def test_ollama_responses_default_config():
    """OllamaResponses ships with sane local-first defaults."""
    responses_model = OllamaResponses()
    assert responses_model.id == "gpt-oss:20b"
    assert responses_model.name == "OllamaResponses"
    assert responses_model.provider == "Ollama"
    # Ollama's Responses endpoint is stateless, so store defaults to off.
    assert responses_model.store is False
def test_ollama_responses_local_base_url():
    """Without a host or API key, the client targets the local daemon."""
    model = OllamaResponses(id="llama3.1")
    with patch("agno.models.openai.responses.OpenAI") as mock_client:
        model.get_client()
        client_kwargs = mock_client.call_args.kwargs
        assert client_kwargs["base_url"] == "http://localhost:11434/v1"
        # A placeholder key is sent because the OpenAI client demands one.
        assert client_kwargs["api_key"] == "ollama"
def test_ollama_responses_custom_host():
    """A bare custom host gets the /v1 suffix appended."""
    model = OllamaResponses(id="llama3.1", host="http://192.168.1.100:11434")
    with patch("agno.models.openai.responses.OpenAI") as mock_client:
        model.get_client()
        assert mock_client.call_args.kwargs["base_url"] == "http://192.168.1.100:11434/v1"
def test_ollama_responses_custom_host_with_v1():
    """A host already ending in /v1 is passed through untouched."""
    model = OllamaResponses(id="llama3.1", host="http://192.168.1.100:11434/v1")
    with patch("agno.models.openai.responses.OpenAI") as mock_client:
        model.get_client()
        assert mock_client.call_args.kwargs["base_url"] == "http://192.168.1.100:11434/v1"
def test_ollama_responses_cloud():
"""Test OllamaResponses with Ollama Cloud API key."""
model = OllamaResponses(id="llama3.1", api_key="test-api-key")
with patch("agno.models.openai.responses.OpenAI") as mock_client:
model.get_client()
_, kwargs = mock_client.call_args
assert kwargs["base_url"] == "https://ollama.com/v1"
assert kwargs["api_key"] == "test-api-key"
def test_ollama_responses_not_reasoning_model():
"""Test that OllamaResponses never reports as reasoning model."""
model = OllamaResponses(id="llama3.1")
assert model._using_reasoning_model() is False
# Even with DeepSeek-R1 which has reasoning capabilities
model = OllamaResponses(id="deepseek-r1")
assert model._using_reasoning_model() is False
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/models/ollama_model/test_ollama_responses.py",
"license": "Apache License 2.0",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/models/openrouter_model/test_openrouter_responses.py | from unittest.mock import patch
import pytest
from agno.exceptions import ModelAuthenticationError
from agno.models.openrouter import OpenRouterResponses
def test_openrouter_responses_default_config():
    """OpenRouterResponses exposes the documented defaults."""
    with patch.dict("os.environ", {"OPENROUTER_API_KEY": "test-key"}):
        model = OpenRouterResponses()
        assert (model.id, model.name, model.provider) == (
            "openai/gpt-oss-20b",
            "OpenRouterResponses",
            "OpenRouter",
        )
        assert model.base_url == "https://openrouter.ai/api/v1"
        assert model.store is False  # stateless by default


def test_openrouter_responses_requires_api_key():
    """Client params fail fast when no API key is available anywhere."""
    model = OpenRouterResponses()
    with patch.dict("os.environ", {}, clear=True), pytest.raises(
        ModelAuthenticationError, match="OPENROUTER_API_KEY not set"
    ):
        model._get_client_params()


def test_openrouter_responses_api_key_from_env():
    """The environment variable supplies the key when none is passed."""
    model = OpenRouterResponses()
    with patch.dict("os.environ", {"OPENROUTER_API_KEY": "env-api-key"}):
        assert model._get_client_params()["api_key"] == "env-api-key"


def test_openrouter_responses_api_key_explicit():
    """An explicitly supplied key takes precedence over the environment."""
    model = OpenRouterResponses(api_key="explicit-key")
    with patch.dict("os.environ", {"OPENROUTER_API_KEY": "env-key"}):
        assert model._get_client_params()["api_key"] == "explicit-key"


def test_openrouter_responses_fallback_models():
    """Fallback model IDs travel to the API via extra_body."""
    fallbacks = ["anthropic/claude-sonnet-4", "google/gemini-2.0-flash"]
    model = OpenRouterResponses(api_key="test-key", models=fallbacks)
    request_params = model.get_request_params()
    assert "extra_body" in request_params
    assert request_params["extra_body"]["models"] == fallbacks


def test_openrouter_responses_reasoning_model_detection():
    """Only OpenAI o3/o4 IDs routed through OpenRouter count as reasoning."""
    expectations = [
        ("anthropic/claude-sonnet-4", False),  # non-reasoning model
        ("openai/o3-mini", True),  # OpenAI o3 via OpenRouter
        ("openai/o4-mini", True),  # OpenAI o4 via OpenRouter
    ]
    for model_id, expected in expectations:
        model = OpenRouterResponses(id=model_id, api_key="test-key")
        assert model._using_reasoning_model() is expected


def test_openrouter_responses_client_params():
    """Constructor options are forwarded into the client parameters."""
    model = OpenRouterResponses(
        id="anthropic/claude-sonnet-4",
        api_key="test-key",
        timeout=30.0,
        max_retries=3,
    )
    client_params = model._get_client_params()
    for key, expected in {
        "api_key": "test-key",
        "base_url": "https://openrouter.ai/api/v1",
        "timeout": 30.0,
        "max_retries": 3,
    }.items():
        assert client_params[key] == expected
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/models/openrouter_model/test_openrouter_responses.py",
"license": "Apache License 2.0",
"lines": 67,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:cookbook/07_knowledge/cloud/azure_blob.py | """
Azure Blob Storage Content Source for Knowledge
================================================
Load files and folders from Azure Blob Storage containers into your Knowledge base.
Uses Azure AD client credentials flow for authentication.
Features:
- Load single blobs or entire prefixes (folders) recursively
- Supports any Azure Storage Account
- Automatic file type detection and reader selection
- Rich metadata stored for each file (storage account, container, path)
Requirements:
- Azure AD App Registration with:
- Application (client) ID
- Client secret
- Storage Blob Data Reader role on the storage account
- Storage account name and container name
Setup:
1. Register an app in Azure AD (portal.azure.com)
2. Assign "Storage Blob Data Reader" role to the app on your storage account
3. Create a client secret
4. Set environment variables (see below)
Environment Variables:
AZURE_TENANT_ID - Azure AD tenant ID
AZURE_CLIENT_ID - App registration client ID
AZURE_CLIENT_SECRET - App registration client secret
AZURE_STORAGE_ACCOUNT_NAME - Storage account name (without .blob.core.windows.net)
AZURE_CONTAINER_NAME - Container name
Run this cookbook:
python cookbook/07_knowledge/cloud/azure_blob.py
"""
from os import getenv
from agno.knowledge.knowledge import Knowledge
from agno.knowledge.remote_content import AzureBlobConfig
from agno.vectordb.pgvector import PgVector
# Configure Azure Blob Storage content source.
# All credentials come from environment variables; getenv returns None for any
# unset variable, so missing credentials surface when the source is first used.
azure_config = AzureBlobConfig(
    id="company-docs",
    name="Company Documents",
    tenant_id=getenv("AZURE_TENANT_ID"),
    client_id=getenv("AZURE_CLIENT_ID"),
    client_secret=getenv("AZURE_CLIENT_SECRET"),
    storage_account=getenv("AZURE_STORAGE_ACCOUNT_NAME"),
    container=getenv("AZURE_CONTAINER_NAME"),
)
# Create Knowledge with Azure Blob Storage as a content source and PgVector
# (local Postgres with the pgvector extension) as the vector store.
knowledge = Knowledge(
    name="Azure Blob Knowledge",
    vector_db=PgVector(
        table_name="azure_blob_knowledge",
        db_url="postgresql+psycopg://ai:ai@localhost:5532/ai",
    ),
    content_sources=[azure_config],
)
if __name__ == "__main__":
    # Insert a single blob, addressed by its path within the configured container.
    print("Inserting single file from Azure Blob Storage...")
    knowledge.insert(
        name="DeepSeek Paper",
        remote_content=azure_config.file("DeepSeek_R1.pdf"),
    )
    # Insert every blob under a prefix ("folder").
    print("Inserting folder from Azure Blob Storage...")
    knowledge.insert(
        name="Research Papers",
        remote_content=azure_config.folder("testfolder/"),
    )
    # Query the vector store and print a 100-character snippet of each match.
    print("Searching knowledge base...")
    results = knowledge.search("What is DeepSeek?")
    for doc in results:
        print(f"- {doc.name}: {doc.content[:100]}...")
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/07_knowledge/cloud/azure_blob.py",
"license": "Apache License 2.0",
"lines": 72,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
agno-agi/agno:libs/agno/tests/unit/vectordb/test_lightrag.py | import pytest
from agno.vectordb.lightrag import LightRag
TEST_SERVER_URL = "http://localhost:9621"
TEST_API_KEY = "test_api_key"
@pytest.fixture
def lightrag_db():
    """Provide a LightRag instance configured for the test server and key."""
    yield LightRag(server_url=TEST_SERVER_URL, api_key=TEST_API_KEY)


def test_initialization():
    """A bare LightRag targets the default local server with no API key."""
    db = LightRag()
    assert (db.server_url, db.api_key) == ("http://localhost:9621", None)


def test_initialization_with_params():
    """Custom constructor arguments are stored verbatim."""
    options = {
        "server_url": "http://custom:8080",
        "api_key": "secret",
        "name": "test_db",
        "description": "Test database",
    }
    db = LightRag(**options)
    for attr, expected in options.items():
        assert getattr(db, attr) == expected


def test_get_headers_with_api_key(lightrag_db):
    """JSON headers carry the API key when one is configured."""
    headers = lightrag_db._get_headers()
    assert headers["Content-Type"] == "application/json"
    assert headers["X-API-KEY"] == TEST_API_KEY


def test_get_headers_without_api_key():
    """The key header is absent when no API key was given."""
    headers = LightRag(server_url=TEST_SERVER_URL)._get_headers()
    assert headers["Content-Type"] == "application/json"
    assert "X-API-KEY" not in headers


def test_get_auth_headers(lightrag_db):
    """Upload headers omit Content-Type but still carry the API key."""
    headers = lightrag_db._get_auth_headers()
    assert "Content-Type" not in headers
    assert headers["X-API-KEY"] == TEST_API_KEY


def test_custom_auth_header_format():
    """Both the auth header name and its value template are configurable."""
    db = LightRag(
        server_url=TEST_SERVER_URL,
        api_key="my_key",
        auth_header_name="Authorization",
        auth_header_format="Bearer {api_key}",
    )
    assert db._get_headers()["Authorization"] == "Bearer my_key"


def test_format_response_with_references(lightrag_db):
    """Server-provided references survive into the document meta_data."""
    raw = {
        "response": "Jordan Mitchell has skills in Python and JavaScript.",
        "references": [
            {"reference_id": "1", "file_path": "cv_1.pdf", "content": None},
            {"reference_id": "2", "file_path": "cv_2.pdf", "content": None},
        ],
    }
    documents = lightrag_db._format_lightrag_response(raw, "What skills?", "hybrid")
    assert len(documents) == 1
    doc = documents[0]
    assert doc.content == "Jordan Mitchell has skills in Python and JavaScript."
    assert doc.meta_data["source"] == "lightrag"
    assert doc.meta_data["query"] == "What skills?"
    assert doc.meta_data["mode"] == "hybrid"
    assert "references" in doc.meta_data
    assert len(doc.meta_data["references"]) == 2
    assert doc.meta_data["references"][0]["file_path"] == "cv_1.pdf"


def test_format_response_without_references(lightrag_db):
    """Responses lacking references still format (backward compatible)."""
    documents = lightrag_db._format_lightrag_response(
        {"response": "Some content without references."}, "query", "local"
    )
    assert len(documents) == 1
    assert documents[0].content == "Some content without references."
    assert "references" not in documents[0].meta_data


def test_format_response_list_with_content(lightrag_db):
    """Dict items in a list response contribute their content and metadata."""
    raw = [
        {"content": "First document", "metadata": {"source": "custom"}},
        {"content": "Second document"},
    ]
    documents = lightrag_db._format_lightrag_response(raw, "query", "global")
    assert len(documents) == 2
    assert documents[0].content == "First document"
    assert documents[0].meta_data["source"] == "custom"


def test_format_response_list_plain_strings(lightrag_db):
    """Bare strings in a list response become one document apiece."""
    documents = lightrag_db._format_lightrag_response(
        ["plain text item 1", "plain text item 2"], "query", "hybrid"
    )
    assert len(documents) == 2
    assert documents[0].content == "plain text item 1"
    assert documents[0].meta_data["source"] == "lightrag"


def test_format_response_string(lightrag_db):
    """A plain string response yields exactly one document."""
    documents = lightrag_db._format_lightrag_response(
        "Just a plain string response", "query", "hybrid"
    )
    assert len(documents) == 1
    assert documents[0].content == "Just a plain string response"
    assert documents[0].meta_data["source"] == "lightrag"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/vectordb/test_lightrag.py",
"license": "Apache License 2.0",
"lines": 104,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:cookbook/07_knowledge/readers/excel_reader.py | from agno.agent import Agent
from agno.knowledge.knowledge import Knowledge
from agno.knowledge.reader.excel_reader import ExcelReader
from agno.models.openai import OpenAIChat
from agno.vectordb.pgvector import PgVector
# Local Postgres (with the pgvector extension) used by the vector store.
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
reader = ExcelReader()
knowledge_base = Knowledge(
    vector_db=PgVector(
        table_name="excel_products_demo",
        db_url=db_url,
    ),
)
# Insert Excel file - ExcelReader uses openpyxl for .xlsx, xlrd for .xls
knowledge_base.insert(
    path="cookbook/07_knowledge/testing_resources/sample_products.xlsx",
    reader=reader,
)
# Agent that answers product-catalog questions by searching the knowledge base.
agent = Agent(
    model=OpenAIChat(id="gpt-4o-mini"),
    knowledge=knowledge_base,
    search_knowledge=True,  # allow the agent to search the knowledge base on demand
    instructions=[
        "You are a product catalog assistant.",
        "Use the knowledge base to answer questions about products.",
        "The data comes from an Excel workbook with Products and Categories sheets.",
    ],
)
if __name__ == "__main__":
    # Two demo queries; responses stream to stdout as Markdown.
    agent.print_response(
        "What electronics products are currently in stock? Include their prices.",
        markdown=True,
        stream=True,
    )
    agent.print_response(
        "What is the price of the Bluetooth speaker?",
        markdown=True,
        stream=True,
    )
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/07_knowledge/readers/excel_reader.py",
"license": "Apache License 2.0",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/tests/unit/knowledge/test_excel_reader.py | import io
import sys
from datetime import datetime
from pathlib import Path
from unittest.mock import patch
import pytest
from agno.knowledge.reader.excel_reader import ExcelReader
from agno.knowledge.reader.reader_factory import ReaderFactory
def test_reader_factory_routes_xlsx_to_excel_reader():
    """The factory hands .xlsx files to ExcelReader."""
    ReaderFactory.clear_cache()
    assert isinstance(ReaderFactory.get_reader_for_extension(".xlsx"), ExcelReader)


def test_reader_factory_routes_xls_to_excel_reader():
    """The factory hands legacy .xls files to ExcelReader as well."""
    ReaderFactory.clear_cache()
    assert isinstance(ReaderFactory.get_reader_for_extension(".xls"), ExcelReader)


def test_excel_reader_reads_xlsx_as_per_sheet_documents(tmp_path: Path):
    """Each non-empty sheet becomes one document; empty sheets are dropped."""
    openpyxl = pytest.importorskip("openpyxl")
    book = openpyxl.Workbook()
    book.active.title = "First"
    for row in (["name", "age"], ["alice", 30]):
        book["First"].append(row)
    book.create_sheet("Second")
    book["Second"].append(["city"])
    book["Second"].append(["SF"])
    book.create_sheet("Empty")  # empty sheets must be ignored
    payload = io.BytesIO()
    book.save(payload)
    book.close()
    target = tmp_path / "workbook.xlsx"
    target.write_bytes(payload.getvalue())

    documents = ExcelReader(chunk=False).read(target)

    assert len(documents) == 2
    assert {d.meta_data["sheet_name"] for d in documents} == {"First", "Second"}
    first_doc = next(d for d in documents if d.meta_data["sheet_name"] == "First")
    assert first_doc.meta_data["sheet_index"] == 1
    assert first_doc.content.splitlines() == ["name, age", "alice, 30"]
def _xlsx_path(tmp_path, filename, rows_by_sheet):
    """Write an .xlsx workbook of {sheet_name: rows} under tmp_path and return its path."""
    openpyxl = pytest.importorskip("openpyxl")
    book = openpyxl.Workbook()
    sheet_names = list(rows_by_sheet)
    book.active.title = sheet_names[0]
    for extra in sheet_names[1:]:
        book.create_sheet(extra)
    for sheet_name, rows in rows_by_sheet.items():
        for row in rows:
            book[sheet_name].append(row)
    payload = io.BytesIO()
    book.save(payload)
    book.close()
    target = tmp_path / filename
    target.write_bytes(payload.getvalue())
    return target


def test_excel_reader_reads_xlsx_preserves_cell_whitespace_when_chunk_disabled(tmp_path: Path):
    """Leading/trailing spaces inside cells survive when chunking is off."""
    target = _xlsx_path(
        tmp_path,
        "whitespace.xlsx",
        {"Sheet": [[" name", "age "], [" alice", "30 "]]},
    )
    documents = ExcelReader(chunk=False).read(target)
    assert len(documents) == 1
    assert documents[0].content.splitlines() == [" name, age ", " alice, 30 "]


def test_excel_reader_chunks_xlsx_rows_and_preserves_sheet_metadata(tmp_path: Path):
    """Default chunking yields one document per row, each tagged with its sheet."""
    target = _xlsx_path(
        tmp_path,
        "workbook.xlsx",
        {"First": [["name", "age"], ["alice", 30]], "Second": [["city"], ["SF"]]},
    )
    chunked = ExcelReader().read(target)  # chunk=True by default
    assert len(chunked) == 4
    assert {d.meta_data["sheet_name"] for d in chunked} == {"First", "Second"}
    first_rows = sorted(
        d.meta_data["row_number"] for d in chunked if d.meta_data["sheet_name"] == "First"
    )
    assert first_rows == [1, 2]


def test_excel_reader_reads_xls_as_per_sheet_documents(tmp_path: Path):
    """Legacy .xls workbooks also yield one document per non-empty sheet."""
    xlwt = pytest.importorskip("xlwt")
    book = xlwt.Workbook()
    first = book.add_sheet("First")
    for row_idx, row in enumerate((("name", "age"), ("alice", 30))):
        for col_idx, value in enumerate(row):
            first.write(row_idx, col_idx, value)
    second = book.add_sheet("Second")
    second.write(0, 0, "city")
    second.write(1, 0, "SF")
    book.add_sheet("Empty")  # empty sheets must be ignored
    target = tmp_path / "workbook.xls"
    book.save(str(target))

    documents = ExcelReader(chunk=False).read(target)

    assert len(documents) == 2
    assert {d.meta_data["sheet_name"] for d in documents} == {"First", "Second"}
    first_doc = next(d for d in documents if d.meta_data["sheet_name"] == "First")
    assert first_doc.meta_data["sheet_index"] == 1
    assert first_doc.content.splitlines() == ["name, age", "alice, 30"]


@pytest.mark.asyncio
async def test_excel_reader_async_reads_xlsx(tmp_path: Path):
    """async_read mirrors read() for .xlsx input."""
    target = _xlsx_path(
        tmp_path, "async_test.xlsx", {"Data": [["name", "value"], ["test", 42]]}
    )
    documents = await ExcelReader(chunk=False).async_read(target)
    assert len(documents) == 1
    assert documents[0].meta_data["sheet_name"] == "Data"
    assert documents[0].content.splitlines() == ["name, value", "test, 42"]


@pytest.mark.asyncio
async def test_excel_reader_async_reads_xls(tmp_path: Path):
    """async_read mirrors read() for legacy .xls input."""
    xlwt = pytest.importorskip("xlwt")
    book = xlwt.Workbook()
    sheet = book.add_sheet("Data")
    for row_idx, row in enumerate((("name", "value"), ("test", 42))):
        for col_idx, value in enumerate(row):
            sheet.write(row_idx, col_idx, value)
    target = tmp_path / "async_test.xls"
    book.save(str(target))
    documents = await ExcelReader(chunk=False).async_read(target)
    assert len(documents) == 1
    assert documents[0].meta_data["sheet_name"] == "Data"
    assert documents[0].content.splitlines() == ["name, value", "test, 42"]
def _xlsx_stream(sheet_title, rows):
    """Return a rewound BytesIO holding a one-sheet .xlsx workbook."""
    openpyxl = pytest.importorskip("openpyxl")
    book = openpyxl.Workbook()
    book.active.title = sheet_title
    for row in rows:
        book.active.append(row)
    stream = io.BytesIO()
    book.save(stream)
    book.close()
    stream.seek(0)
    return stream


def test_excel_reader_reads_xlsx_from_bytesio_with_name(tmp_path: Path):
    """The stream's own .name attribute supplies the document name."""
    stream = _xlsx_stream("Sheet1", [["col1", "col2"], ["a", "b"]])
    stream.name = "named_workbook.xlsx"
    documents = ExcelReader(chunk=False).read(stream)
    assert len(documents) == 1
    assert documents[0].name == "named_workbook"
    assert documents[0].content.splitlines() == ["col1, col2", "a, b"]


def test_excel_reader_reads_xlsx_from_bytesio_without_name():
    """The name= argument is used when the stream carries no .name."""
    stream = _xlsx_stream("Sheet1", [["col1", "col2"], ["a", "b"]])
    documents = ExcelReader(chunk=False).read(stream, name="fallback.xlsx")
    assert len(documents) == 1
    assert documents[0].name == "fallback"
    assert documents[0].content.splitlines() == ["col1, col2", "a, b"]


def test_excel_reader_raises_error_when_name_has_no_extension():
    """When BytesIO has no name and name param has no extension, reader raises ValueError."""
    stream = _xlsx_stream("Sheet1", [["col1", "col2"], ["a", "b"]])
    with pytest.raises(ValueError, match="Unsupported file extension.*Expected .xlsx or .xls"):
        ExcelReader(chunk=False).read(stream, name="Lorcan_data")


def test_excel_reader_succeeds_when_name_has_extension():
    """When BytesIO has no name but name param has extension, reader works correctly."""
    stream = _xlsx_stream(
        "Products", [["product", "price"], ["Widget", 19.99], ["Gadget", 29.99]]
    )
    # The FIXED scenario: name="Lorcan_data.xlsx" (with extension).
    documents = ExcelReader(chunk=False).read(stream, name="Lorcan_data.xlsx")
    assert len(documents) == 1
    assert documents[0].name == "Lorcan_data"
    assert "Widget" in documents[0].content
    assert "19.99" in documents[0].content
def _one_sheet_xlsx(tmp_path, filename, title, rows):
    """Write a single-sheet .xlsx file under tmp_path and return its path."""
    openpyxl = pytest.importorskip("openpyxl")
    book = openpyxl.Workbook()
    book.active.title = title
    for row in rows:
        book.active.append(row)
    payload = io.BytesIO()
    book.save(payload)
    book.close()
    target = tmp_path / filename
    target.write_bytes(payload.getvalue())
    return target


def test_excel_reader_xlsx_data_types(tmp_path: Path):
    """Floats, int-valued floats, booleans, None and strings render predictably."""
    target = _one_sheet_xlsx(
        tmp_path,
        "types.xlsx",
        "Types",
        [
            ["type", "value"],
            ["float", 3.14],
            ["int_float", 30.0],
            ["boolean_true", True],
            ["boolean_false", False],
            ["none", None],
            ["string", "hello"],
        ],
    )
    documents = ExcelReader(chunk=False).read(target)
    assert len(documents) == 1
    assert documents[0].content.splitlines() == [
        "type, value",
        "float, 3.14",
        "int_float, 30",
        "boolean_true, True",
        "boolean_false, False",
        "none",
        "string, hello",
    ]


def test_excel_reader_xlsx_unicode_content(tmp_path: Path):
    """Non-ASCII text (CJK, emoji) passes through unchanged."""
    target = _one_sheet_xlsx(
        tmp_path,
        "unicode.xlsx",
        "Unicode",
        [
            ["language", "greeting"],
            ["Japanese", "こんにちは"],
            ["Emoji", "Hello 👋🌍"],
            ["Chinese", "你好"],
        ],
    )
    documents = ExcelReader(chunk=False).read(target)
    assert len(documents) == 1
    lines = documents[0].content.splitlines()
    assert lines[1:] == ["Japanese, こんにちは", "Emoji, Hello 👋🌍", "Chinese, 你好"]


def test_excel_reader_xlsx_all_empty_sheets_returns_empty_list(tmp_path: Path):
    """A workbook containing only empty sheets produces no documents at all."""
    openpyxl = pytest.importorskip("openpyxl")
    book = openpyxl.Workbook()
    book.active.title = "Empty1"
    for extra in ("Empty2", "Empty3"):
        book.create_sheet(extra)
    payload = io.BytesIO()
    book.save(payload)
    book.close()
    target = tmp_path / "all_empty.xlsx"
    target.write_bytes(payload.getvalue())
    assert ExcelReader(chunk=False).read(target) == []


def test_excel_reader_xlsx_trims_trailing_empty_cells(tmp_path: Path):
    """Trailing empty cells are trimmed from each rendered row."""
    target = _one_sheet_xlsx(
        tmp_path,
        "trailing.xlsx",
        "Trailing",
        [["a", "b", None, None, None], ["x", None, None]],
    )
    documents = ExcelReader(chunk=False).read(target)
    assert len(documents) == 1
    assert documents[0].content.splitlines() == ["a, b", "x"]
def _xlsx_fixture(tmp_path, filename, title, rows):
    """Persist a single-sheet workbook under tmp_path and return its path."""
    openpyxl = pytest.importorskip("openpyxl")
    book = openpyxl.Workbook()
    book.active.title = title
    for row in rows:
        book.active.append(row)
    payload = io.BytesIO()
    book.save(payload)
    book.close()
    target = tmp_path / filename
    target.write_bytes(payload.getvalue())
    return target


def test_excel_reader_xlsx_skips_empty_rows(tmp_path: Path):
    """Rows consisting only of empty cells are dropped from the output."""
    target = _xlsx_fixture(
        tmp_path,
        "sparse.xlsx",
        "Sparse",
        [["header"], [None, None, None], ["data"], [None], ["more_data"]],
    )
    documents = ExcelReader(chunk=False).read(target)
    assert len(documents) == 1
    assert documents[0].content.splitlines() == ["header", "data", "more_data"]


def test_excel_reader_xlsx_handles_special_characters(tmp_path: Path):
    """Commas, quotes and embedded newlines do not corrupt the rows."""
    target = _xlsx_fixture(
        tmp_path,
        "special.xlsx",
        "Special",
        [
            ["type", "value"],
            ["comma", "a,b,c"],
            ["quote", 'say "hello"'],
            ["newline", "line1\nline2"],
        ],
    )
    documents = ExcelReader(chunk=False).read(target)
    assert len(documents) == 1
    content = documents[0].content
    assert "a,b,c" in content
    assert 'say "hello"' in content
    # Newlines in cells are converted to spaces to preserve row integrity.
    assert "line1 line2" in content


def test_excel_reader_xlsx_datetime_cells_formatted_as_iso(tmp_path: Path):
    """datetime cells render as ISO 8601 rather than their Python repr."""
    target = _xlsx_fixture(
        tmp_path,
        "dates.xlsx",
        "Dates",
        [
            ["type", "value"],
            ["datetime", datetime(2024, 1, 20, 14, 30, 0)],
            ["datetime_midnight", datetime(2024, 12, 25, 0, 0, 0)],
        ],
    )
    documents = ExcelReader(chunk=False).read(target)
    assert len(documents) == 1
    assert documents[0].content.splitlines() == [
        "type, value",
        "datetime, 2024-01-20T14:30:00",
        "datetime_midnight, 2024-12-25T00:00:00",
    ]


@pytest.mark.asyncio
async def test_excel_reader_async_xlsx_with_chunking(tmp_path: Path):
    """async_read honors chunking: one document per row with row numbers."""
    target = _xlsx_fixture(
        tmp_path,
        "async_chunk.xlsx",
        "Data",
        [["name", "value"], ["row1", 100], ["row2", 200], ["row3", 300]],
    )
    documents = await ExcelReader(chunk=True).async_read(target)
    assert len(documents) == 4
    assert all(d.meta_data.get("sheet_name") == "Data" for d in documents)
    assert sorted(d.meta_data.get("row_number") for d in documents) == [1, 2, 3, 4]
def test_excel_reader_xlsx_raises_import_error_when_openpyxl_missing(tmp_path: Path):
    """.xlsx reads surface a clear ImportError when openpyxl is absent."""
    target = tmp_path / "test.xlsx"
    target.write_bytes(b"dummy")
    with patch.dict(sys.modules, {"openpyxl": None}):
        with pytest.raises(ImportError, match="openpyxl"):
            ExcelReader(chunk=False).read(target)


def test_excel_reader_xls_raises_import_error_when_xlrd_missing(tmp_path: Path):
    """.xls reads surface a clear ImportError when xlrd is absent."""
    target = tmp_path / "test.xls"
    target.write_bytes(b"dummy")
    with patch.dict(sys.modules, {"xlrd": None}):
        with pytest.raises(ImportError, match="xlrd"):
            ExcelReader(chunk=False).read(target)


def test_excel_reader_xlsx_corrupted_file_returns_empty_list(tmp_path: Path):
    """Unparseable workbook bytes degrade to an empty result, not a crash."""
    pytest.importorskip("openpyxl")
    target = tmp_path / "corrupted.xlsx"
    target.write_bytes(b"not a valid xlsx file content")
    assert ExcelReader(chunk=False).read(target) == []


def test_excel_reader_xlsx_file_not_found_raises_error(tmp_path: Path):
    """A missing path raises FileNotFoundError instead of returning []."""
    pytest.importorskip("openpyxl")
    with pytest.raises(FileNotFoundError):
        ExcelReader(chunk=False).read(tmp_path / "nonexistent.xlsx")
def _xls_fixture(tmp_path, filename, title, rows):
    """Persist a single-sheet legacy .xls workbook and return its path."""
    xlwt = pytest.importorskip("xlwt")
    book = xlwt.Workbook()
    sheet = book.add_sheet(title)
    for row_idx, row in enumerate(rows):
        for col_idx, value in enumerate(row):
            sheet.write(row_idx, col_idx, value)
    target = tmp_path / filename
    book.save(str(target))
    return target


def test_excel_reader_xls_boolean_cells(tmp_path: Path):
    """Booleans in .xls cells render as True/False text."""
    target = _xls_fixture(
        tmp_path,
        "booleans.xls",
        "Booleans",
        [["name", "in_stock"], ["Widget", True], ["Gadget", False]],
    )
    documents = ExcelReader(chunk=False).read(target)
    assert len(documents) == 1
    assert documents[0].content.splitlines() == [
        "name, in_stock",
        "Widget, True",
        "Gadget, False",
    ]


def test_excel_reader_xls_multiline_content_preserved_as_space(tmp_path: Path):
    """Newlines inside .xls cells collapse to spaces, keeping one row per line."""
    target = _xls_fixture(
        tmp_path,
        "multiline.xls",
        "Multiline",
        [["id", "description"], ["1", "Line1\nLine2\nLine3"]],
    )
    documents = ExcelReader(chunk=False).read(target)
    assert len(documents) == 1
    lines = documents[0].content.splitlines()
    assert len(lines) == 2
    assert lines[1] == "1, Line1 Line2 Line3"


def test_excel_reader_xlsx_multiline_content_preserved_as_space(tmp_path: Path):
    """Newlines inside .xlsx cells collapse to spaces as well."""
    openpyxl = pytest.importorskip("openpyxl")
    book = openpyxl.Workbook()
    for row in (["id", "description"], [1, "Line1\nLine2\nLine3"]):
        book.active.append(row)
    payload = io.BytesIO()
    book.save(payload)
    book.close()
    target = tmp_path / "multiline.xlsx"
    target.write_bytes(payload.getvalue())
    documents = ExcelReader(chunk=False).read(target)
    assert len(documents) == 1
    lines = documents[0].content.splitlines()
    assert len(lines) == 2
    assert lines[1] == "1, Line1 Line2 Line3"


def test_excel_reader_xlsx_carriage_return_normalized(tmp_path: Path):
    """CR and CRLF inside .xlsx cells are normalized to spaces."""
    openpyxl = pytest.importorskip("openpyxl")
    book = openpyxl.Workbook()
    for row in (
        ["type", "value"],
        ["cr_only", "line1\rline2"],
        ["crlf", "line1\r\nline2"],
    ):
        book.active.append(row)
    payload = io.BytesIO()
    book.save(payload)
    book.close()
    target = tmp_path / "carriage_return.xlsx"
    target.write_bytes(payload.getvalue())
    documents = ExcelReader(chunk=False).read(target)
    assert len(documents) == 1
    lines = documents[0].content.splitlines()
    assert len(lines) == 3
    assert lines[1] == "cr_only, line1 line2"
    assert lines[2] == "crlf, line1 line2"


def test_excel_reader_xls_carriage_return_normalized(tmp_path: Path):
    """CR and CRLF inside .xls cells are normalized to spaces."""
    target = _xls_fixture(
        tmp_path,
        "carriage_return.xls",
        "LineEndings",
        [["type", "value"], ["cr_only", "line1\rline2"], ["crlf", "line1\r\nline2"]],
    )
    documents = ExcelReader(chunk=False).read(target)
    assert len(documents) == 1
    lines = documents[0].content.splitlines()
    assert len(lines) == 3
    assert lines[1] == "cr_only, line1 line2"
    assert lines[2] == "crlf, line1 line2"
def test_excel_reader_filter_sheets_by_name(tmp_path: Path):
openpyxl = pytest.importorskip("openpyxl")
workbook = openpyxl.Workbook()
first_sheet = workbook.active
first_sheet.title = "First"
first_sheet.append(["a", "b"])
second_sheet = workbook.create_sheet("Second")
second_sheet.append(["c", "d"])
third_sheet = workbook.create_sheet("Third")
third_sheet.append(["e", "f"])
buffer = io.BytesIO()
workbook.save(buffer)
workbook.close()
file_path = tmp_path / "filter.xlsx"
file_path.write_bytes(buffer.getvalue())
# Read only "First" and "Third" sheets
reader = ExcelReader(chunk=False, sheets=["First", "Third"])
documents = reader.read(file_path)
assert len(documents) == 2
assert {doc.meta_data["sheet_name"] for doc in documents} == {"First", "Third"}
def test_excel_reader_filter_sheets_by_index(tmp_path: Path):
    """1-based index filtering selects only the matching sheets."""
    openpyxl = pytest.importorskip("openpyxl")
    workbook = openpyxl.Workbook()
    workbook.active.title = "First"
    workbook.active.append(["a", "b"])
    workbook.create_sheet("Second").append(["c", "d"])
    workbook.create_sheet("Third").append(["e", "f"])
    file_path = tmp_path / "filter.xlsx"
    workbook.save(str(file_path))
    workbook.close()
    # Indices are 1-based to match the sheet_index metadata
    documents = ExcelReader(chunk=False, sheets=[1, 3]).read(file_path)
    assert len(documents) == 2
    assert {doc.meta_data["sheet_name"] for doc in documents} == {"First", "Third"}
def test_excel_reader_filter_by_index_is_1_based():
    """Index filtering uses 1-based indices to match sheet_index in document metadata."""
    openpyxl = pytest.importorskip("openpyxl")
    workbook = openpyxl.Workbook()
    workbook.active.title = "First"
    workbook.active["A1"] = "first"
    for title, value in (("Second", "second"), ("Third", "third")):
        workbook.create_sheet(title)["A1"] = value
    buffer = io.BytesIO()
    workbook.save(buffer)
    workbook.close()
    buffer.seek(0)
    buffer.name = "test.xlsx"
    # sheets=[1] selects "First" (the sheet whose metadata index is 1)
    documents = ExcelReader(chunk=False, sheets=[1]).read(buffer)
    assert len(documents) == 1
    doc = documents[0]
    assert doc.meta_data["sheet_name"] == "First"
    assert doc.meta_data["sheet_index"] == 1
def test_excel_reader_filter_by_name_case_insensitive():
    """Name filtering is case-insensitive."""
    openpyxl = pytest.importorskip("openpyxl")
    workbook = openpyxl.Workbook()
    workbook.active.title = "Sales"
    workbook.active["A1"] = "data"
    buffer = io.BytesIO()
    workbook.save(buffer)
    workbook.close()
    buffer.seek(0)
    buffer.name = "test.xlsx"
    # Lowercase "sales" must match the "Sales" sheet
    documents = ExcelReader(chunk=False, sheets=["sales"]).read(buffer)
    assert len(documents) == 1
    assert documents[0].meta_data["sheet_name"] == "Sales"
def test_excel_reader_empty_sheets_list_returns_all():
    """An empty `sheets` list behaves like None: every sheet is read."""
    openpyxl = pytest.importorskip("openpyxl")
    workbook = openpyxl.Workbook()
    workbook.active.title = "First"
    workbook.active["A1"] = "first"
    workbook.create_sheet("Second")["A1"] = "second"
    buffer = io.BytesIO()
    workbook.save(buffer)
    workbook.close()
    buffer.seek(0)
    buffer.name = "test.xlsx"
    documents = ExcelReader(chunk=False, sheets=[]).read(buffer)
    assert len(documents) == 2
    assert {doc.meta_data["sheet_name"] for doc in documents} == {"First", "Second"}
def test_excel_reader_unsupported_extension_raises_error(tmp_path: Path):
    """A non-Excel extension raises ValueError with a helpful message."""
    bogus = tmp_path / "file.txt"
    bogus.write_text("not an excel file")
    with pytest.raises(ValueError, match="Unsupported file extension.*Expected .xlsx or .xls"):
        ExcelReader(chunk=False).read(bogus)
def test_excel_reader_xls_date_cells_converted_to_iso(tmp_path: Path):
    """Date/datetime cells in .xls come back ISO-formatted, not as serial numbers."""
    xlwt = pytest.importorskip("xlwt")
    workbook = xlwt.Workbook()
    sheet = workbook.add_sheet("Dates")
    sheet.write(0, 0, "type")
    sheet.write(0, 1, "value")
    # xlwt needs an XFStyle carrying a date number format for date cells
    date_style = xlwt.XFStyle()
    date_style.num_format_str = "YYYY-MM-DD"
    datetime_style = xlwt.XFStyle()
    datetime_style.num_format_str = "YYYY-MM-DD HH:MM:SS"
    sheet.write(1, 0, "date")
    sheet.write(1, 1, datetime(2024, 1, 20), date_style)
    sheet.write(2, 0, "datetime")
    sheet.write(2, 1, datetime(2024, 12, 25, 14, 30, 45), datetime_style)
    file_path = tmp_path / "dates.xls"
    workbook.save(str(file_path))
    documents = ExcelReader(chunk=False).read(file_path)
    assert len(documents) == 1
    lines = documents[0].content.splitlines()
    assert lines[0] == "type, value"
    # ISO dates must appear instead of raw Excel serial numbers
    assert "2024-01-20" in lines[1]
    assert "2024-12-25" in lines[2]
def test_excel_reader_xls_corrupted_file_returns_empty_list(tmp_path: Path):
    """Garbage bytes with an .xls extension yield an empty document list."""
    pytest.importorskip("xlrd")
    bad_file = tmp_path / "corrupted.xls"
    bad_file.write_bytes(b"not a valid xls file content")
    assert ExcelReader(chunk=False).read(bad_file) == []
def test_excel_reader_xls_file_not_found_raises_error(tmp_path: Path):
    """Reading a missing .xls path raises FileNotFoundError."""
    pytest.importorskip("xlrd")
    missing = tmp_path / "nonexistent.xls"
    with pytest.raises(FileNotFoundError):
        ExcelReader(chunk=False).read(missing)
def test_excel_reader_xls_unicode_content(tmp_path: Path):
    """Unicode text in .xls cells round-trips intact."""
    xlwt = pytest.importorskip("xlwt")
    workbook = xlwt.Workbook()
    sheet = workbook.add_sheet("Unicode")
    rows = [
        ("language", "greeting"),
        ("Japanese", "こんにちは"),
        ("Chinese", "你好"),
        ("German", "Größe"),
    ]
    for r, row in enumerate(rows):
        for c, cell in enumerate(row):
            sheet.write(r, c, cell)
    file_path = tmp_path / "unicode.xls"
    workbook.save(str(file_path))
    documents = ExcelReader(chunk=False).read(file_path)
    assert len(documents) == 1
    lines = documents[0].content.splitlines()
    assert lines[1] == "Japanese, こんにちは"
    assert lines[2] == "Chinese, 你好"
    assert lines[3] == "German, Größe"
def test_excel_reader_xls_filter_sheets_by_name(tmp_path: Path):
    """Only the named sheets of an .xls workbook are read."""
    xlwt = pytest.importorskip("xlwt")
    workbook = xlwt.Workbook()
    for title, cells in (("First", ("a", "b")), ("Second", ("c", "d")), ("Third", ("e", "f"))):
        sheet = workbook.add_sheet(title)
        for col, value in enumerate(cells):
            sheet.write(0, col, value)
    file_path = tmp_path / "filter.xls"
    workbook.save(str(file_path))
    # Request "First" and "Third" only; "Second" must be skipped
    documents = ExcelReader(chunk=False, sheets=["First", "Third"]).read(file_path)
    assert len(documents) == 2
    assert {doc.meta_data["sheet_name"] for doc in documents} == {"First", "Third"}
def test_excel_reader_xls_filter_sheets_by_index(tmp_path: Path):
    """1-based index filtering works for .xls workbooks too."""
    xlwt = pytest.importorskip("xlwt")
    workbook = xlwt.Workbook()
    for title, cells in (("First", ("a", "b")), ("Second", ("c", "d")), ("Third", ("e", "f"))):
        sheet = workbook.add_sheet(title)
        for col, value in enumerate(cells):
            sheet.write(0, col, value)
    file_path = tmp_path / "filter.xls"
    workbook.save(str(file_path))
    # Indices are 1-based to match the sheet_index metadata
    documents = ExcelReader(chunk=False, sheets=[1, 3]).read(file_path)
    assert len(documents) == 2
    assert {doc.meta_data["sheet_name"] for doc in documents} == {"First", "Third"}
def test_excel_reader_xls_from_bytesio(tmp_path: Path):
    """An .xls workbook can be read straight from a named BytesIO."""
    xlwt = pytest.importorskip("xlwt")
    workbook = xlwt.Workbook()
    sheet = workbook.add_sheet("Data")
    rows = [("col1", "col2"), ("a", "b")]
    for r, row in enumerate(rows):
        for c, cell in enumerate(row):
            sheet.write(r, c, cell)
    buffer = io.BytesIO()
    workbook.save(buffer)
    buffer.seek(0)
    buffer.name = "test.xls"  # extension drives format detection
    documents = ExcelReader(chunk=False).read(buffer)
    assert len(documents) == 1
    assert documents[0].name == "test"
    assert documents[0].content.splitlines() == ["col1, col2", "a, b"]
def test_excel_reader_xls_data_types(tmp_path: Path):
    """Floats, whole floats, strings, and empty cells render as expected."""
    xlwt = pytest.importorskip("xlwt")
    workbook = xlwt.Workbook()
    sheet = workbook.add_sheet("Types")
    rows = [
        ("type", "value"),
        ("float", 3.14),
        ("int_float", 30.0),
        ("string", "hello"),
        ("empty", None),
    ]
    for r, row in enumerate(rows):
        for c, cell in enumerate(row):
            sheet.write(r, c, cell)
    file_path = tmp_path / "types.xls"
    workbook.save(str(file_path))
    documents = ExcelReader(chunk=False).read(file_path)
    assert len(documents) == 1
    lines = documents[0].content.splitlines()
    assert lines[0] == "type, value"
    assert lines[1] == "float, 3.14"
    assert lines[2] == "int_float, 30"  # whole floats render without the decimal
    assert lines[3] == "string, hello"
    assert lines[4] == "empty"  # trailing empty cell is trimmed
def test_excel_reader_xls_special_characters(tmp_path: Path):
    """Commas and quotes inside cells survive the .xls round-trip."""
    xlwt = pytest.importorskip("xlwt")
    workbook = xlwt.Workbook()
    sheet = workbook.add_sheet("Special")
    rows = [("type", "value"), ("comma", "a,b,c"), ("quote", 'say "hello"')]
    for r, row in enumerate(rows):
        for c, cell in enumerate(row):
            sheet.write(r, c, cell)
    file_path = tmp_path / "special.xls"
    workbook.save(str(file_path))
    documents = ExcelReader(chunk=False).read(file_path)
    assert len(documents) == 1
    content = documents[0].content
    assert "a,b,c" in content
    assert 'say "hello"' in content
def test_excel_reader_xls_trims_trailing_empty_cells(tmp_path: Path):
    """Trailing empty cells are trimmed from each output row."""
    xlwt = pytest.importorskip("xlwt")
    workbook = xlwt.Workbook()
    sheet = workbook.add_sheet("Trailing")
    sheet.write(0, 0, "a")
    sheet.write(0, 1, "b")
    # C1 onward left empty in row 0; B2 onward left empty in row 1
    sheet.write(1, 0, "x")
    file_path = tmp_path / "trailing.xls"
    workbook.save(str(file_path))
    documents = ExcelReader(chunk=False).read(file_path)
    assert len(documents) == 1
    lines = documents[0].content.splitlines()
    assert lines[0] == "a, b"
    assert lines[1] == "x"
def test_excel_reader_xls_skips_empty_rows(tmp_path: Path):
    """Fully empty rows are dropped from the output."""
    xlwt = pytest.importorskip("xlwt")
    workbook = xlwt.Workbook()
    sheet = workbook.add_sheet("Sparse")
    # Rows 1 and 3 are intentionally left empty
    for row_idx, value in ((0, "header"), (2, "data"), (4, "more_data")):
        sheet.write(row_idx, 0, value)
    file_path = tmp_path / "sparse.xls"
    workbook.save(str(file_path))
    documents = ExcelReader(chunk=False).read(file_path)
    assert len(documents) == 1
    assert documents[0].content.splitlines() == ["header", "data", "more_data"]
def test_excel_reader_xls_all_empty_sheets_returns_empty_list(tmp_path: Path):
    """A workbook whose sheets hold no data yields no documents."""
    xlwt = pytest.importorskip("xlwt")
    workbook = xlwt.Workbook()
    for title in ("Empty1", "Empty2"):
        workbook.add_sheet(title)
    file_path = tmp_path / "all_empty.xls"
    workbook.save(str(file_path))
    assert ExcelReader(chunk=False).read(file_path) == []
def test_excel_reader_xls_chunks_rows_for_rag(tmp_path: Path):
    """With chunking on, every spreadsheet row becomes its own document."""
    xlwt = pytest.importorskip("xlwt")
    workbook = xlwt.Workbook()
    sheet = workbook.add_sheet("Products")
    rows = [
        ("name", "category", "price"),
        ("Widget A", "Electronics", 99.99),
        ("Widget B", "Home", 49.99),
        ("Widget C", "Electronics", 149.99),
    ]
    for r, row in enumerate(rows):
        for c, cell in enumerate(row):
            sheet.write(r, c, cell)
    file_path = tmp_path / "products.xls"
    workbook.save(str(file_path))
    documents = ExcelReader(chunk=True).read(file_path)  # default chunking
    assert len(documents) == 4  # header + 3 data rows
    assert all(doc.meta_data.get("sheet_name") == "Products" for doc in documents)
    assert sorted(doc.meta_data.get("row_number") for doc in documents) == [1, 2, 3, 4]
@pytest.mark.asyncio
async def test_excel_reader_xls_async_with_chunking(tmp_path: Path):
    """async_read chunks .xls rows just like the sync path."""
    xlwt = pytest.importorskip("xlwt")
    workbook = xlwt.Workbook()
    sheet = workbook.add_sheet("Data")
    rows = [("id", "value"), ("1", "first"), ("2", "second")]
    for r, row in enumerate(rows):
        for c, cell in enumerate(row):
            sheet.write(r, c, cell)
    file_path = tmp_path / "async_chunk.xls"
    workbook.save(str(file_path))
    documents = await ExcelReader(chunk=True).async_read(file_path)
    assert len(documents) == 3
    assert all(doc.meta_data.get("sheet_name") == "Data" for doc in documents)
def test_excel_reader_xls_numeric_edge_cases(tmp_path: Path):
    """Large, negative, huge, and tiny numbers survive the .xls round-trip."""
    xlwt = pytest.importorskip("xlwt")
    workbook = xlwt.Workbook()
    sheet = workbook.add_sheet("Financial")
    rows = [
        ("type", "amount"),
        ("revenue", 1234567890.50),    # large number
        ("refund", -5000.25),          # negative number
        ("market_cap", 1.5e12),        # scientific notation
        ("rate", 0.0325),              # very small number
    ]
    for r, row in enumerate(rows):
        for c, cell in enumerate(row):
            sheet.write(r, c, cell)
    file_path = tmp_path / "financial.xls"
    workbook.save(str(file_path))
    documents = ExcelReader(chunk=False).read(file_path)
    assert len(documents) == 1
    lines = documents[0].content.splitlines()
    assert "1234567890.5" in lines[1]
    assert "-5000.25" in lines[2]
    assert "1500000000000" in lines[3] or "1.5e" in lines[3].lower()
    assert "0.0325" in lines[4]
def test_excel_reader_xls_long_text_cells(tmp_path: Path):
    """A multi-sentence description cell is preserved in full."""
    xlwt = pytest.importorskip("xlwt")
    workbook = xlwt.Workbook()
    sheet = workbook.add_sheet("Products")
    sheet.write(0, 0, "name")
    sheet.write(0, 1, "description")
    long_description = (
        "This premium widget features advanced technology with "
        "multiple connectivity options including WiFi, Bluetooth, "
        "and NFC. Perfect for home automation, smart home integration, "
        "and IoT applications. Includes 2-year warranty and 24/7 support."
    )
    sheet.write(1, 0, "Smart Widget Pro")
    sheet.write(1, 1, long_description)
    file_path = tmp_path / "catalog.xls"
    workbook.save(str(file_path))
    documents = ExcelReader(chunk=False).read(file_path)
    assert len(documents) == 1
    content = documents[0].content
    for fragment in ("Smart Widget Pro", "premium widget", "24/7 support"):
        assert fragment in content
    assert len(content) > 200
def test_excel_reader_xls_multi_sheet_chunking(tmp_path: Path):
    """Chunking spans all sheets: each row of each sheet is a document."""
    xlwt = pytest.importorskip("xlwt")
    workbook = xlwt.Workbook()
    sheet_rows = (
        ("Sales", [("product", "amount"), ("Widget", 1000)]),
        ("Inventory", [("item", "stock"), ("Widget", 50)]),
    )
    for title, rows in sheet_rows:
        sheet = workbook.add_sheet(title)
        for r, row in enumerate(rows):
            for c, cell in enumerate(row):
                sheet.write(r, c, cell)
    file_path = tmp_path / "report.xls"
    workbook.save(str(file_path))
    documents = ExcelReader(chunk=True).read(file_path)
    # 2 rows per sheet = 4 total documents
    assert len(documents) == 4
    assert {doc.meta_data["sheet_name"] for doc in documents} == {"Sales", "Inventory"}
def test_excel_reader_xls_wide_table(tmp_path: Path):
    """All 20 columns of a wide .xls sheet appear in the output."""
    xlwt = pytest.importorskip("xlwt")
    workbook = xlwt.Workbook()
    sheet = workbook.add_sheet("Wide")
    for col in range(20):
        sheet.write(0, col, f"col_{col}")
        sheet.write(1, col, f"val_{col}")
    file_path = tmp_path / "wide.xls"
    workbook.save(str(file_path))
    documents = ExcelReader(chunk=False).read(file_path)
    assert len(documents) == 1
    lines = documents[0].content.splitlines()
    # First and last column survive in both header and data rows
    assert "col_0" in lines[0]
    assert "col_19" in lines[0]
    assert "val_0" in lines[1]
    assert "val_19" in lines[1]
def test_excel_reader_stress_large_sheet_xlsx(tmp_path: Path):
    """1000 data rows chunk into 1001 documents (header included)."""
    openpyxl = pytest.importorskip("openpyxl")
    workbook = openpyxl.Workbook()
    sheet = workbook.active
    sheet.title = "LargeData"
    sheet.append(["id", "name", "description", "price", "category"])
    for i in range(1000):
        sheet.append([i, f"Product_{i}", f"Description for product {i}", 99.99 + i, f"Category_{i % 10}"])
    file_path = tmp_path / "large.xlsx"
    workbook.save(str(file_path))
    workbook.close()
    documents = ExcelReader(chunk=True).read(file_path)
    assert len(documents) == 1001  # header + 1000 data rows
    assert all(doc.meta_data.get("sheet_name") == "LargeData" for doc in documents)
def test_excel_reader_stress_large_sheet_xls(tmp_path: Path):
    """1000 .xls data rows chunk into 1001 documents (header included)."""
    xlwt = pytest.importorskip("xlwt")
    workbook = xlwt.Workbook()
    sheet = workbook.add_sheet("LargeData")
    for col, header in enumerate(["id", "name", "description", "price", "category"]):
        sheet.write(0, col, header)
    for i in range(1000):
        row = (i, f"Product_{i}", f"Description for product {i}", 99.99 + i, f"Category_{i % 10}")
        for col, cell in enumerate(row):
            sheet.write(i + 1, col, cell)
    file_path = tmp_path / "large.xls"
    workbook.save(str(file_path))
    documents = ExcelReader(chunk=True).read(file_path)
    assert len(documents) == 1001  # header + 1000 data rows
def test_excel_reader_stress_wide_table_50_cols(tmp_path: Path):
    """A 50-column, 100-row sheet reads into a single unchunked document."""
    openpyxl = pytest.importorskip("openpyxl")
    workbook = openpyxl.Workbook()
    sheet = workbook.active
    sheet.title = "WideData"
    sheet.append([f"col_{i}" for i in range(50)])
    for row in range(100):
        sheet.append([f"row{row}_col{col}" for col in range(50)])
    file_path = tmp_path / "wide.xlsx"
    workbook.save(str(file_path))
    workbook.close()
    documents = ExcelReader(chunk=False).read(file_path)
    assert len(documents) == 1
    content = documents[0].content
    assert "col_0" in content
    assert "col_49" in content
@pytest.mark.asyncio
async def test_excel_reader_stress_concurrent_reads(tmp_path: Path):
    """Five workbooks can be read concurrently via async_read."""
    import asyncio

    openpyxl = pytest.importorskip("openpyxl")
    paths = []
    for i in range(5):
        workbook = openpyxl.Workbook()
        sheet = workbook.active
        sheet.title = f"Data{i}"
        sheet.append(["id", "value"])
        for j in range(100):
            sheet.append([j, f"file{i}_value{j}"])
        path = tmp_path / f"concurrent_{i}.xlsx"
        workbook.save(str(path))
        workbook.close()
        paths.append(path)
    reader = ExcelReader(chunk=True)
    results = await asyncio.gather(*(reader.async_read(p) for p in paths))
    assert len(results) == 5
    for docs in results:
        assert len(docs) == 101  # header + 100 rows
def test_excel_reader_stress_multi_sheet_large(tmp_path: Path):
    """Five sheets of 200 rows each chunk into 1005 documents."""
    openpyxl = pytest.importorskip("openpyxl")
    workbook = openpyxl.Workbook()
    sheet_names = ["Q1_Sales", "Q2_Sales", "Q3_Sales", "Q4_Sales", "Annual_Summary"]
    for idx, name in enumerate(sheet_names):
        if idx == 0:
            sheet = workbook.active
            sheet.title = name
        else:
            sheet = workbook.create_sheet(name)
        sheet.append(["region", "product", "revenue", "units"])
        for row in range(200):
            sheet.append([f"Region_{row % 5}", f"Product_{row}", row * 1000, row * 10])
    file_path = tmp_path / "multi_sheet.xlsx"
    workbook.save(str(file_path))
    workbook.close()
    documents = ExcelReader(chunk=True).read(file_path)
    # 5 sheets x 201 rows (header + 200 data) = 1005 documents
    assert len(documents) == 1005
    assert {doc.meta_data["sheet_name"] for doc in documents} == set(sheet_names)
def test_excel_reader_stress_mixed_types_1000_rows(tmp_path: Path):
    """1000 rows mixing int, float, bool, datetime, and text chunk cleanly."""
    openpyxl = pytest.importorskip("openpyxl")
    workbook = openpyxl.Workbook()
    sheet = workbook.active
    sheet.title = "MixedTypes"
    sheet.append(["id", "name", "price", "in_stock", "created_date", "description"])
    for i in range(1000):
        sheet.append([
            i,
            f"Product_{i}",
            99.99 + (i * 0.01),
            i % 2 == 0,  # Boolean
            datetime(2024, (i % 12) + 1, (i % 28) + 1),
            f"Long description for product {i} with multiple words",
        ])
    file_path = tmp_path / "mixed_types.xlsx"
    workbook.save(str(file_path))
    workbook.close()
    documents = ExcelReader(chunk=True).read(file_path)
    assert len(documents) == 1001
def test_excel_reader_xlsx_formula_cells_return_values(tmp_path: Path):
    """Formula cells do not break reading; literal cells remain present."""
    openpyxl = pytest.importorskip("openpyxl")
    workbook = openpyxl.Workbook()
    sheet = workbook.active
    sheet.title = "Formulas"
    for row in (
        ["item", "quantity", "unit_price", "total"],
        ["Widget A", 10, 5.00, "=B2*C2"],
        ["Widget B", 5, 10.00, "=B3*C3"],
        ["", "", "Grand Total", "=SUM(D2:D3)"],
    ):
        sheet.append(row)
    file_path = tmp_path / "formulas.xlsx"
    workbook.save(str(file_path))
    workbook.close()
    documents = ExcelReader(chunk=False).read(file_path)
    assert len(documents) == 1
    # openpyxl with data_only=True yields None for formulas in freshly created
    # files (Excel never computed them); real Excel files would carry values.
    assert "Widget A" in documents[0].content
def test_excel_reader_xlsx_merged_cells_return_top_left_value(tmp_path: Path):
    """A merged range contributes its top-left value to the output."""
    openpyxl = pytest.importorskip("openpyxl")
    workbook = openpyxl.Workbook()
    sheet = workbook.active
    sheet.title = "Merged"
    sheet["A1"] = "Merged Header"
    sheet.merge_cells("A1:C1")
    cells = (("A2", "col1"), ("B2", "col2"), ("C2", "col3"),
             ("A3", "data1"), ("B3", "data2"), ("C3", "data3"))
    for ref, value in cells:
        sheet[ref] = value
    file_path = tmp_path / "merged.xlsx"
    workbook.save(str(file_path))
    workbook.close()
    documents = ExcelReader(chunk=False).read(file_path)
    assert len(documents) == 1
    assert "Merged Header" in documents[0].content
def test_excel_reader_xlsx_leading_zeros_in_text_cells(tmp_path: Path):
    """Text cells keep their leading zeros (SKUs, barcodes)."""
    openpyxl = pytest.importorskip("openpyxl")
    workbook = openpyxl.Workbook()
    sheet = workbook.active
    sheet.title = "SKUs"
    for row in (
        ["sku", "name", "barcode"],
        ["00123", "Widget A", "0001234567890"],
        ["007", "Widget B", "0009876543210"],
        ["0001", "Widget C", "0000000000001"],
    ):
        sheet.append(row)
    file_path = tmp_path / "skus.xlsx"
    workbook.save(str(file_path))
    workbook.close()
    documents = ExcelReader(chunk=False).read(file_path)
    assert len(documents) == 1
    content = documents[0].content
    for sku in ("00123", "007", "0001"):
        assert sku in content
def test_excel_reader_xlsx_error_cells_returned_as_strings(tmp_path: Path):
    """Excel error markers stored as text come through verbatim."""
    openpyxl = pytest.importorskip("openpyxl")
    workbook = openpyxl.Workbook()
    sheet = workbook.active
    sheet.title = "Errors"
    sheet.append(["type", "value"])
    # Error values written as plain strings (what Excel would display)
    for label, marker in (
        ("div_zero", "#DIV/0!"),
        ("ref_error", "#REF!"),
        ("na_error", "#N/A"),
        ("value_error", "#VALUE!"),
        ("name_error", "#NAME?"),
    ):
        sheet.append([label, marker])
    file_path = tmp_path / "errors.xlsx"
    workbook.save(str(file_path))
    workbook.close()
    documents = ExcelReader(chunk=False).read(file_path)
    assert len(documents) == 1
    content = documents[0].content
    for marker in ("#DIV/0!", "#REF!", "#N/A"):
        assert marker in content
def test_excel_reader_xlsx_large_numbers_preserved(tmp_path: Path):
    """Large, negative, and tiny numeric values survive the round-trip."""
    openpyxl = pytest.importorskip("openpyxl")
    workbook = openpyxl.Workbook()
    sheet = workbook.active
    sheet.title = "Financial"
    for row in (
        ["type", "amount"],
        ["revenue", 1234567890.50],
        ["market_cap", 2500000000],
        ["refund", -5000.25],
        ["small", 0.0001],
    ):
        sheet.append(row)
    file_path = tmp_path / "financial.xlsx"
    workbook.save(str(file_path))
    workbook.close()
    documents = ExcelReader(chunk=False).read(file_path)
    assert len(documents) == 1
    content = documents[0].content
    assert "1234567890.5" in content
    assert "2500000000" in content
    assert "-5000.25" in content
def test_excel_reader_xlsx_whitespace_only_cells_handled(tmp_path: Path):
    """Cells holding only spaces/tabs/newlines do not break reading."""
    openpyxl = pytest.importorskip("openpyxl")
    workbook = openpyxl.Workbook()
    sheet = workbook.active
    sheet.title = "Whitespace"
    for row in (
        ["id", "name", "notes"],
        [1, "Widget", " "],      # spaces only
        [2, "Gadget", "\t\t"],   # tabs only
        [3, "Item", " \n "],     # mixed whitespace
    ):
        sheet.append(row)
    file_path = tmp_path / "whitespace.xlsx"
    workbook.save(str(file_path))
    workbook.close()
    documents = ExcelReader(chunk=False).read(file_path)
    assert len(documents) == 1
    content = documents[0].content
    assert "Widget" in content
    assert "Gadget" in content
def test_excel_reader_xlsx_large_cell_content(tmp_path: Path):
    """A long multi-sentence description cell is preserved in full."""
    openpyxl = pytest.importorskip("openpyxl")
    workbook = openpyxl.Workbook()
    sheet = workbook.active
    sheet.title = "Products"
    sheet.append(["name", "description"])
    long_desc = (
        "This premium enterprise widget features cutting-edge technology with "
        "advanced AI-powered automation capabilities. It includes seamless integration "
        "with existing enterprise systems, real-time analytics dashboard, comprehensive "
        "reporting suite, multi-language support for global deployments, and 24/7 "
        "enterprise support with guaranteed SLA. The widget also supports custom "
        "configurations, API access for third-party integrations, and complies with "
        "SOC 2, GDPR, and HIPAA regulations. Perfect for large-scale deployments."
    )
    sheet.append(["Enterprise Widget Pro", long_desc])
    file_path = tmp_path / "products.xlsx"
    workbook.save(str(file_path))
    workbook.close()
    documents = ExcelReader(chunk=False).read(file_path)
    assert len(documents) == 1
    content = documents[0].content
    for fragment in ("Enterprise Widget Pro", "AI-powered automation", "HIPAA regulations"):
        assert fragment in content
def test_excel_reader_xlsx_sparse_data_with_gaps(tmp_path: Path):
    """Cells scattered across non-adjacent rows/columns are all captured."""
    openpyxl = pytest.importorskip("openpyxl")
    workbook = openpyxl.Workbook()
    sheet = workbook.active
    sheet.title = "Sparse"
    cells = (("A1", "a"), ("C1", "c"), ("E1", "e"),
             ("A3", "data1"), ("C3", "data2"), ("A5", "more"))
    for ref, value in cells:
        sheet[ref] = value
    file_path = tmp_path / "sparse.xlsx"
    workbook.save(str(file_path))
    workbook.close()
    documents = ExcelReader(chunk=False).read(file_path)
    assert len(documents) == 1
    content = documents[0].content
    for value in ("a", "c", "data1", "more"):
        assert value in content
def test_excel_reader_xls_error_cells_returned_as_strings(tmp_path: Path):
    """XLS: Excel error markers stored as text come through verbatim."""
    xlwt = pytest.importorskip("xlwt")
    workbook = xlwt.Workbook()
    sheet = workbook.add_sheet("Errors")
    rows = [
        ("type", "value"),
        ("div_zero", "#DIV/0!"),
        ("ref_error", "#REF!"),
        ("na_error", "#N/A"),
    ]
    for r, row in enumerate(rows):
        for c, cell in enumerate(row):
            sheet.write(r, c, cell)
    file_path = tmp_path / "errors.xls"
    workbook.save(str(file_path))
    documents = ExcelReader(chunk=False).read(file_path)
    assert len(documents) == 1
    content = documents[0].content
    assert "#DIV/0!" in content
    assert "#REF!" in content
def test_excel_reader_xls_leading_zeros_in_text_cells(tmp_path: Path):
    """XLS: text cells keep their leading zeros (SKUs)."""
    xlwt = pytest.importorskip("xlwt")
    workbook = xlwt.Workbook()
    sheet = workbook.add_sheet("SKUs")
    rows = [
        ("sku", "name"),
        ("00123", "Widget A"),
        ("007", "Widget B"),
        ("0001", "Widget C"),
    ]
    for r, row in enumerate(rows):
        for c, cell in enumerate(row):
            sheet.write(r, c, cell)
    file_path = tmp_path / "skus.xls"
    workbook.save(str(file_path))
    documents = ExcelReader(chunk=False).read(file_path)
    assert len(documents) == 1
    content = documents[0].content
    for sku in ("00123", "007", "0001"):
        assert sku in content
def test_excel_reader_xls_large_cell_content(tmp_path: Path):
    """XLS: a long description cell is preserved in full."""
    xlwt = pytest.importorskip("xlwt")
    workbook = xlwt.Workbook()
    sheet = workbook.add_sheet("Products")
    long_desc = (
        "This premium enterprise widget features cutting-edge technology with "
        "advanced AI-powered automation capabilities and comprehensive reporting."
    )
    rows = [("name", "description"), ("Enterprise Widget", long_desc)]
    for r, row in enumerate(rows):
        for c, cell in enumerate(row):
            sheet.write(r, c, cell)
    file_path = tmp_path / "products.xls"
    workbook.save(str(file_path))
    documents = ExcelReader(chunk=False).read(file_path)
    assert len(documents) == 1
    content = documents[0].content
    assert "Enterprise Widget" in content
    assert "AI-powered automation" in content
def test_excel_reader_xlsx_non_ascii_via_bytesio():
    """Non-ASCII Unicode content should be preserved when reading via BytesIO."""
    openpyxl = pytest.importorskip("openpyxl")
    workbook = openpyxl.Workbook()
    sheet = workbook.active
    sheet.title = "International"
    for row in (
        ["language", "greeting", "description"],
        ["Chinese", "你好世界", "中文描述"],
        ["Japanese", "こんにちは", "日本語の説明"],
        ["Korean", "안녕하세요", "한국어 설명"],
        ["Arabic", "مرحبا", "وصف عربي"],
        ["Russian", "Привет", "Русское описание"],
        ["Emoji", "Hello 👋🌍", "Description with 🎉 emoji"],
    ):
        sheet.append(row)
    buffer = io.BytesIO()
    workbook.save(buffer)
    workbook.close()
    buffer.seek(0)
    buffer.name = "international.xlsx"
    documents = ExcelReader(chunk=False).read(buffer)
    assert len(documents) == 1
    content = documents[0].content
    # Every non-ASCII string must survive intact
    for text in ("你好世界", "こんにちは", "안녕하세요", "مرحبا", "Привет", "👋🌍", "🎉"):
        assert text in content
def test_excel_reader_xls_non_ascii_via_bytesio():
    """XLS: Non-ASCII Unicode content should be preserved when reading via BytesIO."""
    xlwt = pytest.importorskip("xlwt")
    workbook = xlwt.Workbook()
    sheet = workbook.add_sheet("International")
    rows = [
        ("language", "greeting"),
        ("Chinese", "你好世界"),
        ("Japanese", "こんにちは"),
        ("Russian", "Привет мир"),
        ("German", "Größenmaßstab"),
    ]
    for r, row in enumerate(rows):
        for c, cell in enumerate(row):
            sheet.write(r, c, cell)
    buffer = io.BytesIO()
    workbook.save(buffer)
    buffer.seek(0)
    buffer.name = "international.xls"
    documents = ExcelReader(chunk=False).read(buffer)
    assert len(documents) == 1
    content = documents[0].content
    # Every non-ASCII string must survive intact
    for text in ("你好世界", "こんにちは", "Привет", "Größenmaßstab"):
        assert text in content
@pytest.mark.asyncio
async def test_excel_reader_xlsx_non_ascii_via_bytesio_async():
    """Async: Non-ASCII Unicode content should be preserved when reading via BytesIO."""
    openpyxl = pytest.importorskip("openpyxl")
    workbook = openpyxl.Workbook()
    sheet = workbook.active
    sheet.title = "International"
    for row in (
        ["language", "greeting"],
        ["Chinese", "你好世界"],
        ["Japanese", "こんにちは"],
        ["Emoji", "Hello 👋🌍"],
    ):
        sheet.append(row)
    buffer = io.BytesIO()
    workbook.save(buffer)
    workbook.close()
    buffer.seek(0)
    buffer.name = "international.xlsx"
    documents = await ExcelReader(chunk=False).async_read(buffer)
    assert len(documents) == 1
    content = documents[0].content
    for text in ("你好世界", "こんにちは", "👋🌍"):
        assert text in content
def test_excel_reader_bytesio_with_empty_name_uses_name_param():
    """An empty buffer .name falls back to the `name` argument for the workbook name."""
    openpyxl = pytest.importorskip("openpyxl")
    workbook = openpyxl.Workbook()
    sheet = workbook.active
    sheet.title = "Sheet1"
    sheet.append(["col1", "col2"])
    sheet.append(["a", "b"])
    buffer = io.BytesIO()
    workbook.save(buffer)
    workbook.close()
    buffer.seek(0)
    buffer.name = ""  # empty string must be ignored
    documents = ExcelReader(chunk=False).read(buffer, name="fallback_name.xlsx")
    assert len(documents) == 1
    # Workbook name comes from the name param since buffer.name is empty
    assert documents[0].name == "fallback_name"
def test_excel_reader_bytesio_with_none_name_uses_name_param():
    """A buffer whose .name is None falls back to the `name` argument."""
    openpyxl = pytest.importorskip("openpyxl")
    workbook = openpyxl.Workbook()
    sheet = workbook.active
    sheet.title = "Sheet1"
    sheet.append(["col1", "col2"])
    sheet.append(["a", "b"])
    buffer = io.BytesIO()
    workbook.save(buffer)
    workbook.close()
    buffer.seek(0)
    buffer.name = None  # type: ignore[assignment] # explicitly set to None
    documents = ExcelReader(chunk=False).read(buffer, name="fallback_name.xlsx")
    assert len(documents) == 1
    # Workbook name comes from the name param since buffer.name is None
    assert documents[0].name == "fallback_name"
def test_excel_reader_bytesio_no_name_no_param_uses_default():
    """BytesIO without a ``.name`` attribute falls back to the ``name`` param.

    NOTE(review): the test name suggests the default "workbook" name is
    exercised with *no* name param, but the body does pass ``name="data.xlsx"``
    and asserts that it is used. Consider renaming the test or dropping the
    name param so the name matches the behavior under test.
    """
    openpyxl = pytest.importorskip("openpyxl")
    workbook = openpyxl.Workbook()
    sheet = workbook.active
    sheet.title = "Sheet1"
    sheet.append(["col1", "col2"])
    sheet.append(["a", "b"])
    buffer = io.BytesIO()
    workbook.save(buffer)
    workbook.close()
    buffer.seek(0)
    # BytesIO has no .name attribute by default, so both the extension and
    # the workbook name can only come from the explicit name param.
    reader = ExcelReader(chunk=False)
    documents = reader.read(buffer, name="data.xlsx")
    assert len(documents) == 1
    # Workbook name is derived from the name param with its extension stripped.
    assert documents[0].name == "data"
def _write_workbook_to_disk(workbook, target: Path) -> None:
    """Serialize an openpyxl workbook to ``target`` and close it."""
    payload = io.BytesIO()
    workbook.save(payload)
    workbook.close()
    target.write_bytes(payload.getvalue())


def test_sheets_filter_with_nonexistent_sheet_returns_empty(tmp_path: Path):
    """Filtering to a nonexistent sheet returns empty list."""
    openpyxl = pytest.importorskip("openpyxl")
    wb = openpyxl.Workbook()
    wb.active.title = "Data"
    wb.active.append(["col1", "col2"])
    xlsx_path = tmp_path / "test.xlsx"
    _write_workbook_to_disk(wb, xlsx_path)
    assert ExcelReader(chunk=False, sheets=["NonexistentSheet"]).read(xlsx_path) == []


def test_sheets_filter_with_out_of_range_index_returns_empty(tmp_path: Path):
    """Filtering to an out-of-range index returns empty list."""
    openpyxl = pytest.importorskip("openpyxl")
    wb = openpyxl.Workbook()
    wb.active.title = "Data"
    wb.active.append(["col1", "col2"])
    xlsx_path = tmp_path / "test.xlsx"
    _write_workbook_to_disk(wb, xlsx_path)
    assert ExcelReader(chunk=False, sheets=[99]).read(xlsx_path) == []


def test_mixed_sheet_name_and_index_filter(tmp_path: Path):
    """Filtering with both names and indices works."""
    openpyxl = pytest.importorskip("openpyxl")
    wb = openpyxl.Workbook()
    wb.active.title = "First"
    wb.active.append(["first"])
    wb.create_sheet("Second").append(["second"])
    wb.create_sheet("Third").append(["third"])
    xlsx_path = tmp_path / "test.xlsx"
    _write_workbook_to_disk(wb, xlsx_path)
    # Mix a sheet name ("First") with a 1-based index (3 -> "Third").
    docs = ExcelReader(chunk=False, sheets=["First", 3]).read(xlsx_path)
    assert len(docs) == 2
    assert {doc.meta_data["sheet_name"] for doc in docs} == {"First", "Third"}
def test_excel_reader_xls_encoding_parameter_passed_to_xlrd(tmp_path: Path):
    """Encoding parameter should be passed through to xlrd."""
    xlwt = pytest.importorskip("xlwt")
    legacy_book = xlwt.Workbook()
    legacy_sheet = legacy_book.add_sheet("Data")
    # Write a small 2x2 grid of cells into the legacy .xls sheet.
    for (row, col), value in (
        ((0, 0), "header"),
        ((0, 1), "value"),
        ((1, 0), "name"),
        ((1, 1), "test"),
    ):
        legacy_sheet.write(row, col, value)
    xls_path = tmp_path / "encoding_test.xls"
    legacy_book.save(str(xls_path))
    docs = ExcelReader(encoding="utf-8", chunk=False).read(xls_path)
    assert len(docs) == 1
    assert "name" in docs[0].content
    assert "test" in docs[0].content
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/knowledge/test_excel_reader.py",
"license": "Apache License 2.0",
"lines": 1506,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:cookbook/07_knowledge/cloud/cloud_agentos.py | """
Cloud Content Sources with AgentOS
============================================================
Sets up an AgentOS app with Knowledge connected to multiple cloud
storage backends (S3, SharePoint, GitHub, Azure Blob; a GCS config can be added the same way).
Once running, the AgentOS API lets you browse sources, upload
content from any configured source, and search the knowledge base.
Run:
python cookbook/07_knowledge/cloud/cloud_agentos.py
Key Concepts:
    - Each source type has its own config: S3Config, SharePointConfig, GitHubConfig, AzureBlobConfig (a GcsConfig also exists, though it is not registered in this example)
- Configs are registered on Knowledge via `content_sources` parameter
- Configs have factory methods (.file(), .folder()) to create content references
- Content references are passed to knowledge.insert()
"""
from os import getenv
from agno.agent import Agent
from agno.db.postgres import PostgresDb
from agno.knowledge.knowledge import Knowledge
from agno.knowledge.remote_content import (
AzureBlobConfig,
GitHubConfig,
S3Config,
SharePointConfig,
)
from agno.models.openai import OpenAIChat
from agno.os import AgentOS
from agno.vectordb.pgvector import PgVector
# Database connections.
# The contents DB tracks what has been ingested; the vector DB stores the
# embedded chunks that the agent searches at query time.
contents_db = PostgresDb(
    db_url="postgresql+psycopg://ai:ai@localhost:5532/ai",
    knowledge_table="knowledge_contents",
)
vector_db = PgVector(
    table_name="knowledge_vectors",
    db_url="postgresql+psycopg://ai:ai@localhost:5532/ai",
)
# Define content source configs (credentials come from env vars).
# SharePoint and Azure Blob are registered only when their required env vars
# are set; GitHub and S3 are always registered (they have public/demo fallbacks).
content_sources = []

# -- SharePoint (requires SHAREPOINT_TENANT_ID, CLIENT_ID, CLIENT_SECRET, HOSTNAME) --
if getenv("SHAREPOINT_TENANT_ID"):
    content_sources.append(
        SharePointConfig(
            id="sharepoint",
            name="Product Data",
            tenant_id=getenv("SHAREPOINT_TENANT_ID", ""),
            client_id=getenv("SHAREPOINT_CLIENT_ID", ""),
            client_secret=getenv("SHAREPOINT_CLIENT_SECRET", ""),
            hostname=getenv("SHAREPOINT_HOSTNAME", ""),
            site_id=getenv("SHAREPOINT_SITE_ID"),
        )
    )

# -- GitHub (requires GITHUB_TOKEN for private repos) --
# Always registered; defaults to the public agno-agi/agno repository.
content_sources.append(
    GitHubConfig(
        id="my-repo",
        name="My Repository",
        repo=getenv("GITHUB_REPO", "agno-agi/agno"),
        token=getenv("GITHUB_TOKEN"),
        branch="main",
    )
)

# -- Azure Blob (requires AZURE_TENANT_ID, CLIENT_ID, CLIENT_SECRET, STORAGE_ACCOUNT, CONTAINER) --
if getenv("AZURE_TENANT_ID"):
    content_sources.append(
        AzureBlobConfig(
            id="azure-blob",
            name="Azure Blob",
            tenant_id=getenv("AZURE_TENANT_ID", ""),
            client_id=getenv("AZURE_CLIENT_ID", ""),
            client_secret=getenv("AZURE_CLIENT_SECRET", ""),
            storage_account=getenv("AZURE_STORAGE_ACCOUNT_NAME", ""),
            container=getenv("AZURE_CONTAINER_NAME", ""),
        )
    )

# -- S3 (uses default AWS credential chain if env vars are not set) --
# Always registered; bucket and region fall back to demo defaults.
content_sources.append(
    S3Config(
        id="s3-docs",
        name="S3 Documents",
        bucket_name=getenv("S3_BUCKET_NAME", "my-docs"),
        region=getenv("AWS_REGION", "us-east-1"),
        aws_access_key_id=getenv("AWS_ACCESS_KEY_ID"),
        aws_secret_access_key=getenv("AWS_SECRET_ACCESS_KEY"),
        prefix="",
    )
)
# Create Knowledge with content sources.
# Registering the configs here is what exposes them through the AgentOS
# Knowledge API (remote_content_sources in /knowledge/config).
knowledge = Knowledge(
    name="Company Knowledge Base",
    description="Unified knowledge from multiple sources",
    contents_db=contents_db,
    vector_db=vector_db,
    content_sources=content_sources,
)

# Agent that can search the knowledge base when answering.
agent = Agent(
    model=OpenAIChat(id="gpt-4o-mini"),
    knowledge=knowledge,
    search_knowledge=True,
)

agent_os = AgentOS(
    knowledge=[knowledge],
    agents=[agent],
)
# ASGI app, importable as `cloud_agentos:app` by the server below.
app = agent_os.get_app()
# ============================================================================
# Run AgentOS
# ============================================================================
if __name__ == "__main__":
    # Serves a FastAPI app exposed by AgentOS. Use reload=True for local dev.
    agent_os.serve(app="cloud_agentos:app", reload=True)
# ============================================================================
# Using the Knowledge API
# ============================================================================
"""
Once AgentOS is running, use the Knowledge API to upload content from remote sources.
## Step 1: Get available content sources
curl -s http://localhost:7777/v1/knowledge/company-knowledge-base/config | jq
Response:
{
"remote_content_sources": [
{"id": "my-repo", "name": "My Repository", "type": "github"},
...
]
}
## Step 2: Upload content
curl -X POST http://localhost:7777/v1/knowledge/company-knowledge-base/remote-content \\
-H "Content-Type: application/json" \\
-d '{
"name": "Documentation",
"config_id": "my-repo",
"path": "docs/README.md"
}'
"""
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/07_knowledge/cloud/cloud_agentos.py",
"license": "Apache License 2.0",
"lines": 135,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/07_knowledge/cloud/github.py | """
GitHub Content Source for Knowledge
====================================
Load files and folders from GitHub repositories into your Knowledge base,
then query them with an Agent.
Authentication methods:
- Personal Access Token (PAT): simple, set ``token``
- GitHub App: enterprise-grade, set ``app_id``, ``installation_id``, ``private_key``
Requirements:
- PostgreSQL with pgvector: ``./cookbook/scripts/run_pgvector.sh``
- For private repos with PAT: GitHub fine-grained PAT with "Contents: read" permission
- For GitHub App auth: ``pip install PyJWT cryptography``
Run this cookbook:
python cookbook/07_knowledge/cloud/github.py
"""
from os import getenv
from agno.agent import Agent
from agno.knowledge.knowledge import Knowledge
from agno.knowledge.remote_content import GitHubConfig
from agno.models.openai import OpenAIChat
from agno.vectordb.pgvector import PgVector
# ---------------------------------------------------------------------------
# Option 1: Personal Access Token authentication
# ---------------------------------------------------------------------------
# For private repos, set GITHUB_TOKEN env var to a fine-grained PAT with "Contents: read"
github_config = GitHubConfig(
    id="my-repo",
    name="My Repository",
    repo="owner/repo",  # Format: owner/repo
    token=getenv("GITHUB_TOKEN"),  # Optional for public repos
    branch="main",
)
# ---------------------------------------------------------------------------
# Option 2: GitHub App authentication
# ---------------------------------------------------------------------------
# For organizations using GitHub Apps instead of personal tokens.
# Requires: pip install PyJWT cryptography
#
# github_config = GitHubConfig(
#     id="org-repo",
#     name="Org Repository",
#     repo="owner/repo",
#     app_id=getenv("GITHUB_APP_ID"),
#     installation_id=getenv("GITHUB_INSTALLATION_ID"),
#     private_key=getenv("GITHUB_APP_PRIVATE_KEY"),
#     branch="main",
# )
# ---------------------------------------------------------------------------
# Knowledge Base
# ---------------------------------------------------------------------------
# Registering the config on `content_sources` lets Knowledge resolve the
# remote content references created via github_config.file()/.folder().
knowledge = Knowledge(
    name="GitHub Knowledge",
    vector_db=PgVector(
        table_name="github_knowledge",
        db_url="postgresql+psycopg://ai:ai@localhost:5532/ai",
    ),
    content_sources=[github_config],
)
# ---------------------------------------------------------------------------
# Agent
# ---------------------------------------------------------------------------
# search_knowledge=True gives the model a knowledge-search tool.
agent = Agent(
    model=OpenAIChat(id="gpt-5.1"),
    name="GitHub Agent",
    knowledge=knowledge,
    search_knowledge=True,
)
# ---------------------------------------------------------------------------
# Run
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # Insert a single file
    print("Inserting README from GitHub...")
    knowledge.insert(
        name="README",
        remote_content=github_config.file("README.md"),
    )
    # Insert an entire folder (recursive)
    print("Inserting folder from GitHub...")
    knowledge.insert(
        name="Docs",
        remote_content=github_config.folder("docs"),
    )
    # Query the knowledge base through the agent
    agent.print_response(
        "Summarize what this repository is about based on the README",
        markdown=True,
    )
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/07_knowledge/cloud/github.py",
"license": "Apache License 2.0",
"lines": 88,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/07_knowledge/cloud/sharepoint.py | """
SharePoint Content Source for Knowledge
========================================
Load files and folders from SharePoint document libraries into your Knowledge base.
Uses Microsoft Graph API with OAuth2 client credentials flow.
Features:
- Load single files or entire folders recursively
- Supports any SharePoint Online site
- Automatic file type detection and reader selection
- Rich metadata stored for each file (site, path, filename)
Requirements:
- Azure AD App Registration with:
- Application (client) ID
- Client secret
- API permissions: Sites.Read.All (Application)
- SharePoint site ID or site path
Setup:
1. Register an app in Azure AD (portal.azure.com)
2. Add API permission: Microsoft Graph > Sites.Read.All (Application)
3. Grant admin consent
4. Create a client secret
5. Set environment variables (see below)
Environment Variables:
SHAREPOINT_TENANT_ID - Azure AD tenant ID
SHAREPOINT_CLIENT_ID - App registration client ID
SHAREPOINT_CLIENT_SECRET - App registration client secret
SHAREPOINT_HOSTNAME - e.g., "contoso.sharepoint.com"
SHAREPOINT_SITE_ID - Full site ID (hostname,guid,guid format)
Run this cookbook:
python cookbook/07_knowledge/cloud/sharepoint.py
"""
from os import getenv
from agno.knowledge.knowledge import Knowledge
from agno.knowledge.remote_content import SharePointConfig
from agno.vectordb.pgvector import PgVector
# Configure SharePoint content source.
# All credentials should come from environment variables (see module docstring).
sharepoint_config = SharePointConfig(
    id="company-docs",
    name="Company Documents",
    tenant_id=getenv("SHAREPOINT_TENANT_ID"),
    client_id=getenv("SHAREPOINT_CLIENT_ID"),
    client_secret=getenv("SHAREPOINT_CLIENT_SECRET"),
    hostname=getenv("SHAREPOINT_HOSTNAME"),  # e.g., "contoso.sharepoint.com"
    # Option 1: Provide site_id directly (recommended, faster)
    site_id=getenv("SHAREPOINT_SITE_ID"),  # e.g., "contoso.sharepoint.com,guid1,guid2"
    # Option 2: Or provide site_path and let the API look up the site ID
    # site_path="/sites/documents",
)
# Create Knowledge with SharePoint as a content source.
knowledge = Knowledge(
    name="SharePoint Knowledge",
    vector_db=PgVector(
        table_name="sharepoint_knowledge",
        db_url="postgresql+psycopg://ai:ai@localhost:5532/ai",
    ),
    content_sources=[sharepoint_config],
)
if __name__ == "__main__":
    # Insert a single file from SharePoint.
    # NOTE(review): paths appear to be relative to the site's document
    # library root — confirm against the Graph API drive layout.
    print("Inserting single file from SharePoint...")
    knowledge.insert(
        name="Q1 Report",
        remote_content=sharepoint_config.file("Shared Documents/Reports/q1-2024.pdf"),
    )
    # Insert an entire folder (recursive)
    print("Inserting folder from SharePoint...")
    knowledge.insert(
        name="Policy Documents",
        remote_content=sharepoint_config.folder("Shared Documents/Policies"),
    )
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/07_knowledge/cloud/sharepoint.py",
"license": "Apache License 2.0",
"lines": 71,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
agno-agi/agno:libs/agno/tests/integration/os/test_remote_content.py | """Integration tests for remote content upload endpoint."""
from unittest.mock import AsyncMock, Mock, patch
import pytest
from fastapi import APIRouter, FastAPI
from fastapi.testclient import TestClient
from agno.knowledge.content import Content
from agno.knowledge.knowledge import Knowledge
from agno.knowledge.remote_content import (
AzureBlobConfig,
GcsConfig,
GitHubConfig,
S3Config,
SharePointConfig,
)
from agno.os.routers.knowledge.knowledge import attach_routes
@pytest.fixture
def mock_knowledge_with_remote_configs():
    """Create a Knowledge instance with remote content configs.

    Registers one config of every supported source type (S3, GCS, GitHub,
    SharePoint, Azure Blob) and replaces the vector/contents DBs with Mocks
    so no external services are required.
    """
    content_sources = [
        S3Config(
            id="s3-docs",
            name="S3 Documents",
            bucket_name="my-docs-bucket",
            region="us-east-1",
        ),
        GcsConfig(
            id="gcs-data",
            name="GCS Data",
            bucket_name="my-data-bucket",
            project="my-project",
        ),
        GitHubConfig(
            id="github-repo",
            name="GitHub Repository",
            repo="myorg/myrepo",
            branch="main",
        ),
        SharePointConfig(
            id="sharepoint-docs",
            name="SharePoint Documents",
            tenant_id="tenant-123",
            client_id="client-456",
            client_secret="secret-789",
            hostname="contoso.sharepoint.com",
            site_id="contoso.sharepoint.com,guid1,guid2",
        ),
        AzureBlobConfig(
            id="azure-docs",
            name="Azure Documents",
            tenant_id="tenant-123",
            client_id="client-456",
            client_secret="secret-789",
            storage_account="mystorageaccount",
            container="mycontainer",
        ),
    ]
    knowledge = Knowledge(name="test_knowledge", content_sources=content_sources)
    # Mock external dependencies
    knowledge.vector_db = Mock()
    knowledge.vector_db.id = "test_vector_db_id"
    knowledge.contents_db = Mock()
    knowledge.readers = {}
    # Mock methods
    knowledge.vector_db.content_hash_exists.return_value = False
    # NOTE(review): async_insert/async_upsert are plain Mocks, not AsyncMocks —
    # fine while process_content is patched in the tests, but they would fail
    # if awaited for real. Confirm before reusing this fixture elsewhere.
    knowledge.vector_db.async_insert = Mock()
    knowledge.vector_db.async_upsert = Mock()
    knowledge.vector_db.upsert_available.return_value = True
    knowledge.contents_db.upsert_knowledge_content = Mock()
    knowledge.contents_db.get_knowledge_contents = Mock(return_value=([], 0))
    return knowledge
@pytest.fixture
def test_app(mock_knowledge_with_remote_configs):
    """Build a TestClient around a FastAPI app wired with the knowledge router."""
    fastapi_app = FastAPI()
    fastapi_app.include_router(attach_routes(APIRouter(), [mock_knowledge_with_remote_configs]))
    return TestClient(fastapi_app)
# =============================================================================
# Remote Content Endpoint Tests
# =============================================================================
def _post_remote_content(client, payload):
    """POST a remote-content form payload to the upload endpoint."""
    return client.post("/knowledge/remote-content", data=payload)


def test_upload_s3_file_success(test_app):
    """Test successful S3 file upload."""
    with patch("agno.os.routers.knowledge.knowledge.process_content") as mock_process:
        response = _post_remote_content(
            test_app,
            {
                "config_id": "s3-docs",
                "path": "documents/report.pdf",
                "name": "Q1 Report",
                "description": "Quarterly report",
                "metadata": '{"quarter": "Q1"}',
            },
        )
        assert response.status_code == 202
        body = response.json()
        assert "id" in body
        assert body["name"] == "Q1 Report"
        assert body["status"] == "processing"
        mock_process.assert_called_once()


def test_upload_s3_folder_success(test_app):
    """Test successful S3 folder upload (path ends with /)."""
    with patch("agno.os.routers.knowledge.knowledge.process_content") as mock_process:
        response = _post_remote_content(
            test_app,
            {"config_id": "s3-docs", "path": "documents/reports/", "name": "All Reports"},
        )
        assert response.status_code == 202
        assert response.json()["name"] == "All Reports"
        mock_process.assert_called_once()


def test_upload_github_file_success(test_app):
    """Test successful GitHub file upload."""
    with patch("agno.os.routers.knowledge.knowledge.process_content") as mock_process:
        response = _post_remote_content(
            test_app,
            {"config_id": "github-repo", "path": "docs/README.md"},
        )
        assert response.status_code == 202
        # With no explicit name, the path itself becomes the content name.
        assert response.json()["name"] == "docs/README.md"
        mock_process.assert_called_once()


def test_upload_github_folder_success(test_app):
    """Test successful GitHub folder upload."""
    with patch("agno.os.routers.knowledge.knowledge.process_content") as mock_process:
        response = _post_remote_content(
            test_app,
            {"config_id": "github-repo", "path": "src/api/", "name": "API Source Code"},
        )
        assert response.status_code == 202
        assert response.json()["name"] == "API Source Code"
        mock_process.assert_called_once()


def test_upload_gcs_file_success(test_app):
    """Test successful GCS file upload."""
    with patch("agno.os.routers.knowledge.knowledge.process_content") as mock_process:
        response = _post_remote_content(
            test_app,
            {"config_id": "gcs-data", "path": "data/dataset.csv"},
        )
        assert response.status_code == 202
        assert "id" in response.json()
        mock_process.assert_called_once()


def test_upload_sharepoint_file_success(test_app):
    """Test successful SharePoint file upload."""
    with patch("agno.os.routers.knowledge.knowledge.process_content") as mock_process:
        response = _post_remote_content(
            test_app,
            {"config_id": "sharepoint-docs", "path": "Shared Documents/report.pdf"},
        )
        assert response.status_code == 202
        assert "id" in response.json()
        mock_process.assert_called_once()


def test_upload_azure_blob_file_success(test_app):
    """Test successful Azure Blob file upload."""
    with patch("agno.os.routers.knowledge.knowledge.process_content") as mock_process:
        response = _post_remote_content(
            test_app,
            {"config_id": "azure-docs", "path": "documents/report.pdf", "name": "Azure Report"},
        )
        assert response.status_code == 202
        body = response.json()
        assert "id" in body
        assert body["name"] == "Azure Report"
        mock_process.assert_called_once()


def test_upload_azure_blob_folder_success(test_app):
    """Test successful Azure Blob folder upload (path ends with /)."""
    with patch("agno.os.routers.knowledge.knowledge.process_content") as mock_process:
        response = _post_remote_content(
            test_app,
            {"config_id": "azure-docs", "path": "documents/reports/", "name": "All Azure Reports"},
        )
        assert response.status_code == 202
        assert response.json()["name"] == "All Azure Reports"
        mock_process.assert_called_once()
def test_upload_unknown_config_id(test_app):
    """An unrecognized config_id is rejected with HTTP 400."""
    response = test_app.post(
        "/knowledge/remote-content",
        data={"config_id": "unknown-source", "path": "file.pdf"},
    )
    assert response.status_code == 400
    assert "Unknown content source" in response.json()["detail"]


def test_upload_missing_config_id(test_app):
    """Omitting config_id fails form validation with HTTP 422."""
    response = test_app.post("/knowledge/remote-content", data={"path": "file.pdf"})
    assert response.status_code == 422


def test_upload_missing_path(test_app):
    """Omitting path fails form validation with HTTP 422."""
    response = test_app.post("/knowledge/remote-content", data={"config_id": "s3-docs"})
    assert response.status_code == 422


def test_upload_with_invalid_metadata_json(test_app):
    """Invalid JSON metadata is tolerated rather than rejected."""
    with patch("agno.os.routers.knowledge.knowledge.process_content"):
        response = test_app.post(
            "/knowledge/remote-content",
            data={"config_id": "s3-docs", "path": "file.pdf", "metadata": "not valid json"},
        )
        # Invalid JSON is wrapped as {"value": ...} server-side, so the upload succeeds.
        assert response.status_code == 202
def test_upload_with_reader_id(test_app, mock_knowledge_with_remote_configs):
    """Test upload with specific reader_id."""
    # Register a stand-in reader so the id resolves.
    mock_knowledge_with_remote_configs.readers = {"pdf_reader": Mock()}
    with patch("agno.os.routers.knowledge.knowledge.process_content") as mock_process:
        response = test_app.post(
            "/knowledge/remote-content",
            data={"config_id": "s3-docs", "path": "document.pdf", "reader_id": "pdf_reader"},
        )
        assert response.status_code == 202
        # The reader_id is forwarded as the third positional arg of process_content.
        assert mock_process.call_args[0][2] == "pdf_reader"


def test_upload_with_chunking_params(test_app):
    """Test upload with chunking parameters."""
    with patch("agno.os.routers.knowledge.knowledge.process_content") as mock_process:
        response = test_app.post(
            "/knowledge/remote-content",
            data={
                "config_id": "github-repo",
                "path": "docs/large-file.md",
                "chunker": "recursive",
                "chunk_size": "1000",
                "chunk_overlap": "100",
            },
        )
        assert response.status_code == 202
        # Positional args 3..5 of process_content carry chunker, size, overlap.
        positional = mock_process.call_args[0]
        assert positional[3] == "recursive"
        assert positional[4] == 1000
        assert positional[5] == 100


def test_upload_auto_generates_name_from_path(test_app):
    """Test that name is auto-generated from path when not provided."""
    with patch("agno.os.routers.knowledge.knowledge.process_content"):
        response = test_app.post(
            "/knowledge/remote-content",
            data={"config_id": "s3-docs", "path": "documents/reports/annual-2024.pdf"},
        )
        assert response.status_code == 202
        assert response.json()["name"] == "documents/reports/annual-2024.pdf"


def test_folder_path_detection(test_app):
    """Test that paths ending with / are treated as folders."""
    with patch("agno.os.routers.knowledge.knowledge.process_content") as mock_process:
        # A file path (no trailing slash) and a folder path (trailing slash).
        for candidate_path in ("documents/file.pdf", "documents/folder/"):
            response = test_app.post(
                "/knowledge/remote-content",
                data={"config_id": "s3-docs", "path": candidate_path},
            )
            assert response.status_code == 202
        # Both uploads should have reached processing.
        assert mock_process.call_count == 2
# =============================================================================
# Config Endpoint Tests
# =============================================================================
def test_config_returns_remote_sources(test_app, mock_knowledge_with_remote_configs):
    """Test that /knowledge/config returns configured remote sources."""
    # Drop the vector_db mock so the config payload stays minimal.
    mock_knowledge_with_remote_configs.vector_db = None
    response = test_app.get("/knowledge/config")
    assert response.status_code == 200
    payload = response.json()
    # Remote sources are exposed under a snake_case field.
    assert "remote_content_sources" in payload
    sources = payload["remote_content_sources"]
    assert len(sources) == 5
    by_id = {entry["id"]: entry for entry in sources}
    for expected_id in ("s3-docs", "gcs-data", "github-repo", "sharepoint-docs", "azure-docs"):
        assert expected_id in by_id
    # Spot-check name/type for a few sources.
    assert by_id["s3-docs"]["name"] == "S3 Documents"
    assert by_id["s3-docs"]["type"] == "s3"
    assert by_id["github-repo"]["name"] == "GitHub Repository"
    assert by_id["github-repo"]["type"] == "github"
    assert by_id["azure-docs"]["name"] == "Azure Documents"
    assert by_id["azure-docs"]["type"] == "azureblob"
# =============================================================================
# Content Processing Tests
# =============================================================================
@pytest.mark.asyncio
async def test_process_content_with_remote_s3(mock_knowledge_with_remote_configs):
    """Test processing content from S3."""
    from agno.knowledge.remote_content.remote_content import S3Content
    from agno.os.routers.knowledge.knowledge import process_content

    remote = S3Content(bucket_name="my-bucket", key="test.pdf", config_id="s3-docs")
    pending = Content(name="Test PDF", remote_content=remote)
    # Stub out ingestion so only the remote-content dispatch path is exercised.
    with patch.object(mock_knowledge_with_remote_configs, "_aload_content", new_callable=AsyncMock) as mock_load:
        await process_content(mock_knowledge_with_remote_configs, pending, None)
        mock_load.assert_called_once()


@pytest.mark.asyncio
async def test_process_content_with_remote_github(mock_knowledge_with_remote_configs):
    """Test processing content from GitHub."""
    from agno.knowledge.remote_content.remote_content import GitHubContent
    from agno.os.routers.knowledge.knowledge import process_content

    remote = GitHubContent(config_id="github-repo", file_path="docs/README.md", branch="main")
    pending = Content(name="README", remote_content=remote)
    # Stub out ingestion so only the remote-content dispatch path is exercised.
    with patch.object(mock_knowledge_with_remote_configs, "_aload_content", new_callable=AsyncMock) as mock_load:
        await process_content(mock_knowledge_with_remote_configs, pending, None)
        mock_load.assert_called_once()


@pytest.mark.asyncio
async def test_process_content_with_remote_azure_blob(mock_knowledge_with_remote_configs):
    """Test processing content from Azure Blob Storage."""
    from agno.knowledge.remote_content.remote_content import AzureBlobContent
    from agno.os.routers.knowledge.knowledge import process_content

    remote = AzureBlobContent(config_id="azure-docs", blob_name="documents/report.pdf")
    pending = Content(name="Azure Report", remote_content=remote)
    # Stub out ingestion so only the remote-content dispatch path is exercised.
    with patch.object(mock_knowledge_with_remote_configs, "_aload_content", new_callable=AsyncMock) as mock_load:
        await process_content(mock_knowledge_with_remote_configs, pending, None)
        mock_load.assert_called_once()
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/integration/os/test_remote_content.py",
"license": "Apache License 2.0",
"lines": 381,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/knowledge/test_remote_content_config.py | """Unit tests for remote content configuration classes."""
import pytest
from pydantic import ValidationError
from agno.knowledge.remote_content import (
AzureBlobConfig,
BaseStorageConfig,
GcsConfig,
GitHubConfig,
S3Config,
SharePointConfig,
)
# =============================================================================
# Base BaseStorageConfig Tests
# =============================================================================
def test_base_config_creation():
    """Required fields populate; metadata defaults to None."""
    config = BaseStorageConfig(id="test-id", name="Test Config")
    assert (config.id, config.name) == ("test-id", "Test Config")
    assert config.metadata is None


def test_base_config_with_metadata():
    """Arbitrary nested metadata is stored verbatim."""
    extra = {"key": "value", "nested": {"foo": "bar"}}
    assert BaseStorageConfig(id="test-id", name="Test Config", metadata=extra).metadata == extra


def test_base_config_missing_required_fields():
    """Both id and name are mandatory."""
    for partial_kwargs in ({"id": "test-id"}, {"name": "Test"}):
        with pytest.raises(ValidationError):
            BaseStorageConfig(**partial_kwargs)


def test_base_config_allows_extra_fields():
    """Unknown keyword args are kept because the model allows extras."""
    config = BaseStorageConfig(id="test-id", name="Test", custom_field="custom_value")
    assert config.custom_field == "custom_value"
# =============================================================================
# S3Config Tests
# =============================================================================
def test_s3_config_creation():
    """Only bucket_name is required; credentials, region and prefix default to None."""
    config = S3Config(id="s3-source", name="My S3 Bucket", bucket_name="my-bucket")
    assert config.id == "s3-source"
    assert config.name == "My S3 Bucket"
    assert config.bucket_name == "my-bucket"
    for optional_value in (config.region, config.aws_access_key_id, config.aws_secret_access_key, config.prefix):
        assert optional_value is None


def test_s3_config_with_credentials():
    """Explicit AWS credentials, region and prefix are stored as given."""
    config = S3Config(
        id="s3-source",
        name="My S3 Bucket",
        bucket_name="my-bucket",
        region="us-east-1",
        aws_access_key_id="AKIAIOSFODNN7EXAMPLE",
        aws_secret_access_key="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY",
        prefix="documents/",
    )
    assert config.region == "us-east-1"
    assert config.aws_access_key_id == "AKIAIOSFODNN7EXAMPLE"
    assert config.aws_secret_access_key == "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
    assert config.prefix == "documents/"


def test_s3_config_file_method():
    """file() yields an S3Content pointing at a single object key."""
    ref = S3Config(id="s3-source", name="My S3 Bucket", bucket_name="my-bucket").file("path/to/document.pdf")
    assert ref.bucket_name == "my-bucket"
    assert ref.key == "path/to/document.pdf"
    assert ref.config_id == "s3-source"


def test_s3_config_folder_method():
    """folder() yields an S3Content carrying a key prefix."""
    ref = S3Config(id="s3-source", name="My S3 Bucket", bucket_name="my-bucket").folder("documents/2024/")
    assert ref.bucket_name == "my-bucket"
    assert ref.prefix == "documents/2024/"
    assert ref.config_id == "s3-source"


def test_s3_config_missing_bucket_name():
    """Omitting bucket_name is a validation error."""
    with pytest.raises(ValidationError):
        S3Config(id="s3-source", name="My S3 Bucket")


def test_s3_config_with_metadata():
    """Metadata passes through unchanged on S3Config."""
    extra = {"environment": "production", "team": "data"}
    assert S3Config(id="s3", name="S3", bucket_name="bucket", metadata=extra).metadata == extra
# =============================================================================
# GcsConfig Tests
# =============================================================================
def test_gcs_config_creation():
    """Required fields only: every optional setting stays None."""
    cfg = GcsConfig(id="gcs-source", name="My GCS Bucket", bucket_name="my-bucket")
    assert cfg.id == "gcs-source"
    assert cfg.name == "My GCS Bucket"
    assert cfg.bucket_name == "my-bucket"
    for optional in ("project", "credentials_path", "prefix"):
        assert getattr(cfg, optional) is None


def test_gcs_config_with_all_fields():
    """Project, credentials path and prefix are preserved verbatim."""
    cfg = GcsConfig(
        id="gcs-source",
        name="My GCS Bucket",
        bucket_name="my-bucket",
        project="my-project",
        credentials_path="/path/to/credentials.json",
        prefix="data/",
    )
    assert cfg.project == "my-project"
    assert cfg.credentials_path == "/path/to/credentials.json"
    assert cfg.prefix == "data/"


def test_gcs_config_file_method():
    """file() yields a GCSContent addressing one blob."""
    cfg = GcsConfig(id="gcs-source", name="My GCS Bucket", bucket_name="my-bucket")
    item = cfg.file("path/to/document.pdf")
    assert item.bucket_name == "my-bucket"
    assert item.blob_name == "path/to/document.pdf"
    assert item.config_id == "gcs-source"


def test_gcs_config_folder_method():
    """folder() yields a GCSContent addressing a prefix."""
    cfg = GcsConfig(id="gcs-source", name="My GCS Bucket", bucket_name="my-bucket")
    item = cfg.folder("documents/2024/")
    assert item.bucket_name == "my-bucket"
    assert item.prefix == "documents/2024/"
    assert item.config_id == "gcs-source"


def test_gcs_config_with_metadata():
    """Metadata dicts are stored untouched."""
    extra = {"project_type": "analytics"}
    cfg = GcsConfig(id="gcs", name="GCS", bucket_name="bucket", metadata=extra)
    assert cfg.metadata == extra
# =============================================================================
# SharePointConfig Tests
# =============================================================================
def _sp_kwargs(**overrides):
    """Baseline SharePointConfig constructor kwargs; callers may override any field."""
    base = {
        "id": "sp-source",
        "name": "My SharePoint",
        "tenant_id": "tenant-123",
        "client_id": "client-456",
        "client_secret": "secret-789",
        "hostname": "contoso.sharepoint.com",
    }
    base.update(overrides)
    return base


def test_sharepoint_config_creation():
    """Required fields are kept; site/folder settings default to None."""
    cfg = SharePointConfig(**_sp_kwargs())
    assert cfg.id == "sp-source"
    assert cfg.name == "My SharePoint"
    assert cfg.tenant_id == "tenant-123"
    assert cfg.client_id == "client-456"
    assert cfg.client_secret == "secret-789"
    assert cfg.hostname == "contoso.sharepoint.com"
    for optional in ("site_path", "site_id", "folder_path"):
        assert getattr(cfg, optional) is None


def test_sharepoint_config_with_site_id():
    """An explicit site_id is stored verbatim."""
    cfg = SharePointConfig(**_sp_kwargs(site_id="contoso.sharepoint.com,guid1,guid2"))
    assert cfg.site_id == "contoso.sharepoint.com,guid1,guid2"


def test_sharepoint_config_file_method():
    """file() carries the config id, file path and the config's site_path."""
    cfg = SharePointConfig(**_sp_kwargs(site_path="/sites/documents"))
    item = cfg.file("Shared Documents/report.pdf")
    assert item.config_id == "sp-source"
    assert item.file_path == "Shared Documents/report.pdf"
    assert item.site_path == "/sites/documents"


def test_sharepoint_config_file_method_with_site_override():
    """A site_path passed to file() wins over the config's default."""
    cfg = SharePointConfig(**_sp_kwargs(site_path="/sites/documents"))
    item = cfg.file("report.pdf", site_path="/sites/other")
    assert item.site_path == "/sites/other"


def test_sharepoint_config_folder_method():
    """folder() carries the config id and folder path."""
    cfg = SharePointConfig(**_sp_kwargs())
    item = cfg.folder("Shared Documents/Reports")
    assert item.config_id == "sp-source"
    assert item.folder_path == "Shared Documents/Reports"


def test_sharepoint_config_missing_required_fields():
    """Omitting tenant_id, client_id, client_secret and hostname must fail validation."""
    with pytest.raises(ValidationError):
        SharePointConfig(
            id="sp-source",
            name="My SharePoint",
        )


def test_sharepoint_config_with_metadata():
    """Metadata dicts are stored untouched."""
    extra = {"department": "hr"}
    cfg = SharePointConfig(
        id="sp",
        name="SP",
        tenant_id="t",
        client_id="c",
        client_secret="s",
        hostname="h.sharepoint.com",
        metadata=extra,
    )
    assert cfg.metadata == extra
# =============================================================================
# GitHubConfig Tests
# =============================================================================
# Shared fixture value: a syntactically valid (but fake) PEM private key.
_FAKE_PEM = "-----BEGIN RSA PRIVATE KEY-----\nfake\n-----END RSA PRIVATE KEY-----"


def test_github_config_creation():
    """Only repo is required; auth and location fields default to None."""
    cfg = GitHubConfig(id="gh-source", name="My Repo", repo="owner/repo")
    assert cfg.id == "gh-source"
    assert cfg.name == "My Repo"
    assert cfg.repo == "owner/repo"
    for optional in ("token", "branch", "path"):
        assert getattr(cfg, optional) is None


def test_github_config_with_all_fields():
    """Token, branch and path round-trip unchanged."""
    cfg = GitHubConfig(
        id="gh-source",
        name="My Repo",
        repo="owner/repo",
        token="ghp_xxxxxxxxxxxx",
        branch="main",
        path="docs/",
    )
    assert cfg.token == "ghp_xxxxxxxxxxxx"
    assert cfg.branch == "main"
    assert cfg.path == "docs/"


def test_github_config_file_method():
    """file() inherits the config's branch by default."""
    cfg = GitHubConfig(id="gh-source", name="My Repo", repo="owner/repo", branch="main")
    item = cfg.file("docs/README.md")
    assert item.config_id == "gh-source"
    assert item.file_path == "docs/README.md"
    assert item.branch == "main"


def test_github_config_file_method_with_branch_override():
    """A branch passed to file() overrides the config default."""
    cfg = GitHubConfig(id="gh-source", name="My Repo", repo="owner/repo", branch="main")
    assert cfg.file("docs/README.md", branch="develop").branch == "develop"


def test_github_config_folder_method():
    """folder() inherits the config's branch by default."""
    cfg = GitHubConfig(id="gh-source", name="My Repo", repo="owner/repo", branch="main")
    item = cfg.folder("src/api")
    assert item.config_id == "gh-source"
    assert item.folder_path == "src/api"
    assert item.branch == "main"


def test_github_config_folder_method_with_branch_override():
    """A branch passed to folder() overrides the config default."""
    cfg = GitHubConfig(id="gh-source", name="My Repo", repo="owner/repo", branch="main")
    assert cfg.folder("src/api", branch="feature-branch").branch == "feature-branch"


def test_github_config_missing_repo():
    """Omitting the required repo must fail validation."""
    with pytest.raises(ValidationError):
        GitHubConfig(id="gh-source", name="My Repo")


def test_github_config_with_metadata():
    """Metadata dicts are stored untouched."""
    extra = {"visibility": "private", "language": "python"}
    cfg = GitHubConfig(id="gh", name="GH", repo="owner/repo", metadata=extra)
    assert cfg.metadata == extra


def test_github_config_with_app_auth():
    """GitHub App credentials (app id, installation id, PEM key) are accepted together."""
    cfg = GitHubConfig(
        id="gh-app",
        name="App Repo",
        repo="org/repo",
        app_id=12345,
        installation_id=67890,
        private_key=_FAKE_PEM,
    )
    assert cfg.app_id == 12345
    assert cfg.installation_id == 67890
    assert cfg.private_key is not None


def test_github_config_partial_app_auth_raises():
    """Supplying only a subset of the App-auth triple must fail validation."""
    with pytest.raises(ValidationError, match="Missing"):
        GitHubConfig(id="gh", name="GH", repo="owner/repo", app_id=123)
    with pytest.raises(ValidationError, match="Missing"):
        GitHubConfig(id="gh", name="GH", repo="owner/repo", app_id=123, installation_id=456)


def test_github_config_app_auth_with_token():
    """A personal token and App auth may be configured simultaneously."""
    cfg = GitHubConfig(
        id="gh",
        name="GH",
        repo="owner/repo",
        token="ghp_xxx",
        app_id=123,
        installation_id=456,
        private_key=_FAKE_PEM,
    )
    assert cfg.token == "ghp_xxx"
    assert cfg.app_id == 123


def test_github_config_app_auth_string_ids():
    """App and installation ids may also be given as strings."""
    cfg = GitHubConfig(
        id="gh",
        name="GH",
        repo="owner/repo",
        app_id="12345",
        installation_id="67890",
        private_key=_FAKE_PEM,
    )
    assert cfg.app_id == "12345"
    assert cfg.installation_id == "67890"


def test_github_config_invalid_private_key_format():
    """A private_key that is not PEM formatted must fail validation."""
    with pytest.raises(ValidationError, match="PEM-formatted"):
        GitHubConfig(
            id="gh",
            name="GH",
            repo="owner/repo",
            app_id=123,
            installation_id=456,
            private_key="not-a-pem-key",
        )
# =============================================================================
# AzureBlobConfig Tests
# =============================================================================
def _azure_kwargs(**overrides):
    """Baseline AzureBlobConfig constructor kwargs; callers may override fields."""
    base = {
        "id": "azure-source",
        "name": "My Azure Storage",
        "tenant_id": "tenant-123",
        "client_id": "client-456",
        "client_secret": "secret-789",
        "storage_account": "mystorageaccount",
        "container": "mycontainer",
    }
    base.update(overrides)
    return base


def test_azure_blob_config_creation():
    """Required fields are kept; prefix defaults to None."""
    cfg = AzureBlobConfig(**_azure_kwargs())
    assert cfg.id == "azure-source"
    assert cfg.name == "My Azure Storage"
    assert cfg.tenant_id == "tenant-123"
    assert cfg.client_id == "client-456"
    assert cfg.client_secret == "secret-789"
    assert cfg.storage_account == "mystorageaccount"
    assert cfg.container == "mycontainer"
    assert cfg.prefix is None


def test_azure_blob_config_with_prefix():
    """An explicit prefix round-trips unchanged."""
    cfg = AzureBlobConfig(**_azure_kwargs(prefix="documents/"))
    assert cfg.prefix == "documents/"


def test_azure_blob_config_file_method():
    """file() targets a single blob and leaves prefix unset."""
    cfg = AzureBlobConfig(**_azure_kwargs())
    item = cfg.file("path/to/document.pdf")
    assert item.config_id == "azure-source"
    assert item.blob_name == "path/to/document.pdf"
    assert item.prefix is None


def test_azure_blob_config_folder_method():
    """folder() targets a prefix and leaves blob_name unset."""
    cfg = AzureBlobConfig(**_azure_kwargs())
    item = cfg.folder("documents/2024/")
    assert item.config_id == "azure-source"
    assert item.prefix == "documents/2024/"
    assert item.blob_name is None


def test_azure_blob_config_missing_required_fields():
    """Only id and name is not enough: credentials and location are required."""
    with pytest.raises(ValidationError):
        AzureBlobConfig(
            id="azure-source",
            name="My Azure Storage",
        )


def test_azure_blob_config_missing_storage_account():
    """Dropping storage_account must fail validation."""
    kwargs = _azure_kwargs()
    del kwargs["storage_account"]
    with pytest.raises(ValidationError):
        AzureBlobConfig(**kwargs)


def test_azure_blob_config_missing_container():
    """Dropping container must fail validation."""
    kwargs = _azure_kwargs()
    del kwargs["container"]
    with pytest.raises(ValidationError):
        AzureBlobConfig(**kwargs)


def test_azure_blob_config_with_metadata():
    """Metadata dicts are stored untouched."""
    extra = {"environment": "production", "department": "finance"}
    cfg = AzureBlobConfig(
        id="azure",
        name="Azure",
        tenant_id="t",
        client_id="c",
        client_secret="s",
        storage_account="account",
        container="container",
        metadata=extra,
    )
    assert cfg.metadata == extra
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/knowledge/test_remote_content_config.py",
"license": "Apache License 2.0",
"lines": 450,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/models/openai/test_openai_metrics.py | """
Unit tests for OpenAI metrics collection.
Tests that the collect_metrics_on_completion flag works correctly for OpenAI models.
"""
from typing import Optional
from agno.models.openai.chat import OpenAIChat
class MockCompletionUsage:
    """Minimal stand-in for OpenAI's CompletionUsage carrying only token counts."""

    def __init__(
        self,
        prompt_tokens: Optional[int] = 0,
        completion_tokens: Optional[int] = 0,
        total_tokens: Optional[int] = 0,
    ):
        self.prompt_tokens, self.completion_tokens, self.total_tokens = (
            prompt_tokens,
            completion_tokens,
            total_tokens,
        )
        # These tests never populate the per-modality detail objects.
        self.prompt_tokens_details = self.completion_tokens_details = None
class MockChoice:
    """Mock Choice object for testing."""

    def __init__(self, finish_reason=None):
        # finish_reason is the only field consumed by the tests below:
        # None represents an intermediate streaming chunk, a string the final one.
        self.finish_reason = finish_reason
class MockChatCompletionChunk:
    """Minimal stand-in for a streaming ChatCompletionChunk: usage plus one choice."""

    def __init__(self, usage=None, finish_reason=None):
        # Real chunks carry a list of choices; a single mock choice suffices here.
        self.usage = usage
        self.choices = [MockChoice(finish_reason)]
def test_openai_chat_default_collect_metrics_flag():
    """OpenAIChat defaults to collecting metrics on every chunk (flag off)."""
    chat = OpenAIChat(id="gpt-4o")
    assert chat.collect_metrics_on_completion is False


def test_should_collect_metrics_when_usage_is_none():
    """Chunks without a usage payload never yield metrics."""
    chat = OpenAIChat(id="gpt-4o")
    assert chat._should_collect_metrics(MockChatCompletionChunk(usage=None)) is False  # type: ignore[arg-type]


def test_should_collect_metrics_default_behavior():
    """With the flag off, metrics are collected on intermediate and final chunks alike."""
    chat = OpenAIChat(id="gpt-4o")
    payload = MockCompletionUsage(prompt_tokens=100, completion_tokens=20, total_tokens=120)
    for reason in (None, "stop"):  # intermediate chunk, then final chunk
        chunk = MockChatCompletionChunk(usage=payload, finish_reason=reason)
        assert chat._should_collect_metrics(chunk) is True  # type: ignore[arg-type]


def test_openai_streaming_metrics_simulation():
    """
    OpenAI streams incremental token counts, so metrics should be collected
    from every chunk that carries a usage payload.
    """
    chat = OpenAIChat(id="gpt-4o")
    stream = [
        MockChatCompletionChunk(
            usage=MockCompletionUsage(prompt_tokens=100, completion_tokens=1, total_tokens=101),
            finish_reason=None,
        ),
        MockChatCompletionChunk(
            usage=MockCompletionUsage(prompt_tokens=0, completion_tokens=1, total_tokens=1),
            finish_reason=None,
        ),
        MockChatCompletionChunk(
            usage=MockCompletionUsage(prompt_tokens=0, completion_tokens=1, total_tokens=1),
            finish_reason="stop",
        ),
    ]
    collected = [
        chat._get_metrics(chunk.usage)  # type: ignore[arg-type]
        for chunk in stream
        if chat._should_collect_metrics(chunk)  # type: ignore[arg-type]
    ]
    # Every chunk carried usage, so all three contribute metrics.
    assert len(collected) == 3
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/models/openai/test_openai_metrics.py",
"license": "Apache License 2.0",
"lines": 74,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/models/perplexity/test_perplexity_metrics.py | """
Unit tests for Perplexity metrics collection fix.
Tests the collect_metrics_on_completion flag that prevents
incorrect accumulation of cumulative token counts in streaming responses.
"""
from typing import Optional
from agno.models.metrics import MessageMetrics
from agno.models.perplexity.perplexity import Perplexity
class MockCompletionUsage:
    """Minimal stand-in for OpenAI's CompletionUsage, including optional detail payloads."""

    def __init__(
        self,
        prompt_tokens: Optional[int] = 0,
        completion_tokens: Optional[int] = 0,
        total_tokens: Optional[int] = 0,
        prompt_tokens_details=None,
        completion_tokens_details=None,
    ):
        self.prompt_tokens, self.completion_tokens, self.total_tokens = (
            prompt_tokens,
            completion_tokens,
            total_tokens,
        )
        self.prompt_tokens_details = prompt_tokens_details
        self.completion_tokens_details = completion_tokens_details
class MockChoice:
    """Mock Choice object for testing."""

    def __init__(self, finish_reason=None):
        # finish_reason is the only field consumed by the tests below:
        # None represents an intermediate streaming chunk, a string the final one.
        self.finish_reason = finish_reason
class MockChatCompletionChunk:
    """Minimal stand-in for a streaming ChatCompletionChunk: usage plus one choice."""

    def __init__(self, usage=None, finish_reason=None):
        # Real chunks carry a list of choices; a single mock choice suffices here.
        self.usage = usage
        self.choices = [MockChoice(finish_reason)]
def test_perplexity_collect_metrics_flag():
    """Perplexity opts into completion-only metrics collection."""
    model = Perplexity(id="sonar", api_key="test-key")
    assert model.collect_metrics_on_completion is True


def test_should_collect_metrics_on_completion():
    """With the flag on, only the chunk carrying a finish_reason yields metrics."""
    model = Perplexity(id="sonar", api_key="test-key")
    payload = MockCompletionUsage(prompt_tokens=100, completion_tokens=20, total_tokens=120)
    # Intermediate chunk (no finish_reason): skipped.
    assert model._should_collect_metrics(MockChatCompletionChunk(usage=payload, finish_reason=None)) is False  # type: ignore[arg-type]
    # Final chunk: collected.
    assert model._should_collect_metrics(MockChatCompletionChunk(usage=payload, finish_reason="stop")) is True  # type: ignore[arg-type]


def test_perplexity_get_metrics_basic():
    """CompletionUsage token counts map onto MessageMetrics fields."""
    model = Perplexity(id="sonar", api_key="test-key")
    metrics = model._get_metrics(MockCompletionUsage(prompt_tokens=1965, completion_tokens=29, total_tokens=1994))  # type: ignore[arg-type]
    assert isinstance(metrics, MessageMetrics)
    assert (metrics.input_tokens, metrics.output_tokens, metrics.total_tokens) == (1965, 29, 1994)


def test_perplexity_get_metrics_with_details():
    """Audio/cached/reasoning token details are forwarded into the metrics."""
    model = Perplexity(id="sonar", api_key="test-key")

    class _PromptDetails:
        audio_tokens = 10
        cached_tokens = 500

    class _CompletionDetails:
        audio_tokens = 5
        reasoning_tokens = 100

    usage = MockCompletionUsage(
        prompt_tokens=1965,
        completion_tokens=29,
        total_tokens=1994,
        prompt_tokens_details=_PromptDetails(),
        completion_tokens_details=_CompletionDetails(),
    )
    metrics = model._get_metrics(usage)  # type: ignore[arg-type]
    assert (metrics.input_tokens, metrics.output_tokens, metrics.total_tokens) == (1965, 29, 1994)
    assert metrics.audio_input_tokens == 10
    assert metrics.cache_read_tokens == 500
    assert metrics.audio_output_tokens == 5
    assert metrics.reasoning_tokens == 100


def test_perplexity_streaming_metrics_simulation():
    """
    Regression scenario: Perplexity streams cumulative counts (1, 2, 3, ..., N).
    Only the final chunk (finish_reason set) may contribute metrics, otherwise
    the cumulative values would be summed repeatedly.
    """
    model = Perplexity(id="sonar", api_key="test-key")
    stream = [
        MockChatCompletionChunk(
            usage=MockCompletionUsage(prompt_tokens=1965, completion_tokens=n, total_tokens=1965 + n),
            finish_reason=None,
        )
        for n in (1, 2, 3)
    ]
    stream.append(
        MockChatCompletionChunk(
            usage=MockCompletionUsage(prompt_tokens=1965, completion_tokens=29, total_tokens=1994),
            finish_reason="stop",
        )
    )
    collected = [
        model._get_metrics(chunk.usage)  # type: ignore[arg-type]
        for chunk in stream
        if model._should_collect_metrics(chunk)  # type: ignore[arg-type]
    ]
    # Only the terminal chunk's (correct, cumulative) totals survive.
    assert len(collected) == 1
    assert (collected[0].input_tokens, collected[0].output_tokens, collected[0].total_tokens) == (1965, 29, 1994)


def test_perplexity_get_metrics_with_none_values():
    """None token counts degrade gracefully to zero."""
    model = Perplexity(id="sonar", api_key="test-key")
    metrics = model._get_metrics(MockCompletionUsage(prompt_tokens=None, completion_tokens=None, total_tokens=None))  # type: ignore[arg-type]
    assert (metrics.input_tokens, metrics.output_tokens, metrics.total_tokens) == (0, 0, 0)


def test_collect_metrics_with_different_finish_reasons():
    """Any non-None finish_reason marks the final chunk, regardless of its value."""
    model = Perplexity(id="sonar", api_key="test-key")
    payload = MockCompletionUsage(prompt_tokens=100, completion_tokens=20, total_tokens=120)
    for reason in ("stop", "length", "tool_calls", "content_filter"):
        chunk = MockChatCompletionChunk(usage=payload, finish_reason=reason)
        assert model._should_collect_metrics(chunk) is True  # type: ignore[arg-type]
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/models/perplexity/test_perplexity_metrics.py",
"license": "Apache License 2.0",
"lines": 131,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:cookbook/90_models/n1n/basic.py | """
N1N Basic
=========
Cookbook example for `n1n/basic.py`.
"""
from agno.agent import Agent
from agno.models.n1n import N1N
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# A markdown-rendering agent backed by n1n.ai's OpenAI-compatible endpoint.
agent = Agent(
    model=N1N(id="gpt-4o"),
    markdown=True,
)

# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # --- Sync ---
    agent.print_response("Share a 2 sentence horror story.")

    # --- Sync + Streaming ---
    agent.print_response("Share a 2 sentence horror story.", stream=True)
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/n1n/basic.py",
"license": "Apache License 2.0",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/n1n/tool_use.py | """
N1N Tool Use
============
Cookbook example for `n1n/tool_use.py`.
"""
from agno.agent import Agent
from agno.models.n1n import N1N
from agno.tools.websearch import WebSearchTools
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# Agent with web-search tooling, backed by n1n.ai's OpenAI-compatible endpoint.
agent = Agent(
    model=N1N(id="gpt-5-mini"),
    markdown=True,
    tools=[WebSearchTools()],
)

# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # Fix: the query previously ran at import time, which made the __main__
    # guard dead code and triggered a network call on import. Running it under
    # the guard matches the other cookbook scripts (e.g. basic.py).
    agent.print_response("What is happening in France?", stream=True)
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/n1n/tool_use.py",
"license": "Apache License 2.0",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/agno/models/n1n/n1n.py | from dataclasses import dataclass, field
from os import getenv
from typing import Any, Dict, Optional
from agno.exceptions import ModelAuthenticationError
from agno.models.openai.like import OpenAILike
@dataclass
class N1N(OpenAILike):
    """
    A class for interacting with n1n.ai models via their OpenAI-compatible API.

    Attributes:
        id (str): The model id. Defaults to "gpt-4o".
        name (str): The model name. Defaults to "N1N".
        provider (str): The provider name. Defaults to "N1N".
        api_key (Optional[str]): The API key; read from N1N_API_KEY when unset.
        base_url (str): The base URL. Defaults to "https://api.n1n.ai/v1".
    """

    id: str = "gpt-4o"
    name: str = "N1N"
    provider: str = "N1N"
    api_key: Optional[str] = field(default_factory=lambda: getenv("N1N_API_KEY"))
    base_url: str = "https://api.n1n.ai/v1"

    def _get_client_params(self) -> Dict[str, Any]:
        """Assemble kwargs for the OpenAI client, raising if no API key is available."""
        # Late env lookup covers a key exported after this object was constructed.
        self.api_key = self.api_key or getenv("N1N_API_KEY")
        if not self.api_key:
            raise ModelAuthenticationError(
                message="N1N_API_KEY not set. Please set the N1N_API_KEY environment variable.",
                model_name=self.name,
            )

        candidate_params: Dict[str, Any] = {
            "api_key": self.api_key,
            "organization": self.organization,
            "base_url": self.base_url,
            "timeout": self.timeout,
            "max_retries": self.max_retries,
            "default_headers": self.default_headers,
            "default_query": self.default_query,
        }
        # Drop unset values so the client library applies its own defaults.
        client_params = {key: value for key, value in candidate_params.items() if value is not None}

        # Caller-supplied overrides win over the computed values.
        if self.client_params:
            client_params.update(self.client_params)
        return client_params
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/models/n1n/n1n.py",
"license": "Apache License 2.0",
"lines": 47,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/tests/unit/models/test_n1n.py | import os
from unittest.mock import patch
import pytest
from agno.exceptions import ModelAuthenticationError
from agno.models.n1n import N1N
def test_n1n_initialization_with_api_key():
    """An explicitly supplied key is stored and the default base URL is used."""
    model = N1N(id="gpt-4o", api_key="test-api-key")
    assert model.id == "gpt-4o"
    assert model.api_key == "test-api-key"
    assert model.base_url == "https://api.n1n.ai/v1"


def test_n1n_initialization_without_api_key():
    """With no key anywhere, building client params must raise and produce nothing."""
    with patch.dict(os.environ, {}, clear=True):
        model = N1N(id="gpt-4o")
        result = None
        with pytest.raises(ModelAuthenticationError):
            result = model._get_client_params()
        assert result is None


def test_n1n_initialization_with_env_api_key():
    """The key is picked up from the N1N_API_KEY environment variable."""
    with patch.dict(os.environ, {"N1N_API_KEY": "env-api-key"}):
        assert N1N(id="gpt-4o").api_key == "env-api-key"


def test_n1n_client_params():
    """Client params carry the key and base URL through to the OpenAI client."""
    params = N1N(id="gpt-4o", api_key="test-api-key")._get_client_params()
    assert params["api_key"] == "test-api-key"
    assert params["base_url"] == "https://api.n1n.ai/v1"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/models/test_n1n.py",
"license": "Apache License 2.0",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/models/test_async_generator_exception_handling.py | """
Test for async generator exception handling in the Model class.
Verifies that when an async generator tool raises an exception during iteration,
the exception is handled gracefully (like sync generators) rather than crashing.
Fix: Changed from `raise error` to setting `function_call.error = str(error)`
and `function_call_success = False`, matching sync generator behavior.
"""
import pytest
from agno.run import RunContext
from agno.tools.function import Function, FunctionCall
@pytest.mark.asyncio
async def test_async_generator_exception_handled_gracefully():
    """Test that async generator exceptions are captured instead of re-raised."""
    from typing import AsyncIterator

    session_state = {}

    async def failing_async_generator(run_context: RunContext) -> AsyncIterator[str]:
        """An async generator that raises an exception during iteration."""
        # One successful yield first, so partial output is observable before the failure.
        yield "first"
        raise ValueError("Test error during async generator iteration")

    # Create function and execute
    func = Function.from_callable(failing_async_generator)
    run_context = RunContext(run_id="test-run", session_id="test-session", session_state=session_state)
    func._run_context = run_context
    func.process_entrypoint()
    fc = FunctionCall(function=func, arguments={})
    result = await fc.aexecute()

    # Consume the async generator and capture the error
    # (result.result is the async iterator produced by the tool entrypoint).
    error = None
    output = []
    try:
        async for item in result.result:
            output.append(item)
    except ValueError as e:
        error = e

    # Verify: exception was raised during iteration (this is expected behavior)
    # The fix ensures that in base.py, when this error is caught during
    # async generator processing, it sets function_call.error instead of re-raising
    assert error is not None
    assert str(error) == "Test error during async generator iteration"
    assert output == ["first"]

    # Verify FunctionCall.error can be set (as the fix does in base.py)
    fc.error = str(error)
    assert fc.error == "Test error during async generator iteration"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/models/test_async_generator_exception_handling.py",
"license": "Apache License 2.0",
"lines": 43,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/team/test_delegate_closure_bug.py | """
Unit tests for the closure bug fix in adelegate_task_to_members.
This tests the regression fix for PR #6067 where Python closures in loops
captured variables by reference instead of by value, causing all concurrent
async tasks to use the last loop iteration's values.
Bug: When delegate_to_all_members=True and async mode is used, closures created
inside a for loop would all see the final loop values when asyncio.gather()
executed them later.
Fix: Capture loop variables via default arguments in the async function definitions.
"""
import asyncio
from typing import List
from unittest.mock import AsyncMock, MagicMock
import pytest
from agno.agent.agent import Agent
from agno.run.agent import RunOutput
from agno.team.team import Team
class TestClosureBugFix:
"""Test suite for the closure bug fix in adelegate_task_to_members."""
    @pytest.mark.asyncio
    async def test_async_closure_captures_correct_agent_identity(self):
        """
        Test that each agent's identity is correctly captured in async closures.

        This is the core regression test for the closure bug. Before the fix,
        all closures would use the last agent's values.
        """
        # Track which agents were actually called
        called_agents: List[str] = []

        # Create mock agents with distinct names
        agents = []
        for i in range(1, 4):
            agent = Agent(name=f"Worker{i}", id=f"worker-{i}")

            # Create a mock that tracks calls and returns agent-specific response
            mock_arun = AsyncMock(
                return_value=RunOutput(
                    run_id=f"run-{i}",
                    agent_id=f"worker-{i}",
                    agent_name=f"Worker{i}",
                    content=f"Response from Worker{i}",
                )
            )

            # Capture which agent was called.
            # NOTE: a factory function is used here (instead of a closure over the
            # loop variable) precisely to bind agent_name by value per iteration —
            # the same late-binding pitfall this test exists to guard against.
            def make_side_effect(agent_name):
                async def side_effect(*args, **kwargs):
                    called_agents.append(agent_name)
                    return RunOutput(
                        run_id=f"run-{agent_name}",
                        agent_id=agent_name.lower().replace("worker", "worker-"),
                        agent_name=agent_name,
                        content=f"Response from {agent_name}",
                    )

                return side_effect

            mock_arun.side_effect = make_side_effect(f"Worker{i}")
            agent.arun = mock_arun
            agents.append(agent)

        # Create team with delegate_to_all_members
        team = Team(
            name="Test Team",
            members=agents,
            delegate_to_all_members=True,
        )

        # Mock the team's model to trigger delegation
        mock_team_model = AsyncMock()
        mock_team_model.get_instructions_for_model = MagicMock(return_value=None)
        mock_team_model.get_system_message_for_model = MagicMock(return_value=None)
        team.model = mock_team_model

        # We need to directly test the adelegate_task_to_members function
        # by calling it with a simulated context
        # For simplicity, we'll test the closure behavior pattern directly

        # Simulate the buggy vs fixed closure pattern
        results_buggy = await self._simulate_buggy_closure_pattern(agents)
        results_fixed = await self._simulate_fixed_closure_pattern(agents)

        # Buggy pattern: all results have the same (last) agent name
        buggy_names = set(results_buggy)
        assert len(buggy_names) == 1, "Buggy pattern should show all same agent"
        assert "Worker3" in buggy_names, "Buggy pattern uses last agent"

        # Fixed pattern: all results have distinct agent names
        fixed_names = set(results_fixed)
        assert len(fixed_names) == 3, "Fixed pattern should show all distinct agents"
        assert fixed_names == {"Worker1", "Worker2", "Worker3"}
async def _simulate_buggy_closure_pattern(self, agents: List[Agent]) -> List[str]:
"""Simulate the buggy closure pattern (before fix)."""
tasks = []
for member_agent in agents:
# Bug: closure captures member_agent by reference
async def run_agent():
return member_agent.name # Will always be the last agent!
tasks.append(run_agent)
results = await asyncio.gather(*[task() for task in tasks])
return list(results)
async def _simulate_fixed_closure_pattern(self, agents: List[Agent]) -> List[str]:
"""Simulate the fixed closure pattern (after fix)."""
tasks = []
for member_agent in agents:
# Fix: capture member_agent via default argument
async def run_agent(agent=member_agent):
return agent.name # Correctly uses the captured agent
tasks.append(run_agent)
results = await asyncio.gather(*[task() for task in tasks])
return list(results)
@pytest.mark.asyncio
async def test_multiple_loop_variables_captured_correctly(self):
"""
Test that all loop variables are captured correctly, not just the agent.
The fix captures: member_agent, member_agent_task, history, member_agent_index
"""
# Simulate capturing multiple variables
items = [
{"index": 0, "name": "First", "task": "Task A"},
{"index": 1, "name": "Second", "task": "Task B"},
{"index": 2, "name": "Third", "task": "Task C"},
]
# Fixed pattern with all variables captured
tasks = []
for item in items:
async def process(
index=item["index"],
name=item["name"],
task=item["task"],
):
return {"index": index, "name": name, "task": task}
tasks.append(process)
results = await asyncio.gather(*[t() for t in tasks])
# Verify all variables were captured correctly
assert results[0] == {"index": 0, "name": "First", "task": "Task A"}
assert results[1] == {"index": 1, "name": "Second", "task": "Task B"}
assert results[2] == {"index": 2, "name": "Third", "task": "Task C"}
@pytest.mark.asyncio
async def test_streaming_branch_uses_function_parameter(self):
"""
Test that the streaming branch correctly uses the function parameter.
In the streaming branch, stream_member(agent) receives the agent as a
parameter, but was incorrectly using member_agent (outer loop variable)
in some places.
"""
agents = [
Agent(name="StreamAgent1", id="stream-1"),
Agent(name="StreamAgent2", id="stream-2"),
Agent(name="StreamAgent3", id="stream-3"),
]
# Simulate the fixed streaming pattern
results = []
async def stream_member(agent: Agent) -> str:
# This should use 'agent' parameter, not outer 'member_agent'
return agent.name or ""
tasks = []
for member_agent in agents:
current_agent = member_agent
tasks.append(asyncio.create_task(stream_member(current_agent)))
completed = await asyncio.gather(*tasks)
results = list(completed)
# Verify all agents are distinct
assert set(results) == {"StreamAgent1", "StreamAgent2", "StreamAgent3"}
class TestClosurePatternIsolation:
    """
    Isolated tests demonstrating the closure bug pattern.
    These tests prove the bug exists and the fix works without depending
    on the full Team implementation.
    """

    @pytest.mark.asyncio
    async def test_closure_late_binding_demonstration(self):
        """
        Demonstrate Python's late binding behavior with closures.
        This is the fundamental issue that caused the bug.
        """
        # Each closure reads `i` when it is *called*, not when it is defined,
        # so every one of them observes the loop's final value.
        funcs = [lambda: i for i in range(3)]
        results = [fn() for fn in funcs]
        assert results == [2, 2, 2], "Late binding causes all to return last value"

    @pytest.mark.asyncio
    async def test_closure_early_binding_fix(self):
        """
        Demonstrate the fix using default arguments for early binding.
        """
        # A default argument is evaluated at definition time, freezing the
        # then-current value of `i` into each function.
        funcs = [lambda captured_i=i: captured_i for i in range(3)]
        results = [fn() for fn in funcs]
        assert results == [0, 1, 2], "Early binding via default args preserves values"

    @pytest.mark.asyncio
    async def test_async_closure_late_binding(self):
        """
        Demonstrate the same issue occurs with async functions.
        """
        factories = []
        for i in range(3):
            async def grab():
                return i
            factories.append(grab)
        # All coroutines run after the loop ended, so they all see i == 2.
        results = await asyncio.gather(*(factory() for factory in factories))
        assert list(results) == [2, 2, 2], "Async closures have same late binding issue"

    @pytest.mark.asyncio
    async def test_async_closure_early_binding_fix(self):
        """
        Demonstrate the fix works for async functions.
        """
        factories = []
        for i in range(3):
            async def grab(captured_i=i):
                return captured_i
            factories.append(grab)
        results = await asyncio.gather(*(factory() for factory in factories))
        assert list(results) == [0, 1, 2], "Async closures fixed with default args"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/team/test_delegate_closure_bug.py",
"license": "Apache License 2.0",
"lines": 212,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/knowledge/test_knowledge_metadata_propagation.py | """Tests for metadata propagation when using path in Knowledge.
This tests the fix for issue #6077 where metadata was not propagated to documents
when using path in add_content_async/ainsert.
"""
import tempfile
from pathlib import Path
from typing import Any, Dict, List
from unittest.mock import patch
import pytest
from agno.knowledge.content import Content
from agno.knowledge.document import Document
from agno.knowledge.knowledge import Knowledge
from agno.vectordb.base import VectorDb
class MockVectorDb(VectorDb):
    """Mock VectorDb that tracks inserted documents and their metadata."""
    def __init__(self):
        # Tests inspect these lists to verify that content metadata was
        # propagated onto each Document; no real vector store is involved.
        self.inserted_documents: List[Document] = []
        self.upserted_documents: List[Document] = []
    # Lifecycle stubs: nothing to create or tear down for the in-memory mock.
    def create(self) -> None:
        pass
    async def async_create(self) -> None:
        pass
    # Existence checks always report "not present" so inserts are never skipped.
    def name_exists(self, name: str) -> bool:
        return False
    async def async_name_exists(self, name: str) -> bool:
        return False
    def id_exists(self, id: str) -> bool:
        return False
    def content_hash_exists(self, content_hash: str) -> bool:
        return False
    # Write path: capture the documents handed over for later assertions.
    def insert(self, content_hash: str, documents: List[Document], filters=None) -> None:
        self.inserted_documents.extend(documents)
    async def async_insert(self, content_hash: str, documents: List[Document], filters=None) -> None:
        self.inserted_documents.extend(documents)
    def upsert(self, content_hash: str, documents: List[Document], filters=None) -> None:
        self.upserted_documents.extend(documents)
    async def async_upsert(self, content_hash: str, documents: List[Document], filters=None) -> None:
        self.upserted_documents.extend(documents)
    def upsert_available(self) -> bool:
        return True
    # Read path is unused by these tests; searches return nothing.
    def search(self, query: str, limit: int = 5, filters=None) -> List[Document]:
        return []
    async def async_search(self, query: str, limit: int = 5, filters=None) -> List[Document]:
        return []
    def drop(self) -> None:
        pass
    async def async_drop(self) -> None:
        pass
    def exists(self) -> bool:
        return True
    async def async_exists(self) -> bool:
        return True
    # Deletion/metadata-update stubs report success without doing anything.
    def delete(self) -> bool:
        return True
    def delete_by_id(self, id: str) -> bool:
        return True
    def delete_by_name(self, name: str) -> bool:
        return True
    def delete_by_metadata(self, metadata: Dict[str, Any]) -> bool:
        return True
    def update_metadata(self, content_id: str, metadata: Dict[str, Any]) -> None:
        pass
    def delete_by_content_id(self, content_id: str) -> bool:
        return True
    def get_supported_search_types(self) -> List[str]:
        return ["vector"]
class MockReader:
    """Stub reader that yields a single canned document for any path."""

    def read(self, path, name=None, password=None) -> List[Document]:
        document = Document(name=name or str(path), content="Test document content")
        return [document]

    async def aread(self, path, name=None, password=None) -> List[Document]:
        document = Document(name=name or str(path), content="Test document content")
        return [document]


@pytest.fixture
def temp_text_file():
    """Yield the path of a throwaway .txt file; remove it after the test."""
    with tempfile.NamedTemporaryFile(mode="w", suffix=".txt", delete=False) as handle:
        handle.write("Test document content for metadata propagation testing.")
        file_path = handle.name
    yield file_path
    # Cleanup
    Path(file_path).unlink(missing_ok=True)


@pytest.fixture
def mock_vector_db():
    """Provide a fresh MockVectorDb for each test."""
    return MockVectorDb()
def test_prepare_documents_for_insert_with_metadata():
    """_prepare_documents_for_insert merges content metadata into every document."""
    knowledge = Knowledge(vector_db=MockVectorDb())
    docs = [
        Document(name="doc1", content="Content 1", meta_data={"existing": "value1"}),
        Document(name="doc2", content="Content 2", meta_data={}),
        Document(name="doc3", content="Content 3"),  # starts without meta_data
    ]
    shared = {"document_id": "123", "knowledge_base_id": "456", "filename": "test.txt"}

    prepared = knowledge._prepare_documents_for_insert(docs, "content-id-1", metadata=shared)

    # linked_to is always stamped on; it is "" for an unnamed knowledge base.
    expected_common = {**shared, "linked_to": ""}
    assert prepared[0].meta_data == {"existing": "value1", **expected_common}
    assert prepared[1].meta_data == expected_common
    assert prepared[2].meta_data == expected_common
    # Every prepared document is tagged with the content id.
    assert all(doc.content_id == "content-id-1" for doc in prepared)


def test_prepare_documents_for_insert_without_metadata():
    """Without content metadata, existing document metadata survives untouched."""
    knowledge = Knowledge(vector_db=MockVectorDb())
    docs = [
        Document(name="doc1", content="Content 1", meta_data={"existing": "value1"}),
        Document(name="doc2", content="Content 2", meta_data={}),
    ]

    prepared = knowledge._prepare_documents_for_insert(docs, "content-id-1")

    # Only linked_to is added on top of what each document already had.
    assert prepared[0].meta_data == {"existing": "value1", "linked_to": ""}
    assert prepared[1].meta_data == {"linked_to": ""}
    assert all(doc.content_id == "content-id-1" for doc in prepared)


def test_prepare_documents_for_insert_with_empty_metadata():
    """An empty metadata dict behaves exactly like passing no metadata."""
    knowledge = Knowledge(vector_db=MockVectorDb())
    docs = [Document(name="doc1", content="Content 1", meta_data={"existing": "value1"})]

    prepared = knowledge._prepare_documents_for_insert(docs, "content-id-1", metadata={})

    assert prepared[0].meta_data == {"existing": "value1", "linked_to": ""}
@pytest.mark.asyncio
async def test_aload_from_path_propagates_metadata(temp_text_file, mock_vector_db):
    """_aload_from_path (insert path) must stamp content metadata onto documents."""
    knowledge = Knowledge(vector_db=mock_vector_db)
    content = Content(
        path=temp_text_file,
        name="Test Document",
        metadata={"document_id": "123", "knowledge_base_id": "456", "filename": "test.txt"},
    )
    content.content_hash = knowledge._build_content_hash(content)

    fake_docs = [Document(name="test", content="Test content")]
    with patch.object(knowledge, "_aread", return_value=fake_docs):
        await knowledge._aload_from_path(content, upsert=False, skip_if_exists=False)

    # Exactly one document inserted, carrying all three metadata keys.
    assert len(mock_vector_db.inserted_documents) == 1
    inserted = mock_vector_db.inserted_documents[0]
    assert inserted.meta_data.get("document_id") == "123"
    assert inserted.meta_data.get("knowledge_base_id") == "456"
    assert inserted.meta_data.get("filename") == "test.txt"


@pytest.mark.asyncio
async def test_aload_from_path_upsert_propagates_metadata(temp_text_file, mock_vector_db):
    """_aload_from_path (upsert path) must stamp content metadata onto documents."""
    knowledge = Knowledge(vector_db=mock_vector_db)
    content = Content(
        path=temp_text_file,
        name="Test Document",
        metadata={"source": "test", "category": "documentation"},
    )
    content.content_hash = knowledge._build_content_hash(content)

    fake_docs = [Document(name="test", content="Test content")]
    with patch.object(knowledge, "_aread", return_value=fake_docs):
        await knowledge._aload_from_path(content, upsert=True, skip_if_exists=False)

    assert len(mock_vector_db.upserted_documents) == 1
    upserted = mock_vector_db.upserted_documents[0]
    assert upserted.meta_data.get("source") == "test"
    assert upserted.meta_data.get("category") == "documentation"
def test_load_from_path_propagates_metadata(temp_text_file, mock_vector_db):
    """_load_from_path (insert path) must stamp content metadata onto documents."""
    knowledge = Knowledge(vector_db=mock_vector_db)
    content = Content(
        path=temp_text_file,
        name="Test Document",
        metadata={"document_id": "789", "author": "test_author"},
    )
    content.content_hash = knowledge._build_content_hash(content)

    fake_docs = [Document(name="test", content="Test content")]
    with patch.object(knowledge, "_read", return_value=fake_docs):
        knowledge._load_from_path(content, upsert=False, skip_if_exists=False)

    assert len(mock_vector_db.inserted_documents) == 1
    inserted = mock_vector_db.inserted_documents[0]
    assert inserted.meta_data.get("document_id") == "789"
    assert inserted.meta_data.get("author") == "test_author"


def test_load_from_path_upsert_propagates_metadata(temp_text_file, mock_vector_db):
    """_load_from_path (upsert path) must stamp content metadata onto documents."""
    knowledge = Knowledge(vector_db=mock_vector_db)
    content = Content(
        path=temp_text_file,
        name="Test Document",
        metadata={"version": "1.0", "language": "en"},
    )
    content.content_hash = knowledge._build_content_hash(content)

    fake_docs = [Document(name="test", content="Test content")]
    with patch.object(knowledge, "_read", return_value=fake_docs):
        knowledge._load_from_path(content, upsert=True, skip_if_exists=False)

    assert len(mock_vector_db.upserted_documents) == 1
    upserted = mock_vector_db.upserted_documents[0]
    assert upserted.meta_data.get("version") == "1.0"
    assert upserted.meta_data.get("language") == "en"


def test_load_from_path_without_metadata(temp_text_file, mock_vector_db):
    """With no content metadata, the document's own metadata is preserved."""
    knowledge = Knowledge(vector_db=mock_vector_db)
    content = Content(
        path=temp_text_file,
        name="Test Document",
    )
    content.content_hash = knowledge._build_content_hash(content)

    original_doc = Document(name="test", content="Test content", meta_data={"original": "data"})
    with patch.object(knowledge, "_read", return_value=[original_doc]):
        knowledge._load_from_path(content, upsert=False, skip_if_exists=False)

    # Only linked_to is added on top of the document's own metadata.
    assert len(mock_vector_db.inserted_documents) == 1
    assert mock_vector_db.inserted_documents[0].meta_data == {"original": "data", "linked_to": ""}
def test_metadata_merges_with_existing_document_metadata(temp_text_file, mock_vector_db):
    """Content metadata merges with — and wins over — per-document metadata."""
    knowledge = Knowledge(vector_db=mock_vector_db)
    content = Content(
        path=temp_text_file,
        name="Test Document",
        metadata={"new_field": "new_value", "shared_field": "content_value"},
    )
    content.content_hash = knowledge._build_content_hash(content)

    # The reader yields a document that already carries its own metadata,
    # including a key that collides with the content-level metadata.
    doc_with_meta = Document(
        name="test",
        content="Test content",
        meta_data={"existing_field": "existing_value", "shared_field": "doc_value"},
    )
    with patch.object(knowledge, "_read", return_value=[doc_with_meta]):
        knowledge._load_from_path(content, upsert=False, skip_if_exists=False)

    assert len(mock_vector_db.inserted_documents) == 1
    merged = mock_vector_db.inserted_documents[0].meta_data
    assert merged.get("existing_field") == "existing_value"
    assert merged.get("new_field") == "new_value"
    # For shared keys, the content-level metadata overrides the document's.
    assert merged.get("shared_field") == "content_value"
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/knowledge/test_knowledge_metadata_propagation.py",
"license": "Apache License 2.0",
"lines": 267,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:libs/agno/tests/unit/models/litellm/test_client_deepcopy.py | """
Tests for LiteLLM client preservation across deepcopy.
This test verifies that custom client objects are preserved when the model is deep copied for background tasks.
"""
from copy import deepcopy
from unittest.mock import MagicMock
from agno.models.litellm import LiteLLM
def test_client_preserved_after_deepcopy():
    """A custom client object must survive a deepcopy of the model."""
    custom_client = MagicMock()
    copied = deepcopy(LiteLLM(id="test-model", client=custom_client))
    assert copied.client is custom_client


def test_original_client_set_on_init():
    """Providing a client at construction records it as _original_client."""
    custom_client = MagicMock()
    model = LiteLLM(id="test-model", client=custom_client)
    assert model._original_client is custom_client


def test_get_client_returns_client_after_deepcopy():
    """get_client() still hands back the original client after a deepcopy."""
    custom_client = MagicMock()
    copied = deepcopy(LiteLLM(id="test-model", client=custom_client))
    assert copied.get_client() is custom_client


def test_get_client_falls_back_to_original_client():
    """When client is cleared, get_client() falls back to _original_client."""
    custom_client = MagicMock()
    model = LiteLLM(id="test-model", client=custom_client)
    model.client = None
    assert model.get_client() is custom_client
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/tests/unit/models/litellm/test_client_deepcopy.py",
"license": "Apache License 2.0",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
agno-agi/agno:cookbook/00_quickstart/run.py | """
Agent OS - Web Interface for Your Agents
=========================================
This file starts an Agent OS server that provides a web interface for all
the agents, teams, and workflows in this Quick Start guide.
What is Agent OS?
-----------------
Agent OS is Agno's runtime that lets you:
- Chat with your agents through a beautiful web UI
- Explore session history
- Monitor traces and debug agent behavior
- Manage knowledge bases and memories
- Switch between agents, teams, and workflows
How to Use
----------
1. Start the server:
python cookbook/00_quickstart/run.py
2. Visit https://os.agno.com in your browser
3. Add your local endpoint: http://localhost:7777
4. Select any agent, team, or workflow and start chatting
Prerequisites
-------------
- All agents from this quick start are registered automatically
- For the knowledge agent, load the knowledge base first:
python cookbook/00_quickstart/agent_search_over_knowledge.py
Learn More
----------
- Agent OS Overview: https://docs.agno.com/agent-os/overview
- Agno Documentation: https://docs.agno.com
"""
from pathlib import Path
from agent_search_over_knowledge import agent_with_knowledge
from agent_with_guardrails import agent_with_guardrails
from agent_with_memory import agent_with_memory
from agent_with_state_management import agent_with_state_management
from agent_with_storage import agent_with_storage
from agent_with_structured_output import agent_with_structured_output
from agent_with_tools import agent_with_tools
from agent_with_typed_input_output import agent_with_typed_input_output
from agno.os import AgentOS
from custom_tool_for_self_learning import self_learning_agent
from human_in_the_loop import human_in_the_loop_agent
from multi_agent_team import multi_agent_team
from sequential_workflow import sequential_workflow
# ---------------------------------------------------------------------------
# AgentOS Config
# ---------------------------------------------------------------------------
config_path = str(Path(__file__).parent.joinpath("config.yaml"))
# ---------------------------------------------------------------------------
# Create AgentOS
# ---------------------------------------------------------------------------
# Register every agent/team/workflow from the quick start on one OS instance
# so they all appear in the web UI.
agent_os = AgentOS(
    id="Quick Start AgentOS",
    agents=[
        agent_with_tools,
        agent_with_storage,
        agent_with_knowledge,
        self_learning_agent,
        agent_with_structured_output,
        agent_with_typed_input_output,
        agent_with_memory,
        agent_with_state_management,
        human_in_the_loop_agent,
        agent_with_guardrails,
    ],
    teams=[multi_agent_team],
    workflows=[sequential_workflow],
    config=config_path,
    tracing=True,
)
# ASGI application object; also importable by uvicorn via "run:app".
app = agent_os.get_app()
# ---------------------------------------------------------------------------
# Run AgentOS
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # "run:app" = this module (run.py) and the `app` attribute defined above;
    # reload=True restarts the server on code changes during development.
    agent_os.serve(app="run:app", reload=True)
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/00_quickstart/run.py",
"license": "Apache License 2.0",
"lines": 76,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/05_agent_os/client/server.py | """
AgentOS Server for Cookbook Client Examples
"""
from agno.agent import Agent
from agno.db.sqlite import SqliteDb
from agno.knowledge.embedder.openai import OpenAIEmbedder
from agno.knowledge.knowledge import Knowledge
from agno.models.openai import OpenAIChat
from agno.os import AgentOS
from agno.team.team import Team
from agno.tools.calculator import CalculatorTools
from agno.tools.websearch import WebSearchTools
from agno.vectordb.chroma import ChromaDb
from agno.workflow.step import Step
from agno.workflow.workflow import Workflow
# ---------------------------------------------------------------------------
# Create Example
# ---------------------------------------------------------------------------
# =============================================================================
# Database Configuration
# =============================================================================
# SQLite database for sessions, memory, and content metadata
db = SqliteDb(db_file="tmp/cookbook_client.db")
# =============================================================================
# Knowledge Base Configuration
# =============================================================================
# Vector search is backed by a local ChromaDB collection; the SQLite db above
# doubles as the contents store.
knowledge = Knowledge(
    vector_db=ChromaDb(
        path="tmp/cookbook_chromadb",
        collection="cookbook_knowledge",
        embedder=OpenAIEmbedder(id="text-embedding-3-small"),
    ),
    contents_db=db,  # Required for content upload/management endpoints
)
# =============================================================================
# Agent Configuration
# =============================================================================
# Agent 1: Assistant with calculator tools and memory
assistant = Agent(
    name="Assistant",
    model=OpenAIChat(id="gpt-5.2"),
    db=db,
    instructions=[
        "You are a helpful AI assistant.",
        "Use the calculator tool for any math operations.",
        "You have access to a knowledge base - search it when asked about documents.",
    ],
    markdown=True,
    update_memory_on_run=True,  # Required for 03_memory_operations
    tools=[CalculatorTools()],
    knowledge=knowledge,
    search_knowledge=True,
)
# Agent 2: Researcher with web search capabilities
researcher = Agent(
    name="Researcher",
    model=OpenAIChat(id="gpt-5.2"),
    db=db,
    instructions=[
        "You are a research assistant.",
        "Search the web for information when needed.",
        "Provide well-researched, accurate responses.",
    ],
    markdown=True,
    tools=[WebSearchTools()],
)
# =============================================================================
# Team Configuration
# =============================================================================
# The team coordinates the two agents above; both also stay individually
# addressable through AgentOS.
research_team = Team(
    name="Research Team",
    model=OpenAIChat(id="gpt-5.2"),
    members=[assistant, researcher],
    instructions=[
        "You are a research team that coordinates multiple specialists.",
        "Delegate math questions to the Assistant.",
        "Delegate research questions to the Researcher.",
        "Combine insights from team members for comprehensive answers.",
    ],
    markdown=True,
    db=db,
)
# =============================================================================
# Workflow Configuration
# =============================================================================
qa_workflow = Workflow(
    name="QA Workflow",
    description="A simple Q&A workflow that uses the assistant agent",
    db=db,
    steps=[
        Step(
            name="Answer Question",
            agent=assistant,
        ),
    ],
)
# =============================================================================
# AgentOS Configuration
# =============================================================================
agent_os = AgentOS(
    id="cookbook-client-server",
    description="AgentOS server for running cookbook client examples",
    agents=[assistant, researcher],
    teams=[research_team],
    workflows=[qa_workflow],
    knowledge=[knowledge],
)
# FastAPI app instance (for uvicorn)
app = agent_os.get_app()
# =============================================================================
# Main Entry Point
# =============================================================================
# ---------------------------------------------------------------------------
# Run Example
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # "server:app" = this module (server.py) and the `app` attribute above.
    agent_os.serve(app="server:app", reload=True)
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/05_agent_os/client/server.py",
"license": "Apache License 2.0",
"lines": 117,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/05_agent_os/interfaces/agui/multiple_instances.py | """
Multiple Instances
==================
Demonstrates multiple instances.
"""
from agno.agent.agent import Agent
from agno.db.sqlite import SqliteDb
from agno.models.openai import OpenAIChat
from agno.os import AgentOS
from agno.os.interfaces.agui import AGUI
from agno.tools.websearch import WebSearchTools
# ---------------------------------------------------------------------------
# Create Example
# ---------------------------------------------------------------------------
db = SqliteDb(db_file="tmp/agentos.db")
# Plain conversational agent, exposed on the /chat AGUI endpoint below.
chat_agent = Agent(
    name="Assistant",
    model=OpenAIChat(id="gpt-5.2"),
    db=db,
    instructions="You are a helpful AI assistant.",
    add_datetime_to_context=True,
    markdown=True,
)
# Web-capable agent, exposed on the /web-research AGUI endpoint below.
web_research_agent = Agent(
    name="Web Research Agent",
    model=OpenAIChat(id="gpt-5.2"),
    db=db,
    tools=[WebSearchTools()],
    instructions="You are a helpful AI assistant that can search the web.",
    markdown=True,
)
# Setup your AgentOS app
# Two AGUI interfaces are mounted with distinct prefixes so each agent gets
# its own route namespace.
agent_os = AgentOS(
    agents=[chat_agent, web_research_agent],
    interfaces=[
        AGUI(agent=chat_agent, prefix="/chat"),
        AGUI(agent=web_research_agent, prefix="/web-research"),
    ],
)
app = agent_os.get_app()
# ---------------------------------------------------------------------------
# Run Example
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    """Run your AgentOS.
    You can see the configuration and available apps at:
    http://localhost:7777/config
    """
    # "multiple_instances:app" is uvicorn's import string for this module's
    # `app` object; reload=True restarts on code changes.
    agent_os.serve(app="multiple_instances:app", reload=True)
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/05_agent_os/interfaces/agui/multiple_instances.py",
"license": "Apache License 2.0",
"lines": 49,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/05_agent_os/interfaces/whatsapp/agent_with_media.py | """
Agent With Media
================
Demonstrates agent with media.
"""
from agno.agent import Agent
from agno.db.sqlite import SqliteDb
from agno.models.google import Gemini
from agno.os.app import AgentOS
from agno.os.interfaces.whatsapp import Whatsapp
# ---------------------------------------------------------------------------
# Create Example
# ---------------------------------------------------------------------------
agent_db = SqliteDb(db_file="tmp/persistent_memory.db")
# Gemini-backed agent served over the WhatsApp interface.
# NOTE(review): the example name implies this model handles media (images,
# audio) — confirm the chosen Gemini model supports the media types you expect.
media_agent = Agent(
    name="Media Agent",
    model=Gemini(id="gemini-3-flash-preview"),
    db=agent_db,
    add_history_to_context=True,
    num_history_runs=3,
    add_datetime_to_context=True,
    markdown=True,
)
# Setup our AgentOS app
agent_os = AgentOS(
    agents=[media_agent],
    interfaces=[Whatsapp(agent=media_agent)],
)
app = agent_os.get_app()
# ---------------------------------------------------------------------------
# Run Example
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    """Run your AgentOS.
    You can see the configuration and available apps at:
    http://localhost:7777/config
    """
    # "agent_with_media:app" = this module (agent_with_media.py) and `app` above.
    agent_os.serve(app="agent_with_media:app", reload=True)
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/05_agent_os/interfaces/whatsapp/agent_with_media.py",
"license": "Apache License 2.0",
"lines": 38,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/05_agent_os/interfaces/whatsapp/multiple_instances.py | """
Multiple Instances
==================
Demonstrates multiple instances.
"""
from agno.agent import Agent
from agno.db.sqlite import SqliteDb
from agno.models.openai import OpenAIChat
from agno.os.app import AgentOS
from agno.os.interfaces.whatsapp import Whatsapp
from agno.tools.websearch import WebSearchTools

# ---------------------------------------------------------------------------
# Create Example
# ---------------------------------------------------------------------------
# Shared SQLite store so both agents persist their chat history.
agent_db = SqliteDb(db_file="tmp/persistent_memory.db")

# Plain conversational agent: last 3 runs kept in context, markdown output.
basic_agent = Agent(
    name="Basic Agent",
    model=OpenAIChat(id="gpt-5.2"),
    db=agent_db,
    add_history_to_context=True,
    num_history_runs=3,
    add_datetime_to_context=True,
    markdown=True,
)

# Research agent: same history settings, plus web search tooling.
web_research_agent = Agent(
    name="Web Research Agent",
    model=OpenAIChat(id="gpt-5.2"),
    db=agent_db,
    tools=[WebSearchTools()],
    add_history_to_context=True,
    num_history_runs=3,
    add_datetime_to_context=True,
)

# Setup our AgentOS app: one WhatsApp interface per agent, each mounted
# under its own URL prefix.
agent_os = AgentOS(
    agents=[basic_agent, web_research_agent],
    interfaces=[
        Whatsapp(agent=basic_agent, prefix="/basic"),
        Whatsapp(agent=web_research_agent, prefix="/web-research"),
    ],
)
app = agent_os.get_app()

# ---------------------------------------------------------------------------
# Run Example
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    """Run your AgentOS.
    You can see the configuration and available apps at:
    http://localhost:7777/config
    """
    # Fix: this file is multiple_instances.py, so uvicorn's import string must
    # be "multiple_instances:app" — the original "basic:app" pointed at a
    # module that does not exist here and the server would fail to start.
    agent_os.serve(app="multiple_instances:app", reload=True)
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/05_agent_os/interfaces/whatsapp/multiple_instances.py",
"license": "Apache License 2.0",
"lines": 51,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/05_agent_os/remote/server.py | """
AgentOS Server for Cookbook Client Examples
"""
from agno.agent import Agent
from agno.db.sqlite import SqliteDb
from agno.knowledge.embedder.openai import OpenAIEmbedder
from agno.knowledge.knowledge import Knowledge
from agno.models.openai import OpenAIChat
from agno.os import AgentOS
from agno.team.team import Team
from agno.tools.calculator import CalculatorTools
from agno.tools.websearch import WebSearchTools
from agno.vectordb.chroma import ChromaDb
from agno.workflow.step import Step
from agno.workflow.workflow import Workflow
# ---------------------------------------------------------------------------
# Create Example
# ---------------------------------------------------------------------------
# =============================================================================
# Database Configuration
# =============================================================================
# SQLite database for sessions, memory, and content metadata
db = SqliteDb(id="cookbook-client-db", db_file="tmp/cookbook_client.db")
# =============================================================================
# Knowledge Base Configuration
# =============================================================================
# ChromaDB vector store with OpenAI embeddings; the SQLite db above doubles
# as the contents store so the content upload/management endpoints work.
knowledge = Knowledge(
    vector_db=ChromaDb(
        path="tmp/cookbook_chromadb",
        collection="cookbook_knowledge",
        embedder=OpenAIEmbedder(id="text-embedding-3-small"),
    ),
    contents_db=db,  # Required for content upload/management endpoints
)
# =============================================================================
# Agent Configuration
# =============================================================================
# Agent 1: Assistant with calculator tools and memory
assistant = Agent(
    name="Assistant",
    id="assistant-agent",
    description="You are a helpful AI assistant.",
    model=OpenAIChat(id="gpt-5.2"),
    db=db,
    instructions=[
        "You are a helpful AI assistant.",
        "Use the calculator tool for any math operations.",
        "You have access to a knowledge base - search it when asked about documents.",
    ],
    markdown=True,
    update_memory_on_run=True,  # Required for 03_memory_operations
    tools=[CalculatorTools()],
    knowledge=knowledge,
    search_knowledge=True,
)
# Agent 2: Researcher with web search capabilities
researcher = Agent(
    name="Researcher",
    id="researcher-agent",
    # NOTE(review): uses "gpt-5" while the other components use "gpt-5.2" —
    # confirm this difference is intentional.
    model=OpenAIChat(id="gpt-5"),
    db=db,
    instructions=[
        "You are a research assistant.",
        "Search the web for information when needed.",
        "Provide well-researched, accurate responses.",
    ],
    markdown=True,
    tools=[WebSearchTools()],
)
# =============================================================================
# Team Configuration
# =============================================================================
# Coordinator that delegates between the two agents defined above.
research_team = Team(
    name="Research Team",
    id="research-team",
    model=OpenAIChat(id="gpt-5.2"),
    members=[assistant, researcher],
    instructions=[
        "You are a research team that coordinates multiple specialists.",
        "Delegate math questions to the Assistant.",
        "Delegate research questions to the Researcher.",
        "Combine insights from team members for comprehensive answers.",
    ],
    markdown=True,
    db=db,
)
# =============================================================================
# Workflow Configuration
# =============================================================================
# Single-step workflow that routes every question through the assistant agent.
qa_workflow = Workflow(
    name="QA Workflow",
    description="A simple Q&A workflow that uses the assistant agent",
    id="qa-workflow",
    db=db,
    steps=[
        Step(
            name="Answer Question",
            agent=assistant,
        ),
    ],
)
# =============================================================================
# AgentOS Configuration
# =============================================================================
# Registers the agents, team, workflow, and knowledge base on one server.
agent_os = AgentOS(
    id="cookbook-client-server",
    description="AgentOS server for running cookbook client examples",
    agents=[assistant, researcher],
    teams=[research_team],
    workflows=[qa_workflow],
    knowledge=[knowledge],
)
# FastAPI app instance (for uvicorn)
app = agent_os.get_app()
# =============================================================================
# Main Entry Point
# =============================================================================
# ---------------------------------------------------------------------------
# Run Example
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # Served on port 7778 (other cookbook examples in this repo use 7777).
    agent_os.serve(app="server:app", reload=True, access_log=True, port=7778)
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/05_agent_os/remote/server.py",
"license": "Apache License 2.0",
"lines": 122,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/07_knowledge/knowledge_tools.py | """
Here is a tool with reasoning capabilities to allow agents to search and analyze information from a knowledge base.
1. Run: `uv pip install openai agno lancedb tantivy sqlalchemy` to install the dependencies
2. Export your OPENAI_API_KEY
3. Run: `python cookbook/07_knowledge/knowledge_tools.py` to run the agent
"""
from agno.agent import Agent
from agno.knowledge.embedder.openai import OpenAIEmbedder
from agno.knowledge.knowledge import Knowledge
from agno.models.openai import OpenAIChat
from agno.tools.knowledge import KnowledgeTools
from agno.vectordb.lancedb import LanceDb, SearchType

# Knowledge base backed by LanceDB (hybrid vector + keyword search); embeddings
# are produced with OpenAI and stored in the local `agno_docs` table.
agno_docs = Knowledge(
    vector_db=LanceDb(
        uri="tmp/lancedb",
        table_name="agno_docs",
        search_type=SearchType.hybrid,
        embedder=OpenAIEmbedder(id="text-embedding-3-small"),
    ),
)

# Load the Agno documentation into the knowledge base.
agno_docs.insert(url="https://docs.agno.com/llms-full.txt")

# Reasoning-enabled toolkit over the knowledge base: think, search, and
# analyze operations, seeded with few-shot examples.
knowledge_tools = KnowledgeTools(
    knowledge=agno_docs,
    enable_think=True,
    enable_search=True,
    enable_analyze=True,
    add_few_shot=True,
)

# Agent that answers questions via the knowledge toolkit.
agent = Agent(
    model=OpenAIChat(id="gpt-4o"),
    tools=[knowledge_tools],
    markdown=True,
)

if __name__ == "__main__":
    agent.print_response(
        "How do I build a team of agents in agno?",
        markdown=True,
        stream=True,
    )
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/07_knowledge/knowledge_tools.py",
"license": "Apache License 2.0",
"lines": 42,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/09_evals/performance/db_logging.py | """
Performance Evaluation with Database Logging
============================================
Demonstrates storing performance evaluation results in PostgreSQL.
"""
from agno.agent import Agent
from agno.db.postgres.postgres import PostgresDb
from agno.eval.performance import PerformanceEval
from agno.models.openai import OpenAIChat

# ---------------------------------------------------------------------------
# Benchmark target
# ---------------------------------------------------------------------------
def run_agent():
    """Build a one-shot concise agent, ask a single question, and return the response."""
    concise_agent = Agent(
        model=OpenAIChat(id="gpt-5.2"),
        system_message="Be concise, reply with one sentence.",
    )
    result = concise_agent.run("What is the capital of France?")
    print(result.content)
    return result

# ---------------------------------------------------------------------------
# Database
# ---------------------------------------------------------------------------
# Evaluation runs are persisted to the `eval_runs_cookbook` table in Postgres.
db_url = "postgresql+psycopg://ai:ai@localhost:5432/ai"
db = PostgresDb(db_url=db_url, eval_table="eval_runs_cookbook")

# ---------------------------------------------------------------------------
# Evaluation
# ---------------------------------------------------------------------------
# Single iteration, no warmup: times one call of run_agent and logs it to db.
simple_response_perf = PerformanceEval(
    db=db,
    name="Simple Performance Evaluation",
    func=run_agent,
    num_iterations=1,
    warmup_runs=0,
)

if __name__ == "__main__":
    simple_response_perf.run(print_results=True, print_summary=True)
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/09_evals/performance/db_logging.py",
"license": "Apache License 2.0",
"lines": 40,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/09_evals/reliability/db_logging.py | """
Reliability Evaluation with Database Logging
============================================
Demonstrates storing reliability evaluation results in PostgreSQL.
"""
from typing import Optional
from agno.agent import Agent
from agno.db.postgres.postgres import PostgresDb
from agno.eval.reliability import ReliabilityEval, ReliabilityResult
from agno.models.openai import OpenAIChat
from agno.run.agent import RunOutput
from agno.tools.calculator import CalculatorTools

# ---------------------------------------------------------------------------
# Database
# ---------------------------------------------------------------------------
# Evaluation results are written to the `eval_runs` table in Postgres.
db_url = "postgresql+psycopg://ai:ai@localhost:5432/ai"
db = PostgresDb(db_url=db_url, eval_table="eval_runs")

# ---------------------------------------------------------------------------
# Agent under test
# ---------------------------------------------------------------------------
agent = Agent(
    model=OpenAIChat(id="gpt-5.2"),
    tools=[CalculatorTools()],
)

# ---------------------------------------------------------------------------
# Run Evaluation
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # Ask a factorial question, then verify the `factorial` tool was invoked.
    run_output: RunOutput = agent.run("What is 10!?")
    reliability_check = ReliabilityEval(
        db=db,
        name="Tool Call Reliability",
        agent_response=run_output,
        expected_tool_calls=["factorial"],
    )
    outcome: Optional[ReliabilityResult] = reliability_check.run(print_results=True)
    if outcome:
        outcome.assert_passed()
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/09_evals/reliability/db_logging.py",
"license": "Apache License 2.0",
"lines": 41,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/09_evals/reliability/multiple_tool_calls/calculator.py | """
Multiple Tool Call Reliability Evaluation
=========================================
Demonstrates reliability checks for multiple expected tool calls.
"""
from typing import Optional
from agno.agent import Agent
from agno.eval.reliability import ReliabilityEval, ReliabilityResult
from agno.models.openai import OpenAIChat
from agno.run.agent import RunOutput
from agno.tools.calculator import CalculatorTools

# ---------------------------------------------------------------------------
# Evaluation
# ---------------------------------------------------------------------------
def multiply_and_exponentiate():
    """Verify the agent calls both the `multiply` and `exponentiate` tools."""
    calc_agent = Agent(
        model=OpenAIChat(id="gpt-5.2"),
        tools=[CalculatorTools()],
    )
    run_output: RunOutput = calc_agent.run(
        "What is 10*5 then to the power of 2? do it step by step"
    )
    reliability_check = ReliabilityEval(
        name="Tool Calls Reliability",
        agent_response=run_output,
        expected_tool_calls=["multiply", "exponentiate"],
    )
    outcome: Optional[ReliabilityResult] = reliability_check.run(print_results=True)
    if outcome:
        outcome.assert_passed()

# ---------------------------------------------------------------------------
# Entry point
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    multiply_and_exponentiate()
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/09_evals/reliability/multiple_tool_calls/calculator.py",
"license": "Apache License 2.0",
"lines": 35,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.