sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
agno-agi/agno:cookbook/09_evals/reliability/single_tool_calls/calculator.py | """
Single Tool Call Reliability Evaluation
=======================================
Demonstrates reliability checks for one expected tool call.
"""
from typing import Optional
from agno.agent import Agent
from agno.eval.reliability import ReliabilityEval, ReliabilityResult
from agno.models.openai import OpenAIChat
from agno.run.agent import RunOutput
from agno.tools.calculator import CalculatorTools
# ---------------------------------------------------------------------------
# Create Evaluation Function
# ---------------------------------------------------------------------------
def factorial():
agent = Agent(
model=OpenAIChat(id="gpt-5.2"),
tools=[CalculatorTools()],
)
response: RunOutput = agent.run("What is 10! (ten factorial)?")
evaluation = ReliabilityEval(
name="Tool Call Reliability",
agent_response=response,
expected_tool_calls=["factorial"],
)
result: Optional[ReliabilityResult] = evaluation.run(print_results=True)
if result:
result.assert_passed()
# ---------------------------------------------------------------------------
# Run Evaluation
# ---------------------------------------------------------------------------
if __name__ == "__main__":
factorial()
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/09_evals/reliability/single_tool_calls/calculator.py",
"license": "Apache License 2.0",
"lines": 33,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/aimlapi/basic.py | """
Aimlapi Basic
=============
Cookbook example for `aimlapi/basic.py`.
"""
from agno.agent import Agent, RunOutput # noqa
from agno.models.aimlapi import AIMLAPI
import asyncio
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
agent = Agent(model=AIMLAPI(id="gpt-5.2"), markdown=True)
# Get the response in a variable
# run: RunOutput = agent.run("Share a 2 sentence horror story")
# print(run.content)
# Print the response in the terminal
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
# --- Sync ---
agent.print_response("Share a 2 sentence horror story")
# --- Sync + Streaming ---
agent.print_response("Share a 2 sentence horror story", stream=True)
# --- Async ---
asyncio.run(agent.aprint_response("Share a 2 sentence horror story"))
# --- Async + Streaming ---
asyncio.run(agent.aprint_response("Share a 2 sentence horror story", stream=True))
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/aimlapi/basic.py",
"license": "Apache License 2.0",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/aimlapi/structured_output.py | """
Aimlapi Structured Output
=========================
Cookbook example for `aimlapi/structured_output.py`.
"""
from typing import List
from agno.agent import Agent, RunOutput # noqa
from agno.models.aimlapi import AIMLAPI
from pydantic import BaseModel, Field
from rich.pretty import pprint # noqa
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
class MovieScript(BaseModel):
setting: str = Field(
..., description="Provide a nice setting for a blockbuster movie."
)
ending: str = Field(
...,
description="Ending of the movie. If not available, provide a happy ending.",
)
genre: str = Field(
...,
description="Genre of the movie. If not available, select action, thriller or romantic comedy.",
)
name: str = Field(..., description="Give a name to this movie")
characters: List[str] = Field(..., description="Name of characters for this movie.")
storyline: str = Field(
..., description="3 sentence storyline for the movie. Make it exciting!"
)
json_mode_agent = Agent(
model=AIMLAPI(id="gpt-5.2"),
description="You help people write movie scripts.",
output_schema=MovieScript,
use_json_mode=True,
)
# Get the response in a variable
json_mode_response: RunOutput = json_mode_agent.run("New York")
pprint(json_mode_response.content)
# json_mode_agent.print_response("New York")
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/aimlapi/structured_output.py",
"license": "Apache License 2.0",
"lines": 45,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/anthropic/db.py | """Run `uv pip install ddgs sqlalchemy anthropic` to install dependencies."""
from agno.agent import Agent
from agno.db.postgres import PostgresDb
from agno.models.anthropic import Claude
from agno.tools.websearch import WebSearchTools
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# Setup the database
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
db = PostgresDb(db_url=db_url)
agent = Agent(
model=Claude(id="claude-sonnet-4-20250514"),
db=db,
tools=[WebSearchTools()],
add_history_to_context=True,
)
agent.print_response("How many people live in Canada?")
agent.print_response("What is their national anthem called?")
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/anthropic/db.py",
"license": "Apache License 2.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/anthropic/knowledge.py | """Run `uv pip install ddgs sqlalchemy pgvector pypdf anthropic openai` to install dependencies."""
from agno.agent import Agent
from agno.knowledge.embedder.azure_openai import AzureOpenAIEmbedder
from agno.knowledge.knowledge import Knowledge
from agno.models.anthropic import Claude
from agno.vectordb.pgvector import PgVector
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
knowledge = Knowledge(
vector_db=PgVector(
table_name="recipes",
db_url=db_url,
embedder=AzureOpenAIEmbedder(),
),
)
# Add content to the knowledge
knowledge.insert(url="https://agno-public.s3.amazonaws.com/recipes/ThaiRecipes.pdf")
agent = Agent(
model=Claude(id="claude-sonnet-4-20250514"),
knowledge=knowledge,
)
agent.print_response("How to make Thai curry?", markdown=True)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/anthropic/knowledge.py",
"license": "Apache License 2.0",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/anthropic/memory.py | """
This recipe shows how to use personalized memories and summaries in an agent.
Steps:
1. Run: `./cookbook/scripts/run_pgvector.sh` to start a postgres container with pgvector
2. Run: `uv pip install anthropic sqlalchemy 'psycopg[binary]' pgvector` to install the dependencies
3. Run: `python cookbook/92_models/anthropic/memory.py` to run the agent
"""
from agno.agent import Agent
from agno.db.postgres import PostgresDb
from agno.models.anthropic import Claude
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# Setup the database
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
db = PostgresDb(db_url=db_url)
agent = Agent(
model=Claude(id="claude-sonnet-4-20250514"),
# Pass the database to the Agent
db=db,
# Store the memories and summary in the database
update_memory_on_run=True,
enable_session_summaries=True,
)
# -*- Share personal information
agent.print_response("My name is john billings?", stream=True)
# -*- Share personal information
agent.print_response("I live in nyc?", stream=True)
# -*- Share personal information
agent.print_response("I'm going to a concert tomorrow?", stream=True)
# Ask about the conversation
agent.print_response(
"What have we been talking about, do you know my name?", stream=True
)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/anthropic/memory.py",
"license": "Apache License 2.0",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/anthropic/tool_use.py | """Run `uv pip install ddgs` to install dependencies."""
import asyncio
from agno.agent import Agent
from agno.models.anthropic import Claude
from agno.tools.websearch import WebSearchTools
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
agent = Agent(
model=Claude(id="claude-sonnet-4-20250514"),
tools=[WebSearchTools()],
markdown=True,
)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
# --- Sync ---
agent.print_response("Whats happening in France?")
# --- Sync + Streaming ---
agent.print_response("Whats happening in France?", stream=True)
# --- Async + Streaming ---
asyncio.run(agent.aprint_response("Whats happening in France?", stream=True))
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/anthropic/tool_use.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/aws/bedrock/tool_use.py | """Run `uv pip install ddgs` to install dependencies."""
import asyncio
from agno.agent import Agent
from agno.models.aws import AwsBedrock
from agno.tools.websearch import WebSearchTools
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
agent = Agent(
model=AwsBedrock(id="us.anthropic.claude-3-5-haiku-20241022-v1:0"),
tools=[WebSearchTools()],
instructions="You are a helpful assistant that can use the following tools to answer questions.",
markdown=True,
)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
# --- Sync ---
agent.print_response("Whats happening in France?")
# --- Sync + Streaming ---
agent.print_response("Whats happening in France?", stream=True)
# --- Async + Streaming ---
asyncio.run(agent.aprint_response("Whats happening in France?", stream=True))
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/aws/bedrock/tool_use.py",
"license": "Apache License 2.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/aws/claude/db.py | """Run `uv pip install ddgs sqlalchemy anthropic` to install dependencies."""
from agno.agent import Agent
from agno.db.postgres import PostgresDb
from agno.models.aws import Claude
from agno.tools.websearch import WebSearchTools
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# Setup the database
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
db = PostgresDb(db_url=db_url)
agent = Agent(
model=Claude(id="global.anthropic.claude-sonnet-4-5-20250929-v1:0"),
db=db,
tools=[WebSearchTools()],
add_history_to_context=True,
)
agent.print_response("How many people live in Canada?")
agent.print_response("What is their national anthem called?")
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/aws/claude/db.py",
"license": "Apache License 2.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/aws/claude/knowledge.py | """Run `uv pip install ddgs sqlalchemy pgvector pypdf openai anthropic` to install dependencies."""
from agno.agent import Agent
from agno.knowledge.knowledge import Knowledge
from agno.models.aws import Claude
from agno.vectordb.pgvector import PgVector
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
knowledge = Knowledge(
vector_db=PgVector(table_name="recipes", db_url=db_url),
)
# Add content to the knowledge
knowledge.insert(url="https://agno-public.s3.amazonaws.com/recipes/ThaiRecipes.pdf")
agent = Agent(
model=Claude(id="global.anthropic.claude-sonnet-4-5-20250929-v1:0"),
knowledge=knowledge,
)
agent.print_response("How to make Thai curry?", markdown=True)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/aws/claude/knowledge.py",
"license": "Apache License 2.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/aws/claude/tool_use.py | """Run `uv pip install ddgs` to install dependencies."""
import asyncio
from agno.agent import Agent
from agno.models.aws import Claude
from agno.tools.websearch import WebSearchTools
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
agent = Agent(
model=Claude(id="global.anthropic.claude-sonnet-4-5-20250929-v1:0"),
tools=[WebSearchTools()],
markdown=True,
)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
# --- Sync ---
agent.print_response("Whats happening in France?")
# --- Sync + Streaming ---
agent.print_response("Whats happening in France?", stream=True)
# --- Async + Streaming ---
asyncio.run(agent.aprint_response("Whats happening in France?", stream=True))
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/aws/claude/tool_use.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/azure/ai_foundry/db.py | """Run `uv pip install ddgs sqlalchemy anthropic` to install dependencies."""
from agno.agent import Agent
from agno.db.postgres import PostgresDb
from agno.models.azure import AzureAIFoundry
from agno.tools.websearch import WebSearchTools
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# Setup the database
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
db = PostgresDb(db_url=db_url)
agent = Agent(
model=AzureAIFoundry(id="Phi-4"),
db=db,
tools=[WebSearchTools()],
add_history_to_context=True,
)
agent.print_response("How many people live in Canada?")
agent.print_response("What is their national anthem called?")
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/azure/ai_foundry/db.py",
"license": "Apache License 2.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/azure/ai_foundry/knowledge.py | """Run `uv pip install ddgs sqlalchemy pgvector pypdf openai` to install dependencies."""
from agno.agent import Agent
from agno.knowledge.embedder.azure_openai import AzureOpenAIEmbedder
from agno.knowledge.knowledge import Knowledge
from agno.models.azure import AzureAIFoundry
from agno.vectordb.pgvector import PgVector
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
knowledge = Knowledge(
vector_db=PgVector(
table_name="recipes",
db_url=db_url,
embedder=AzureOpenAIEmbedder(),
),
)
# Add content to the knowledge
knowledge.insert(url="https://agno-public.s3.amazonaws.com/recipes/ThaiRecipes.pdf")
agent = Agent(
model=AzureAIFoundry(id="Cohere-command-r-08-2024"),
knowledge=knowledge,
)
agent.print_response("How to make Thai curry?", markdown=True)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/azure/ai_foundry/knowledge.py",
"license": "Apache License 2.0",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/azure/ai_foundry/tool_use.py | """Run `uv pip install ddgs` to install dependencies."""
import asyncio
from agno.agent import Agent
from agno.models.azure import AzureAIFoundry
from agno.tools.websearch import WebSearchTools
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
agent = Agent(
model=AzureAIFoundry(id="Cohere-command-r-08-2024"),
tools=[WebSearchTools()],
markdown=True,
)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
# --- Sync + Streaming ---
agent.print_response("What is currently happening in France?", stream=True)
# --- Async ---
asyncio.run(agent.aprint_response("Whats happening in France?"))
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/azure/ai_foundry/tool_use.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/azure/openai/basic.py | """
Azure Basic
===========
Cookbook example for `azure/openai/basic.py`.
"""
from agno.agent import Agent, RunOutput # noqa
from agno.models.azure import AzureOpenAI
import asyncio
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
agent = Agent(model=AzureOpenAI(id="gpt-5.2"), markdown=True)
# Get the response in a variable
# run: RunOutput = agent.run("Share a 2 sentence horror story")
# print(run.content)
# Print the response on the terminal
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
# --- Sync ---
agent.print_response("Share a 2 sentence horror story")
# --- Sync + Streaming ---
agent.print_response("Share a 2 sentence horror story", stream=True)
# --- Async ---
asyncio.run(agent.aprint_response("Share a breakfast recipe.", markdown=True))
# --- Async + Streaming ---
asyncio.run(
agent.aprint_response("Share a breakfast recipe.", markdown=True, stream=True)
)
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/azure/openai/basic.py",
"license": "Apache License 2.0",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/azure/openai/db.py | """Run `uv pip install ddgs sqlalchemy anthropic` to install dependencies."""
from agno.agent import Agent
from agno.db.postgres import PostgresDb
from agno.models.azure import AzureOpenAI
from agno.tools.websearch import WebSearchTools
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# Setup the database
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
db = PostgresDb(db_url=db_url)
agent = Agent(
model=AzureOpenAI(id="gpt-5.2"),
db=db,
tools=[WebSearchTools()],
add_history_to_context=True,
)
agent.print_response("How many people live in Canada?")
agent.print_response("What is their national anthem called?")
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/azure/openai/db.py",
"license": "Apache License 2.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/azure/openai/knowledge.py | """Run `uv pip install ddgs sqlalchemy pgvector pypdf openai` to install dependencies."""
import asyncio
from agno.agent import Agent
from agno.knowledge.embedder.azure_openai import AzureOpenAIEmbedder
from agno.knowledge.knowledge import Knowledge
from agno.models.azure import AzureOpenAI
from agno.vectordb.pgvector import PgVector
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
knowledge = Knowledge(
vector_db=PgVector(
table_name="recipes",
db_url=db_url,
embedder=AzureOpenAIEmbedder(),
),
)
# Add content to the knowledge
asyncio.run(
knowledge.ainsert(
url="https://agno-public.s3.amazonaws.com/recipes/ThaiRecipes.pdf"
)
)
agent = Agent(
model=AzureOpenAI(id="gpt-5.2"),
knowledge=knowledge,
)
agent.print_response("How to make Thai curry?", markdown=True)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/azure/openai/knowledge.py",
"license": "Apache License 2.0",
"lines": 34,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/azure/openai/structured_output.py | """
Azure Structured Output
=======================
Cookbook example for `azure/openai/structured_output.py`.
"""
from typing import List
from agno.agent import Agent, RunOutput # noqa
from agno.models.azure import AzureOpenAI
from pydantic import BaseModel, Field
from rich.pretty import pprint # noqa
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
class MovieScript(BaseModel):
setting: str = Field(
..., description="Provide a nice setting for a blockbuster movie."
)
ending: str = Field(
...,
description="Ending of the movie. If not available, provide a happy ending.",
)
genre: str = Field(
...,
description="Genre of the movie. If not available, select action, thriller or romantic comedy.",
)
name: str = Field(..., description="Give a name to this movie")
characters: List[str] = Field(..., description="Name of characters for this movie.")
storyline: str = Field(
..., description="3 sentence storyline for the movie. Make it exciting!"
)
agent = Agent(
model=AzureOpenAI(id="gpt-5.2"),
description="You help people write movie scripts.",
output_schema=MovieScript,
)
# Get the response in a variable
run: RunOutput = agent.run("New York")
pprint(run.content)
# agent.print_response("New York")
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/azure/openai/structured_output.py",
"license": "Apache License 2.0",
"lines": 44,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/cerebras/db.py | """Run `uv pip install ddgs sqlalchemy cerebras_cloud_sdk` to install dependencies."""
from agno.agent import Agent
from agno.db.postgres import PostgresDb
from agno.models.cerebras import Cerebras
from agno.tools.websearch import WebSearchTools
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# Setup the database
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
db = PostgresDb(db_url=db_url)
agent = Agent(
model=Cerebras(id="llama-3.3-70b"),
db=db,
tools=[WebSearchTools()],
add_history_to_context=True,
)
agent.print_response("How many people live in Canada?")
agent.print_response("What is their national anthem called?")
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/cerebras/db.py",
"license": "Apache License 2.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/cerebras/knowledge.py | """Run `uv pip install ddgs sqlalchemy pgvector pypdf cerebras_cloud_sdk` to install dependencies."""
from agno.agent import Agent
from agno.knowledge.knowledge import Knowledge
from agno.models.cerebras import Cerebras
from agno.vectordb.pgvector import PgVector
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
knowledge = Knowledge(
vector_db=PgVector(table_name="recipes", db_url=db_url),
)
# Add content to the knowledge
knowledge.insert(url="https://agno-public.s3.amazonaws.com/recipes/ThaiRecipes.pdf")
agent = Agent(model=Cerebras(id="llama-3.3-70b"), knowledge=knowledge)
agent.print_response("How to make Thai curry?", markdown=True)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/cerebras/knowledge.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/cerebras_openai/db.py | """Run `uv pip install ddgs sqlalchemy cerebras_cloud_sdk` to install dependencies."""
from agno.agent import Agent
from agno.db.postgres import PostgresDb
from agno.models.cerebras import CerebrasOpenAI
from agno.tools.websearch import WebSearchTools
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# Setup the database
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
db = PostgresDb(db_url=db_url)
agent = Agent(
model=CerebrasOpenAI(id="llama-4-scout-17b-16e-instruct"),
db=db,
tools=[WebSearchTools()],
add_history_to_context=True,
)
agent.print_response("How many people live in Canada?")
agent.print_response("What is their national anthem called?")
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/cerebras_openai/db.py",
"license": "Apache License 2.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/cerebras_openai/knowledge.py | """Run `uv pip install ddgs sqlalchemy pgvector pypdf cerebras_cloud_sdk` to install dependencies."""
from agno.agent import Agent
from agno.knowledge.knowledge import Knowledge
from agno.models.cerebras import CerebrasOpenAI
from agno.vectordb.pgvector import PgVector
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
knowledge = Knowledge(
vector_db=PgVector(table_name="recipes", db_url=db_url),
)
# Add content to the knowledge
knowledge.insert(url="https://agno-public.s3.amazonaws.com/recipes/ThaiRecipes.pdf")
agent = Agent(
model=CerebrasOpenAI(id="llama-4-scout-17b-16e-instruct"), knowledge=knowledge
)
agent.print_response("How to make Thai curry?", markdown=True)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/cerebras_openai/knowledge.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/cohere/db.py | """Run `uv pip install ddgs sqlalchemy cohere` to install dependencies."""
from agno.agent import Agent
from agno.db.postgres import PostgresDb
from agno.models.cohere import Cohere
from agno.tools.websearch import WebSearchTools
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# Setup the database
# NOTE(review): assumes a Postgres instance on localhost:5532 with user/password
# "ai" (the cookbook docker setup) — confirm against your environment.
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
db = PostgresDb(db_url=db_url)
agent = Agent(
    model=Cohere(id="command-a-03-2025"),
    # Persist sessions so earlier turns can be fed back into later requests.
    db=db,
    tools=[WebSearchTools()],
    # The second question below ("their national anthem") only resolves
    # because prior messages are included in the context.
    add_history_to_context=True,
)
agent.print_response("How many people live in Canada?")
agent.print_response("What is their national anthem called?")
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # All work happens at import time above; nothing extra to run here.
    pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/cohere/db.py",
"license": "Apache License 2.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/cohere/knowledge.py | """Run `uv pip install ddgs sqlalchemy pgvector pypdf openai cohere` to install dependencies."""
from agno.agent import Agent
from agno.knowledge.knowledge import Knowledge
from agno.models.cohere import Cohere
from agno.vectordb.pgvector import PgVector
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# NOTE(review): assumes a pgvector-enabled Postgres on localhost:5532 — confirm.
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
knowledge = Knowledge(
    # Embeddings are stored in the "recipes" table of the pgvector database.
    vector_db=PgVector(table_name="recipes", db_url=db_url),
)
# Add content to the knowledge
# Indexes the recipe PDF so the agent can ground its answer in it.
knowledge.insert(url="https://agno-public.s3.amazonaws.com/recipes/ThaiRecipes.pdf")
agent = Agent(model=Cohere(id="command-a-03-2025"), knowledge=knowledge)
agent.print_response("How to make Thai curry?", markdown=True)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/cohere/knowledge.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/cohere/memory.py | """
This recipe shows how to use personalized memories and summaries in an agent.
Steps:
1. Run: `./cookbook/scripts/run_pgvector.sh` to start a postgres container with pgvector
2. Run: `uv pip install cohere sqlalchemy 'psycopg[binary]' pgvector` to install the dependencies
3. Run: `python cookbook/90_models/cohere/memory.py` to run the agent
"""
from agno.agent.agent import Agent
from agno.db.postgres import PostgresDb
from agno.models.cohere import Cohere
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
agent = Agent(
    model=Cohere(id="command-a-03-2025"),
    # Store agent sessions in a database
    db=PostgresDb(db_url=db_url),
    # Extract and persist user memories after each run.
    update_memory_on_run=True,
    # Keep a rolling summary of the session alongside raw history.
    enable_session_summaries=True,
)
# -*- Share personal information
agent.print_response("My name is john billings?", stream=True)
# -*- Share personal information
agent.print_response("I live in nyc?", stream=True)
# -*- Share personal information
agent.print_response("I'm going to a concert tomorrow?", stream=True)
# Ask about the conversation
# The recall below relies on the memories/summaries captured above.
agent.print_response(
    "What have we been talking about, do you know my name?", stream=True
)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/cohere/memory.py",
"license": "Apache License 2.0",
"lines": 36,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/cohere/tool_use.py | """Run `uv pip install ddgs` to install dependencies."""
import asyncio
from agno.agent import Agent
from agno.models.cohere import Cohere
from agno.tools.websearch import WebSearchTools
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# Web search is exposed as a tool so the model can fetch current information.
agent = Agent(
    model=Cohere(id="command-a-03-2025"),
    tools=[WebSearchTools()],
    markdown=True,
)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # --- Sync ---
    agent.print_response("Whats happening in France?")
    # --- Sync + Streaming ---
    agent.print_response("Whats happening in France?", stream=True)
    # --- Async + Streaming ---
    asyncio.run(agent.aprint_response("Whats happening in France?", stream=True))
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/cohere/tool_use.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/cometapi/basic.py | """
Cometapi Basic
==============
Cookbook example for `cometapi/basic.py`.
"""
from agno.agent import Agent, RunOutput # noqa
from agno.models.cometapi import CometAPI
import asyncio
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# Minimal agent: no tools or storage, just the CometAPI-hosted model.
agent = Agent(model=CometAPI(id="gpt-5.2"), markdown=True)
# Get the response in a variable
# run: RunOutput = agent.run("Explain quantum computing in simple terms")
# print(run.content)
# Print the response in the terminal
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # Demonstrates the four invocation styles: sync/async x plain/streaming.
    # --- Sync ---
    agent.print_response("Explain quantum computing in simple terms")
    # --- Sync + Streaming ---
    agent.print_response("Explain quantum computing in simple terms", stream=True)
    # --- Async ---
    asyncio.run(agent.aprint_response("Share a 2 sentence horror story"))
    # --- Async + Streaming ---
    asyncio.run(
        agent.aprint_response(
            "Write a short poem about artificial intelligence", stream=True
        )
    )
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/cometapi/basic.py",
"license": "Apache License 2.0",
"lines": 32,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/cometapi/structured_output.py | """
Cometapi Structured Output
==========================
Cookbook example for `cometapi/structured_output.py`.
"""
from typing import List
from agno.agent import Agent
from agno.models.cometapi import CometAPI
from pydantic import BaseModel, Field
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
class MovieScript(BaseModel):
    """Typed schema the agent's reply is parsed and validated into."""
    setting: str = Field(..., description="The setting of the movie")
    protagonist: str = Field(..., description="Name of the protagonist")
    antagonist: str = Field(..., description="Name of the antagonist")
    plot: str = Field(..., description="The plot of the movie")
    genre: str = Field(..., description="The genre of the movie")
    scenes: List[str] = Field(..., description="List of scenes in the movie")
agent = Agent(
    model=CometAPI(id="gpt-5.2"),
    description="You help people write movie scripts.",
    # The response is returned as a MovieScript instance.
    output_schema=MovieScript,
    # NOTE(review): presumably requests plain-JSON output instead of
    # provider-native structured output — confirm against agno docs.
    use_json_mode=True,
    markdown=True,
)
agent.print_response("Generate a movie script about a time-traveling detective")
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/cometapi/structured_output.py",
"license": "Apache License 2.0",
"lines": 32,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/dashscope/knowledge_tools.py | """
Here is a tool with reasoning capabilities to allow agents to search and analyze information from a knowledge base.
1. Run: `uv pip install openai agno lancedb tantivy sqlalchemy` to install the dependencies
2. Export your OPENAI_API_KEY
3. Run: `python cookbook/90_models/dashscope/knowledge_tools.py` to run the agent
"""
from agno.agent import Agent
from agno.knowledge.embedder.openai import OpenAIEmbedder
from agno.knowledge.knowledge import Knowledge
from agno.models.dashscope import DashScope
from agno.tools.knowledge import KnowledgeTools
from agno.vectordb.lancedb import LanceDb, SearchType
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# Create a knowledge containing information from a URL
agno_docs = Knowledge(
    # Use LanceDB as the vector database and store embeddings in the `agno_docs` table
    vector_db=LanceDb(
        uri="tmp/lancedb",
        table_name="agno_docs",
        search_type=SearchType.hybrid,
        embedder=OpenAIEmbedder(id="text-embedding-3-small"),
    ),
)
# Add content to the knowledge
# Downloads and indexes the full Agno documentation dump.
agno_docs.insert(url="https://docs.agno.com/llms-full.txt")
# Reasoning-style toolkit: think / search / analyze over the knowledge base,
# with few-shot examples added to the tool prompt.
knowledge_tools = KnowledgeTools(
    knowledge=agno_docs,
    enable_think=True,
    enable_search=True,
    enable_analyze=True,
    add_few_shot=True,
)
agent = Agent(
    model=DashScope(id="qwen-plus"),
    tools=[knowledge_tools],
    markdown=True,
)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    agent.print_response(
        "How do I build a team of agents in agno?",
        markdown=True,
        stream=True,
    )
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/dashscope/knowledge_tools.py",
"license": "Apache License 2.0",
"lines": 48,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/deepinfra/tool_use.py | """Run `uv pip install ddgs` to install dependencies."""
from agno.agent import Agent # noqa
from agno.models.deepinfra import DeepInfra # noqa
from agno.tools.websearch import WebSearchTools # noqa
import asyncio
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# Web search is exposed as a tool so the model can look up current events.
agent = Agent(
    model=DeepInfra(id="meta-llama/Llama-2-70b-chat-hf"),
    tools=[WebSearchTools()],
    markdown=True,
)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # --- Sync + Streaming ---
    agent.print_response("Whats happening in France?", stream=True)
    # --- Async + Streaming ---
    asyncio.run(agent.aprint_response("What's the latest news about AI?", stream=True))
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/deepinfra/tool_use.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/deepseek/tool_use.py | """Run `uv pip install ddgs` to install dependencies."""
import asyncio
from agno.agent import Agent
from agno.models.deepseek import DeepSeek
from agno.tools.websearch import WebSearchTools
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
"""
The current version of the deepseek-chat model's Function Calling capabilitity is unstable, which may result in looped calls or empty responses.
Their development team is actively working on a fix, and it is expected to be resolved in the next version.
"""
agent = Agent(
    model=DeepSeek(id="deepseek-chat"),
    tools=[WebSearchTools()],
    markdown=True,
)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # --- Sync ---
    agent.print_response("Whats happening in France?")
    # --- Async + Streaming ---
    asyncio.run(agent.aprint_response("Whats happening in France?", stream=True))
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/deepseek/tool_use.py",
"license": "Apache License 2.0",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/fireworks/tool_use.py | """Run `uv pip install ddgs` to install dependencies."""
import asyncio
from agno.agent import Agent
from agno.models.fireworks import Fireworks
from agno.tools.websearch import WebSearchTools
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# Web search is exposed as a tool so the model can look up current events.
agent = Agent(
    model=Fireworks(id="accounts/fireworks/models/llama-v3p1-405b-instruct"),
    tools=[WebSearchTools()],
    markdown=True,
)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # --- Sync + Streaming ---
    agent.print_response("Whats happening in France?", stream=True)
    # --- Async + Streaming ---
    asyncio.run(agent.aprint_response("Whats happening in France?", stream=True))
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/fireworks/tool_use.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/google/gemini/basic.py | """
Google Basic
============
Cookbook example for `google/gemini/basic.py`.
"""
from agno.agent import Agent, RunOutput # noqa
from agno.models.google import Gemini
import asyncio
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# Minimal agent: no tools or storage, just the Gemini model.
agent = Agent(model=Gemini(id="gemini-3-flash-preview"), markdown=True)
# Get the response in a variable
# run: RunOutput = agent.run("Share a 2 sentence horror story")
# print(run.content)
# Print the response in the terminal
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # Demonstrates the four invocation styles: sync/async x plain/streaming.
    # --- Sync ---
    agent.print_response("Share a 2 sentence horror story")
    # --- Sync + Streaming ---
    agent.print_response("Share a 2 sentence horror story", stream=True)
    # --- Async ---
    asyncio.run(agent.aprint_response("Share a 2 sentence horror story"))
    # --- Async + Streaming ---
    asyncio.run(agent.aprint_response("Share a 2 sentence horror story", stream=True))
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/google/gemini/basic.py",
"license": "Apache License 2.0",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/google/gemini/db.py | """Run `uv pip install ddgs sqlalchemy google.genai` to install dependencies."""
from agno.agent import Agent
from agno.db.postgres import PostgresDb
from agno.models.google import Gemini
from agno.tools.websearch import WebSearchTools
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# Setup the database
# NOTE(review): assumes a Postgres instance on localhost:5532 (cookbook docker
# setup) — confirm against your environment.
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
db = PostgresDb(db_url=db_url)
agent = Agent(
    model=Gemini(id="gemini-3-flash-preview"),
    # Persist sessions so earlier turns can be fed back into later requests.
    db=db,
    tools=[WebSearchTools()],
    # The follow-up question below ("their national anthem") only resolves
    # because prior messages are included in the context.
    add_history_to_context=True,
)
agent.print_response("How many people live in Canada?")
agent.print_response("What is their national anthem called?")
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/google/gemini/db.py",
"license": "Apache License 2.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/google/gemini/knowledge.py | """Run `uv pip install ddgs sqlalchemy pgvector pypdf openai google.genai` to install dependencies."""
from agno.agent import Agent
from agno.knowledge.embedder.google import GeminiEmbedder
from agno.knowledge.knowledge import Knowledge
from agno.models.google import Gemini
from agno.vectordb.pgvector import PgVector
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# NOTE(review): assumes a pgvector-enabled Postgres on localhost:5532 — confirm.
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
knowledge = Knowledge(
    vector_db=PgVector(
        table_name="recipes",
        db_url=db_url,
        # Use Gemini embeddings to match the Gemini chat model below.
        embedder=GeminiEmbedder(),
    ),
)
# Add content to the knowledge
# Indexes the recipe PDF so the agent can ground its answer in it.
knowledge.insert(url="https://agno-public.s3.amazonaws.com/recipes/ThaiRecipes.pdf")
agent = Agent(model=Gemini(id="gemini-3-flash-preview"), knowledge=knowledge)
agent.print_response("How to make Thai curry?", markdown=True)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/google/gemini/knowledge.py",
"license": "Apache License 2.0",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/google/gemini/pdf_input_local.py | """
Google Pdf Input Local
======================
Cookbook example for `google/gemini/pdf_input_local.py`.
"""
from pathlib import Path
from agno.agent import Agent
from agno.media import File
from agno.models.google import Gemini
from agno.utils.media import download_file
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# The PDF is stored next to this script.
pdf_path = Path(__file__).parent.joinpath("ThaiRecipes.pdf")
# Download the file using the download_file function
download_file(
    "https://agno-public.s3.amazonaws.com/recipes/ThaiRecipes.pdf", str(pdf_path)
)
agent = Agent(
    model=Gemini(id="gemini-3-flash-preview"),
    markdown=True,
    # NOTE(review): the url variant of this example also passes db=InMemoryDb()
    # alongside add_history_to_context — confirm whether history works without
    # a db here, or whether the second prompt loses the attached file context.
    add_history_to_context=True,
)
agent.print_response(
    "Summarize the contents of the attached file.",
    files=[File(filepath=pdf_path)],
)
# Follow-up relies on the file already being present in the conversation history.
agent.print_response("Suggest me a recipe from the attached file.")
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/google/gemini/pdf_input_local.py",
"license": "Apache License 2.0",
"lines": 33,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/google/gemini/pdf_input_url.py | """
Google Pdf Input Url
====================
Cookbook example for `google/gemini/pdf_input_url.py`.
"""
from agno.agent import Agent
from agno.db.in_memory import InMemoryDb
from agno.media import File
from agno.models.google import Gemini
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
agent = Agent(
    model=Gemini(id="gemini-3-flash-preview"),
    markdown=True,
    # In-memory session store: history survives across the two calls below
    # but is lost when the process exits.
    db=InMemoryDb(),
    add_history_to_context=True,
)
agent.print_response(
    "Summarize the contents of the attached file.",
    files=[File(url="https://agno-public.s3.amazonaws.com/recipes/ThaiRecipes.pdf")],
)
# Follow-up relies on the file already being present in the conversation history.
agent.print_response("Suggest me a recipe from the attached file.")
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/google/gemini/pdf_input_url.py",
"license": "Apache License 2.0",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/groq/db.py | """Run `uv pip install ddgs sqlalchemy groq` to install dependencies."""
from agno.agent import Agent
from agno.db.postgres import PostgresDb
from agno.models.groq import Groq
from agno.tools.websearch import WebSearchTools
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# Setup the database
# NOTE(review): assumes a Postgres instance on localhost:5532 (cookbook docker
# setup) — confirm against your environment.
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
db = PostgresDb(db_url=db_url)
agent = Agent(
    model=Groq(id="llama-3.3-70b-versatile"),
    # Persist sessions so earlier turns can be fed back into later requests.
    db=db,
    tools=[WebSearchTools()],
    # The follow-up question below ("their national anthem") only resolves
    # because prior messages are included in the context.
    add_history_to_context=True,
)
agent.print_response("How many people live in Canada?")
agent.print_response("What is their national anthem called?")
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/groq/db.py",
"license": "Apache License 2.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/groq/deep_knowledge.py | """DeepKnowledge - An AI Agent that iteratively searches a knowledge base to answer questions
This agent performs iterative searches through its knowledge base, breaking down complex
queries into sub-questions, and synthesizing comprehensive answers. It's designed to explore
topics deeply and thoroughly by following chains of reasoning.
In this example, the agent uses the Agno documentation as a knowledge base
Key Features:
- Iteratively searches a knowledge base
- Source attribution and citations
Run `uv pip install openai lancedb tantivy inquirer agno groq` to install dependencies.
"""
from textwrap import dedent
from typing import List, Optional
import inquirer
import typer
from agno.agent import Agent
from agno.db.sqlite import SqliteDb
from agno.knowledge.embedder.openai import OpenAIEmbedder
from agno.knowledge.knowledge import Knowledge
from agno.models.groq import Groq
from agno.vectordb.lancedb import LanceDb, SearchType
from rich import print
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
def initialize_knowledge_base():
    """Build and populate the knowledge base backing the agent.

    Uses the Agno documentation as the example corpus; swap the URL below to
    index any other source. Embeddings live in a local LanceDB table.
    """
    vector_store = LanceDb(
        uri="tmp/lancedb",
        table_name="deep_knowledge_knowledge",
        search_type=SearchType.hybrid,
        embedder=OpenAIEmbedder(id="text-embedding-3-small"),
    )
    kb = Knowledge(vector_db=vector_store)
    # Download and index the docs dump so searches have something to hit.
    kb.insert(url="https://docs.agno.com/llms-full.txt")
    return kb
def get_db():
    """Return the SQLite-backed session store used by the agent."""
    db_file = "tmp/agents.db"
    return SqliteDb(db_file=db_file)
def create_agent(session_id: Optional[str] = None) -> Agent:
    """Create and return a configured DeepKnowledge agent.

    Args:
        session_id: Existing session to resume; ``None`` starts a new one.

    Returns:
        A Groq-backed Agent wired to the Agno-docs knowledge base and the
        SQLite session store.
    """
    agent_knowledge = initialize_knowledge_base()
    db = get_db()
    return Agent(
        name="DeepKnowledge",
        session_id=session_id,
        model=Groq(id="llama-3.3-70b-versatile"),
        description=dedent("""\
            You are DeepKnowledge, an advanced reasoning agent designed to provide thorough,
            well-researched answers to any query by searching your knowledge base.
            Your strengths include:
            - Breaking down complex topics into manageable components
            - Connecting information across multiple domains
            - Providing nuanced, well-researched answers
            - Maintaining intellectual honesty and citing sources
            - Explaining complex concepts in clear, accessible terms"""),
        # FIX: the original instruction list was misnumbered (1,2,3,4,5,4,5,6),
        # which is confusing for the model; steps are renumbered 1-8 and the
        # "atleast"/"Searching" typos corrected.
        instructions=dedent("""\
            Your mission is to leave no stone unturned in your pursuit of the correct answer.
            To achieve this, follow these steps:
            1. **Analyze the input and break it down into key components**.
            2. **Search terms**: You must identify at least 3-5 key search terms to search for.
            3. **Initial Search:** Search your knowledge base for relevant information. You must make at least 3 searches to get all relevant information.
            4. **Evaluation:** If the answer from the knowledge base is incomplete, ambiguous, or insufficient - Ask the user for clarification. Do not make informed guesses.
            5. **Iterative Process:**
               - Continue searching your knowledge base till you have a comprehensive answer.
               - Reevaluate the completeness of your answer after each search iteration.
               - Repeat the search process until you are confident that every aspect of the question is addressed.
            6. **Reasoning Documentation:** Clearly document your reasoning process:
               - Note when additional searches were triggered.
               - Indicate which pieces of information came from the knowledge base and where it was sourced from.
               - Explain how you reconciled any conflicting or ambiguous information.
            7. **Final Synthesis:** Only finalize and present your answer once you have verified it through multiple search passes.
               Include all pertinent details and provide proper references.
            8. **Continuous Improvement:** If new, relevant information emerges even after presenting your answer,
               be prepared to update or expand upon your response.
            **Communication Style:**
            - Use clear and concise language.
            - Organize your response with numbered steps, bullet points, or short paragraphs as needed.
            - Be transparent about your search process and cite your sources.
            - Ensure that your final answer is comprehensive and leaves no part of the query unaddressed.
            Remember: **Do not finalize your answer until every angle of the question has been explored.**"""),
        additional_context=dedent("""\
            You should only respond with the final answer and the reasoning process.
            No need to include irrelevant information.
            - User ID: {user_id}
            - Memory: You have access to your previous search results and reasoning process.
            """),
        knowledge=agent_knowledge,
        db=db,
        # Replay the last few runs into context and let the model page
        # through older history via the read_chat_history tool.
        add_history_to_context=True,
        num_history_runs=3,
        read_chat_history=True,
        markdown=True,
    )
def get_example_topics() -> List[str]:
    """Return the example questions offered in the interactive menu."""
    topics = (
        "What are AI agents and how do they work in Agno?",
        "What chunking strategies does Agno support for text processing?",
        "How can I implement custom tools in Agno?",
        "How does knowledge retrieval work in Agno?",
        "What types of embeddings does Agno support?",
    )
    return list(topics)
def handle_session_selection() -> Optional[str]:
    """Handle session selection and return the selected session ID.

    Returns ``None`` when the user wants a fresh session or no stored
    sessions exist; otherwise the chosen session's id.
    """
    db = get_db()
    new = typer.confirm("Do you want to start a new session?", default=True)
    if new:
        return None
    existing_sessions = db.get_sessions()
    if not existing_sessions:
        print("No existing sessions found. Starting a new session.")
        return None
    print("\nExisting sessions:")
    # Sessions are listed 1-based to match the prompt below.
    for i, session in enumerate(existing_sessions, 1):
        print(f"{i}. {session.session_id}")  # type: ignore
    session_idx = typer.prompt(
        "Choose a session number to continue (or press Enter for most recent)",
        default=1,
    )
    try:
        return existing_sessions[int(session_idx) - 1].session_id  # type: ignore
    except (ValueError, IndexError):
        # Non-numeric or out-of-range input: fall back to the first session.
        return existing_sessions[0].session_id  # type: ignore
def run_interactive_loop(agent: Agent):
"""Run the interactive question-answering loop."""
example_topics = get_example_topics()
while True:
choices = [f"{i + 1}. {topic}" for i, topic in enumerate(example_topics)]
choices.extend(["Enter custom question...", "Exit"])
questions = [
inquirer.List(
"topic",
message="Select a topic or ask a different question:",
choices=choices,
)
]
answer = inquirer.prompt(questions)
if answer and answer["topic"] == "Exit":
break
if answer and answer["topic"] == "Enter custom question...":
questions = [inquirer.Text("custom", message="Enter your question:")]
custom_answer = inquirer.prompt(questions)
topic = custom_answer["custom"] # type: ignore
else:
topic = example_topics[int(answer["topic"].split(".")[0]) - 1] # type: ignore
agent.print_response(topic, stream=True)
def deep_knowledge_agent():
    """Main function to run the DeepKnowledge agent.

    Resolves the session (new or resumed), builds the agent, prints a
    banner, then enters the interactive loop.
    """
    session_id = handle_session_selection()
    agent = create_agent(session_id)
    print("\n Welcome to DeepKnowledge - Your Advanced Research Assistant! ")
    if session_id is None:
        # New session: the Agent assigns its own id at construction time.
        session_id = agent.session_id
        if session_id is not None:
            print(f"[bold green]Started New Session: {session_id}[/bold green]\n")
        else:
            print("[bold green]Started New Session[/bold green]\n")
    else:
        print(f"[bold blue]Continuing Previous Session: {session_id}[/bold blue]\n")
    run_interactive_loop(agent)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # typer handles CLI parsing and invokes the entry point.
    typer.run(deep_knowledge_agent)
# Example prompts to try:
"""
Explore Agno's capabilities with these queries:
1. "What are the different types of agents in Agno?"
2. "How does Agno handle knowledge base management?"
3. "What embedding models does Agno support?"
4. "How can I implement custom tools in Agno?"
5. "What storage options are available for workflow caching?"
6. "How does Agno handle streaming responses?"
7. "What types of LLM providers does Agno support?"
8. "How can I implement custom knowledge sources?"
"""
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/groq/deep_knowledge.py",
"license": "Apache License 2.0",
"lines": 179,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:cookbook/90_models/groq/knowledge.py | """Run `uv pip install ddgs sqlalchemy pgvector pypdf openai groq` to install dependencies."""
from agno.agent import Agent
from agno.knowledge.knowledge import Knowledge
from agno.models.groq import Groq
from agno.vectordb.pgvector import PgVector
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# NOTE(review): assumes a pgvector-enabled Postgres on localhost:5532 — confirm.
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
knowledge = Knowledge(
    # Embeddings are stored in the "recipes" table of the pgvector database.
    vector_db=PgVector(table_name="recipes", db_url=db_url),
)
# Add content to the knowledge
# Indexes the recipe PDF so the agent can ground its answer in it.
knowledge.insert(url="https://agno-public.s3.amazonaws.com/recipes/ThaiRecipes.pdf")
agent = Agent(
    model=Groq(id="llama-3.3-70b-versatile"),
    knowledge=knowledge,
)
agent.print_response("How to make Thai curry?", markdown=True)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/groq/knowledge.py",
"license": "Apache License 2.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/groq/research_agent_exa.py | """Run `uv pip install groq exa-py` to install dependencies."""
from datetime import datetime
from pathlib import Path
from textwrap import dedent
from agno.agent import Agent
from agno.models.groq import Groq
from agno.tools.exa import ExaTools
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
cwd = Path(__file__).parent.resolve()
tmp = cwd.joinpath("tmp")
if not tmp.exists():
tmp.mkdir(exist_ok=True, parents=True)
today = datetime.now().strftime("%Y-%m-%d")
agent = Agent(
model=Groq(id="llama-3.3-70b-versatile"),
tools=[ExaTools(start_published_date=today, type="keyword")],
description="You are an advanced AI researcher writing a report on a topic.",
instructions=[
"For the provided topic, run 3 different searches.",
"Read the results carefully and prepare a NYT worthy report.",
"Focus on facts and make sure to provide references.",
],
expected_output=dedent("""\
An engaging, informative, and well-structured report in markdown format:
## Engaging Report Title
### Overview
{give a brief introduction of the report and why the user should read this report}
{make this section engaging and create a hook for the reader}
### Section 1
{break the report into sections}
{provide details/facts/processes in this section}
... more sections as necessary...
### Takeaways
{provide key takeaways from the article}
### References
- [Reference 1](link)
- [Reference 2](link)
- [Reference 3](link)
### About the Author
{write a made up for yourself, give yourself a cyberpunk name and a title}
- published on {date} in dd/mm/yyyy
"""),
markdown=True,
add_datetime_to_context=True,
save_response_to_file=str(tmp.joinpath("{message}.md")),
)
agent.print_response("Llama 3.3 running on Groq", stream=True)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/groq/research_agent_exa.py",
"license": "Apache License 2.0",
"lines": 54,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/groq/tool_use.py | """Please install dependencies using:
uv pip install openai ddgs newspaper4k lxml_html_clean agno
"""
import asyncio
from agno.agent import Agent
from agno.models.groq import Groq
from agno.tools.newspaper4k import Newspaper4kTools
from agno.tools.websearch import WebSearchTools
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
agent = Agent(
model=Groq(id="llama-3.3-70b-versatile"),
tools=[WebSearchTools(), Newspaper4kTools()],
description="You are a senior NYT researcher writing an article on a topic.",
instructions=[
"For a given topic, search for the top 5 links.",
"Then read each URL and extract the article text, if a URL isn't available, ignore it.",
"Analyse and prepare an NYT worthy article based on the information.",
],
markdown=True,
add_datetime_to_context=True,
)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
# --- Sync + Streaming ---
agent.print_response("Simulation theory", stream=True)
# --- Async + Streaming ---
asyncio.run(agent.aprint_response("Simulation theory", stream=True))
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/groq/tool_use.py",
"license": "Apache License 2.0",
"lines": 31,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/groq/translation_agent.py | """
Groq Translation Agent
======================
Cookbook example for `groq/translation_agent.py`.
"""
import base64
from pathlib import Path
from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.tools.models.groq import GroqTools
from agno.utils.media import save_base64_data
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
path = "tmp/sample-fr.mp3"
agent = Agent(
name="Groq Translation Agent",
model=OpenAIChat(id="gpt-5.2"),
tools=[GroqTools()],
cache_session=True,
)
response = agent.run(
f"Let's transcribe the audio file located at '{path}' and translate it to English. After that generate a new music audio file using the translated text."
)
if response and response.audio:
base64_audio = base64.b64encode(response.audio[0].content).decode("utf-8")
save_base64_data(base64_audio, Path("tmp/sample-en.mp3")) # type: ignore
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/groq/translation_agent.py",
"license": "Apache License 2.0",
"lines": 32,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/ibm/watsonx/db.py | """Run `uv pip install ddgs sqlalchemy ibm-watsonx-ai` to install dependencies."""
from agno.agent import Agent
from agno.db.postgres import PostgresDb
from agno.models.ibm import WatsonX
from agno.tools.websearch import WebSearchTools
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# Setup the database
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
db = PostgresDb(db_url=db_url)
agent = Agent(
model=WatsonX(id="mistralai/mistral-small-3-1-24b-instruct-2503"),
db=db,
tools=[WebSearchTools()],
add_history_to_context=True,
)
agent.print_response("How many people live in Canada?")
agent.print_response("What is their national anthem called?")
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/ibm/watsonx/db.py",
"license": "Apache License 2.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/ibm/watsonx/knowledge.py | """Run `uv pip install ddgs sqlalchemy pgvector pypdf openai ibm-watsonx-ai` to install dependencies."""
from agno.agent import Agent
from agno.knowledge.knowledge import Knowledge
from agno.models.ibm import WatsonX
from agno.vectordb.pgvector import PgVector
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
knowledge = Knowledge(
vector_db=PgVector(table_name="recipes", db_url=db_url),
)
# Add content to the knowledge
knowledge.insert(url="https://agno-public.s3.amazonaws.com/recipes/ThaiRecipes.pdf")
agent = Agent(
model=WatsonX(id="mistralai/mistral-small-3-1-24b-instruct-2503"),
knowledge=knowledge,
)
agent.print_response("How to make Thai curry?", markdown=True)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/ibm/watsonx/knowledge.py",
"license": "Apache License 2.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/ibm/watsonx/tool_use.py | """Run `uv pip install ddgs` to install dependencies."""
import asyncio
from agno.agent import Agent
from agno.models.ibm import WatsonX
from agno.tools.websearch import WebSearchTools
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
agent = Agent(
model=WatsonX(id="mistralai/mistral-small-3-1-24b-instruct-2503"),
tools=[WebSearchTools()],
markdown=True,
)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
# --- Sync + Streaming ---
agent.print_response("Whats happening in France?", stream=True)
# --- Async + Streaming ---
asyncio.run(agent.aprint_response("Whats happening in France?", stream=True))
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/ibm/watsonx/tool_use.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/langdb/agent.py | """Run `uv pip install yfinance` to install dependencies."""
from agno.agent import Agent, RunOutput # noqa
from agno.models.langdb import LangDB
from agno.tools.yfinance import YFinanceTools
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
agent = Agent(
model=LangDB(id="gpt-4o"),
tools=[YFinanceTools()],
instructions=["Use tables where possible."],
markdown=True,
)
# Get the response in a variable
# run: RunOutput = agent.run("What is the stock price of NVDA and TSLA")
# print(run.content)
# Print the response in the terminal
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
# --- Sync ---
agent.print_response("What is the stock price of NVDA and TSLA")
# --- Sync + Streaming ---
agent.print_response("What is the stock price of NVDA and TSLA", stream=True)
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/langdb/agent.py",
"license": "Apache License 2.0",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/langdb/data_analyst.py | """Run `uv pip install duckdb` to install dependencies."""
from textwrap import dedent
from agno.agent import Agent
from agno.models.langdb import LangDB
from agno.tools.duckdb import DuckDbTools
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
duckdb_tools = DuckDbTools()
duckdb_tools.create_table_from_path(
path="https://phidata-public.s3.amazonaws.com/demo_data/IMDB-Movie-Data.csv",
table="movies",
)
agent = Agent(
model=LangDB(id="llama3-1-70b-instruct-v1.0"),
tools=[duckdb_tools],
markdown=True,
additional_context=dedent("""\
You have access to the following tables:
- movies: contains information about movies from IMDB.
"""),
)
agent.print_response("What is the average rating of movies?", stream=False)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/langdb/data_analyst.py",
"license": "Apache License 2.0",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/langdb/finance_agent.py | """Run `uv pip install yfinance` to install dependencies."""
from agno.agent import Agent
from agno.models.langdb import LangDB
from agno.tools.yfinance import YFinanceTools
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
agent = Agent(
model=LangDB(id="llama3-1-70b-instruct-v1.0"),
tools=[YFinanceTools()],
description="You are an investment analyst that researches stocks and helps users make informed decisions.",
instructions=["Use tables to display data where possible."],
markdown=True,
)
# agent.print_response("Share the NVDA stock price and analyst recommendations", stream=True)
agent.print_response("Summarize fundamentals for TSLA", stream=True)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/langdb/finance_agent.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/litellm/db.py | """Run `uv pip install ddgs openai` to install dependencies."""
from agno.agent import Agent
from agno.db.sqlite import SqliteDb
from agno.models.litellm import LiteLLM
from agno.tools.websearch import WebSearchTools
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# Setup the database
db = SqliteDb(
db_file="tmp/data.db",
)
# Add storage to the Agent
agent = Agent(
model=LiteLLM(id="gpt-4o"),
db=db,
tools=[WebSearchTools()],
add_history_to_context=True,
)
agent.print_response("How many people live in Canada?")
agent.print_response("What is their national anthem called?")
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/litellm/db.py",
"license": "Apache License 2.0",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/litellm_openai/tool_use.py | """Run `uv pip install ddgs` to install dependencies."""
from agno.agent import Agent
from agno.models.litellm import LiteLLMOpenAI
from agno.tools.websearch import WebSearchTools
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
agent = Agent(
model=LiteLLMOpenAI(id="gpt-4o"),
tools=[WebSearchTools()],
markdown=True,
)
agent.print_response("Whats happening in France?")
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/litellm_openai/tool_use.py",
"license": "Apache License 2.0",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/llama_cpp/tool_use.py | """Run `uv pip install ddgs` to install dependencies."""
from agno.agent import Agent
from agno.models.llama_cpp import LlamaCpp
from agno.tools.websearch import WebSearchTools
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
agent = Agent(
model=LlamaCpp(id="ggml-org/gpt-oss-20b-GGUF"),
tools=[WebSearchTools()],
markdown=True,
)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
# --- Sync ---
agent.print_response("Whats happening in France?")
# --- Sync + Streaming ---
agent.print_response("Whats happening in France?", stream=True)
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/llama_cpp/tool_use.py",
"license": "Apache License 2.0",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/lmstudio/db.py | """Run `uv pip install ddgs sqlalchemy` to install dependencies."""
from agno.agent import Agent
from agno.db.postgres import PostgresDb
from agno.models.lmstudio import LMStudio
from agno.tools.websearch import WebSearchTools
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# Setup the database
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
db = PostgresDb(db_url=db_url)
agent = Agent(
model=LMStudio(id="qwen2.5-7b-instruct-1m"),
db=db,
tools=[WebSearchTools()],
add_history_to_context=True,
)
agent.print_response("How many people live in Canada?")
agent.print_response("What is their national anthem called?")
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/lmstudio/db.py",
"license": "Apache License 2.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/lmstudio/knowledge.py | """Run `uv pip install ddgs sqlalchemy pgvector pypdf openai ollama` to install dependencies."""
from agno.agent import Agent
from agno.knowledge.knowledge import Knowledge
from agno.models.lmstudio import LMStudio
from agno.vectordb.pgvector import PgVector
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
knowledge = Knowledge(
vector_db=PgVector(
table_name="recipes",
db_url=db_url,
),
)
# Add content to the knowledge
knowledge.insert(url="https://agno-public.s3.amazonaws.com/recipes/ThaiRecipes.pdf")
agent = Agent(model=LMStudio(id="qwen2.5-7b-instruct-1m"), knowledge=knowledge)
agent.print_response("How to make Thai curry?", markdown=True)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/lmstudio/knowledge.py",
"license": "Apache License 2.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/lmstudio/memory.py | """
This recipe shows how to use personalized memories and summaries in an agent.
Steps:
1. Run: `./cookbook/scripts/run_pgvector.sh` to start a postgres container with pgvector
2. Run: `uv pip install ollama sqlalchemy 'psycopg[binary]' pgvector` to install the dependencies
3. Run: `python cookbook/92_models/lmstudio/memory.py` to run the agent
"""
from agno.agent import Agent
from agno.db.postgres import PostgresDb
from agno.models.lmstudio import LMStudio
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# Setup the database
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
db = PostgresDb(db_url=db_url)
agent = Agent(
model=LMStudio(id="qwen2.5-7b-instruct-1m"),
# Pass the database to the Agent
db=db,
# Enable user memories
update_memory_on_run=True,
# Enable session summaries
enable_session_summaries=True,
# Show debug logs so, you can see the memory being created
)
# -*- Share personal information
agent.print_response("My name is john billings?", stream=True)
# -*- Share personal information
agent.print_response("I live in nyc?", stream=True)
# -*- Share personal information
agent.print_response("I'm going to a concert tomorrow?", stream=True)
# Ask about the conversation
agent.print_response(
"What have we been talking about, do you know my name?", stream=True
)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/lmstudio/memory.py",
"license": "Apache License 2.0",
"lines": 41,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/lmstudio/tool_use.py | """Run `uv pip install ddgs` to install dependencies."""
from agno.agent import Agent
from agno.models.lmstudio import LMStudio
from agno.tools.websearch import WebSearchTools
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
agent = Agent(
model=LMStudio(id="qwen2.5-7b-instruct-1m"),
tools=[WebSearchTools()],
markdown=True,
)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
# --- Sync ---
agent.print_response("Whats happening in France?")
# --- Sync + Streaming ---
agent.print_response("Whats happening in France?", stream=True)
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/lmstudio/tool_use.py",
"license": "Apache License 2.0",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/meta/llama/db.py | """Run `uv pip install ddgs sqlalchemy llama-api-client` to install dependencies."""
from agno.agent import Agent
from agno.db.postgres import PostgresDb
from agno.models.meta import Llama
from agno.tools.websearch import WebSearchTools
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# Setup the database
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
db = PostgresDb(db_url=db_url)
agent = Agent(
model=Llama(id="Llama-4-Maverick-17B-128E-Instruct-FP8"),
db=db,
tools=[WebSearchTools()],
add_history_to_context=True,
)
agent.print_response("How many people live in Canada?")
agent.print_response("What is their national anthem called?")
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/meta/llama/db.py",
"license": "Apache License 2.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/meta/llama/knowledge.py | """Run `uv pip install ddgs sqlalchemy pgvector pypdf llama-api-client` to install dependencies."""
from agno.agent import Agent
from agno.knowledge.knowledge import Knowledge
from agno.models.meta import Llama
from agno.vectordb.pgvector import PgVector
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
knowledge = Knowledge(
vector_db=PgVector(table_name="recipes", db_url=db_url),
)
# Add content to the knowledge
knowledge.insert(url="https://agno-public.s3.amazonaws.com/recipes/ThaiRecipes.pdf")
agent = Agent(
model=Llama(id="Llama-4-Maverick-17B-128E-Instruct-FP8"), knowledge=knowledge
)
agent.print_response("How to make Thai curry?", markdown=True)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/meta/llama/knowledge.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/meta/llama/memory.py | """
This recipe shows how to use personalized memories and summaries in an agent.
Steps:
1. Run: `./cookbook/scripts/run_pgvector.sh` to start a postgres container with pgvector
2. Run: `uv pip install openai sqlalchemy 'psycopg[binary]' pgvector` to install the dependencies
3. Run: `python cookbook/agents/personalized_memories_and_summaries.py` to run the agent
"""
from agno.agent import Agent
from agno.db.postgres import PostgresDb
from agno.models.meta import Llama
from rich.pretty import pprint
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# Setup the database
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
db = PostgresDb(db_url=db_url)
agent = Agent(
model=Llama(id="Llama-4-Maverick-17B-128E-Instruct-FP8"),
user_id="test_user",
session_id="test_session",
# Pass the database to the Agent
db=db,
# Enable user memories
update_memory_on_run=True,
# Enable session summaries
enable_session_summaries=True,
# Show debug logs so, you can see the memory being created
)
# -*- Share personal information
agent.print_response("My name is John Billings", stream=True)
# -*- Print memories and session summary
if agent.db:
pprint(agent.get_user_memories(user_id="test_user"))
pprint(
agent.get_session(session_id="test_session").summary # type: ignore
)
# -*- Share personal information
agent.print_response("I live in NYC", stream=True)
# -*- Print memories and session summary
if agent.db:
pprint(agent.get_user_memories(user_id="test_user"))
pprint(
agent.get_session(session_id="test_session").summary # type: ignore
)
# Ask about the conversation
agent.print_response(
"What have we been talking about, do you know my name?", stream=True
)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/meta/llama/memory.py",
"license": "Apache License 2.0",
"lines": 54,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/meta/llama/tool_use.py | """Run `uv pip install agno llama-api-client yfinance` to install dependencies."""
import asyncio
from agno.agent import Agent
from agno.models.meta import Llama
from agno.tools.yfinance import YFinanceTools
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
agent = Agent(
model=Llama(id="Llama-4-Maverick-17B-128E-Instruct-FP8"),
tools=[YFinanceTools()],
)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
# --- Sync ---
agent.print_response("What is the price of AAPL stock?")
# --- Sync + Streaming ---
agent.print_response("Tell me the price of AAPL stock", stream=True)
# --- Async ---
asyncio.run(agent.aprint_response("Whats the price of AAPL stock?"))
# --- Async + Streaming ---
asyncio.run(agent.aprint_response("Whats the price of AAPL stock?", stream=True))
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/meta/llama/tool_use.py",
"license": "Apache License 2.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/meta/llama_openai/knowledge.py | """Run `uv pip install ddgs sqlalchemy pgvector pypdf openai` to install dependencies."""
from agno.agent import Agent
from agno.knowledge.knowledge import Knowledge
from agno.models.meta import LlamaOpenAI
from agno.vectordb.pgvector import PgVector
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
knowledge = Knowledge(
vector_db=PgVector(table_name="recipes", db_url=db_url),
)
# Add content to the knowledge
knowledge.insert(url="https://agno-public.s3.amazonaws.com/recipes/ThaiRecipes.pdf")
agent = Agent(
model=LlamaOpenAI(id="Llama-4-Maverick-17B-128E-Instruct-FP8"),
knowledge=knowledge,
)
agent.print_response("How to make Thai curry?", markdown=True)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/meta/llama_openai/knowledge.py",
"license": "Apache License 2.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/meta/llama_openai/memory.py | """
This recipe shows how to use personalized memories and summaries in an agent.
Steps:
1. Run: `./cookbook/scripts/run_pgvector.sh` to start a postgres container with pgvector
2. Run: `uv pip install openai sqlalchemy 'psycopg[binary]' pgvector` to install the dependencies
3. Run: `python cookbook/agents/personalized_memories_and_summaries.py` to run the agent
"""
from agno.agent import Agent
from agno.db.postgres import PostgresDb
from agno.models.meta import LlamaOpenAI
from rich.pretty import pprint
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
agent = Agent(
model=LlamaOpenAI(id="Llama-4-Maverick-17B-128E-Instruct-FP8"),
# Store sessions, memories and summaries in the
db=PostgresDb(db_url=db_url, memory_table="agent_memory"),
update_memory_on_run=True,
enable_session_summaries=True,
# Show debug logs so, you can see the memory being created
debug_mode=True,
)
# -*- Share personal information
agent.print_response("My name is john billings?", stream=True)
# -*- Print memories
pprint(agent.memory.memories)
# -*- Print summary
pprint(agent.memory.summaries)
# -*- Share personal information
agent.print_response("I live in nyc?", stream=True)
# -*- Print memories
pprint(agent.memory.memories)
# -*- Print summary
pprint(agent.memory.summaries)
# -*- Share personal information
agent.print_response("I'm going to a concert tomorrow?", stream=True)
# -*- Print memories
pprint(agent.memory.memories)
# -*- Print summary
pprint(agent.memory.summaries)
# Ask about the conversation
agent.print_response(
"What have we been talking about, do you know my name?", stream=True
)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/meta/llama_openai/memory.py",
"license": "Apache License 2.0",
"lines": 51,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/meta/llama_openai/tool_use.py | """Run `uv pip install openai yfinance` to install dependencies."""
import asyncio
from agno.agent import Agent
from agno.models.meta import LlamaOpenAI
from agno.tools.yfinance import YFinanceTools
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
agent = Agent(
model=LlamaOpenAI(id="Llama-4-Maverick-17B-128E-Instruct-FP8"),
tools=[YFinanceTools()],
)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
# --- Sync ---
agent.print_response("Whats the price of AAPL stock?")
# --- Sync + Streaming ---
agent.print_response("Whats the price of AAPL stock?", stream=True)
# --- Async ---
asyncio.run(agent.aprint_response("Whats the price of AAPL stock?"))
# --- Async + Streaming ---
asyncio.run(agent.aprint_response("Whats the price of AAPL stock?", stream=True))
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/meta/llama_openai/tool_use.py",
"license": "Apache License 2.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/mistral/memory.py | """
This recipe shows how to use personalized memories and summaries in an agent.
Steps:
1. Run: `./cookbook/scripts/run_pgvector.sh` to start a postgres container with pgvector
2. Run: `uv pip install mistralai sqlalchemy 'psycopg[binary]' pgvector` to install the dependencies
3. Run: `python cookbook/92_models/mistral/memory.py` to run the agent
"""
from agno.agent import Agent
from agno.db.postgres import PostgresDb
from agno.models.mistral.mistral import MistralChat
from agno.tools.websearch import WebSearchTools
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
# Setup the database
db = PostgresDb(db_url=db_url)
agent = Agent(
model=MistralChat(id="mistral-large-latest"),
tools=[WebSearchTools()],
# Pass the database to the Agent
db=db,
# Enable user memories
update_memory_on_run=True,
# Enable session summaries
enable_session_summaries=True,
# Show debug logs so, you can see the memory being created
)
# -*- Share personal information
agent.print_response("My name is john billings?", stream=True)
# -*- Share personal information
agent.print_response("I live in nyc?", stream=True)
# -*- Share personal information
agent.print_response("I'm going to a concert tomorrow?", stream=True)
# -*- Make tool call
agent.print_response("What is the weather in nyc?", stream=True)
# Ask about the conversation
agent.print_response(
"What have we been talking about, do you know my name?", stream=True
)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/mistral/memory.py",
"license": "Apache License 2.0",
"lines": 45,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/mistral/tool_use.py | """Run `uv pip install ddgs` to install dependencies."""
import asyncio
from agno.agent import Agent
from agno.models.mistral import MistralChat
from agno.tools.websearch import WebSearchTools
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
agent = Agent(
model=MistralChat(
id="mistral-large-latest",
),
tools=[WebSearchTools()],
markdown=True,
)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
# --- Sync + Streaming ---
agent.print_response("Whats happening in France?", stream=True)
# --- Async + Streaming ---
asyncio.run(agent.aprint_response("Whats happening in France?", stream=True))
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/mistral/tool_use.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/nebius/db.py | """Run `uv pip install ddgs sqlalchemy cerebras_cloud_sdk` to install dependencies."""
from agno.agent import Agent
from agno.db.postgres import PostgresDb
from agno.models.nebius import Nebius
from agno.tools.websearch import WebSearchTools
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# Setup the database
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
db = PostgresDb(db_url=db_url)
agent = Agent(
model=Nebius(),
db=db,
tools=[WebSearchTools()],
add_history_to_context=True,
)
agent.print_response("How many people live in Canada?")
agent.print_response("What is their national anthem called?")
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/nebius/db.py",
"license": "Apache License 2.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/nebius/knowledge.py | """Run `uv pip install ddgs sqlalchemy pgvector pypdf cerebras_cloud_sdk` to install dependencies."""
from agno.agent import Agent
from agno.knowledge.knowledge import Knowledge
from agno.models.nebius import Nebius
from agno.vectordb.pgvector import PgVector
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
knowledge = Knowledge(
vector_db=PgVector(table_name="recipes", db_url=db_url),
)
# Add content to the knowledge
knowledge.insert(url="https://agno-public.s3.amazonaws.com/recipes/ThaiRecipes.pdf")
agent = Agent(model=Nebius(id="Qwen/Qwen3-30B-A3B"), knowledge=knowledge)
agent.print_response("How to make Thai curry?", markdown=True)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/nebius/knowledge.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/nexus/tool_use.py | """Run `uv pip install ddgs` to install dependencies."""
import asyncio
from agno.agent import Agent
from agno.models.nexus import Nexus
from agno.tools.websearch import WebSearchTools
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
agent = Agent(
model=Nexus(id="anthropic/claude-sonnet-4-20250514"),
tools=[WebSearchTools()],
markdown=True,
)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
# --- Sync ---
agent.print_response("Whats happening in France?")
# --- Sync + Streaming ---
agent.print_response("Whats happening in France?", stream=True)
# --- Async ---
asyncio.run(agent.aprint_response("Whats happening in France?"))
# --- Async + Streaming ---
asyncio.run(agent.aprint_response("Whats happening in France?", stream=True))
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/nexus/tool_use.py",
"license": "Apache License 2.0",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/nvidia/tool_use.py | """Run `uv pip install ddgs` to install dependencies."""
import asyncio
from agno.agent import Agent
from agno.models.nvidia import Nvidia
from agno.tools.websearch import WebSearchTools
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
agent = Agent(
model=Nvidia(id="meta/llama-3.3-70b-instruct"),
tools=[WebSearchTools()],
markdown=True,
)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
# --- Sync ---
agent.print_response("Whats happening in France?")
# --- Sync + Streaming ---
agent.print_response("Whats happening in France?", stream=True)
# --- Async + Streaming ---
asyncio.run(agent.aprint_response("Whats happening in France?", stream=True))
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/nvidia/tool_use.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/openai/chat/db.py | """Run `uv pip install ddgs sqlalchemy openai` to install dependencies."""
from agno.agent import Agent
from agno.db.postgres import PostgresDb
from agno.models.openai import OpenAIChat
from agno.tools.websearch import WebSearchTools
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# Setup the database
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
db = PostgresDb(db_url=db_url)
agent = Agent(
model=OpenAIChat(id="gpt-4o"),
db=db,
tools=[WebSearchTools()],
add_history_to_context=True,
)
agent.print_response("How many people live in Canada?")
agent.print_response("What is their national anthem called?")
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/openai/chat/db.py",
"license": "Apache License 2.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/openai/chat/knowledge.py | """Run `uv pip install ddgs sqlalchemy pgvector pypdf openai` to install dependencies."""
from agno.agent import Agent
from agno.knowledge.knowledge import Knowledge
from agno.models.openai import OpenAIChat
from agno.vectordb.pgvector import PgVector
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
knowledge = Knowledge(
vector_db=PgVector(table_name="recipes", db_url=db_url),
)
# Add content to the knowledge
knowledge.insert(url="https://agno-public.s3.amazonaws.com/recipes/ThaiRecipes.pdf")
agent = Agent(model=OpenAIChat(id="gpt-4o"), knowledge=knowledge)
agent.print_response("How to make Thai curry?", markdown=True)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/openai/chat/knowledge.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/openai/chat/memory.py | """
This recipe shows how to use personalized memories and summaries in an agent.
Steps:
1. Run: `./cookbook/scripts/run_pgvector.sh` to start a postgres container with pgvector
2. Run: `uv pip install openai sqlalchemy 'psycopg[binary]' pgvector` to install the dependencies
3. Run: `python cookbook/agents/personalized_memories_and_summaries.py` to run the agent
"""
from agno.agent import Agent
from agno.db.postgres import PostgresDb
from agno.models.openai import OpenAIChat
from rich.pretty import pprint
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# Setup the database
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
db = PostgresDb(db_url=db_url)
agent = Agent(
model=OpenAIChat(id="gpt-4o"),
user_id="test_user",
session_id="test_session",
# Pass the database to the Agent
db=db,
# Enable user memories
update_memory_on_run=True,
# Enable session summaries
enable_session_summaries=True,
# Show debug logs so, you can see the memory being created
)
# -*- Share personal information
agent.print_response("My name is john billings", stream=True)
# -*- Print memories and summary
if agent.db:
pprint(agent.get_user_memories(user_id="test_user"))
pprint(
agent.get_session(session_id="test_session").summary # type: ignore
)
# -*- Share personal information
agent.print_response("I live in nyc", stream=True)
# -*- Print memories and summary
if agent.db:
pprint(agent.get_user_memories(user_id="test_user"))
pprint(
agent.get_session(session_id="test_session").summary # type: ignore
)
# -*- Share personal information
agent.print_response("I'm going to a concert tomorrow", stream=True)
# -*- Print memories and summary
if agent.db:
pprint(agent.get_user_memories(user_id="test_user"))
pprint(
agent.get_session(session_id="test_session").summary # type: ignore
)
# Ask about the conversation
agent.print_response(
"What have we been talking about, do you know my name?", stream=True
)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/openai/chat/memory.py",
"license": "Apache License 2.0",
"lines": 62,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/openai/chat/tool_use.py | """Run `uv pip install ddgs` to install dependencies."""
import asyncio
from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.tools.websearch import WebSearchTools
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
agent = Agent(
model=OpenAIChat(id="gpt-4o"),
tools=[WebSearchTools()],
markdown=True,
)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
# --- Sync ---
agent.print_response("Whats happening in France?")
# --- Sync + Streaming ---
agent.print_response("Whats happening in France?", stream=True)
# --- Async + Streaming ---
asyncio.run(agent.aprint_response("Whats happening in France?", stream=True))
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/openai/chat/tool_use.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/openai/responses/db.py | """Run `uv pip install ddgs sqlalchemy openai` to install dependencies."""
from agno.agent import Agent
from agno.db.postgres import PostgresDb
from agno.models.openai import OpenAIResponses
from agno.tools.websearch import WebSearchTools
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# Setup the database
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
db = PostgresDb(db_url=db_url)
agent = Agent(
model=OpenAIResponses(id="gpt-4o"),
db=db,
tools=[WebSearchTools()],
add_history_to_context=True,
)
agent.print_response("How many people live in Canada?")
agent.print_response("What is their national anthem called?")
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/openai/responses/db.py",
"license": "Apache License 2.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/openai/responses/knowledge.py | """Run `uv pip install ddgs sqlalchemy pgvector pypdf openai` to install dependencies."""
from agno.agent import Agent
from agno.knowledge.knowledge import Knowledge
from agno.models.openai import OpenAIResponses
from agno.vectordb.pgvector import PgVector
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
knowledge = Knowledge(
vector_db=PgVector(table_name="recipes", db_url=db_url),
)
# Add content to the knowledge
knowledge.insert(url="https://agno-public.s3.amazonaws.com/recipes/ThaiRecipes.pdf")
agent = Agent(model=OpenAIResponses(id="gpt-4o"), knowledge=knowledge)
agent.print_response("How to make Thai curry?", markdown=True)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/openai/responses/knowledge.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/openai/responses/memory.py | """
This recipe shows how to use personalized memories and summaries in an agent.
Steps:
1. Run: `./cookbook/scripts/run_pgvector.sh` to start a postgres container with pgvector
2. Run: `uv pip install openai sqlalchemy 'psycopg[binary]' pgvector` to install the dependencies
3. Run: `python cookbook/agents/personalized_memories_and_summaries.py` to run the agent
"""
from agno.agent import Agent
from agno.db.postgres import PostgresDb
from agno.models.openai import OpenAIResponses
from rich.pretty import pprint
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# Setup the database
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
db = PostgresDb(db_url=db_url)
agent = Agent(
model=OpenAIResponses(id="gpt-4o"),
user_id="test_user",
session_id="test_session",
# Pass the database to the Agent
db=db,
# Enable user memories
update_memory_on_run=True,
# Enable session summaries
enable_session_summaries=True,
# Show debug logs so, you can see the memory being created
)
# -*- Share personal information
agent.print_response("My name is john billings?", stream=True)
# -*- Print memories and summary
if agent.db:
pprint(agent.get_user_memories(user_id="test_user"))
pprint(
agent.get_session(session_id="test_session").summary # type: ignore
)
# -*- Share personal information
agent.print_response("I live in nyc?", stream=True)
# -*- Print memories
if agent.db:
pprint(agent.get_user_memories(user_id="test_user"))
pprint(
agent.get_session(session_id="test_session").summary # type: ignore
)
# -*- Share personal information
agent.print_response("I'm going to a concert tomorrow?", stream=True)
# -*- Print memories
if agent.db:
pprint(agent.get_user_memories(user_id="test_user"))
pprint(
agent.get_session(session_id="test_session").summary # type: ignore
)
# Ask about the conversation
agent.print_response(
"What have we been talking about, do you know my name?", stream=True
)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/openai/responses/memory.py",
"license": "Apache License 2.0",
"lines": 62,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/openai/responses/pdf_input_local.py | """
Openai Pdf Input Local
======================
Cookbook example for `openai/responses/pdf_input_local.py`.
"""
from pathlib import Path
from agno.agent import Agent
from agno.media import File
from agno.models.openai.responses import OpenAIResponses
from agno.utils.media import download_file
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
pdf_path = Path(__file__).parent.joinpath("ThaiRecipes.pdf")
# Download the file using the download_file function
download_file(
"https://agno-public.s3.amazonaws.com/recipes/ThaiRecipes.pdf", str(pdf_path)
)
agent = Agent(
model=OpenAIResponses(id="gpt-5.2"),
tools=[{"type": "file_search"}],
markdown=True,
add_history_to_context=True,
)
agent.print_response(
"Summarize the contents of the attached file.",
files=[File(filepath=pdf_path)],
)
agent.print_response("Suggest me a recipe from the attached file.")
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/openai/responses/pdf_input_local.py",
"license": "Apache License 2.0",
"lines": 34,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/openai/responses/pdf_input_url.py | """
Openai Pdf Input Url
====================
Cookbook example for `openai/responses/pdf_input_url.py`.
"""
from agno.agent import Agent
from agno.db.postgres import PostgresDb
from agno.media import File
from agno.models.openai.responses import OpenAIResponses
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# Setup the database for the Agent Session to be stored
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
db = PostgresDb(db_url=db_url)
agent = Agent(
model=OpenAIResponses(id="gpt-5.2"),
db=db,
tools=[{"type": "file_search"}, {"type": "web_search_preview"}],
markdown=True,
)
agent.print_response(
"Summarize the contents of the attached file and search the web for more information.",
files=[File(url="https://agno-public.s3.amazonaws.com/recipes/ThaiRecipes.pdf")],
)
# Get the stored Agent session, to check the response citations
session = agent.get_session()
if session and session.runs and session.runs[-1].citations:
print("Citations:")
print(session.runs[-1].citations)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/openai/responses/pdf_input_url.py",
"license": "Apache License 2.0",
"lines": 35,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/openai/responses/tool_use.py | """Run `uv pip install ddgs` to install dependencies."""
import asyncio
from agno.agent import Agent
from agno.models.openai import OpenAIResponses
from agno.tools.websearch import WebSearchTools
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
agent = Agent(
model=OpenAIResponses(id="gpt-4o"),
tools=[WebSearchTools()],
markdown=True,
)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
# --- Sync ---
agent.print_response("Whats happening in France?")
# --- Sync + Streaming ---
agent.print_response("Whats happening in France?", stream=True)
# --- Async + Streaming ---
asyncio.run(agent.aprint_response("Whats happening in France?", stream=True))
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/openai/responses/tool_use.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/perplexity/knowledge.py | """Run `uv pip install ddgs sqlalchemy pgvector pypdf openai google.generativeai` to install dependencies."""
from agno.agent import Agent
from agno.knowledge.embedder.openai import OpenAIEmbedder
from agno.knowledge.knowledge import Knowledge
from agno.models.perplexity import Perplexity
from agno.vectordb.pgvector import PgVector
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
knowledge = Knowledge(
vector_db=PgVector(
table_name="recipes",
db_url=db_url,
embedder=OpenAIEmbedder(),
),
)
# Add content to the knowledge
knowledge.insert(url="https://agno-public.s3.amazonaws.com/recipes/ThaiRecipes.pdf")
agent = Agent(model=Perplexity(id="sonar-pro"), knowledge=knowledge)
agent.print_response("How to make Thai curry?", markdown=True)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/perplexity/knowledge.py",
"license": "Apache License 2.0",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/perplexity/memory.py | """
This recipe shows how to use personalized memories and summaries in an agent.
Steps:
1. Run: `./cookbook/scripts/run_pgvector.sh` to start a postgres container with pgvector
2. Run: `uv pip install openai sqlalchemy 'psycopg[binary]' pgvector` to install the dependencies
3. Run: `python cookbook/agents/personalized_memories_and_summaries.py` to run the agent
"""
from agno.agent import Agent
from agno.db.postgres import PostgresDb
from agno.models.perplexity import Perplexity
from rich.pretty import pprint
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
agent = Agent(
model=Perplexity(id="sonar-pro"),
# Store the memories and summary in a database
db=PostgresDb(db_url=db_url),
update_memory_on_run=True,
enable_session_summaries=True,
)
# -*- Share personal information
agent.print_response("My name is john billings?", stream=True)
# -*- Print memories and summary
if agent.db:
pprint(agent.get_user_memories(user_id="test_user"))
pprint(
agent.get_session(session_id="test_session").summary # type: ignore
)
# -*- Share personal information
agent.print_response("I live in nyc?", stream=True)
# -*- Print memories and summary
if agent.db:
pprint(agent.get_user_memories(user_id="test_user"))
pprint(
agent.get_session(session_id="test_session").summary # type: ignore
)
# -*- Share personal information
agent.print_response("I'm going to a concert tomorrow?", stream=True)
# -*- Print memories and summary
if agent.db:
pprint(agent.get_user_memories(user_id="test_user"))
pprint(
agent.get_session(session_id="test_session").summary # type: ignore
)
# Ask about the conversation
agent.print_response(
"What have we been talking about, do you know my name?", stream=True
)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/perplexity/memory.py",
"license": "Apache License 2.0",
"lines": 55,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/requesty/tool_use.py | """Run `uv pip install ddgs` to install dependencies."""
import asyncio

from agno.agent import Agent
from agno.models.requesty import Requesty
from agno.tools.websearch import WebSearchTools

# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# Requesty routes to an OpenAI-compatible endpoint; the agent answers in
# Markdown and may call the web-search tool to ground its reply.
model = Requesty(id="openai/gpt-4o")
search_tools = WebSearchTools()
agent = Agent(model=model, tools=[search_tools], markdown=True)

# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    question = "Whats happening in France?"
    # --- Sync + Streaming ---
    agent.print_response(question, stream=True)
    # --- Async + Streaming ---
    asyncio.run(agent.aprint_response(question, stream=True))
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/requesty/tool_use.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/siliconflow/tool_use.py | """Run `uv pip install duckduckgo-search` to install dependencies."""
from agno.agent import Agent
from agno.models.siliconflow import Siliconflow
from agno.tools.websearch import WebSearchTools
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# The bare triple-quoted string below is a no-op author note (it is not the
# module docstring, since it does not appear first in the file).
"""
The current version of the siliconflow-chat model's Function Calling capability is stable and supports tool integration effectively.
"""
# Web-search agent on SiliconFlow with verbose tool-call and debug output.
agent = Agent(
    model=Siliconflow(id="openai/gpt-oss-120b"),
    tools=[WebSearchTools()],
    show_tool_calls=True,
    markdown=True,
    debug_mode=True,
)
# NOTE(review): "What happing" looks like a typo for "What's happening" —
# left unchanged here because prompt text is runtime behavior.
agent.print_response("What happing in America?")
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/siliconflow/tool_use.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/together/tool_use.py | """Run `uv pip install ddgs` to install dependencies."""
import asyncio

from agno.agent import Agent
from agno.models.together import Together
from agno.tools.websearch import WebSearchTools

# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# Llama 3.1 8B served via Together, with web search available as a tool.
agent = Agent(
    model=Together(id="meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo"),
    tools=[WebSearchTools()],
    markdown=True,
)

# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    prompt = "Whats happening in France?"
    # --- Sync ---
    agent.print_response(prompt)
    # --- Sync + Streaming ---
    agent.print_response(prompt, stream=True)
    # --- Async + Streaming ---
    asyncio.run(agent.aprint_response(prompt, stream=True))
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/together/tool_use.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/vercel/knowledge.py | """Run `uv pip install ddgs` to install dependencies."""
from agno.agent import Agent
from agno.knowledge.knowledge import Knowledge
from agno.models.vercel import V0
from agno.vectordb.pgvector import PgVector
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# Local pgvector instance (started via cookbook/scripts/run_pgvector.sh).
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
knowledge = Knowledge(
    vector_db=PgVector(table_name="recipes", db_url=db_url),
)
# Add content to the knowledge
# NOTE(review): insert() runs at import time, so the PDF is re-ingested on
# every execution of this script — fine for a demo, wasteful otherwise.
knowledge.insert(url="https://agno-public.s3.amazonaws.com/recipes/ThaiRecipes.pdf")
# v0 (Vercel) model with the ingested knowledge base attached.
agent = Agent(model=V0(id="v0-1.0-md"), knowledge=knowledge)
agent.print_response("How to make Thai curry?", markdown=True)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/vercel/knowledge.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/vertexai/claude/db.py | """Run `uv pip install ddgs sqlalchemy anthropic` to install dependencies."""
from agno.agent import Agent
from agno.db.sqlite import SqliteDb
from agno.models.vertexai.claude import Claude
from agno.tools.websearch import WebSearchTools

# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# Session history is persisted to a local SQLite file so the second question
# can resolve the pronoun "their" from the first answer.
db = SqliteDb(db_file="tmp/data.db")

agent = Agent(
    model=Claude(id="claude-sonnet-4@20250514"),
    db=db,
    tools=[WebSearchTools()],
    add_history_to_context=True,
)

for question in (
    "How many people live in Canada?",
    "What is their national anthem called?",
):
    agent.print_response(question)

# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/vertexai/claude/db.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/vertexai/claude/knowledge.py | """Run `uv pip install ddgs sqlalchemy pgvector pypdf anthropic openai` to install dependencies."""
from agno.agent import Agent
from agno.knowledge.embedder.openai import OpenAIEmbedder
from agno.knowledge.knowledge import Knowledge
from agno.models.vertexai.claude import Claude
from agno.vectordb.pgvector import PgVector
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# Local pgvector instance (see cookbook/scripts/run_pgvector.sh).
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
knowledge = Knowledge(
    vector_db=PgVector(
        table_name="recipes",
        db_url=db_url,
        # OpenAI embeddings are used for indexing even though the chat model
        # is Claude on Vertex AI — hence the openai dependency in the header.
        embedder=OpenAIEmbedder(),
    ),
)
# Add content to the knowledge
# NOTE(review): insert() runs at import time and re-ingests the PDF on every
# execution of this script.
knowledge.insert(url="https://agno-public.s3.amazonaws.com/recipes/ThaiRecipes.pdf")
agent = Agent(
    model=Claude(id="claude-sonnet-4@20250514"),
    knowledge=knowledge,
)
agent.print_response("How to make Thai curry?", markdown=True)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/vertexai/claude/knowledge.py",
"license": "Apache License 2.0",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/vertexai/claude/memory.py | """
This recipe shows how to use personalized memories and summaries in an agent.
Steps:
1. Run: `./cookbook/scripts/run_pgvector.sh` to start a postgres container with pgvector
2. Run: `uv pip install anthropic sqlalchemy 'psycopg[binary]' pgvector` to install the dependencies
3. Run: `python cookbook/90_models/vertexai/claude/memory.py` to run the agent
"""
from agno.agent import Agent
from agno.db.postgres import PostgresDb
from agno.models.vertexai.claude import Claude
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# Setup the database (the pgvector container from step 1)
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
db = PostgresDb(db_url=db_url)
agent = Agent(
    model=Claude(id="claude-sonnet-4@20250514"),
    # Pass the database to the Agent
    db=db,
    # Store the memories and summary in the database
    update_memory_on_run=True,
    enable_session_summaries=True,
)
# -*- Share personal information
agent.print_response("My name is john billings?", stream=True)
# -*- Share personal information
agent.print_response("I live in nyc?", stream=True)
# -*- Share personal information
agent.print_response("I'm going to a concert tomorrow?", stream=True)
# Ask about the conversation
agent.print_response(
    "What have we been talking about, do you know my name?", stream=True
)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/vertexai/claude/memory.py",
"license": "Apache License 2.0",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/vertexai/claude/tool_use.py | """Run `uv pip install ddgs` to install dependencies."""
import asyncio

from agno.agent import Agent
from agno.models.vertexai.claude import Claude
from agno.tools.websearch import WebSearchTools

# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# Claude on Vertex AI with web search; responses are rendered as Markdown.
agent = Agent(
    model=Claude(id="claude-sonnet-4@20250514"),
    tools=[WebSearchTools()],
    markdown=True,
)

# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    query = "Whats happening in France?"
    # --- Sync ---
    agent.print_response(query)
    # --- Sync + Streaming ---
    agent.print_response(query, stream=True)
    # --- Async + Streaming ---
    asyncio.run(agent.aprint_response(query, stream=True))
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/vertexai/claude/tool_use.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/vllm/db.py | """Run `uv pip install sqlalchemy` and ensure Postgres is running (`./cookbook/scripts/run_pgvector.sh`)."""
from agno.agent import Agent
from agno.db.postgres import PostgresDb
from agno.models.vllm import VLLM
from agno.tools.websearch import WebSearchTools
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# Setup the database (local pgvector container from run_pgvector.sh)
db_url = "postgresql+psycopg://ai:ai@localhost:5532/ai"
db = PostgresDb(db_url=db_url)
# Qwen model served by a local vLLM instance; persisted history lets the
# second question resolve "their" from the first answer.
agent = Agent(
    model=VLLM(id="Qwen/Qwen2.5-7B-Instruct"),
    db=db,
    tools=[WebSearchTools()],
    add_history_to_context=True,
)
agent.print_response("How many people live in Canada?")
agent.print_response("What is their national anthem called?")
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/vllm/db.py",
"license": "Apache License 2.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/vllm/memory.py | """
Personalized memory and session summaries with vLLM.
Prerequisites:
1. Start a Postgres + pgvector container (helper script is provided):
   ./cookbook/scripts/run_pgvector.sh
2. Install dependencies:
   uv pip install sqlalchemy 'psycopg[binary]' pgvector
3. Run a vLLM server (any open model). Example with Phi-3:
   vllm serve microsoft/Phi-3-mini-128k-instruct \
       --dtype float32 \
       --enable-auto-tool-choice \
       --tool-call-parser pythonic
Then execute this script – it will remember facts you tell it and generate a
summary.
"""
from agno.agent import Agent
from agno.db.postgres import PostgresDb
from agno.models.vllm import VLLM
from rich.pretty import pprint
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# Change this if your Postgres container is running elsewhere
DB_URL = "postgresql+psycopg://ai:ai@localhost:5532/ai"
agent = Agent(
    model=VLLM(id="microsoft/Phi-3-mini-128k-instruct"),
    db=PostgresDb(db_url=DB_URL),
    update_memory_on_run=True,
    enable_session_summaries=True,
)
# -*- Share personal information
agent.print_response("My name is john billings?", stream=True)
# -*- Print memories and summary
# NOTE(review): the runs above never pass user_id="test_user" or
# session_id="test_session", so these lookups likely come back empty —
# confirm the intended ids or pass them to print_response.
if agent.db:
    pprint(agent.get_user_memories(user_id="test_user"))
    pprint(
        agent.get_session(session_id="test_session").summary  # type: ignore
    )
# -*- Share personal information
agent.print_response("I live in nyc?", stream=True)
# -*- Print memories and summary
if agent.db:
    pprint(agent.get_user_memories(user_id="test_user"))
    pprint(
        agent.get_session(session_id="test_session").summary  # type: ignore
    )
# -*- Share personal information
agent.print_response("I'm going to a concert tomorrow?", stream=True)
# -*- Print memories and summary
if agent.db:
    pprint(agent.get_user_memories(user_id="test_user"))
    pprint(
        agent.get_session(session_id="test_session").summary  # type: ignore
    )
# Ask about the conversation
agent.print_response(
    "What have we been talking about, do you know my name?", stream=True
)
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/vllm/memory.py",
"license": "Apache License 2.0",
"lines": 63,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/90_models/xai/finance_agent.py | """️ Finance Agent - Your Personal Market Analyst!
This example shows how to create a sophisticated financial analyst that provides
comprehensive market insights using real-time data. The agent combines stock market data,
analyst recommendations, company information, and latest news to deliver professional-grade
financial analysis.
Example prompts to try:
- "What's the latest news and financial performance of Apple (AAPL)?"
- "Give me a detailed analysis of Tesla's (TSLA) current market position"
- "How are Microsoft's (MSFT) financials looking? Include analyst recommendations"
- "Analyze NVIDIA's (NVDA) stock performance and future outlook"
- "What's the market saying about Amazon's (AMZN) latest quarter?"
Run: `uv pip install openai yfinance agno` to install the dependencies
"""
from textwrap import dedent
from agno.agent import Agent
from agno.models.xai import xAI
from agno.tools.yfinance import YFinanceTools
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# Wall Street analyst persona: yfinance supplies live market data, the dedent
# block below defines the report structure the model must follow.
finance_agent = Agent(
    model=xAI(id="grok-3-mini-beta"),
    tools=[YFinanceTools()],
    instructions=dedent("""\
        You are a seasoned Wall Street analyst with deep expertise in market analysis!
        Follow these steps for comprehensive financial analysis:
        1. Market Overview
        - Latest stock price
        - 52-week high and low
        2. Financial Deep Dive
        - Key metrics (P/E, Market Cap, EPS)
        3. Professional Insights
        - Analyst recommendations breakdown
        - Recent rating changes
        4. Market Context
        - Industry trends and positioning
        - Competitive analysis
        - Market sentiment indicators
        Your reporting style:
        - Begin with an executive summary
        - Use tables for data presentation
        - Include clear section headers
        - Add emoji indicators for trends ( )
        - Highlight key insights with bullet points
        - Compare metrics to industry averages
        - Include technical term explanations
        - End with a forward-looking analysis
        Risk Disclosure:
        - Always highlight potential risk factors
        - Note market uncertainties
        - Mention relevant regulatory concerns
    """),
    add_datetime_to_context=True,
    markdown=True,
)
# Example usage with detailed market analysis request
finance_agent.print_response(
    "Write a comprehensive report on TSLA",
    stream=True,
)
# # Semiconductor market analysis example
# finance_agent.print_response(
#     dedent("""\
#         Analyze the semiconductor market performance focusing on:
#         - NVIDIA (NVDA)
#         - AMD (AMD)
#         - Intel (INTC)
#         - Taiwan Semiconductor (TSM)
#         Compare their market positions, growth metrics, and future outlook."""),
#     stream=True,
# )
# # Automotive market analysis example
# finance_agent.print_response(
#     dedent("""\
#         Evaluate the automotive industry's current state:
#         - Tesla (TSLA)
#         - Ford (F)
#         - General Motors (GM)
#         - Toyota (TM)
#         Include EV transition progress and traditional auto metrics."""),
#     stream=True,
# )
# More example prompts to explore:
# The bare string below is a no-op prompt library kept for reference only.
"""
Advanced analysis queries:
1. "Compare Tesla's valuation metrics with traditional automakers"
2. "Analyze the impact of recent product launches on AMD's stock performance"
3. "How do Meta's financial metrics compare to its social media peers?"
4. "Evaluate Netflix's subscriber growth impact on financial metrics"
5. "Break down Amazon's revenue streams and segment performance"
Industry-specific analyses:
Semiconductor Market:
1. "How is the chip shortage affecting TSMC's market position?"
2. "Compare NVIDIA's AI chip revenue growth with competitors"
3. "Analyze Intel's foundry strategy impact on stock performance"
4. "Evaluate semiconductor equipment makers like ASML and Applied Materials"
Automotive Industry:
1. "Compare EV manufacturers' production metrics and margins"
2. "Analyze traditional automakers' EV transition progress"
3. "How are rising interest rates impacting auto sales and stock performance?"
4. "Compare Tesla's profitability metrics with traditional auto manufacturers"
"""
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    pass
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/90_models/xai/finance_agent.py",
"license": "Apache License 2.0",
"lines": 107,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
agno-agi/agno:cookbook/91_tools/mcp/dynamic_headers/client.py | """Agent with MCP tools using Dynamic Headers"""
import asyncio
from typing import TYPE_CHECKING, Optional
from agno.agent import Agent
from agno.models.openai import OpenAIChat
from agno.run import RunContext
from agno.tools.mcp import MCPTools
if TYPE_CHECKING:
from agno.agent import Agent
from agno.team import Team
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
async def main():
    """Example showing dynamic headers with different users."""

    # Step 1: Define your header provider
    # This function can receive RunContext, Agent and/or Team based on its signature
    def header_provider(
        run_context: RunContext,
        agent: Optional["Agent"] = None,
        team: Optional["Team"] = None,
    ) -> dict:
        """
        Generate dynamic headers from RunContext and Agent.
        The header_provider can accept any combination of these parameters:
        - run_context: The RunContext for the current agent or team run
        - agent: The contextual Agent instance
        - team: The contextual Team instance
        The RunContext contains:
        - run_id: Unique ID for this agent run
        - user_id: User ID passed to agent.arun()
        - session_id: Session ID passed to agent.arun()
        - metadata: Dict of custom metadata passed to agent.arun()
        """
        headers = {
            "X-User-ID": run_context.user_id or "unknown",
            "X-Session-ID": run_context.session_id or "unknown",
            "X-Run-ID": run_context.run_id,
            "X-Tenant-ID": run_context.metadata.get("tenant_id", "no-tenant")
            if run_context.metadata
            else "no-tenant",
            # You can also access agent and team properties if needed
            "X-Agent-Name": agent.name
            if agent
            else team.name
            if team
            else "unnamed-agno-entity",
        }
        return headers

    # Step 2: Create MCPTools with header_provider
    # This enables dynamic headers for all MCP tool calls.
    # NOTE: a single MCPTools instance is deliberately shared by both agents
    # below and closed once in the finally block.
    mcp_tools = MCPTools(
        url="http://localhost:8000/mcp",  # Your MCP server URL
        transport="streamable-http",  # Use streamable-http or sse for headers
        header_provider=header_provider,  # This enables dynamic headers!
    )
    # Step 3: Connect to MCP server
    await mcp_tools.connect()
    print("Connected to MCP server")
    print(f" Available tools: {list(mcp_tools.functions.keys())}\n")
    try:
        # Step 4: Create agent with MCP tools
        agent_1 = Agent(
            name="agent-1",
            model=OpenAIChat(id="gpt-5.2"),
            tools=[mcp_tools],
            markdown=False,
        )
        # Step 5: Run agent with different users
        # The agent automatically creates RunContext and injects it into tools!
        # Example 1: User "neel"
        print("=" * 60)
        print("Example 1: Running as user 'neel'")
        print("=" * 60)
        response1 = await agent_1.arun(
            "Please use the greet tool to greet me. My name is neel.",
            user_id="neel",  # ← Goes into RunContext.user_id
            session_id="session-1",  # ← Goes into RunContext.session_id
            metadata={  # ← Goes into RunContext.metadata
                "tenant_id": "tenant-1",
            },
        )
        print(f"Response: {response1.content}\n")
        # Example 2: User "dirk"
        print("=" * 60)
        print("Example 2: Running as user 'dirk'")
        print("=" * 60)
        agent_2 = Agent(
            name="agent-2",
            model=OpenAIChat(id="gpt-5.2"),
            tools=[mcp_tools],
            markdown=False,
        )
        response2 = await agent_2.arun(
            "Please use the greet tool to greet me. My name is dirk.",
            user_id="dirk",  # Different user!
            session_id="session-2",  # Different session!
            metadata={
                "tenant_id": "tenant-2",  # Different tenant!
            },
        )
        print(f"Response: {response2.content}\n")
        print("=" * 60)
        print("Success! Check your MCP server logs to see the headers.")
        print("=" * 60)
    finally:
        # Step 6: Clean up
        await mcp_tools.close()
        print("\nConnection closed")
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    asyncio.run(main())
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/91_tools/mcp/dynamic_headers/client.py",
"license": "Apache License 2.0",
"lines": 114,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:cookbook/91_tools/mcp/local_server/client.py | """
Client
=============================
Demonstrates client.
"""
import asyncio
from agno.agent import Agent
from agno.models.groq import Groq
from agno.tools.mcp import MCPTools
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
async def run_agent(message: str) -> None:
    """Run an MCP-backed agent on `message`, streaming the reply to stdout.

    Spawns the local FastMCP weather server as a subprocess (stdio transport)
    and exposes its tools to the agent for the lifetime of the context.

    Args:
        message: The user prompt to send to the agent.
    """
    # Initialize the MCP server.
    # FIX: the command previously pointed at cookbook/90_tools/..., but this
    # example lives under cookbook/91_tools/ (see the server module alongside
    # this file) — the old path failed to locate the server script.
    async with (
        MCPTools(
            "fastmcp run cookbook/91_tools/mcp/local_server/server.py",  # Supply the command to run the MCP server
        ) as mcp_tools,
    ):
        agent = Agent(
            model=Groq(id="llama-3.3-70b-versatile"),
            tools=[mcp_tools],
            markdown=True,
        )
        await agent.aprint_response(message, stream=True)
# Example usage
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    asyncio.run(run_agent("What is the weather in San Francisco?"))
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/91_tools/mcp/local_server/client.py",
"license": "Apache License 2.0",
"lines": 31,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/91_tools/mcp/local_server/server.py | """
`fastmcp` is required for this demo.
```bash
uv pip install fastmcp
```
Run this with `fastmcp run cookbook/91_tools/mcp/local_server/server.py`
"""
from fastmcp import FastMCP
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# FastMCP server exposing two toy weather tools over the stdio transport.
mcp = FastMCP("weather_tools")
@mcp.tool()
def get_weather(city: str) -> str:
    # Canned response; no real weather lookup is performed.
    return f"The weather in {city} is sunny"
@mcp.tool()
def get_temperature(city: str) -> str:
    # Canned response; no real temperature lookup is performed.
    return f"The temperature in {city} is 70 degrees"
# ---------------------------------------------------------------------------
# Run Agent
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    mcp.run(transport="stdio")
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/91_tools/mcp/local_server/server.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/92_integrations/a2a/basic_agent/basic_agent.py | """
Basic A2A Agent Executor
========================
Implements an A2A executor that routes incoming text to an Agno agent.
"""
from a2a.server.agent_execution import AgentExecutor, RequestContext
from a2a.server.events import EventQueue
from a2a.types import Part, TextPart
from a2a.utils import new_agent_text_message
from agno.agent import Agent, Message, RunOutput
from agno.models.openai import OpenAIChat
from typing_extensions import override
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# Module-level Agno agent shared by every BasicAgentExecutor instance.
agent = Agent(
    model=OpenAIChat(id="gpt-5.2"),
)
# ---------------------------------------------------------------------------
# Create Executor
# ---------------------------------------------------------------------------
class BasicAgentExecutor(AgentExecutor):
    """A2A executor that forwards the request's text to an Agno agent.

    Extracts the first text part from the incoming A2A message, runs the
    agent on it, and enqueues the agent's reply as an A2A text message.
    """

    def __init__(self):
        # Reuse the module-level agent so no model setup happens per request.
        self.agent = agent

    @override
    async def execute(
        self,
        context: RequestContext,
        event_queue: EventQueue,
    ) -> None:
        """Run the agent on the first TextPart of the request and emit the reply.

        Args:
            context: Incoming A2A request; only the first TextPart is used.
            event_queue: Queue the response message is published to.
        """
        message: Message = Message(role="user", content="")
        for part in context.message.parts:
            if isinstance(part, Part):
                if isinstance(part.root, TextPart):
                    message.content = part.root.text
                    break
        result: RunOutput = await self.agent.arun(message)
        # FIX: EventQueue.enqueue_event is a coroutine in the a2a SDK; without
        # `await` the coroutine was created but never run, so the response
        # event was never actually enqueued.
        await event_queue.enqueue_event(new_agent_text_message(result.content))

    @override
    async def cancel(self, context: RequestContext, event_queue: EventQueue) -> None:
        """Cancellation is not supported by this executor."""
        raise Exception("Cancel not supported")
# ---------------------------------------------------------------------------
# Run Example
# ---------------------------------------------------------------------------
# This module is import-only; the actual A2A server entrypoint is __main__.py.
if __name__ == "__main__":
    print(
        "Run `python cookbook/92_integrations/a2a/basic_agent/__main__.py` to start the A2A server."
    )
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/92_integrations/a2a/basic_agent/basic_agent.py",
"license": "Apache License 2.0",
"lines": 49,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:cookbook/92_integrations/discord/agent_with_media.py | """
Discord Agent With Media
========================
Runs a Discord bot that can analyze user-provided media.
"""
from agno.agent import Agent
from agno.integrations.discord import DiscordClient
from agno.models.google import Gemini
# ---------------------------------------------------------------------------
# Create Agent
# ---------------------------------------------------------------------------
# Gemini-backed agent; the last 3 runs of history stay in context so
# follow-up questions about previously sent media still work.
media_agent = Agent(
    name="Media Agent",
    model=Gemini(id="gemini-3-flash-preview"),
    description="A Media processing agent",
    instructions="Analyze images, audios and videos sent by the user",
    add_history_to_context=True,
    num_history_runs=3,
    add_datetime_to_context=True,
    markdown=True,
)
# Wrap the agent in a Discord client. NOTE(review): bot-token configuration is
# handled inside DiscordClient — presumably via environment; confirm in its docs.
discord_agent = DiscordClient(media_agent)
# ---------------------------------------------------------------------------
# Run Discord Bot
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # Blocks and serves the bot until interrupted.
    discord_agent.serve()
| {
"repo_id": "agno-agi/agno",
"file_path": "cookbook/92_integrations/discord/agent_with_media.py",
"license": "Apache License 2.0",
"lines": 27,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
agno-agi/agno:libs/agno/agno/learn/stores/decision_log.py | """
Decision Log Store
==================
Storage backend for Decision Log learning type.
Records decisions made by agents with reasoning, context, and outcomes.
Useful for auditing, debugging, and learning from past decisions.
Key Features:
- Log decisions with reasoning and context
- Record outcomes for feedback loops
- Search past decisions by type, time range, or content
- Agent tools for explicit decision logging
Scope:
- Decisions are stored per agent/session
- Can be queried by agent_id, session_id, or time range
Supported Modes:
- ALWAYS: Automatic extraction of decisions from tool calls
- AGENTIC: Agent explicitly logs decisions via tools
"""
import uuid
from dataclasses import dataclass, field
from datetime import datetime, timedelta
from os import getenv
from textwrap import dedent
from typing import Any, Callable, List, Optional, Union
from agno.learn.config import DecisionLogConfig, LearningMode
from agno.learn.schemas import DecisionLog
from agno.learn.stores.protocol import LearningStore
from agno.learn.utils import from_dict_safe, to_dict_safe
from agno.utils.log import (
log_debug,
log_warning,
set_log_level_to_debug,
set_log_level_to_info,
)
try:
from agno.db.base import AsyncBaseDb, BaseDb
from agno.models.message import Message
except ImportError:
pass
@dataclass
class DecisionLogStore(LearningStore):
"""Storage backend for Decision Log learning type.
Records and retrieves decisions made by agents. Decisions include
the choice made, reasoning, context, and optionally the outcome.
Args:
config: DecisionLogConfig with all settings including db and model.
debug_mode: Enable debug logging.
"""
config: DecisionLogConfig = field(default_factory=DecisionLogConfig)
debug_mode: bool = False
# State tracking (internal)
decisions_updated: bool = field(default=False, init=False)
_schema: Any = field(default=None, init=False)
    def __post_init__(self):
        # Resolve the decision schema once: use the configured override when
        # provided, otherwise fall back to the built-in DecisionLog schema.
        self._schema = self.config.schema or DecisionLog
# =========================================================================
# LearningStore Protocol Implementation
# =========================================================================
    @property
    def learning_type(self) -> str:
        """Unique identifier for this learning type."""
        # Constant key used to register/look up decision-log entries.
        return "decision_log"
    @property
    def schema(self) -> Any:
        """Schema class used for decisions."""
        # Resolved in __post_init__: config.schema if provided, else DecisionLog.
        return self._schema
def recall(
self,
agent_id: Optional[str] = None,
session_id: Optional[str] = None,
decision_type: Optional[str] = None,
limit: int = 10,
days: Optional[int] = None,
**kwargs,
) -> Optional[List[DecisionLog]]:
"""Retrieve recent decisions.
Args:
agent_id: Filter by agent (optional).
session_id: Filter by session (optional).
decision_type: Filter by decision type (optional).
limit: Maximum number of decisions to return.
days: Only return decisions from last N days.
**kwargs: Additional context (ignored).
Returns:
List of decisions, or None if none found.
"""
return self.search(
agent_id=agent_id,
session_id=session_id,
decision_type=decision_type,
limit=limit,
days=days,
)
async def arecall(
self,
agent_id: Optional[str] = None,
session_id: Optional[str] = None,
decision_type: Optional[str] = None,
limit: int = 10,
days: Optional[int] = None,
**kwargs,
) -> Optional[List[DecisionLog]]:
"""Async version of recall."""
return await self.asearch(
agent_id=agent_id,
session_id=session_id,
decision_type=decision_type,
limit=limit,
days=days,
)
def process(
self,
messages: List[Any],
agent_id: Optional[str] = None,
session_id: Optional[str] = None,
user_id: Optional[str] = None,
team_id: Optional[str] = None,
**kwargs,
) -> None:
"""Extract decisions from messages (tool calls, etc).
In ALWAYS mode, this extracts decisions from tool calls and
significant response choices. In AGENTIC mode, this is a no-op
as decisions are logged explicitly via tools.
Args:
messages: Conversation messages to analyze.
agent_id: Agent context.
session_id: Session context.
user_id: User context.
team_id: Team context.
**kwargs: Additional context (ignored).
"""
if self.config.mode != LearningMode.ALWAYS:
return
if not messages:
return
# Extract decisions from tool calls in messages
self._extract_decisions_from_messages(
messages=messages,
agent_id=agent_id,
session_id=session_id,
user_id=user_id,
team_id=team_id,
)
async def aprocess(
self,
messages: List[Any],
agent_id: Optional[str] = None,
session_id: Optional[str] = None,
user_id: Optional[str] = None,
team_id: Optional[str] = None,
**kwargs,
) -> None:
"""Async version of process."""
if self.config.mode != LearningMode.ALWAYS:
return
if not messages:
return
await self._aextract_decisions_from_messages(
messages=messages,
agent_id=agent_id,
session_id=session_id,
user_id=user_id,
team_id=team_id,
)
def build_context(self, data: Any) -> str:
"""Build context for the agent.
Formats recent decisions for injection into the agent's system prompt.
Args:
data: List of decisions from recall().
Returns:
Context string to inject into the agent's system prompt.
"""
if not data:
if self._should_expose_tools:
return dedent("""\
<decision_log>
No recent decisions logged.
Use `log_decision` to record significant decisions with reasoning.
Use `search_decisions` to find past decisions.
</decision_log>""")
return ""
decisions = data if isinstance(data, list) else [data]
context = "<decision_log>\n"
context += "Recent decisions:\n\n"
for decision in decisions[:5]: # Limit to 5 most recent
if isinstance(decision, DecisionLog):
context += f"- **{decision.decision}**\n"
if decision.reasoning:
context += f" Reasoning: {decision.reasoning}\n"
if decision.outcome:
context += f" Outcome: {decision.outcome}\n"
context += "\n"
elif isinstance(decision, dict):
context += f"- **{decision.get('decision', 'Unknown')}**\n"
if decision.get("reasoning"):
context += f" Reasoning: {decision['reasoning']}\n"
if decision.get("outcome"):
context += f" Outcome: {decision['outcome']}\n"
context += "\n"
if self._should_expose_tools:
context += dedent("""
Use `log_decision` to record new decisions.
Use `search_decisions` to find past decisions.
Use `record_outcome` to update a decision with its outcome.
""")
context += "</decision_log>"
return context
def get_tools(
self,
agent_id: Optional[str] = None,
session_id: Optional[str] = None,
user_id: Optional[str] = None,
team_id: Optional[str] = None,
**kwargs,
) -> List[Callable]:
"""Get tools to expose to agent.
Args:
agent_id: Agent context.
session_id: Session context.
user_id: User context.
team_id: Team context.
**kwargs: Additional context (ignored).
Returns:
List containing decision logging and search tools if enabled.
"""
if not self._should_expose_tools:
return []
return self.get_agent_tools(
agent_id=agent_id,
session_id=session_id,
user_id=user_id,
team_id=team_id,
)
async def aget_tools(
self,
agent_id: Optional[str] = None,
session_id: Optional[str] = None,
user_id: Optional[str] = None,
team_id: Optional[str] = None,
**kwargs,
) -> List[Callable]:
"""Async version of get_tools."""
if not self._should_expose_tools:
return []
return await self.aget_agent_tools(
agent_id=agent_id,
session_id=session_id,
user_id=user_id,
team_id=team_id,
)
@property
def was_updated(self) -> bool:
"""Check if decisions were updated in last operation."""
return self.decisions_updated
@property
def _should_expose_tools(self) -> bool:
"""Check if tools should be exposed to the agent."""
return self.config.mode == LearningMode.AGENTIC or self.config.enable_agent_tools
# =========================================================================
# Properties
# =========================================================================
@property
def db(self) -> Optional[Union["BaseDb", "AsyncBaseDb"]]:
"""Database backend."""
return self.config.db
@property
def model(self):
"""Model for extraction."""
return self.config.model
# =========================================================================
# Debug/Logging
# =========================================================================
def set_log_level(self):
"""Set log level based on debug_mode or environment variable."""
if self.debug_mode or getenv("AGNO_DEBUG", "false").lower() == "true":
self.debug_mode = True
set_log_level_to_debug()
else:
set_log_level_to_info()
# =========================================================================
# Agent Tools
# =========================================================================
def get_agent_tools(
self,
agent_id: Optional[str] = None,
session_id: Optional[str] = None,
user_id: Optional[str] = None,
team_id: Optional[str] = None,
) -> List[Callable]:
"""Get the tools to expose to the agent."""
tools = []
if self.config.agent_can_save:
log_decision = self._build_log_decision_tool(
agent_id=agent_id,
session_id=session_id,
user_id=user_id,
team_id=team_id,
)
if log_decision:
tools.append(log_decision)
record_outcome = self._build_record_outcome_tool(
agent_id=agent_id,
team_id=team_id,
)
if record_outcome:
tools.append(record_outcome)
if self.config.agent_can_search:
search_decisions = self._build_search_decisions_tool(
agent_id=agent_id,
session_id=session_id,
)
if search_decisions:
tools.append(search_decisions)
return tools
async def aget_agent_tools(
self,
agent_id: Optional[str] = None,
session_id: Optional[str] = None,
user_id: Optional[str] = None,
team_id: Optional[str] = None,
) -> List[Callable]:
"""Async version of get_agent_tools."""
tools = []
if self.config.agent_can_save:
log_decision = await self._abuild_log_decision_tool(
agent_id=agent_id,
session_id=session_id,
user_id=user_id,
team_id=team_id,
)
if log_decision:
tools.append(log_decision)
record_outcome = await self._abuild_record_outcome_tool(
agent_id=agent_id,
team_id=team_id,
)
if record_outcome:
tools.append(record_outcome)
if self.config.agent_can_search:
search_decisions = await self._abuild_search_decisions_tool(
agent_id=agent_id,
session_id=session_id,
)
if search_decisions:
tools.append(search_decisions)
return tools
def _build_log_decision_tool(
self,
agent_id: Optional[str] = None,
session_id: Optional[str] = None,
user_id: Optional[str] = None,
team_id: Optional[str] = None,
) -> Optional[Callable]:
"""Build the log_decision tool."""
store = self
def log_decision(
decision: str,
reasoning: Optional[str] = None,
decision_type: Optional[str] = None,
context: Optional[str] = None,
alternatives: Optional[str] = None,
confidence: Optional[float] = None,
) -> str:
"""Log a significant decision with reasoning.
Use this to record important choices you make, especially:
- Tool selection decisions
- Response style choices
- When you decide to ask for clarification
- When you choose between different approaches
Args:
decision: What you decided to do.
reasoning: Why you made this decision.
decision_type: Category (tool_selection, response_style, clarification, etc).
context: The situation that required this decision.
alternatives: Other options you considered (comma-separated).
confidence: How confident you are (0.0 to 1.0).
Returns:
Confirmation with decision ID.
"""
try:
decision_id = f"dec_{uuid.uuid4().hex[:8]}"
alt_list = [a.strip() for a in alternatives.split(",")] if alternatives else None
decision_obj = DecisionLog(
id=decision_id,
decision=decision,
reasoning=reasoning,
decision_type=decision_type,
context=context,
alternatives=alt_list,
confidence=confidence,
session_id=session_id,
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
created_at=datetime.utcnow().isoformat(),
)
store.save(decision=decision_obj)
log_debug(f"DecisionLogStore: Logged decision {decision_id}")
return f"Decision logged: {decision_id}"
except Exception as e:
log_warning(f"Error logging decision: {e}")
return f"Error: {e}"
return log_decision
async def _abuild_log_decision_tool(
self,
agent_id: Optional[str] = None,
session_id: Optional[str] = None,
user_id: Optional[str] = None,
team_id: Optional[str] = None,
) -> Optional[Callable]:
"""Async version of _build_log_decision_tool."""
store = self
async def log_decision(
decision: str,
reasoning: Optional[str] = None,
decision_type: Optional[str] = None,
context: Optional[str] = None,
alternatives: Optional[str] = None,
confidence: Optional[float] = None,
) -> str:
"""Log a significant decision with reasoning."""
try:
decision_id = f"dec_{uuid.uuid4().hex[:8]}"
alt_list = [a.strip() for a in alternatives.split(",")] if alternatives else None
decision_obj = DecisionLog(
id=decision_id,
decision=decision,
reasoning=reasoning,
decision_type=decision_type,
context=context,
alternatives=alt_list,
confidence=confidence,
session_id=session_id,
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
created_at=datetime.utcnow().isoformat(),
)
await store.asave(decision=decision_obj)
log_debug(f"DecisionLogStore: Logged decision {decision_id}")
return f"Decision logged: {decision_id}"
except Exception as e:
log_warning(f"Error logging decision: {e}")
return f"Error: {e}"
return log_decision
def _build_record_outcome_tool(
self,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
) -> Optional[Callable]:
"""Build the record_outcome tool."""
store = self
def record_outcome(
decision_id: str,
outcome: str,
outcome_quality: Optional[str] = None,
) -> str:
"""Record the outcome of a previous decision.
Use this to update a decision with what actually happened.
This helps build feedback loops for learning.
Args:
decision_id: The ID of the decision to update.
outcome: What happened as a result of the decision.
outcome_quality: Was it good, bad, or neutral?
Returns:
Confirmation message.
"""
try:
success = store.update_outcome(
decision_id=decision_id,
outcome=outcome,
outcome_quality=outcome_quality,
)
if success:
return f"Outcome recorded for decision {decision_id}"
else:
return f"Decision {decision_id} not found"
except Exception as e:
log_warning(f"Error recording outcome: {e}")
return f"Error: {e}"
return record_outcome
async def _abuild_record_outcome_tool(
self,
agent_id: Optional[str] = None,
team_id: Optional[str] = None,
) -> Optional[Callable]:
"""Async version of _build_record_outcome_tool."""
store = self
async def record_outcome(
decision_id: str,
outcome: str,
outcome_quality: Optional[str] = None,
) -> str:
"""Record the outcome of a previous decision."""
try:
success = await store.aupdate_outcome(
decision_id=decision_id,
outcome=outcome,
outcome_quality=outcome_quality,
)
if success:
return f"Outcome recorded for decision {decision_id}"
else:
return f"Decision {decision_id} not found"
except Exception as e:
log_warning(f"Error recording outcome: {e}")
return f"Error: {e}"
return record_outcome
def _build_search_decisions_tool(
self,
agent_id: Optional[str] = None,
session_id: Optional[str] = None,
) -> Optional[Callable]:
"""Build the search_decisions tool."""
store = self
def search_decisions(
query: Optional[str] = None,
decision_type: Optional[str] = None,
days: Optional[int] = None,
limit: int = 5,
) -> str:
"""Search past decisions.
Use this to find relevant past decisions for context.
Args:
query: Text to search for in decisions.
decision_type: Filter by type (tool_selection, response_style, etc).
days: Only search last N days.
limit: Maximum results to return.
Returns:
Formatted list of matching decisions.
"""
try:
results = store.search(
query=query,
decision_type=decision_type,
days=days,
limit=limit,
agent_id=agent_id,
)
if not results:
return "No matching decisions found."
output = []
for d in results:
line = f"[{d.id}] {d.decision}"
if d.reasoning:
line += f" - {d.reasoning[:50]}..."
if d.outcome:
line += f" -> {d.outcome[:30]}..."
output.append(line)
return "\n".join(output)
except Exception as e:
log_warning(f"Error searching decisions: {e}")
return f"Error: {e}"
return search_decisions
async def _abuild_search_decisions_tool(
self,
agent_id: Optional[str] = None,
session_id: Optional[str] = None,
) -> Optional[Callable]:
"""Async version of _build_search_decisions_tool."""
store = self
async def search_decisions(
query: Optional[str] = None,
decision_type: Optional[str] = None,
days: Optional[int] = None,
limit: int = 5,
) -> str:
"""Search past decisions."""
try:
results = await store.asearch(
query=query,
decision_type=decision_type,
days=days,
limit=limit,
agent_id=agent_id,
)
if not results:
return "No matching decisions found."
output = []
for d in results:
line = f"[{d.id}] {d.decision}"
if d.reasoning:
line += f" - {d.reasoning[:50]}..."
if d.outcome:
line += f" -> {d.outcome[:30]}..."
output.append(line)
return "\n".join(output)
except Exception as e:
log_warning(f"Error searching decisions: {e}")
return f"Error: {e}"
return search_decisions
# =========================================================================
# Read Operations
# =========================================================================
def search(
self,
query: Optional[str] = None,
agent_id: Optional[str] = None,
session_id: Optional[str] = None,
decision_type: Optional[str] = None,
days: Optional[int] = None,
limit: int = 10,
) -> List[DecisionLog]:
"""Search decisions with filters.
Args:
query: Text to search for.
agent_id: Filter by agent.
session_id: Filter by session.
decision_type: Filter by type.
days: Only last N days.
limit: Maximum results.
Returns:
List of matching decisions.
"""
if not self.db:
return []
# Ensure sync db for sync method
if not isinstance(self.db, BaseDb):
return []
try:
# Get all matching records
results = self.db.get_learnings(
learning_type=self.learning_type,
agent_id=agent_id,
limit=limit * 3, # Over-fetch for filtering
)
if not results:
return []
decisions = []
cutoff_date = None
if days:
cutoff_date = datetime.utcnow() - timedelta(days=days)
for record in results:
content = record.get("content") if isinstance(record, dict) else None
if not content:
continue
decision = from_dict_safe(DecisionLog, content)
if not decision:
continue
# Apply filters
if decision_type and decision.decision_type != decision_type:
continue
if cutoff_date and decision.created_at:
try:
created = datetime.fromisoformat(decision.created_at.replace("Z", "+00:00"))
if created < cutoff_date:
continue
except (ValueError, AttributeError):
pass
if query:
query_lower = query.lower()
text = decision.to_text().lower()
if query_lower not in text:
continue
decisions.append(decision)
if len(decisions) >= limit:
break
return decisions
except Exception as e:
log_debug(f"DecisionLogStore.search failed: {e}")
return []
async def asearch(
self,
query: Optional[str] = None,
agent_id: Optional[str] = None,
session_id: Optional[str] = None,
decision_type: Optional[str] = None,
days: Optional[int] = None,
limit: int = 10,
) -> List[DecisionLog]:
"""Async version of search."""
if not self.db:
return []
try:
if isinstance(self.db, AsyncBaseDb):
results = await self.db.get_learnings(
learning_type=self.learning_type,
agent_id=agent_id,
limit=limit * 3,
)
else:
results = self.db.get_learnings(
learning_type=self.learning_type,
agent_id=agent_id,
limit=limit * 3,
)
if not results:
return []
decisions = []
cutoff_date = None
if days:
cutoff_date = datetime.utcnow() - timedelta(days=days)
for record in results:
content = record.get("content") if isinstance(record, dict) else None
if not content:
continue
decision = from_dict_safe(DecisionLog, content)
if not decision:
continue
if decision_type and decision.decision_type != decision_type:
continue
if cutoff_date and decision.created_at:
try:
created = datetime.fromisoformat(decision.created_at.replace("Z", "+00:00"))
if created < cutoff_date:
continue
except (ValueError, AttributeError):
pass
if query:
query_lower = query.lower()
text = decision.to_text().lower()
if query_lower not in text:
continue
decisions.append(decision)
if len(decisions) >= limit:
break
return decisions
except Exception as e:
log_debug(f"DecisionLogStore.asearch failed: {e}")
return []
def get(self, decision_id: str) -> Optional[DecisionLog]:
"""Get a specific decision by ID."""
if not self.db:
return None
# Ensure sync db for sync method
if not isinstance(self.db, BaseDb):
return None
try:
# Get learnings and filter by decision_id in content
results = self.db.get_learnings(
learning_type=self.learning_type,
limit=100,
)
if not results:
return None
for record in results:
content = record.get("content") if isinstance(record, dict) else None
if content and content.get("id") == decision_id:
return from_dict_safe(DecisionLog, content)
return None
except Exception as e:
log_debug(f"DecisionLogStore.get failed: {e}")
return None
async def aget(self, decision_id: str) -> Optional[DecisionLog]:
"""Async version of get."""
if not self.db:
return None
try:
# Get learnings and filter by decision_id in content
if isinstance(self.db, AsyncBaseDb):
results = await self.db.get_learnings(
learning_type=self.learning_type,
limit=100,
)
else:
results = self.db.get_learnings(
learning_type=self.learning_type,
limit=100,
)
if not results:
return None
for record in results:
content = record.get("content") if isinstance(record, dict) else None
if content and content.get("id") == decision_id:
return from_dict_safe(DecisionLog, content)
return None
except Exception as e:
log_debug(f"DecisionLogStore.aget failed: {e}")
return None
# =========================================================================
# Write Operations
# =========================================================================
def save(self, decision: DecisionLog) -> None:
"""Save a decision to the database."""
if not self.db or not decision:
return
try:
content = to_dict_safe(decision)
if not content:
return
self.db.upsert_learning(
id=decision.id,
learning_type=self.learning_type,
agent_id=decision.agent_id,
session_id=decision.session_id,
user_id=decision.user_id,
team_id=decision.team_id,
content=content,
)
self.decisions_updated = True
log_debug(f"DecisionLogStore.save: saved decision {decision.id}")
except Exception as e:
log_debug(f"DecisionLogStore.save failed: {e}")
async def asave(self, decision: DecisionLog) -> None:
"""Async version of save."""
if not self.db or not decision:
return
try:
content = to_dict_safe(decision)
if not content:
return
if isinstance(self.db, AsyncBaseDb):
await self.db.upsert_learning(
id=decision.id,
learning_type=self.learning_type,
agent_id=decision.agent_id,
session_id=decision.session_id,
user_id=decision.user_id,
team_id=decision.team_id,
content=content,
)
else:
self.db.upsert_learning(
id=decision.id,
learning_type=self.learning_type,
agent_id=decision.agent_id,
session_id=decision.session_id,
user_id=decision.user_id,
team_id=decision.team_id,
content=content,
)
self.decisions_updated = True
log_debug(f"DecisionLogStore.asave: saved decision {decision.id}")
except Exception as e:
log_debug(f"DecisionLogStore.asave failed: {e}")
def update_outcome(
self,
decision_id: str,
outcome: str,
outcome_quality: Optional[str] = None,
) -> bool:
"""Update a decision with its outcome."""
decision = self.get(decision_id=decision_id)
if not decision:
return False
decision.outcome = outcome
decision.outcome_quality = outcome_quality
decision.updated_at = datetime.utcnow().isoformat()
self.save(decision=decision)
return True
async def aupdate_outcome(
self,
decision_id: str,
outcome: str,
outcome_quality: Optional[str] = None,
) -> bool:
"""Async version of update_outcome."""
decision = await self.aget(decision_id=decision_id)
if not decision:
return False
decision.outcome = outcome
decision.outcome_quality = outcome_quality
decision.updated_at = datetime.utcnow().isoformat()
await self.asave(decision=decision)
return True
# =========================================================================
# Extraction (ALWAYS mode)
# =========================================================================
def _extract_decisions_from_messages(
self,
messages: List["Message"],
agent_id: Optional[str] = None,
session_id: Optional[str] = None,
user_id: Optional[str] = None,
team_id: Optional[str] = None,
) -> None:
"""Extract decisions from tool calls in messages."""
for msg in messages:
if not hasattr(msg, "tool_calls") or not msg.tool_calls:
continue
for tool_call in msg.tool_calls:
tool_name = getattr(tool_call, "name", None) or getattr(
getattr(tool_call, "function", None), "name", None
)
if not tool_name:
continue
decision_id = f"dec_{uuid.uuid4().hex[:8]}"
decision = DecisionLog(
id=decision_id,
decision=f"Called tool: {tool_name}",
decision_type="tool_selection",
context="During conversation with user",
session_id=session_id,
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
created_at=datetime.utcnow().isoformat(),
)
self.save(decision=decision)
async def _aextract_decisions_from_messages(
self,
messages: List["Message"],
agent_id: Optional[str] = None,
session_id: Optional[str] = None,
user_id: Optional[str] = None,
team_id: Optional[str] = None,
) -> None:
"""Async version of _extract_decisions_from_messages."""
for msg in messages:
if not hasattr(msg, "tool_calls") or not msg.tool_calls:
continue
for tool_call in msg.tool_calls:
tool_name = getattr(tool_call, "name", None) or getattr(
getattr(tool_call, "function", None), "name", None
)
if not tool_name:
continue
decision_id = f"dec_{uuid.uuid4().hex[:8]}"
decision = DecisionLog(
id=decision_id,
decision=f"Called tool: {tool_name}",
decision_type="tool_selection",
context="During conversation with user",
session_id=session_id,
user_id=user_id,
agent_id=agent_id,
team_id=team_id,
created_at=datetime.utcnow().isoformat(),
)
await self.asave(decision=decision)
# =========================================================================
# Representation
# =========================================================================
def __repr__(self) -> str:
"""String representation for debugging."""
has_db = self.db is not None
has_model = self.model is not None
return (
f"DecisionLogStore("
f"mode={self.config.mode.value}, "
f"db={has_db}, "
f"model={has_model}, "
f"enable_agent_tools={self.config.enable_agent_tools})"
)
def print(
self,
agent_id: Optional[str] = None,
session_id: Optional[str] = None,
limit: int = 10,
*,
raw: bool = False,
) -> None:
"""Print formatted decision log.
Args:
agent_id: Filter by agent.
session_id: Filter by session.
limit: Maximum decisions to show.
raw: If True, print raw dict using pprint.
"""
from agno.learn.utils import print_panel
decisions = self.search(
agent_id=agent_id,
session_id=session_id,
limit=limit,
)
lines = []
for d in decisions:
lines.append(f"[{d.id}] {d.decision}")
if d.reasoning:
lines.append(f" Reasoning: {d.reasoning}")
if d.outcome:
lines.append(f" Outcome: {d.outcome}")
lines.append("")
subtitle = agent_id or session_id or "all"
print_panel(
title="Decision Log",
subtitle=subtitle,
lines=lines,
empty_message="No decisions logged",
raw_data=decisions,
raw=raw,
)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/learn/stores/decision_log.py",
"license": "Apache License 2.0",
"lines": 972,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/knowledge/filesystem.py | """
FileSystem Knowledge
====================
A Knowledge implementation that allows retrieval from files in a local directory.
Implements the KnowledgeProtocol and provides three tools:
- grep_file: Search for patterns in file contents
- list_files: List files matching a glob pattern
- get_file: Read the full contents of a specific file
"""
from dataclasses import dataclass, field
from os import walk as os_walk
from os.path import isabs as path_isabs
from pathlib import Path
from re import IGNORECASE
from re import compile as re_compile
from re import error as re_error
from re import escape as re_escape
from typing import Any, List, Optional
from agno.knowledge.document import Document
from agno.utils.log import log_debug, log_warning
@dataclass
class FileSystemKnowledge:
"""Knowledge implementation that searches files in a local directory.
Implements the KnowledgeProtocol and provides three tools to agents:
- grep_file(query): Search for patterns in file contents
- list_files(pattern): List files matching a glob pattern
- get_file(path): Read the full contents of a specific file
Example:
```python
from agno.agent import Agent
from agno.knowledge.filesystem import FileSystemKnowledge
from agno.models.openai import OpenAIChat
# Create knowledge for a directory
fs_knowledge = FileSystemKnowledge(base_dir="/path/to/code")
# Agent automatically gets grep_file, list_files, get_file tools
agent = Agent(
model=OpenAIChat(id="gpt-4o"),
knowledge=fs_knowledge,
search_knowledge=True,
)
# Agent can now search, list, and read files
agent.print_response("Find where main() is defined")
```
"""
base_dir: str
max_results: int = 50
include_patterns: List[str] = field(default_factory=list)
exclude_patterns: List[str] = field(
default_factory=lambda: [".git", "__pycache__", "node_modules", ".venv", "venv"]
)
def __post_init__(self):
self.base_path = Path(self.base_dir).resolve()
if not self.base_path.exists():
raise ValueError(f"Directory does not exist: {self.base_dir}")
if not self.base_path.is_dir():
raise ValueError(f"Path is not a directory: {self.base_dir}")
def _should_include_file(self, file_path: Path) -> bool:
"""Check if a file should be included based on patterns."""
path_str = str(file_path)
# Check exclude patterns
for pattern in self.exclude_patterns:
if pattern in path_str:
return False
# Check include patterns (if specified)
if self.include_patterns:
import fnmatch
for pattern in self.include_patterns:
if fnmatch.fnmatch(file_path.name, pattern):
return True
return False
return True
def _list_files(self, query: str, max_results: Optional[int] = None) -> List[Document]:
"""List files matching the query pattern (glob-style)."""
import fnmatch
results: List[Document] = []
limit = max_results or self.max_results
for root, dirs, files in os_walk(self.base_path):
# Filter out excluded directories
dirs[:] = [d for d in dirs if not any(excl in d for excl in self.exclude_patterns)]
for filename in files:
if len(results) >= limit:
break
file_path = Path(root) / filename
if not self._should_include_file(file_path):
continue
rel_path = file_path.relative_to(self.base_path)
# Match against query pattern (check both filename and relative path)
if query and query != "*":
if not (fnmatch.fnmatch(filename, query) or fnmatch.fnmatch(str(rel_path), query)):
continue
results.append(
Document(
name=str(rel_path),
content=str(rel_path),
meta_data={
"type": "file_listing",
"absolute_path": str(file_path),
"extension": file_path.suffix,
"size": file_path.stat().st_size,
},
)
)
if len(results) >= limit:
break
log_debug(f"Found {len(results)} files matching pattern: {query}")
return results
    def _get_file(self, query: str) -> List[Document]:
        """Get the contents of a specific file.

        Args:
            query: File path, either absolute or relative to ``base_path``.

        Returns:
            A single-element list with the file's Document, or an empty list
            when the path is missing, not a file, or unreadable.
        """
        # Handle both relative and absolute paths
        # NOTE(review): absolute paths (and relative paths containing "..")
        # can resolve outside base_dir, so an agent-supplied path may read
        # files outside the knowledge base — confirm whether such reads
        # should be rejected.
        if path_isabs(query):
            file_path = Path(query)
        else:
            file_path = self.base_path / query
        if not file_path.exists():
            log_warning(f"File not found: {query}")
            return []
        if not file_path.is_file():
            log_warning(f"Path is not a file: {query}")
            return []
        try:
            # errors="replace" keeps reads from failing on non-UTF-8 bytes.
            content = file_path.read_text(encoding="utf-8", errors="replace")
            # Prefer a base-relative name; fall back to the full path for
            # files living outside base_path.
            rel_path = file_path.relative_to(self.base_path) if file_path.is_relative_to(self.base_path) else file_path
            return [
                Document(
                    name=str(rel_path),
                    content=content,
                    meta_data={
                        "type": "file_content",
                        "absolute_path": str(file_path),
                        "extension": file_path.suffix,
                        "size": len(content),
                        "lines": content.count("\n") + 1,
                    },
                )
            ]
        except Exception as e:
            log_warning(f"Error reading file {query}: {e}")
            return []
    def _grep(self, query: str, max_results: Optional[int] = None) -> List[Document]:
        """Search for a pattern within file contents.

        Args:
            query: Regex pattern (matched case-insensitively). If it does not
                compile, it is escaped and searched as a literal string.
            max_results: Cap on the number of matching files returned
                (defaults to ``self.max_results``).

        Returns:
            One Document per matching file, with up to 10 match contexts
            joined by "---" separators.
        """
        results: List[Document] = []
        limit = max_results or self.max_results
        try:
            pattern = re_compile(query, IGNORECASE)
        except re_error:
            # If not a valid regex, treat as literal string
            pattern = re_compile(re_escape(query), IGNORECASE)
        for root, dirs, files in os_walk(self.base_path):
            # Filter out excluded directories
            # (in-place mutation of `dirs` prunes os.walk's descent)
            dirs[:] = [d for d in dirs if not any(excl in d for excl in self.exclude_patterns)]
            for filename in files:
                if len(results) >= limit:
                    break
                file_path = Path(root) / filename
                if not self._should_include_file(file_path):
                    continue
                try:
                    content = file_path.read_text(encoding="utf-8", errors="replace")
                    matches = list(pattern.finditer(content))
                    if matches:
                        # Extract matching lines with context
                        lines = content.split("\n")
                        matching_lines: List[dict[str, Any]] = []
                        for match in matches[:10]:  # Limit matches per file
                            # Find the line number: the newline count before
                            # the match start is the 0-based line index.
                            line_start = content.count("\n", 0, match.start())
                            line_num = line_start + 1
                            # Get context (1 line before and after)
                            start_idx = max(0, line_start - 1)
                            end_idx = min(len(lines), line_start + 2)
                            context_lines = lines[start_idx:end_idx]
                            matching_lines.append(
                                {
                                    "line": line_num,
                                    "match": match.group(),
                                    "context": "\n".join(context_lines),
                                }
                            )
                        rel_path = file_path.relative_to(self.base_path)
                        results.append(
                            Document(
                                name=str(rel_path),
                                content="\n---\n".join(str(m["context"]) for m in matching_lines),
                                meta_data={
                                    "type": "grep_result",
                                    "absolute_path": str(file_path),
                                    "match_count": len(matches),
                                    "matches": matching_lines[:5],  # Include first 5 match details
                                },
                            )
                        )
                except Exception as e:
                    # Skip files that can't be read (binary, permissions, etc.)
                    log_debug(f"Skipping file {file_path}: {e}")
                    continue
            if len(results) >= limit:
                break
        log_debug(f"Found {len(results)} files with matches for: {query}")
        return results
# ========================================================================
# Protocol Implementation (build_context, get_tools, retrieve)
# ========================================================================
def build_context(self, **kwargs) -> str:
"""Build context string for the agent's system prompt.
Returns instructions about the three available filesystem tools.
Args:
**kwargs: Additional context (unused).
Returns:
Context string describing available tools.
"""
from textwrap import dedent
return dedent(
f"""
You have access to a filesystem knowledge base containing documents at: {self.base_dir}
IMPORTANT: You MUST use these tools to search and read files before answering questions.
Do NOT answer from your own knowledge - always search the files first.
Available tools:
- grep_file(query): Search for keywords or patterns in file contents. Use this to find relevant information.
- list_files(pattern): List available files. Use "*" to see all files, or "*.md" for specific types.
- get_file(path): Read the full contents of a specific file.
When answering questions:
1. First use grep_file to search for relevant terms in the documents
2. Or use list_files to see what documents are available, then get_file to read them
3. Base your answer on what you find in the files
"""
).strip()
def get_tools(self, **kwargs) -> List[Any]:
"""Get tools to expose to the agent.
Returns three filesystem tools: grep_file, list_files, get_file.
Args:
**kwargs: Additional context (unused).
Returns:
List of filesystem tools.
"""
return [
self._create_grep_tool(),
self._create_list_files_tool(),
self._create_get_file_tool(),
]
    async def aget_tools(self, **kwargs) -> List[Any]:
        """Async version of get_tools.

        Local filesystem tool construction needs no awaiting, so this simply
        delegates to the synchronous implementation.
        """
        return self.get_tools(**kwargs)
def _create_grep_tool(self) -> Any:
    """Build the grep_file tool exposed to the agent."""
    from agno.tools.function import Function

    def grep_file(query: str, max_results: int = 20) -> str:
        """Search the knowledge base files for a keyword or pattern.

        Use this tool to find information in the documents. Search for relevant
        terms from the user's question to find answers.

        Args:
            query: The keyword or pattern to search for (e.g., "coffee", "cappuccino", "brewing").
            max_results: Maximum number of files to return (default: 20).

        Returns:
            Matching content from files with context around each match.
        """
        # The docstring above doubles as the tool description shown to the
        # model, so it is kept user-facing and unchanged.
        matches = self._grep(query, max_results=max_results)
        if not matches:
            return f"No matches found for: {query}"
        sections = [f"### {doc.name}\n{doc.content}" for doc in matches]
        return "\n\n".join(sections)

    return Function.from_callable(grep_file, name="grep_file")
def _create_list_files_tool(self) -> Any:
    """Build the list_files tool exposed to the agent."""
    from agno.tools.function import Function

    def list_files(pattern: str = "*", max_results: int = 50) -> str:
        """List available files in the knowledge base.

        Use this to see what documents are available to search.

        Args:
            pattern: Glob pattern to match (e.g., "*.md", "*.txt"). Default: "*" for all files.
            max_results: Maximum number of files to return (default: 50).

        Returns:
            List of available file paths.
        """
        # The docstring above doubles as the tool description shown to the
        # model, so it is kept user-facing and unchanged.
        docs = self._list_files(pattern, max_results=max_results)
        if not docs:
            return f"No files found matching: {pattern}"
        names = [doc.name for doc in docs]
        listing = "\n".join(f"- {name}" for name in names)
        return f"Found {len(names)} files:\n" + listing

    return Function.from_callable(list_files, name="list_files")
def _create_get_file_tool(self) -> Any:
    """Build the get_file tool exposed to the agent."""
    from agno.tools.function import Function

    def get_file(path: str) -> str:
        """Read the full contents of a document from the knowledge base.

        Use this after list_files to read a specific document.

        Args:
            path: Path to the file (e.g., "coffee.md", "guide.txt").

        Returns:
            The full file contents.
        """
        # The docstring above doubles as the tool description shown to the
        # model, so it is kept user-facing and unchanged.
        found = self._get_file(path)
        if not found:
            return f"File not found: {path}"
        first = found[0]
        return f"### {first.name}\n```\n{first.content}\n```"

    return Function.from_callable(get_file, name="get_file")
def retrieve(
    self,
    query: str,
    max_results: Optional[int] = None,
    **kwargs,
) -> List[Document]:
    """Retrieve documents for context injection.

    Grep is the default retrieval strategy here, since keyword matching is
    the most likely to surface relevant content for a natural-language query.

    Args:
        query: The query string.
        max_results: Maximum number of results; defaults to 10 when unset.
        **kwargs: Additional parameters (unused).

    Returns:
        List of Document objects.
    """
    # `or 10` also maps an explicit max_results=0 to the default of 10.
    limit = max_results or 10
    return self._grep(query, max_results=limit)
async def aretrieve(
    self,
    query: str,
    max_results: Optional[int] = None,
    **kwargs,
) -> List[Document]:
    """Async counterpart of retrieve.

    The filesystem grep is synchronous, so this simply delegates to the
    sync implementation.
    """
    return self.retrieve(query, max_results=max_results, **kwargs)
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/knowledge/filesystem.py",
"license": "Apache License 2.0",
"lines": 323,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
agno-agi/agno:libs/agno/agno/knowledge/protocol.py | """
Knowledge Protocol
==================
Defines the minimal interface that knowledge implementations must implement.
This protocol enables:
- Custom knowledge bases to be used with agents
- Each implementation defines its own tools and context
- Flexible tool naming (not forced to use 'search')
- Type safety with Protocol typing
"""
from typing import Callable, List, Protocol, runtime_checkable
from agno.knowledge.document import Document
@runtime_checkable
class KnowledgeProtocol(Protocol):
    """Minimal structural interface for knowledge implementations.

    Any object providing the methods below can be passed to an agent as its
    knowledge base — no inheritance required. Each implementation decides
    which tools it exposes and which instructions it contributes to the
    agent's system prompt, so tool naming is fully flexible (not forced to
    be 'search').

    Required methods:
        - build_context(): instructions for the agent's system prompt
        - get_tools(): tools to expose to the agent
        - aget_tools(): async version of get_tools

    Optional methods:
        - retrieve(): default retrieval for context injection (add_knowledge_to_context)
        - aretrieve(): async version of retrieve

    Example:
        ```python
        from agno.knowledge.protocol import KnowledgeProtocol
        from agno.knowledge.document import Document

        class MyKnowledge:
            def build_context(self, **kwargs) -> str:
                return "Use search_docs to find information."

            def get_tools(self, **kwargs) -> List[Callable]:
                return [self.search_docs]

            async def aget_tools(self, **kwargs) -> List[Callable]:
                return [self.search_docs]

            def search_docs(self, query: str) -> str:
                # Your search implementation
                return "Results for: " + query

            # Optional: for add_knowledge_to_context feature
            def retrieve(self, query: str, **kwargs) -> List[Document]:
                results = self._internal_search(query)
                return [Document(content=r) for r in results]

        # MyKnowledge satisfies KnowledgeProtocol
        agent = Agent(knowledge=MyKnowledge())
        ```
    """

    def build_context(self, **kwargs) -> str:
        """Build the context string injected into the agent's system prompt.

        The returned string should explain how to use this knowledge base:
        which tools exist and any usage guidelines.

        Args:
            **kwargs: Context including enable_agentic_filters, etc.

        Returns:
            Formatted context string to inject into the system prompt.
        """
        ...

    def get_tools(self, **kwargs) -> List[Callable]:
        """Return the callable tools the agent can use with this knowledge.

        Each implementation picks the tools that make sense for it
        (e.g. search, grep, list_files, query_db).

        Args:
            **kwargs: Context including run_response, run_context,
                async_mode, enable_agentic_filters, agent, etc.

        Returns:
            List of callable tools.
        """
        ...

    async def aget_tools(self, **kwargs) -> List[Callable]:
        """Async version of get_tools.

        Args:
            **kwargs: Same as get_tools.

        Returns:
            List of callable tools.
        """
        ...

    # Optional methods — used only by the add_knowledge_to_context feature.
    # Implementations that don't support context injection may omit them.

    def retrieve(self, query: str, **kwargs) -> List[Document]:
        """Retrieve documents for context injection.

        The add_knowledge_to_context feature calls this to pre-fetch
        relevant documents into the user message. Optional: when not
        implemented, add_knowledge_to_context is skipped.

        Args:
            query: The query string.
            **kwargs: Additional parameters (max_results, filters, etc.)

        Returns:
            List of Document objects.
        """
        ...

    async def aretrieve(self, query: str, **kwargs) -> List[Document]:
        """Async version of retrieve.

        Args:
            query: The query string.
            **kwargs: Additional parameters.

        Returns:
            List of Document objects.
        """
        ...
| {
"repo_id": "agno-agi/agno",
"file_path": "libs/agno/agno/knowledge/protocol.py",
"license": "Apache License 2.0",
"lines": 100,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.