| sample_id (string, 21–196 chars) | text (string, 105–936k chars) | metadata (dict) | category (6 classes) |
|---|---|---|---|
# letta-ai/letta : letta/services/mcp_server_manager.py
import json
import os
import secrets
import uuid
from datetime import datetime, timedelta
from typing import Any, Dict, List, Optional, Tuple, Union
from fastapi import HTTPException
from sqlalchemy import delete, desc, null, select
from starlette.requests import Request
import letta.constants as constants
from letta.functions.mcp_client.types import (
MCPServerType,
MCPTool,
MCPToolHealth,
SSEServerConfig,
StdioServerConfig,
StreamableHTTPServerConfig,
)
from letta.functions.schema_generator import normalize_mcp_schema
from letta.functions.schema_validator import validate_complete_json_schema
from letta.log import get_logger
from letta.orm.errors import NoResultFound
from letta.orm.mcp_oauth import MCPOAuth, OAuthSessionStatus
from letta.orm.mcp_server import MCPServer as MCPServerModel, MCPTools as MCPToolsModel
from letta.orm.tool import Tool as ToolModel
from letta.schemas.mcp import (
MCPOAuthSession,
MCPOAuthSessionCreate,
MCPOAuthSessionUpdate,
MCPServer,
MCPServerResyncResult,
UpdateMCPServer,
UpdateSSEMCPServer,
UpdateStdioMCPServer,
UpdateStreamableHTTPMCPServer,
)
from letta.schemas.mcp_server import CreateMCPServerRequest, CreateSSEMCPServer, CreateStdioMCPServer, CreateStreamableHTTPMCPServer
from letta.schemas.secret import Secret
from letta.schemas.tool import Tool as PydanticTool, ToolCreate, ToolUpdate
from letta.schemas.user import User as PydanticUser
from letta.server.db import db_registry
from letta.services.mcp.fastmcp_client import AsyncFastMCPSSEClient, AsyncFastMCPStreamableHTTPClient
from letta.services.mcp.server_side_oauth import ServerSideOAuth
from letta.services.mcp.sse_client import MCP_CONFIG_TOPLEVEL_KEY
from letta.services.mcp.stdio_client import AsyncStdioMCPClient
from letta.services.tool_manager import ToolManager
from letta.settings import tool_settings
from letta.utils import enforce_types, printd, safe_create_task
logger = get_logger(__name__)
class MCPServerManager:
"""Manager class to handle business logic related to MCP."""
def __init__(self):
# TODO: timeouts?
self.tool_manager = ToolManager()
self.cached_mcp_servers = {} # maps id -> async connection
# MCPTools mapping table management methods
@enforce_types
async def create_mcp_tool_mapping(self, mcp_server_id: str, tool_id: str, actor: PydanticUser) -> None:
"""Create a mapping between an MCP server and a tool."""
async with db_registry.async_session() as session:
mapping = MCPToolsModel(
id=f"mcp-tool-mapping-{uuid.uuid4()}",
mcp_server_id=mcp_server_id,
tool_id=tool_id,
organization_id=actor.organization_id,
)
await mapping.create_async(session, actor=actor)
@enforce_types
async def delete_mcp_tool_mappings_by_server(self, mcp_server_id: str, actor: PydanticUser) -> None:
"""Delete all tool mappings for a specific MCP server."""
async with db_registry.async_session() as session:
await session.execute(
delete(MCPToolsModel).where(
MCPToolsModel.mcp_server_id == mcp_server_id,
MCPToolsModel.organization_id == actor.organization_id,
)
)
# context manager now handles commits
# await session.commit()
@enforce_types
async def get_tool_ids_by_mcp_server(self, mcp_server_id: str, actor: PydanticUser) -> List[str]:
"""Get all tool IDs associated with an MCP server."""
async with db_registry.async_session() as session:
result = await session.execute(
select(MCPToolsModel.tool_id).where(
MCPToolsModel.mcp_server_id == mcp_server_id,
MCPToolsModel.organization_id == actor.organization_id,
)
)
return [row[0] for row in result.fetchall()]
@enforce_types
async def get_mcp_server_id_by_tool(self, tool_id: str, actor: PydanticUser) -> Optional[str]:
"""Get the MCP server ID associated with a tool."""
async with db_registry.async_session() as session:
result = await session.execute(
select(MCPToolsModel.mcp_server_id).where(
MCPToolsModel.tool_id == tool_id,
MCPToolsModel.organization_id == actor.organization_id,
)
)
row = result.fetchone()
return row[0] if row else None
@enforce_types
async def list_tools_by_mcp_server_from_db(self, mcp_server_id: str, actor: PydanticUser) -> List[PydanticTool]:
"""
Get tools associated with an MCP server from the database using the MCPTools mapping.
This is more efficient than fetching from the MCP server directly.
"""
# First get all tool IDs associated with this MCP server
tool_ids = await self.get_tool_ids_by_mcp_server(mcp_server_id, actor)
if not tool_ids:
return []
# Fetch all tools in a single query
async with db_registry.async_session() as session:
result = await session.execute(
select(ToolModel).where(
ToolModel.id.in_(tool_ids),
ToolModel.organization_id == actor.organization_id,
)
)
tools = result.scalars().all()
return [tool.to_pydantic() for tool in tools]
@enforce_types
async def get_tool_by_mcp_server(self, mcp_server_id: str, tool_id: str, actor: PydanticUser) -> Optional[PydanticTool]:
"""
Get a specific tool that belongs to an MCP server.
Verifies the tool is associated with the MCP server via the mapping table.
"""
async with db_registry.async_session() as session:
# Check if the tool is associated with this MCP server
result = await session.execute(
select(MCPToolsModel).where(
MCPToolsModel.mcp_server_id == mcp_server_id,
MCPToolsModel.tool_id == tool_id,
MCPToolsModel.organization_id == actor.organization_id,
)
)
mapping = result.scalar_one_or_none()
if not mapping:
return None
# Fetch the tool
tool = await ToolModel.read_async(db_session=session, identifier=tool_id, actor=actor)
return tool.to_pydantic()
    @enforce_types
    async def list_mcp_server_tools(self, mcp_server_id: str, actor: PydanticUser, agent_id: Optional[str] = None) -> List[MCPTool]:
        """Get a list of all tools for a specific MCP server by server ID.

        Connects to the live MCP server, lists its tools, and annotates each
        with schema-health information (after normalizing its input schema).
        The client connection is always cleaned up, even on failure.
        """
        mcp_client = None
        try:
            mcp_config = await self.get_mcp_server_by_id_async(mcp_server_id, actor=actor)
            server_config = await mcp_config.to_config_async()
            mcp_client = await self.get_mcp_client(server_config, actor, agent_id=agent_id)
            await mcp_client.connect_to_server()
            # list tools
            tools = await mcp_client.list_tools()
            # Add health information to each tool
            for tool in tools:
                # Try to normalize the schema and re-validate
                if tool.inputSchema:
                    tool.inputSchema = normalize_mcp_schema(tool.inputSchema)
                health_status, reasons = validate_complete_json_schema(tool.inputSchema)
                tool.health = MCPToolHealth(status=health_status.value, reasons=reasons)
            return tools
        except Exception as e:
            # MCP tool listing errors are often due to connection/configuration issues, not system errors.
            # Log at warning (not error) level to avoid triggering alerts for expected failures.
            logger.warning(f"Error listing tools for MCP server {mcp_server_id}: {e}")
            raise e
        finally:
            # Always tear down the client connection.
            if mcp_client:
                await mcp_client.cleanup()
    @enforce_types
    async def execute_mcp_server_tool(
        self,
        mcp_server_id: str,
        tool_id: str,
        tool_args: Optional[Dict[str, Any]],
        environment_variables: Dict[str, str],
        actor: PydanticUser,
        agent_id: Optional[str] = None,
    ) -> Tuple[str, bool]:
        """Call a specific tool from a specific MCP server by IDs.

        Args:
            mcp_server_id: ID of the MCP server hosting the tool.
            tool_id: ID of the persisted tool; its name is what the MCP server is invoked with.
            tool_args: Arguments forwarded to the MCP tool call (may be None).
            environment_variables: Passed into the server config before connecting.
            actor: User performing the call.
            agent_id: Optional agent context forwarded to the client factory.

        Returns:
            Tuple of (result, success) as reported by the MCP client.
        """
        mcp_client = None
        try:
            # Get the tool to find its actual name — the MCP server knows tools
            # by name, not by our database ID.
            async with db_registry.async_session() as session:
                tool = await ToolModel.read_async(db_session=session, identifier=tool_id, actor=actor)
                tool_name = tool.name
            # Get the MCP server config
            mcp_config = await self.get_mcp_server_by_id_async(mcp_server_id, actor=actor)
            server_config = await mcp_config.to_config_async(environment_variables)
            mcp_client = await self.get_mcp_client(server_config, actor, agent_id=agent_id)
            await mcp_client.connect_to_server()
            # call tool
            result, success = await mcp_client.execute_tool(tool_name, tool_args)
            logger.info(f"MCP Result: {result}, Success: {success}")
            return result, success
        finally:
            # Always tear down the client connection, even when the call raises.
            if mcp_client:
                await mcp_client.cleanup()
@enforce_types
async def add_tool_from_mcp_server(self, mcp_server_id: str, mcp_tool_name: str, actor: PydanticUser) -> PydanticTool:
"""Add a tool from an MCP server to the Letta tool registry."""
# Get the MCP server to get its name
mcp_server = await self.get_mcp_server_by_id_async(mcp_server_id, actor=actor)
mcp_server_name = mcp_server.server_name
mcp_tools = await self.list_mcp_server_tools(mcp_server_id, actor=actor)
for mcp_tool in mcp_tools:
# TODO: @jnjpng move health check to tool class
if mcp_tool.name == mcp_tool_name:
# Check tool health - but try normalization first for INVALID schemas
if mcp_tool.health and mcp_tool.health.status == "INVALID":
logger.info(f"Attempting to normalize INVALID schema for tool {mcp_tool_name}")
logger.info(f"Original health reasons: {mcp_tool.health.reasons}")
# Try to normalize the schema and re-validate
try:
# Normalize the schema to fix common issues
logger.debug(f"Normalizing schema for {mcp_tool_name}")
normalized_schema = normalize_mcp_schema(mcp_tool.inputSchema)
# Re-validate after normalization
logger.debug(f"Re-validating schema for {mcp_tool_name}")
health_status, health_reasons = validate_complete_json_schema(normalized_schema)
logger.info(f"After normalization: status={health_status.value}, reasons={health_reasons}")
# Update the tool's schema and health (use inputSchema, not input_schema)
mcp_tool.inputSchema = normalized_schema
mcp_tool.health.status = health_status.value
mcp_tool.health.reasons = health_reasons
# Log the normalization result
if health_status.value != "INVALID":
logger.info(f"✓ MCP tool {mcp_tool_name} schema normalized successfully: {health_status.value}")
else:
logger.warning(f"MCP tool {mcp_tool_name} still INVALID after normalization. Reasons: {health_reasons}")
except Exception as e:
logger.error(f"Failed to normalize schema for tool {mcp_tool_name}: {e}", exc_info=True)
# After normalization attempt, check if still INVALID
if mcp_tool.health and mcp_tool.health.status == "INVALID":
logger.warning(f"Tool {mcp_tool_name} has potentially invalid schema. Reasons: {', '.join(mcp_tool.health.reasons)}")
tool_create = ToolCreate.from_mcp(mcp_server_name=mcp_server_name, mcp_tool=mcp_tool)
created_tool = await self.tool_manager.create_mcp_tool_async(
tool_create=tool_create, mcp_server_name=mcp_server_name, mcp_server_id=mcp_server_id, actor=actor
)
# Create mapping in MCPTools table
if created_tool:
await self.create_mcp_tool_mapping(mcp_server_id, created_tool.id, actor)
return created_tool
# failed to add - handle error?
return None
    @enforce_types
    async def resync_mcp_server_tools(
        self, mcp_server_id: str, actor: PydanticUser, agent_id: Optional[str] = None
    ) -> MCPServerResyncResult:
        """
        Resync tools for an MCP server by:
        1. Fetching current tools from the MCP server
        2. Deleting tools that no longer exist on the server
        3. Updating schemas for existing tools
        4. Adding new tools from the server
        Returns a result with:
        - deleted: List of deleted tool names
        - updated: List of updated tool names
        - added: List of added tool names

        Raises:
            HTTPException: 404 when the MCP server cannot be reached to list tools.
        """
        # Get the MCP server to get its name
        mcp_server = await self.get_mcp_server_by_id_async(mcp_server_id, actor=actor)
        mcp_server_name = mcp_server.server_name
        # Fetch current tools from MCP server
        try:
            current_mcp_tools = await self.list_mcp_server_tools(mcp_server_id, actor=actor, agent_id=agent_id)
        except Exception as e:
            logger.error(f"Failed to fetch tools from MCP server {mcp_server_name}: {e}")
            raise HTTPException(
                status_code=404,
                detail={
                    "code": "MCPServerUnavailable",
                    "message": f"Could not connect to MCP server {mcp_server_name} to resync tools",
                    "error": str(e),
                },
            )
        # Get all persisted tools for this MCP server
        async with db_registry.async_session() as session:
            # Query for tools with MCP metadata matching this server
            # Using JSON path query to filter by metadata
            persisted_tools = await ToolModel.list_async(
                db_session=session,
                organization_id=actor.organization_id,
            )
            # Filter tools that belong to this MCP server (matched via the
            # server_id stored in each tool's MCP metadata).
            mcp_tools = []
            for tool in persisted_tools:
                if tool.metadata_ and constants.MCP_TOOL_TAG_NAME_PREFIX in tool.metadata_:
                    if tool.metadata_[constants.MCP_TOOL_TAG_NAME_PREFIX].get("server_id") == mcp_server_id:
                        mcp_tools.append(tool)
            # Create maps for easier comparison (keyed by tool name)
            current_tool_map = {tool.name: tool for tool in current_mcp_tools}
            persisted_tool_map = {tool.name: tool for tool in mcp_tools}
            deleted_tools = []
            updated_tools = []
            added_tools = []
            # 1. Delete tools that no longer exist on the server
            for tool_name, persisted_tool in persisted_tool_map.items():
                if tool_name not in current_tool_map:
                    # Delete the tool (cascade will handle agent detachment)
                    await persisted_tool.hard_delete_async(db_session=session, actor=actor)
                    deleted_tools.append(tool_name)
                    logger.info(f"Deleted MCP tool {tool_name} as it no longer exists on server {mcp_server_name}")
            # Commit deletions
            # context manager now handles commits
            # await session.commit()
            # 2. Update existing tools and add new tools
            for tool_name, current_tool in current_tool_map.items():
                if tool_name in persisted_tool_map:
                    # Update existing tool
                    persisted_tool = persisted_tool_map[tool_name]
                    tool_create = ToolCreate.from_mcp(mcp_server_name=mcp_server_name, mcp_tool=current_tool)
                    # Check if schema has changed; skip the write when nothing differs
                    if persisted_tool.json_schema != tool_create.json_schema:
                        # Update the tool
                        update_data = ToolUpdate(
                            description=tool_create.description,
                            json_schema=tool_create.json_schema,
                            source_code=tool_create.source_code,
                        )
                        await self.tool_manager.update_tool_by_id_async(tool_id=persisted_tool.id, tool_update=update_data, actor=actor)
                        updated_tools.append(tool_name)
                        logger.info(f"Updated MCP tool {tool_name} with new schema from server {mcp_server_name}")
                else:
                    # Add new tool
                    # Skip INVALID tools
                    if current_tool.health and current_tool.health.status == "INVALID":
                        logger.warning(
                            f"Skipping invalid tool {tool_name} from MCP server {mcp_server_name}: {', '.join(current_tool.health.reasons)}"
                        )
                        continue
                    tool_create = ToolCreate.from_mcp(mcp_server_name=mcp_server_name, mcp_tool=current_tool)
                    created_tool = await self.tool_manager.create_mcp_tool_async(
                        tool_create=tool_create, mcp_server_name=mcp_server_name, mcp_server_id=mcp_server_id, actor=actor
                    )
                    # Create mapping in MCPTools table
                    if created_tool:
                        await self.create_mcp_tool_mapping(mcp_server_id, created_tool.id, actor)
                        added_tools.append(tool_name)
                        logger.info(f"Added new MCP tool {tool_name} from server {mcp_server_name} with mapping")
        return MCPServerResyncResult(
            deleted=deleted_tools,
            updated=updated_tools,
            added=added_tools,
        )
@enforce_types
async def list_mcp_servers(self, actor: PydanticUser) -> List[MCPServer]:
"""List all MCP servers available"""
async with db_registry.async_session() as session:
mcp_servers = await MCPServerModel.list_async(
db_session=session,
organization_id=actor.organization_id,
# SqlalchemyBase.list_async defaults to limit=50; MCP servers should not be capped.
# Use a higher limit until we implement proper pagination in the API/SDK.
limit=200,
)
return [mcp_server.to_pydantic() for mcp_server in mcp_servers]
    @enforce_types
    async def create_or_update_mcp_server(self, pydantic_mcp_server: MCPServer, actor: PydanticUser) -> MCPServer:
        """Create an MCP server, or update the existing one with the same name.

        If a server with this name already exists for the actor, only its
        config fields (URL/token/stdio config) are updated — never the name.
        Otherwise a new server record is created.
        """
        mcp_server_id = await self.get_mcp_server_id_by_name(mcp_server_name=pydantic_mcp_server.server_name, actor=actor)
        if mcp_server_id:
            # Put to dict and remove fields that should not be reset
            update_data = pydantic_mcp_server.model_dump(exclude_unset=True, exclude_none=True)
            # If there's anything to update (can only update the configs, not the name)
            # TODO: pass in custom headers for update as well?
            if update_data:
                # Build the type-specific update request for the server's transport.
                if pydantic_mcp_server.server_type == MCPServerType.SSE:
                    update_request = UpdateSSEMCPServer(server_url=pydantic_mcp_server.server_url, token=pydantic_mcp_server.token)
                elif pydantic_mcp_server.server_type == MCPServerType.STDIO:
                    update_request = UpdateStdioMCPServer(stdio_config=pydantic_mcp_server.stdio_config)
                elif pydantic_mcp_server.server_type == MCPServerType.STREAMABLE_HTTP:
                    update_request = UpdateStreamableHTTPMCPServer(
                        server_url=pydantic_mcp_server.server_url, auth_token=pydantic_mcp_server.token
                    )
                else:
                    raise ValueError(f"Unsupported server type: {pydantic_mcp_server.server_type}")
                mcp_server = await self.update_mcp_server_by_id(mcp_server_id, update_request, actor)
            else:
                # Nothing to change — return the existing record untouched.
                printd(
                    f"`create_or_update_mcp_server` was called with user_id={actor.id}, organization_id={actor.organization_id}, name={pydantic_mcp_server.server_name}, but found existing mcp server with nothing to update."
                )
                mcp_server = await self.get_mcp_server_by_id_async(mcp_server_id, actor=actor)
        else:
            mcp_server = await self.create_mcp_server(pydantic_mcp_server, actor=actor)
        return mcp_server
    @enforce_types
    async def create_mcp_server(self, pydantic_mcp_server: MCPServer, actor: PydanticUser) -> MCPServer:
        """Create a new MCP server record.

        Encrypts the token and custom headers before persisting, and adopts any
        OAuth sessions for the same user/server URL that are not yet linked to
        a server (e.g. sessions created while testing the connection). All
        writes happen in one transaction; it is rolled back on any failure.
        """
        async with db_registry.async_session() as session:
            try:
                # Set the organization id at the ORM layer
                pydantic_mcp_server.organization_id = actor.organization_id
                # Explicitly populate encrypted fields (async to avoid blocking event loop)
                if pydantic_mcp_server.token is not None:
                    pydantic_mcp_server.token_enc = await Secret.from_plaintext_async(pydantic_mcp_server.token)
                if pydantic_mcp_server.custom_headers is not None:
                    # custom_headers is a Dict[str, str], serialize to JSON then encrypt
                    import json

                    json_str = json.dumps(pydantic_mcp_server.custom_headers)
                    pydantic_mcp_server.custom_headers_enc = await Secret.from_plaintext_async(json_str)
                mcp_server_data = pydantic_mcp_server.model_dump(to_orm=True)
                # Ensure custom_headers None is stored as SQL NULL, not JSON null
                if mcp_server_data.get("custom_headers") is None:
                    mcp_server_data.pop("custom_headers", None)
                mcp_server = MCPServerModel(**mcp_server_data)
                # no_commit: the insert must stay in the same transaction as the
                # OAuth-session linking below.
                mcp_server = await mcp_server.create_async(session, actor=actor, no_commit=True)
                # Link existing OAuth sessions for the same user and server URL.
                # This ensures OAuth sessions created during testing get linked to the server.
                # Also updates the server_name to match the new MCP server's name.
                server_url = getattr(mcp_server, "server_url", None)
                server_name = getattr(mcp_server, "server_name", None)
                if server_url:
                    result = await session.execute(
                        select(MCPOAuth).where(
                            MCPOAuth.server_url == server_url,
                            MCPOAuth.organization_id == actor.organization_id,
                            MCPOAuth.user_id == actor.id,  # Only link sessions for the same user
                            MCPOAuth.server_id.is_(None),  # Only update sessions not already linked
                        )
                    )
                    oauth_sessions = result.scalars().all()
                    # TODO: @jnjpng we should update sessions in bulk
                    for oauth_session in oauth_sessions:
                        oauth_session.server_id = mcp_server.id
                        # Update server_name to match the persisted MCP server's name
                        if server_name:
                            oauth_session.server_name = server_name
                        await oauth_session.update_async(db_session=session, actor=actor, no_commit=True)
                    if oauth_sessions:
                        logger.info(
                            f"Linked {len(oauth_sessions)} OAuth sessions to MCP server {mcp_server.id} "
                            f"(URL: {server_url}, name: {server_name}) for user {actor.id}"
                        )
                # context manager now handles commits
                # await session.commit()
                return mcp_server.to_pydantic()
            except Exception:
                # Roll back the uncommitted server/session writes on any failure.
                await session.rollback()
                raise
@enforce_types
async def create_mcp_server_from_config(
self, server_config: Union[StdioServerConfig, SSEServerConfig, StreamableHTTPServerConfig], actor: PydanticUser
) -> MCPServer:
"""
Create an MCP server from a config object, handling encryption of sensitive fields.
This method converts the server config to an MCPServer model and encrypts
sensitive fields like tokens and custom headers.
"""
# Create base MCPServer object
if isinstance(server_config, StdioServerConfig):
# Check if stdio MCP servers are disabled (not suitable for multi-tenant deployments)
if tool_settings.mcp_disable_stdio:
raise ValueError("MCP stdio servers are disabled. Set MCP_DISABLE_STDIO=false to enable them.")
mcp_server = MCPServer(server_name=server_config.server_name, server_type=server_config.type, stdio_config=server_config)
elif isinstance(server_config, SSEServerConfig):
mcp_server = MCPServer(
server_name=server_config.server_name,
server_type=server_config.type,
server_url=server_config.server_url,
)
# Encrypt sensitive fields (async to avoid blocking event loop)
token = server_config.resolve_token()
if token:
token_secret = await Secret.from_plaintext_async(token)
mcp_server.set_token_secret(token_secret)
if server_config.custom_headers:
# Convert dict to JSON string, then encrypt as Secret
headers_json = json.dumps(server_config.custom_headers)
headers_secret = await Secret.from_plaintext_async(headers_json)
mcp_server.set_custom_headers_secret(headers_secret)
elif isinstance(server_config, StreamableHTTPServerConfig):
mcp_server = MCPServer(
server_name=server_config.server_name,
server_type=server_config.type,
server_url=server_config.server_url,
)
# Encrypt sensitive fields (async to avoid blocking event loop)
token = server_config.resolve_token()
if token:
token_secret = await Secret.from_plaintext_async(token)
mcp_server.set_token_secret(token_secret)
if server_config.custom_headers:
# Convert dict to JSON string, then encrypt as Secret
headers_json = json.dumps(server_config.custom_headers)
headers_secret = await Secret.from_plaintext_async(headers_json)
mcp_server.set_custom_headers_secret(headers_secret)
else:
raise ValueError(f"Unsupported server config type: {type(server_config)}")
return mcp_server
@enforce_types
async def create_mcp_server_from_request(self, request: CreateMCPServerRequest, actor: PydanticUser) -> MCPServer:
"""
Create an MCP server from a request object.
"""
# Convert CreateMCPServerUnion to ServerConfig union by adding server_name
config_type_map = {
CreateStdioMCPServer: StdioServerConfig,
CreateSSEMCPServer: SSEServerConfig,
CreateStreamableHTTPMCPServer: StreamableHTTPServerConfig,
}
config_dict = request.config.model_dump(exclude={"mcp_server_type"})
config_dict["server_name"] = request.server_name
config_dict["type"] = request.config.mcp_server_type
server_config = config_type_map[type(request.config)](**config_dict)
# Create the MCP server object (with encryption of sensitive fields)
mcp_server = await self.create_mcp_server_from_config(server_config, actor)
# Persist to database and sync tools
return await self.create_mcp_server_with_tools(mcp_server, actor)
@enforce_types
async def create_mcp_server_from_config_with_tools(
self, server_config: Union[StdioServerConfig, SSEServerConfig, StreamableHTTPServerConfig], actor: PydanticUser
) -> MCPServer:
"""
Create an MCP server from a config object and optimistically sync its tools.
This method handles encryption of sensitive fields and then creates the server
with automatic tool synchronization.
"""
# Convert config to MCPServer with encryption
mcp_server = await self.create_mcp_server_from_config(server_config, actor)
# Create the server with tools
return await self.create_mcp_server_with_tools(mcp_server, actor)
    @enforce_types
    async def create_mcp_server_with_tools(self, pydantic_mcp_server: MCPServer, actor: PydanticUser) -> MCPServer:
        """
        Create a new MCP server and optimistically sync its tools.
        This method:
        1. Creates the MCP server record
        2. Attempts to connect and fetch tools
        3. Persists valid tools sequentially (best-effort; sync failures never
           abort server creation)
        """
        # First, create the MCP server
        created_server = await self.create_mcp_server(pydantic_mcp_server, actor)
        # Optimistically try to sync tools
        try:
            logger.info(f"Attempting to auto-sync tools from MCP server: {created_server.server_name}")
            # List all tools from the MCP server
            mcp_tools = await self.list_mcp_server_tools(created_server.id, actor=actor)
            # Filter out invalid tools
            valid_tools = [tool for tool in mcp_tools if not (tool.health and tool.health.status == "INVALID")]
            # Register tools sequentially to avoid exhausting database connection pool.
            # When an MCP server has many tools (e.g., 50+), concurrent tool creation and mapping
            # can create too many simultaneous database connections, causing pool exhaustion errors.
            if valid_tools:
                results = []
                successful_count = 0
                for mcp_tool in valid_tools:
                    tool_create = ToolCreate.from_mcp(mcp_server_name=created_server.server_name, mcp_tool=mcp_tool)
                    try:
                        result = await self.tool_manager.create_mcp_tool_async(
                            tool_create=tool_create,
                            mcp_server_name=created_server.server_name,
                            mcp_server_id=created_server.id,
                            actor=actor,
                        )
                        results.append(result)
                        # Create mapping for successful tool
                        if result:
                            try:
                                await self.create_mcp_tool_mapping(created_server.id, result.id, actor)
                                successful_count += 1
                            except Exception as e:
                                # NOTE(review): a tool whose mapping fails is counted as
                                # "failed" below even though the tool row itself was created.
                                logger.warning(f"Failed to create mapping for tool {result.id}: {e}")
                    except Exception as e:
                        results.append(e)
                failed = len(results) - successful_count
                logger.info(
                    f"Auto-sync completed for MCP server {created_server.server_name}: "
                    f"{successful_count} tools persisted with mappings, {failed} failed, "
                    f"{len(mcp_tools) - len(valid_tools)} invalid tools skipped"
                )
            else:
                logger.info(f"No valid tools found to sync from MCP server {created_server.server_name}")
        except Exception as e:
            # Log the error but don't fail the server creation
            logger.warning(
                f"Failed to auto-sync tools from MCP server {created_server.server_name}: {e}. "
                f"Server was created successfully but tools were not persisted."
            )
        return created_server
    @enforce_types
    async def update_mcp_server_by_id(self, mcp_server_id: str, mcp_server_update: UpdateMCPServer, actor: PydanticUser) -> MCPServer:
        """Update an MCP server by its ID with the given UpdateMCPServer object.

        Handles three special cases beyond plain field assignment:
        - rename: deletes other servers in the org already holding the new name
        - token / custom_headers: re-encrypted only when the value actually changed
        - custom_headers=None: stored as SQL NULL rather than JSON null
        """
        async with db_registry.async_session() as session:
            # Fetch the server by ID
            mcp_server = await MCPServerModel.read_async(db_session=session, identifier=mcp_server_id, actor=actor)
            # Update attributes with only the fields that were explicitly set
            update_data = mcp_server_update.model_dump(to_orm=True, exclude_unset=True)
            # If renaming, proactively resolve name collisions within the same organization
            new_name = update_data.get("server_name")
            if new_name and new_name != getattr(mcp_server, "server_name", None):
                # Look for another server with the same name in this org
                existing = await MCPServerModel.list_async(
                    db_session=session,
                    organization_id=actor.organization_id,
                    server_name=new_name,
                )
                # Delete conflicting entries that are not the current server
                for other in existing:
                    if other.id != mcp_server.id:
                        await session.execute(
                            delete(MCPServerModel).where(
                                MCPServerModel.id == other.id,
                                MCPServerModel.organization_id == actor.organization_id,
                            )
                        )
            # Handle encryption for token if provided.
            # Only re-encrypt if the value has actually changed.
            if "token" in update_data and update_data["token"] is not None:
                # Recover the currently stored plaintext (if any) for comparison
                existing_token = None
                if mcp_server.token_enc:
                    existing_secret = Secret.from_encrypted(mcp_server.token_enc)
                    existing_token = await existing_secret.get_plaintext_async()
                elif mcp_server.token:
                    existing_token = mcp_server.token
                # Only re-encrypt if different (async to avoid blocking event loop)
                if existing_token != update_data["token"]:
                    token_secret = await Secret.from_plaintext_async(update_data["token"])
                    mcp_server.token_enc = token_secret.get_encrypted()
                    # Keep plaintext for dual-write during migration
                    mcp_server.token = update_data["token"]
                # Remove from update_data since we set directly on mcp_server
                update_data.pop("token", None)
                update_data.pop("token_enc", None)
            # Handle encryption for custom_headers if provided.
            # Only re-encrypt if the value has actually changed.
            if "custom_headers" in update_data:
                if update_data["custom_headers"] is not None:
                    # custom_headers is a Dict[str, str], serialize to JSON then encrypt
                    import json

                    json_str = json.dumps(update_data["custom_headers"])
                    # Recover the currently stored headers JSON (if any) for comparison
                    existing_headers_json = None
                    if mcp_server.custom_headers_enc:
                        existing_secret = Secret.from_encrypted(mcp_server.custom_headers_enc)
                        existing_headers_json = await existing_secret.get_plaintext_async()
                    elif mcp_server.custom_headers:
                        existing_headers_json = json.dumps(mcp_server.custom_headers)
                    # Only re-encrypt if different (async to avoid blocking event loop)
                    if existing_headers_json != json_str:
                        headers_secret = await Secret.from_plaintext_async(json_str)
                        mcp_server.custom_headers_enc = headers_secret.get_encrypted()
                        # Keep plaintext for dual-write during migration
                        mcp_server.custom_headers = update_data["custom_headers"]
                    # Remove from update_data since we set directly on mcp_server
                    update_data.pop("custom_headers", None)
                    update_data.pop("custom_headers_enc", None)
                else:
                    # Ensure custom_headers None is stored as SQL NULL, not JSON null
                    update_data.pop("custom_headers", None)
                    setattr(mcp_server, "custom_headers", null())
                    setattr(mcp_server, "custom_headers_enc", None)
            # Apply the remaining plain field updates
            for key, value in update_data.items():
                setattr(mcp_server, key, value)
            # Save the updated server to the database
            mcp_server = await mcp_server.update_async(db_session=session, actor=actor)
            return mcp_server.to_pydantic()
@enforce_types
async def update_mcp_server_by_name(self, mcp_server_name: str, mcp_server_update: UpdateMCPServer, actor: PydanticUser) -> MCPServer:
"""Update an MCP server by its name."""
mcp_server_id = await self.get_mcp_server_id_by_name(mcp_server_name, actor)
if not mcp_server_id:
raise HTTPException(
status_code=404,
detail={
"code": "MCPServerNotFoundError",
"message": f"MCP server {mcp_server_name} not found",
"mcp_server_name": mcp_server_name,
},
)
return await self.update_mcp_server_by_id(mcp_server_id, mcp_server_update, actor)
@enforce_types
async def get_mcp_server_id_by_name(self, mcp_server_name: str, actor: PydanticUser) -> Optional[str]:
"""Retrieve a MCP server by its name and a user"""
try:
async with db_registry.async_session() as session:
mcp_server = await MCPServerModel.read_async(db_session=session, server_name=mcp_server_name, actor=actor)
return mcp_server.id
except NoResultFound:
return None
    @enforce_types
    async def get_mcp_server_by_id_async(self, mcp_server_id: str, actor: PydanticUser) -> MCPServer:
        """Fetch an MCP server by its ID.

        Raises:
            NoResultFound: when no server with this ID exists for the actor.
        """
        async with db_registry.async_session() as session:
            # Retrieve the server row using the model's read method
            mcp_server = await MCPServerModel.read_async(db_session=session, identifier=mcp_server_id, actor=actor)
            # Convert the SQLAlchemy model to its Pydantic schema
            return mcp_server.to_pydantic()
@enforce_types
async def get_mcp_servers_by_ids(self, mcp_server_ids: List[str], actor: PydanticUser) -> List[MCPServer]:
"""Fetch multiple MCP servers by their IDs in a single query."""
if not mcp_server_ids:
return []
async with db_registry.async_session() as session:
mcp_servers = await MCPServerModel.list_async(
db_session=session,
organization_id=actor.organization_id,
id=mcp_server_ids, # This will use the IN operator
)
return [mcp_server.to_pydantic() for mcp_server in mcp_servers]
@enforce_types
async def get_mcp_server(self, mcp_server_name: str, actor: PydanticUser) -> PydanticTool:
"""Get a MCP server by name."""
async with db_registry.async_session() as session:
mcp_server_id = await self.get_mcp_server_id_by_name(mcp_server_name, actor)
mcp_server = await MCPServerModel.read_async(db_session=session, identifier=mcp_server_id, actor=actor)
if not mcp_server:
raise HTTPException(
status_code=404, # Not Found
detail={
"code": "MCPServerNotFoundError",
"message": f"MCP server {mcp_server_name} not found",
"mcp_server_name": mcp_server_name,
},
)
return mcp_server.to_pydantic()
@enforce_types
async def delete_mcp_server_by_id(self, mcp_server_id: str, actor: PydanticUser) -> None:
    """Delete a MCP server by its ID and associated tools and OAuth sessions.

    Cleanup performed inside one session (committed by the context manager):
      1. hard-delete org tools whose metadata ties them to this server
      2. delete MCPTools name mappings for this server
      3. delete OAuth sessions linked to this server, plus orphaned pending
         sessions (server_id IS NULL) for the same server_url + user; sessions
         linked to OTHER servers are kept
      4. delete the MCP server row itself

    Raises:
        ValueError: if no MCP server with the given ID exists.
    """
    async with db_registry.async_session() as session:
        try:
            mcp_server = await MCPServerModel.read_async(db_session=session, identifier=mcp_server_id, actor=actor)
            if not mcp_server:
                raise NoResultFound(f"MCP server with id {mcp_server_id} not found.")
            server_url = getattr(mcp_server, "server_url", None)
            # Get all tools with matching metadata
            stmt = select(ToolModel).where(ToolModel.organization_id == actor.organization_id)
            result = await session.execute(stmt)
            all_tools = result.scalars().all()
            # Filter and delete tools that belong to this MCP server
            tools_deleted = 0
            for tool in all_tools:
                if tool.metadata_ and constants.MCP_TOOL_TAG_NAME_PREFIX in tool.metadata_:
                    if tool.metadata_[constants.MCP_TOOL_TAG_NAME_PREFIX].get("server_id") == mcp_server_id:
                        await tool.hard_delete_async(db_session=session, actor=actor)
                        # BUG FIX: was `tools_deleted = 1`, which pinned the logged
                        # count at 1 no matter how many tools were removed.
                        tools_deleted += 1
                        logger.info(f"Deleted MCP tool {tool.name} associated with MCP server {mcp_server_id}")
            if tools_deleted > 0:
                logger.info(f"Deleted {tools_deleted} MCP tools associated with MCP server {mcp_server_id}")
            # Delete all MCPTools mappings for this server
            await session.execute(
                delete(MCPToolsModel).where(
                    MCPToolsModel.mcp_server_id == mcp_server_id,
                    MCPToolsModel.organization_id == actor.organization_id,
                )
            )
            logger.info(f"Deleted MCPTools mappings for MCP server {mcp_server_id}")
            # Delete OAuth sessions associated with this MCP server
            # 1. Delete sessions directly linked to this server (server_id matches)
            # 2. Delete orphaned pending sessions (server_id IS NULL) for same server_url + user
            # 3. Keep authorized sessions linked to OTHER MCP servers (different server_id)
            oauth_count = 0
            # Delete sessions directly linked to this server
            result = await session.execute(
                delete(MCPOAuth).where(
                    MCPOAuth.server_id == mcp_server_id,
                    MCPOAuth.organization_id == actor.organization_id,
                )
            )
            oauth_count += result.rowcount
            # Delete orphaned sessions (no server_id) for same server_url + user
            if server_url:
                result = await session.execute(
                    delete(MCPOAuth).where(
                        MCPOAuth.server_url == server_url,
                        MCPOAuth.server_id.is_(None),  # Only orphaned sessions (not linked to any server)
                        MCPOAuth.organization_id == actor.organization_id,
                        MCPOAuth.user_id == actor.id,
                    )
                )
                oauth_count += result.rowcount
            if oauth_count > 0:
                logger.info(
                    f"Deleted {oauth_count} OAuth sessions for MCP server {mcp_server_id} (URL: {server_url}) for user {actor.id}"
                )
            # Delete the MCP server, will cascade delete to linked OAuth sessions
            await session.execute(
                delete(MCPServerModel).where(
                    MCPServerModel.id == mcp_server_id,
                    MCPServerModel.organization_id == actor.organization_id,
                )
            )
            # context manager now handles commits
            # await session.commit()
        except NoResultFound:
            await session.rollback()
            raise ValueError(f"MCP server with id {mcp_server_id} not found.")
        except Exception as e:
            await session.rollback()
            logger.error(f"Failed to delete MCP server {mcp_server_id}: {e}")
            raise
def read_mcp_config(self) -> dict[str, Union[SSEServerConfig, StdioServerConfig, StreamableHTTPServerConfig]]:
    """Parse ~/.letta/mcp_config.json into per-server config objects.

    Malformed JSON, duplicate names, and unparseable entries are logged as
    warnings and skipped — config problems are user errors, not system errors.
    The expected layout is {"mcpServers": {<name>: <params>, ...}} where params
    with a "url" key are parsed as SSE configs and the rest as stdio configs.
    """
    servers: dict[str, Union[SSEServerConfig, StdioServerConfig, StreamableHTTPServerConfig]] = {}
    mcp_config_path = os.path.join(constants.LETTA_DIR, constants.MCP_CONFIG_NAME)
    if not os.path.exists(mcp_config_path):
        return servers

    with open(mcp_config_path, "r") as f:
        try:
            mcp_config = json.load(f)
        except Exception as e:
            # Config parsing errors are user configuration issues, not system errors
            logger.warning(f"Failed to parse MCP config file ({mcp_config_path}) as json: {e}")
            return servers

    for server_name, raw_params in mcp_config.get(MCP_CONFIG_TOPLEVEL_KEY, {}).items():
        # No support for duplicate server names
        if server_name in servers:
            logger.warning(f"Duplicate MCP server name found (skipping): {server_name}")
            continue
        has_url = "url" in raw_params  # SSE-style entries carry a "url" key
        try:
            if has_url:
                servers[server_name] = SSEServerConfig(
                    server_name=server_name,
                    server_url=raw_params["url"],
                    auth_header=raw_params.get("auth_header", None),
                    auth_token=raw_params.get("auth_token", None),
                    headers=raw_params.get("headers", None),
                )
            else:
                # Entries without a URL follow the StdioServerParameters shape.
                servers[server_name] = StdioServerConfig(
                    server_name=server_name,
                    command=raw_params["command"],
                    args=raw_params.get("args", []),
                    env=raw_params.get("env", {}),
                )
        except Exception as e:
            # Config parsing errors are user configuration issues, not system errors
            logger.warning(f"Failed to parse server params for MCP server {server_name} (skipping): {e}")
            continue
    return servers
async def get_mcp_client(
    self,
    server_config: Union[SSEServerConfig, StdioServerConfig, StreamableHTTPServerConfig],
    actor: PydanticUser,
    oauth: Optional[ServerSideOAuth] = None,
    agent_id: Optional[str] = None,
) -> Union[AsyncFastMCPSSEClient, AsyncStdioMCPClient, AsyncFastMCPStreamableHTTPClient]:
    """
    Build the MCP client matching the server configuration type.

    Args:
        server_config: The server configuration object
        actor: The user making the request
        oauth: Optional ServerSideOAuth instance for authentication
        agent_id: Optional agent ID for request headers

    Returns:
        The appropriate MCP client instance

    Raises:
        ValueError: If server config type is not supported, or stdio servers are disabled
    """
    # Without an explicit OAuth provider, fall back to stored AUTHORIZED credentials
    # for this server URL (if any) and build a provider from them.
    if oauth is None and hasattr(server_config, "server_url"):
        stored = await self.get_oauth_session_by_server(server_config.server_url, actor, status=OAuthSessionStatus.AUTHORIZED)
        # Only reuse the session if an access token actually decrypts to a value.
        if stored and stored.access_token_enc and await stored.access_token_enc.get_plaintext_async():
            oauth = ServerSideOAuth(
                mcp_url=stored.server_url,
                session_id=stored.id,
                mcp_manager=self,
                actor=actor,
                redirect_uri=stored.redirect_uri,
            )

    config_type = server_config.type
    if config_type == MCPServerType.STDIO:
        # stdio servers run arbitrary local processes and are unsuitable for multi-tenant deployments
        if tool_settings.mcp_disable_stdio:
            raise ValueError("MCP stdio servers are disabled. Set MCP_DISABLE_STDIO=false to enable them.")
        stdio_config = StdioServerConfig(**server_config.model_dump())
        return AsyncStdioMCPClient(server_config=stdio_config, oauth_provider=None, agent_id=agent_id)
    if config_type == MCPServerType.SSE:
        sse_config = SSEServerConfig(**server_config.model_dump())
        return AsyncFastMCPSSEClient(server_config=sse_config, oauth=oauth, agent_id=agent_id)
    if config_type == MCPServerType.STREAMABLE_HTTP:
        http_config = StreamableHTTPServerConfig(**server_config.model_dump())
        return AsyncFastMCPStreamableHTTPClient(server_config=http_config, oauth=oauth, agent_id=agent_id)
    raise ValueError(f"Unsupported server config type: {type(server_config)}")
# OAuth-related methods
async def _oauth_orm_to_pydantic_async(self, oauth_session: MCPOAuth) -> MCPOAuthSession:
    """
    Convert OAuth ORM model to Pydantic model, handling decryption of sensitive fields.

    Note: Prefers encrypted columns (_enc fields), falls back to legacy plaintext columns.
    """
    # Wrap each encrypted column in a Secret exactly once and reuse it for both the
    # decrypted plaintext field and the *_enc Secret field. Previously every column
    # went through Secret.from_encrypted twice, doubling the construction work.
    authorization_code_secret = Secret.from_encrypted(oauth_session.authorization_code_enc)
    access_token_secret = Secret.from_encrypted(oauth_session.access_token_enc)
    refresh_token_secret = Secret.from_encrypted(oauth_session.refresh_token_enc)
    client_secret_secret = Secret.from_encrypted(oauth_session.client_secret_enc)

    # Decrypt asynchronously to avoid blocking the event loop.
    authorization_code = await authorization_code_secret.get_plaintext_async()
    access_token = await access_token_secret.get_plaintext_async()
    refresh_token = await refresh_token_secret.get_plaintext_async()
    client_secret = await client_secret_secret.get_plaintext_async()

    return MCPOAuthSession(
        id=oauth_session.id,
        state=oauth_session.state,
        server_id=oauth_session.server_id,
        server_url=oauth_session.server_url,
        server_name=oauth_session.server_name,
        user_id=oauth_session.user_id,
        organization_id=oauth_session.organization_id,
        authorization_url=oauth_session.authorization_url,
        authorization_code=authorization_code,
        access_token=access_token,
        refresh_token=refresh_token,
        token_type=oauth_session.token_type,
        expires_at=oauth_session.expires_at,
        scope=oauth_session.scope,
        client_id=oauth_session.client_id,
        client_secret=client_secret,
        redirect_uri=oauth_session.redirect_uri,
        status=oauth_session.status,
        created_at=oauth_session.created_at,
        updated_at=oauth_session.updated_at,
        # Encrypted fields as Secret objects; None when the DB column is unset
        authorization_code_enc=authorization_code_secret if oauth_session.authorization_code_enc else None,
        access_token_enc=access_token_secret if oauth_session.access_token_enc else None,
        refresh_token_enc=refresh_token_secret if oauth_session.refresh_token_enc else None,
        client_secret_enc=client_secret_secret if oauth_session.client_secret_enc else None,
    )
@enforce_types
async def create_oauth_session(self, session_create: MCPOAuthSessionCreate, actor: PydanticUser) -> MCPOAuthSession:
    """Create a new OAuth session (status PENDING, random unique state) for MCP server authentication."""
    # Build the ORM record first; persistence happens inside the session below.
    record = MCPOAuth(
        id=f"mcp-oauth-{str(uuid.uuid4())[:8]}",
        state=secrets.token_urlsafe(32),  # unguessable state ties the callback to this session
        server_url=session_create.server_url,
        server_name=session_create.server_name,
        user_id=session_create.user_id,
        organization_id=session_create.organization_id,
        status=OAuthSessionStatus.PENDING,
        created_at=datetime.now(),
        updated_at=datetime.now(),
    )
    async with db_registry.async_session() as session:
        record = await record.create_async(session, actor=actor)
        # Fresh sessions carry no tokens yet; the conversion simply yields None for them.
        return await self._oauth_orm_to_pydantic_async(record)
@enforce_types
async def get_oauth_session_by_id(self, session_id: str, actor: PydanticUser) -> Optional[MCPOAuthSession]:
    """Fetch an OAuth session by ID; returns None when it does not exist."""
    async with db_registry.async_session() as session:
        try:
            record = await MCPOAuth.read_async(db_session=session, identifier=session_id, actor=actor)
        except NoResultFound:
            return None
        return await self._oauth_orm_to_pydantic_async(record)
@enforce_types
async def get_oauth_session_by_server(
    self, server_url: str, actor: PydanticUser, status: Optional[OAuthSessionStatus] = None
) -> Optional[MCPOAuthSession]:
    """Return the most recently updated OAuth session for (organization, user, server URL).

    Args:
        server_url: The MCP server URL
        actor: The user making the request
        status: Optional status filter. If None, returns the most recent session regardless of status.
                If specified, only returns sessions with that status.
    """
    # Assemble all filters up front; ordering by updated_at desc picks the newest record.
    conditions = [
        MCPOAuth.organization_id == actor.organization_id,
        MCPOAuth.user_id == actor.id,
        MCPOAuth.server_url == server_url,
    ]
    if status is not None:
        conditions.append(MCPOAuth.status == status)
    stmt = select(MCPOAuth).where(*conditions).order_by(desc(MCPOAuth.updated_at)).limit(1)

    async with db_registry.async_session() as session:
        record = (await session.execute(stmt)).scalar_one_or_none()
        if record is None:
            return None
        return await self._oauth_orm_to_pydantic_async(record)
@enforce_types
async def get_oauth_session_by_state(self, state: str) -> Optional[MCPOAuthSession]:
    """Look up an OAuth session via its unique state parameter (static callback URI flow).

    Note: no actor filter — the callback only knows the state token, which is
    random enough to identify the session on its own.
    """
    stmt = select(MCPOAuth).where(MCPOAuth.state == state).limit(1)
    async with db_registry.async_session() as session:
        record = (await session.execute(stmt)).scalar_one_or_none()
        return await self._oauth_orm_to_pydantic_async(record) if record else None
@staticmethod
async def _reencrypt_if_changed(new_value: str, existing_enc: Optional[str], existing_plaintext: Optional[str]) -> Optional[str]:
    """Encrypt *new_value* only when it differs from the currently stored value.

    Prefers the encrypted column for the comparison, falling back to the legacy
    plaintext column. Returns the new ciphertext, or None when the value is
    unchanged (caller keeps the existing ciphertext). Decryption/encryption are
    awaited to avoid blocking the event loop.
    """
    current = None
    if existing_enc:
        current = await Secret.from_encrypted(existing_enc).get_plaintext_async()
    elif existing_plaintext:
        current = existing_plaintext
    if current == new_value:
        return None
    secret = await Secret.from_plaintext_async(new_value)
    return secret.get_encrypted()

@enforce_types
async def update_oauth_session(self, session_id: str, session_update: MCPOAuthSessionUpdate, actor: PydanticUser) -> MCPOAuthSession:
    """Update an existing OAuth session.

    Sensitive fields (authorization_code, access_token, refresh_token,
    client_secret) are re-encrypted only when their plaintext actually changed;
    their plaintext columns are still written for dual-write during the
    encryption migration. updated_at is always bumped so "latest session"
    queries order correctly.

    Raises:
        NoResultFound: if the session does not exist or is not visible to the actor.
    """
    async with db_registry.async_session() as session:
        oauth_session = await MCPOAuth.read_async(db_session=session, identifier=session_id, actor=actor)

        # Plain (non-encrypted) fields: copy over whatever was provided.
        if session_update.state is not None:
            oauth_session.state = session_update.state
        if session_update.authorization_url is not None:
            oauth_session.authorization_url = session_update.authorization_url

        # Sensitive fields as (new value, enc column, plaintext column) triples.
        # This replaces four near-identical 15-line encrypt-if-changed blocks.
        for new_value, enc_attr, plain_attr in (
            (session_update.authorization_code, "authorization_code_enc", "authorization_code"),
            (session_update.access_token, "access_token_enc", "access_token"),
            (session_update.refresh_token, "refresh_token_enc", "refresh_token"),
            (session_update.client_secret, "client_secret_enc", "client_secret"),
        ):
            if new_value is None:
                continue
            new_enc = await self._reencrypt_if_changed(
                new_value, getattr(oauth_session, enc_attr), getattr(oauth_session, plain_attr)
            )
            if new_enc is not None:
                setattr(oauth_session, enc_attr, new_enc)
            # Keep plaintext for dual-write during migration
            setattr(oauth_session, plain_attr, new_value)

        if session_update.token_type is not None:
            oauth_session.token_type = session_update.token_type
        if session_update.expires_at is not None:
            oauth_session.expires_at = session_update.expires_at
        if session_update.scope is not None:
            oauth_session.scope = session_update.scope
        if session_update.client_id is not None:
            oauth_session.client_id = session_update.client_id
        if session_update.redirect_uri is not None:
            oauth_session.redirect_uri = session_update.redirect_uri
        if session_update.status is not None:
            oauth_session.status = session_update.status

        # Always update the updated_at timestamp
        oauth_session.updated_at = datetime.now()
        oauth_session = await oauth_session.update_async(db_session=session, actor=actor)
        return await self._oauth_orm_to_pydantic_async(oauth_session)
@enforce_types
async def delete_oauth_session(self, session_id: str, actor: PydanticUser) -> None:
    """Hard-delete an OAuth session.

    Raises:
        ValueError: if no session with the given ID exists.
    """
    async with db_registry.async_session() as session:
        try:
            record = await MCPOAuth.read_async(db_session=session, identifier=session_id, actor=actor)
            await record.hard_delete_async(db_session=session, actor=actor)
        except NoResultFound:
            # Surface a plain ValueError so callers need not know the ORM exception type.
            raise ValueError(f"OAuth session with id {session_id} not found.")
@enforce_types
async def cleanup_expired_oauth_sessions(self, max_age_hours: int = 24) -> int:
    """Delete OAuth sessions older than *max_age_hours* and return how many were removed."""
    cutoff_time = datetime.now() - timedelta(hours=max_age_hours)
    async with db_registry.async_session() as session:
        stmt = select(MCPOAuth).where(MCPOAuth.created_at < cutoff_time)
        expired = (await session.execute(stmt)).scalars().all()
        # Hard-delete each stale session; cleanup runs without a specific actor.
        for record in expired:
            await record.hard_delete_async(db_session=session, actor=None)
        if expired:
            logger.info(f"Cleaned up {len(expired)} expired OAuth sessions")
        return len(expired)
@enforce_types
async def handle_oauth_flow(
    self,
    request: Union[SSEServerConfig, StdioServerConfig, StreamableHTTPServerConfig],
    actor: PydanticUser,
    http_request: Optional[Request] = None,
):
    """
    Handle OAuth flow for MCP server connection and yield SSE events.

    Drives the full server-side OAuth dance: creates a PENDING session, picks a
    redirect URI based on where the request came from (web frontend / letta-code
    CLI / direct API), kicks off the MCP client connection in a background task
    so the provider's redirect handler can store the authorization URL, then
    streams progress events to the caller.

    Args:
        request: The server configuration
        actor: The user making the request
        http_request: The HTTP request object (headers are used to detect the caller type)

    Yields:
        SSE events during OAuth flow (OAUTH_REQUIRED, AUTHORIZATION_URL,
        WAITING_FOR_AUTH, then SUCCESS with the tool list, or ERROR)

    Raises:
        HTTPException: 400 when no redirect URI can be derived from the environment.
    """
    # Imported locally to keep the oauth_utils/types dependency out of module import time.
    import asyncio

    from letta.services.mcp.oauth_utils import oauth_stream_event
    from letta.services.mcp.types import OauthStreamEvent

    # OAuth required, yield state to client to prepare to handle authorization URL
    # Note: Existing AUTHORIZED sessions are already checked upstream in get_mcp_client
    yield oauth_stream_event(OauthStreamEvent.OAUTH_REQUIRED, message="OAuth authentication required")

    # Create new OAuth session for each test connection attempt
    # Note: Old pending sessions will be cleaned up when an MCP server is created/deleted
    session_create = MCPOAuthSessionCreate(
        server_url=request.server_url,
        server_name=request.server_name,
        user_id=actor.id,
        organization_id=actor.organization_id,
    )
    oauth_session = await self.create_oauth_session(session_create, actor)
    session_id = oauth_session.id

    # TODO: @jnjpng make this check more robust and remove direct os.getenv
    # Check if request is from web frontend to determine redirect URI
    is_web_request = (
        http_request
        and http_request.headers
        and http_request.headers.get("user-agent", "") == "Next.js Middleware"
        and http_request.headers.__contains__("x-organization-id")
    )
    # Check if request is from letta-code CLI (uses web callback for OAuth)
    is_letta_code_request = http_request and http_request.headers and http_request.headers.get("x-letta-source", "") == "letta-code"

    logo_uri = None
    NEXT_PUBLIC_CURRENT_HOST = os.getenv("NEXT_PUBLIC_CURRENT_HOST")
    LETTA_AGENTS_ENDPOINT = os.getenv("LETTA_AGENTS_ENDPOINT")

    if (is_web_request or is_letta_code_request) and NEXT_PUBLIC_CURRENT_HOST:
        # Use static callback URI - session is identified via state parameter
        redirect_uri = f"{NEXT_PUBLIC_CURRENT_HOST}/oauth/callback/mcp"
        logo_uri = f"{NEXT_PUBLIC_CURRENT_HOST}/seo/favicon.svg"
    elif LETTA_AGENTS_ENDPOINT:
        # API and SDK usage should call core server directly
        # Use static callback URI - session is identified via state parameter
        redirect_uri = f"{LETTA_AGENTS_ENDPOINT}/v1/tools/mcp/oauth/callback"
    else:
        logger.error(
            f"No redirect URI found for request and base urls: {http_request.headers if http_request else 'No headers'} {NEXT_PUBLIC_CURRENT_HOST} {LETTA_AGENTS_ENDPOINT}"
        )
        raise HTTPException(status_code=400, detail="No redirect URI found")

    # Create ServerSideOAuth for FastMCP client
    oauth = ServerSideOAuth(
        mcp_url=request.server_url,
        session_id=session_id,
        mcp_manager=self,
        actor=actor,
        redirect_uri=redirect_uri,
        url_callback=None,  # URL is stored by redirect_handler
        logo_uri=logo_uri,
    )

    # Get authorization URL by triggering OAuth flow
    temp_client = None
    connect_task = None
    try:
        temp_client = await self.get_mcp_client(request, actor, oauth)
        # Run connect_to_server in background to avoid blocking
        # This will trigger the OAuth flow and the redirect_handler will save the authorization URL to database
        connect_task = safe_create_task(temp_client.connect_to_server(), label="mcp_oauth_connect")

        # Give the OAuth flow time to trigger and save the URL
        # NOTE(review): fixed 1s wait is a race — a slow provider may not have stored
        # the URL yet, in which case no AUTHORIZATION_URL event is emitted; confirm acceptable.
        await asyncio.sleep(1.0)

        # Fetch the authorization URL from database and yield state to client to proceed with handling authorization URL
        auth_session = await self.get_oauth_session_by_id(session_id, actor)
        if auth_session and auth_session.authorization_url:
            yield oauth_stream_event(OauthStreamEvent.AUTHORIZATION_URL, url=auth_session.authorization_url, session_id=session_id)

        # Wait for user authorization (with timeout), client should render loading state until user completes the flow and /mcp/oauth/callback/{session_id} is hit
        yield oauth_stream_event(OauthStreamEvent.WAITING_FOR_AUTH, message="Waiting for user authorization...")

        # Callback handler will poll for authorization code and state and update the OAuth session
        await connect_task

        tools = await temp_client.list_tools(serialize=True)
        yield oauth_stream_event(OauthStreamEvent.SUCCESS, tools=tools)
    except Exception as e:
        # Surface the failure to the stream before re-raising so the client sees an ERROR event.
        logger.error(f"Error triggering OAuth flow: {e}")
        yield oauth_stream_event(OauthStreamEvent.ERROR, message=f"Failed to trigger OAuth: {str(e)}")
        raise e
    finally:
        # Clean up resources
        if connect_task and not connect_task.done():
            connect_task.cancel()
            try:
                await connect_task
            except asyncio.CancelledError:
                pass
        if temp_client:
            try:
                await temp_client.cleanup()
            except Exception as cleanup_error:
                logger.warning(f"Error during temp MCP client cleanup: {cleanup_error}")
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/mcp_server_manager.py",
"license": "Apache License 2.0",
"lines": 1221,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/services/streaming_service.py | import json
import time
from typing import AsyncIterator, Optional, Union
from uuid import uuid4
from fastapi.responses import StreamingResponse
from openai.types.chat import ChatCompletionChunk
from openai.types.chat.chat_completion_chunk import Choice, ChoiceDelta
from letta.agents.agent_loop import AgentLoop
from letta.agents.base_agent_v2 import BaseAgentV2
from letta.constants import REDIS_RUN_ID_PREFIX
from letta.data_sources.redis_client import NoopAsyncRedisClient, get_redis_client
from letta.errors import (
LettaInvalidArgumentError,
LettaServiceUnavailableError,
LLMAuthenticationError,
LLMEmptyResponseError,
LLMError,
LLMRateLimitError,
LLMTimeoutError,
PendingApprovalError,
)
from letta.helpers.datetime_helpers import get_utc_timestamp_ns
from letta.log import get_logger
from letta.otel.context import get_ctx_attributes
from letta.otel.metric_registry import MetricRegistry
from letta.schemas.agent import AgentState
from letta.schemas.enums import AgentType, MessageStreamStatus, RunStatus
from letta.schemas.job import LettaRequestConfig
from letta.schemas.letta_message import AssistantMessage, LettaErrorMessage, MessageType
from letta.schemas.letta_message_content import TextContent
from letta.schemas.letta_request import ClientToolSchema, LettaStreamingRequest
from letta.schemas.letta_response import LettaResponse
from letta.schemas.letta_stop_reason import LettaStopReason, StopReasonType
from letta.schemas.message import MessageCreate
from letta.schemas.provider_trace import BillingContext
from letta.schemas.run import Run as PydanticRun, RunUpdate
from letta.schemas.usage import LettaUsageStatistics
from letta.schemas.user import User
from letta.server.rest_api.redis_stream_manager import create_background_stream_processor, redis_sse_stream_generator
from letta.server.rest_api.streaming_response import (
RunCancelledException,
StreamingResponseWithStatusCode,
add_keepalive_to_stream,
cancellation_aware_stream_wrapper,
get_cancellation_event_for_run,
)
from letta.server.rest_api.utils import capture_sentry_exception
from letta.services.conversation_manager import ConversationManager
from letta.services.run_manager import RunManager
from letta.settings import settings
from letta.utils import safe_create_task
logger = get_logger(__name__)
class StreamingService:
"""
Service for managing agent streaming responses.
Handles run creation, stream generation, error handling, and format conversion.
"""
def __init__(self, server):
    """
    Initialize the streaming service.

    Args:
        server: The SyncServer instance for accessing managers and services
    """
    self.server = server
    # Run tracking is optional; skip creating a RunManager when disabled.
    if settings.track_agent_run:
        self.runs_manager = RunManager()
    else:
        self.runs_manager = None
async def create_agent_stream(
    self,
    agent_id: str,
    actor: User,
    request: LettaStreamingRequest,
    run_type: str = "streaming",
    conversation_id: Optional[str] = None,
    should_lock: bool = False,
    billing_context: "BillingContext | None" = None,
) -> tuple[Optional[PydanticRun], Union[StreamingResponse, LettaResponse]]:
    """
    Create a streaming response for an agent.

    Args:
        agent_id: The agent ID to stream from
        actor: The user making the request
        request: The LettaStreamingRequest containing all request parameters
        run_type: Type of run for tracking
        conversation_id: Optional conversation ID for conversation-scoped messaging
        should_lock: If True and conversation_id is None, use agent_id as lock key
        billing_context: Optional billing attribution forwarded to the stream

    Returns:
        Tuple of (run object or None, streaming response)
    """
    request_start_timestamp_ns = get_utc_timestamp_ns()
    MetricRegistry().user_message_counter.add(1, get_ctx_attributes())

    # get redis client
    redis_client = await get_redis_client()

    # load agent and check eligibility
    agent = await self.server.agent_manager.get_agent_by_id_async(
        agent_id,
        actor,
        include_relationships=["memory", "multi_agent_group", "sources", "tool_exec_environment_variables", "tools", "tags"],
    )

    # Apply conversation-level model override if set (lower priority than request override)
    if conversation_id and not request.override_model:
        conversation = await ConversationManager().get_conversation_by_id(
            conversation_id=conversation_id,
            actor=actor,
        )
        if conversation.model:
            conversation_llm_config = await self.server.get_llm_config_from_handle_async(
                actor=actor,
                handle=conversation.model,
            )
            if conversation.model_settings is not None:
                update_params = conversation.model_settings._to_legacy_config_params()
                # Don't clobber max_tokens with the Pydantic default when the caller
                # didn't explicitly provide max_output_tokens.
                if "max_output_tokens" not in conversation.model_settings.model_fields_set:
                    update_params.pop("max_tokens", None)
                conversation_llm_config = conversation_llm_config.model_copy(update=update_params)
            agent = agent.model_copy(update={"llm_config": conversation_llm_config})

    # Handle model override if specified in the request (takes precedence over conversation override)
    if request.override_model:
        override_llm_config = await self.server.get_llm_config_from_handle_async(
            actor=actor,
            handle=request.override_model,
        )
        # Create a copy of agent state with the overridden llm_config
        agent = agent.model_copy(update={"llm_config": override_llm_config})

    model_compatible_token_streaming = self._is_token_streaming_compatible(agent)

    # Determine lock key: use conversation_id if provided, else agent_id if should_lock
    lock_key = conversation_id if conversation_id else (agent_id if should_lock else None)

    # Attempt to acquire lock if lock_key is set
    # This prevents concurrent message processing for the same conversation/agent
    # Skip locking if Redis is not available (graceful degradation)
    if lock_key and not isinstance(redis_client, NoopAsyncRedisClient):
        await redis_client.acquire_conversation_lock(
            conversation_id=lock_key,
            token=str(uuid4()),
        )

    # create run if tracking is enabled
    run = None
    run_update_metadata = None
    try:
        if settings.track_agent_run:
            run = await self._create_run(agent_id, request, run_type, actor, conversation_id=conversation_id)
            await redis_client.set(f"{REDIS_RUN_ID_PREFIX}:{agent_id}", run.id if run else None)

        # use agent loop for streaming
        agent_loop = AgentLoop.load(agent_state=agent, actor=actor)

        # create the base stream with error handling
        raw_stream = self._create_error_aware_stream(
            agent_loop=agent_loop,
            messages=request.messages,
            max_steps=request.max_steps,
            stream_tokens=request.stream_tokens and model_compatible_token_streaming,
            run_id=run.id if run else None,
            use_assistant_message=request.use_assistant_message,
            request_start_timestamp_ns=request_start_timestamp_ns,
            include_return_message_types=request.include_return_message_types,
            actor=actor,
            conversation_id=conversation_id,
            lock_key=lock_key,  # For lock release (may differ from conversation_id)
            client_tools=request.client_tools,
            include_compaction_messages=request.include_compaction_messages,
            billing_context=billing_context,
        )

        # handle background streaming if requested
        if request.background and settings.track_agent_run:
            if isinstance(redis_client, NoopAsyncRedisClient):
                raise LettaServiceUnavailableError(
                    f"Background streaming requires Redis to be running. "
                    f"Please ensure Redis is properly configured. "
                    f"LETTA_REDIS_HOST: {settings.redis_host}, LETTA_REDIS_PORT: {settings.redis_port}",
                    service_name="redis",
                )
            # Wrap the agent loop stream with cancellation awareness for background task
            background_stream = raw_stream
            if settings.enable_cancellation_aware_streaming and run:
                background_stream = cancellation_aware_stream_wrapper(
                    stream_generator=raw_stream,
                    run_manager=self.runs_manager,
                    run_id=run.id,
                    actor=actor,
                    cancellation_event=get_cancellation_event_for_run(run.id),
                )
            safe_create_task(
                create_background_stream_processor(
                    stream_generator=background_stream,
                    redis_client=redis_client,
                    run_id=run.id,
                    run_manager=self.server.run_manager,
                    actor=actor,
                    conversation_id=lock_key,  # Use lock_key for lock release
                ),
                label=f"background_stream_processor_{run.id}",
            )
            # The client now consumes from Redis rather than the live agent loop.
            raw_stream = redis_sse_stream_generator(
                redis_client=redis_client,
                run_id=run.id,
            )

        # wrap client stream with cancellation awareness if enabled and tracking runs
        stream = raw_stream
        if settings.enable_cancellation_aware_streaming and settings.track_agent_run and run and not request.background:
            stream = cancellation_aware_stream_wrapper(
                stream_generator=raw_stream,
                run_manager=self.runs_manager,
                run_id=run.id,
                actor=actor,
                cancellation_event=get_cancellation_event_for_run(run.id),
            )

        # conditionally wrap with keepalive based on request parameter
        if request.include_pings and settings.enable_keepalive:
            # BUG FIX: run is None when run tracking is disabled; `run.id` here
            # previously raised AttributeError for ping-enabled requests.
            stream = add_keepalive_to_stream(stream, keepalive_interval=settings.keepalive_interval, run_id=run.id if run else None)

        result = StreamingResponseWithStatusCode(
            stream,
            media_type="text/event-stream",
        )

        # update run status to running before returning
        if settings.track_agent_run and run:
            # refetch run since it may have been updated by another service
            run = await self.server.run_manager.get_run_by_id(run_id=run.id, actor=actor)
            if run.status == RunStatus.created:
                run_status = RunStatus.running
            else:
                # don't override run status if it has already been updated
                run_status = None
        return run, result
    except PendingApprovalError as e:
        if settings.track_agent_run:
            run_update_metadata = {"error": str(e)}
            run_status = RunStatus.failed
        raise
    except Exception as e:
        if settings.track_agent_run:
            run_update_metadata = {"error": str(e)}
            run_status = RunStatus.failed
        raise
    finally:
        # run_status is only bound on paths where track_agent_run and run are set,
        # and the short-circuit below guards against evaluating it otherwise.
        if settings.track_agent_run and run and run_status:
            await self.server.run_manager.update_run_by_id_async(
                run_id=run.id,
                conversation_id=lock_key,  # Use lock_key for lock release
                update=RunUpdate(status=run_status, metadata=run_update_metadata),
                actor=actor,
            )
async def create_agent_stream_openai_chat_completions(
    self,
    agent_id: str,
    actor: User,
    request: LettaStreamingRequest,
) -> StreamingResponse:
    """Stream an agent interaction as OpenAI-compatible chat-completion chunks.

    Delegates to ``create_agent_stream`` and re-encodes each assistant text
    event into the OpenAI ``ChatCompletionChunk`` SSE schema, dropping
    internal tool-execution traffic.

    Args:
        agent_id: ID of the agent to stream from.
        actor: Authenticated user issuing the request.
        request: The full streaming request payload.

    Returns:
        A ``StreamingResponse`` emitting OpenAI-formatted SSE chunks.

    Raises:
        LettaInvalidArgumentError: If the underlying response is not a
            ``StreamingResponseWithStatusCode`` (streaming unsupported).
    """
    # The agent record supplies the model name echoed back in every chunk.
    agent_state = await self.server.agent_manager.get_agent_by_id_async(agent_id, actor)

    run, inner_response = await self.create_agent_stream(
        agent_id=agent_id,
        actor=actor,
        request=request,
        run_type="openai_chat_completions",
    )

    # Only a status-code-aware streaming response exposes a body iterator
    # that we can re-encode; anything else means streaming is unsupported.
    if not isinstance(inner_response, StreamingResponseWithStatusCode):
        raise LettaInvalidArgumentError(
            "Agent is not compatible with streaming mode",
            argument_name="model",
        )
    source_stream = inner_response.body_iterator

    model_label = agent_state.llm_config.model if agent_state.llm_config else "unknown"
    chat_completion_id = f"chatcmpl-{run.id if run else str(uuid4())}"
    transformer = OpenAIChatCompletionsStreamTransformer(
        model=model_label,
        completion_id=chat_completion_id,
    )

    # The transformer parses Letta's SSE strings and re-emits OpenAI chunks.
    return StreamingResponse(
        transformer.transform_stream(source_stream),
        media_type="text/event-stream",
    )
def _create_error_aware_stream(
    self,
    agent_loop: BaseAgentV2,
    messages: list[MessageCreate],
    max_steps: int,
    stream_tokens: bool,
    run_id: Optional[str],
    use_assistant_message: bool,
    request_start_timestamp_ns: int,
    include_return_message_types: Optional[list[MessageType]],
    actor: User,
    conversation_id: Optional[str] = None,
    lock_key: Optional[str] = None,
    client_tools: Optional[list[ClientToolSchema]] = None,
    include_compaction_messages: bool = False,
    billing_context: BillingContext | None = None,
) -> AsyncIterator:
    """
    Create a stream with unified error handling.

    Wraps ``agent_loop.stream`` so that every failure mode (LLM timeouts,
    rate limits, auth errors, empty responses, cancellation, and unknown
    exceptions) is converted into SSE ``event: error`` + ``data: [DONE]``
    output instead of an abrupt disconnect, and so the run's terminal
    status is always persisted in the ``finally`` block.

    Returns:
        Async iterator that yields chunks with proper error handling
    """

    async def error_aware_stream():
        """Stream that handles early LLM errors gracefully in streaming format."""
        # Terminal bookkeeping consumed by the finally-block status update.
        run_status = None
        stop_reason = None
        error_data = None
        # Whether the pass-through chunks contained SSE terminal markers.
        saw_done = False
        saw_error = False
        try:
            stream = agent_loop.stream(
                input_messages=messages,
                max_steps=max_steps,
                stream_tokens=stream_tokens,
                run_id=run_id,
                use_assistant_message=use_assistant_message,
                request_start_timestamp_ns=request_start_timestamp_ns,
                include_return_message_types=include_return_message_types,
                conversation_id=conversation_id,
                client_tools=client_tools,
                include_compaction_messages=include_compaction_messages,
                billing_context=billing_context,
            )
            async for chunk in stream:
                # Track terminal events (check at line start to avoid false positives in message content)
                if isinstance(chunk, str):
                    if "\ndata: [DONE]" in chunk or chunk.startswith("data: [DONE]"):
                        saw_done = True
                    if "\nevent: error" in chunk or chunk.startswith("event: error"):
                        saw_error = True
                yield chunk
            # Stream completed - check if we got a terminal event
            if not saw_done and not saw_error:
                # Stream ended without terminal - treat as error to avoid hanging clients
                logger.error(
                    f"Stream for run {run_id} ended without terminal event. "
                    f"Agent stop_reason: {agent_loop.stop_reason}. Emitting error + [DONE]."
                )
                stop_reason = LettaStopReason(stop_reason=StopReasonType.error)
                error_message = LettaErrorMessage(
                    run_id=run_id,
                    error_type="stream_incomplete",
                    message="Stream ended unexpectedly without a terminal event.",
                    detail=None,
                )
                error_data = {"error": error_message.model_dump()}
                # Synthesize the terminal sequence the client expects.
                yield f"data: {stop_reason.model_dump_json()}\n\n"
                yield f"event: error\ndata: {error_message.model_dump_json()}\n\n"
                yield "data: [DONE]\n\n"
                saw_error = True
                saw_done = True
                run_status = RunStatus.failed
            else:
                # set run status after successful completion
                if agent_loop.stop_reason and agent_loop.stop_reason.stop_reason.value == "cancelled":
                    run_status = RunStatus.cancelled
                else:
                    run_status = RunStatus.completed
                stop_reason = agent_loop.stop_reason if agent_loop.stop_reason else LettaStopReason(stop_reason=StopReasonType.end_turn)
        except LLMTimeoutError as e:
            run_status = RunStatus.failed
            stop_reason = LettaStopReason(stop_reason=StopReasonType.llm_api_error)
            error_message = LettaErrorMessage(
                run_id=run_id,
                error_type="llm_timeout",
                message="The LLM request timed out. Please try again.",
                detail=str(e),
            )
            error_data = {"error": error_message.model_dump()}
            logger.error(f"Run {run_id} stopped with LLM timeout error: {e}, error_data: {error_message.model_dump()}")
            yield f"data: {stop_reason.model_dump_json()}\n\n"
            yield f"event: error\ndata: {error_message.model_dump_json()}\n\n"
            # Send [DONE] marker to properly close the stream
            yield "data: [DONE]\n\n"
        except LLMRateLimitError as e:
            run_status = RunStatus.failed
            stop_reason = LettaStopReason(stop_reason=StopReasonType.llm_api_error)
            error_message = LettaErrorMessage(
                run_id=run_id,
                error_type="llm_rate_limit",
                message="Rate limit exceeded for LLM model provider. Please wait before making another request.",
                detail=str(e),
            )
            error_data = {"error": error_message.model_dump()}
            logger.warning(f"Run {run_id} stopped with LLM rate limit error: {e}, error_data: {error_message.model_dump()}")
            yield f"data: {stop_reason.model_dump_json()}\n\n"
            yield f"event: error\ndata: {error_message.model_dump_json()}\n\n"
            # Send [DONE] marker to properly close the stream
            yield "data: [DONE]\n\n"
        except LLMAuthenticationError as e:
            run_status = RunStatus.failed
            stop_reason = LettaStopReason(stop_reason=StopReasonType.llm_api_error)
            error_message = LettaErrorMessage(
                run_id=run_id,
                error_type="llm_authentication",
                message="Authentication failed with the LLM model provider.",
                detail=str(e),
            )
            error_data = {"error": error_message.model_dump()}
            logger.warning(f"Run {run_id} stopped with LLM authentication error: {e}, error_data: {error_message.model_dump()}")
            yield f"data: {stop_reason.model_dump_json()}\n\n"
            yield f"event: error\ndata: {error_message.model_dump_json()}\n\n"
            # Send [DONE] marker to properly close the stream
            yield "data: [DONE]\n\n"
        except LLMEmptyResponseError as e:
            run_status = RunStatus.failed
            stop_reason = LettaStopReason(stop_reason=StopReasonType.invalid_llm_response)
            error_message = LettaErrorMessage(
                run_id=run_id,
                error_type="llm_empty_response",
                message="LLM returned an empty response.",
                detail=str(e),
            )
            error_data = {"error": error_message.model_dump()}
            logger.warning(f"Run {run_id} stopped with LLM empty response: {e}, error_data: {error_message.model_dump()}")
            yield f"data: {stop_reason.model_dump_json()}\n\n"
            yield f"event: error\ndata: {error_message.model_dump_json()}\n\n"
            # Send [DONE] marker to properly close the stream
            yield "data: [DONE]\n\n"
        except LLMError as e:
            # Generic LLM failure; must come after the more specific LLM excepts.
            run_status = RunStatus.failed
            stop_reason = LettaStopReason(stop_reason=StopReasonType.llm_api_error)
            error_message = LettaErrorMessage(
                run_id=run_id,
                error_type="llm_error",
                message="An error occurred with the LLM request.",
                detail=str(e),
            )
            error_data = {"error": error_message.model_dump()}
            logger.error(f"Run {run_id} stopped with LLM error: {e}, error_data: {error_message.model_dump()}")
            yield f"data: {stop_reason.model_dump_json()}\n\n"
            yield f"event: error\ndata: {error_message.model_dump_json()}\n\n"
            # Send [DONE] marker to properly close the stream
            yield "data: [DONE]\n\n"
        except RunCancelledException:
            # Run was explicitly cancelled - this is not an error
            # The cancellation has already been handled by cancellation_aware_stream_wrapper
            logger.info(f"Run {run_id} was cancelled, exiting stream gracefully")
            # Send [DONE] to properly close the stream
            yield "data: [DONE]\n\n"
            # Don't update run status in finally - cancellation is already recorded
            run_status = None  # Signal to finally block to skip update
        except Exception as e:
            run_status = RunStatus.failed
            stop_reason = LettaStopReason(stop_reason=StopReasonType.error)
            # Use repr() if str() is empty (happens with Exception() with no args)
            error_detail = str(e) or repr(e)
            error_message = LettaErrorMessage(
                run_id=run_id,
                error_type="internal_error",
                message="An unknown error occurred with the LLM streaming request.",
                detail=error_detail,
            )
            error_data = {"error": error_message.model_dump()}
            logger.error(f"Run {run_id} stopped with unknown error: {error_detail}, error_data: {error_message.model_dump()}")
            yield f"data: {stop_reason.model_dump_json()}\n\n"
            yield f"event: error\ndata: {error_message.model_dump_json()}\n\n"
            # Send [DONE] marker to properly close the stream
            yield "data: [DONE]\n\n"
            # Capture for Sentry but don't re-raise to allow stream to complete gracefully
            capture_sentry_exception(e)
        finally:
            # always update run status, whether success or failure
            if run_id and self.runs_manager and run_status:
                # Extract stop_reason enum value from LettaStopReason object
                # NOTE(review): the truthy branch yields the enum member while the
                # fallback yields `.value` (a string) — presumably RunUpdate coerces
                # both; confirm the asymmetry is intentional.
                stop_reason_value = stop_reason.stop_reason if stop_reason else StopReasonType.error.value
                await self.runs_manager.update_run_by_id_async(
                    run_id=run_id,
                    conversation_id=lock_key,  # Use lock_key for lock release
                    update=RunUpdate(status=run_status, stop_reason=stop_reason_value, metadata=error_data),
                    actor=actor,
                )

    return error_aware_stream()
def _is_token_streaming_compatible(self, agent: AgentState) -> bool:
    """Return True when the agent's model endpoint supports token-level streaming."""
    token_streaming_endpoints = (
        "anthropic",
        "openai",
        "bedrock",
        "deepseek",
        "zai",
        "chatgpt_oauth",
        "minimax",
        "openrouter",
    )
    endpoint_type = agent.llm_config.model_endpoint_type
    if endpoint_type in token_streaming_endpoints:
        return True
    # Google endpoints stream tokens only for the letta_v1 agent architecture.
    return agent.agent_type == AgentType.letta_v1_agent and endpoint_type in ("google_ai", "google_vertex")
async def _create_run(
    self, agent_id: str, request: LettaStreamingRequest, run_type: str, actor: User, conversation_id: Optional[str] = None
) -> PydanticRun:
    """Register and return a new run record used to track this execution."""
    # Build the pydantic payload first so the manager call stays readable.
    pending_run = PydanticRun(
        agent_id=agent_id,
        conversation_id=conversation_id,
        background=request.background or False,
        metadata={"run_type": run_type},
        request_config=LettaRequestConfig.from_letta_request(request),
    )
    return await self.runs_manager.create_run(pydantic_run=pending_run, actor=actor)
async def _update_run_status(
    self,
    run_id: str,
    status: RunStatus,
    actor: User,
    error: Optional[str] = None,
    stop_reason: Optional[str] = None,
    conversation_id: Optional[str] = None,
):
    """Persist a status change (plus optional error / stop reason) for a run."""
    # Nothing to persist when no runs manager is configured.
    if not self.runs_manager:
        return

    run_update = RunUpdate(status=status)
    if error:
        run_update.metadata = {"error": error}
    if stop_reason:
        run_update.stop_reason = stop_reason

    await self.runs_manager.update_run_by_id_async(
        run_id=run_id,
        update=run_update,
        actor=actor,
        conversation_id=conversation_id,
    )
class OpenAIChatCompletionsStreamTransformer:
    """
    Transforms Letta streaming messages into OpenAI ChatCompletionChunk format.

    Filters out internal tool execution and only streams assistant text
    responses. Input chunks may be SSE-formatted strings (as produced by the
    Letta streaming endpoints) or already-parsed message objects.
    """

    def __init__(self, model: str, completion_id: str):
        """
        Initialize the transformer.

        Args:
            model: Model name to include in chunks
            completion_id: Unique ID for this completion (format: chatcmpl-{uuid})
        """
        self.model = model
        self.completion_id = completion_id
        # OpenAI clients expect the assistant role only on the first delta.
        self.first_chunk = True
        # One creation timestamp is reused for every chunk of this completion.
        self.created = int(time.time())

    # TODO: This is lowkey really ugly and poor code design, but this works fine for now
    def _parse_sse_chunk(self, sse_string: str):
        """
        Parse SSE-formatted string back into a message object.

        Args:
            sse_string: SSE formatted string like "data: {...}\n\n"

        Returns:
            Parsed message object, MessageStreamStatus.done for the [DONE]
            marker, or None if the chunk is unparseable or filtered out.
        """
        try:
            # strip SSE formatting
            if sse_string.startswith("data: "):
                json_str = sse_string[6:].strip()
                # handle [DONE] marker
                if json_str == "[DONE]":
                    return MessageStreamStatus.done
                # parse JSON
                data = json.loads(json_str)
                # reconstruct message object based on message_type
                message_type = data.get("message_type")
                if message_type == "assistant_message":
                    return AssistantMessage(**data)
                elif message_type == "usage_statistics":
                    return LettaUsageStatistics(**data)
                elif message_type == "stop_reason":
                    # skip stop_reason, we use [DONE] instead
                    return None
                else:
                    # other message types we skip
                    return None
            return None
        except Exception as e:
            logger.warning(f"Failed to parse SSE chunk: {e}")
            return None

    async def transform_stream(self, letta_stream: AsyncIterator) -> AsyncIterator[str]:
        """
        Transform Letta stream to OpenAI ChatCompletionChunk SSE format.

        Args:
            letta_stream: Async iterator of Letta messages (may be SSE strings or objects)

        Yields:
            SSE-formatted strings: "data: {json}\n\n"
        """
        try:
            async for raw_chunk in letta_stream:
                # parse SSE string if needed
                if isinstance(raw_chunk, str):
                    chunk = self._parse_sse_chunk(raw_chunk)
                    if chunk is None:
                        continue  # skip unparseable or filtered chunks
                else:
                    chunk = raw_chunk
                # only process assistant messages
                if isinstance(chunk, AssistantMessage):
                    # Bug fix: removed leftover debug print() that wrote every
                    # chunk to stdout on the hot streaming path.
                    async for sse_chunk in self._process_assistant_message(chunk):
                        yield sse_chunk
                # handle completion status
                elif chunk == MessageStreamStatus.done:
                    # emit final chunk with finish_reason, then the [DONE] marker
                    final_chunk = ChatCompletionChunk(
                        id=self.completion_id,
                        object="chat.completion.chunk",
                        created=self.created,
                        model=self.model,
                        choices=[
                            Choice(
                                index=0,
                                delta=ChoiceDelta(),
                                finish_reason="stop",
                            )
                        ],
                    )
                    yield f"data: {final_chunk.model_dump_json()}\n\n"
                    yield "data: [DONE]\n\n"
        except Exception as e:
            # Surface transformation failures to the client as an OpenAI-style
            # error payload rather than dropping the connection silently.
            logger.error(f"Error in OpenAI stream transformation: {e}", exc_info=True)
            error_chunk = {"error": {"message": str(e), "type": "server_error"}}
            yield f"data: {json.dumps(error_chunk)}\n\n"

    async def _process_assistant_message(self, message: AssistantMessage) -> AsyncIterator[str]:
        """
        Convert AssistantMessage to OpenAI ChatCompletionChunk(s).

        Args:
            message: Letta AssistantMessage with content

        Yields:
            SSE-formatted chunk strings
        """
        # extract text from content (can be string or list of TextContent)
        text_content = self._extract_text_content(message.content)
        if not text_content:
            return
        # only the first chunk carries the assistant role; later deltas are content-only
        if self.first_chunk:
            self.first_chunk = False
            delta = ChoiceDelta(role="assistant", content=text_content)
        else:
            delta = ChoiceDelta(content=text_content)
        chunk = ChatCompletionChunk(
            id=self.completion_id,
            object="chat.completion.chunk",
            created=self.created,
            model=self.model,
            choices=[
                Choice(
                    index=0,
                    delta=delta,
                    finish_reason=None,
                )
            ],
        )
        yield f"data: {chunk.model_dump_json()}\n\n"

    def _extract_text_content(self, content: Union[str, list[TextContent]]) -> str:
        """
        Extract text string from content field.

        Args:
            content: Either a string or list of TextContent objects

        Returns:
            Extracted text string ("" for unrecognized content types)
        """
        if isinstance(content, str):
            return content
        elif isinstance(content, list):
            # concatenate all TextContent items
            text_parts = []
            for item in content:
                if isinstance(item, TextContent):
                    text_parts.append(item.text)
            return "".join(text_parts)
        return ""
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/streaming_service.py",
"license": "Apache License 2.0",
"lines": 695,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/validators.py | import inspect
import re
from functools import wraps
from typing import Annotated, Optional
from fastapi import Path, Query
from letta.errors import LettaInvalidArgumentError
from letta.schemas.enums import PrimitiveType # PrimitiveType is now in schemas.enums
# Map from PrimitiveType to the actual prefix string (which is just the enum value)
PRIMITIVE_ID_PREFIXES = {primitive_type: primitive_type.value for primitive_type in PrimitiveType}
# Compiled regex per prefix matching "<prefix>-<uuid4>" (anchored ^...$); the
# UUID portion enforces version-4 / variant bits.
PRIMITIVE_ID_PATTERNS = {
    # f-string interpolation gets confused because of the regex's required curly braces {}
    prefix: re.compile("^" + prefix + "-[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$")
    for prefix in PRIMITIVE_ID_PREFIXES.values()
}
def _create_path_validator_factory(primitive: str):
    """Build a zero-argument factory that returns a fresh ``Path`` validator.

    Producing a new ``Path`` object on every call avoids shared-state issues
    when the same validator is reused across endpoints with different
    parameter names.
    """
    id_length = len(primitive) + 1 + 36  # "<prefix>-" plus a 36-char uuid4

    def factory():
        return Path(
            description=f"The ID of the {primitive} in the format '{primitive}-<uuid4>'",
            pattern=PRIMITIVE_ID_PATTERNS[primitive].pattern,
            examples=[f"{primitive}-123e4567-e89b-42d3-8456-426614174000"],
            min_length=id_length,
            max_length=id_length,
        )

    return factory
# PATH_VALIDATORS now contains factory functions, not Path objects
# Usage: folder_id: str = PATH_VALIDATORS[PrimitiveType.FOLDER.value]()
# (call the factory at use-site so each endpoint gets its own Path instance)
PATH_VALIDATORS = {primitive_type.value: _create_path_validator_factory(primitive_type.value) for primitive_type in PrimitiveType}
def _create_conversation_id_or_default_path_validator_factory():
    """Factory for conversation-ID path validators that also accept the
    literal 'default' and agent IDs (backwards compatibility)."""
    conv_prefix = PrimitiveType.CONVERSATION.value
    agent_prefix = PrimitiveType.AGENT.value
    conv_pattern = PRIMITIVE_ID_PATTERNS[conv_prefix].pattern
    agent_pattern = PRIMITIVE_ID_PATTERNS[agent_prefix].pattern
    # Both patterns are anchored (^...$); strip the anchors before folding
    # them into one anchored alternation that also accepts 'default'.
    combined_pattern = f"^(default|{conv_pattern[1:-1]}|{agent_pattern[1:-1]})$"

    def factory():
        return Path(
            description=(
                f"The conversation identifier. Can be a conversation ID ('{conv_prefix}-<uuid4>'), "
                f"'default' for agent-direct mode (with agent_id parameter), "
                f"or an agent ID ('{agent_prefix}-<uuid4>') for backwards compatibility (deprecated)."
            ),
            pattern=combined_pattern,
            examples=[
                "default",
                f"{conv_prefix}-123e4567-e89b-42d3-8456-426614174000",
                f"{agent_prefix}-123e4567-e89b-42d3-8456-426614174000",
            ],
            min_length=1,
            max_length=max(len(conv_prefix), len(agent_prefix)) + 1 + 36,
        )

    return factory
# Type aliases for common ID types
# These can be used directly in route handler signatures for cleaner code
# (each alias validates a path parameter against its primitive's ID pattern)
AgentId = Annotated[str, PATH_VALIDATORS[PrimitiveType.AGENT.value]()]
ToolId = Annotated[str, PATH_VALIDATORS[PrimitiveType.TOOL.value]()]
SourceId = Annotated[str, PATH_VALIDATORS[PrimitiveType.SOURCE.value]()]
BlockId = Annotated[str, PATH_VALIDATORS[PrimitiveType.BLOCK.value]()]
MessageId = Annotated[str, PATH_VALIDATORS[PrimitiveType.MESSAGE.value]()]
RunId = Annotated[str, PATH_VALIDATORS[PrimitiveType.RUN.value]()]
JobId = Annotated[str, PATH_VALIDATORS[PrimitiveType.JOB.value]()]
GroupId = Annotated[str, PATH_VALIDATORS[PrimitiveType.GROUP.value]()]
FileId = Annotated[str, PATH_VALIDATORS[PrimitiveType.FILE.value]()]
FolderId = Annotated[str, PATH_VALIDATORS[PrimitiveType.FOLDER.value]()]
ArchiveId = Annotated[str, PATH_VALIDATORS[PrimitiveType.ARCHIVE.value]()]
PassageId = Annotated[str, PATH_VALIDATORS[PrimitiveType.PASSAGE.value]()]
ProviderId = Annotated[str, PATH_VALIDATORS[PrimitiveType.PROVIDER.value]()]
SandboxConfigId = Annotated[str, PATH_VALIDATORS[PrimitiveType.SANDBOX_CONFIG.value]()]
StepId = Annotated[str, PATH_VALIDATORS[PrimitiveType.STEP.value]()]
IdentityId = Annotated[str, PATH_VALIDATORS[PrimitiveType.IDENTITY.value]()]
ConversationId = Annotated[str, PATH_VALIDATORS[PrimitiveType.CONVERSATION.value]()]

# Conversation ID with support for 'default' and agent IDs (for agent-direct mode endpoints)
# Backwards compatible - agent-* will be deprecated in favor of conversation_id='default' + agent_id param
ConversationIdOrDefault = Annotated[str, _create_conversation_id_or_default_path_validator_factory()()]

# Infrastructure types
McpServerId = Annotated[str, PATH_VALIDATORS[PrimitiveType.MCP_SERVER.value]()]
McpOAuthId = Annotated[str, PATH_VALIDATORS[PrimitiveType.MCP_OAUTH.value]()]
FileAgentId = Annotated[str, PATH_VALIDATORS[PrimitiveType.FILE_AGENT.value]()]

# Configuration types
SandboxEnvId = Annotated[str, PATH_VALIDATORS[PrimitiveType.SANDBOX_ENV.value]()]
AgentEnvId = Annotated[str, PATH_VALIDATORS[PrimitiveType.AGENT_ENV.value]()]

# Core entity types
UserId = Annotated[str, PATH_VALIDATORS[PrimitiveType.USER.value]()]
OrganizationId = Annotated[str, PATH_VALIDATORS[PrimitiveType.ORGANIZATION.value]()]
ToolRuleId = Annotated[str, PATH_VALIDATORS[PrimitiveType.TOOL_RULE.value]()]

# Batch processing types
BatchItemId = Annotated[str, PATH_VALIDATORS[PrimitiveType.BATCH_ITEM.value]()]
BatchRequestId = Annotated[str, PATH_VALIDATORS[PrimitiveType.BATCH_REQUEST.value]()]

# Telemetry types
ProviderTraceId = Annotated[str, PATH_VALIDATORS[PrimitiveType.PROVIDER_TRACE.value]()]
def raise_on_invalid_id(param_name: str, expected_prefix: PrimitiveType):
    """
    Decorator validating that an ID argument matches its expected prefix format.

    Stackable: apply it multiple times on the same function to validate
    several different ID parameters.

    Args:
        param_name: The name of the function parameter to validate (e.g., "agent_id")
        expected_prefix: The expected primitive type (e.g., PrimitiveType.AGENT)

    Example:
        @raise_on_invalid_id(param_name="agent_id", expected_prefix=PrimitiveType.AGENT)
        @raise_on_invalid_id(param_name="folder_id", expected_prefix=PrimitiveType.FOLDER)
        def my_function(agent_id: str, folder_id: str):
            pass
    """

    def decorator(function):
        @wraps(function)
        def wrapper(*args, **kwargs):
            # Resolve the actual argument value (positional or keyword).
            bound = inspect.signature(function).bind(*args, **kwargs)
            bound.apply_defaults()
            value = bound.arguments.get(param_name)
            # None is allowed (optional IDs); only validate present values.
            if value is not None:
                prefix = PRIMITIVE_ID_PREFIXES[expected_prefix]
                if PRIMITIVE_ID_PATTERNS[prefix].match(value) is None:
                    raise LettaInvalidArgumentError(
                        message=f"Invalid {expected_prefix.value} ID format: {value}. Expected format: '{prefix}-<uuid4>'",
                        argument_name=param_name,
                    )
            return function(*args, **kwargs)

        return wrapper

    return decorator
# =============================================================================
# Query Parameter Validators
# =============================================================================
# Format validators for common query parameters to match frontend constraints


def _create_id_query_validator(primitive: str):
    """Build a ``Query`` validator enforcing the ``<prefix>-<uuid4>`` ID format.

    Args:
        primitive: The primitive type prefix (e.g., "agent", "tool")

    Returns:
        A ``Query`` validator with pattern and exact-length constraints.
    """
    expected_length = len(primitive) + 1 + 36  # "<prefix>-" plus a 36-char uuid4
    return Query(
        description=f"The ID of the {primitive} in the format '{primitive}-<uuid4>'",
        pattern=PRIMITIVE_ID_PATTERNS[primitive].pattern,
        examples=[f"{primitive}-123e4567-e89b-42d3-8456-426614174000"],
        min_length=expected_length,
        max_length=expected_length,
    )
# Query parameter ID validators with format checking
# Optional[...] variants permit the parameter to be omitted entirely;
# the *Required variant rejects a missing value.
AgentIdQuery = Annotated[Optional[str], _create_id_query_validator(PrimitiveType.AGENT.value)]
ToolIdQuery = Annotated[Optional[str], _create_id_query_validator(PrimitiveType.TOOL.value)]
SourceIdQuery = Annotated[Optional[str], _create_id_query_validator(PrimitiveType.SOURCE.value)]
BlockIdQuery = Annotated[Optional[str], _create_id_query_validator(PrimitiveType.BLOCK.value)]
MessageIdQuery = Annotated[Optional[str], _create_id_query_validator(PrimitiveType.MESSAGE.value)]
RunIdQuery = Annotated[Optional[str], _create_id_query_validator(PrimitiveType.RUN.value)]
JobIdQuery = Annotated[Optional[str], _create_id_query_validator(PrimitiveType.JOB.value)]
GroupIdQuery = Annotated[Optional[str], _create_id_query_validator(PrimitiveType.GROUP.value)]
IdentityIdQuery = Annotated[Optional[str], _create_id_query_validator(PrimitiveType.IDENTITY.value)]
UserIdQuery = Annotated[Optional[str], _create_id_query_validator(PrimitiveType.USER.value)]
UserIdQueryRequired = Annotated[str, _create_id_query_validator(PrimitiveType.USER.value)]
# =============================================================================
# String Field Validators
# =============================================================================
# Format validators for common string fields

# Label validator: alphanumeric, hyphens, underscores, forward slashes, max 50 chars
BlockLabelQuery = Annotated[
    Optional[str],
    Query(
        description="Label to include (alphanumeric, hyphens, underscores, forward slashes)",
        pattern=r"^[a-zA-Z0-9_/-]+$",
        min_length=1,
        max_length=50,
        examples=["human", "persona", "the_label_of-a-block", "the_label_of-a-block/with-forward-slash"],
    ),
]

# Name validator: similar to label but allows spaces, max 100 chars
BlockNameQuery = Annotated[
    Optional[str],
    Query(
        description="Name filter (alphanumeric, spaces, hyphens, underscores)",
        pattern=r"^[a-zA-Z0-9 _-]+$",
        min_length=1,
        max_length=100,
        examples=["My Agent", "test_tool", "default-config"],
    ),
]

# Search query validator: same charset/length constraints as BlockLabelQuery
BlockLabelSearchQuery = Annotated[
    Optional[str],
    Query(
        description="Search blocks by label. If provided, returns blocks whose label matches the search query. This is a full-text search on block labels.",
        pattern=r"^[a-zA-Z0-9_/-]+$",
        min_length=1,
        max_length=50,
        examples=["human", "persona", "the_label_of-a-block", "the_label_of-a-block/with-forward-slash"],
    ),
]

# Free-text search on block values (no charset restriction, max 200 chars)
BlockValueSearchQuery = Annotated[
    Optional[str],
    Query(
        description="Search blocks by value. If provided, returns blocks whose value matches the search query. This is a full-text search on block values.",
        min_length=1,
        max_length=200,
    ),
]

# Free-text search on block descriptions (no charset restriction, max 200 chars)
BlockDescriptionSearchQuery = Annotated[
    Optional[str],
    Query(
        description="Search blocks by description. If provided, returns blocks whose description matches the search query. This is a full-text search on block descriptions.",
        min_length=1,
        max_length=200,
    ),
]
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/validators.py",
"license": "Apache License 2.0",
"lines": 211,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:tests/managers/test_provider_manager.py | """Tests for ProviderManager encryption/decryption logic."""
import pytest
from letta.orm.provider import Provider as ProviderModel
from letta.schemas.enums import ProviderCategory, ProviderType
from letta.schemas.providers import ProviderCreate, ProviderUpdate
from letta.schemas.secret import Secret
from letta.server.db import db_registry
from letta.services.organization_manager import OrganizationManager
from letta.services.provider_manager import ProviderManager
from letta.services.user_manager import UserManager
from letta.settings import settings
@pytest.fixture
async def default_organization():
    """Create and yield the default organization."""
    org_manager = OrganizationManager()
    yield await org_manager.create_default_organization_async()
@pytest.fixture
async def default_user(default_organization):
    """Create and yield the default user inside the default organization."""
    user_manager = UserManager()
    yield await user_manager.create_default_actor_async(org_id=default_organization.id)
@pytest.fixture
async def provider_manager():
    """Provide a fresh ProviderManager instance for each test."""
    return ProviderManager()
@pytest.fixture
def encryption_key():
    """Guarantee an encryption key is configured for the duration of a test."""
    saved_key = settings.encryption_key
    if not settings.encryption_key:
        # Supply a deterministic key so encryption code paths are exercised.
        settings.encryption_key = "test-encryption-key-32-bytes!!"
    yield settings.encryption_key
    # Teardown: put back whatever was configured before the test.
    settings.encryption_key = saved_key
# ======================================================================================================================
# Provider Encryption Tests
# ======================================================================================================================


@pytest.mark.asyncio
async def test_provider_create_encrypts_api_key(provider_manager, default_user, encryption_key):
    """Creating a provider should encrypt api_key into the api_key_enc column."""
    plaintext_key = "sk-test-plaintext-api-key-12345"
    create_request = ProviderCreate(
        name="test-openai-provider",
        provider_type=ProviderType.openai,
        api_key=plaintext_key,
        base_url="https://api.openai.com/v1",
    )

    provider = await provider_manager.create_provider_async(create_request, actor=default_user)

    # Identity fields survive creation.
    assert provider is not None
    assert provider.name == "test-openai-provider"
    assert provider.provider_type == ProviderType.openai

    # The encrypted secret round-trips back to the original plaintext.
    assert provider.api_key_enc.get_plaintext() == plaintext_key

    # Inspect the raw ORM row to confirm the encrypted column is populated.
    async with db_registry.async_session() as session:
        row = await ProviderModel.read_async(
            db_session=session,
            identifier=provider.id,
            actor=default_user,
        )
        assert row.api_key_enc is not None
        assert Secret.from_encrypted(row.api_key_enc).get_plaintext() == plaintext_key
@pytest.mark.asyncio
async def test_provider_read_decrypts_api_key(provider_manager, default_user, encryption_key):
    """Reading a provider back should decrypt the api_key from api_key_enc."""
    created = await provider_manager.create_provider_async(
        ProviderCreate(
            name="test-anthropic-provider",
            provider_type=ProviderType.anthropic,
            api_key="sk-ant-test-key-67890",
        ),
        actor=default_user,
    )

    # Fetch a fresh copy by ID and confirm the secret decrypts correctly.
    fetched = await provider_manager.get_provider_async(created.id, actor=default_user)
    assert fetched.api_key_enc.get_plaintext() == "sk-ant-test-key-67890"
@pytest.mark.asyncio
async def test_provider_update_encrypts_new_api_key(provider_manager, default_user, encryption_key):
    """Updating a provider's api_key should encrypt and persist the new value."""
    created = await provider_manager.create_provider_async(
        ProviderCreate(
            name="test-groq-provider",
            provider_type=ProviderType.groq,
            api_key="gsk-initial-key-123",
        ),
        actor=default_user,
    )
    provider_id = created.id

    # Swap in a new key via the manager's update path.
    updated = await provider_manager.update_provider_async(
        provider_id,
        ProviderUpdate(api_key="gsk-updated-key-456"),
        actor=default_user,
    )
    assert updated.api_key_enc.get_plaintext() == "gsk-updated-key-456"

    # The stored ciphertext must decrypt to the new value, not the old one.
    async with db_registry.async_session() as session:
        row = await ProviderModel.read_async(
            db_session=session,
            identifier=provider_id,
            actor=default_user,
        )
        assert row.api_key_enc is not None
        assert Secret.from_encrypted(row.api_key_enc).get_plaintext() == "gsk-updated-key-456"
@pytest.mark.asyncio
async def test_bedrock_credentials_encryption(provider_manager, default_user, encryption_key):
    """Bedrock providers should encrypt both access_key and api_key (secret_key)."""
    provider = await provider_manager.create_provider_async(
        ProviderCreate(
            name="test-bedrock-provider",
            provider_type=ProviderType.bedrock,
            api_key="secret-access-key-xyz",  # This is the secret key
            access_key="access-key-id-abc",  # This is the access key ID
            region="us-east-1",
        ),
        actor=default_user,
    )

    # Both credentials round-trip through their encrypted fields.
    assert provider.api_key_enc.get_plaintext() == "secret-access-key-xyz"
    assert provider.access_key_enc.get_plaintext() == "access-key-id-abc"

    # Both encrypted columns must be populated and decrypt correctly.
    async with db_registry.async_session() as session:
        row = await ProviderModel.read_async(
            db_session=session,
            identifier=provider.id,
            actor=default_user,
        )
        assert row.api_key_enc is not None
        assert row.access_key_enc is not None
        assert Secret.from_encrypted(row.api_key_enc).get_plaintext() == "secret-access-key-xyz"
        assert Secret.from_encrypted(row.access_key_enc).get_plaintext() == "access-key-id-abc"

    # The convenience accessor returns the decrypted triple.
    access_key, secret_key, region = await provider_manager.get_bedrock_credentials_async("test-bedrock-provider", actor=default_user)
    assert access_key == "access-key-id-abc"
    assert secret_key == "secret-access-key-xyz"
    assert region == "us-east-1"
@pytest.mark.asyncio
async def test_provider_secret_not_exposed_in_logs(provider_manager, default_user, encryption_key):
    """str()/repr() of a Secret must never reveal the wrapped plaintext."""
    created = await provider_manager.create_provider_async(
        ProviderCreate(
            name="test-secret-provider",
            provider_type=ProviderType.openai,
            api_key="sk-very-secret-key-do-not-log",
        ),
        actor=default_user,
    )

    secret = created.api_key_enc
    # Check both textual renderings: the raw key must be absent, and some
    # masked/typed placeholder should appear instead.
    for rendered in (str(secret), repr(secret)):
        assert "sk-very-secret-key-do-not-log" not in rendered
        assert "****" in rendered or "Secret" in rendered
@pytest.mark.asyncio
async def test_provider_pydantic_to_orm_serialization(provider_manager, default_user, encryption_key):
    """The Pydantic -> ORM -> Pydantic round-trip must preserve provider data and secrets."""
    # Step 1 (Pydantic -> ORM): create through the normal flow.
    created = await provider_manager.create_provider_async(
        ProviderCreate(
            name="test-roundtrip-provider",
            provider_type=ProviderType.openai,
            api_key="sk-roundtrip-test-key-999",
            base_url="https://api.openai.com/v1",
        ),
        actor=default_user,
    )
    plaintext_key = created.api_key_enc.get_plaintext()

    # Step 2 (ORM -> Pydantic): read the provider back and confirm every field survived.
    fetched = await provider_manager.get_provider_async(created.id, actor=default_user)
    assert fetched.name == "test-roundtrip-provider"
    assert fetched.provider_type == ProviderType.openai
    assert fetched.base_url == "https://api.openai.com/v1"
    # The Secret object on the fetched copy decrypts to the original key.
    fetched_secret = fetched.api_key_enc
    assert fetched_secret.get_plaintext() == plaintext_key

    # Step 3 (back toward ORM): the encrypted field must serialize and still decrypt.
    orm_payload = fetched.model_dump(to_orm=True)
    assert "api_key_enc" in orm_payload
    assert orm_payload["api_key_enc"] is not None
    assert Secret.from_encrypted(orm_payload["api_key_enc"]).get_plaintext() == plaintext_key
@pytest.mark.asyncio
async def test_provider_with_none_api_key(provider_manager, default_user, encryption_key):
    """Providers created with an empty api_key still encrypt and round-trip the empty string.

    NOTE: despite the test name, the key used here is ``""`` (empty string), not
    ``None`` — some providers (e.g. local Ollama) simply need no key.
    """
    created = await provider_manager.create_provider_async(
        ProviderCreate(
            name="test-no-key-provider",
            provider_type=ProviderType.ollama,
            api_key="",  # empty string stands in for "no key required"
            base_url="http://localhost:11434",
        ),
        actor=default_user,
    )
    assert created is not None
    assert created.name == "test-no-key-provider"

    # The stored row encrypts even the empty string, and it decrypts back to "".
    async with db_registry.async_session() as session:
        row = await ProviderModel.read_async(
            db_session=session,
            identifier=created.id,
            actor=default_user,
        )
        assert row.api_key_enc is not None
        assert Secret.from_encrypted(row.api_key_enc).get_plaintext() == ""
@pytest.mark.asyncio
async def test_list_providers_decrypts_all(provider_manager, default_user, encryption_key):
    """Listing providers must decrypt every provider's api_key, not just one."""
    # Create three providers, each with a distinct key.
    created_ids = []
    for i in range(3):
        spec = ProviderCreate(name=f"test-provider-{i}", provider_type=ProviderType.openai, api_key=f"sk-key-{i}")
        created = await provider_manager.create_provider_async(spec, actor=default_user)
        created_ids.append(created.id)

    # List everything, keep only the providers created above.
    listed = await provider_manager.list_providers_async(actor=default_user)
    ours = [p for p in listed if p.id in created_ids]
    assert len(ours) == 3

    # Sorting by name restores creation order, so each key decrypts to its own value.
    for i, provider in enumerate(sorted(ours, key=lambda p: p.name)):
        assert provider.api_key_enc.get_plaintext() == f"sk-key-{i}"
# ======================================================================================================================
# Handle to Config Conversion Tests
# ======================================================================================================================
@pytest.mark.asyncio
async def test_handle_to_llm_config_conversion(provider_manager, default_user):
    """Test that handle to LLMConfig conversion works correctly with database lookup.

    Seeds one provider with two LLM models and one embedding model via
    ``sync_provider_models_async``, then resolves configs by their
    "<provider>/<model>" handles and checks the returned fields.
    A lookup with an unknown handle must raise ``NoResultFound``.
    """
    from letta.orm.errors import NoResultFound
    from letta.schemas.embedding_config import EmbeddingConfig
    from letta.schemas.llm_config import LLMConfig

    # Create a test provider
    provider_create = ProviderCreate(
        name="test-handle-provider", provider_type=ProviderType.openai, api_key="sk-test-handle-key", base_url="https://api.openai.com/v1"
    )
    provider = await provider_manager.create_provider_async(provider_create, actor=default_user)

    # Sync some test models (handles are "<provider-name>/<model-name>")
    llm_models = [
        LLMConfig(
            model="gpt-4",
            model_endpoint_type="openai",
            model_endpoint="https://api.openai.com/v1",
            context_window=8192,
            handle="test-handle-provider/gpt-4",
            provider_name=provider.name,
            provider_category=ProviderCategory.base,
        ),
        LLMConfig(
            model="gpt-3.5-turbo",
            model_endpoint_type="openai",
            model_endpoint="https://api.openai.com/v1",
            context_window=4096,
            handle="test-handle-provider/gpt-3.5-turbo",
            provider_name=provider.name,
            provider_category=ProviderCategory.base,
        ),
    ]
    embedding_models = [
        EmbeddingConfig(
            embedding_model="text-embedding-ada-002",
            embedding_endpoint_type="openai",
            embedding_endpoint="https://api.openai.com/v1",
            embedding_dim=1536,
            embedding_chunk_size=300,
            handle="test-handle-provider/text-embedding-ada-002",
        )
    ]
    await provider_manager.sync_provider_models_async(
        provider=provider, llm_models=llm_models, embedding_models=embedding_models, organization_id=default_user.organization_id
    )

    # Test LLM config from handle
    llm_config = await provider_manager.get_llm_config_from_handle(handle="test-handle-provider/gpt-4", actor=default_user)

    # Verify the returned config
    assert llm_config.model == "gpt-4"
    assert llm_config.handle == "test-handle-provider/gpt-4"
    assert llm_config.context_window == 8192
    assert llm_config.model_endpoint == "https://api.openai.com/v1"
    assert llm_config.provider_name == "test-handle-provider"

    # Test embedding config from handle
    embedding_config = await provider_manager.get_embedding_config_from_handle(
        handle="test-handle-provider/text-embedding-ada-002", actor=default_user
    )

    # Verify the returned config
    assert embedding_config.embedding_model == "text-embedding-ada-002"
    assert embedding_config.handle == "test-handle-provider/text-embedding-ada-002"
    assert embedding_config.embedding_dim == 1536
    assert embedding_config.embedding_chunk_size == 300
    assert embedding_config.embedding_endpoint == "https://api.openai.com/v1"

    # Test context window limit override would be done at server level
    # The provider_manager method doesn't support context_window_limit directly

    # Test error handling for non-existent handle
    with pytest.raises(NoResultFound):
        await provider_manager.get_llm_config_from_handle(handle="nonexistent/model", actor=default_user)
@pytest.mark.asyncio
async def test_byok_provider_auto_syncs_models(provider_manager, default_user, monkeypatch):
    """Test that creating a BYOK provider attempts to sync its models.

    ``_sync_default_models_for_provider`` is replaced with a mock that feeds
    canned OpenAI-style models (rewritten to the BYOK provider's name) into
    ``sync_provider_models_async``, so no real API calls are made.
    """
    from letta.schemas.embedding_config import EmbeddingConfig
    from letta.schemas.llm_config import LLMConfig

    # Mock the list_llm_models_async method
    async def mock_list_llm():
        return [
            LLMConfig(
                model="gpt-4o",
                model_endpoint_type="openai",
                model_endpoint="https://api.openai.com/v1",
                context_window=128000,
                handle="openai/gpt-4o",
                provider_name="openai",
                provider_category=ProviderCategory.base,
            ),
            LLMConfig(
                model="gpt-4",
                model_endpoint_type="openai",
                model_endpoint="https://api.openai.com/v1",
                context_window=8192,
                handle="openai/gpt-4",
                provider_name="openai",
                provider_category=ProviderCategory.base,
            ),
        ]

    # Mock the list_embedding_models_async method
    async def mock_list_embedding():
        return [
            EmbeddingConfig(
                embedding_model="text-embedding-ada-002",
                embedding_endpoint_type="openai",
                embedding_endpoint="https://api.openai.com/v1",
                embedding_dim=1536,
                embedding_chunk_size=300,
                handle="openai/text-embedding-ada-002",
            )
        ]

    # Mock the _sync_default_models_for_provider method directly
    async def mock_sync(provider, actor):
        # Get mock models and update them for this provider
        llm_models = await mock_list_llm()
        embedding_models = await mock_list_embedding()

        # Update models to match the BYOK provider (handles become "<byok-name>/<model>")
        for model in llm_models:
            model.provider_name = provider.name
            model.handle = f"{provider.name}/{model.model}"
            model.provider_category = provider.provider_category
        for model in embedding_models:
            model.handle = f"{provider.name}/{model.embedding_model}"

        # Call sync_provider_models_async with mock data
        await provider_manager.sync_provider_models_async(
            provider=provider, llm_models=llm_models, embedding_models=embedding_models, organization_id=actor.organization_id
        )

    monkeypatch.setattr(provider_manager, "_sync_default_models_for_provider", mock_sync)

    # Create a BYOK OpenAI provider (simulates UI "Add API Key" flow)
    provider_create = ProviderCreate(name="my-openai-key", provider_type=ProviderType.openai, api_key="sk-my-personal-key-123")

    # Create the BYOK provider (is_byok=True is the default)
    provider = await provider_manager.create_provider_async(provider_create, actor=default_user, is_byok=True)

    # Verify provider was created
    assert provider.name == "my-openai-key"
    assert provider.provider_type == ProviderType.openai

    # List models for this provider - they should have been auto-synced
    models = await provider_manager.list_models_async(actor=default_user, provider_id=provider.id)

    # Should have both LLM and embedding models
    llm_models = [m for m in models if m.model_type == "llm"]
    embedding_models = [m for m in models if m.model_type == "embedding"]
    assert len(llm_models) > 0, "No LLM models were synced"
    assert len(embedding_models) > 0, "No embedding models were synced"

    # Verify handles are correctly formatted with BYOK provider name
    for model in models:
        assert model.handle.startswith(f"{provider.name}/")

    # Test that we can get LLM config from handle
    llm_config = await provider_manager.get_llm_config_from_handle(handle="my-openai-key/gpt-4o", actor=default_user)
    assert llm_config.model == "gpt-4o"
    assert llm_config.provider_name == "my-openai-key"
# ======================================================================================================================
# Server Startup Provider Sync Tests
# ======================================================================================================================
@pytest.mark.asyncio
async def test_server_startup_syncs_base_providers(default_user, default_organization, monkeypatch):
    """Test that server startup properly syncs base provider models from environment.

    This test simulates the server startup process and verifies that:
    1. Base providers from environment variables are synced to database
    2. Provider models are fetched from mocked API endpoints
    3. Models are properly persisted to the database with correct metadata
    4. Models can be retrieved using handles
    """
    from letta.server.server import SyncServer

    # Mock OpenAI API responses
    mock_openai_models = {
        "data": [
            {
                "id": "gpt-4",
                "object": "model",
                "created": 1687882411,
                "owned_by": "openai",
                "max_model_len": 8192,
            },
            {
                "id": "gpt-4-turbo",
                "object": "model",
                "created": 1712361441,
                "owned_by": "system",
                "max_model_len": 128000,
            },
            {
                "id": "text-embedding-ada-002",
                "object": "model",
                "created": 1671217299,
                "owned_by": "openai-internal",
            },
            {
                "id": "gpt-4-vision",  # Should be filtered out by OpenAI provider logic (has disallowed keyword)
                "object": "model",
                "created": 1698959748,
                "owned_by": "system",
                "max_model_len": 8192,
            },
        ]
    }

    # Mock Anthropic API responses
    mock_anthropic_models = {
        "data": [
            {
                "id": "claude-3-5-sonnet-20241022",
                "type": "model",
                "display_name": "Claude 3.5 Sonnet",
                "created_at": "2024-10-22T00:00:00Z",
            },
            {
                "id": "claude-3-opus-20240229",
                "type": "model",
                "display_name": "Claude 3 Opus",
                "created_at": "2024-02-29T00:00:00Z",
            },
        ]
    }

    # Mock the API calls for OpenAI
    async def mock_openai_get_model_list_async(*args, **kwargs):
        return mock_openai_models

    # Mock Anthropic models.list() response as an async iterable
    # (the real SDK returns an AsyncPage that supports async iteration)
    class MockAnthropicModelItem:
        def __init__(self, data):
            self._data = data

        def model_dump(self):
            return self._data

    class MockAnthropicAsyncPage:
        def __init__(self, items):
            self._items = [MockAnthropicModelItem(item) for item in items]

        def __aiter__(self):
            return self._async_iter()

        async def _async_iter(self):
            for item in self._items:
                yield item

    # Mock the Anthropic AsyncAnthropic client
    # NOTE: list() must be a regular (non-async) method that returns an async iterable,
    # because the real Anthropic SDK's models.list() returns an AsyncPage (which has __aiter__)
    # directly, and the code uses `async for model in client.models.list()`.
    class MockAnthropicModels:
        def list(self):
            return MockAnthropicAsyncPage(mock_anthropic_models["data"])

    class MockAsyncAnthropic:
        def __init__(self, *args, **kwargs):
            self.models = MockAnthropicModels()

    # Patch the actual API calling functions
    monkeypatch.setattr(
        "letta.llm_api.openai.openai_get_model_list_async",
        mock_openai_get_model_list_async,
    )
    monkeypatch.setattr(
        "anthropic.AsyncAnthropic",
        MockAsyncAnthropic,
    )

    # Clear ALL provider-related env vars first to ensure clean state
    monkeypatch.delenv("OPENAI_API_KEY", raising=False)
    monkeypatch.delenv("ANTHROPIC_API_KEY", raising=False)
    monkeypatch.delenv("GEMINI_API_KEY", raising=False)
    monkeypatch.delenv("GOOGLE_CLOUD_PROJECT", raising=False)
    monkeypatch.delenv("GOOGLE_CLOUD_LOCATION", raising=False)
    monkeypatch.delenv("AZURE_API_KEY", raising=False)
    monkeypatch.delenv("GROQ_API_KEY", raising=False)
    monkeypatch.delenv("TOGETHER_API_KEY", raising=False)
    monkeypatch.delenv("VLLM_API_BASE", raising=False)
    monkeypatch.delenv("SGLANG_API_BASE", raising=False)
    monkeypatch.delenv("AWS_ACCESS_KEY_ID", raising=False)
    monkeypatch.delenv("AWS_SECRET_ACCESS_KEY", raising=False)
    monkeypatch.delenv("LMSTUDIO_BASE_URL", raising=False)
    monkeypatch.delenv("DEEPSEEK_API_KEY", raising=False)
    monkeypatch.delenv("XAI_API_KEY", raising=False)
    monkeypatch.delenv("OPENROUTER_API_KEY", raising=False)
    monkeypatch.delenv("ZAI_API_KEY", raising=False)

    # Set environment variables to enable only OpenAI and Anthropic
    monkeypatch.setenv("OPENAI_API_KEY", "sk-test-key-12345")
    monkeypatch.setenv("ANTHROPIC_API_KEY", "sk-ant-test-key-67890")

    # Reload model_settings to pick up new env vars
    # NOTE(review): model_settings appears to be loaded at import time, so its
    # attributes are patched directly rather than relying on env vars alone — confirm.
    from letta.settings import model_settings

    monkeypatch.setattr(model_settings, "openai_api_key", "sk-test-key-12345")
    monkeypatch.setattr(model_settings, "anthropic_api_key", "sk-ant-test-key-67890")
    monkeypatch.setattr(model_settings, "gemini_api_key", None)
    monkeypatch.setattr(model_settings, "google_cloud_project", None)
    monkeypatch.setattr(model_settings, "google_cloud_location", None)
    monkeypatch.setattr(model_settings, "azure_api_key", None)
    monkeypatch.setattr(model_settings, "groq_api_key", None)
    monkeypatch.setattr(model_settings, "together_api_key", None)
    monkeypatch.setattr(model_settings, "vllm_api_base", None)
    monkeypatch.setattr(model_settings, "sglang_api_base", None)
    monkeypatch.setattr(model_settings, "aws_access_key_id", None)
    monkeypatch.setattr(model_settings, "aws_secret_access_key", None)
    monkeypatch.setattr(model_settings, "lmstudio_base_url", None)
    monkeypatch.setattr(model_settings, "deepseek_api_key", None)
    monkeypatch.setattr(model_settings, "xai_api_key", None)
    monkeypatch.setattr(model_settings, "openrouter_api_key", None)
    monkeypatch.setattr(model_settings, "zai_api_key", None)

    # Create server instance (this will load enabled providers from environment)
    server = SyncServer(init_with_default_org_and_user=False)

    # Manually set up the default user/org (since we disabled auto-init)
    server.default_user = default_user
    server.default_org = default_organization

    # Verify enabled providers were loaded
    assert len(server._enabled_providers) == 3  # Exactly: letta, openai, anthropic
    enabled_provider_names = [p.name for p in server._enabled_providers]
    assert "letta" in enabled_provider_names
    assert "openai" in enabled_provider_names
    assert "anthropic" in enabled_provider_names

    # First, sync base providers to database (this is what init_async does)
    await server.provider_manager.sync_base_providers(
        base_providers=server._enabled_providers,
        actor=default_user,
    )

    # Now call the actual _sync_provider_models_async method
    # This simulates what happens during server startup
    await server._sync_provider_models_async()

    # Verify OpenAI models were synced
    openai_providers = await server.provider_manager.list_providers_async(
        name="openai",
        actor=default_user,
    )
    assert len(openai_providers) == 1, "OpenAI provider should exist"
    openai_provider = openai_providers[0]

    # Check OpenAI LLM models
    openai_llm_models = await server.provider_manager.list_models_async(
        actor=default_user,
        provider_id=openai_provider.id,
        model_type="llm",
    )
    # Should have gpt-4 and gpt-4-turbo (gpt-4-vision filtered out due to "vision" keyword)
    assert len(openai_llm_models) >= 2, f"Expected at least 2 OpenAI LLM models, got {len(openai_llm_models)}"
    openai_model_names = [m.name for m in openai_llm_models]
    assert "gpt-4" in openai_model_names
    assert "gpt-4-turbo" in openai_model_names

    # Check OpenAI embedding models
    openai_embedding_models = await server.provider_manager.list_models_async(
        actor=default_user,
        provider_id=openai_provider.id,
        model_type="embedding",
    )
    assert len(openai_embedding_models) >= 1, "Expected at least 1 OpenAI embedding model"
    embedding_model_names = [m.name for m in openai_embedding_models]
    assert "text-embedding-ada-002" in embedding_model_names

    # Verify model metadata is correct (context window comes from mocked max_model_len)
    gpt4_models = [m for m in openai_llm_models if m.name == "gpt-4"]
    assert len(gpt4_models) > 0, "gpt-4 model should exist"
    gpt4_model = gpt4_models[0]
    assert gpt4_model.handle == "openai/gpt-4"
    assert gpt4_model.model_endpoint_type == "openai"
    assert gpt4_model.max_context_window == 8192
    assert gpt4_model.enabled is True

    # Verify Anthropic models were synced
    anthropic_providers = await server.provider_manager.list_providers_async(
        name="anthropic",
        actor=default_user,
    )
    assert len(anthropic_providers) == 1, "Anthropic provider should exist"
    anthropic_provider = anthropic_providers[0]

    anthropic_llm_models = await server.provider_manager.list_models_async(
        actor=default_user,
        provider_id=anthropic_provider.id,
        model_type="llm",
    )
    # Should have Claude models
    assert len(anthropic_llm_models) >= 2, f"Expected at least 2 Anthropic models, got {len(anthropic_llm_models)}"
    anthropic_model_names = [m.name for m in anthropic_llm_models]
    assert "claude-3-5-sonnet-20241022" in anthropic_model_names
    assert "claude-3-opus-20240229" in anthropic_model_names

    # Test that we can retrieve LLMConfig from handle
    llm_config = await server.provider_manager.get_llm_config_from_handle(
        handle="openai/gpt-4",
        actor=default_user,
    )
    assert llm_config.model == "gpt-4"
    assert llm_config.handle == "openai/gpt-4"
    assert llm_config.provider_name == "openai"
    assert llm_config.context_window == 8192

    # Test that we can retrieve EmbeddingConfig from handle
    embedding_config = await server.provider_manager.get_embedding_config_from_handle(
        handle="openai/text-embedding-ada-002",
        actor=default_user,
    )
    assert embedding_config.embedding_model == "text-embedding-ada-002"
    assert embedding_config.handle == "openai/text-embedding-ada-002"
    assert embedding_config.embedding_dim == 1536
@pytest.mark.asyncio
async def test_server_startup_handles_disabled_providers(default_user, default_organization, monkeypatch):
    """Test that server startup properly handles providers that are no longer enabled.

    This test verifies that:
    1. Base providers that are no longer enabled (env vars removed) are deleted
    2. BYOK providers that are no longer enabled are NOT deleted (user-created)
    3. The sync process handles providers gracefully when API calls fail
    """
    from letta.schemas.providers import ProviderCreate
    from letta.server.server import SyncServer

    # First, manually create providers in the database
    provider_manager = ProviderManager()

    # Create a base OpenAI provider (simulating it was synced before)
    base_openai_create = ProviderCreate(
        name="openai",
        provider_type=ProviderType.openai,
        api_key="sk-old-key",
        base_url="https://api.openai.com/v1",
    )
    base_openai = await provider_manager.create_provider_async(
        base_openai_create,
        actor=default_user,
        is_byok=False,  # This is a base provider
    )

    # Create a BYOK provider (user-created)
    byok_provider_create = ProviderCreate(
        name="my-custom-openai",
        provider_type=ProviderType.openai,
        api_key="sk-my-key",
        base_url="https://api.openai.com/v1",
    )
    byok_provider = await provider_manager.create_provider_async(
        byok_provider_create,
        actor=default_user,
        is_byok=True,
    )
    assert byok_provider.provider_category == ProviderCategory.byok

    # Now create server with NO environment variables set (all base providers disabled)
    monkeypatch.delenv("OPENAI_API_KEY", raising=False)
    monkeypatch.delenv("ANTHROPIC_API_KEY", raising=False)

    from letta.settings import model_settings

    monkeypatch.setattr(model_settings, "openai_api_key", None)
    monkeypatch.setattr(model_settings, "anthropic_api_key", None)

    # Create server instance
    server = SyncServer(init_with_default_org_and_user=False)
    server.default_user = default_user
    server.default_org = default_organization

    # Verify only letta provider is enabled (no openai)
    enabled_names = [p.name for p in server._enabled_providers]
    assert "letta" in enabled_names
    assert "openai" not in enabled_names

    # Sync base providers (should not include openai anymore)
    await server.provider_manager.sync_base_providers(
        base_providers=server._enabled_providers,
        actor=default_user,
    )

    # Call _sync_provider_models_async
    await server._sync_provider_models_async()

    # Verify base OpenAI provider was deleted (no longer enabled).
    # BUG FIX: the previous code wrapped `assert False` in a `try` whose
    # `except Exception: pass` also swallowed the AssertionError, so this check
    # could never fail. pytest.raises fails the test if the lookup succeeds.
    with pytest.raises(Exception):
        await server.provider_manager.get_provider_async(base_openai.id, actor=default_user)

    # Verify BYOK provider still exists (should NOT be deleted)
    byok_still_exists = await server.provider_manager.get_provider_async(
        byok_provider.id,
        actor=default_user,
    )
    assert byok_still_exists is not None
    assert byok_still_exists.name == "my-custom-openai"
    assert byok_still_exists.provider_category == ProviderCategory.byok
@pytest.mark.asyncio
async def test_server_startup_handles_api_errors_gracefully(default_user, default_organization, monkeypatch):
    """Test that server startup handles API errors gracefully without crashing.

    This test verifies that:
    1. If a provider's API call fails during sync, it logs an error but continues
    2. Other providers can still sync successfully
    3. The server startup completes without crashing
    """
    from letta.server.server import SyncServer

    # Mock OpenAI to fail
    async def mock_openai_fail(*args, **kwargs):
        raise Exception("OpenAI API is down")

    # Mock Anthropic to succeed (as async iterable, matching real SDK pagination)
    mock_anthropic_data = [
        {
            "id": "claude-3-5-sonnet-20241022",
            "type": "model",
            "display_name": "Claude 3.5 Sonnet",
            "created_at": "2024-10-22T00:00:00Z",
        }
    ]

    class MockAnthropicModelItem:
        def __init__(self, data):
            self._data = data

        def model_dump(self):
            return self._data

    class MockAnthropicAsyncPage:
        def __init__(self, items):
            self._items = [MockAnthropicModelItem(item) for item in items]

        def __aiter__(self):
            return self._async_iter()

        async def _async_iter(self):
            for item in self._items:
                yield item

    # NOTE: The real SDK's models.list() is a regular (non-async) method that
    # returns an AsyncPaginator (which is async-iterable).
    class MockAnthropicModels:
        def list(self):
            return MockAnthropicAsyncPage(mock_anthropic_data)

    class MockAsyncAnthropic:
        def __init__(self, *args, **kwargs):
            self.models = MockAnthropicModels()

    monkeypatch.setattr(
        "letta.llm_api.openai.openai_get_model_list_async",
        mock_openai_fail,
    )
    monkeypatch.setattr(
        "anthropic.AsyncAnthropic",
        MockAsyncAnthropic,
    )

    # Set environment variables to enable both providers
    monkeypatch.setenv("OPENAI_API_KEY", "sk-test-key")
    monkeypatch.setenv("ANTHROPIC_API_KEY", "sk-ant-test-key")

    from letta.settings import model_settings

    monkeypatch.setattr(model_settings, "openai_api_key", "sk-test-key")
    monkeypatch.setattr(model_settings, "anthropic_api_key", "sk-ant-test-key")

    # Create server
    server = SyncServer(init_with_default_org_and_user=False)
    server.default_user = default_user
    server.default_org = default_organization

    # Sync base providers
    await server.provider_manager.sync_base_providers(
        base_providers=server._enabled_providers,
        actor=default_user,
    )

    # This should NOT crash even though OpenAI fails
    await server._sync_provider_models_async()

    # Verify Anthropic still synced successfully
    anthropic_providers = await server.provider_manager.list_providers_async(
        name="anthropic",
        actor=default_user,
    )
    assert len(anthropic_providers) == 1
    anthropic_models = await server.provider_manager.list_models_async(
        actor=default_user,
        provider_id=anthropic_providers[0].id,
        model_type="llm",
    )
    assert len(anthropic_models) >= 1, "Anthropic models should have synced despite OpenAI failure"

    # OpenAI should have no models (sync failed)
    openai_providers = await server.provider_manager.list_providers_async(
        name="openai",
        actor=default_user,
    )
    if len(openai_providers) > 0:
        # NOTE(review): the return value is discarded — this call only checks that
        # the model listing itself doesn't raise; no assertion is made on its content.
        await server.provider_manager.list_models_async(
            actor=default_user,
            provider_id=openai_providers[0].id,
        )
        # Models might exist from previous runs, but the sync attempt should have been logged as failed
        # The key is that the server didn't crash
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/managers/test_provider_manager.py",
"license": "Apache License 2.0",
"lines": 777,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:letta/interfaces/anthropic_parallel_tool_call_streaming_interface.py | import asyncio
import json
from collections.abc import AsyncGenerator
from datetime import datetime, timezone
from enum import Enum
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from opentelemetry.trace import Span
from letta.schemas.usage import LettaUsageStatistics
from anthropic import AsyncStream
from anthropic.types.beta import (
BetaInputJSONDelta,
BetaRawContentBlockDeltaEvent,
BetaRawContentBlockStartEvent,
BetaRawContentBlockStopEvent,
BetaRawMessageDeltaEvent,
BetaRawMessageStartEvent,
BetaRawMessageStopEvent,
BetaRawMessageStreamEvent,
BetaRedactedThinkingBlock,
BetaSignatureDelta,
BetaTextBlock,
BetaTextDelta,
BetaThinkingBlock,
BetaThinkingDelta,
BetaToolUseBlock,
)
from letta.log import get_logger
from letta.schemas.letta_message import (
ApprovalRequestMessage,
AssistantMessage,
HiddenReasoningMessage,
LettaMessage,
ReasoningMessage,
ToolCallDelta,
ToolCallMessage,
)
from letta.schemas.letta_message_content import ReasoningContent, RedactedReasoningContent, TextContent
from letta.schemas.letta_stop_reason import LettaStopReason, StopReasonType
from letta.schemas.message import Message
from letta.schemas.openai.chat_completion_response import FunctionCall, ToolCall
from letta.server.rest_api.json_parser import JSONParser, PydanticJSONParser
from letta.server.rest_api.streaming_response import RunCancelledException
from letta.server.rest_api.utils import decrement_message_uuid
logger = get_logger(__name__)
# TODO: These modes aren't used right now - but can be useful we do multiple sequential tool calling within one Claude message
class EventMode(Enum):
    """Kind of Anthropic content block currently being streamed (currently unused; see TODO above)."""

    TEXT = "TEXT"  # plain assistant text block
    TOOL_USE = "TOOL_USE"  # tool call block (name + JSON arguments)
    THINKING = "THINKING"  # extended-thinking (reasoning) block
    REDACTED_THINKING = "REDACTED_THINKING"  # reasoning withheld/encrypted by the API
# TODO: There's a duplicate version of this in anthropic_streaming_interface
class SimpleAnthropicStreamingInterface:
"""
A simpler version of AnthropicStreamingInterface focused on streaming assistant text and
tool call deltas. Updated to support parallel tool calling by collecting completed
ToolUse blocks (from content_block stop events) and exposing all finalized tool calls
via get_tool_call_objects().
Notes:
- We keep emitting the stream (text and tool-call deltas) as before for latency.
- We no longer rely on accumulating partial JSON to build the final tool call; instead
we read the finalized ToolUse input from the stop event and store it.
- Multiple tool calls within a single message (parallel tool use) are collected and
can be returned to the agent as a list.
"""
def __init__(
self,
requires_approval_tools: list = [],
run_id: str | None = None,
step_id: str | None = None,
):
self.json_parser: JSONParser = PydanticJSONParser()
self.run_id = run_id
self.step_id = step_id
# Premake IDs for database writes
self.letta_message_id = Message.generate_id()
self.anthropic_mode = None
self.message_id = None
self.accumulated_inner_thoughts = []
self.tool_call_id = None
self.tool_call_name = None
self.accumulated_tool_call_args = ""
self.previous_parse = {}
self.thinking_signature = None
# usage trackers
self.input_tokens = 0
self.output_tokens = 0
self.model = None
# cache tracking (Anthropic-specific)
self.cache_read_tokens = 0
self.cache_creation_tokens = 0
# Raw usage from provider (for transparent logging in provider trace)
self.raw_usage: dict | None = None
# reasoning object trackers
self.reasoning_messages = []
# assistant object trackers
self.assistant_messages: list[AssistantMessage] = []
# Buffer to hold tool call messages until inner thoughts are complete
self.tool_call_buffer = []
self.inner_thoughts_complete = False
# Buffer to handle partial XML tags across chunks
self.partial_tag_buffer = ""
self.requires_approval_tools = requires_approval_tools
# Collected finalized tool calls (supports parallel tool use)
self.collected_tool_calls: list[ToolCall] = []
# Track active tool_use blocks by stream index for parallel tool calling
# { index: {"id": str, "name": str, "args_parts": list[str]} }
self.active_tool_uses: dict[int, dict[str, object]] = {}
# Maintain start order and indexed collection for stable ordering
self._tool_use_start_order: list[int] = []
self._collected_indexed: list[tuple[int, ToolCall]] = []
def get_tool_call_objects(self) -> list[ToolCall]:
"""Return all finalized tool calls collected during this message (parallel supported)."""
# Prefer indexed ordering if available
if self._collected_indexed:
return [
call
for _, call in sorted(
self._collected_indexed,
key=lambda x: self._tool_use_start_order.index(x[0]) if x[0] in self._tool_use_start_order else x[0],
)
]
return self.collected_tool_calls
def get_tool_call_object(self) -> Optional[ToolCall]:
    """Return the first finalized tool call, or None when none were collected.

    Kept for legacy callers that predate parallel tool calling.
    """
    calls = self.get_tool_call_objects()
    return calls[0] if calls else None
def get_usage_statistics(self) -> "LettaUsageStatistics":
    """Build a LettaUsageStatistics from the token counts accumulated while streaming.

    Anthropic reports ``input_tokens`` excluding cached tokens, so cache-read
    and cache-creation counts are folded back in to obtain the true prompt size.

    Returns:
        LettaUsageStatistics with prompt/completion/total token counts.
    """
    from letta.schemas.usage import LettaUsageStatistics

    prompt = (self.input_tokens or 0) + (self.cache_read_tokens or 0) + (self.cache_creation_tokens or 0)
    completion = self.output_tokens or 0
    return LettaUsageStatistics(
        prompt_tokens=prompt,
        completion_tokens=completion,
        total_tokens=prompt + completion,
        cached_input_tokens=self.cache_read_tokens or None,
        cache_write_tokens=self.cache_creation_tokens or None,
        reasoning_tokens=None,  # Anthropic doesn't report reasoning tokens separately
    )
def get_reasoning_content(self) -> list[TextContent | ReasoningContent | RedactedReasoningContent]:
    """Collapse the streamed reasoning/assistant messages into content parts.

    Consecutive messages of the same kind (reasoning, redacted reasoning, or
    assistant text) are merged into a single content object, preserving the
    order in which they arrived on the stream.
    """

    def _process_group(
        group: list[ReasoningMessage | HiddenReasoningMessage | AssistantMessage],
        group_type: str,
    ) -> TextContent | ReasoningContent | RedactedReasoningContent:
        # Convert one homogeneous run of messages into a single content part.
        if group_type == "reasoning":
            reasoning_text = "".join(chunk.reasoning for chunk in group).strip()
            # A single native-reasoner chunk marks the whole group as native
            is_native = any(chunk.source == "reasoner_model" for chunk in group)
            # First non-None signature in the group wins
            signature = next((chunk.signature for chunk in group if chunk.signature is not None), None)
            if is_native:
                return ReasoningContent(is_native=is_native, reasoning=reasoning_text, signature=signature)
            else:
                return TextContent(text=reasoning_text)
        elif group_type == "redacted":
            redacted_text = "".join(chunk.hidden_reasoning for chunk in group if chunk.hidden_reasoning is not None)
            return RedactedReasoningContent(data=redacted_text)
        elif group_type == "text":
            parts: list[str] = []
            for chunk in group:
                # AssistantMessage content may be a list of content parts or a plain string
                if isinstance(chunk.content, list):
                    parts.append("".join([c.text for c in chunk.content]))
                else:
                    parts.append(chunk.content)
            return TextContent(text="".join(parts))
        else:
            raise ValueError("Unexpected group type")

    merged = []
    current_group = []
    current_group_type = None  # "reasoning", "redacted", or "text"
    for msg in self.reasoning_messages:
        # Determine the type of the current message
        if isinstance(msg, HiddenReasoningMessage):
            msg_type = "redacted"
        elif isinstance(msg, ReasoningMessage):
            msg_type = "reasoning"
        elif isinstance(msg, AssistantMessage):
            msg_type = "text"
        else:
            raise ValueError("Unexpected message type")

        # Initialize group type if not set
        if current_group_type is None:
            current_group_type = msg_type

        # If the type changes, process the current group
        if msg_type != current_group_type:
            merged.append(_process_group(current_group, current_group_type))
            current_group = []
            current_group_type = msg_type

        current_group.append(msg)

    # Process the final group, if any.
    if current_group:
        merged.append(_process_group(current_group, current_group_type))

    return merged
def get_content(self) -> list[TextContent | ReasoningContent | RedactedReasoningContent]:
    """Alias for get_reasoning_content, kept for interface parity with other interfaces."""
    return self.get_reasoning_content()
async def process(
    self,
    stream: AsyncStream[BetaRawMessageStreamEvent],
    ttft_span: Optional["Span"] = None,
) -> AsyncGenerator[LettaMessage | LettaStopReason, None]:
    """Consume an Anthropic streaming response and yield Letta messages.

    Args:
        stream: The raw Anthropic beta event stream.
        ttft_span: Optional tracing span; stop-reason events are recorded on it.

    Yields:
        LettaMessage objects converted from stream events, plus a
        LettaStopReason(error) if the stream fails.

    Raises:
        Exception: provider errors are transformed via
            AnthropicClient.handle_llm_error before being re-raised.
    """
    prev_message_type = None
    message_index = 0
    event = None
    try:
        async with stream:
            async for event in stream:
                try:
                    async for message in self._process_event(event, ttft_span, prev_message_type, message_index):
                        new_message_type = message.message_type
                        if new_message_type != prev_message_type:
                            # Bump the index on each message-type transition after the first
                            if prev_message_type is not None:
                                message_index += 1
                            prev_message_type = new_message_type
                        yield message
                except (asyncio.CancelledError, RunCancelledException) as e:
                    import traceback

                    logger.info("Cancelled stream attempt but overriding (%s) %s: %s", type(e).__name__, e, traceback.format_exc())
                    # Re-run the interrupted event so its messages are still emitted
                    async for message in self._process_event(event, ttft_span, prev_message_type, message_index):
                        new_message_type = message.message_type
                        if new_message_type != prev_message_type:
                            if prev_message_type is not None:
                                message_index += 1
                            prev_message_type = new_message_type
                        yield message

                    # Don't raise the exception here
                    continue
    except Exception as e:
        import traceback

        logger.error("Error processing stream: %s\n%s", e, traceback.format_exc())
        if ttft_span:
            ttft_span.add_event(
                name="stop_reason",
                attributes={"stop_reason": StopReasonType.error.value, "error": str(e), "stacktrace": traceback.format_exc()},
            )
        yield LettaStopReason(stop_reason=StopReasonType.error)
        # Transform Anthropic errors into our custom error types for consistent handling
        from letta.llm_api.anthropic_client import AnthropicClient

        client = AnthropicClient()
        transformed_error = client.handle_llm_error(e)
        raise transformed_error
    finally:
        logger.info("AnthropicStreamingInterface: Stream processing complete.")
async def _process_event(
    self,
    event: BetaRawMessageStreamEvent,
    ttft_span: Optional["Span"] = None,
    prev_message_type: Optional[str] = None,
    message_index: int = 0,
) -> AsyncGenerator[LettaMessage | LettaStopReason, None]:
    """Process a single event from the Anthropic stream and yield any resulting messages.

    Args:
        event: The event to process
        ttft_span: Optional tracing span (currently unused inside this method)
        prev_message_type: message_type of the last message the caller yielded
        message_index: index of the current message within the step

    Yields:
        Messages generated from processing this event
    """
    if isinstance(event, BetaRawContentBlockStartEvent):
        content = event.content_block

        if isinstance(content, BetaTextBlock):
            self.anthropic_mode = EventMode.TEXT
            # TODO: Can capture citations, etc.

        elif isinstance(content, BetaToolUseBlock):
            # New tool_use block started at this index
            self.anthropic_mode = EventMode.TOOL_USE
            self.active_tool_uses[event.index] = {"id": content.id, "name": content.name, "args_parts": []}
            if event.index not in self._tool_use_start_order:
                self._tool_use_start_order.append(event.index)

            # Emit an initial tool call delta for this new block
            name = content.name
            call_id = content.id
            # Initialize arguments from the start event's input (often {}) to avoid undefined in UIs
            if name in self.requires_approval_tools:
                # Approval-gated tools stream as ApprovalRequestMessage with a separate (decremented) message id
                tool_call_msg = ApprovalRequestMessage(
                    id=decrement_message_uuid(self.letta_message_id),
                    # Do not emit placeholder arguments here to avoid UI duplicates
                    tool_call=ToolCallDelta(name=name, tool_call_id=call_id),
                    tool_calls=ToolCallDelta(name=name, tool_call_id=call_id),
                    date=datetime.now(timezone.utc).isoformat(),
                    otid=Message.generate_otid_from_id(decrement_message_uuid(self.letta_message_id), -1),
                    run_id=self.run_id,
                    step_id=self.step_id,
                )
            else:
                if prev_message_type and prev_message_type != "tool_call_message":
                    message_index += 1
                tool_call_msg = ToolCallMessage(
                    id=self.letta_message_id,
                    # Do not emit placeholder arguments here to avoid UI duplicates
                    tool_call=ToolCallDelta(name=name, tool_call_id=call_id),
                    tool_calls=ToolCallDelta(name=name, tool_call_id=call_id),
                    date=datetime.now(timezone.utc).isoformat(),
                    otid=Message.generate_otid_from_id(self.letta_message_id, message_index),
                    run_id=self.run_id,
                    step_id=self.step_id,
                )
            prev_message_type = tool_call_msg.message_type
            yield tool_call_msg

        elif isinstance(content, BetaThinkingBlock):
            self.anthropic_mode = EventMode.THINKING
            # TODO: Can capture signature, etc.

        elif isinstance(content, BetaRedactedThinkingBlock):
            self.anthropic_mode = EventMode.REDACTED_THINKING
            if prev_message_type and prev_message_type != "hidden_reasoning_message":
                message_index += 1
            hidden_reasoning_message = HiddenReasoningMessage(
                id=self.letta_message_id,
                state="redacted",
                hidden_reasoning=content.data,
                date=datetime.now(timezone.utc).isoformat(),
                otid=Message.generate_otid_from_id(self.letta_message_id, message_index),
                run_id=self.run_id,
                step_id=self.step_id,
            )
            self.reasoning_messages.append(hidden_reasoning_message)
            prev_message_type = hidden_reasoning_message.message_type
            yield hidden_reasoning_message

    elif isinstance(event, BetaRawContentBlockDeltaEvent):
        delta = event.delta

        if isinstance(delta, BetaTextDelta):
            # Safety check
            if not self.anthropic_mode == EventMode.TEXT:
                raise RuntimeError(f"Streaming integrity failed - received BetaTextDelta object while not in TEXT EventMode: {delta}")

            if prev_message_type and prev_message_type != "assistant_message":
                message_index += 1
            assistant_msg = AssistantMessage(
                id=self.letta_message_id,
                content=delta.text,
                date=datetime.now(timezone.utc).isoformat(),
                otid=Message.generate_otid_from_id(self.letta_message_id, message_index),
                run_id=self.run_id,
                step_id=self.step_id,
            )
            # Assistant text is tracked in reasoning_messages (not assistant_messages)
            # so get_reasoning_content can merge it in stream order
            self.reasoning_messages.append(assistant_msg)
            prev_message_type = assistant_msg.message_type
            yield assistant_msg

        elif isinstance(delta, BetaInputJSONDelta):
            # Append partial JSON for the specific tool_use block at this index
            if not self.anthropic_mode == EventMode.TOOL_USE:
                raise RuntimeError(
                    f"Streaming integrity failed - received BetaInputJSONDelta object while not in TOOL_USE EventMode: {delta}"
                )

            ctx = self.active_tool_uses.get(event.index)
            if ctx is None:
                # Defensive: initialize if missing
                self.active_tool_uses[event.index] = {
                    "id": self.tool_call_id or "",
                    "name": self.tool_call_name or "",
                    "args_parts": [],
                }
                ctx = self.active_tool_uses[event.index]

            # Append only non-empty partials
            if delta.partial_json:
                # Append fragment to args_parts to avoid O(n^2) string growth
                args_parts = ctx.get("args_parts") if isinstance(ctx.get("args_parts"), list) else None
                if args_parts is None:
                    args_parts = []
                    ctx["args_parts"] = args_parts
                args_parts.append(delta.partial_json)
            else:
                # Skip streaming a no-op delta to prevent duplicate placeholders in UI
                return

            name = ctx.get("name")
            call_id = ctx.get("id")
            if name in self.requires_approval_tools:
                tool_call_msg = ApprovalRequestMessage(
                    id=decrement_message_uuid(self.letta_message_id),
                    tool_call=ToolCallDelta(name=name, tool_call_id=call_id, arguments=delta.partial_json),
                    tool_calls=ToolCallDelta(name=name, tool_call_id=call_id, arguments=delta.partial_json),
                    date=datetime.now(timezone.utc).isoformat(),
                    otid=Message.generate_otid_from_id(decrement_message_uuid(self.letta_message_id), -1),
                    run_id=self.run_id,
                    step_id=self.step_id,
                )
            else:
                if prev_message_type and prev_message_type != "tool_call_message":
                    message_index += 1
                tool_call_msg = ToolCallMessage(
                    id=self.letta_message_id,
                    tool_call=ToolCallDelta(name=name, tool_call_id=call_id, arguments=delta.partial_json),
                    tool_calls=ToolCallDelta(name=name, tool_call_id=call_id, arguments=delta.partial_json),
                    date=datetime.now(timezone.utc).isoformat(),
                    otid=Message.generate_otid_from_id(self.letta_message_id, message_index),
                    run_id=self.run_id,
                    step_id=self.step_id,
                )
            prev_message_type = tool_call_msg.message_type
            yield tool_call_msg

        elif isinstance(delta, BetaThinkingDelta):
            # Safety check
            if not self.anthropic_mode == EventMode.THINKING:
                raise RuntimeError(
                    f"Streaming integrity failed - received BetaThinkingBlock object while not in THINKING EventMode: {delta}"
                )

            # Only emit reasoning message if we have actual content
            if delta.thinking and delta.thinking.strip():
                if prev_message_type and prev_message_type != "reasoning_message":
                    message_index += 1
                reasoning_message = ReasoningMessage(
                    id=self.letta_message_id,
                    source="reasoner_model",
                    reasoning=delta.thinking,
                    signature=self.thinking_signature,
                    date=datetime.now(timezone.utc).isoformat(),
                    otid=Message.generate_otid_from_id(self.letta_message_id, message_index),
                    run_id=self.run_id,
                    step_id=self.step_id,
                )
                self.reasoning_messages.append(reasoning_message)
                prev_message_type = reasoning_message.message_type
                yield reasoning_message

        elif isinstance(delta, BetaSignatureDelta):
            # Safety check
            if not self.anthropic_mode == EventMode.THINKING:
                raise RuntimeError(
                    f"Streaming integrity failed - received BetaSignatureDelta object while not in THINKING EventMode: {delta}"
                )

            # Store signature but don't emit empty reasoning message
            # Signature will be attached when actual thinking content arrives
            self.thinking_signature = delta.signature
            # Update the last reasoning message with the signature so it gets persisted
            if self.reasoning_messages:
                last_msg = self.reasoning_messages[-1]
                if isinstance(last_msg, ReasoningMessage):
                    last_msg.signature = delta.signature

    elif isinstance(event, BetaRawMessageStartEvent):
        self.message_id = event.message.id
        self.input_tokens += event.message.usage.input_tokens
        self.output_tokens += event.message.usage.output_tokens
        self.model = event.message.model
        # Capture cache data if available
        usage = event.message.usage
        if hasattr(usage, "cache_read_input_tokens") and usage.cache_read_input_tokens:
            self.cache_read_tokens += usage.cache_read_input_tokens
        if hasattr(usage, "cache_creation_input_tokens") and usage.cache_creation_input_tokens:
            self.cache_creation_tokens += usage.cache_creation_input_tokens
        # Store raw usage for transparent provider trace logging
        try:
            self.raw_usage = usage.model_dump(exclude_none=True)
        except Exception as e:
            logger.error(f"Failed to capture raw_usage from Anthropic: {e}")
            self.raw_usage = None

    elif isinstance(event, BetaRawMessageDeltaEvent):
        # Per Anthropic docs: "The token counts shown in the usage field of the
        # message_delta event are *cumulative*." So we assign, not accumulate.
        self.output_tokens = event.usage.output_tokens

    elif isinstance(event, BetaRawMessageStopEvent):
        # Update raw_usage with final accumulated values for accurate provider trace logging
        if self.raw_usage:
            self.raw_usage["input_tokens"] = self.input_tokens
            self.raw_usage["output_tokens"] = self.output_tokens
            if self.cache_read_tokens:
                self.raw_usage["cache_read_input_tokens"] = self.cache_read_tokens
            if self.cache_creation_tokens:
                self.raw_usage["cache_creation_input_tokens"] = self.cache_creation_tokens

    elif isinstance(event, BetaRawContentBlockStopEvent):
        # Finalize the tool_use block at this index using accumulated deltas
        ctx = self.active_tool_uses.pop(event.index, None)
        if ctx is not None and ctx.get("id") and ctx.get("name") is not None:
            parts = ctx.get("args_parts") if isinstance(ctx.get("args_parts"), list) else None
            raw_args = "".join(parts) if parts else ""
            try:
                # Prefer strict JSON load, fallback to permissive parser
                tool_input = json.loads(raw_args) if raw_args else {}
            except json.JSONDecodeError:
                try:
                    tool_input = self.json_parser.parse(raw_args) if raw_args else {}
                except Exception:
                    tool_input = {}
            arguments = json.dumps(tool_input)
            finalized = ToolCall(id=ctx["id"], function=FunctionCall(arguments=arguments, name=ctx["name"]))
            # Keep both raw list and indexed list for compatibility
            self.collected_tool_calls.append(finalized)
            self._collected_indexed.append((event.index, finalized))

        # Reset mode when a content block ends
        self.anthropic_mode = None
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/interfaces/anthropic_parallel_tool_call_streaming_interface.py",
"license": "Apache License 2.0",
"lines": 480,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:tests/test_provider_api.py | import os
import threading
import pytest
from dotenv import load_dotenv
from letta_client import Letta
from letta_client.core.api_error import ApiError
from tests.utils import wait_for_server
# Constants
SERVER_PORT = 8283
def run_server():
    """Start a Letta REST API server in the current thread (blocking).

    Loads .env first so the server picks up local configuration; the app is
    imported inside the function so it is only loaded when a server is
    actually started.
    """
    load_dotenv()
    from letta.server.rest_api.app import start_server

    print("Starting server...")
    start_server(debug=True)
@pytest.fixture(scope="module")
def client(request):
    """Yield a Letta client, booting a background server when no external URL is configured."""
    api_url = os.getenv("LETTA_API_URL")
    server_url = os.getenv("LETTA_SERVER_URL", f"http://localhost:{SERVER_PORT}")

    # No external server configured: launch one in a daemon thread and wait for it
    if not os.getenv("LETTA_SERVER_URL"):
        print("Starting server thread")
        server_thread = threading.Thread(target=run_server, daemon=True)
        server_thread.start()
        wait_for_server(server_url)

    print("Running client tests with server:", server_url)

    # LETTA_API_URL, when set, overrides the (possibly locally started) server URL
    base_url = api_url or server_url
    yield Letta(base_url=base_url, token=None)
@pytest.fixture
def test_provider(client: Letta):
    """Create a test provider for testing."""
    provider = client.providers.create(
        provider_type="openai",
        api_key="test-api-key-123",
        name="test-openai-provider",
    )
    yield provider
    # Teardown: remove the provider; tolerate it having been deleted by a test already
    try:
        client.providers.delete(provider.id)
    except ApiError:
        pass
def test_check_existing_provider_success(client: Letta, test_provider):
    """Test checking an existing provider with valid credentials."""
    # The fixture's API key is fake, so either a successful response or an
    # auth/connection error proves the check endpoint is wired up correctly.
    try:
        response = client.providers.check(test_provider.id)
        assert response is not None
    except ApiError as e:
        # 401 for bad credentials, 500 when the provider connection itself fails
        assert e.status_code in (401, 500)
def test_check_existing_provider_not_found(client: Letta):
    """Test checking a provider that doesn't exist."""
    missing_provider_id = "00000000-0000-0000-0000-000000000000"
    with pytest.raises(ApiError) as exc_info:
        client.providers.check(missing_provider_id)
    # A nonexistent provider must map to HTTP 404
    assert exc_info.value.status_code == 404
def test_check_existing_provider_unauthorized(client: Letta, test_provider):
    """Test checking an existing provider with invalid API key."""
    with pytest.raises(ApiError) as exc_info:
        client.providers.check(test_provider.id)
    # 401 for the fake key; 500 if the check fails for another reason
    assert exc_info.value.status_code in (401, 500)
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/test_provider_api.py",
"license": "Apache License 2.0",
"lines": 73,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:alembic/versions/c734cfc0d595_add_runs_metrics_table.py | """add runs_metrics table
Revision ID: c734cfc0d595
Revises: 038e68cdf0df
Create Date: 2025-10-08 14:35:23.302204
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "c734cfc0d595"
down_revision: Union[str, None] = "038e68cdf0df"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    # One metrics row per run: `id` is both the PK and an FK to runs.id, so
    # metrics rows are deleted together with their run (and, via the agent FK,
    # with their agent).
    op.create_table(
        "run_metrics",
        sa.Column("id", sa.String(), nullable=False),
        sa.Column("run_start_ns", sa.BigInteger(), nullable=True),
        sa.Column("run_ns", sa.BigInteger(), nullable=True),
        sa.Column("num_steps", sa.Integer(), nullable=True),
        sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=True),
        sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=True),
        sa.Column("is_deleted", sa.Boolean(), server_default=sa.text("FALSE"), nullable=False),
        sa.Column("_created_by_id", sa.String(), nullable=True),
        sa.Column("_last_updated_by_id", sa.String(), nullable=True),
        sa.Column("project_id", sa.String(), nullable=True),
        sa.Column("agent_id", sa.String(), nullable=False),
        sa.Column("organization_id", sa.String(), nullable=False),
        sa.Column("base_template_id", sa.String(), nullable=True),
        sa.Column("template_id", sa.String(), nullable=True),
        sa.Column("deployment_id", sa.String(), nullable=True),
        sa.ForeignKeyConstraint(["agent_id"], ["agents.id"], ondelete="CASCADE"),
        sa.ForeignKeyConstraint(["id"], ["runs.id"], ondelete="CASCADE"),
        sa.ForeignKeyConstraint(
            ["organization_id"],
            ["organizations.id"],
        ),
        sa.PrimaryKeyConstraint("id"),
    )
    # ### end Alembic commands ###
def downgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    # Dropping the table is sufficient; all of its constraints go with it.
    op.drop_table("run_metrics")
    # ### end Alembic commands ###
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/c734cfc0d595_add_runs_metrics_table.py",
"license": "Apache License 2.0",
"lines": 45,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:letta/database_utils.py | """
Database URI utilities for consistent database connection handling across the application.
This module provides utilities for parsing and converting database URIs to ensure
consistent behavior between the main application, alembic migrations, and other
database-related components.
"""
from typing import Optional
from urllib.parse import urlparse, urlunparse
def parse_database_uri(uri: str) -> dict[str, Optional[str]]:
    """
    Parse a database URI into its components.

    Args:
        uri: Database URI (e.g. postgresql+asyncpg://user:pass@host:port/db)

    Returns:
        Dict with keys: scheme, driver, user, password, host, port, database,
        query, fragment. Missing pieces are None (query/fragment may be "").
    """
    parsed = urlparse(uri)

    # "postgresql+asyncpg" splits into base scheme and optional driver suffix
    base_scheme, *driver_parts = parsed.scheme.split("+")
    driver = driver_parts[0] if driver_parts else None

    return {
        "scheme": base_scheme,
        "driver": driver,
        "user": parsed.username,
        "password": parsed.password,
        "host": parsed.hostname,
        "port": str(parsed.port) if parsed.port else None,
        "database": parsed.path.lstrip("/") if parsed.path else None,
        "query": parsed.query,
        "fragment": parsed.fragment,
    }
def build_database_uri(
    scheme: str = "postgresql",
    driver: Optional[str] = None,
    user: Optional[str] = None,
    password: Optional[str] = None,
    host: Optional[str] = None,
    port: Optional[str] = None,
    database: Optional[str] = None,
    query: Optional[str] = None,
    fragment: Optional[str] = None,
) -> str:
    """
    Assemble a database URI from its individual components.

    Args:
        scheme: Base scheme (e.g. "postgresql")
        driver: Optional driver suffix (e.g. "asyncpg", "pg8000")
        user: Username
        password: Password
        host: Hostname
        port: Port number (string)
        database: Database name
        query: Query string
        fragment: Fragment

    Returns:
        Complete database URI string.
    """
    # scheme[+driver]
    full_scheme = f"{scheme}+{driver}" if driver else scheme

    # netloc is "[user[:password]@][host[:port]]"
    credentials = f"{user}:{password}" if user and password else (user or "")
    authority = f"{host}:{port}" if host and port else (host or "")
    netloc = "@".join(piece for piece in (credentials, authority) if piece)

    path = f"/{database}" if database else ""
    return urlunparse((full_scheme, netloc, path, "", query or "", fragment or ""))
def convert_to_async_uri(uri: str) -> str:
    """
    Convert a database URI to use the asyncpg driver for async operations.

    Args:
        uri: Original database URI

    Returns:
        URI with the asyncpg driver and `sslmode=` rewritten to `ssl=`
    """
    components = parse_database_uri(uri)

    # Convert to asyncpg driver
    components["driver"] = "asyncpg"

    # asyncpg expects `ssl=` rather than libpq's `sslmode=`. Rewrite only the
    # query string so a password or database name containing "sslmode=" is
    # never corrupted (the old code replaced across the whole URI).
    if components.get("query"):
        components["query"] = components["query"].replace("sslmode=", "ssl=")

    return build_database_uri(**components)
def convert_to_sync_uri(uri: str) -> str:
    """
    Convert a database URI to use the pg8000 driver for sync operations (alembic).

    Args:
        uri: Original database URI

    Returns:
        URI with the pg8000 driver and `ssl=` rewritten to `sslmode=`
    """
    components = parse_database_uri(uri)

    # Convert to pg8000 driver
    components["driver"] = "pg8000"

    # pg8000/libpq expects `sslmode=` rather than asyncpg's `ssl=`. Rewrite
    # only the query string so credentials or paths containing "ssl=" are
    # never corrupted (the old code replaced across the whole URI).
    if components.get("query"):
        components["query"] = components["query"].replace("ssl=", "sslmode=")

    return build_database_uri(**components)
def get_database_uri_for_context(uri: str, context: str = "async") -> str:
    """
    Get the appropriate database URI for a specific context.

    Args:
        uri: Original database URI
        context: "async" (asyncpg) or "sync"/"alembic" (pg8000)

    Returns:
        URI formatted for the specified context.

    Raises:
        ValueError: if `context` is not one of the supported values.
    """
    if context == "async":
        return convert_to_async_uri(uri)
    if context in ("sync", "alembic"):
        return convert_to_sync_uri(uri)
    raise ValueError(f"Unknown context: {context}. Must be 'async', 'sync', or 'alembic'")
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/database_utils.py",
"license": "Apache License 2.0",
"lines": 124,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
letta-ai/letta:letta/orm/run_metrics.py | from datetime import datetime, timezone
from typing import TYPE_CHECKING, List, Optional
from sqlalchemy import JSON, BigInteger, ForeignKey, Integer
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import Mapped, Session, mapped_column, relationship
from letta.orm.mixins import AgentMixin, OrganizationMixin, ProjectMixin, TemplateMixin
from letta.orm.sqlalchemy_base import SqlalchemyBase
from letta.schemas.run_metrics import RunMetrics as PydanticRunMetrics
from letta.schemas.user import User
from letta.settings import DatabaseChoice, settings
if TYPE_CHECKING:
from letta.orm.agent import Agent
from letta.orm.run import Run
class RunMetrics(SqlalchemyBase, ProjectMixin, AgentMixin, OrganizationMixin, TemplateMixin):
    """Tracks performance metrics for a run (one row per run, keyed by the run id)."""

    __tablename__ = "run_metrics"
    __pydantic_model__ = PydanticRunMetrics

    # The PK doubles as an FK to runs.id: metrics rows live and die with their run
    id: Mapped[str] = mapped_column(
        ForeignKey("runs.id", ondelete="CASCADE"),
        primary_key=True,
        doc="The unique identifier of the run this metric belongs to (also serves as PK)",
    )
    run_start_ns: Mapped[Optional[int]] = mapped_column(
        BigInteger,
        nullable=True,
        doc="The timestamp of the start of the run in nanoseconds",
    )
    run_ns: Mapped[Optional[int]] = mapped_column(
        BigInteger,
        nullable=True,
        doc="Total time for the run in nanoseconds",
    )
    num_steps: Mapped[Optional[int]] = mapped_column(
        Integer,
        nullable=True,
        doc="The number of steps in the run",
    )
    tools_used: Mapped[Optional[List[str]]] = mapped_column(
        JSON,
        nullable=True,
        doc="List of tool IDs that were used in this run",
    )

    run: Mapped[Optional["Run"]] = relationship("Run", foreign_keys=[id])
    agent: Mapped[Optional["Agent"]] = relationship("Agent")

    def _backfill_timestamps_for_sqlite(self) -> None:
        """Set created_at/updated_at client-side on SQLite, where server_default may not fire.

        Shared by create and create_async so the workaround stays in one place.
        """
        if settings.database_engine == DatabaseChoice.SQLITE:
            now = datetime.now(timezone.utc)
            if not self.created_at:
                self.created_at = now
            if not self.updated_at:
                self.updated_at = now

    def create(
        self,
        db_session: Session,
        actor: Optional[User] = None,
        no_commit: bool = False,
    ) -> "RunMetrics":
        """Persist this row synchronously, applying the SQLite timestamp workaround first."""
        self._backfill_timestamps_for_sqlite()
        return super().create(db_session, actor=actor, no_commit=no_commit)

    async def create_async(
        self,
        db_session: AsyncSession,
        actor: Optional[User] = None,
        no_commit: bool = False,
        no_refresh: bool = False,
    ) -> "RunMetrics":
        """Persist this row asynchronously, applying the SQLite timestamp workaround first."""
        self._backfill_timestamps_for_sqlite()
        return await super().create_async(db_session, actor=actor, no_commit=no_commit, no_refresh=no_refresh)
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/orm/run_metrics.py",
"license": "Apache License 2.0",
"lines": 75,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:letta/schemas/run_metrics.py | from typing import List, Optional
from pydantic import Field
from letta.schemas.enums import PrimitiveType
from letta.schemas.letta_base import LettaBase
class RunMetricsBase(LettaBase):
    # Metrics rows reuse the run ID prefix because their id matches runs.id
    __id_prefix__ = PrimitiveType.RUN.value
class RunMetrics(RunMetricsBase):
    """Per-run performance metrics; `id` matches the run the row describes."""

    id: str = Field(..., description="The id of the run this metric belongs to (matches runs.id).")
    organization_id: Optional[str] = Field(None, description="The unique identifier of the organization.")
    agent_id: Optional[str] = Field(None, description="The unique identifier of the agent.")
    project_id: Optional[str] = Field(None, description="The project that the run belongs to (cloud only).")
    run_start_ns: Optional[int] = Field(None, description="The timestamp of the start of the run in nanoseconds.")
    run_ns: Optional[int] = Field(None, description="Total time for the run in nanoseconds.")
    num_steps: Optional[int] = Field(None, description="The number of steps in the run.")
    tools_used: Optional[List[str]] = Field(None, description="List of tool IDs that were used in this run.")
    template_id: Optional[str] = Field(None, description="The template ID that the run belongs to (cloud only).")
    base_template_id: Optional[str] = Field(None, description="The base template ID that the run belongs to (cloud only).")
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/schemas/run_metrics.py",
"license": "Apache License 2.0",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/038e68cdf0df_add_cascades_to_blocks_agents_fks_set_.py | """add cascades to blocks_agents FKs; set initially immediate
Revision ID: 038e68cdf0df
Revises: b6061da886ee
Create Date: 2025-10-07 13:01:17.872405
"""
from typing import Sequence, Union
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "038e68cdf0df"
down_revision: Union[str, None] = "b6061da886ee"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Recreate blocks_agents FKs with ON DELETE CASCADE; check the composite FK immediately."""
    op.drop_constraint(op.f("blocks_agents_agent_id_fkey"), "blocks_agents", type_="foreignkey")
    op.drop_constraint(op.f("fk_block_id_label"), "blocks_agents", type_="foreignkey")
    op.create_foreign_key(
        "fk_block_id_label",
        "blocks_agents",
        "block",
        ["block_id", "block_label"],
        ["id", "label"],
        onupdate="CASCADE",
        ondelete="CASCADE",
        initially="IMMEDIATE",
        deferrable=True,
    )
    # Name the FK explicitly (this matches Postgres' auto-generated name) so the
    # downgrade can drop it by name instead of passing None, which alembic rejects.
    op.create_foreign_key("blocks_agents_agent_id_fkey", "blocks_agents", "agents", ["agent_id"], ["id"], ondelete="CASCADE")
def downgrade() -> None:
    """Restore the original FKs: no cascade on agent_id, composite FK deferred."""
    # The autogenerated code passed None as the constraint name, which
    # op.drop_constraint rejects at runtime; use the conventional
    # Postgres-generated name for the agent_id FK instead.
    op.drop_constraint("blocks_agents_agent_id_fkey", "blocks_agents", type_="foreignkey")
    op.drop_constraint("fk_block_id_label", "blocks_agents", type_="foreignkey")
    op.create_foreign_key(
        op.f("fk_block_id_label"),
        "blocks_agents",
        "block",
        ["block_id", "block_label"],
        ["id", "label"],
        initially="DEFERRED",
        deferrable=True,
    )
    op.create_foreign_key(op.f("blocks_agents_agent_id_fkey"), "blocks_agents", "agents", ["agent_id"], ["id"])
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/038e68cdf0df_add_cascades_to_blocks_agents_fks_set_.py",
"license": "Apache License 2.0",
"lines": 44,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/3bc3c031fbe4_create_new_runs_table_and_remove_legacy_.py | """create new runs table and remove legacy tables
Revision ID: 3bc3c031fbe4
Revises: 567e9fe06270
Create Date: 2025-10-03 12:10:51.065067
"""
from typing import Sequence, Union
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "3bc3c031fbe4"
down_revision: Union[str, None] = "567e9fe06270"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    # ### commands auto generated by Alembic - please adjust! ###
    # Speed up project-scoped agent listings and run-scoped message lookups
    op.create_index("ix_agents_project_id", "agents", ["project_id"], unique=False)
    op.create_index("ix_messages_run_id", "messages", ["run_id"], unique=False)
    # ### end Alembic commands ###
def downgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index("ix_messages_run_id", table_name="messages")
op.drop_index("ix_agents_project_id", table_name="agents")
# ### end Alembic commands ###
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/3bc3c031fbe4_create_new_runs_table_and_remove_legacy_.py",
"license": "Apache License 2.0",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/3d2e9fb40a3c_add_indexes_for_feedback.py | """Add additional indexes
Revision ID: 3d2e9fb40a3c
Revises: 57bcea83af3f
Create Date: 2025-09-20 00:00:00.000000
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "3d2e9fb40a3c"
down_revision: Union[str, None] = "57bcea83af3f"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def _create_index_if_missing(index_name: str, table_name: str, columns: list[str], unique: bool = False) -> None:
"""Create an index if it does not already exist.
Uses SQLAlchemy inspector to avoid duplicate index errors across environments.
"""
bind = op.get_bind()
inspector = sa.inspect(bind)
existing = {ix["name"] for ix in inspector.get_indexes(table_name)}
if index_name not in existing:
op.create_index(index_name, table_name, columns, unique=unique)
def upgrade() -> None:
# files_agents: speed up WHERE agent_id IN (...)
_create_index_if_missing("ix_files_agents_agent_id", "files_agents", ["agent_id"])
# block: speed up common org+deployment filters
_create_index_if_missing(
"ix_block_organization_id_deployment_id",
"block",
["organization_id", "deployment_id"],
)
# agents: speed up common org+deployment filters
_create_index_if_missing(
"ix_agents_organization_id_deployment_id",
"agents",
["organization_id", "deployment_id"],
)
# Note: The index on block.current_history_entry_id (ix_block_current_history_entry_id)
# already exists from prior migrations. If drift is suspected, consider verifying
# and recreating it manually to avoid duplicate indexes under different names.
def downgrade() -> None:
# Drop indexes added in this migration (ignore if missing for portability)
for name, table in [
("ix_agents_organization_id_deployment_id", "agents"),
("ix_block_organization_id_deployment_id", "block"),
("ix_files_agents_agent_id", "files_agents"),
]:
try:
op.drop_index(name, table_name=table)
except Exception:
# Be permissive in environments where indexes may have different names
pass
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/3d2e9fb40a3c_add_indexes_for_feedback.py",
"license": "Apache License 2.0",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/567e9fe06270_create_new_runs_table_and_remove_legacy_.py | """create new runs table and remove legacy tables
Revision ID: 567e9fe06270
Revises: 3d2e9fb40a3c
Create Date: 2025-09-22 15:22:28.651178
"""
from typing import Sequence, Union
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "567e9fe06270"
down_revision: Union[str, None] = "3d2e9fb40a3c"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"runs",
sa.Column("id", sa.String(), nullable=False),
sa.Column("status", sa.String(), nullable=False),
sa.Column("completed_at", sa.DateTime(), nullable=True),
sa.Column("stop_reason", sa.String(), nullable=True),
sa.Column("background", sa.Boolean(), nullable=True),
sa.Column("metadata_", sa.JSON(), nullable=True),
sa.Column("request_config", sa.JSON(), nullable=True),
sa.Column("agent_id", sa.String(), nullable=False),
sa.Column("callback_url", sa.String(), nullable=True),
sa.Column("callback_sent_at", sa.DateTime(), nullable=True),
sa.Column("callback_status_code", sa.Integer(), nullable=True),
sa.Column("callback_error", sa.String(), nullable=True),
sa.Column("ttft_ns", sa.BigInteger(), nullable=True),
sa.Column("total_duration_ns", sa.BigInteger(), nullable=True),
sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=True),
sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=True),
sa.Column("is_deleted", sa.Boolean(), server_default=sa.text("FALSE"), nullable=False),
sa.Column("_created_by_id", sa.String(), nullable=True),
sa.Column("_last_updated_by_id", sa.String(), nullable=True),
sa.Column("organization_id", sa.String(), nullable=False),
sa.Column("project_id", sa.String(), nullable=True),
sa.Column("base_template_id", sa.String(), nullable=True),
sa.Column("template_id", sa.String(), nullable=True),
sa.Column("deployment_id", sa.String(), nullable=True),
sa.ForeignKeyConstraint(
["agent_id"],
["agents.id"],
),
sa.ForeignKeyConstraint(
["organization_id"],
["organizations.id"],
),
sa.PrimaryKeyConstraint("id"),
)
op.create_index("ix_runs_agent_id", "runs", ["agent_id"], unique=False)
op.create_index("ix_runs_created_at", "runs", ["created_at", "id"], unique=False)
op.create_index("ix_runs_organization_id", "runs", ["organization_id"], unique=False)
op.drop_index(op.f("ix_agents_runs_agent_id_run_id"), table_name="agents_runs")
op.drop_index(op.f("ix_agents_runs_run_id_agent_id"), table_name="agents_runs")
op.drop_table("agents_runs")
op.drop_table("job_messages")
op.add_column("messages", sa.Column("run_id", sa.String(), nullable=True))
op.create_foreign_key("fk_messages_run_id", "messages", "runs", ["run_id"], ["id"], ondelete="SET NULL")
op.add_column("step_metrics", sa.Column("run_id", sa.String(), nullable=True))
op.drop_constraint(op.f("step_metrics_job_id_fkey"), "step_metrics", type_="foreignkey")
op.create_foreign_key("fk_step_metrics_run_id", "step_metrics", "runs", ["run_id"], ["id"], ondelete="SET NULL")
op.drop_column("step_metrics", "job_id")
op.add_column("steps", sa.Column("run_id", sa.String(), nullable=True))
op.drop_index(op.f("ix_steps_job_id"), table_name="steps")
op.create_index("ix_steps_run_id", "steps", ["run_id"], unique=False)
op.drop_constraint(op.f("fk_steps_job_id"), "steps", type_="foreignkey")
op.create_foreign_key("fk_steps_run_id", "steps", "runs", ["run_id"], ["id"], ondelete="SET NULL")
op.drop_column("steps", "job_id")
# ### end Alembic commands ###
def downgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.add_column("steps", sa.Column("job_id", sa.VARCHAR(), autoincrement=False, nullable=True))
op.drop_constraint("fk_steps_run_id", "steps", type_="foreignkey")
op.create_foreign_key(op.f("fk_steps_job_id"), "steps", "jobs", ["job_id"], ["id"], ondelete="SET NULL")
op.drop_index("ix_steps_run_id", table_name="steps")
op.create_index(op.f("ix_steps_job_id"), "steps", ["job_id"], unique=False)
op.drop_column("steps", "run_id")
op.add_column("step_metrics", sa.Column("job_id", sa.VARCHAR(), autoincrement=False, nullable=True))
op.drop_constraint("fk_step_metrics_run_id", "step_metrics", type_="foreignkey")
op.create_foreign_key(op.f("step_metrics_job_id_fkey"), "step_metrics", "jobs", ["job_id"], ["id"], ondelete="SET NULL")
op.drop_column("step_metrics", "run_id")
op.drop_constraint("fk_messages_run_id", "messages", type_="foreignkey")
op.drop_column("messages", "run_id")
op.create_table(
"job_messages",
sa.Column("id", sa.INTEGER(), autoincrement=True, nullable=False),
sa.Column("job_id", sa.VARCHAR(), autoincrement=False, nullable=False),
sa.Column("message_id", sa.VARCHAR(), autoincrement=False, nullable=False),
sa.Column("created_at", postgresql.TIMESTAMP(timezone=True), server_default=sa.text("now()"), autoincrement=False, nullable=True),
sa.Column("updated_at", postgresql.TIMESTAMP(timezone=True), server_default=sa.text("now()"), autoincrement=False, nullable=True),
sa.Column("is_deleted", sa.BOOLEAN(), server_default=sa.text("false"), autoincrement=False, nullable=False),
sa.Column("_created_by_id", sa.VARCHAR(), autoincrement=False, nullable=True),
sa.Column("_last_updated_by_id", sa.VARCHAR(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(["job_id"], ["jobs.id"], name=op.f("fk_job_messages_job_id"), ondelete="CASCADE"),
sa.ForeignKeyConstraint(["message_id"], ["messages.id"], name=op.f("fk_job_messages_message_id"), ondelete="CASCADE"),
sa.PrimaryKeyConstraint("id", name=op.f("pk_job_messages")),
sa.UniqueConstraint(
"job_id", "message_id", name=op.f("unique_job_message"), postgresql_include=[], postgresql_nulls_not_distinct=False
),
)
op.create_table(
"agents_runs",
sa.Column("agent_id", sa.VARCHAR(), autoincrement=False, nullable=False),
sa.Column("run_id", sa.VARCHAR(), autoincrement=False, nullable=False),
sa.ForeignKeyConstraint(["agent_id"], ["agents.id"], name=op.f("agents_runs_agent_id_fkey")),
sa.ForeignKeyConstraint(["run_id"], ["jobs.id"], name=op.f("agents_runs_run_id_fkey")),
sa.PrimaryKeyConstraint("agent_id", "run_id", name=op.f("unique_agent_run")),
)
op.create_index(op.f("ix_agents_runs_run_id_agent_id"), "agents_runs", ["run_id", "agent_id"], unique=False)
op.create_index(op.f("ix_agents_runs_agent_id_run_id"), "agents_runs", ["agent_id", "run_id"], unique=False)
op.drop_index("ix_runs_organization_id", table_name="runs")
op.drop_index("ix_runs_created_at", table_name="runs")
op.drop_index("ix_runs_agent_id", table_name="runs")
op.drop_table("runs")
# ### end Alembic commands ###
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/567e9fe06270_create_new_runs_table_and_remove_legacy_.py",
"license": "Apache License 2.0",
"lines": 118,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/57bcea83af3f_add_various_indexes.py | """add various indexes
Revision ID: 57bcea83af3f
Revises: 5973fd8b8c60
Create Date: 2025-09-19 10:58:19.658106
"""
from typing import Sequence, Union
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "57bcea83af3f"
down_revision: Union[str, None] = "5973fd8b8c60"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.create_index("ix_block_hidden", "block", ["hidden"], unique=False)
op.create_index("ix_block_is_template", "block", ["is_template"], unique=False)
op.create_index("ix_block_org_project_template", "block", ["organization_id", "project_id", "is_template"], unique=False)
op.create_index("ix_block_organization_id", "block", ["organization_id"], unique=False)
op.create_index("ix_block_project_id", "block", ["project_id"], unique=False)
op.create_index("ix_jobs_user_id", "jobs", ["user_id"], unique=False)
op.create_index("ix_steps_job_id", "steps", ["job_id"], unique=False)
# ### end Alembic commands ###
def downgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index("ix_steps_job_id", table_name="steps")
op.drop_index("ix_jobs_user_id", table_name="jobs")
op.drop_index("ix_block_project_id", table_name="block")
op.drop_index("ix_block_organization_id", table_name="block")
op.drop_index("ix_block_org_project_template", table_name="block")
op.drop_index("ix_block_is_template", table_name="block")
op.drop_index("ix_block_hidden", table_name="block")
# ### end Alembic commands ###
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/57bcea83af3f_add_various_indexes.py",
"license": "Apache License 2.0",
"lines": 32,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/5973fd8b8c60_add_agents_runs_table.py | """add agents_runs table
Revision ID: 5973fd8b8c60
Revises: eff256d296cb
Create Date: 2025-09-18 10:52:46.270241
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "5973fd8b8c60"
down_revision: Union[str, None] = "eff256d296cb"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
"agents_runs",
sa.Column("agent_id", sa.String(), nullable=False),
sa.Column("run_id", sa.String(), nullable=False),
sa.ForeignKeyConstraint(
["agent_id"],
["agents.id"],
),
sa.ForeignKeyConstraint(
["run_id"],
["jobs.id"],
),
sa.PrimaryKeyConstraint("agent_id", "run_id"),
sa.UniqueConstraint("agent_id", "run_id", name="unique_agent_run"),
)
op.create_index("ix_agents_runs_agent_id_run_id", "agents_runs", ["agent_id", "run_id"], unique=False)
op.create_index("ix_agents_runs_run_id_agent_id", "agents_runs", ["run_id", "agent_id"], unique=False)
op.add_column("jobs", sa.Column("background", sa.Boolean(), nullable=True))
# ### end Alembic commands ###
def downgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column("jobs", "background")
op.drop_index("ix_agents_runs_run_id_agent_id", table_name="agents_runs")
op.drop_index("ix_agents_runs_agent_id_run_id", table_name="agents_runs")
op.drop_table("agents_runs")
# ### end Alembic commands ###
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/5973fd8b8c60_add_agents_runs_table.py",
"license": "Apache License 2.0",
"lines": 41,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/89b595051e48_replace_composite_runs_index.py | """replace composite runs index
Revision ID: 89b595051e48
Revises: f9ad1c25fd2b
Create Date: 2025-10-06 13:17:09.918439
"""
from typing import Sequence, Union
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "89b595051e48"
down_revision: Union[str, None] = "f9ad1c25fd2b"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f("ix_messages_run_err_sequence"), table_name="messages")
op.create_index("ix_messages_run_sequence", "messages", ["run_id", "sequence_id"], unique=False)
# ### end Alembic commands ###
def downgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index("ix_messages_run_sequence", table_name="messages")
op.create_index(op.f("ix_messages_run_err_sequence"), "messages", ["run_id", "is_err", "sequence_id"], unique=False)
# ### end Alembic commands ###
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/89b595051e48_replace_composite_runs_index.py",
"license": "Apache License 2.0",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/b6061da886ee_add_encrypted_columns.py | """add encrypted columns
Revision ID: b6061da886ee
Revises: 89b595051e48
Create Date: 2025-10-06 14:55:32.554544
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "b6061da886ee"
down_revision: Union[str, None] = "89b595051e48"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.add_column("agent_environment_variables", sa.Column("value_enc", sa.Text(), nullable=True))
op.add_column("mcp_oauth", sa.Column("authorization_code_enc", sa.Text(), nullable=True))
op.add_column("providers", sa.Column("api_key_enc", sa.Text(), nullable=True))
op.add_column("providers", sa.Column("access_key_enc", sa.Text(), nullable=True))
op.add_column("sandbox_environment_variables", sa.Column("value_enc", sa.Text(), nullable=True))
# ### end Alembic commands ###
def downgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column("sandbox_environment_variables", "value_enc")
op.drop_column("providers", "access_key_enc")
op.drop_column("providers", "api_key_enc")
op.drop_column("mcp_oauth", "authorization_code_enc")
op.drop_column("agent_environment_variables", "value_enc")
# ### end Alembic commands ###
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/b6061da886ee_add_encrypted_columns.py",
"license": "Apache License 2.0",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/f9ad1c25fd2b_add_query_optimizing_runs_listing.py | """add query optimizing runs listing
Revision ID: f9ad1c25fd2b
Revises: 3bc3c031fbe4
Create Date: 2025-10-04 00:44:06.663817
"""
from typing import Sequence, Union
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "f9ad1c25fd2b"
down_revision: Union[str, None] = "3bc3c031fbe4"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.create_index("ix_messages_run_err_sequence", "messages", ["run_id", "is_err", "sequence_id"], unique=False)
# ### end Alembic commands ###
def downgrade() -> None:
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index("ix_messages_run_err_sequence", table_name="messages")
# ### end Alembic commands ###
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/f9ad1c25fd2b_add_query_optimizing_runs_listing.py",
"license": "Apache License 2.0",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:letta/adapters/simple_llm_request_adapter.py | from typing import AsyncGenerator
from letta.adapters.letta_llm_request_adapter import LettaLLMRequestAdapter
from letta.errors import LLMError
from letta.helpers.datetime_helpers import get_utc_timestamp_ns
from letta.schemas.enums import LLMCallType
from letta.schemas.letta_message import LettaMessage
from letta.schemas.letta_message_content import OmittedReasoningContent, ReasoningContent, TextContent
from letta.schemas.usage import normalize_cache_tokens, normalize_reasoning_tokens
class SimpleLLMRequestAdapter(LettaLLMRequestAdapter):
"""Simplifying assumptions:
- No inner thoughts in kwargs
- No forced tool calls
- Content native as assistant message
"""
async def invoke_llm(
self,
request_data: dict,
messages: list,
tools: list,
use_assistant_message: bool,
requires_approval_tools: list[str] = [],
step_id: str | None = None,
actor: str | None = None,
) -> AsyncGenerator[LettaMessage | None, None]:
"""
Execute a blocking LLM request and yield the response.
This adapter:
1. Makes a blocking request to the LLM
2. Converts the response to chat completion format
3. Extracts reasoning and tool call information
4. Updates all instance variables
5. Yields nothing (blocking mode doesn't stream)
"""
# Store request data
self.request_data = request_data
# Set telemetry context and make the blocking LLM request
self.llm_client.set_telemetry_context(
telemetry_manager=self.telemetry_manager,
step_id=step_id,
agent_id=self.agent_id,
agent_tags=self.agent_tags,
run_id=self.run_id,
call_type=LLMCallType.agent_step,
org_id=self.org_id,
user_id=self.user_id,
llm_config=self.llm_config.model_dump() if self.llm_config else None,
billing_context=self.billing_context,
)
try:
self.response_data = await self.llm_client.request_async_with_telemetry(request_data, self.llm_config)
except Exception as e:
if isinstance(e, LLMError):
raise
raise self.llm_client.handle_llm_error(e, llm_config=self.llm_config)
self.llm_request_finish_timestamp_ns = get_utc_timestamp_ns()
# Convert response to chat completion format
self.chat_completions_response = await self.llm_client.convert_response_to_chat_completion(
self.response_data, messages, self.llm_config
)
# Extract reasoning content from the response
if self.chat_completions_response.choices[0].message.reasoning_content:
self.reasoning_content = [
ReasoningContent(
reasoning=self.chat_completions_response.choices[0].message.reasoning_content,
is_native=True,
signature=self.chat_completions_response.choices[0].message.reasoning_content_signature,
)
]
elif self.chat_completions_response.choices[0].message.omitted_reasoning_content:
self.reasoning_content = [OmittedReasoningContent()]
else:
# logger.info("No reasoning content found.")
self.reasoning_content = None
if self.chat_completions_response.choices[0].message.content:
# NOTE: big difference - 'content' goes into 'content'
# Reasoning placed into content for legacy reasons
# Carry thought_signature on TextContent when ReasoningContent doesn't exist to hold it
# (e.g. Gemini 2.5 Flash with include_thoughts=False still returns thought_signature)
orphan_sig = (
self.chat_completions_response.choices[0].message.reasoning_content_signature if not self.reasoning_content else None
)
self.content = [TextContent(text=self.chat_completions_response.choices[0].message.content, signature=orphan_sig)]
else:
self.content = None
if self.reasoning_content and len(self.reasoning_content) > 0:
# Temp workaround to consolidate parts to persist reasoning content, this should be integrated better
self.content = self.reasoning_content + (self.content or [])
# Extract tool call
tool_calls = self.chat_completions_response.choices[0].message.tool_calls or []
self.tool_calls = list(tool_calls)
self.tool_call = self.tool_calls[0] if self.tool_calls else None
# Extract logprobs if present
self.logprobs = self.chat_completions_response.choices[0].logprobs
# Extract usage statistics
self.usage.step_count = 1
self.usage.completion_tokens = self.chat_completions_response.usage.completion_tokens
self.usage.prompt_tokens = self.chat_completions_response.usage.prompt_tokens
self.usage.total_tokens = self.chat_completions_response.usage.total_tokens
# Extract cache and reasoning token details using normalized helpers
usage = self.chat_completions_response.usage
self.usage.cached_input_tokens, self.usage.cache_write_tokens = normalize_cache_tokens(usage.prompt_tokens_details)
self.usage.reasoning_tokens = normalize_reasoning_tokens(usage.completion_tokens_details)
self.log_provider_trace(step_id=step_id, actor=actor)
yield None
return
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/adapters/simple_llm_request_adapter.py",
"license": "Apache License 2.0",
"lines": 105,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/adapters/simple_llm_stream_adapter.py | from typing import AsyncGenerator, List
from letta.adapters.letta_llm_stream_adapter import LettaLLMStreamAdapter
from letta.errors import LLMError
from letta.log import get_logger
logger = get_logger(__name__)
from letta.helpers.datetime_helpers import get_utc_timestamp_ns
from letta.interfaces.anthropic_parallel_tool_call_streaming_interface import SimpleAnthropicStreamingInterface
from letta.interfaces.gemini_streaming_interface import SimpleGeminiStreamingInterface
from letta.interfaces.openai_streaming_interface import SimpleOpenAIResponsesStreamingInterface, SimpleOpenAIStreamingInterface
from letta.otel.tracing import log_attributes, safe_json_dumps, trace_method
from letta.schemas.enums import ProviderType
from letta.schemas.letta_message import LettaMessage
from letta.schemas.letta_message_content import LettaMessageContentUnion
from letta.schemas.provider_trace import ProviderTrace
from letta.schemas.user import User
from letta.server.rest_api.streaming_response import get_cancellation_event_for_run
from letta.settings import settings
from letta.utils import safe_create_task
class SimpleLLMStreamAdapter(LettaLLMStreamAdapter):
"""
Adapter for handling streaming LLM requests with immediate token yielding.
This adapter supports real-time streaming of tokens from the LLM, providing
minimal time-to-first-token (TTFT) latency. It uses specialized streaming
interfaces for different providers (OpenAI, Anthropic) to handle their
specific streaming formats.
"""
def _extract_tool_calls(self) -> list:
"""extract tool calls from interface, trying parallel API first then single API"""
# try multi-call api if available
if hasattr(self.interface, "get_tool_call_objects"):
try:
calls = self.interface.get_tool_call_objects()
if calls:
return calls
except Exception:
pass
# fallback to single-call api
try:
single = self.interface.get_tool_call_object()
return [single] if single else []
except Exception:
return []
async def invoke_llm(
self,
request_data: dict,
messages: list,
tools: list,
use_assistant_message: bool, # NOTE: not used
requires_approval_tools: list[str] = [],
step_id: str | None = None,
actor: User | None = None,
) -> AsyncGenerator[LettaMessage, None]:
"""
Execute a streaming LLM request and yield tokens/chunks as they arrive.
This adapter:
1. Makes a streaming request to the LLM
2. Yields chunks immediately for minimal TTFT
3. Accumulates response data through the streaming interface
4. Updates all instance variables after streaming completes
"""
# Store request data
self.request_data = request_data
# Track request start time for latency calculation
request_start_ns = get_utc_timestamp_ns()
# Get cancellation event for this run to enable graceful cancellation (before branching)
cancellation_event = get_cancellation_event_for_run(self.run_id) if self.run_id else None
# Instantiate streaming interface
if self.llm_config.model_endpoint_type in [ProviderType.anthropic, ProviderType.bedrock, ProviderType.minimax]:
# NOTE: different
self.interface = SimpleAnthropicStreamingInterface(
requires_approval_tools=requires_approval_tools,
run_id=self.run_id,
step_id=step_id,
)
elif self.llm_config.model_endpoint_type in [
ProviderType.openai,
ProviderType.deepseek,
ProviderType.openrouter,
ProviderType.zai,
ProviderType.chatgpt_oauth,
]:
# Decide interface based on payload shape
use_responses = "input" in request_data and "messages" not in request_data
# No support for Responses API proxy
is_proxy = self.llm_config.provider_name == "lmstudio_openai"
# ChatGPT OAuth always uses Responses API format
if self.llm_config.model_endpoint_type == ProviderType.chatgpt_oauth:
use_responses = True
is_proxy = False
if use_responses and not is_proxy:
self.interface = SimpleOpenAIResponsesStreamingInterface(
is_openai_proxy=False,
messages=messages,
tools=tools,
requires_approval_tools=requires_approval_tools,
run_id=self.run_id,
step_id=step_id,
cancellation_event=cancellation_event,
)
else:
self.interface = SimpleOpenAIStreamingInterface(
is_openai_proxy=self.llm_config.provider_name == "lmstudio_openai",
messages=messages,
tools=tools,
requires_approval_tools=requires_approval_tools,
model=self.llm_config.model,
run_id=self.run_id,
step_id=step_id,
cancellation_event=cancellation_event,
)
elif self.llm_config.model_endpoint_type in [ProviderType.google_ai, ProviderType.google_vertex]:
self.interface = SimpleGeminiStreamingInterface(
requires_approval_tools=requires_approval_tools,
run_id=self.run_id,
step_id=step_id,
cancellation_event=cancellation_event,
)
else:
raise ValueError(f"Streaming not supported for provider {self.llm_config.model_endpoint_type}")
# Start the streaming request (map provider errors to common LLMError types)
try:
# Gemini uses async generator pattern (no await) to maintain connection lifecycle
# Other providers return awaitables that resolve to iterators
if self.llm_config.model_endpoint_type in [ProviderType.google_ai, ProviderType.google_vertex]:
stream = self.llm_client.stream_async(request_data, self.llm_config)
else:
stream = await self.llm_client.stream_async(request_data, self.llm_config)
except Exception as e:
self.llm_request_finish_timestamp_ns = get_utc_timestamp_ns()
latency_ms = int((self.llm_request_finish_timestamp_ns - request_start_ns) / 1_000_000)
await self.llm_client.log_provider_trace_async(
request_data=request_data,
response_json=None,
llm_config=self.llm_config,
latency_ms=latency_ms,
error_msg=str(e),
error_type=type(e).__name__,
)
if isinstance(e, LLMError):
raise
raise self.llm_client.handle_llm_error(e, llm_config=self.llm_config)
# Process the stream and yield chunks immediately for TTFT
try:
async for chunk in self.interface.process(stream): # TODO: add ttft span
# Yield each chunk immediately as it arrives
yield chunk
except Exception as e:
self.llm_request_finish_timestamp_ns = get_utc_timestamp_ns()
latency_ms = int((self.llm_request_finish_timestamp_ns - request_start_ns) / 1_000_000)
await self.llm_client.log_provider_trace_async(
request_data=request_data,
response_json=None,
llm_config=self.llm_config,
latency_ms=latency_ms,
error_msg=str(e),
error_type=type(e).__name__,
)
if isinstance(e, LLMError):
raise
raise self.llm_client.handle_llm_error(e, llm_config=self.llm_config)
# After streaming completes, extract the accumulated data
self.llm_request_finish_timestamp_ns = get_utc_timestamp_ns()
# extract tool calls from interface (supports both single and parallel calls)
self.tool_calls = self._extract_tool_calls()
# preserve legacy single-call field for existing consumers
self.tool_call = self.tool_calls[-1] if self.tool_calls else None
# Extract reasoning content from the interface
# TODO this should probably just be called "content"?
# self.reasoning_content = self.interface.get_reasoning_content()
# Extract all content parts
self.content: List[LettaMessageContentUnion] = self.interface.get_content()
# Extract usage statistics from the interface
# Each interface implements get_usage_statistics() with provider-specific logic
self.usage = self.interface.get_usage_statistics()
self.usage.step_count = 1
# Store any additional data from the interface
self.message_id = self.interface.letta_message_id
# Populate finish_reason for downstream continuation logic.
# In Responses streaming, max_output_tokens is expressed via incomplete_details.reason.
if hasattr(self.interface, "final_response") and self.interface.final_response is not None:
resp = self.interface.final_response
incomplete_details = getattr(resp, "incomplete_details", None)
incomplete_reason = getattr(incomplete_details, "reason", None) if incomplete_details else None
if incomplete_reason == "max_output_tokens":
self._finish_reason = "length"
elif incomplete_reason == "content_filter":
self._finish_reason = "content_filter"
elif incomplete_reason is not None:
# Unknown incomplete reason — preserve it as-is for diagnostics
self._finish_reason = incomplete_reason
elif getattr(resp, "status", None) == "completed":
self._finish_reason = "stop"
# Log request and response data
self.log_provider_trace(step_id=step_id, actor=actor)
@trace_method
def log_provider_trace(self, step_id: str | None, actor: User | None) -> None:
"""
Log provider trace data for telemetry purposes in a fire-and-forget manner.
Creates an async task to log the request/response data without blocking
the main execution flow. For streaming adapters, this includes the final
tool call and reasoning content collected during streaming.
Args:
step_id: The step ID associated with this request for logging purposes
actor: The user associated with this request for logging purposes
"""
if step_id is None or actor is None:
return
response_json = {
"content": {
"tool_call": self.tool_call.model_dump_json() if self.tool_call else None,
# "reasoning": [content.model_dump_json() for content in self.reasoning_content],
# NOTE: different
# TODO potentially split this into both content and reasoning?
"content": [content.model_dump_json() for content in self.content],
},
"id": self.interface.message_id,
"model": self.interface.model,
"role": "assistant",
# "stop_reason": "",
# "stop_sequence": None,
"type": "message",
# Use raw_usage if available for transparent provider trace logging, else fallback
"usage": self.interface.raw_usage
if hasattr(self.interface, "raw_usage") and self.interface.raw_usage
else {
"input_tokens": self.usage.prompt_tokens,
"output_tokens": self.usage.completion_tokens,
},
}
log_attributes(
{
"request_data": safe_json_dumps(self.request_data),
"response_data": safe_json_dumps(response_json),
}
)
if settings.track_provider_trace:
safe_create_task(
self.telemetry_manager.create_provider_trace_async(
actor=actor,
provider_trace=ProviderTrace(
request_json=self.request_data,
response_json=response_json,
step_id=step_id,
agent_id=self.agent_id,
agent_tags=self.agent_tags,
run_id=self.run_id,
call_type=self.call_type,
org_id=self.org_id,
user_id=self.user_id,
llm_config=self.llm_config.model_dump() if self.llm_config else None,
billing_context=self.billing_context,
),
),
label="create_provider_trace",
)
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/adapters/simple_llm_stream_adapter.py",
"license": "Apache License 2.0",
"lines": 255,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/agents/letta_agent_v3.py | import asyncio
import json
import uuid
from typing import Any, AsyncGenerator, Dict, Optional
from opentelemetry.trace import Span
from letta.adapters.letta_llm_adapter import LettaLLMAdapter
from letta.adapters.sglang_native_adapter import SGLangNativeAdapter
from letta.adapters.simple_llm_request_adapter import SimpleLLMRequestAdapter
from letta.adapters.simple_llm_stream_adapter import SimpleLLMStreamAdapter
from letta.agents.helpers import (
_build_rule_violation_result,
_load_last_function_response,
_maybe_get_approval_messages,
_maybe_get_pending_tool_call_message,
_prepare_in_context_messages_no_persist_async,
_safe_load_tool_call_str,
generate_step_id,
merge_and_validate_prefilled_args,
)
from letta.agents.letta_agent_v2 import LettaAgentV2
from letta.constants import DEFAULT_MAX_STEPS, NON_USER_MSG_PREFIX, REQUEST_HEARTBEAT_PARAM
from letta.errors import ContextWindowExceededError, LLMEmptyResponseError, LLMError, SystemPromptTokenExceededError
from letta.helpers import ToolRulesSolver
from letta.helpers.datetime_helpers import get_utc_time, get_utc_timestamp_ns
from letta.helpers.tool_execution_helper import enable_strict_mode
from letta.local_llm.constants import INNER_THOUGHTS_KWARG
from letta.otel.tracing import trace_method
from letta.schemas.agent import AgentState
from letta.schemas.enums import LLMCallType
from letta.schemas.letta_message import (
ApprovalReturn,
CompactionStats,
EventMessage,
LettaErrorMessage,
LettaMessage,
MessageType,
SummaryMessage,
extract_compaction_stats_from_packed_json,
)
from letta.schemas.letta_message_content import OmittedReasoningContent, ReasoningContent, RedactedReasoningContent, TextContent
from letta.schemas.letta_request import ClientToolSchema
from letta.schemas.letta_response import LettaResponse, TurnTokenData
from letta.schemas.letta_stop_reason import LettaStopReason, StopReasonType
from letta.schemas.message import Message, MessageCreate, ToolReturn
from letta.schemas.openai.chat_completion_response import ChoiceLogprobs, ToolCall, ToolCallDenial, UsageStatistics
from letta.schemas.provider_trace import BillingContext
from letta.schemas.step import StepProgression
from letta.schemas.step_metrics import StepMetrics
from letta.schemas.tool_execution_result import ToolExecutionResult
from letta.schemas.user import User
from letta.server.rest_api.utils import (
create_approval_request_message_from_llm_response,
create_letta_messages_from_llm_response,
create_parallel_tool_messages_from_llm_response,
create_tool_returns_for_denials,
)
from letta.services.conversation_manager import ConversationManager
from letta.services.helpers.tool_parser_helper import runtime_override_tool_json_schema
from letta.services.summarizer.compact import compact_messages
from letta.services.summarizer.summarizer_config import CompactionSettings
from letta.services.summarizer.summarizer_sliding_window import count_tokens
from letta.settings import settings, summarizer_settings
from letta.system import package_function_response
from letta.utils import safe_create_task_with_return, validate_function_response
def extract_compaction_stats_from_message(message: Message) -> CompactionStats | None:
    """
    Extract CompactionStats from a Message object's packed content.

    Args:
        message: Message object with packed JSON content

    Returns:
        CompactionStats if found and valid, None otherwise
    """
    try:
        content = message.content
        # Only a single packed text part can carry compaction stats.
        if not content or len(content) != 1:
            return None
        return extract_compaction_stats_from_packed_json(content[0].text)
    except AttributeError:
        # Message lacks the expected content/text attributes; treat as no stats.
        return None
class LettaAgentV3(LettaAgentV2):
"""
Similar to V2, but stripped down / simplified, while also generalized:
* Supports non-tool returns
* No inner thoughts in kwargs
* No heartbeats (loops happen on tool calls)
TODOs:
* Support tool rules
* Support Gemini / OpenAI client
"""
def __init__(
self,
agent_state: AgentState,
actor: User,
conversation_id: str | None = None,
):
super().__init__(agent_state, actor)
# Set conversation_id after parent init (which calls _initialize_state)
self.conversation_id = conversation_id
def _initialize_state(self):
super()._initialize_state()
self._require_tool_call = False
# Approximate token count for the *current* in-context buffer, used
# only for proactive summarization / eviction logic. This is derived
# from per-step usage but can be updated after summarization without
# affecting step-level telemetry.
self.context_token_estimate: int | None = None
self.in_context_messages: list[Message] = [] # in-memory tracker
# Conversation mode: when set, messages are tracked per-conversation
self.conversation_id: str | None = None
# Client-side tools passed in the request (executed by client, not server)
self.client_tools: list[ClientToolSchema] = []
# Log probabilities from the most recent LLM call (for RL training)
self.logprobs: ChoiceLogprobs | None = None
# Multi-turn token tracking for RL training (accumulated across all LLM calls)
self.turns: list[TurnTokenData] = []
self.return_token_ids: bool = False
def _compute_tool_return_truncation_chars(self) -> int:
"""Compute a dynamic cap for tool returns in requests.
Heuristic: ~20% of context window × 4 chars/token, minimum 5k chars.
This prevents any single tool return from consuming too much context.
"""
try:
cap = int(self.agent_state.llm_config.context_window * 0.2 * 4) # 20% of tokens → chars
except Exception:
cap = 5000
return max(5000, cap)
@trace_method
async def step(
self,
input_messages: list[MessageCreate],
max_steps: int = DEFAULT_MAX_STEPS,
run_id: str | None = None,
use_assistant_message: bool = True, # NOTE: not used
include_return_message_types: list[MessageType] | None = None,
request_start_timestamp_ns: int | None = None,
conversation_id: str | None = None,
client_tools: list[ClientToolSchema] | None = None,
include_compaction_messages: bool = False,
billing_context: "BillingContext | None" = None,
) -> LettaResponse:
"""
Execute the agent loop in blocking mode, returning all messages at once.
Args:
input_messages: List of new messages to process
max_steps: Maximum number of agent steps to execute
run_id: Optional job/run ID for tracking
use_assistant_message: Whether to use assistant message format
include_return_message_types: Filter for which message types to return
request_start_timestamp_ns: Start time for tracking request duration
conversation_id: Optional conversation ID for conversation-scoped messaging
client_tools: Optional list of client-side tools. When called, execution pauses
for client to provide tool returns.
include_compaction_messages: Whether to include SummaryMessage/EventMessage in response
and use role=summary for stored summary messages.
Returns:
LettaResponse: Complete response with all messages and metadata
"""
self._initialize_state()
self.conversation_id = conversation_id
self.client_tools = client_tools or []
# Apply conversation-specific block overrides if conversation_id is provided
if conversation_id:
self.agent_state = await ConversationManager().apply_isolated_blocks_to_agent_state(
agent_state=self.agent_state,
conversation_id=conversation_id,
actor=self.actor,
)
request_span = self._request_checkpoint_start(request_start_timestamp_ns=request_start_timestamp_ns)
response_letta_messages = []
# Prepare in-context messages (conversation mode if conversation_id provided)
curr_in_context_messages, input_messages_to_persist = await _prepare_in_context_messages_no_persist_async(
input_messages,
self.agent_state,
self.message_manager,
self.actor,
run_id,
conversation_id=conversation_id,
)
follow_up_messages = []
if len(input_messages_to_persist) > 1 and input_messages_to_persist[0].role == "approval":
follow_up_messages = input_messages_to_persist[1:]
input_messages_to_persist = [input_messages_to_persist[0]]
self.in_context_messages = curr_in_context_messages
# Check if we should use SGLang native adapter for multi-turn RL training
use_sglang_native = (
self.agent_state.llm_config.return_token_ids
and self.agent_state.llm_config.handle
and self.agent_state.llm_config.handle.startswith("sglang/")
)
self.return_token_ids = use_sglang_native
if use_sglang_native:
# Use SGLang native adapter for multi-turn RL training
llm_adapter = SGLangNativeAdapter(
llm_client=self.llm_client,
llm_config=self.agent_state.llm_config,
call_type=LLMCallType.agent_step,
agent_id=self.agent_state.id,
agent_tags=self.agent_state.tags,
run_id=run_id,
org_id=self.actor.organization_id,
user_id=self.actor.id,
)
# Reset turns tracking for this step
self.turns = []
else:
llm_adapter = SimpleLLMRequestAdapter(
llm_client=self.llm_client,
llm_config=self.agent_state.llm_config,
call_type=LLMCallType.agent_step,
agent_id=self.agent_state.id,
agent_tags=self.agent_state.tags,
run_id=run_id,
org_id=self.actor.organization_id,
user_id=self.actor.id,
billing_context=billing_context,
)
credit_task = None
for i in range(max_steps):
if i == 1 and follow_up_messages:
input_messages_to_persist = follow_up_messages
follow_up_messages = []
# Await credit check from previous iteration before running next step
if credit_task is not None:
if not await credit_task:
self.should_continue = False
self.stop_reason = LettaStopReason(stop_reason=StopReasonType.insufficient_credits)
break
credit_task = None
response = self._step(
# we append input_messages_to_persist since they aren't checkpointed as in-context until the end of the step (may be rolled back)
messages=list(self.in_context_messages + input_messages_to_persist),
input_messages_to_persist=input_messages_to_persist,
llm_adapter=llm_adapter,
run_id=run_id,
# use_assistant_message=use_assistant_message,
include_return_message_types=include_return_message_types,
request_start_timestamp_ns=request_start_timestamp_ns,
include_compaction_messages=include_compaction_messages,
)
input_messages_to_persist = [] # clear after first step
async for chunk in response:
response_letta_messages.append(chunk)
# Check if step was cancelled - break out of the step loop
if not self.should_continue and self.stop_reason.stop_reason == StopReasonType.cancelled.value:
break
# TODO: persist the input messages if successful first step completion
# TODO: persist the new messages / step / run
## Proactive summarization if approaching context limit
# if (
# self.context_token_estimate is not None
# and self.context_token_estimate > self.agent_state.llm_config.context_window * SUMMARIZATION_TRIGGER_MULTIPLIER
# and not self.agent_state.message_buffer_autoclear
# ):
# self.logger.warning(
# f"Step usage ({self.last_step_usage.total_tokens} tokens) approaching "
# f"context limit ({self.agent_state.llm_config.context_window}), triggering summarization."
# )
# in_context_messages = await self.summarize_conversation_history(
# in_context_messages=in_context_messages,
# new_letta_messages=self.response_messages,
# total_tokens=self.context_token_estimate,
# force=True,
# )
# # Clear to avoid duplication in next iteration
# self.response_messages = []
if not self.should_continue:
break
# Fire credit check to run in parallel with loop overhead / next step setup
credit_task = safe_create_task_with_return(self._check_credits())
# input_messages_to_persist = []
if i == max_steps - 1 and self.stop_reason is None:
self.stop_reason = LettaStopReason(stop_reason=StopReasonType.max_steps.value)
## Rebuild context window after stepping (safety net)
# if not self.agent_state.message_buffer_autoclear:
# if self.context_token_estimate is not None:
# await self.summarize_conversation_history(
# in_context_messages=in_context_messages,
# new_letta_messages=self.response_messages,
# total_tokens=self.context_token_estimate,
# force=False,
# )
# else:
# self.logger.warning(
# "Post-loop summarization skipped: last_step_usage is None. "
# "No step completed successfully or usage stats were not updated."
# )
if self.stop_reason is None:
self.stop_reason = LettaStopReason(stop_reason=StopReasonType.end_turn.value)
# construct the response
response_letta_messages = Message.to_letta_messages_from_list(
self.response_messages,
use_assistant_message=False, # NOTE: set to false
reverse=False,
text_is_assistant_message=True,
)
if include_return_message_types:
response_letta_messages = [m for m in response_letta_messages if m.message_type in include_return_message_types]
# Set context_tokens to expose actual context window usage (vs accumulated prompt_tokens)
self.usage.context_tokens = self.context_token_estimate
result = LettaResponse(
messages=response_letta_messages,
stop_reason=self.stop_reason,
usage=self.usage,
logprobs=self.logprobs,
turns=self.turns if self.return_token_ids and self.turns else None,
)
if run_id:
if self.job_update_metadata is None:
self.job_update_metadata = {}
self.job_update_metadata["result"] = result.model_dump(mode="json")
await self._request_checkpoint_finish(
request_span=request_span, request_start_timestamp_ns=request_start_timestamp_ns, run_id=run_id
)
return result
    @trace_method
    async def stream(
        self,
        input_messages: list[MessageCreate],
        max_steps: int = DEFAULT_MAX_STEPS,
        stream_tokens: bool = False,
        run_id: str | None = None,
        use_assistant_message: bool = True,  # NOTE: not used
        include_return_message_types: list[MessageType] | None = None,
        request_start_timestamp_ns: int | None = None,
        conversation_id: str | None = None,
        client_tools: list[ClientToolSchema] | None = None,
        include_compaction_messages: bool = False,
        billing_context: BillingContext | None = None,
    ) -> AsyncGenerator[str, None]:
        """
        Execute the agent loop in streaming mode, yielding chunks as they become available.

        If stream_tokens is True, individual tokens are streamed as they arrive from the LLM,
        providing the lowest latency experience, otherwise each complete step (reasoning +
        tool call + tool return) is yielded as it completes.

        Args:
            input_messages: List of new messages to process
            max_steps: Maximum number of agent steps to execute
            stream_tokens: Whether to stream back individual tokens. Not all llm
                providers offer native token streaming functionality; in these cases,
                this api streams back steps rather than individual tokens.
            run_id: Optional job/run ID for tracking
            use_assistant_message: Whether to use assistant message format
            include_return_message_types: Filter for which message types to return
            request_start_timestamp_ns: Start time for tracking request duration
            conversation_id: Optional conversation ID for conversation-scoped messaging
            client_tools: Optional list of client-side tools. When called, execution pauses
                for client to provide tool returns.
            include_compaction_messages: Whether to include SummaryMessage/EventMessage
                in the stream (forwarded to _step).
            billing_context: Optional billing metadata forwarded to the LLM adapter.

        Yields:
            str: JSON-formatted SSE data chunks for each completed step
        """
        self._initialize_state()
        self.conversation_id = conversation_id
        self.client_tools = client_tools or []
        request_span = self._request_checkpoint_start(request_start_timestamp_ns=request_start_timestamp_ns)
        response_letta_messages = []
        # first_chunk gates both TTFT reporting and error handling: before the
        # first chunk is yielded we can still raise (caller can return a proper
        # HTTP status); afterwards, errors must be delivered in-band as SSE events.
        first_chunk = True

        # Apply conversation-specific block overrides if conversation_id is provided
        if conversation_id:
            self.agent_state = await ConversationManager().apply_isolated_blocks_to_agent_state(
                agent_state=self.agent_state,
                conversation_id=conversation_id,
                actor=self.actor,
            )

        # Check if we should use SGLang native adapter for multi-turn RL training
        use_sglang_native = (
            self.agent_state.llm_config.return_token_ids
            and self.agent_state.llm_config.handle
            and self.agent_state.llm_config.handle.startswith("sglang/")
        )
        self.return_token_ids = use_sglang_native

        # Adapter precedence: token streaming wins over SGLang native mode.
        if stream_tokens:
            llm_adapter = SimpleLLMStreamAdapter(
                llm_client=self.llm_client,
                llm_config=self.agent_state.llm_config,
                call_type=LLMCallType.agent_step,
                agent_id=self.agent_state.id,
                agent_tags=self.agent_state.tags,
                run_id=run_id,
                org_id=self.actor.organization_id,
                user_id=self.actor.id,
                billing_context=billing_context,
            )
        elif use_sglang_native:
            # Use SGLang native adapter for multi-turn RL training
            llm_adapter = SGLangNativeAdapter(
                llm_client=self.llm_client,
                llm_config=self.agent_state.llm_config,
                call_type=LLMCallType.agent_step,
                agent_id=self.agent_state.id,
                agent_tags=self.agent_state.tags,
                run_id=run_id,
                org_id=self.actor.organization_id,
                user_id=self.actor.id,
                billing_context=billing_context,
            )
            # Reset turns tracking for this step
            self.turns = []
        else:
            llm_adapter = SimpleLLMRequestAdapter(
                llm_client=self.llm_client,
                llm_config=self.agent_state.llm_config,
                call_type=LLMCallType.agent_step,
                agent_id=self.agent_state.id,
                agent_tags=self.agent_state.tags,
                run_id=run_id,
                org_id=self.actor.organization_id,
                user_id=self.actor.id,
                billing_context=billing_context,
            )

        try:
            # Prepare in-context messages (conversation mode if conversation_id provided)
            in_context_messages, input_messages_to_persist = await _prepare_in_context_messages_no_persist_async(
                input_messages,
                self.agent_state,
                self.message_manager,
                self.actor,
                run_id,
                conversation_id=conversation_id,
            )
            # If an approval message arrives with trailing inputs, defer the
            # follow-ups to the second loop iteration so the approval resolves first.
            follow_up_messages = []
            if len(input_messages_to_persist) > 1 and input_messages_to_persist[0].role == "approval":
                follow_up_messages = input_messages_to_persist[1:]
                input_messages_to_persist = [input_messages_to_persist[0]]
            self.in_context_messages = in_context_messages
            credit_task = None
            for i in range(max_steps):
                if i == 1 and follow_up_messages:
                    input_messages_to_persist = follow_up_messages
                    follow_up_messages = []
                # Await credit check from previous iteration before running next step
                if credit_task is not None:
                    if not await credit_task:
                        self.should_continue = False
                        self.stop_reason = LettaStopReason(stop_reason=StopReasonType.insufficient_credits)
                        break
                    credit_task = None
                response = self._step(
                    # we append input_messages_to_persist since they aren't checkpointed as in-context until the end of the step (may be rolled back)
                    messages=list(self.in_context_messages + input_messages_to_persist),
                    input_messages_to_persist=input_messages_to_persist,
                    llm_adapter=llm_adapter,
                    run_id=run_id,
                    # use_assistant_message=use_assistant_message,
                    include_return_message_types=include_return_message_types,
                    request_start_timestamp_ns=request_start_timestamp_ns,
                    include_compaction_messages=include_compaction_messages,
                )
                input_messages_to_persist = []  # clear after first step
                async for chunk in response:
                    response_letta_messages.append(chunk)
                    if first_chunk:
                        request_span = self._request_checkpoint_ttft(request_span, request_start_timestamp_ns)
                    # Log chunks with missing id or otid for debugging.
                    # Compaction EventMessage is intentionally metadata-only and may omit otid.
                    is_compaction_event = isinstance(chunk, EventMessage) and chunk.event_type == "compaction"
                    if isinstance(chunk, LettaMessage) and (not chunk.id or not chunk.otid) and not is_compaction_event:
                        self.logger.warning(
                            "Streaming chunk missing id or otid: message_type=%s id=%s otid=%s step_id=%s",
                            chunk.message_type,
                            chunk.id,
                            chunk.otid,
                            chunk.step_id,
                        )
                    yield f"data: {chunk.model_dump_json()}\n\n"
                    first_chunk = False

                # Check if step was cancelled - break out of the step loop
                if not self.should_continue and self.stop_reason.stop_reason == StopReasonType.cancelled.value:
                    break

                # refresh in-context messages (TODO: remove?)
                # in_context_messages = await self._refresh_messages(in_context_messages)
                if not self.should_continue:
                    break
                # Fire credit check to run in parallel with loop overhead / next step setup
                credit_task = safe_create_task_with_return(self._check_credits())

                if i == max_steps - 1 and self.stop_reason is None:
                    self.stop_reason = LettaStopReason(stop_reason=StopReasonType.max_steps.value)

            ## Rebuild context window after stepping (safety net)
            # if not self.agent_state.message_buffer_autoclear:
            #     if self.context_token_estimate is not None:
            #         await self.summarize_conversation_history(
            #             in_context_messages=in_context_messages,
            #             new_letta_messages=self.response_messages,
            #             total_tokens=self.context_token_estimate,
            #             force=False,
            #         )
            #     else:
            #         self.logger.warning(
            #             "Post-loop summarization skipped: last_step_usage is None. "
            #             "No step completed successfully or usage stats were not updated."
            #         )

            if self.stop_reason is None:
                self.stop_reason = LettaStopReason(stop_reason=StopReasonType.end_turn.value)

        except Exception as e:
            # Use repr() if str() is empty (happens with Exception() with no args)
            error_detail = str(e) or repr(e)
            self.logger.warning(f"Error during agent stream: {error_detail}", exc_info=True)
            # Set stop_reason if not already set
            if self.stop_reason is None:
                # Classify error type
                if isinstance(e, LLMError):
                    self.stop_reason = LettaStopReason(stop_reason=StopReasonType.llm_api_error.value)
                else:
                    self.stop_reason = LettaStopReason(stop_reason=StopReasonType.error.value)

            if first_chunk:
                # Raise if no chunks sent yet (response not started, can return error status code)
                raise
            else:
                yield f"data: {self.stop_reason.model_dump_json()}\n\n"
                # Mid-stream error: yield error event to client in SSE format
                error_message = LettaErrorMessage(
                    run_id=run_id,
                    error_type="internal_error",
                    message="An error occurred during agent execution.",
                    detail=error_detail,
                )
                yield f"event: error\ndata: {error_message.model_dump_json()}\n\n"
                # Return immediately - don't fall through to finish chunks
                # This prevents sending end_turn finish chunks after an error
                return

        # Cleanup and finalize (only runs if no exception occurred)
        try:
            # Set context_tokens to expose actual context window usage (vs accumulated prompt_tokens)
            self.usage.context_tokens = self.context_token_estimate
            if run_id:
                # Filter out LettaStopReason from messages (only valid in LettaStreamingResponse, not LettaResponse)
                filtered_messages = [m for m in response_letta_messages if not isinstance(m, LettaStopReason)]
                result = LettaResponse(
                    messages=filtered_messages,
                    stop_reason=self.stop_reason,
                    usage=self.usage,
                    logprobs=self.logprobs,
                    turns=self.turns if self.return_token_ids and self.turns else None,
                )
                if self.job_update_metadata is None:
                    self.job_update_metadata = {}
                self.job_update_metadata["result"] = result.model_dump(mode="json")

            await self._request_checkpoint_finish(
                request_span=request_span, request_start_timestamp_ns=request_start_timestamp_ns, run_id=run_id
            )
            for finish_chunk in self.get_finish_chunks_for_stream(self.usage, self.stop_reason):
                yield f"data: {finish_chunk}\n\n"
        except Exception as cleanup_error:
            # Error during cleanup/finalization - ensure we still send a terminal event
            self.logger.error(f"Error during stream cleanup: {cleanup_error}", exc_info=True)
            # Set stop_reason if not already set
            if self.stop_reason is None:
                self.stop_reason = LettaStopReason(stop_reason=StopReasonType.error.value)
            yield f"data: {self.stop_reason.model_dump_json()}\n\n"
            # Send error event
            error_message = LettaErrorMessage(
                run_id=run_id,
                error_type="cleanup_error",
                message="An error occurred during stream finalization.",
                detail=str(cleanup_error),
            )
            yield f"event: error\ndata: {error_message.model_dump_json()}\n\n"
            # Note: we don't send finish chunks here since we already errored
async def _check_for_system_prompt_overflow(self, system_message):
"""
Since the system prompt cannot be compacted, we need to check to see if it is the cause of the context overflow
"""
system_prompt_token_estimate = await count_tokens(
actor=self.actor,
llm_config=self.agent_state.llm_config,
messages=[system_message],
)
if system_prompt_token_estimate is not None and system_prompt_token_estimate >= self.agent_state.llm_config.context_window:
self.should_continue = False
self.stop_reason = LettaStopReason(stop_reason=StopReasonType.context_window_overflow_in_system_prompt.value)
raise SystemPromptTokenExceededError(
system_prompt_token_estimate=system_prompt_token_estimate,
context_window=self.agent_state.llm_config.context_window,
)
async def _checkpoint_messages(self, run_id: str, step_id: str, new_messages: list[Message], in_context_messages: list[Message]):
"""
Checkpoint the current message state - run this only when the current messages are 'safe' - meaning the step has completed successfully.
This handles:
- Persisting the new messages into the `messages` table
- Updating the in-memory trackers for in-context messages (`self.in_context_messages`) and agent state (`self.agent_state.message_ids`)
- Updating the DB with the current in-context messages (`self.agent_state.message_ids`) OR conversation_messages table
Args:
run_id: The run ID to associate with the messages
step_id: The step ID to associate with the messages
new_messages: The new messages to persist
in_context_messages: The current in-context messages
"""
# make sure all the new messages have the correct run_id, step_id, and conversation_id
for message in new_messages:
message.step_id = step_id
message.run_id = run_id
message.conversation_id = self.conversation_id
# persist the new message objects - ONLY place where messages are persisted
await self.message_manager.create_many_messages_async(
new_messages,
actor=self.actor,
run_id=run_id,
project_id=self.agent_state.project_id,
template_id=self.agent_state.template_id,
)
if self.conversation_id:
# Conversation mode: update conversation_messages table
# Add new messages to conversation tracking
new_message_ids = [m.id for m in new_messages]
if new_message_ids:
await ConversationManager().add_messages_to_conversation(
conversation_id=self.conversation_id,
agent_id=self.agent_state.id,
message_ids=new_message_ids,
actor=self.actor,
)
# Update which messages are in context
# Note: update_in_context_messages also updates positions to preserve order
await ConversationManager().update_in_context_messages(
conversation_id=self.conversation_id,
in_context_message_ids=[m.id for m in in_context_messages],
actor=self.actor,
)
else:
# Default mode: update agent.message_ids
await self.agent_manager.update_message_ids_async(
agent_id=self.agent_state.id,
message_ids=[m.id for m in in_context_messages],
actor=self.actor,
)
self.agent_state.message_ids = [m.id for m in in_context_messages] # update in-memory state
self.in_context_messages = in_context_messages # update in-memory state
def _create_compaction_event_message(
self,
step_id: str | None,
run_id: str | None,
trigger: str,
) -> EventMessage:
"""
Create an EventMessage to notify the client that compaction is starting.
Args:
step_id: The current step ID
run_id: The current run ID
trigger: The trigger that caused compaction (e.g., "context_window_exceeded", "post_step_context_check")
Returns:
EventMessage to yield before compaction starts
"""
return EventMessage(
id=str(uuid.uuid4()),
date=get_utc_time(),
event_type="compaction",
event_data={
"trigger": trigger,
"context_token_estimate": self.context_token_estimate,
"context_window": self.agent_state.llm_config.context_window,
},
run_id=run_id,
step_id=step_id,
)
def _create_summary_result_message(
self,
summary_message: Message,
summary_text: str,
step_id: str | None,
run_id: str | None,
include_compaction_messages: bool,
) -> list[LettaMessage]:
"""
Create the summary message to yield to the client after compaction completes.
Args:
summary_message: The persisted summary Message object
summary_text: The raw summary text (unpacked)
step_id: The current step ID
run_id: The current run ID
include_compaction_messages: If True, return SummaryMessage; if False, return UserMessage
Returns:
List of LettaMessage objects to yield to the client
"""
if include_compaction_messages:
# Extract compaction_stats from the packed message content if available
compaction_stats = extract_compaction_stats_from_message(summary_message)
# New behavior: structured SummaryMessage
return [
SummaryMessage(
id=summary_message.id,
date=summary_message.created_at,
summary=summary_text,
otid=Message.generate_otid_from_id(summary_message.id, 0),
step_id=step_id,
run_id=run_id,
compaction_stats=compaction_stats,
),
]
else:
# Old behavior: UserMessage with packed JSON
messages = list(Message.to_letta_messages(summary_message))
# Set otid on returned messages (summary Message doesn't have otid set at creation)
for i, msg in enumerate(messages):
if not msg.otid:
msg.otid = Message.generate_otid_from_id(summary_message.id, i)
return messages
    @trace_method
    async def _step(
        self,
        messages: list[Message],  # current in-context messages
        llm_adapter: LettaLLMAdapter,
        input_messages_to_persist: list[Message] | None = None,
        run_id: str | None = None,
        # use_assistant_message: bool = True,
        include_return_message_types: list[MessageType] | None = None,
        request_start_timestamp_ns: int | None = None,
        remaining_turns: int = -1,
        dry_run: bool = False,
        enforce_run_id_set: bool = True,
        include_compaction_messages: bool = False,
    ) -> AsyncGenerator[LettaMessage | dict, None]:
        """
        Execute a single agent step (one LLM call and tool execution).
        This is the core execution method that all public methods (step, stream_steps,
        stream_tokens) funnel through. It handles the complete flow of making an LLM
        request, processing the response, executing tools, and persisting messages.
        Args:
            messages: Current in-context messages
            llm_adapter: Adapter for LLM interaction (blocking or streaming)
            input_messages_to_persist: New messages to persist after execution
            run_id: Optional job/run ID for tracking
            include_return_message_types: Filter for which message types to yield
            request_start_timestamp_ns: Start time for tracking request duration
            remaining_turns: Number of turns remaining (for max_steps enforcement)
            dry_run: If true, only build and return the request without executing
            enforce_run_id_set: If True, raise AssertionError when run_id is None
            include_compaction_messages: If True, yield EventMessage/SummaryMessage
                around compaction; if False, use the legacy UserMessage representation
        Raises:
            AssertionError: If enforce_run_id_set is True and run_id is None
        Yields:
            LettaMessage or dict: Chunks for streaming mode, or request data for dry_run
        """
        if enforce_run_id_set and run_id is None:
            raise AssertionError("run_id is required when enforce_run_id_set is True")
        input_messages_to_persist = input_messages_to_persist or []
        if self.context_token_estimate is None:
            self.logger.warning("Context token estimate is not set")
        # Step-progression state machine: the finally block below keys its cleanup
        # off how far we got before an exception (if any) was raised.
        step_progression = StepProgression.START
        caught_exception = None
        # TODO(@caren): clean this up
        tool_calls, content, agent_step_span, _first_chunk, step_id, logged_step, _step_start_ns, step_metrics = (
            None,
            None,
            None,
            None,
            None,
            None,
            None,
            None,
        )
        try:
            self.last_function_response = _load_last_function_response(messages)
            valid_tools = await self._get_valid_tools()
            require_tool_call = self.tool_rules_solver.should_force_tool_call()
            if self._require_tool_call != require_tool_call:
                if require_tool_call:
                    self.logger.info("switching to constrained mode (forcing tool call)")
                else:
                    self.logger.info("switching to unconstrained mode (allowing non-tool responses)")
                self._require_tool_call = require_tool_call
            # Refresh messages at the start of each step to scrub inner thoughts.
            # NOTE: We skip system prompt refresh during normal steps to preserve prefix caching.
            # The system prompt is only rebuilt after compaction or message reset.
            try:
                messages = await self._refresh_messages(messages, force_system_prompt_refresh=False)
            except Exception as e:
                # Best-effort: a failed refresh is logged but does not abort the step.
                self.logger.warning(f"Failed to refresh messages at step start: {e}")
            approval_request, approval_response = _maybe_get_approval_messages(messages)
            tool_call_denials, tool_returns = [], []
            if approval_request and approval_response:
                # case of handling approval responses
                content = approval_request.content
                # Get tool calls that are pending
                backfill_tool_call_id = approval_request.tool_calls[0].id  # legacy case
                if approval_response.approvals:
                    # Legacy approvals carry a "message-" prefixed id; map those back to
                    # the first requested tool call's id.
                    approved_tool_call_ids = {
                        backfill_tool_call_id if a.tool_call_id.startswith("message-") else a.tool_call_id
                        for a in approval_response.approvals
                        if isinstance(a, ApprovalReturn) and a.approve
                    }
                else:
                    # NOTE: {} is an empty dict, not a set; membership tests below behave
                    # identically to an empty set, so this is functionally equivalent.
                    approved_tool_call_ids = {}
                tool_calls = [tool_call for tool_call in approval_request.tool_calls if tool_call.id in approved_tool_call_ids]
                pending_tool_call_message = _maybe_get_pending_tool_call_message(messages)
                if pending_tool_call_message:
                    tool_calls.extend(pending_tool_call_message.tool_calls)
                # Get tool calls that were denied
                if approval_response.approvals:
                    denies = {d.tool_call_id: d for d in approval_response.approvals if isinstance(d, ApprovalReturn) and not d.approve}
                else:
                    denies = {}
                tool_call_denials = [
                    ToolCallDenial(**t.model_dump(), reason=denies.get(t.id).reason) for t in approval_request.tool_calls if t.id in denies
                ]
                # Get tool calls that were executed client side
                if approval_response.approvals:
                    tool_returns = [r for r in approval_response.approvals if isinstance(r, ToolReturn)]
                # Validate that the approval response contains meaningful data
                # If all three lists are empty, this is a malformed approval response
                if not tool_calls and not tool_call_denials and not tool_returns:
                    self.logger.error(
                        f"Invalid approval response: approval_response.approvals is {approval_response.approvals} "
                        f"but no tool calls, denials, or returns were extracted. "
                        f"This likely indicates a corrupted or malformed approval payload."
                    )
                    self.should_continue = False
                    self.stop_reason = LettaStopReason(stop_reason=StopReasonType.invalid_tool_call.value)
                    return
                # Reuse the step that produced the approval request so metrics/telemetry
                # attach to the original step rather than a new one.
                step_id = approval_request.step_id
                if step_id is None:
                    # Old approval messages may not have step_id set - generate a new one
                    self.logger.warning(f"Approval request message {approval_request.id} has no step_id, generating new step_id")
                    step_id = generate_step_id()
                    step_progression, logged_step, step_metrics, agent_step_span = await self._step_checkpoint_start(
                        step_id=step_id, run_id=run_id
                    )
                else:
                    step_metrics = await self.step_manager.get_step_metrics_async(step_id=step_id, actor=self.actor)
            else:
                # Check for job cancellation at the start of each step
                if run_id and await self._check_run_cancellation(run_id):
                    self.should_continue = False
                    self.stop_reason = LettaStopReason(stop_reason=StopReasonType.cancelled.value)
                    self.logger.info(f"Agent execution cancelled for run {run_id}")
                    return
                step_id = generate_step_id()
                step_progression, logged_step, step_metrics, agent_step_span = await self._step_checkpoint_start(
                    step_id=step_id, run_id=run_id
                )
            force_tool_call = valid_tools[0]["name"] if len(valid_tools) == 1 and self._require_tool_call else None
            # Retry loop: each extra attempt is preceded by a compaction pass when the
            # failure was a context-window overflow (see the ContextWindowExceededError
            # handling below).
            for llm_request_attempt in range(summarizer_settings.max_summarizer_retries + 1):
                try:
                    request_data = self.llm_client.build_request_data(
                        agent_type=self.agent_state.agent_type,
                        messages=messages,
                        llm_config=self.agent_state.llm_config,
                        tools=valid_tools,
                        force_tool_call=force_tool_call,
                        requires_subsequent_tool_call=self._require_tool_call,
                        tool_return_truncation_chars=self._compute_tool_return_truncation_chars(),
                    )
                    # TODO: Extend to more providers, and also approval tool rules
                    # TODO: this entire code block should be inside of the clients
                    # Enable parallel tool use when no tool rules are attached
                    try:
                        # requires_approval rules don't constrain call ordering, so they
                        # don't count against enabling parallel tool use.
                        no_tool_rules = (
                            not self.agent_state.tool_rules
                            or len([t for t in self.agent_state.tool_rules if t.type != "requires_approval"]) == 0
                        )
                        # Anthropic/Bedrock/MiniMax parallel tool use (MiniMax uses Anthropic-compatible API)
                        if self.agent_state.llm_config.model_endpoint_type in ["anthropic", "bedrock", "minimax"]:
                            if (
                                isinstance(request_data.get("tool_choice"), dict)
                                and "disable_parallel_tool_use" in request_data["tool_choice"]
                            ):
                                # Gate parallel tool use on both: no tool rules and toggled on
                                if no_tool_rules and self.agent_state.llm_config.parallel_tool_calls:
                                    request_data["tool_choice"]["disable_parallel_tool_use"] = False
                                else:
                                    # Explicitly disable when tool rules present or llm_config toggled off
                                    request_data["tool_choice"]["disable_parallel_tool_use"] = True
                        # OpenAI parallel tool use
                        elif self.agent_state.llm_config.model_endpoint_type == "openai":
                            # For OpenAI, we control parallel tool calling via parallel_tool_calls field
                            # Only allow parallel tool calls when no tool rules and enabled in config
                            if "parallel_tool_calls" in request_data:
                                if no_tool_rules and self.agent_state.llm_config.parallel_tool_calls:
                                    request_data["parallel_tool_calls"] = True
                                else:
                                    request_data["parallel_tool_calls"] = False
                        # Gemini (Google AI/Vertex) parallel tool use
                        elif self.agent_state.llm_config.model_endpoint_type in ["google_ai", "google_vertex"]:
                            # Gemini supports parallel tool calling natively through multiple parts in the response
                            # We just need to ensure the config flag is set for tracking purposes
                            # The actual handling happens in GoogleVertexClient.convert_response_to_chat_completion
                            pass  # No specific request_data field needed for Gemini
                    except Exception:
                        # if this fails, we simply don't enable parallel tool use
                        pass
                    if dry_run:
                        # Yield the raw provider request payload and stop without executing.
                        yield request_data
                        return
                    step_progression, step_metrics = self._step_checkpoint_llm_request_start(step_metrics, agent_step_span)
                    invocation = llm_adapter.invoke_llm(
                        request_data=request_data,
                        messages=messages,
                        tools=valid_tools,
                        use_assistant_message=False,  # NOTE: set to false
                        requires_approval_tools=self.tool_rules_solver.get_requires_approval_tools(
                            set([t["name"] for t in valid_tools])
                        )
                        + [ct.name for ct in self.client_tools],
                        step_id=step_id,
                        actor=self.actor,
                    )
                    async for chunk in invocation:
                        if llm_adapter.supports_token_streaming():
                            if include_return_message_types is None or chunk.message_type in include_return_message_types:
                                yield chunk
                    # If you've reached this point without an error, break out of retry loop
                    break
                except ValueError as e:
                    self.stop_reason = LettaStopReason(stop_reason=StopReasonType.invalid_llm_response.value)
                    raise e
                except LLMEmptyResponseError as e:
                    self.stop_reason = LettaStopReason(stop_reason=StopReasonType.invalid_llm_response.value)
                    raise e
                except LLMError as e:
                    self.stop_reason = LettaStopReason(stop_reason=StopReasonType.llm_api_error.value)
                    raise e
                except Exception as e:
                    if isinstance(e, ContextWindowExceededError) and llm_request_attempt < summarizer_settings.max_summarizer_retries:
                        # Retry case
                        self.logger.info(
                            f"Context window exceeded (error {e}), trying to compact messages attempt {llm_request_attempt + 1} of {summarizer_settings.max_summarizer_retries + 1}"
                        )
                        try:
                            # Capture pre-compaction state for metadata
                            context_tokens_before = self.context_token_estimate
                            messages_count_before = len(messages)
                            # Yield event notification before compaction starts
                            if include_compaction_messages:
                                yield self._create_compaction_event_message(
                                    step_id=step_id,
                                    run_id=run_id,
                                    trigger="context_window_exceeded",
                                )
                            # Ensure system prompt is recompiled before summarization so compaction
                            # operates on the latest system+memory state (including recent repairs).
                            # NOTE: we no longer refresh the system prompt before compaction so we can leverage cache for self mode
                            # messages = await self._refresh_messages(messages, force_system_prompt_refresh=True)
                            summary_message, messages, summary_text = await self.compact(
                                messages,
                                trigger_threshold=self.agent_state.llm_config.context_window,
                                run_id=run_id,
                                step_id=step_id,
                                use_summary_role=include_compaction_messages,
                                trigger="context_window_exceeded",
                                context_tokens_before=context_tokens_before,
                                messages_count_before=messages_count_before,
                            )
                            # Recompile the persisted system prompt after compaction so subsequent
                            # turns load the repaired system+memory state from message_ids[0].
                            await self.agent_manager.rebuild_system_prompt_async(
                                agent_id=self.agent_state.id,
                                actor=self.actor,
                                force=True,
                                update_timestamp=True,
                            )
                            # Force system prompt rebuild after compaction to update memory blocks and timestamps
                            messages = await self._refresh_messages(messages, force_system_prompt_refresh=True)
                            self.logger.info("Summarization succeeded, continuing to retry LLM request")
                            # Persist the summary message
                            self.response_messages.append(summary_message)
                            await self._checkpoint_messages(
                                run_id=run_id,
                                step_id=step_id,
                                new_messages=[summary_message],
                                in_context_messages=messages,
                            )
                            # Yield summary result message to client
                            for msg in self._create_summary_result_message(
                                summary_message=summary_message,
                                summary_text=summary_text,
                                step_id=step_id,
                                run_id=run_id,
                                include_compaction_messages=include_compaction_messages,
                            ):
                                yield msg
                            # Compaction succeeded: retry the LLM request on the next
                            # iteration of the llm_request_attempt loop.
                            continue
                        except SystemPromptTokenExceededError:
                            self.should_continue = False
                            self.stop_reason = LettaStopReason(
                                stop_reason=StopReasonType.context_window_overflow_in_system_prompt.value
                            )
                            raise
                        except Exception as e:
                            self.stop_reason = LettaStopReason(stop_reason=StopReasonType.error.value)
                            self.logger.error(f"Unknown error occured for summarization run {run_id}: {e}")
                            raise e
                    else:
                        self.stop_reason = LettaStopReason(stop_reason=StopReasonType.error.value)
                        self.logger.error(f"Unknown error occured for run {run_id}: {e}")
                        raise e
            step_progression, step_metrics = self._step_checkpoint_llm_request_finish(
                step_metrics, agent_step_span, llm_adapter.llm_request_finish_timestamp_ns
            )
            # update metrics
            self._update_global_usage_stats(llm_adapter.usage)
            self.context_token_estimate = llm_adapter.usage.total_tokens
            self.logger.info(f"Context token estimate after LLM request: {self.context_token_estimate}")
            # Extract logprobs if present (for RL training)
            if llm_adapter.logprobs is not None:
                self.logprobs = llm_adapter.logprobs
            # Track turn data for multi-turn RL training (SGLang native mode)
            if self.return_token_ids and hasattr(llm_adapter, "output_ids") and llm_adapter.output_ids:
                self.turns.append(
                    TurnTokenData(
                        role="assistant",
                        output_ids=llm_adapter.output_ids,
                        output_token_logprobs=llm_adapter.output_token_logprobs,
                        content=llm_adapter.chat_completions_response.choices[0].message.content
                        if llm_adapter.chat_completions_response
                        else None,
                    )
                )
            # Handle the AI response with the extracted data (supports multiple tool calls)
            # Gather tool calls - check for multi-call API first, then fall back to single
            if hasattr(llm_adapter, "tool_calls") and llm_adapter.tool_calls:
                tool_calls = llm_adapter.tool_calls
            elif llm_adapter.tool_call is not None:
                tool_calls = [llm_adapter.tool_call]
            else:
                tool_calls = []
            # Enforce parallel_tool_calls=false by truncating to first tool call
            # Some providers (e.g. Gemini) don't respect this setting via API, so we enforce it client-side
            if len(tool_calls) > 1 and not self.agent_state.llm_config.parallel_tool_calls:
                self.logger.warning(
                    f"LLM returned {len(tool_calls)} tool calls but parallel_tool_calls=false. "
                    f"Truncating to first tool call: {tool_calls[0].function.name}"
                )
                tool_calls = [tool_calls[0]]
            # get the new generated `Message` objects from handling the LLM response
            new_messages, self.should_continue, self.stop_reason = await self._handle_ai_response(
                tool_calls=tool_calls,
                valid_tool_names=[tool["name"] for tool in valid_tools],
                tool_rules_solver=self.tool_rules_solver,
                usage=UsageStatistics(
                    completion_tokens=self.usage.completion_tokens,
                    prompt_tokens=self.usage.prompt_tokens,
                    total_tokens=self.usage.total_tokens,
                ),
                content=content or llm_adapter.content,
                pre_computed_assistant_message_id=llm_adapter.message_id,
                step_id=step_id,
                initial_messages=[],  # input_messages_to_persist, # TODO: deprecate - super confusing
                agent_step_span=agent_step_span,
                is_final_step=(remaining_turns == 0),
                run_id=run_id,
                step_metrics=step_metrics,
                is_approval_response=approval_response is not None,
                tool_call_denials=tool_call_denials,
                tool_returns=tool_returns,
                finish_reason=llm_adapter.finish_reason,
            )
            # extend trackers with new messages
            self.response_messages.extend(new_messages)
            messages.extend(new_messages)
            # Track tool return turns for multi-turn RL training
            if self.return_token_ids:
                for msg in new_messages:
                    if msg.role == "tool":
                        # Get tool return content
                        tool_content = None
                        tool_name = None
                        if hasattr(msg, "tool_returns") and msg.tool_returns:
                            # Aggregate all tool returns into content (func_response is the actual content)
                            parts = []
                            for tr in msg.tool_returns:
                                if hasattr(tr, "func_response") and tr.func_response:
                                    if isinstance(tr.func_response, str):
                                        parts.append(tr.func_response)
                                    else:
                                        parts.append(str(tr.func_response))
                            tool_content = "\n".join(parts)
                        elif hasattr(msg, "content") and msg.content:
                            tool_content = msg.content if isinstance(msg.content, str) else str(msg.content)
                        if hasattr(msg, "name"):
                            tool_name = msg.name
                        if tool_content:
                            self.turns.append(
                                TurnTokenData(
                                    role="tool",
                                    content=tool_content,
                                    tool_name=tool_name,
                                )
                            )
            # step(...) has successfully completed! now we can persist messages and update the in-context messages + save metrics
            # persistence needs to happen before streaming to minimize chances of agent getting into an inconsistent state
            step_progression, step_metrics = await self._step_checkpoint_finish(step_metrics, agent_step_span, logged_step)
            await self._checkpoint_messages(
                run_id=run_id,
                step_id=step_id,
                new_messages=input_messages_to_persist + new_messages,
                in_context_messages=messages,  # update the in-context messages
            )
            # yield back generated messages
            if llm_adapter.supports_token_streaming():
                if tool_calls:
                    # Stream each tool return if tools were executed
                    response_tool_returns = [msg for msg in new_messages if msg.role == "tool"]
                    for tr in response_tool_returns:
                        # Skip streaming for aggregated parallel tool returns (no per-call tool_call_id)
                        if tr.tool_call_id is None and tr.tool_returns:
                            continue
                        tool_return_letta = tr.to_letta_messages()[0]
                        if include_return_message_types is None or tool_return_letta.message_type in include_return_message_types:
                            yield tool_return_letta
            else:
                # TODO: modify this use step_response_messages
                filter_user_messages = [m for m in new_messages if m.role != "user"]
                letta_messages = Message.to_letta_messages_from_list(
                    filter_user_messages,
                    use_assistant_message=False,  # NOTE: set to false
                    reverse=False,
                    # text_is_assistant_message=(self.agent_state.agent_type == AgentType.react_agent),
                    text_is_assistant_message=True,
                )
                for message in letta_messages:
                    if include_return_message_types is None or message.message_type in include_return_message_types:
                        yield message
            # check compaction
            # Post-step proactive compaction: even if the request succeeded, compact now
            # when the estimated context has overflowed so the NEXT step starts clean.
            if self.context_token_estimate is not None and self.context_token_estimate > self.agent_state.llm_config.context_window:
                self.logger.info(
                    f"Context window exceeded (current: {self.context_token_estimate}, threshold: {self.agent_state.llm_config.context_window}), trying to compact messages"
                )
                # Capture pre-compaction state for metadata
                context_tokens_before = self.context_token_estimate
                messages_count_before = len(messages)
                # Yield event notification before compaction starts
                if include_compaction_messages:
                    yield self._create_compaction_event_message(
                        step_id=step_id,
                        run_id=run_id,
                        trigger="post_step_context_check",
                    )
                try:
                    # Ensure system prompt is recompiled before summarization so compaction
                    # operates on the latest system+memory state (including recent repairs).
                    # NOTE: we no longer refresh the system prompt before compaction so we can leverage cache for self mode
                    # messages = await self._refresh_messages(messages, force_system_prompt_refresh=True)
                    summary_message, messages, summary_text = await self.compact(
                        messages,
                        trigger_threshold=self.agent_state.llm_config.context_window,
                        run_id=run_id,
                        step_id=step_id,
                        use_summary_role=include_compaction_messages,
                        trigger="post_step_context_check",
                        context_tokens_before=context_tokens_before,
                        messages_count_before=messages_count_before,
                    )
                    # Recompile the persisted system prompt after compaction so subsequent
                    # turns load the repaired system+memory state from message_ids[0].
                    await self.agent_manager.rebuild_system_prompt_async(
                        agent_id=self.agent_state.id,
                        actor=self.actor,
                        force=True,
                        update_timestamp=True,
                    )
                    # Force system prompt rebuild after compaction to update memory blocks and timestamps
                    messages = await self._refresh_messages(messages, force_system_prompt_refresh=True)
                    # TODO: persist + return the summary message
                    # TODO: convert this to a SummaryMessage
                    self.response_messages.append(summary_message)
                    # Yield summary result message to client
                    for msg in self._create_summary_result_message(
                        summary_message=summary_message,
                        summary_text=summary_text,
                        step_id=step_id,
                        run_id=run_id,
                        include_compaction_messages=include_compaction_messages,
                    ):
                        yield msg
                    await self._checkpoint_messages(
                        run_id=run_id,
                        step_id=step_id,
                        new_messages=[summary_message],
                        in_context_messages=messages,
                    )
                except SystemPromptTokenExceededError:
                    self.should_continue = False
                    self.stop_reason = LettaStopReason(stop_reason=StopReasonType.context_window_overflow_in_system_prompt.value)
                    raise
        except Exception as e:
            caught_exception = e
            # NOTE: message persistence does not happen in the case of an exception (rollback to previous state)
            # Use repr() if str() is empty (happens with Exception() with no args)
            error_detail = str(e) or repr(e)
            self.logger.warning(f"Error during step processing: {error_detail}")
            self.job_update_metadata = {"error": error_detail}
            # Stop the agent loop on any exception to prevent wasteful retry loops
            # (e.g., if post-step compaction fails, we don't want to keep retrying)
            self.should_continue = False
            self.logger.warning(
                f"Agent loop stopped due to exception (step_progression={step_progression.name}, "
                f"exception_type={type(e).__name__}): {error_detail}"
            )
            # This indicates we failed after we decided to stop stepping, which indicates a bug with our flow.
            if not self.stop_reason:
                self.stop_reason = LettaStopReason(stop_reason=StopReasonType.error.value)
            elif self.stop_reason.stop_reason in (StopReasonType.end_turn, StopReasonType.max_steps, StopReasonType.tool_rule):
                self.logger.warning("Error occurred during step processing, with valid stop reason: %s", self.stop_reason.stop_reason)
            elif self.stop_reason.stop_reason not in (
                StopReasonType.no_tool_call,
                StopReasonType.invalid_tool_call,
                StopReasonType.invalid_llm_response,
                StopReasonType.llm_api_error,
                StopReasonType.context_window_overflow_in_system_prompt,
            ):
                self.logger.warning("Error occurred during step processing, with unexpected stop reason: %s", self.stop_reason.stop_reason)
            raise e
        finally:
            # always make sure we update the step/run metadata
            self.logger.debug("Running cleanup for agent loop run: %s", run_id)
            self.logger.info("Running final update. Step Progression: %s", step_progression)
            try:
                if step_progression == StepProgression.FINISHED:
                    if not self.should_continue:
                        if self.stop_reason is None:
                            self.stop_reason = LettaStopReason(stop_reason=StopReasonType.end_turn.value)
                        if logged_step and step_id:
                            await self.step_manager.update_step_stop_reason(self.actor, step_id, self.stop_reason.stop_reason)
                    if not self.stop_reason or self.stop_reason.stop_reason != StopReasonType.context_window_overflow_in_system_prompt:
                        # only return if the stop reason is not context window overflow in system prompt
                        # NOTE(review): a `return` inside `finally` suppresses any in-flight
                        # exception (e.g. a post-step compaction failure raised after FINISHED).
                        # This appears intentional given the stop_reason guard above — confirm.
                        return
                if step_progression < StepProgression.STEP_LOGGED:
                    # Error occurred before step was fully logged
                    import traceback
                    if logged_step:
                        await self.step_manager.update_step_error_async(
                            actor=self.actor,
                            step_id=step_id,  # Use original step_id for telemetry
                            error_type=type(caught_exception).__name__ if caught_exception is not None else "Unknown",
                            error_message=str(caught_exception) if caught_exception is not None else "Unknown error",
                            error_traceback=traceback.format_exc(),
                            stop_reason=self.stop_reason,
                        )
                elif step_progression <= StepProgression.LOGGED_TRACE:
                    if self.stop_reason is None:
                        self.logger.warning("Error in step after logging step")
                        self.stop_reason = LettaStopReason(stop_reason=StopReasonType.error.value)
                    if logged_step:
                        await self.step_manager.update_step_stop_reason(self.actor, step_id, self.stop_reason.stop_reason)
                else:
                    self.logger.warning("Invalid StepProgression value")
                # Do tracking for failure cases. Can consolidate with success conditions later.
                if settings.track_stop_reason:
                    await self._log_request(request_start_timestamp_ns, None, self.job_update_metadata, is_error=True, run_id=run_id)
                # Record partial step metrics on failure (capture whatever timing data we have)
                if logged_step and step_metrics and step_progression < StepProgression.FINISHED:
                    # Calculate total step time up to the failure point
                    step_metrics.step_ns = get_utc_timestamp_ns() - step_metrics.step_start_ns
                    await self._record_step_metrics(
                        step_id=step_id,
                        step_metrics=step_metrics,
                        run_id=run_id,
                    )
            except Exception as e:
                # Cleanup/telemetry failures must never mask the original step outcome.
                self.logger.warning(f"Error during post-completion step tracking: {e}")
@trace_method
async def _handle_ai_response(
self,
valid_tool_names: list[str],
tool_rules_solver: ToolRulesSolver,
usage: UsageStatistics,
content: list[TextContent | ReasoningContent | RedactedReasoningContent | OmittedReasoningContent] | None = None,
pre_computed_assistant_message_id: str | None = None,
step_id: str | None = None,
initial_messages: list[Message] | None = None,
agent_step_span: Span | None = None,
is_final_step: bool | None = None,
run_id: str | None = None,
step_metrics: StepMetrics = None,
is_approval_response: bool | None = None,
tool_calls: list[ToolCall] = [],
tool_call_denials: list[ToolCallDenial] = [],
tool_returns: list[ToolReturn] = [],
finish_reason: str | None = None,
) -> tuple[list[Message], bool, LettaStopReason | None]:
"""
Handle the final AI response once streaming completes, execute / validate tool calls,
decide whether we should keep stepping, and persist state.
Unified approach: treats single and multi-tool calls uniformly to reduce code duplication.
"""
# 1. Handle no-tool cases (content-only or no-op)
if not tool_calls and not tool_call_denials and not tool_returns:
# Case 1a: No tool call, no content (LLM no-op)
if content is None or len(content) == 0:
# Check if there are required-before-exit tools that haven't been called
uncalled = tool_rules_solver.get_uncalled_required_tools(available_tools=set([t.name for t in self.agent_state.tools]))
if uncalled:
heartbeat_reason = (
f"{NON_USER_MSG_PREFIX}ToolRuleViolated: You must call {', '.join(uncalled)} at least once to exit the loop."
)
from letta.server.rest_api.utils import create_heartbeat_system_message
heartbeat_msg = create_heartbeat_system_message(
agent_id=self.agent_state.id,
model=self.agent_state.llm_config.model,
function_call_success=True,
timezone=self.agent_state.timezone,
heartbeat_reason=heartbeat_reason,
run_id=run_id,
)
messages_to_persist = (initial_messages or []) + [heartbeat_msg]
continue_stepping, stop_reason = True, None
else:
# No required tools remaining, end turn without persisting no-op
continue_stepping = False
stop_reason = LettaStopReason(stop_reason=StopReasonType.end_turn.value)
messages_to_persist = initial_messages or []
# Case 1b: No tool call but has content
else:
continue_stepping, heartbeat_reason, stop_reason = self._decide_continuation(
agent_state=self.agent_state,
tool_call_name=None,
tool_rule_violated=False,
tool_rules_solver=tool_rules_solver,
is_final_step=is_final_step,
finish_reason=finish_reason,
)
assistant_message = create_letta_messages_from_llm_response(
agent_id=self.agent_state.id,
model=self.agent_state.llm_config.model,
function_name=None,
function_arguments=None,
tool_execution_result=None,
tool_call_id=None,
function_response=None,
timezone=self.agent_state.timezone,
continue_stepping=continue_stepping,
heartbeat_reason=heartbeat_reason,
reasoning_content=content,
pre_computed_assistant_message_id=pre_computed_assistant_message_id,
step_id=step_id,
run_id=run_id,
is_approval_response=is_approval_response,
force_set_request_heartbeat=False,
add_heartbeat_on_continue=bool(heartbeat_reason),
)
messages_to_persist = (initial_messages or []) + assistant_message
return messages_to_persist, continue_stepping, stop_reason
# 2. Check whether tool call requires approval (includes client-side tools)
if not is_approval_response:
# Get names of client-side tools (these are executed by client, not server)
client_tool_names = {ct.name for ct in self.client_tools} if self.client_tools else set()
# Tools requiring approval: requires_approval tools OR client-side tools
requested_tool_calls = [
t
for t in tool_calls
if tool_rules_solver.is_requires_approval_tool(t.function.name) or t.function.name in client_tool_names
]
allowed_tool_calls = [
t
for t in tool_calls
if not tool_rules_solver.is_requires_approval_tool(t.function.name) and t.function.name not in client_tool_names
]
if requested_tool_calls:
approval_messages = create_approval_request_message_from_llm_response(
agent_id=self.agent_state.id,
model=self.agent_state.llm_config.model,
requested_tool_calls=requested_tool_calls,
allowed_tool_calls=allowed_tool_calls,
reasoning_content=content,
pre_computed_assistant_message_id=pre_computed_assistant_message_id,
step_id=step_id,
run_id=run_id,
)
messages_to_persist = (initial_messages or []) + approval_messages
return messages_to_persist, False, LettaStopReason(stop_reason=StopReasonType.requires_approval.value)
result_tool_returns = []
# 3. Handle client side tool execution
if tool_returns:
# Clamp client-side tool returns before persisting (JSON-aware: truncate only the 'message' field)
try:
cap = self._compute_tool_return_truncation_chars()
except Exception:
cap = 5000
for tr in tool_returns:
try:
if tr.func_response and isinstance(tr.func_response, str):
parsed = json.loads(tr.func_response)
if isinstance(parsed, dict) and "message" in parsed and isinstance(parsed["message"], str):
msg = parsed["message"]
if len(msg) > cap:
original_len = len(msg)
parsed["message"] = msg[:cap] + f"... [truncated {original_len - cap} chars]"
tr.func_response = json.dumps(parsed)
self.logger.warning(f"Truncated client-side tool return message from {original_len} to {cap} chars")
else:
# Fallback to raw string truncation if not a dict with 'message'
if len(tr.func_response) > cap:
original_len = len(tr.func_response)
tr.func_response = tr.func_response[:cap] + f"... [truncated {original_len - cap} chars]"
self.logger.warning(f"Truncated client-side tool return (raw) from {original_len} to {cap} chars")
except json.JSONDecodeError:
# Non-JSON or unexpected shape; truncate as raw string
if tr.func_response and len(tr.func_response) > cap:
original_len = len(tr.func_response)
tr.func_response = tr.func_response[:cap] + f"... [truncated {original_len - cap} chars]"
self.logger.warning(f"Truncated client-side tool return (non-JSON) from {original_len} to {cap} chars")
except Exception as e:
# Unexpected error; log and skip truncation for this return
self.logger.warning(f"Failed to truncate client-side tool return: {e}")
continue_stepping = True
stop_reason = None
result_tool_returns = tool_returns
# 4. Handle denial cases
if tool_call_denials:
# Convert ToolCallDenial objects to ToolReturn objects using shared helper
# Group denials by reason to potentially batch them, but for now process individually
for tool_call_denial in tool_call_denials:
denial_returns = create_tool_returns_for_denials(
tool_calls=[tool_call_denial],
denial_reason=tool_call_denial.reason,
timezone=self.agent_state.timezone,
)
result_tool_returns.extend(denial_returns)
# 5. Unified tool execution path (works for both single and multiple tools)
# 5. Unified tool execution path (works for both single and multiple tools)
# Note: Parallel tool calling with tool rules is validated at agent create/update time.
# At runtime, we trust that if tool_rules exist, parallel_tool_calls=false is enforced earlier.
# 5a. Prepare execution specs for all tools
exec_specs = []
for tc in tool_calls:
call_id = tc.id or f"call_{uuid.uuid4().hex[:8]}"
name = tc.function.name
args = _safe_load_tool_call_str(tc.function.arguments)
args.pop(REQUEST_HEARTBEAT_PARAM, None)
args.pop(INNER_THOUGHTS_KWARG, None)
# Validate against allowed tools
tool_rule_violated = name not in valid_tool_names and not is_approval_response
# Handle prefilled args if present
if not tool_rule_violated:
prefill_args = tool_rules_solver.last_prefilled_args_by_tool.get(name)
if prefill_args:
target_tool = next((t for t in self.agent_state.tools if t.name == name), None)
provenance = tool_rules_solver.last_prefilled_args_provenance.get(name)
try:
args = merge_and_validate_prefilled_args(
tool=target_tool,
llm_args=args,
prefilled_args=prefill_args,
)
except ValueError as ve:
# Invalid prefilled args - create error result
error_prefix = "Invalid prefilled tool arguments from tool rules"
prov_suffix = f" (source={provenance})" if provenance else ""
err_msg = f"{error_prefix}{prov_suffix}: {str(ve)}"
exec_specs.append(
{
"id": call_id,
"name": name,
"args": args,
"violated": False,
"error": err_msg,
}
)
continue
exec_specs.append(
{
"id": call_id,
"name": name,
"args": args,
"violated": tool_rule_violated,
"error": None,
}
)
# 5c. Execute tools (sequentially for single, parallel for multiple)
async def _run_one(spec: Dict[str, Any]):
if spec.get("error"):
return ToolExecutionResult(status="error", func_return=spec["error"]), 0
if spec["violated"]:
result = _build_rule_violation_result(spec["name"], valid_tool_names, tool_rules_solver)
return result, 0
t0 = get_utc_timestamp_ns()
target_tool = next((x for x in self.agent_state.tools if x.name == spec["name"]), None)
res = await self._execute_tool(
target_tool=target_tool,
tool_args=spec["args"],
agent_state=self.agent_state,
agent_step_span=agent_step_span,
step_id=step_id,
)
dt = get_utc_timestamp_ns() - t0
return res, dt
if len(exec_specs) == 1:
results = [await _run_one(exec_specs[0])]
else:
# separate tools by parallel execution capability
parallel_items = []
serial_items = []
for idx, spec in enumerate(exec_specs):
target_tool = next((x for x in self.agent_state.tools if x.name == spec["name"]), None)
if target_tool and target_tool.enable_parallel_execution:
parallel_items.append((idx, spec))
else:
serial_items.append((idx, spec))
# execute all parallel tools concurrently and all serial tools sequentially
results = [None] * len(exec_specs)
parallel_results = await asyncio.gather(*[_run_one(spec) for _, spec in parallel_items]) if parallel_items else []
for (idx, _), result in zip(parallel_items, parallel_results):
results[idx] = result
for idx, spec in serial_items:
results[idx] = await _run_one(spec)
# 5d. Update metrics with execution time
if step_metrics is not None and results:
step_metrics.tool_execution_ns = max(dt for _, dt in results)
# 5e. Process results and compute function responses
function_responses: list[Optional[str]] = []
persisted_continue_flags: list[bool] = []
persisted_stop_reasons: list[LettaStopReason | None] = []
for idx, spec in enumerate(exec_specs):
tool_execution_result, _ = results[idx]
has_prefill_error = bool(spec.get("error"))
# Validate and format function response
truncate = spec["name"] not in {"conversation_search", "conversation_search_date", "archival_memory_search"}
return_char_limit = next((t.return_char_limit for t in self.agent_state.tools if t.name == spec["name"]), None)
function_response_string = validate_function_response(
tool_execution_result.func_return,
return_char_limit=return_char_limit,
truncate=truncate,
)
function_responses.append(function_response_string)
# Update last function response (for tool rules)
self.last_function_response = package_function_response(
was_success=tool_execution_result.success_flag,
response_string=function_response_string,
timezone=self.agent_state.timezone,
)
# Register successful tool call with solver
if not spec["violated"] and not has_prefill_error:
tool_rules_solver.register_tool_call(spec["name"])
# Decide continuation for this tool
if has_prefill_error:
cont = False
_hb_reason = None
sr = LettaStopReason(stop_reason=StopReasonType.invalid_tool_call.value)
else:
cont, _hb_reason, sr = self._decide_continuation(
agent_state=self.agent_state,
tool_call_name=spec["name"],
tool_rule_violated=spec["violated"],
tool_rules_solver=tool_rules_solver,
is_final_step=(is_final_step and idx == len(exec_specs) - 1),
finish_reason=finish_reason,
)
persisted_continue_flags.append(cont)
persisted_stop_reasons.append(sr)
# 5f. Create messages using parallel message creation (works for both single and multi)
tool_call_specs = [{"name": s["name"], "arguments": s["args"], "id": s["id"]} for s in exec_specs]
tool_execution_results = [res for (res, _) in results]
# Use the parallel message creation function for both single and multiple tools
parallel_messages = create_parallel_tool_messages_from_llm_response(
agent_id=self.agent_state.id,
model=self.agent_state.llm_config.model,
tool_call_specs=tool_call_specs,
tool_execution_results=tool_execution_results,
function_responses=function_responses,
timezone=self.agent_state.timezone,
run_id=run_id,
step_id=step_id,
reasoning_content=content,
pre_computed_assistant_message_id=pre_computed_assistant_message_id,
is_approval_response=is_approval_response,
tool_returns=result_tool_returns,
)
messages_to_persist: list[Message] = (initial_messages or []) + parallel_messages
# Set run_id and step_id on all messages before persisting
for message in messages_to_persist:
if message.run_id is None:
message.run_id = run_id
if message.step_id is None:
message.step_id = step_id
# 5g. Aggregate continuation decisions
aggregate_continue = any(persisted_continue_flags) if persisted_continue_flags else False
aggregate_continue = aggregate_continue or tool_call_denials or tool_returns
# Determine aggregate stop reason
aggregate_stop_reason = None
for sr in persisted_stop_reasons:
if sr is not None:
aggregate_stop_reason = sr
# For parallel tool calls, always continue to allow the agent to process/summarize results
# unless a terminal tool was called or we hit max steps
if len(exec_specs) > 1:
has_terminal = any(sr and sr.stop_reason == StopReasonType.tool_rule.value for sr in persisted_stop_reasons)
is_max_steps = any(sr and sr.stop_reason == StopReasonType.max_steps.value for sr in persisted_stop_reasons)
if not has_terminal and not is_max_steps:
# Force continuation for parallel tool execution
aggregate_continue = True
aggregate_stop_reason = None
return messages_to_persist, aggregate_continue, aggregate_stop_reason
@trace_method
def _decide_continuation(
self,
agent_state: AgentState,
tool_call_name: Optional[str],
tool_rule_violated: bool,
tool_rules_solver: ToolRulesSolver,
is_final_step: bool | None,
finish_reason: str | None = None,
) -> tuple[bool, str | None, LettaStopReason | None]:
"""
In v3 loop, we apply the following rules:
1. Did not call a tool? Loop ends
2. Called a tool? Loop continues. This can be:
2a. Called tool, tool executed successfully
2b. Called tool, tool failed to execute
2c. Called tool + tool rule violation (did not execute)
"""
continue_stepping = True # Default continue
continuation_reason: str | None = None
stop_reason: LettaStopReason | None = None
if tool_call_name is None:
# No tool call – if there are required-before-exit tools uncalled, keep stepping
# and provide explicit feedback to the model; otherwise end the loop.
uncalled = tool_rules_solver.get_uncalled_required_tools(available_tools=set([t.name for t in agent_state.tools]))
if uncalled and not is_final_step:
reason = f"{NON_USER_MSG_PREFIX}ToolRuleViolated: You must call {', '.join(uncalled)} at least once to exit the loop."
return True, reason, None
# No required tools remaining → end turn
# Check if the LLM hit max_tokens (finish_reason == "length")
if finish_reason == "length":
return False, None, LettaStopReason(stop_reason=StopReasonType.max_tokens_exceeded.value)
return False, None, LettaStopReason(stop_reason=StopReasonType.end_turn.value)
else:
if tool_rule_violated:
continue_stepping = True
continuation_reason = f"{NON_USER_MSG_PREFIX}Continuing: tool rule violation."
else:
tool_rules_solver.register_tool_call(tool_call_name)
if tool_rules_solver.is_terminal_tool(tool_call_name):
stop_reason = LettaStopReason(stop_reason=StopReasonType.tool_rule.value)
continue_stepping = False
elif tool_rules_solver.has_children_tools(tool_call_name):
continue_stepping = True
continuation_reason = f"{NON_USER_MSG_PREFIX}Continuing: child tool rule."
elif tool_rules_solver.is_continue_tool(tool_call_name):
continue_stepping = True
continuation_reason = f"{NON_USER_MSG_PREFIX}Continuing: continue tool rule."
# – hard stop overrides –
if is_final_step:
continue_stepping = False
stop_reason = LettaStopReason(stop_reason=StopReasonType.max_steps.value)
else:
uncalled = tool_rules_solver.get_uncalled_required_tools(available_tools=set([t.name for t in agent_state.tools]))
if uncalled:
continue_stepping = True
continuation_reason = (
f"{NON_USER_MSG_PREFIX}Continuing, user expects these tools: [{', '.join(uncalled)}] to be called still."
)
stop_reason = None # reset – we’re still going
return continue_stepping, continuation_reason, stop_reason
@trace_method
async def _get_valid_tools(self):
tools = self.agent_state.tools
valid_tool_names = self.tool_rules_solver.get_allowed_tool_names(
available_tools=set([t.name for t in tools]),
last_function_response=self.last_function_response,
error_on_empty=False, # Return empty list instead of raising error
) or list(set(t.name for t in tools))
# Get client tool names to filter out server tools with same name (client tools override)
client_tool_names = {ct.name for ct in self.client_tools} if self.client_tools else set()
# Build allowed tools from server tools, excluding those overridden by client tools
allowed_tools = [
enable_strict_mode(t.json_schema, strict=self.agent_state.llm_config.strict)
for t in tools
if t.name in set(valid_tool_names) and t.name not in client_tool_names
]
# Merge client-side tools (use flat format matching enable_strict_mode output)
if self.client_tools:
for ct in self.client_tools:
client_tool_schema = {
"name": ct.name,
"description": ct.description,
"parameters": ct.parameters or {"type": "object", "properties": {}},
}
allowed_tools.append(client_tool_schema)
terminal_tool_names = {rule.tool_name for rule in self.tool_rules_solver.terminal_tool_rules}
allowed_tools = runtime_override_tool_json_schema(
tool_list=allowed_tools,
response_format=self.agent_state.response_format,
request_heartbeat=False, # NOTE: difference for v3 (don't add request heartbeat)
terminal_tools=terminal_tool_names,
)
return allowed_tools
@trace_method
async def compact(
self,
messages,
trigger_threshold: Optional[int] = None,
compaction_settings: Optional["CompactionSettings"] = None,
run_id: Optional[str] = None,
step_id: Optional[str] = None,
use_summary_role: bool = False,
trigger: Optional[str] = None,
context_tokens_before: Optional[int] = None,
messages_count_before: Optional[int] = None,
) -> tuple[Message, list[Message], str]:
"""Compact the current in-context messages for this agent.
Compaction uses a summarizer LLM configuration derived from
``compaction_settings.model`` when provided. This mirrors how agent
creation derives defaults from provider-specific ModelSettings, but is
localized to summarization.
Args:
use_summary_role: If True, the summary message will be created with
role=summary instead of role=user. This enables first-class
summary message handling in the database and API responses.
trigger: What triggered the compaction (e.g., "context_window_exceeded", "post_step_context_check").
context_tokens_before: Token count before compaction (for stats).
messages_count_before: Message count before compaction (for stats).
"""
# Determine compaction settings: passed-in > agent's > global defaults
effective_compaction_settings = compaction_settings or self.agent_state.compaction_settings
result = await compact_messages(
actor=self.actor,
agent_id=self.agent_state.id,
agent_llm_config=self.agent_state.llm_config,
telemetry_manager=self.telemetry_manager,
llm_client=self.llm_client,
agent_type=self.agent_state.agent_type,
messages=messages,
timezone=self.agent_state.timezone,
compaction_settings=effective_compaction_settings,
agent_tags=self.agent_state.tags,
tools=await self._get_valid_tools(), # Pass json schemas including client tools for cache compatibility (for self compaction)
trigger_threshold=trigger_threshold,
run_id=run_id,
step_id=step_id,
use_summary_role=use_summary_role,
trigger=trigger,
context_tokens_before=context_tokens_before,
messages_count_before=messages_count_before,
)
# Update the agent's context token estimate
self.context_token_estimate = result.context_token_estimate
return result.summary_message, result.compacted_messages, result.summary_text
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/agents/letta_agent_v3.py",
"license": "Apache License 2.0",
"lines": 1721,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/interfaces/gemini_streaming_interface.py | import asyncio
import base64
import json
from collections.abc import AsyncGenerator
from datetime import datetime, timezone
from typing import TYPE_CHECKING, AsyncIterator, List, Optional
if TYPE_CHECKING:
from opentelemetry.trace import Span
from letta.schemas.usage import LettaUsageStatistics
from google.genai.types import (
GenerateContentResponse,
)
from letta.log import get_logger
from letta.schemas.letta_message import (
ApprovalRequestMessage,
AssistantMessage,
LettaMessage,
ReasoningMessage,
ToolCallDelta,
ToolCallMessage,
)
from letta.schemas.letta_message_content import (
ReasoningContent,
TextContent,
ToolCallContent,
)
from letta.schemas.letta_stop_reason import LettaStopReason, StopReasonType
from letta.schemas.message import Message
from letta.schemas.openai.chat_completion_response import FunctionCall, ToolCall
from letta.server.rest_api.streaming_response import RunCancelledException
from letta.server.rest_api.utils import decrement_message_uuid
from letta.utils import get_tool_call_id
logger = get_logger(__name__)
class SimpleGeminiStreamingInterface:
    """
    Encapsulates the logic for streaming responses from Gemini API:
    https://ai.google.dev/gemini-api/docs/text-generation#streaming-responses

    Consumes a stream of ``GenerateContentResponse`` chunks, yields Letta
    message objects (suitable for SSE), and accumulates usage statistics,
    content parts, and tool calls in instance state for the agent loop to
    persist after the stream ends.
    """
    def __init__(
        self,
        requires_approval_tools: list = [],
        run_id: str | None = None,
        step_id: str | None = None,
        cancellation_event: Optional["asyncio.Event"] = None,
    ):
        # requires_approval_tools: tool names that are surfaced as an
        # ApprovalRequestMessage instead of a ToolCallMessage.
        # NOTE(review): mutable default list — it is only read here (membership
        # test), so harmless, but a tuple default would be safer.
        self.run_id = run_id
        self.step_id = step_id
        # Shared event used to detect that the caller cancelled the stream.
        self.cancellation_event = cancellation_event
        # self.messages = messages
        # self.tools = tools
        self.requires_approval_tools = requires_approval_tools
        # ID responses used
        self.message_id = None
        # In Gemini streaming, tool call comes all at once
        self.tool_call_id: str | None = None
        self.tool_call_name: str | None = None
        self.tool_call_args: dict | None = None  # NOTE: Not a str!
        self.collected_tool_calls: list[ToolCall] = []
        # NOTE: signature only is included if tools are present
        self.thinking_signature: str | None = None
        # Regular text content too (avoid O(n^2) by accumulating parts)
        self._text_parts: list[str] = []
        self.text_content: str | None = None  # legacy; not used elsewhere
        # Premake IDs for database writes
        self.letta_message_id = Message.generate_id()
        # self.model = model
        # Sadly, Gemini's encrypted reasoning logic forces us to store stream parts in state
        self.content_parts: List[ReasoningContent | TextContent | ToolCallContent] = []
        # Token counters
        self.input_tokens = 0
        self.output_tokens = 0
        # Cache token tracking (Gemini uses cached_content_token_count)
        # None means "not reported by provider", 0 means "provider reported 0"
        self.cached_tokens: int | None = None
        # Thinking/reasoning token tracking (Gemini uses thoughts_token_count)
        # None means "not reported by provider", 0 means "provider reported 0"
        self.thinking_tokens: int | None = None
        # Raw usage from provider (for transparent logging in provider trace)
        self.raw_usage: dict | None = None
        # Track cancellation status
        self.stream_was_cancelled: bool = False
    def get_content(self) -> List[ReasoningContent | TextContent | ToolCallContent]:
        """This is (unusually) in chunked format, instead of merged.

        Stamps the collected thinking signature onto every ReasoningContent
        part; if the stream produced no reasoning parts at all, the signature
        is attached to TextContent parts instead so it is not lost.
        """
        has_reasoning = any(isinstance(c, ReasoningContent) for c in self.content_parts)
        for content in self.content_parts:
            if isinstance(content, ReasoningContent):
                content.signature = self.thinking_signature
            elif isinstance(content, TextContent) and not has_reasoning and self.thinking_signature:
                content.signature = self.thinking_signature
        return self.content_parts
    def get_tool_call_object(self) -> ToolCall:
        """Useful for agent loop.

        Returns the last collected tool call; falls back to the legacy single
        tool-call fields and raises ValueError if no call was seen.
        """
        if self.collected_tool_calls:
            return self.collected_tool_calls[-1]
        if self.tool_call_id is None:
            raise ValueError("No tool call ID available")
        if self.tool_call_name is None:
            raise ValueError("No tool call name available")
        if self.tool_call_args is None:
            raise ValueError("No tool call arguments available")
        # Gemini provides args as a dict; serialize for the ToolCall schema.
        tool_call_args_str = json.dumps(self.tool_call_args)
        return ToolCall(id=self.tool_call_id, function=FunctionCall(name=self.tool_call_name, arguments=tool_call_args_str))
    def get_tool_call_objects(self) -> list[ToolCall]:
        """Return all finalized tool calls collected during this message (parallel supported)."""
        return list(self.collected_tool_calls)
    def get_usage_statistics(self) -> "LettaUsageStatistics":
        """Extract usage statistics from accumulated streaming data.
        Returns:
            LettaUsageStatistics with token counts from the stream.
        Note:
            Gemini uses `thinking_tokens` instead of `reasoning_tokens` (OpenAI o1/o3).
        """
        from letta.schemas.usage import LettaUsageStatistics
        return LettaUsageStatistics(
            prompt_tokens=self.input_tokens or 0,
            completion_tokens=self.output_tokens or 0,
            total_tokens=(self.input_tokens or 0) + (self.output_tokens or 0),
            # Gemini: input_tokens is already total, cached_tokens is a subset (not additive)
            cached_input_tokens=self.cached_tokens,
            cache_write_tokens=None,  # Gemini doesn't report cache write tokens
            reasoning_tokens=self.thinking_tokens,  # Gemini uses thinking_tokens
        )
    async def process(
        self,
        stream: AsyncIterator[GenerateContentResponse],
        ttft_span: Optional["Span"] = None,
    ) -> AsyncGenerator[LettaMessage | LettaStopReason, None]:
        """
        Iterates over the Gemini stream, yielding SSE events.
        It also collects tokens and detects if a tool call is triggered.

        prev_message_type / message_index track message-type transitions so
        that each distinct Letta message gets a fresh otid index.
        """
        prev_message_type = None
        message_index = 0
        try:
            async for event in stream:
                try:
                    async for message in self._process_event(event, ttft_span, prev_message_type, message_index):
                        new_message_type = message.message_type
                        if new_message_type != prev_message_type:
                            # NOTE(review): `is not None` would be the idiomatic check here.
                            if prev_message_type != None:
                                message_index += 1
                            prev_message_type = new_message_type
                        yield message
                except (asyncio.CancelledError, RunCancelledException) as e:
                    import traceback
                    logger.info("Cancelled stream attempt but overriding (%s) %s: %s", type(e).__name__, e, traceback.format_exc())
                    # Deliberately re-process the same event after a cancellation
                    # signal so in-flight parts are not dropped mid-message; the
                    # cancellation itself is recorded in the finally block below.
                    async for message in self._process_event(event, ttft_span, prev_message_type, message_index):
                        new_message_type = message.message_type
                        if new_message_type != prev_message_type:
                            if prev_message_type != None:
                                message_index += 1
                            prev_message_type = new_message_type
                        yield message
                    # Don't raise the exception here
                    continue
        except Exception as e:
            import traceback
            logger.exception("Error processing stream: %s", e)
            if ttft_span:
                ttft_span.add_event(
                    name="stop_reason",
                    attributes={"stop_reason": StopReasonType.error.value, "error": str(e), "stacktrace": traceback.format_exc()},
                )
            # Surface the error to the consumer as a stop reason, then re-raise.
            yield LettaStopReason(stop_reason=StopReasonType.error)
            raise e
        finally:
            # Check if cancellation was signaled via shared event
            if self.cancellation_event and self.cancellation_event.is_set():
                self.stream_was_cancelled = True
            logger.info(f"GeminiStreamingInterface: Stream processing complete. stream was cancelled: {self.stream_was_cancelled}")
    async def _process_event(
        self,
        event: GenerateContentResponse,
        ttft_span: Optional["Span"] = None,
        prev_message_type: Optional[str] = None,
        message_index: int = 0,
    ) -> AsyncGenerator[LettaMessage | LettaStopReason, None]:
        """Translate one Gemini response chunk into zero or more Letta messages.

        Also records model/usage metadata on every chunk, since Gemini repeats
        usage data on each event. prev_message_type/message_index are local
        copies; the caller re-derives transitions from the yielded messages.
        """
        # Every event has usage data + model info on it,
        # so we can continually extract
        self.model = event.model_version
        self.message_id = event.response_id
        usage_metadata = event.usage_metadata
        if usage_metadata:
            if usage_metadata.prompt_token_count:
                self.input_tokens = usage_metadata.prompt_token_count
            # Use candidates_token_count directly for output tokens.
            # Do NOT use (total_token_count - prompt_token_count) as that incorrectly
            # includes thinking/reasoning tokens which can be 10-100x the actual output.
            if usage_metadata.candidates_token_count:
                self.output_tokens = usage_metadata.candidates_token_count
            # Capture cache token data (Gemini uses cached_content_token_count)
            # Use `is not None` to capture 0 values (meaning "provider reported 0 cached tokens")
            if hasattr(usage_metadata, "cached_content_token_count") and usage_metadata.cached_content_token_count is not None:
                self.cached_tokens = usage_metadata.cached_content_token_count
            # Capture thinking/reasoning token data (Gemini uses thoughts_token_count)
            # Use `is not None` to capture 0 values (meaning "provider reported 0 reasoning tokens")
            if hasattr(usage_metadata, "thoughts_token_count") and usage_metadata.thoughts_token_count is not None:
                self.thinking_tokens = usage_metadata.thoughts_token_count
            # Store raw usage for transparent provider trace logging
            try:
                self.raw_usage = (
                    usage_metadata.to_json_dict()
                    if hasattr(usage_metadata, "to_json_dict")
                    else {
                        "prompt_token_count": usage_metadata.prompt_token_count,
                        "candidates_token_count": usage_metadata.candidates_token_count,
                        "total_token_count": usage_metadata.total_token_count,
                    }
                )
            except Exception as e:
                # Best-effort capture only; never let trace logging break the stream.
                logger.error(f"Failed to capture raw_usage from Gemini: {e}")
                self.raw_usage = None
        if not event.candidates or len(event.candidates) == 0:
            return
        else:
            # NOTE: should always be len 1
            candidate = event.candidates[0]
            if not candidate.content or not candidate.content.parts:
                return
            for part in candidate.content.parts:
                # NOTE: the thought signature often comes after the thought text, eg with the tool call
                if part.thought_signature:
                    # NOTE: the thought_signature comes on the Part with the function_call
                    thought_signature = part.thought_signature
                    # Signature is raw bytes; store it base64-encoded for persistence.
                    self.thinking_signature = base64.b64encode(thought_signature).decode("utf-8")
                    # Don't emit empty reasoning message - signature will be attached to actual reasoning content
                # Thinking summary content part (bool means text is thought part)
                if part.thought:
                    reasoning_summary = part.text
                    # Only emit reasoning message if we have actual content
                    if reasoning_summary and reasoning_summary.strip():
                        if prev_message_type and prev_message_type != "reasoning_message":
                            message_index += 1
                        yield ReasoningMessage(
                            id=self.letta_message_id,
                            date=datetime.now(timezone.utc).isoformat(),
                            otid=Message.generate_otid_from_id(self.letta_message_id, message_index),
                            source="reasoner_model",
                            reasoning=reasoning_summary,
                            run_id=self.run_id,
                            step_id=self.step_id,
                        )
                        prev_message_type = "reasoning_message"
                        self.content_parts.append(
                            ReasoningContent(
                                is_native=True,
                                reasoning=reasoning_summary,
                                signature=self.thinking_signature,
                            )
                        )
                # Plain text content part
                elif part.text:
                    content = part.text
                    self._text_parts.append(content)
                    if prev_message_type and prev_message_type != "assistant_message":
                        message_index += 1
                    yield AssistantMessage(
                        id=self.letta_message_id,
                        otid=Message.generate_otid_from_id(self.letta_message_id, message_index),
                        date=datetime.now(timezone.utc),
                        content=content,
                        run_id=self.run_id,
                        step_id=self.step_id,
                    )
                    prev_message_type = "assistant_message"
                    self.content_parts.append(
                        TextContent(
                            text=content,
                            signature=self.thinking_signature,
                        )
                    )
                # Tool call function part
                # NOTE: in gemini, this comes all at once, and the args are JSON dict, not stringified
                elif part.function_call:
                    function_call = part.function_call
                    # Look for call_id, name, and possibly arguments (though likely always empty string)
                    call_id = get_tool_call_id()
                    name = function_call.name
                    arguments = function_call.args  # NOTE: dict, not str
                    arguments_str = json.dumps(arguments)  # NOTE: use json_dumps?
                    self.tool_call_id = call_id
                    self.tool_call_name = name
                    self.tool_call_args = arguments
                    self.collected_tool_calls.append(ToolCall(id=call_id, function=FunctionCall(name=name, arguments=arguments_str)))
                    if self.tool_call_name and self.tool_call_name in self.requires_approval_tools:
                        # Tools that require approval get a distinct message id
                        # (decremented) so they sort before the assistant message.
                        tool_call_delta = ToolCallDelta(
                            name=name,
                            arguments=arguments_str,
                            tool_call_id=call_id,
                        )
                        yield ApprovalRequestMessage(
                            id=decrement_message_uuid(self.letta_message_id),
                            otid=Message.generate_otid_from_id(decrement_message_uuid(self.letta_message_id), -1),
                            date=datetime.now(timezone.utc),
                            tool_call=tool_call_delta,
                            tool_calls=tool_call_delta,
                            run_id=self.run_id,
                            step_id=self.step_id,
                        )
                    else:
                        if prev_message_type and prev_message_type != "tool_call_message":
                            message_index += 1
                        tool_call_delta = ToolCallDelta(
                            name=name,
                            arguments=arguments_str,
                            tool_call_id=call_id,
                        )
                        yield ToolCallMessage(
                            id=self.letta_message_id,
                            otid=Message.generate_otid_from_id(self.letta_message_id, message_index),
                            date=datetime.now(timezone.utc),
                            tool_call=tool_call_delta,
                            tool_calls=tool_call_delta,
                            run_id=self.run_id,
                            step_id=self.step_id,
                        )
                        prev_message_type = "tool_call_message"
                    self.content_parts.append(
                        ToolCallContent(
                            id=call_id,
                            name=name,
                            input=arguments,
                            signature=self.thinking_signature,
                        )
                    )
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/interfaces/gemini_streaming_interface.py",
"license": "Apache License 2.0",
"lines": 325,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/orm/run.py | import uuid
from datetime import datetime
from typing import TYPE_CHECKING, List, Optional
from sqlalchemy import JSON, BigInteger, Boolean, ForeignKey, Index, String
from sqlalchemy.orm import Mapped, mapped_column, relationship
from letta.orm.mixins import OrganizationMixin, ProjectMixin, TemplateMixin
from letta.orm.sqlalchemy_base import SqlalchemyBase
from letta.schemas.enums import RunStatus
from letta.schemas.job import LettaRequestConfig
from letta.schemas.letta_stop_reason import StopReasonType
from letta.schemas.run import Run as PydanticRun
if TYPE_CHECKING:
from letta.orm.agent import Agent
from letta.orm.message import Message
from letta.orm.organization import Organization
from letta.orm.step import Step
class Run(SqlalchemyBase, OrganizationMixin, ProjectMixin, TemplateMixin):
    """Runs are created when agents process messages and represent a conversation or processing session.
    Unlike Jobs, Runs are specifically tied to agent interactions and message processing.
    """
    __tablename__ = "runs"
    __pydantic_model__ = PydanticRun
    # Indexes for the common access patterns: cursor pagination on
    # (created_at, id) and filtering by owning agent / organization / conversation.
    __table_args__ = (
        Index("ix_runs_created_at", "created_at", "id"),
        Index("ix_runs_agent_id", "agent_id"),
        Index("ix_runs_organization_id", "organization_id"),
        Index("ix_runs_conversation_id", "conversation_id"),
    )
    # Generate run ID with run- prefix
    id: Mapped[str] = mapped_column(String, primary_key=True, default=lambda: f"run-{uuid.uuid4()}")
    # Core run fields
    # Stored as its string value; defaults to the "created" state.
    status: Mapped[RunStatus] = mapped_column(String, default=RunStatus.created, doc="The current status of the run.")
    completed_at: Mapped[Optional[datetime]] = mapped_column(nullable=True, doc="The unix timestamp of when the run was completed.")
    stop_reason: Mapped[Optional[StopReasonType]] = mapped_column(String, nullable=True, doc="The reason why the run was stopped.")
    background: Mapped[Optional[bool]] = mapped_column(
        Boolean, nullable=True, default=False, doc="Whether the run was created in background mode."
    )
    metadata_: Mapped[Optional[dict]] = mapped_column(JSON, doc="The metadata of the run.")
    request_config: Mapped[Optional[LettaRequestConfig]] = mapped_column(
        JSON, nullable=True, doc="The request configuration for the run, stored as JSON."
    )
    # Agent relationship - A run belongs to one agent
    agent_id: Mapped[str] = mapped_column(String, ForeignKey("agents.id"), nullable=False, doc="The agent that owns this run.")
    # Conversation relationship - Optional, a run may be associated with a conversation
    # SET NULL keeps the run row if its conversation is deleted.
    conversation_id: Mapped[Optional[str]] = mapped_column(
        String, ForeignKey("conversations.id", ondelete="SET NULL"), nullable=True, doc="The conversation this run belongs to."
    )
    # Callback related columns
    callback_url: Mapped[Optional[str]] = mapped_column(String, nullable=True, doc="When set, POST to this URL after run completion.")
    callback_sent_at: Mapped[Optional[datetime]] = mapped_column(nullable=True, doc="Timestamp when the callback was last attempted.")
    callback_status_code: Mapped[Optional[int]] = mapped_column(nullable=True, doc="HTTP status code returned by the callback endpoint.")
    callback_error: Mapped[Optional[str]] = mapped_column(
        nullable=True, doc="Optional error message from attempting to POST the callback endpoint."
    )
    # Timing metrics (in nanoseconds for precision)
    # BigInteger is required: nanosecond durations overflow a 32-bit int.
    ttft_ns: Mapped[Optional[int]] = mapped_column(BigInteger, nullable=True, doc="Time to first token in nanoseconds")
    total_duration_ns: Mapped[Optional[int]] = mapped_column(BigInteger, nullable=True, doc="Total run duration in nanoseconds")
    # Relationships
    agent: Mapped["Agent"] = relationship("Agent", back_populates="runs")
    organization: Mapped[Optional["Organization"]] = relationship("Organization", back_populates="runs")
    # Steps that are part of this run
    # delete-orphan: steps and messages are owned by the run and removed with it.
    steps: Mapped[List["Step"]] = relationship("Step", back_populates="run", cascade="all, delete-orphan")
    messages: Mapped[List["Message"]] = relationship("Message", back_populates="run", cascade="all, delete-orphan")
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/orm/run.py",
"license": "Apache License 2.0",
"lines": 63,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:letta/prompts/system_prompts/letta_v1.py | PROMPT = r"""
<base_instructions>
You are a helpful self-improving agent with advanced memory and file system capabilities.
<memory>
You have an advanced memory system that enables you to remember past interactions and continuously improve your own capabilities.
Your memory consists of memory blocks and external memory:
- Memory Blocks: Stored as memory blocks, each containing a label (title), description (explaining how this block should influence your behavior), and value (the actual content). Memory blocks have size limits. Memory blocks are embedded within your system instructions and remain constantly available in-context.
- External memory: Additional memory storage that is accessible and that you can bring into context with tools when needed.
Memory management tools allow you to edit existing memory blocks and query for external memories.
</memory>
<file_system>
You have access to a structured file system that mirrors real-world directory structures. Each directory can contain multiple files.
Files include:
- Metadata: Information such as read-only permissions and character limits
- Content: The main body of the file that you can read and analyze
Available file operations:
- Open and view files
- Search within files and directories
- Your core memory will automatically reflect the contents of any currently open files
You should only keep files open that are directly relevant to the current user interaction to maintain optimal performance.
</file_system>
Continue executing and calling tools until the current task is complete or you need user input. To continue: call another tool. To yield control: end your response without calling a tool.
Base instructions complete.
</base_instructions>
"""
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/prompts/system_prompts/letta_v1.py",
"license": "Apache License 2.0",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
letta-ai/letta:letta/schemas/openai/responses_request.py | from typing import Any, Dict, Iterable, List, Literal, Optional, Union
from openai import NOT_GIVEN
from openai.types import Metadata, Reasoning, ResponsesModel
# from openai._types import Headers, Query, Body
from openai.types.responses import (
ResponseIncludable,
ResponseInputParam,
ResponsePromptParam,
ResponseTextConfigParam,
ToolParam,
response_create_params,
)
# import httpx
from pydantic import BaseModel, Field
class ResponsesRequest(BaseModel):
    """Request schema mirroring the parameters of OpenAI's Responses API `create` call.

    Every field defaults to the OpenAI `NOT_GIVEN` sentinel rather than `None`,
    so fields the caller never set can be distinguished from fields explicitly
    set to null when the request is forwarded to the OpenAI client.
    """

    background: Optional[bool] = Field(default=NOT_GIVEN)
    include: Optional[List[ResponseIncludable]] = Field(default=NOT_GIVEN)
    input: Optional[Union[str, ResponseInputParam]] = Field(default=NOT_GIVEN)
    instructions: Optional[str] = Field(default=NOT_GIVEN)
    max_output_tokens: Optional[int] = Field(default=NOT_GIVEN)
    max_tool_calls: Optional[int] = Field(default=NOT_GIVEN)
    metadata: Optional[Metadata] = Field(default=NOT_GIVEN)
    model: Optional[ResponsesModel] = Field(default=NOT_GIVEN)
    parallel_tool_calls: Optional[bool] = Field(default=NOT_GIVEN)
    previous_response_id: Optional[str] = Field(default=NOT_GIVEN)
    prompt: Optional[ResponsePromptParam] = Field(default=NOT_GIVEN)
    prompt_cache_retention: Optional[Literal["in_memory", "24h"]] = Field(default=NOT_GIVEN)
    reasoning: Optional[Reasoning] = Field(default=NOT_GIVEN)
    safety_identifier: Optional[str] = Field(default=NOT_GIVEN)
    service_tier: Optional[Literal["auto", "default", "flex", "scale", "priority"]] = Field(default=NOT_GIVEN)
    store: Optional[bool] = Field(default=NOT_GIVEN)
    stream: Optional[Literal[False]] = Field(default=NOT_GIVEN)
    stream_options: Optional[response_create_params.StreamOptions] = Field(default=NOT_GIVEN)
    temperature: Optional[float] = Field(default=NOT_GIVEN)
    text: Optional[ResponseTextConfigParam] = Field(default=NOT_GIVEN)
    tool_choice: Optional[response_create_params.ToolChoice] = Field(default=NOT_GIVEN)
    tools: Optional[Iterable[ToolParam]] = Field(default=NOT_GIVEN)
    top_logprobs: Optional[int] = Field(default=NOT_GIVEN)
    top_p: Optional[float] = Field(default=NOT_GIVEN)
    truncation: Optional[Literal["auto", "disabled"]] = Field(default=NOT_GIVEN)
    user: Optional[str] = Field(default=NOT_GIVEN)
    # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
    # The extra values given here take precedence over values defined on the client or passed to this method.
    # extra_headers: Headers | None = (None,)
    # extra_query: Query | None = (None,)
    # extra_body: Body | None = (None,)
    # timeout: float | httpx.Timeout | None | NotGiven = (NOT_GIVEN,)

    def model_dump(self, **kwargs) -> Dict[str, Any]:
        """Custom model_dump that properly serializes complex OpenAI types for JSON compatibility.

        Returns:
            A dict whose nested values are fully JSON-serialized (plain dicts,
            lists, and scalars), suitable for sending to the API.
        """
        # Force JSON mode to ensure full serialization of complex OpenAI types
        # This prevents SerializationIterator objects from being created
        kwargs["mode"] = "json"
        # Get the JSON-serialized dump
        data = super().model_dump(**kwargs)
        # The API expects dicts, which JSON mode provides
        return data
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/schemas/openai/responses_request.py",
"license": "Apache License 2.0",
"lines": 56,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:letta/schemas/providers/openrouter.py | from typing import Literal
from openai import AsyncOpenAI, AuthenticationError, PermissionDeniedError
from pydantic import Field
from letta.errors import ErrorCode, LLMAuthenticationError, LLMError, LLMPermissionDeniedError
from letta.log import get_logger
from letta.schemas.enums import ProviderCategory, ProviderType
from letta.schemas.llm_config import LLMConfig
from letta.schemas.providers.openai import OpenAIProvider
# Module-level logger namespaced to this module.
logger = get_logger(__name__)
# Default context window for models not in the API response
DEFAULT_CONTEXT_WINDOW = 128000
class OpenRouterProvider(OpenAIProvider):
    """
    OpenRouter provider - https://openrouter.ai/

    OpenRouter is an OpenAI-compatible API gateway that provides access to
    multiple LLM providers (Anthropic, Meta, Mistral, etc.) through a unified API.
    """

    provider_type: Literal[ProviderType.openrouter] = Field(ProviderType.openrouter, description="The type of the provider.")
    provider_category: ProviderCategory = Field(ProviderCategory.base, description="The category of the provider (base or byok)")
    api_key: str | None = Field(None, description="API key for the OpenRouter API.", deprecated=True)
    base_url: str = Field("https://openrouter.ai/api/v1", description="Base URL for the OpenRouter API.")

    async def check_api_key(self):
        """Check if the API key is valid by making a test request to the OpenRouter API.

        Raises:
            ValueError: If no API key is configured.
            LLMAuthenticationError: If OpenRouter rejects the key.
            LLMPermissionDeniedError: If OpenRouter denies access for the key.
            LLMError: For any other failure, including a misconfigured base URL
                that returns a non-JSON response.
        """
        # Read the key from the encrypted holder; the plaintext `api_key` field is deprecated.
        api_key = await self.api_key_enc.get_plaintext_async() if self.api_key_enc else None
        if not api_key:
            raise ValueError("No API key provided")
        try:
            # Use async OpenAI client pointed at OpenRouter's endpoint
            client = AsyncOpenAI(api_key=api_key, base_url=self.base_url)
            # Just list models to verify API key works
            await client.models.list()
        except AuthenticationError as e:
            raise LLMAuthenticationError(message=f"Failed to authenticate with OpenRouter: {e}", code=ErrorCode.UNAUTHENTICATED)
        except PermissionDeniedError as e:
            raise LLMPermissionDeniedError(message=f"Permission denied by OpenRouter: {e}", code=ErrorCode.PERMISSION_DENIED)
        except AttributeError as e:
            # NOTE(review): "_set_private_attributes" appears in SDK AttributeErrors
            # when the endpoint returns something the client cannot parse — treated
            # here as a misconfigured endpoint; confirm against the SDK version in use.
            if "_set_private_attributes" in str(e):
                raise LLMError(
                    message=f"OpenRouter endpoint at {self.base_url} returned an unexpected non-JSON response. Verify the base URL and API key.",
                    code=ErrorCode.INTERNAL_SERVER_ERROR,
                )
            raise LLMError(message=f"{e}", code=ErrorCode.INTERNAL_SERVER_ERROR)
        except Exception as e:
            raise LLMError(message=f"{e}", code=ErrorCode.INTERNAL_SERVER_ERROR)

    def get_model_context_window_size(self, model_name: str) -> int | None:
        """Get the context window size for an OpenRouter model.

        OpenRouter models provide context_length in the API response,
        so this is mainly a fallback.
        """
        # Unconditional fallback: callers prefer the API-reported context_length.
        return DEFAULT_CONTEXT_WINDOW

    async def list_llm_models_async(self) -> list[LLMConfig]:
        """
        Return available OpenRouter models that support tool calling.

        OpenRouter provides a models endpoint that supports filtering by supported_parameters.
        We filter for models that support 'tools' to ensure Letta compatibility.
        """
        from letta.llm_api.openai import openai_get_model_list_async

        api_key = await self.api_key_enc.get_plaintext_async() if self.api_key_enc else None

        # OpenRouter supports filtering models by supported parameters
        # See: https://openrouter.ai/docs/requests
        extra_params = {"supported_parameters": "tools"}

        response = await openai_get_model_list_async(
            self.base_url,
            api_key=api_key,
            extra_params=extra_params,
        )

        # The endpoint may return either {"data": [...]} or a bare list.
        data = response.get("data", response)

        configs = []
        for model in data:
            if "id" not in model:
                logger.warning(f"OpenRouter model missing 'id' field: {model}")
                continue
            model_name = model["id"]

            # OpenRouter returns context_length in the model listing
            if model.get("context_length"):
                context_window_size = model["context_length"]
            else:
                context_window_size = self.get_model_context_window_size(model_name)
                logger.debug(f"Model {model_name} missing context_length, using default: {context_window_size}")

            configs.append(
                LLMConfig(
                    model=model_name,
                    model_endpoint_type="openrouter",
                    model_endpoint=self.base_url,
                    context_window=context_window_size,
                    handle=self.get_handle(model_name),
                    max_tokens=self.get_default_max_output_tokens(model_name),
                    provider_name=self.name,
                    provider_category=self.provider_category,
                )
            )
        return configs
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/schemas/providers/openrouter.py",
"license": "Apache License 2.0",
"lines": 92,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/services/helpers/run_manager_helper.py | from datetime import datetime
from typing import Optional
from sqlalchemy import asc, desc, nulls_last, select
from letta.orm.run import Run as RunModel
from letta.services.helpers.agent_manager_helper import _cursor_filter
from letta.settings import DatabaseChoice, settings
async def _apply_pagination_async(
    query,
    before: Optional[str],
    after: Optional[str],
    session,
    ascending: bool = True,
    sort_by: str = "created_at",
):
    """Apply cursor-based pagination and ordering to a Run query.

    Resolves the `before`/`after` run-id cursors into (sort_value, id) pairs,
    adds the corresponding cursor filters, and finally orders by the chosen
    sort column with the run id as tiebreaker.

    Args:
        query: The SQLAlchemy select to paginate.
        before: Run id acting as an exclusive upper cursor, if any.
        after: Run id acting as an exclusive lower cursor, if any.
        session: Async DB session used to resolve cursor rows.
        ascending: Sort direction for the primary column and tiebreaker.
        sort_by: "created_at" (default) or "last_run_completion".

    Returns:
        The query with cursor filters and ordering applied.
    """
    # Determine the sort column. Fix: the original annotated the return type as
    # `-> any` (the builtin function, not a type); the annotation is dropped.
    if sort_by == "last_run_completion":
        sort_column = RunModel.last_run_completion
        sort_nulls_last = True  # TODO: handle this as a query param eventually
    else:
        sort_column = RunModel.created_at
        sort_nulls_last = False

    async def _resolve_cursor(cursor_run_id: str):
        # Look up the (sort_value, id) pair for a cursor run; None when the run is missing.
        row = (await session.execute(select(sort_column, RunModel.id).where(RunModel.id == cursor_run_id))).first()
        if not row:
            return None
        sort_value, row_id = row
        # SQLite does not support as granular timestamping, so we need to round the timestamp
        if settings.database_engine is DatabaseChoice.SQLITE and isinstance(sort_value, datetime):
            sort_value = sort_value.strftime("%Y-%m-%d %H:%M:%S")
        return sort_value, row_id

    if after:
        cursor = await _resolve_cursor(after)
        if cursor:
            after_sort_value, after_id = cursor
            query = query.where(
                _cursor_filter(
                    sort_column,
                    RunModel.id,
                    after_sort_value,
                    after_id,
                    forward=not ascending,
                    nulls_last=sort_nulls_last,
                )
            )

    if before:
        cursor = await _resolve_cursor(before)
        if cursor:
            before_sort_value, before_id = cursor
            query = query.where(
                _cursor_filter(
                    sort_column,
                    RunModel.id,
                    before_sort_value,
                    before_id,
                    forward=ascending,
                    nulls_last=sort_nulls_last,
                )
            )

    # Apply ordering
    order_fn = asc if ascending else desc
    query = query.order_by(
        nulls_last(order_fn(sort_column)) if sort_nulls_last else order_fn(sort_column),
        order_fn(RunModel.id),
    )
    return query
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/helpers/run_manager_helper.py",
"license": "Apache License 2.0",
"lines": 62,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:letta/services/lettuce/lettuce_client_base.py | from letta.constants import DEFAULT_MAX_STEPS
from letta.schemas.agent import AgentState
from letta.schemas.enums import DuplicateFileHandling
from letta.schemas.letta_message import MessageType
from letta.schemas.message import MessageCreate
from letta.schemas.user import User
class LettuceClient:
    """No-op base implementation of the Lettuce client interface.

    Every operation succeeds trivially and returns ``None``; concrete
    subclasses override these methods to talk to a real Lettuce service.
    """

    def __init__(self):
        """Construct a client with no underlying connection."""
        self.client: None = None

    @classmethod
    async def create(cls) -> "LettuceClient":
        """Async factory.

        The base class needs no asynchronous setup beyond construction.

        Returns:
            LettuceClient: A freshly constructed instance.
        """
        return cls()

    def get_client(self) -> None:
        """Return the wrapped inner client (always ``None`` for the base class)."""
        return self.client

    async def get_status(self, run_id: str) -> str | None:
        """Report the status of run ``run_id``.

        The base class has no backing service, so this always returns ``None``.
        """
        return None

    async def cancel(self, run_id: str) -> str | None:
        """Request cancellation of run ``run_id``.

        A no-op in the base class; returns ``None`` instead of a run id.
        """
        return None

    async def step(
        self,
        agent_state: AgentState,
        actor: User,
        input_messages: list[MessageCreate],
        max_steps: int = DEFAULT_MAX_STEPS,
        run_id: str | None = None,
        use_assistant_message: bool = True,
        include_return_message_types: list[MessageType] | None = None,
        request_start_timestamp_ns: int | None = None,
    ) -> str | None:
        """Execute the agent loop on the Lettuce service.

        Parameters mirror the real service call: ``agent_state`` and ``actor``
        identify who is stepping, ``input_messages`` seed the loop,
        ``max_steps`` bounds it, and the remaining keyword options shape the
        returned message stream. The base implementation dispatches nothing
        and returns ``None`` instead of a run id.
        """
        return None

    async def upload_file_to_folder(
        self,
        *,
        folder_id: str,
        actor_id: str,
        file_name: str,
        content: bytes,
        content_type: str | None = None,
        duplicate_handling: DuplicateFileHandling | None = None,
        override_name: str | None = None,
    ):
        """Kick off an upload workflow; the base client does nothing and returns ``None``."""
        return None
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/lettuce/lettuce_client_base.py",
"license": "Apache License 2.0",
"lines": 84,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
letta-ai/letta:letta/services/run_manager.py | from datetime import datetime
from typing import List, Literal, Optional
from httpx import AsyncClient
from letta.data_sources.redis_client import get_redis_client
from letta.helpers.datetime_helpers import get_utc_time
from letta.log import get_logger
from letta.log_context import update_log_context
from letta.orm.agent import Agent as AgentModel
from letta.orm.errors import NoResultFound
from letta.orm.run import Run as RunModel
from letta.orm.run_metrics import RunMetrics as RunMetricsModel
from letta.orm.sqlalchemy_base import AccessType
from letta.otel.tracing import log_event, trace_method
from letta.schemas.enums import AgentType, ComparisonOperator, MessageRole, PrimitiveType, RunStatus
from letta.schemas.job import LettaRequestConfig
from letta.schemas.letta_message import LettaMessage
from letta.schemas.letta_response import LettaResponse
from letta.schemas.letta_stop_reason import LettaStopReason, StopReasonType
from letta.schemas.message import Message as PydanticMessage
from letta.schemas.run import Run as PydanticRun, RunUpdate
from letta.schemas.run_metrics import RunMetrics as PydanticRunMetrics
from letta.schemas.step import Step as PydanticStep
from letta.schemas.usage import LettaUsageStatistics, normalize_cache_tokens, normalize_reasoning_tokens
from letta.schemas.user import User as PydanticUser
from letta.server.db import db_registry
from letta.services.agent_manager import AgentManager
from letta.services.helpers.agent_manager_helper import validate_agent_exists_async
from letta.services.message_manager import MessageManager
from letta.services.step_manager import StepManager
from letta.utils import enforce_types
from letta.validators import raise_on_invalid_id
# Module-level logger namespaced to this module.
logger = get_logger(__name__)
class RunManager:
"""Manager class to handle business logic related to Runs."""
def __init__(self):
"""Initialize the RunManager."""
self.step_manager = StepManager()
self.message_manager = MessageManager()
self.agent_manager = AgentManager()
    @enforce_types
    async def create_run(self, pydantic_run: PydanticRun, actor: PydanticUser) -> PydanticRun:
        """Create a new run.

        Validates that the target agent exists, persists the run under the
        actor's organization (inheriting the agent's project), and seeds a
        companion RunMetrics row stamped with the start time.

        Args:
            pydantic_run: The run to persist; its agent_id must reference an existing agent.
            actor: The user performing the operation; supplies the organization scope.

        Returns:
            The persisted run as a Pydantic model.
        """
        async with db_registry.async_session() as session:
            # Get agent_id from the pydantic object
            agent_id = pydantic_run.agent_id

            # Verify agent exists before creating the run
            await validate_agent_exists_async(session, agent_id, actor)
            organization_id = actor.organization_id

            run_data = pydantic_run.model_dump(exclude_none=True)
            # Handle metadata field mapping (Pydantic uses 'metadata', ORM uses 'metadata_')
            if "metadata" in run_data:
                run_data["metadata_"] = run_data.pop("metadata")
            run = RunModel(**run_data)
            run.organization_id = organization_id

            # Get the project_id from the agent
            agent = await session.get(AgentModel, agent_id)
            project_id = agent.project_id if agent else None
            run.project_id = project_id

            run = await run.create_async(session, actor=actor, no_commit=True, no_refresh=True)
            update_log_context(run_id=run.id)

            # Create run metrics with start timestamp
            import time

            metrics = RunMetricsModel(
                id=run.id,  # metrics row shares the run's primary key
                organization_id=organization_id,
                agent_id=agent_id,
                project_id=project_id,
                run_start_ns=int(time.time() * 1e9),  # Current time in nanoseconds
                num_steps=0,  # Initialize to 0
            )
            await metrics.create_async(session)
            # context manager now handles commits
            # await session.commit()
            return run.to_pydantic()
@enforce_types
@raise_on_invalid_id(param_name="run_id", expected_prefix=PrimitiveType.RUN)
async def get_run_by_id(self, run_id: str, actor: PydanticUser) -> PydanticRun:
"""Get a run by its ID."""
update_log_context(run_id=run_id)
async with db_registry.async_session() as session:
run = await RunModel.read_async(db_session=session, identifier=run_id, actor=actor, access_type=AccessType.ORGANIZATION)
if not run:
raise NoResultFound(f"Run with id {run_id} not found")
return run.to_pydantic()
@enforce_types
async def get_run_with_status(self, run_id: str, actor: PydanticUser) -> PydanticRun:
"""Get a run by its ID and update status from Lettuce if applicable."""
update_log_context(run_id=run_id)
run = await self.get_run_by_id(run_id=run_id, actor=actor)
use_lettuce = run.metadata and run.metadata.get("lettuce")
if use_lettuce and run.status not in [RunStatus.completed, RunStatus.failed, RunStatus.cancelled]:
try:
from letta.services.lettuce import LettuceClient
lettuce_client = await LettuceClient.create()
status = await lettuce_client.get_status(run_id=run_id)
# Map the status to our enum
if status == "RUNNING":
run.status = RunStatus.running
elif status == "COMPLETED":
run.status = RunStatus.completed
elif status == "FAILED":
run.status = RunStatus.failed
elif status == "CANCELLED":
run.status = RunStatus.cancelled
except Exception as e:
logger.error(f"Failed to get status from Lettuce for run {run_id}: {str(e)}")
# Return run with current status from DB if Lettuce fails
return run
    @enforce_types
    async def list_runs(
        self,
        actor: PydanticUser,
        run_id: Optional[str] = None,
        agent_id: Optional[str] = None,
        agent_ids: Optional[List[str]] = None,
        statuses: Optional[List[RunStatus]] = None,
        limit: Optional[int] = 50,
        before: Optional[str] = None,
        after: Optional[str] = None,
        ascending: bool = False,
        stop_reason: Optional[str] = None,
        background: Optional[bool] = None,
        template_family: Optional[str] = None,
        step_count: Optional[int] = None,
        step_count_operator: ComparisonOperator = ComparisonOperator.EQ,
        tools_used: Optional[List[str]] = None,
        project_id: Optional[str] = None,
        conversation_id: Optional[str] = None,
        order_by: Literal["created_at", "duration"] = "created_at",
        duration_percentile: Optional[int] = None,
        duration_filter: Optional[dict] = None,
        start_date: Optional[datetime] = None,
        end_date: Optional[datetime] = None,
    ) -> List[PydanticRun]:
        """List runs with filtering options.

        Supports filtering by agent(s), status, stop reason, background flag,
        conversation, template family, project, date range, step count,
        tools used, and duration (absolute filter or percentile threshold).
        Cursor pagination (`before`/`after`) applies only when ordering by
        created_at. Durations come from the joined run_metrics table, and
        each returned run has total_duration_ns populated when available.
        A hard cap of 1000 results is always enforced.
        """
        async with db_registry.async_session() as session:
            from sqlalchemy import func, select

            # Always join with run_metrics to get duration data
            query = (
                select(RunModel, RunMetricsModel.run_ns)
                .outerjoin(RunMetricsModel, RunModel.id == RunMetricsModel.id)
                .filter(RunModel.organization_id == actor.organization_id)
            )

            # Filter by project_id if provided
            if project_id:
                query = query.filter(RunModel.project_id == project_id)

            if run_id:
                query = query.filter(RunModel.id == run_id)

            # Handle agent filtering (single agent_id takes precedence over agent_ids)
            if agent_id:
                agent_ids = [agent_id]
            if agent_ids:
                query = query.filter(RunModel.agent_id.in_(agent_ids))

            # Filter by status
            if statuses:
                query = query.filter(RunModel.status.in_(statuses))

            # Filter by stop reason
            if stop_reason:
                query = query.filter(RunModel.stop_reason == stop_reason)

            # Filter by background
            if background is not None:
                query = query.filter(RunModel.background == background)

            # Filter by conversation_id
            if conversation_id is not None:
                query = query.filter(RunModel.conversation_id == conversation_id)

            # Filter by template_family (base_template_id)
            if template_family:
                query = query.filter(RunModel.base_template_id == template_family)

            # Filter by date range
            if start_date:
                query = query.filter(RunModel.created_at >= start_date)
            if end_date:
                query = query.filter(RunModel.created_at <= end_date)

            # Filter by step_count with the specified operator
            if step_count is not None:
                if step_count_operator == ComparisonOperator.EQ:
                    query = query.filter(RunMetricsModel.num_steps == step_count)
                elif step_count_operator == ComparisonOperator.GTE:
                    query = query.filter(RunMetricsModel.num_steps >= step_count)
                elif step_count_operator == ComparisonOperator.LTE:
                    query = query.filter(RunMetricsModel.num_steps <= step_count)

            # Filter by tools used ids
            if tools_used:
                from sqlalchemy import String, cast as sa_cast, type_coerce
                from sqlalchemy.dialects.postgresql import ARRAY, JSONB

                # Use ?| operator to check if any tool_id exists in the array (OR logic)
                jsonb_tools = sa_cast(RunMetricsModel.tools_used, JSONB)
                tools_array = type_coerce(tools_used, ARRAY(String))
                query = query.filter(jsonb_tools.op("?|")(tools_array))

            # Ensure run_ns is not null when working with duration
            if order_by == "duration" or duration_percentile is not None or duration_filter is not None:
                query = query.filter(RunMetricsModel.run_ns.isnot(None))

            # Apply duration filter if requested
            # duration_filter may be a plain dict or an object with value/operator attributes
            if duration_filter is not None:
                duration_value = duration_filter.get("value") if isinstance(duration_filter, dict) else duration_filter.value
                duration_operator = duration_filter.get("operator") if isinstance(duration_filter, dict) else duration_filter.operator
                if duration_operator == "gt":
                    query = query.filter(RunMetricsModel.run_ns > duration_value)
                elif duration_operator == "lt":
                    query = query.filter(RunMetricsModel.run_ns < duration_value)
                elif duration_operator == "eq":
                    query = query.filter(RunMetricsModel.run_ns == duration_value)

            # Apply duration percentile filter if requested
            if duration_percentile is not None:
                # Calculate the percentile threshold
                percentile_query = (
                    select(func.percentile_cont(duration_percentile / 100.0).within_group(RunMetricsModel.run_ns))
                    .select_from(RunMetricsModel)
                    .join(RunModel, RunModel.id == RunMetricsModel.id)
                    .filter(RunModel.organization_id == actor.organization_id)
                    .filter(RunMetricsModel.run_ns.isnot(None))
                )

                # Apply same filters to percentile calculation
                if project_id:
                    percentile_query = percentile_query.filter(RunModel.project_id == project_id)
                if agent_ids:
                    percentile_query = percentile_query.filter(RunModel.agent_id.in_(agent_ids))
                if statuses:
                    percentile_query = percentile_query.filter(RunModel.status.in_(statuses))

                # Execute percentile query
                percentile_result = await session.execute(percentile_query)
                percentile_threshold = percentile_result.scalar()

                # Filter by percentile threshold (runs slower than the percentile)
                if percentile_threshold is not None:
                    query = query.filter(RunMetricsModel.run_ns >= percentile_threshold)

            # Apply sorting based on order_by
            if order_by == "duration":
                # Sort by duration
                if ascending:
                    query = query.order_by(RunMetricsModel.run_ns.asc())
                else:
                    query = query.order_by(RunMetricsModel.run_ns.desc())
            else:
                # Apply pagination for created_at ordering
                from letta.services.helpers.run_manager_helper import _apply_pagination_async

                query = await _apply_pagination_async(query, before, after, session, ascending=ascending)

            # Apply limit (always enforce a maximum to prevent unbounded queries)
            # If no limit specified, default to 100; enforce maximum of 1000
            effective_limit = limit if limit is not None else 100
            effective_limit = min(effective_limit, 1000)
            query = query.limit(effective_limit)

            result = await session.execute(query)
            rows = result.all()

            # Populate total_duration_ns from run_metrics.run_ns
            pydantic_runs = []
            for row in rows:
                run_model = row[0]
                run_ns = row[1]
                pydantic_run = run_model.to_pydantic()
                if run_ns is not None:
                    pydantic_run.total_duration_ns = run_ns
                pydantic_runs.append(pydantic_run)
            return pydantic_runs
@enforce_types
@raise_on_invalid_id(param_name="run_id", expected_prefix=PrimitiveType.RUN)
async def delete_run(self, run_id: str, actor: PydanticUser) -> None:
"""Delete a run by its ID."""
async with db_registry.async_session() as session:
run = await RunModel.read_async(db_session=session, identifier=run_id, actor=actor, access_type=AccessType.ORGANIZATION)
if not run:
raise NoResultFound(f"Run with id {run_id} not found")
await run.hard_delete_async(db_session=session, actor=actor)
@enforce_types
@raise_on_invalid_id(param_name="run_id", expected_prefix=PrimitiveType.RUN)
@trace_method
async def update_run_by_id_async(
self,
run_id: str,
update: RunUpdate,
actor: PydanticUser,
refresh_result_messages: bool = True,
conversation_id: Optional[str] = None,
) -> PydanticRun:
"""Update a run using a RunUpdate object."""
async with db_registry.async_session() as session:
run = await RunModel.read_async(db_session=session, identifier=run_id, actor=actor)
# Check if this is a terminal update and whether we should dispatch a callback
needs_callback = False
callback_url = None
not_completed_before = not bool(run.completed_at)
is_terminal_update = update.status in {RunStatus.completed, RunStatus.failed, RunStatus.cancelled}
if is_terminal_update and not_completed_before and run.callback_url:
needs_callback = True
callback_url = run.callback_url
# validate run lifecycle (only log the errors)
if run.status in {RunStatus.completed}:
if update.status not in {RunStatus.cancelled}:
# a completed run can only be marked as cancelled
logger.error(
f"Run {run_id} is already completed with stop reason {run.stop_reason}, but is being marked as {update.status} with stop reason {update.stop_reason}"
)
if update.stop_reason not in {StopReasonType.requires_approval}:
# a completed run can only be cancelled if the stop reason is requires approval
logger.error(
f"Run {run_id} is already completed with stop reason {run.stop_reason}, but is being marked as {update.status} with stop reason {update.stop_reason}"
)
if run.status in {RunStatus.failed, RunStatus.cancelled}:
logger.error(
f"Run {run_id} is already in a terminal state {run.status} with stop reason {run.stop_reason}, but is being updated with data {update.model_dump()}"
)
# Housekeeping only when the run is actually completing
if not_completed_before and is_terminal_update:
if not update.stop_reason:
logger.error(f"Run {run_id} completed without a stop reason")
if not update.completed_at:
logger.warning(f"Run {run_id} completed without a completed_at timestamp")
update.completed_at = get_utc_time().replace(tzinfo=None)
# Update run attributes with only the fields that were explicitly set
update_data = update.model_dump(to_orm=True, exclude_unset=True, exclude_none=True)
# Merge metadata updates instead of overwriting.
# This is important for streaming/background flows where different components update
# different parts of metadata (e.g., run_type set at creation, error payload set at terminal).
if "metadata_" in update_data and isinstance(update_data["metadata_"], dict):
existing_metadata = run.metadata_ if isinstance(run.metadata_, dict) else {}
update_data["metadata_"] = {**existing_metadata, **update_data["metadata_"]}
# Automatically update the completion timestamp if status is set to 'completed'
for key, value in update_data.items():
# Ensure completed_at is timezone-naive for database compatibility
if key == "completed_at" and value is not None and hasattr(value, "replace"):
value = value.replace(tzinfo=None)
setattr(run, key, value)
await run.update_async(db_session=session, actor=actor, no_commit=True, no_refresh=True)
final_metadata = run.metadata_
pydantic_run = run.to_pydantic()
# context manager now handles commits
# await session.commit()
# Release conversation lock if conversation_id was provided
if is_terminal_update and conversation_id:
try:
redis_client = await get_redis_client()
await redis_client.release_conversation_lock(conversation_id)
except Exception as lock_error:
logger.warning(f"Failed to release conversation lock for conversation {conversation_id}: {lock_error}")
# Update agent's last_stop_reason when run completes
# Do this after run update is committed to database
if is_terminal_update and update.stop_reason:
try:
from letta.schemas.agent import UpdateAgent
await self.agent_manager.update_agent_async(
agent_id=pydantic_run.agent_id,
agent_update=UpdateAgent(last_stop_reason=update.stop_reason),
actor=actor,
)
except Exception as e:
logger.error(f"Failed to update agent's last_stop_reason for run {run_id}: {e}")
# update run metrics table
num_steps = len(await self.step_manager.list_steps_async(run_id=run_id, actor=actor))
# Collect tools used from run messages
tools_used = set()
messages = await self.message_manager.list_messages(actor=actor, run_id=run_id)
for message in messages:
if message.tool_calls:
for tool_call in message.tool_calls:
if hasattr(tool_call, "function") and hasattr(tool_call.function, "name"):
# Get tool ID from tool name
from letta.services.tool_manager import ToolManager
tool_manager = ToolManager()
tool_name = tool_call.function.name
tool_id = await tool_manager.get_tool_id_by_name_async(tool_name, actor)
if tool_id:
tools_used.add(tool_id)
async with db_registry.async_session() as session:
metrics = await RunMetricsModel.read_async(db_session=session, identifier=run_id, actor=actor)
# Calculate runtime if run is completing
if is_terminal_update:
# Use total_duration_ns from RunUpdate if provided
# Otherwise fall back to system time
if update.total_duration_ns is not None:
metrics.run_ns = update.total_duration_ns
elif metrics.run_start_ns:
import time
current_ns = int(time.time() * 1e9)
metrics.run_ns = current_ns - metrics.run_start_ns
metrics.num_steps = num_steps
metrics.tools_used = list(tools_used) if tools_used else None
await metrics.update_async(db_session=session, actor=actor, no_commit=True, no_refresh=True)
# context manager now handles commits
# await session.commit()
# Dispatch callback outside of database session if needed
if needs_callback:
if refresh_result_messages:
# Defensive: ensure stop_reason is never None
stop_reason_value = pydantic_run.stop_reason if pydantic_run.stop_reason else StopReasonType.completed
result = LettaResponse(
messages=await self.get_run_messages(run_id=run_id, actor=actor),
stop_reason=LettaStopReason(stop_reason=stop_reason_value),
usage=await self.get_run_usage(run_id=run_id, actor=actor),
)
final_metadata["result"] = result.model_dump()
callback_info = {
"run_id": run_id,
"callback_url": callback_url,
"status": update.status,
"completed_at": get_utc_time().replace(tzinfo=None),
"metadata": final_metadata,
}
callback_result = await self._dispatch_callback_async(callback_info)
# Update callback status in a separate transaction
async with db_registry.async_session() as session:
run = await RunModel.read_async(db_session=session, identifier=run_id, actor=actor)
run.callback_sent_at = callback_result["callback_sent_at"]
run.callback_status_code = callback_result.get("callback_status_code")
run.callback_error = callback_result.get("callback_error")
pydantic_run = run.to_pydantic()
await run.update_async(db_session=session, actor=actor, no_commit=True, no_refresh=True)
# context manager now handles commits
# await session.commit()
return pydantic_run
@trace_method
async def _dispatch_callback_async(self, callback_info: dict) -> dict:
"""
POST a standard JSON payload to callback_url and return callback status asynchronously.
"""
payload = {
"run_id": callback_info["run_id"],
"status": callback_info["status"],
"completed_at": callback_info["completed_at"].isoformat() if callback_info["completed_at"] else None,
"metadata": callback_info["metadata"],
}
callback_sent_at = get_utc_time().replace(tzinfo=None)
result = {"callback_sent_at": callback_sent_at}
try:
async with AsyncClient() as client:
log_event("POST callback dispatched", payload)
resp = await client.post(callback_info["callback_url"], json=payload, timeout=5.0)
log_event("POST callback finished")
result["callback_status_code"] = resp.status_code
except Exception as e:
error_message = f"Failed to dispatch callback for run {callback_info['run_id']} to {callback_info['callback_url']}: {e!r}"
logger.error(error_message)
result["callback_error"] = error_message
# Continue silently - callback failures should not affect run completion
return result
@enforce_types
@raise_on_invalid_id(param_name="run_id", expected_prefix=PrimitiveType.RUN)
async def get_run_usage(self, run_id: str, actor: PydanticUser) -> LettaUsageStatistics:
    """Aggregate token-usage statistics across all steps of a run.

    Args:
        run_id: ID of the run to summarize.
        actor: User performing the lookup (scopes the DB read to the org).

    Returns:
        LettaUsageStatistics with summed prompt/completion/total tokens, the
        step count, and — only when any step reported them — cached-input,
        cache-write, and reasoning token totals (otherwise left as None).

    Raises:
        NoResultFound: If the run does not exist or is not visible to the actor.
    """
    async with db_registry.async_session() as session:
        run = await RunModel.read_async(db_session=session, identifier=run_id, actor=actor, access_type=AccessType.ORGANIZATION)
        if not run:
            raise NoResultFound(f"Run with id {run_id} not found")
        steps = await self.step_manager.list_steps_async(run_id=run_id, actor=actor)
        total_usage = LettaUsageStatistics()
        for step in steps:
            total_usage.prompt_tokens += step.prompt_tokens
            total_usage.completion_tokens += step.completion_tokens
            total_usage.total_tokens += step.total_tokens
            total_usage.step_count += 1
            # Aggregate cache and reasoning tokens from detailed breakdowns using normalized helpers
            # Handle None defaults: only set if we have data, accumulate if already set
            # (i.e. the totals stay None unless at least one step reports a nonzero value,
            # so "no data" is distinguishable from "zero tokens")
            cached_input, cache_write = normalize_cache_tokens(step.prompt_tokens_details)
            if cached_input > 0 or total_usage.cached_input_tokens is not None:
                total_usage.cached_input_tokens = (total_usage.cached_input_tokens or 0) + cached_input
            if cache_write > 0 or total_usage.cache_write_tokens is not None:
                total_usage.cache_write_tokens = (total_usage.cache_write_tokens or 0) + cache_write
            reasoning = normalize_reasoning_tokens(step.completion_tokens_details)
            if reasoning > 0 or total_usage.reasoning_tokens is not None:
                total_usage.reasoning_tokens = (total_usage.reasoning_tokens or 0) + reasoning
        return total_usage
@enforce_types
@raise_on_invalid_id(param_name="run_id", expected_prefix=PrimitiveType.RUN)
async def get_run_messages(
    self,
    run_id: str,
    actor: PydanticUser,
    limit: Optional[int] = 100,
    before: Optional[str] = None,
    after: Optional[str] = None,
    order: Literal["asc", "desc"] = "asc",
) -> List[LettaMessage]:
    """Get the result of a run."""
    ascending = order == "asc"
    run = await self.get_run_by_id(run_id=run_id, actor=actor)
    agent = await self.agent_manager.get_agent_by_id_async(agent_id=run.agent_id, actor=actor, include_relationships=[])
    raw_messages = await self.message_manager.list_messages(
        actor=actor,
        run_id=run_id,
        limit=limit,
        before=before,
        after=after,
        ascending=ascending,
    )
    converted = PydanticMessage.to_letta_messages_from_list(
        raw_messages,
        reverse=not ascending,
        text_is_assistant_message=(agent.agent_type == AgentType.letta_v1_agent),
    )
    # Honor any message-type filter the original request asked for.
    config = run.request_config
    if config and config.include_return_message_types:
        allowed_types = set(config.include_return_message_types)
        converted = [m for m in converted if m.message_type in allowed_types]
    return converted
@enforce_types
@raise_on_invalid_id(param_name="run_id", expected_prefix=PrimitiveType.RUN)
async def get_run_request_config(self, run_id: str, actor: PydanticUser) -> Optional[LettaRequestConfig]:
    """Get the letta request config from a run."""
    async with db_registry.async_session() as session:
        run_row = await RunModel.read_async(db_session=session, identifier=run_id, actor=actor, access_type=AccessType.ORGANIZATION)
        if not run_row:
            raise NoResultFound(f"Run with id {run_id} not found")
        return run_row.to_pydantic().request_config
@enforce_types
@raise_on_invalid_id(param_name="run_id", expected_prefix=PrimitiveType.RUN)
async def get_run_metrics_async(self, run_id: str, actor: PydanticUser) -> PydanticRunMetrics:
    """Get metrics for a run."""
    async with db_registry.async_session() as session:
        metrics_row = await RunMetricsModel.read_async(db_session=session, identifier=run_id, actor=actor)
        return metrics_row.to_pydantic()
@enforce_types
@raise_on_invalid_id(param_name="run_id", expected_prefix=PrimitiveType.RUN)
async def get_run_steps(
    self,
    run_id: str,
    actor: PydanticUser,
    limit: Optional[int] = 100,
    before: Optional[str] = None,
    after: Optional[str] = None,
    ascending: bool = False,
) -> List[PydanticStep]:
    """Get steps for a run."""
    # Existence/access check first so a missing run raises instead of returning [].
    async with db_registry.async_session() as session:
        run_row = await RunModel.read_async(db_session=session, identifier=run_id, actor=actor, access_type=AccessType.ORGANIZATION)
        if not run_row:
            raise NoResultFound(f"Run with id {run_id} not found")
    return await self.step_manager.list_steps_async(
        actor=actor, run_id=run_id, limit=limit, before=before, after=after, order="asc" if ascending else "desc"
    )
@enforce_types
async def cancel_run(self, actor: PydanticUser, agent_id: Optional[str] = None, run_id: Optional[str] = None) -> None:
    """Cancel a run, denying any pending tool-approval requests on the agent.

    Idempotent: if the run already has a terminal stop reason (other than
    requires_approval) this is a no-op. When the agent was waiting on an
    approval request, denial messages and tool returns are appended and
    checkpointed so the conversation state stays consistent.

    NOTE(review): annotated ``-> None`` but the happy path ends with
    ``return run`` — align the annotation or the return with callers.
    """
    # make sure run_id and agent_id are not both None
    if not run_id:
        # get the last agent run
        if not agent_id:
            raise ValueError("Agent ID is required to cancel a run by ID")
        logger.warning("Cannot find run associated with agent to cancel in redis, fetching from db.")
        run_ids = await self.list_runs(
            actor=actor,
            ascending=False,
            agent_id=agent_id,
        )
        run_ids = [run.id for run in run_ids]
        # NOTE(review): this branch computes run_ids but never selects one, and leaves
        # `run` / `run_id` unbound — the logger.info below would raise NameError.
        # Looks like an unfinished code path; confirm intended behavior.
    else:
        # get the agent
        run = await self.get_run_by_id(run_id=run_id, actor=actor)
        if not run:
            raise NoResultFound(f"Run with id {run_id} not found")
        agent_id = run.agent_id
    logger.info(
        "[Interrupt] Processing cancellation for run=%s, agent=%s, current_status=%s, current_stop_reason=%s",
        run_id,
        agent_id,
        run.status if run else "unknown",
        run.stop_reason if run else "unknown",
    )
    # Cancellation should be idempotent: if a run is already terminated, treat this as a no-op.
    # This commonly happens when a run finishes between client request and server handling.
    if run.stop_reason and run.stop_reason not in [StopReasonType.requires_approval]:
        logger.debug(f"Run {run_id} cannot be cancelled because it is already terminated with stop reason: {run.stop_reason.value}")
        return
    # Check if agent is waiting for approval by examining the last message
    agent_state = await self.agent_manager.get_agent_by_id_async(agent_id=agent_id, actor=actor)
    current_in_context_messages = await self.message_manager.get_messages_by_ids_async(message_ids=agent_state.message_ids, actor=actor)
    was_pending_approval = current_in_context_messages and current_in_context_messages[-1].is_approval_request()
    # cancel the run
    # NOTE: this should update the agent's last stop reason to cancelled
    run = await self.update_run_by_id_async(
        run_id=run_id,
        update=RunUpdate(status=RunStatus.cancelled, stop_reason=StopReasonType.cancelled),
        actor=actor,
        conversation_id=run.conversation_id,
    )
    # cleanup the agent's state
    # if was pending approval, we need to cleanup the approval state
    if was_pending_approval:
        logger.debug(f"Agent was waiting for approval, adding denial messages for run {run_id}")
        approval_request_message = current_in_context_messages[-1]
        # Find ALL pending tool calls (both requiring approval and not requiring approval)
        # The assistant message may have tool calls that didn't require approval
        all_pending_tool_calls = []
        if approval_request_message.tool_calls:
            all_pending_tool_calls.extend(approval_request_message.tool_calls)
        # Check if there's an assistant message before the approval request with additional tool calls
        if len(current_in_context_messages) >= 2:
            potential_assistant_msg = current_in_context_messages[-2]
            if potential_assistant_msg.role == MessageRole.assistant and potential_assistant_msg.tool_calls:
                # Add any tool calls from the assistant message that aren't already in the approval request
                approval_tool_call_ids = (
                    {tc.id for tc in approval_request_message.tool_calls} if approval_request_message.tool_calls else set()
                )
                for tool_call in potential_assistant_msg.tool_calls:
                    if tool_call.id not in approval_tool_call_ids:
                        all_pending_tool_calls.append(tool_call)
        # Ensure we have tool calls to deny
        if all_pending_tool_calls:
            from letta.constants import TOOL_CALL_DENIAL_ON_CANCEL
            from letta.schemas.letta_message import ApprovalReturn
            from letta.schemas.message import ApprovalCreate
            from letta.server.rest_api.utils import (
                create_approval_response_message_from_input,
                create_tool_message_from_returns,
                create_tool_returns_for_denials,
            )

            # Create denials for ALL pending tool calls (including those that didn't require approval)
            # NOTE(review): despite the comment above, this list only covers
            # approval_request_message.tool_calls, not all_pending_tool_calls — confirm
            # whether assistant-message tool calls should also receive denials here.
            denials = (
                [
                    ApprovalReturn(
                        tool_call_id=tool_call.id,
                        approve=False,
                        reason=TOOL_CALL_DENIAL_ON_CANCEL,
                    )
                    for tool_call in approval_request_message.tool_calls
                ]
                if approval_request_message.tool_calls
                else []
            )
            # Create an ApprovalCreate input with the denials
            approval_input = ApprovalCreate(
                approvals=denials,
                approval_request_id=approval_request_message.id,
            )
            # Use the standard function to create properly formatted approval response messages
            approval_response_messages = await create_approval_response_message_from_input(
                agent_state=agent_state,
                input_message=approval_input,
                run_id=run_id,
            )
            # Create tool returns for ALL denied tool calls using shared helper
            # This includes both tool calls requiring approval AND those that didn't
            tool_returns = create_tool_returns_for_denials(
                tool_calls=all_pending_tool_calls,
                denial_reason=TOOL_CALL_DENIAL_ON_CANCEL,
                timezone=agent_state.timezone,
            )
            # Create tool message with all denial returns using shared helper
            tool_message = create_tool_message_from_returns(
                agent_id=agent_state.id,
                model=agent_state.llm_config.model,
                tool_returns=tool_returns,
                run_id=run_id,
            )
            # Combine approval response and tool messages
            new_messages = [*approval_response_messages, tool_message]
            # Checkpoint the new messages
            from letta.agents.agent_loop import AgentLoop

            agent_loop = AgentLoop.load(agent_state=agent_state, actor=actor)
            new_in_context_messages = current_in_context_messages + new_messages
            await agent_loop._checkpoint_messages(
                run_id=run_id,
                step_id=approval_request_message.step_id,
                new_messages=new_messages,
                in_context_messages=new_in_context_messages,
            )
            # persisted_messages = await self.message_manager.create_many_messages_async(
            #    pydantic_msgs=new_messages,
            #    actor=actor,
            #    run_id=run_id,
            # )
            # logger.debug(f"Persisted {len(persisted_messages)} messages (approval + tool returns)")
            ## Update the agent's message_ids to include the new messages (approval + tool message)
            # agent_state.message_ids = agent_state.message_ids + [m.id for m in persisted_messages]
            # await self.agent_manager.update_message_ids_async(agent_id=agent_state.id, message_ids=agent_state.message_ids, actor=actor)
            logger.debug(
                f"Inserted approval response with {len(denials)} denials and tool return message for cancelled run {run_id}. "
                f"Approval request message ID: {approval_request_message.id}"
            )
        else:
            logger.warning(
                f"Last message is an approval request but has no tool_calls. "
                f"Message ID: {approval_request_message.id}, Run ID: {run_id}"
            )
    return run
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/run_manager.py",
"license": "Apache License 2.0",
"lines": 672,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/services/tool_schema_generator.py | """Schema generation utilities for tool creation and updates."""
from typing import Optional
from letta.functions.ast_parsers import get_function_name_and_docstring
from letta.functions.functions import derive_openai_json_schema
from letta.functions.helpers import generate_model_from_args_json_schema
from letta.functions.schema_generator import generate_schema_from_args_schema_v2
from letta.log import get_logger
from letta.otel.tracing import trace_method
from letta.schemas.enums import ToolSourceType, ToolType
from letta.schemas.tool import Tool as PydanticTool
logger = get_logger(__name__)
def _schema_size_kb(schema: dict) -> float:
    """Return the JSON-serialized size of *schema* in kilobytes (logging only)."""
    import json

    return len(json.dumps(schema)) / 1024


@trace_method
def generate_schema_for_tool_creation(
    tool: PydanticTool,
) -> Optional[dict]:
    """
    Generate JSON schema for tool creation based on the provided parameters.

    Args:
        tool: The tool being created

    Returns:
        Generated JSON schema or None if not applicable (non-custom tools)

    Raises:
        ValueError: If source code is missing, schema derivation fails, or the
            tool's source type is not supported.
    """
    # Only generate schema for custom tools
    if tool.tool_type != ToolType.CUSTOM:
        return None
    # If json_schema is already provided, use it
    if tool.json_schema:
        return tool.json_schema
    # Must have source code for custom tools
    if not tool.source_code:
        logger.error("Custom tool is missing source_code field")
        raise ValueError("Custom tool is missing source_code field.")

    source_code_size_kb = len(tool.source_code) / 1024
    logger.info(f"Generating schema for tool '{tool.name}': source code {source_code_size_kb:.2f} KB")

    # Imported once here (was previously re-imported in each branch).
    from letta.otel.tracing import tracer

    # TypeScript tools
    if tool.source_type == ToolSourceType.typescript:
        try:
            from letta.functions.typescript_parser import derive_typescript_json_schema

            with tracer.start_as_current_span("derive_typescript_json_schema"):
                schema = derive_typescript_json_schema(source_code=tool.source_code)
            logger.info(f"Generated TypeScript schema for '{tool.name}': {_schema_size_kb(schema):.2f} KB")
            return schema
        except Exception as e:
            logger.warning(f"Failed to derive TypeScript json schema: {e}")
            # Chain the cause so the original traceback is preserved for debugging.
            raise ValueError(f"Failed to derive TypeScript json schema: {e}") from e
    # Python tools (default if not specified for backwards compatibility)
    elif tool.source_type == ToolSourceType.python or tool.source_type is None:
        # If args_json_schema is provided, use it to generate full schema
        if tool.args_json_schema:
            with tracer.start_as_current_span("get_function_name_and_docstring"):
                name, description = get_function_name_and_docstring(tool.source_code, tool.name)
            with tracer.start_as_current_span("generate_model_from_args_json_schema"):
                args_schema = generate_model_from_args_json_schema(tool.args_json_schema)
            with tracer.start_as_current_span("generate_schema_from_args_schema_v2"):
                schema = generate_schema_from_args_schema_v2(
                    args_schema=args_schema,
                    name=name,
                    description=description,
                    append_heartbeat=False,
                )
            logger.info(f"Generated Python schema from args_json for '{tool.name}': {_schema_size_kb(schema):.2f} KB")
            return schema
        # Otherwise, attempt to parse from docstring with best effort
        else:
            try:
                with tracer.start_as_current_span("derive_openai_json_schema"):
                    schema = derive_openai_json_schema(source_code=tool.source_code)
                logger.info(f"Generated Python schema from docstring for '{tool.name}': {_schema_size_kb(schema):.2f} KB")
                return schema
            except Exception as e:
                logger.warning(f"Failed to derive json schema: {e}")
                raise ValueError(f"Failed to derive json schema: {e}") from e
    else:
        # Explicit failure for unsupported source types (resolves the old TODO).
        raise ValueError(f"Unknown tool source type: {tool.source_type}")
def generate_schema_for_tool_update(
    current_tool: PydanticTool,
    json_schema: Optional[dict] = None,
    args_json_schema: Optional[dict] = None,
    source_code: Optional[str] = None,
    source_type: Optional[ToolSourceType] = None,
) -> Optional[dict]:
    """
    Generate JSON schema for tool update based on the provided parameters.

    Args:
        current_tool: The current tool being updated
        json_schema: Directly provided JSON schema (takes precedence)
        args_json_schema: Schema for just the arguments
        source_code: Updated source code (only used if explicitly updating source)
        source_type: Source type for the tool

    Returns:
        Updated JSON schema or None if no update needed

    Raises:
        ValueError: If both schemas are supplied, or args_json_schema is given
            without any source code to parse.
    """
    # Schema regeneration only applies to custom tools.
    if current_tool.tool_type != ToolType.CUSTOM:
        return None

    # A directly supplied schema wins, but is mutually exclusive with args_json_schema.
    if json_schema is not None:
        if args_json_schema is not None:
            raise ValueError("Cannot provide both json_schema and args_json_schema in update")
        return json_schema

    # Nothing supplied -> no schema update (docstrings are deliberately not re-parsed).
    if args_json_schema is None:
        return None

    # Build a full schema from the argument schema plus the function's name/docstring,
    # preferring freshly supplied source code over the stored copy.
    code_to_parse = current_tool.source_code if source_code is None else source_code
    if not code_to_parse:
        raise ValueError("Source code required when updating with args_json_schema")
    func_name, func_description = get_function_name_and_docstring(code_to_parse, current_tool.name)
    args_model = generate_model_from_args_json_schema(args_json_schema)
    return generate_schema_from_args_schema_v2(
        args_schema=args_model,
        name=func_name,
        description=func_description,
        append_heartbeat=False,
    )
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/tool_schema_generator.py",
"license": "Apache License 2.0",
"lines": 126,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:tests/integration_test_mcp.py | import os
import sys
import threading
import time
import uuid
from pathlib import Path
import pytest
import requests
from dotenv import load_dotenv
from letta_client import Letta
from letta_client.types import MessageCreateParam
from letta_client.types.agents.tool_call_message import ToolCallMessage
from letta_client.types.tool_return_message import ToolReturnMessage
from letta.functions.mcp_client.types import StdioServerConfig
from letta.schemas.agent import AgentState
from letta.schemas.embedding_config import EmbeddingConfig
from letta.schemas.llm_config import LLMConfig
# ------------------------------
# Fixtures
# ------------------------------
@pytest.fixture(scope="module")
def server_url() -> str:
    """
    Provides the URL for the Letta server.

    If LETTA_SERVER_URL is not set, starts the server in a background thread
    and polls the health endpoint until it's accepting connections.
    """

    def _boot() -> None:
        load_dotenv()
        from letta.server.rest_api.app import start_server

        start_server(debug=True)

    url: str = os.getenv("LETTA_SERVER_URL", "http://localhost:8283")
    if not os.getenv("LETTA_SERVER_URL"):
        threading.Thread(target=_boot, daemon=True).start()
        # Poll until the server is up (or timeout)
        timeout_seconds = 60
        deadline = time.time() + timeout_seconds
        while time.time() < deadline:
            try:
                if requests.get(url + "/v1/health").status_code < 500:
                    break
            except requests.exceptions.RequestException:
                pass
            time.sleep(0.1)
        else:
            raise RuntimeError(f"Could not reach {url} within {timeout_seconds}s")
    yield url
@pytest.fixture(scope="module")
def client(server_url: str) -> Letta:
    """
    Yields a synchronous Letta REST client bound to the test server.
    """
    yield Letta(base_url=server_url)
@pytest.fixture(scope="function")
def mcp_server_name() -> str:
    """Generate a unique MCP server name for each test."""
    unique_suffix = uuid.uuid4().hex[:8]
    return f"test-mcp-server-{unique_suffix}"
@pytest.fixture(scope="function")
def mock_mcp_server_config(mcp_server_name: str) -> StdioServerConfig:
    """
    Creates a stdio configuration for the mock MCP server.
    """
    # mock_mcp_server.py lives next to this test file.
    mcp_server_path = Path(__file__).parent / "mock_mcp_server.py"
    if not mcp_server_path.exists():
        raise FileNotFoundError(f"Mock MCP server not found at {mcp_server_path}")
    return StdioServerConfig(
        server_name=mcp_server_name,
        command=sys.executable,  # Use the current Python interpreter
        args=[str(mcp_server_path)],
    )
@pytest.fixture(scope="function")
def agent_state(client: Letta, mcp_server_name: str, mock_mcp_server_config: StdioServerConfig) -> AgentState:
    """
    Creates an agent with MCP tools attached for testing.

    Registers the mock stdio MCP server, asserts the echo/add tools are
    exposed, yields an agent wired to those tools, then tears down the
    agent and server best-effort after the test.
    """
    # Register the MCP server
    mcp_server = client.mcp_servers.create(
        server_name=mcp_server_name,
        config={
            "command": mock_mcp_server_config.command,
            "args": mock_mcp_server_config.args,
            "env": mock_mcp_server_config.env if hasattr(mock_mcp_server_config, "env") else None,
            "mcp_server_type": "stdio",
        },
    )
    # List available MCP tools
    mcp_tools = client.mcp_servers.tools.list(mcp_server_id=mcp_server.id)
    assert len(mcp_tools) > 0, "No tools found from MCP server"
    # Get the echo and add tools from MCP server
    echo_tool = next((t for t in mcp_tools if t.name == "echo"), None)
    add_tool = next((t for t in mcp_tools if t.name == "add"), None)
    assert echo_tool is not None, "echo tool not found"
    assert add_tool is not None, "add tool not found"
    # Create agent with the MCP tools (use tool IDs directly)
    agent = client.agents.create(
        name=f"test_mcp_agent_{uuid.uuid4().hex[:8]}",
        include_base_tools=True,
        tool_ids=[echo_tool.id, add_tool.id],
        memory_blocks=[
            {
                "label": "human",
                "value": "Name: Test User",
            },
            {
                "label": "persona",
                "value": "You are a helpful assistant that can use MCP tools to help the user.",
            },
        ],
        llm_config=LLMConfig.default_config(model_name="gpt-4o-mini"),
        embedding_config=EmbeddingConfig.default_config(provider="openai"),
        tags=["test_mcp_agent"],
    )
    yield agent
    # Cleanup (best-effort: teardown errors must never fail the test itself)
    try:
        client.agents.delete(agent.id)
    except Exception:
        pass
    try:
        client.mcp_servers.delete(mcp_server_id=mcp_server.id)
    except Exception:
        pass
# ------------------------------
# Test Cases
# ------------------------------
def test_mcp_echo_tool(client: Letta, agent_state: AgentState):
    """
    Test that an agent can successfully call the echo tool from the MCP server.
    """
    test_message = "Hello from MCP integration test!"
    response = client.agents.messages.create(
        agent_id=agent_state.id,
        messages=[
            MessageCreateParam(
                role="user",
                content=f"Use the echo tool to echo back this exact message: '{test_message}'",
            )
        ],
    )
    # Partition the response into tool calls and tool returns.
    calls = [m for m in response.messages if isinstance(m, ToolCallMessage)]
    returns = [m for m in response.messages if isinstance(m, ToolReturnMessage)]
    assert calls, "Expected at least one ToolCallMessage"
    echo_call = next((m for m in calls if m.tool_call.name == "echo"), None)
    assert echo_call is not None, f"No echo tool call found. Tool calls: {[m.tool_call.name for m in calls]}"
    assert returns, "Expected at least one ToolReturnMessage"
    # Match the return to the echo call and verify it succeeded with our payload.
    echo_return = next((m for m in returns if m.tool_call_id == echo_call.tool_call.tool_call_id), None)
    assert echo_return is not None, "No tool return found for echo call"
    assert echo_return.status == "success", f"Echo tool failed with status: {echo_return.status}"
    assert test_message in echo_return.tool_return, f"Expected '{test_message}' in tool return, got: {echo_return.tool_return}"
def test_mcp_add_tool(client: Letta, agent_state: AgentState):
    """
    Test that an agent can successfully call the add tool from the MCP server.
    """
    a, b = 42, 58
    expected_sum = a + b
    response = client.agents.messages.create(
        agent_id=agent_state.id,
        messages=[
            MessageCreateParam(
                role="user",
                content=f"Use the add tool to add {a} and {b}.",
            )
        ],
    )
    calls = [m for m in response.messages if isinstance(m, ToolCallMessage)]
    assert calls, "Expected at least one ToolCallMessage"
    add_call = next((m for m in calls if m.tool_call.name == "add"), None)
    assert add_call is not None, f"No add tool call found. Tool calls: {[m.tool_call.name for m in calls]}"
    returns = [m for m in response.messages if isinstance(m, ToolReturnMessage)]
    assert returns, "Expected at least one ToolReturnMessage"
    # Match the return to the add call and verify it produced the expected sum.
    add_return = next((m for m in returns if m.tool_call_id == add_call.tool_call.tool_call_id), None)
    assert add_return is not None, "No tool return found for add call"
    assert add_return.status == "success", f"Add tool failed with status: {add_return.status}"
    assert str(expected_sum) in add_return.tool_return, f"Expected '{expected_sum}' in tool return, got: {add_return.tool_return}"
def test_mcp_multiple_tools_in_sequence(client: Letta, agent_state: AgentState):
    """
    Test that an agent can call multiple MCP tools in sequence.
    """
    response = client.agents.messages.create(
        agent_id=agent_state.id,
        messages=[
            MessageCreateParam(
                role="user",
                content="First use the add tool to add 10 and 20. Then use the echo tool to echo back the result you got from the add tool.",
            )
        ],
    )
    calls = [m for m in response.messages if isinstance(m, ToolCallMessage)]
    assert len(calls) >= 2, f"Expected at least 2 tool calls, got {len(calls)}"
    # Both tools must appear among the invoked names.
    tool_names = [m.tool_call.name for m in calls]
    assert "add" in tool_names, f"add tool not called. Tools called: {tool_names}"
    assert "echo" in tool_names, f"echo tool not called. Tools called: {tool_names}"
    returns = [m for m in response.messages if isinstance(m, ToolReturnMessage)]
    assert len(returns) >= 2, f"Expected at least 2 tool returns, got {len(returns)}"
    # Every tool invocation must have succeeded.
    for tool_return in returns:
        assert tool_return.status == "success", f"Tool call failed with status: {tool_return.status}"
def test_mcp_server_listing(client: Letta, mcp_server_name: str, mock_mcp_server_config: StdioServerConfig):
    """
    Test that MCP server registration and tool listing works correctly.
    """
    # Register the MCP server
    mcp_server = client.mcp_servers.create(
        server_name=mcp_server_name,
        config={
            "command": mock_mcp_server_config.command,
            "args": mock_mcp_server_config.args,
            "env": mock_mcp_server_config.env if hasattr(mock_mcp_server_config, "env") else None,
            "mcp_server_type": "stdio",
        },
    )
    try:
        # The freshly registered server must appear in the listing.
        server_names = [s.server_name for s in client.mcp_servers.list()]
        assert mcp_server_name in server_names, f"MCP server {mcp_server_name} not found in {server_names}"
        # And it must expose the mock server's full tool set.
        mcp_tools = client.mcp_servers.tools.list(mcp_server_id=mcp_server.id)
        assert len(mcp_tools) > 0, "No tools found from MCP server"
        tool_names = [t.name for t in mcp_tools]
        for expected_tool in ("echo", "add", "multiply", "reverse_string"):
            assert expected_tool in tool_names, f"Expected tool '{expected_tool}' not found. Available: {tool_names}"
    finally:
        # Cleanup: deleting the server must also remove it from the listing.
        client.mcp_servers.delete(mcp_server_id=mcp_server.id)
        server_names = [s.server_name for s in client.mcp_servers.list()]
        assert mcp_server_name not in server_names, f"MCP server {mcp_server_name} should be deleted but is still in {server_names}"
def test_mcp_complex_schema_tool(client: Letta, mcp_server_name: str, mock_mcp_server_config: StdioServerConfig):
    """
    Test that an agent can successfully call a tool with complex nested schema.

    This tests the get_parameter_type_description tool which has:
    - Enum-like preset parameter
    - Optional string field
    - Optional nested object with arrays of objects
    """
    # Register the MCP server
    mcp_server = client.mcp_servers.create(
        server_name=mcp_server_name,
        config={
            "command": mock_mcp_server_config.command,
            "args": mock_mcp_server_config.args,
            "env": mock_mcp_server_config.env if hasattr(mock_mcp_server_config, "env") else None,
            "mcp_server_type": "stdio",
        },
    )
    try:
        # List available tools
        mcp_tools = client.mcp_servers.tools.list(mcp_server_id=mcp_server.id)
        # Find the complex schema tool
        complex_tool = next((t for t in mcp_tools if t.name == "get_parameter_type_description"), None)
        assert complex_tool is not None, f"get_parameter_type_description tool not found. Available: {[t.name for t in mcp_tools]}"
        # Create agent with the complex tool (use tool ID directly)
        agent = client.agents.create(
            name=f"test_complex_schema_{uuid.uuid4().hex[:8]}",
            include_base_tools=True,
            tool_ids=[complex_tool.id],
            memory_blocks=[
                {
                    "label": "human",
                    "value": "Name: Test User",
                },
                {
                    "label": "persona",
                    "value": "You are a helpful assistant that can use MCP tools with complex schemas.",
                },
            ],
            llm_config=LLMConfig.default_config(model_name="gpt-4o-mini"),
            embedding_config=EmbeddingConfig.default_config(provider="openai"),
            tags=["test_complex_schema"],
        )
        # Fix: delete the agent in a finally block so a failing assertion below no
        # longer leaks the agent (previously cleanup was skipped on test failure).
        try:
            # Test 1: Simple call with just preset
            response = client.agents.messages.create(
                agent_id=agent.id,
                messages=[
                    MessageCreateParam(
                        role="user", content='Use the get_parameter_type_description tool with preset "a" to get parameter information.'
                    )
                ],
            )
            tool_calls = [m for m in response.messages if isinstance(m, ToolCallMessage)]
            assert len(tool_calls) > 0, "Expected at least one ToolCallMessage"
            complex_call = next((m for m in tool_calls if m.tool_call.name == "get_parameter_type_description"), None)
            assert complex_call is not None, f"No get_parameter_type_description call found. Calls: {[m.tool_call.name for m in tool_calls]}"
            tool_returns = [m for m in response.messages if isinstance(m, ToolReturnMessage)]
            assert len(tool_returns) > 0, "Expected at least one ToolReturnMessage"
            complex_return = next((m for m in tool_returns if m.tool_call_id == complex_call.tool_call.tool_call_id), None)
            assert complex_return is not None, "No tool return found for complex schema call"
            assert complex_return.status == "success", f"Complex schema tool failed with status: {complex_return.status}"
            assert "Preset: a" in complex_return.tool_return, f"Expected 'Preset: a' in return, got: {complex_return.tool_return}"
            # Test 2: Complex call with nested data
            response = client.agents.messages.create(
                agent_id=agent.id,
                messages=[
                    MessageCreateParam(
                        role="user",
                        content="Use the get_parameter_type_description tool with these arguments: "
                        'preset="b", connected_service_descriptor="test-service", '
                        "and instantiation_data with isAbstract=true, isMultiplicity=false, "
                        'and one instantiation with doid="TEST123" and nodeFamilyId=42.',
                    )
                ],
            )
            tool_calls = [m for m in response.messages if isinstance(m, ToolCallMessage)]
            assert len(tool_calls) > 0, "Expected at least one ToolCallMessage for complex nested call"
            complex_call = next((m for m in tool_calls if m.tool_call.name == "get_parameter_type_description"), None)
            assert complex_call is not None, "No get_parameter_type_description call found for nested test"
            tool_returns = [m for m in response.messages if isinstance(m, ToolReturnMessage)]
            complex_return = next((m for m in tool_returns if m.tool_call_id == complex_call.tool_call.tool_call_id), None)
            assert complex_return is not None, "No tool return found for complex nested call"
            assert complex_return.status == "success", f"Complex nested call failed with status: {complex_return.status}"
            # Verify the response contains our complex data
            assert "Preset: b" in complex_return.tool_return, "Expected preset 'b' in response"
            assert "test-service" in complex_return.tool_return, "Expected service descriptor in response"
        finally:
            # Cleanup agent
            client.agents.delete(agent.id)
    finally:
        # Cleanup MCP server
        client.mcp_servers.delete(mcp_server_id=mcp_server.id)
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/integration_test_mcp.py",
"license": "Apache License 2.0",
"lines": 339,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/integration_test_send_message_v2.py | import asyncio
import itertools
import json
import logging
import os
import threading
import time
import uuid
from typing import Any, List, Tuple
import pytest
import requests
from dotenv import load_dotenv
from letta_client import AsyncLetta
from letta_client.types import (
AgentState,
AnthropicModelSettings,
JsonSchemaResponseFormat,
MessageCreateParam,
OpenAIModelSettings,
ToolReturnMessage,
)
from letta_client.types.agents import AssistantMessage, ReasoningMessage, Run, ToolCallMessage, UserMessage
from letta_client.types.agents.letta_streaming_response import LettaPing, LettaStopReason, LettaUsageStatistics
logger = logging.getLogger(__name__)
_background_tasks: set[asyncio.Task] = set()
# ------------------------------
# Helper Functions and Constants
# ------------------------------
# Model-settings config files exercised by default (one JSON file per model under test).
all_configs = [
    "openai-gpt-4o-mini.json",
    "openai-gpt-4.1.json",
    "openai-gpt-5.json",
    "claude-4-5-sonnet.json",
    "gemini-2.5-pro.json",
    "zai-glm-5.json",
]
def get_model_config(filename: str, model_settings_dir: str = "tests/model_settings") -> Tuple[str, dict]:
    """Load a model_settings file and return the handle and settings dict."""
    config_path = os.path.join(model_settings_dir, filename)
    with open(config_path, "r") as fh:
        data = json.load(fh)
    # Missing "model_settings" falls back to an empty dict.
    return data["handle"], data.get("model_settings", {})
# Allow narrowing the test matrix to a single config via the LLM_CONFIG_FILE env var.
requested = os.getenv("LLM_CONFIG_FILE")
filenames = [requested] if requested else all_configs
# (handle, model_settings) pairs parametrized across the test module.
TESTED_MODEL_CONFIGS: List[Tuple[str, dict]] = [get_model_config(fn) for fn in filenames]
def roll_dice(num_sides: int) -> int:
    """
    Returns a random number between 1 and num_sides.
    Args:
        num_sides (int): The number of sides on the die.
    Returns:
        int: A random integer between 1 and num_sides, representing the die roll.
    """
    # NOTE: this docstring is uploaded as the tool description via
    # client.tools.upsert_from_function, so it is kept verbatim.
    from random import randint

    return randint(1, num_sides)
# Shared OTID stamped onto every outgoing user message so assertions can
# recognize the persisted user message when reading back from the DB.
USER_MESSAGE_OTID = str(uuid.uuid4())
# Exact phrase the agent is told to echo back in the greeting tests.
USER_MESSAGE_RESPONSE: str = "Teamwork makes the dream work"
# Prompt that forces a plain-text reply (no tool use).
USER_MESSAGE_FORCE_REPLY: List[MessageCreateParam] = [
    MessageCreateParam(
        role="user",
        content=f"This is an automated test message. Reply with the message '{USER_MESSAGE_RESPONSE}'.",
        otid=USER_MESSAGE_OTID,
    )
]
# Prompt that forces a single roll_dice tool call.
USER_MESSAGE_ROLL_DICE: List[MessageCreateParam] = [
    MessageCreateParam(
        role="user",
        content="This is an automated test message. Call the roll_dice tool with 16 sides and reply back to me with the outcome.",
        otid=USER_MESSAGE_OTID,
    )
]
# Prompt that asks for exactly three roll_dice calls issued in parallel.
USER_MESSAGE_PARALLEL_TOOL_CALL: List[MessageCreateParam] = [
    MessageCreateParam(
        role="user",
        content=(
            "This is an automated test message. Please call the roll_dice tool EXACTLY three times in parallel - no more, no less. "
            "Call it with num_sides=6, num_sides=12, and num_sides=20. Make all three calls at the same time in a single response."
        ),
        otid=USER_MESSAGE_OTID,
    )
]
def assert_greeting_response(
    messages: List[Any],
    model_handle: str,
    model_settings: dict,
    streaming: bool = False,
    token_streaming: bool = False,
    from_db: bool = False,
) -> None:
    """
    Asserts that the messages list follows the expected sequence:
    ReasoningMessage -> AssistantMessage.

    Args:
        messages: Messages returned by the API (or loaded from the DB).
        model_handle: Model handle under test; drives reasoning expectations.
        model_settings: Model settings dict; drives reasoning expectations.
        streaming: Expect trailing LettaStopReason + LettaUsageStatistics.
        token_streaming: Skip exact content checks (chunks may be partial).
        from_db: Expect the persisted UserMessage as the first item.
    """
    # Filter out LettaPing messages which are keep-alive messages for SSE streams
    messages = [
        msg for msg in messages if not (isinstance(msg, LettaPing) or (hasattr(msg, "message_type") and msg.message_type == "ping"))
    ]
    expected_message_count_min, expected_message_count_max = get_expected_message_count_range(
        model_handle, model_settings, streaming=streaming, from_db=from_db
    )
    assert expected_message_count_min <= len(messages) <= expected_message_count_max
    # User message if loaded from db
    index = 0
    if from_db:
        assert isinstance(messages[index], UserMessage)
        assert messages[index].otid == USER_MESSAGE_OTID
        index += 1
    # Reasoning message if reasoning enabled
    # otid_suffix tracks the expected trailing digit of each message's OTID
    # within the current step.
    otid_suffix = 0
    try:
        if is_reasoner_model(model_handle, model_settings):
            assert isinstance(messages[index], ReasoningMessage)
            assert messages[index].otid and messages[index].otid[-1] == str(otid_suffix)
            index += 1
            otid_suffix += 1
    except Exception:
        # Reasoning is non-deterministic, so don't throw if missing
        pass
    # Assistant message
    assert isinstance(messages[index], AssistantMessage)
    if not token_streaming:
        # Content check only when the full message is available (not partial tokens).
        assert "teamwork" in messages[index].content.lower()
    assert messages[index].otid and messages[index].otid[-1] == str(otid_suffix)
    index += 1
    otid_suffix += 1
    # Stop reason and usage statistics if streaming
    if streaming:
        assert isinstance(messages[index], LettaStopReason)
        assert messages[index].stop_reason == "end_turn"
        index += 1
        assert isinstance(messages[index], LettaUsageStatistics)
        assert messages[index].prompt_tokens > 0
        assert messages[index].completion_tokens > 0
        assert messages[index].total_tokens > 0
        assert messages[index].step_count > 0
def assert_tool_call_response(
    messages: List[Any],
    model_handle: str,
    model_settings: dict,
    streaming: bool = False,
    from_db: bool = False,
    with_cancellation: bool = False,
) -> None:
    """
    Asserts that the messages list follows the expected sequence:
    ReasoningMessage -> ToolCallMessage -> ToolReturnMessage ->
    ReasoningMessage -> AssistantMessage.

    When ``with_cancellation`` is set, the sequence may be truncated at any
    point; each early-exit branch below handles one possible truncation.
    """
    # Filter out LettaPing messages which are keep-alive messages for SSE streams
    messages = [
        msg for msg in messages if not (isinstance(msg, LettaPing) or (hasattr(msg, "message_type") and msg.message_type == "ping"))
    ]
    # If cancellation happened and no messages were persisted (early cancellation), return early
    if with_cancellation and len(messages) == 0:
        return
    if not with_cancellation:
        expected_message_count_min, expected_message_count_max = get_expected_message_count_range(
            model_handle, model_settings, tool_call=True, streaming=streaming, from_db=from_db
        )
        assert expected_message_count_min <= len(messages) <= expected_message_count_max
    # User message if loaded from db
    index = 0
    if from_db:
        assert isinstance(messages[index], UserMessage)
        assert messages[index].otid == USER_MESSAGE_OTID
        index += 1
    # If cancellation happened after user message but before any response, return early
    if with_cancellation and index >= len(messages):
        return
    # Reasoning message if reasoning enabled
    # otid_suffix tracks the expected trailing digit of each message's OTID
    # within the current step.
    otid_suffix = 0
    try:
        if is_reasoner_model(model_handle, model_settings):
            assert isinstance(messages[index], ReasoningMessage)
            assert messages[index].otid and messages[index].otid[-1] == str(otid_suffix)
            index += 1
            otid_suffix += 1
    except Exception:
        # Reasoning is non-deterministic, so don't throw if missing
        pass
    # Special case for models that can generate an extra AssistantMessage before tool call
    # (claude-sonnet-4-5, opus-4.1, zai, and self-hosted models like ollama/qwen3 with thinking)
    is_extra_assistant_model = (
        "claude-sonnet-4-5-20250929" in model_handle
        or "claude-opus-4-1" in model_handle
        or model_settings.get("provider_type") == "zai"
        or model_handle.startswith(("ollama/", "vllm/", "lmstudio_openai/"))
    )
    if is_extra_assistant_model and index < len(messages) and isinstance(messages[index], AssistantMessage):
        # Skip the extra AssistantMessage and move to the next message
        index += 1
        otid_suffix += 1
    # Tool call message (may be skipped if cancelled early)
    if with_cancellation and index < len(messages) and isinstance(messages[index], AssistantMessage):
        # If cancelled early, model might respond with text instead of making tool call
        assert "roll" in messages[index].content.lower() or "die" in messages[index].content.lower()
        return  # Skip tool call assertions for early cancellation
    # If cancellation happens before tool call, we might get LettaStopReason directly
    if with_cancellation and index < len(messages) and isinstance(messages[index], LettaStopReason):
        assert messages[index].stop_reason == "cancelled"
        return  # Skip remaining assertions for very early cancellation
    assert isinstance(messages[index], ToolCallMessage)
    assert messages[index].otid and messages[index].otid[-1] == str(otid_suffix)
    index += 1
    # If cancellation happens before tool return, we might get LettaStopReason directly
    if with_cancellation and index < len(messages) and isinstance(messages[index], LettaStopReason):
        assert messages[index].stop_reason == "cancelled"
        return  # Skip remaining assertions for very early cancellation
    # Tool return message
    # OTID suffix restarts at 0 for the tool-return part of the step.
    otid_suffix = 0
    assert isinstance(messages[index], ToolReturnMessage)
    assert messages[index].otid and messages[index].otid[-1] == str(otid_suffix)
    index += 1
    # Messages from second agent step if request has not been cancelled
    if not with_cancellation:
        # Reasoning message if reasoning enabled
        otid_suffix = 0
        try:
            if is_reasoner_model(model_handle, model_settings):
                assert isinstance(messages[index], ReasoningMessage)
                assert messages[index].otid and messages[index].otid[-1] == str(otid_suffix)
                index += 1
                otid_suffix += 1
        except Exception:
            # Reasoning is non-deterministic, so don't throw if missing
            pass
        # Assistant message
        assert isinstance(messages[index], AssistantMessage)
        assert messages[index].otid and messages[index].otid[-1] == str(otid_suffix)
        index += 1
    # Stop reason and usage statistics if streaming
    if streaming:
        assert isinstance(messages[index], LettaStopReason)
        assert messages[index].stop_reason == ("cancelled" if with_cancellation else "end_turn")
        index += 1
        assert isinstance(messages[index], LettaUsageStatistics)
        assert messages[index].prompt_tokens > 0
        assert messages[index].completion_tokens > 0
        assert messages[index].total_tokens > 0
        assert messages[index].step_count > 0
async def accumulate_chunks(chunks, verify_token_streaming: bool = False) -> List[Any]:
    """
    Accumulates chunks into a list of messages.
    Handles both async iterators and raw SSE strings.

    Consecutive chunks with the same message_type are merged into a single
    message by concatenating their ``content``; a change of message_type
    flushes the in-progress message into the result list.

    NOTE(review): ``verify_token_streaming`` is currently unused in this body —
    confirm whether it can be dropped or is kept for signature compatibility.
    """
    messages = []
    current_message = None
    prev_message_type = None
    # Handle raw SSE string from runs.messages.stream()
    if isinstance(chunks, str):
        import json

        for line in chunks.strip().split("\n"):
            # Only process data lines, and skip the SSE terminator sentinel.
            if line.startswith("data: ") and line != "data: [DONE]":
                try:
                    data = json.loads(line[6:])  # Remove 'data: ' prefix
                    if "message_type" in data:
                        # Create proper message type objects
                        message_type = data.get("message_type")
                        if message_type == "assistant_message":
                            from letta_client.types.agents import AssistantMessage

                            chunk = AssistantMessage(**data)
                        elif message_type == "reasoning_message":
                            from letta_client.types.agents import ReasoningMessage

                            chunk = ReasoningMessage(**data)
                        elif message_type == "tool_call_message":
                            from letta_client.types.agents import ToolCallMessage

                            chunk = ToolCallMessage(**data)
                        elif message_type == "tool_return_message":
                            from letta_client.types import ToolReturnMessage

                            chunk = ToolReturnMessage(**data)
                        elif message_type == "user_message":
                            from letta_client.types.agents import UserMessage

                            chunk = UserMessage(**data)
                        elif message_type == "stop_reason":
                            from letta_client.types.agents.letta_streaming_response import LettaStopReason

                            chunk = LettaStopReason(**data)
                        elif message_type == "usage_statistics":
                            from letta_client.types.agents.letta_streaming_response import LettaUsageStatistics

                            chunk = LettaUsageStatistics(**data)
                        else:
                            chunk = type("Chunk", (), data)()  # Fallback for unknown types
                        current_message_type = chunk.message_type
                        if prev_message_type != current_message_type:
                            # Type changed: flush the previous message and start a new one.
                            if current_message is not None:
                                messages.append(current_message)
                            current_message = chunk
                        else:
                            # Accumulate content for same message type
                            if hasattr(current_message, "content") and hasattr(chunk, "content"):
                                current_message.content += chunk.content
                        prev_message_type = current_message_type
                except json.JSONDecodeError:
                    # Malformed SSE payloads are skipped rather than failing the test.
                    continue
        # Flush the final in-progress message.
        if current_message is not None:
            messages.append(current_message)
    else:
        # Handle async iterator from agents.messages.stream()
        async for chunk in chunks:
            current_message_type = chunk.message_type
            if prev_message_type != current_message_type:
                if current_message is not None:
                    messages.append(current_message)
                current_message = chunk
            else:
                # Accumulate content for same message type
                if hasattr(current_message, "content") and hasattr(chunk, "content"):
                    current_message.content += chunk.content
            prev_message_type = current_message_type
        # Flush the final in-progress message.
        if current_message is not None:
            messages.append(current_message)
    return messages
async def cancel_run_after_delay(client: AsyncLetta, agent_id: str, delay: float = 0.5):
    """Wait ``delay`` seconds, then cancel the agent's in-flight message run."""
    await asyncio.sleep(delay)
    cancel = client.agents.messages.cancel
    await cancel(agent_id=agent_id)
async def wait_for_run_completion(client: AsyncLetta, run_id: str, timeout: float = 30.0, interval: float = 0.5) -> Run:
    """Poll a run until it reaches a terminal state and return it.

    Args:
        client: Async Letta client used to retrieve the run.
        run_id: ID of the run to poll.
        timeout: Max seconds to wait before raising TimeoutError.
        interval: Seconds to sleep between polls.

    Returns:
        The run once its status is "completed" or "cancelled".

    Raises:
        RuntimeError: If the run ends with status "failed".
        TimeoutError: If the run does not finish within ``timeout`` seconds.

    Fix: the original used blocking ``time.sleep`` inside this coroutine,
    which stalls the whole event loop (starving concurrent tasks such as the
    delayed-cancellation coroutine); replaced with ``await asyncio.sleep``.
    """
    start = time.time()
    while True:
        run = await client.runs.retrieve(run_id)
        if run.status == "completed":
            return run
        if run.status == "cancelled":
            # Grace period so server-side cancellation side effects finish persisting.
            await asyncio.sleep(5)
            return run
        if run.status == "failed":
            raise RuntimeError(f"Run {run_id} did not complete: status = {run.status}")
        if time.time() - start > timeout:
            raise TimeoutError(f"Run {run_id} did not complete within {timeout} seconds (last status: {run.status})")
        await asyncio.sleep(interval)
def get_expected_message_count_range(
    model_handle: str, model_settings: dict, tool_call: bool = False, streaming: bool = False, from_db: bool = False
) -> Tuple[int, int]:
    """Return the (min, max) expected number of messages for a model config.

    The minimum counts messages that are always present (assistant message,
    plus tool call/return, persisted user message, and streaming stop/usage
    frames as requested). The maximum additionally allows optional messages
    that depend on the model: reasoning messages for reasoner models, the
    extra AssistantMessage some providers emit before a tool call
    (opus-4.1, Z.ai, self-hosted ollama/vllm/lmstudio backends), etc.
    A range is used because reasoning output is non-deterministic.
    """
    guaranteed = 1  # the final assistant message is always present
    optional = 0  # messages that may or may not appear

    if is_reasoner_model(model_handle, model_settings):
        # A reasoning message may precede the assistant message.
        optional += 1
        if tool_call:
            provider = model_settings.get("provider_type")
            anthropic_thinking = provider == "anthropic" and model_settings.get("thinking", {}).get("type") == "enabled"
            sonnet45_or_opus41 = anthropic_thinking and (
                "claude-sonnet-4-5" in model_handle or "claude-opus-4-1" in model_handle
            )
            # sonnet 4.5 / opus 4.1 emit a reasoning message before the final
            # assistant message, as do the non-Anthropic native reasoners.
            if sonnet45_or_opus41 or not anthropic_thinking:
                optional += 1
            # opus 4.1 may also emit an extra AssistantMessage before the tool call.
            if "claude-opus-4-1" in model_handle:
                optional += 1
    # Z.ai models pair an AssistantMessage with every ReasoningMessage.
    if model_settings.get("provider_type") == "zai":
        optional += 1
    # Self-hosted backends may emit an extra AssistantMessage carrying thinking content.
    if model_handle.startswith(("ollama/", "vllm/", "lmstudio/", "lmstudio_openai/")):
        optional += 1
    if tool_call:
        guaranteed += 2  # tool call + tool return
    if from_db:
        guaranteed += 1  # the persisted user message
    if streaming:
        guaranteed += 2  # stop reason + usage statistics
    return guaranteed, guaranteed + optional
def is_reasoner_model(model_handle: str, model_settings: dict) -> bool:
    """Check if the model is a reasoning model based on its handle and settings.

    Dispatches on provider_type: OpenAI reasoners require high reasoning
    effort; Anthropic requires thinking enabled; Google requires
    include_thoughts; Z.ai always reasons; Bedrock depends on the Claude
    family in the handle. Any other (or missing) provider is not a reasoner.
    """
    provider = model_settings.get("provider_type")
    if provider == "openai":
        # Reasoning-capable OpenAI families, but only with high reasoning effort.
        openai_families = ("gpt-5", "o1", "o3", "o4-mini", "gpt-4.1")
        return (
            any(family in model_handle for family in openai_families)
            and model_settings.get("reasoning", {}).get("reasoning_effort") == "high"
        )
    if provider == "anthropic":
        # Anthropic models count only when extended thinking is enabled.
        return model_settings.get("thinking", {}).get("type") == "enabled"
    if provider in ("google_vertex", "google_ai"):
        # Google models count only when thoughts are included in output.
        return model_settings.get("thinking_config", {}).get("include_thoughts") is True
    if provider == "zai":
        # Z.ai models output reasoning by default.
        return True
    if provider == "bedrock":
        # Bedrock reasoning depends on the hosted Claude family.
        bedrock_families = ("claude-3-7-sonnet", "claude-sonnet-4", "claude-opus-4", "claude-haiku-4-5")
        return any(family in model_handle for family in bedrock_families)
    return False
# ------------------------------
# Fixtures
# ------------------------------
@pytest.fixture(scope="module")
def server_url() -> str:
    """
    Provides the URL for the Letta server.
    If LETTA_SERVER_URL is not set, starts the server in a background thread
    and polls until it's accepting connections.
    """

    def _run_server() -> None:
        # Load .env first so the in-process server sees provider credentials.
        load_dotenv()
        from letta.server.rest_api.app import start_server

        start_server(debug=True)

    url: str = os.getenv("LETTA_SERVER_URL", "http://localhost:8283")
    if not os.getenv("LETTA_SERVER_URL"):
        # Daemon thread: the server dies with the test process; no teardown needed.
        thread = threading.Thread(target=_run_server, daemon=True)
        thread.start()
        # Poll until the server is up (or timeout)
        timeout_seconds = 60
        deadline = time.time() + timeout_seconds
        while time.time() < deadline:
            try:
                resp = requests.get(url + "/v1/health")
                # Any non-5xx response means the app is serving requests.
                if resp.status_code < 500:
                    break
            except requests.exceptions.RequestException:
                pass
            time.sleep(0.1)
        else:
            # while/else: reached only when the deadline expired without a break.
            raise RuntimeError(f"Could not reach {url} within {timeout_seconds}s")
    return url
@pytest.fixture(scope="function")
async def client(server_url: str) -> AsyncLetta:
    """Yield a fresh asynchronous Letta REST client bound to the test server."""
    yield AsyncLetta(base_url=server_url)
@pytest.fixture(scope="function")
async def agent_state(client: AsyncLetta) -> AgentState:
    """
    Creates and returns an agent state for testing with a pre-configured agent.
    The agent is named 'supervisor' and is configured with base tools and the roll_dice tool.
    """
    # Register roll_dice as a server-side tool (idempotent upsert).
    dice_tool = await client.tools.upsert_from_function(func=roll_dice)
    # Start from the first tested config; individual tests switch models later.
    initial_model = TESTED_MODEL_CONFIGS[0][0] if TESTED_MODEL_CONFIGS else "openai/gpt-4o"
    initial_embedding = os.getenv("EMBEDDING_HANDLE", "openai/text-embedding-3-small")
    agent_state_instance = await client.agents.create(
        agent_type="letta_v1_agent",
        name="test_agent",
        include_base_tools=False,
        tool_ids=[dice_tool.id],
        model=initial_model,
        embedding=initial_embedding,
        tags=["test"],
    )
    yield agent_state_instance
    # Teardown: remove the agent after each test.
    await client.agents.delete(agent_state_instance.id)
# ------------------------------
# Test Cases
# ------------------------------
@pytest.mark.parametrize(
    "model_config",
    TESTED_MODEL_CONFIGS,
    ids=[handle for handle, _ in TESTED_MODEL_CONFIGS],
)
@pytest.mark.parametrize("send_type", ["step", "stream_steps", "stream_tokens", "stream_tokens_background", "async"])
@pytest.mark.asyncio(loop_scope="function")
async def test_greeting(
    disable_e2b_api_key: Any,
    client: AsyncLetta,
    agent_state: AgentState,
    model_config: Tuple[str, dict],
    send_type: str,
) -> None:
    """Send a forced-reply greeting via every send mode and validate both the
    live response and the messages persisted to the DB."""
    model_handle, model_settings = model_config
    # Remember the latest persisted message so the DB read-back below only
    # fetches messages created by this test.
    last_message_page = await client.agents.messages.list(agent_id=agent_state.id, limit=1)
    last_message = last_message_page.items[0] if last_message_page.items else None
    agent_state = await client.agents.update(agent_id=agent_state.id, model=model_handle, model_settings=model_settings)
    if send_type == "step":
        # Blocking request: full response is returned at once.
        response = await client.agents.messages.create(
            agent_id=agent_state.id,
            messages=USER_MESSAGE_FORCE_REPLY,
        )
        messages = response.messages
        run_id = next((msg.run_id for msg in messages if hasattr(msg, "run_id")), None)
    elif send_type == "async":
        # Fire-and-poll: create a run, wait for it, then list its messages.
        run = await client.agents.messages.create_async(
            agent_id=agent_state.id,
            messages=USER_MESSAGE_FORCE_REPLY,
        )
        run = await wait_for_run_completion(client, run.id, timeout=120.0)
        messages_page = await client.runs.messages.list(run_id=run.id)
        messages = [m for m in messages_page.items if m.message_type != "user_message"]
        run_id = run.id
    else:
        # Streaming variants: step-level, token-level, or background token streaming.
        response = await client.agents.messages.stream(
            agent_id=agent_state.id,
            messages=USER_MESSAGE_FORCE_REPLY,
            stream_tokens=(send_type == "stream_tokens"),
            background=(send_type == "stream_tokens_background"),
        )
        messages = await accumulate_chunks(response)
        run_id = next((msg.run_id for msg in messages if hasattr(msg, "run_id")), None)
    # If run_id is not in messages (e.g., due to early cancellation), get the most recent run
    if run_id is None:
        runs = await client.runs.list(agent_ids=[agent_state.id])
        run_id = runs.items[0].id if runs.items else None
    assert_greeting_response(
        messages, model_handle, model_settings, streaming=("stream" in send_type), token_streaming=(send_type == "stream_tokens")
    )
    if "background" in send_type:
        # Background runs can be replayed from the start; verify the replayed stream too.
        response = await client.runs.messages.stream(run_id=run_id, starting_after=0)
        messages = await accumulate_chunks(response)
        assert_greeting_response(
            messages, model_handle, model_settings, streaming=("stream" in send_type), token_streaming=(send_type == "stream_tokens")
        )
    # Verify the persisted view matches expectations as well.
    messages_from_db_page = await client.agents.messages.list(agent_id=agent_state.id, after=last_message.id if last_message else None)
    messages_from_db = messages_from_db_page.items
    assert_greeting_response(messages_from_db, model_handle, model_settings, from_db=True)
    assert run_id is not None
    run = await client.runs.retrieve(run_id=run_id)
    assert run.status == "completed"
@pytest.mark.parametrize(
    "model_config",
    TESTED_MODEL_CONFIGS,
    ids=[handle for handle, _ in TESTED_MODEL_CONFIGS],
)
@pytest.mark.parametrize("send_type", ["step", "stream_steps", "stream_tokens", "stream_tokens_background", "async"])
@pytest.mark.asyncio(loop_scope="function")
async def test_parallel_tool_calls(
    disable_e2b_api_key: Any,
    client: AsyncLetta,
    agent_state: AgentState,
    model_config: Tuple[str, dict],
    send_type: str,
) -> None:
    """Verify true parallel tool calling: with parallel_tool_calls=True the
    agent must issue exactly three roll_dice calls inside a SINGLE
    ToolCallMessage, and all three returns must be valid dice rolls.

    Fixes vs. original: removed a verbatim-duplicated vllm skip block,
    removed two redundant function-local ``import json`` statements (json is
    imported at module level), and dropped unused enumerate indices.
    """
    model_handle, model_settings = model_config
    provider_type = model_settings.get("provider_type", "")
    # --- Provider/model gating -------------------------------------------
    if provider_type not in ["anthropic", "openai", "google_ai", "google_vertex", "bedrock"]:
        pytest.skip("Parallel tool calling test only applies to Anthropic, OpenAI, Gemini, and Bedrock models.")
    if "gpt-5" in model_handle or "o3" in model_handle:
        pytest.skip("GPT-5 takes too long to test, o3 is bad at this task.")
    # Skip Gemini models due to issues with parallel tool calling
    if provider_type in ["google_ai", "google_vertex"]:
        pytest.skip("Gemini models are flaky for this test so we disable them for now")
    if model_handle.startswith("lmstudio"):
        pytest.skip("LMStudio runs on CPU and times out on parallel tool call tests")
    if model_handle.startswith("vllm"):
        pytest.skip("vLLM Qwen3 tool call parsers incompatible with streaming parallel tool calls")
    # Update model_settings to enable parallel tool calling
    modified_model_settings = model_settings.copy()
    modified_model_settings["parallel_tool_calls"] = True
    agent_state = await client.agents.update(
        agent_id=agent_state.id,
        model=model_handle,
        model_settings=modified_model_settings,
    )
    # --- Send the prompt via the selected transport -----------------------
    if send_type == "step":
        await client.agents.messages.create(
            agent_id=agent_state.id,
            messages=USER_MESSAGE_PARALLEL_TOOL_CALL,
        )
    elif send_type == "async":
        run = await client.agents.messages.create_async(
            agent_id=agent_state.id,
            messages=USER_MESSAGE_PARALLEL_TOOL_CALL,
        )
        await wait_for_run_completion(client, run.id, timeout=120.0)
    else:
        response = await client.agents.messages.stream(
            agent_id=agent_state.id,
            messages=USER_MESSAGE_PARALLEL_TOOL_CALL,
            stream_tokens=(send_type == "stream_tokens"),
            background=(send_type == "stream_tokens_background"),
        )
        await accumulate_chunks(response)
    # validate parallel tool call behavior in preserved messages
    preserved_messages_page = await client.agents.messages.list(agent_id=agent_state.id)
    preserved_messages = preserved_messages_page.items
    # collect all ToolCallMessage and ToolReturnMessage instances
    tool_call_messages = []
    tool_return_messages = []
    for msg in preserved_messages:
        if isinstance(msg, ToolCallMessage):
            tool_call_messages.append(msg)
        elif isinstance(msg, ToolReturnMessage):
            tool_return_messages.append(msg)
    # Check if tool calls are grouped in a single message (parallel) or separate messages (sequential)
    total_tool_calls = 0
    for tcm in tool_call_messages:
        if hasattr(tcm, "tool_calls") and tcm.tool_calls:
            num_calls = len(tcm.tool_calls) if isinstance(tcm.tool_calls, list) else 1
            total_tool_calls += num_calls
        elif hasattr(tcm, "tool_call"):
            total_tool_calls += 1
    # Check tool returns structure
    total_tool_returns = 0
    for trm in tool_return_messages:
        if hasattr(trm, "tool_returns") and trm.tool_returns:
            num_returns = len(trm.tool_returns) if isinstance(trm.tool_returns, list) else 1
            total_tool_returns += num_returns
        elif hasattr(trm, "tool_return"):
            total_tool_returns += 1
    # CRITICAL: For TRUE parallel tool calling with letta_v1_agent, there should be exactly ONE ToolCallMessage
    # containing multiple tool calls, not multiple ToolCallMessages
    # Verify we have exactly 3 tool calls total
    assert total_tool_calls == 3, f"Expected exactly 3 tool calls total, got {total_tool_calls}"
    assert total_tool_returns == 3, f"Expected exactly 3 tool returns total, got {total_tool_returns}"
    # Check if we have true parallel tool calling
    is_parallel = False
    if len(tool_call_messages) == 1:
        # Check if the single message contains multiple tool calls
        tcm = tool_call_messages[0]
        if hasattr(tcm, "tool_calls") and isinstance(tcm.tool_calls, list) and len(tcm.tool_calls) == 3:
            is_parallel = True
    # IMPORTANT: Assert that parallel tool calling is actually working
    # This test should FAIL if parallel tool calling is not working properly
    assert is_parallel, (
        f"Parallel tool calling is NOT working for {provider_type}! "
        f"Got {len(tool_call_messages)} ToolCallMessage(s) instead of 1 with 3 parallel calls. "
        f"When using letta_v1_agent with parallel_tool_calls=True, all tool calls should be in a single message."
    )
    # Collect all tool calls and their details for validation
    all_tool_calls = []
    tool_call_ids = set()
    num_sides_by_id = {}
    for tcm in tool_call_messages:
        if hasattr(tcm, "tool_calls") and tcm.tool_calls and isinstance(tcm.tool_calls, list):
            # Message has multiple tool calls
            for tc in tcm.tool_calls:
                all_tool_calls.append(tc)
                tool_call_ids.add(tc.tool_call_id)
                # Parse arguments (json is imported at module level)
                args = json.loads(tc.arguments)
                num_sides_by_id[tc.tool_call_id] = int(args["num_sides"])
        elif hasattr(tcm, "tool_call") and tcm.tool_call:
            # Message has single tool call
            tc = tcm.tool_call
            all_tool_calls.append(tc)
            tool_call_ids.add(tc.tool_call_id)
            # Parse arguments
            args = json.loads(tc.arguments)
            num_sides_by_id[tc.tool_call_id] = int(args["num_sides"])
    # Verify each tool call
    for tc in all_tool_calls:
        assert tc.name == "roll_dice", f"Expected tool call name 'roll_dice', got '{tc.name}'"
        # Support Anthropic (toolu_), OpenAI (call_), and Gemini (UUID) tool call ID formats
        # Gemini uses UUID format which could start with any alphanumeric character
        valid_id_format = (
            tc.tool_call_id.startswith("toolu_")
            or tc.tool_call_id.startswith("call_")
            or (len(tc.tool_call_id) > 0 and tc.tool_call_id[0].isalnum())  # UUID format for Gemini
        )
        assert valid_id_format, f"Unexpected tool call ID format: {tc.tool_call_id}"
    # Collect all tool returns for validation
    all_tool_returns = []
    for trm in tool_return_messages:
        if hasattr(trm, "tool_returns") and trm.tool_returns and isinstance(trm.tool_returns, list):
            # Message has multiple tool returns
            all_tool_returns.extend(trm.tool_returns)
        elif hasattr(trm, "tool_return") and trm.tool_return:
            # Message has single tool return (create a mock object if needed)
            # Since ToolReturnMessage might not have individual tool_return, check the structure
            pass
    # If all_tool_returns is empty, it means returns are structured differently
    # Let's check the actual structure
    if not all_tool_returns:
        print("Note: Tool returns may be structured differently than expected")
        # For now, just verify we got the right number of messages
        assert len(tool_return_messages) > 0, "No tool return messages found"
    # Verify tool returns if we have them in the expected format
    for tr in all_tool_returns:
        assert tr.type == "tool", f"Tool return type should be 'tool', got '{tr.type}'"
        assert tr.status == "success", f"Tool return status should be 'success', got '{tr.status}'"
        assert tr.tool_call_id in tool_call_ids, f"Tool return ID '{tr.tool_call_id}' not found in tool call IDs: {tool_call_ids}"
        # Verify the dice roll result is within the valid range
        dice_result = int(tr.tool_return)
        expected_max = num_sides_by_id[tr.tool_call_id]
        assert 1 <= dice_result <= expected_max, (
            f"Dice roll result {dice_result} is not within valid range 1-{expected_max} for tool call {tr.tool_call_id}"
        )
@pytest.mark.parametrize(
    "model_config",
    TESTED_MODEL_CONFIGS,
    ids=[handle for handle, _ in TESTED_MODEL_CONFIGS],
)
@pytest.mark.parametrize(
    ["send_type", "cancellation"],
    list(
        itertools.product(
            ["step", "stream_steps", "stream_tokens", "stream_tokens_background", "async"], ["with_cancellation", "no_cancellation"]
        )
    ),
    ids=[
        f"{s}-{c}"
        for s, c in itertools.product(
            ["step", "stream_steps", "stream_tokens", "stream_tokens_background", "async"], ["with_cancellation", "no_cancellation"]
        )
    ],
)
@pytest.mark.asyncio(loop_scope="function")
async def test_tool_call(
    disable_e2b_api_key: Any,
    client: AsyncLetta,
    agent_state: AgentState,
    model_config: Tuple[str, dict],
    send_type: str,
    cancellation: str,
) -> None:
    """Send a prompt that forces a roll_dice tool call via every send mode,
    optionally racing a delayed cancellation, and validate both the live
    response and the DB-persisted messages plus the run's final status."""
    model_handle, model_settings = model_config
    # Skip models with OTID mismatch issues between ToolCallMessage and ToolReturnMessage
    if "gpt-5" in model_handle or "claude-sonnet-4-5-20250929" in model_handle or "claude-opus-4-1" in model_handle:
        pytest.skip(f"Skipping {model_handle} due to OTID chain issue - messages receive incorrect OTID suffixes")
    # Remember the latest persisted message so the DB read-back below only
    # fetches messages created by this test.
    last_message_page = await client.agents.messages.list(agent_id=agent_state.id, limit=1)
    last_message = last_message_page.items[0] if last_message_page.items else None
    agent_state = await client.agents.update(agent_id=agent_state.id, model=model_handle, model_settings=model_settings)
    if cancellation == "with_cancellation":
        # Race a cancellation against the request; a longer delay for gpt-5
        # gives the run time to actually start before being cancelled.
        delay = 5 if "gpt-5" in model_handle else 0.5
        _cancellation_task = asyncio.create_task(cancel_run_after_delay(client, agent_state.id, delay=delay))
        # Keep a strong reference so the task is not garbage-collected mid-flight.
        _background_tasks.add(_cancellation_task)
        _cancellation_task.add_done_callback(_background_tasks.discard)
    if send_type == "step":
        response = await client.agents.messages.create(
            agent_id=agent_state.id,
            messages=USER_MESSAGE_ROLL_DICE,
        )
        messages = response.messages
        run_id = next((msg.run_id for msg in messages if hasattr(msg, "run_id")), None)
    elif send_type == "async":
        run = await client.agents.messages.create_async(
            agent_id=agent_state.id,
            messages=USER_MESSAGE_ROLL_DICE,
        )
        run = await wait_for_run_completion(client, run.id, timeout=120.0)
        messages_page = await client.runs.messages.list(run_id=run.id)
        messages = [m for m in messages_page.items if m.message_type != "user_message"]
        run_id = run.id
    else:
        response = await client.agents.messages.stream(
            agent_id=agent_state.id,
            messages=USER_MESSAGE_ROLL_DICE,
            stream_tokens=(send_type == "stream_tokens"),
            background=(send_type == "stream_tokens_background"),
        )
        messages = await accumulate_chunks(response)
        run_id = next((msg.run_id for msg in messages if hasattr(msg, "run_id")), None)
    # If run_id is not in messages (e.g., due to early cancellation), get the most recent run
    if run_id is None:
        runs = await client.runs.list(agent_ids=[agent_state.id])
        run_id = runs.items[0].id if runs.items else None
    assert_tool_call_response(
        messages, model_handle, model_settings, streaming=("stream" in send_type), with_cancellation=(cancellation == "with_cancellation")
    )
    if "background" in send_type:
        # Background runs can be replayed from the start; verify the replayed stream too.
        response = await client.runs.messages.stream(run_id=run_id, starting_after=0)
        messages = await accumulate_chunks(response)
        assert_tool_call_response(
            messages,
            model_handle,
            model_settings,
            streaming=("stream" in send_type),
            with_cancellation=(cancellation == "with_cancellation"),
        )
    # Verify the persisted view matches expectations as well.
    messages_from_db_page = await client.agents.messages.list(agent_id=agent_state.id, after=last_message.id if last_message else None)
    messages_from_db = messages_from_db_page.items
    assert_tool_call_response(
        messages_from_db, model_handle, model_settings, from_db=True, with_cancellation=(cancellation == "with_cancellation")
    )
    assert run_id is not None
    run = await client.runs.retrieve(run_id=run_id)
    assert run.status == ("cancelled" if cancellation == "with_cancellation" else "completed")
@pytest.mark.parametrize(
    "model_config",
    TESTED_MODEL_CONFIGS,
    ids=[handle for handle, _ in TESTED_MODEL_CONFIGS],
)
@pytest.mark.asyncio(loop_scope="function")
async def test_conversation_streaming_raw_http(
    disable_e2b_api_key: Any,
    client: AsyncLetta,
    server_url: str,
    agent_state: AgentState,
    model_config: Tuple[str, dict],
) -> None:
    """
    Test conversation-based streaming functionality using raw HTTP requests.
    This test verifies that:
    1. A conversation can be created for an agent
    2. Messages can be sent to the conversation via streaming
    3. The streaming response contains the expected message types
    4. Messages are properly persisted in the conversation
    Uses raw HTTP requests instead of SDK until SDK is regenerated with conversations support.
    """
    # Local import: httpx is only needed by the raw-HTTP tests in this module.
    import httpx
    model_handle, model_settings = model_config
    # Point the agent at the parametrized model before exercising conversations.
    agent_state = await client.agents.update(agent_id=agent_state.id, model=model_handle, model_settings=model_settings)
    async with httpx.AsyncClient(base_url=server_url, timeout=60.0) as http_client:
        # Create a conversation for the agent
        create_response = await http_client.post(
            "/v1/conversations/",
            params={"agent_id": agent_state.id},
            json={},
        )
        assert create_response.status_code == 200, f"Failed to create conversation: {create_response.text}"
        conversation = create_response.json()
        assert conversation["id"] is not None
        assert conversation["agent_id"] == agent_state.id
        # Send a message to the conversation using streaming
        stream_response = await http_client.post(
            f"/v1/conversations/{conversation['id']}/messages",
            json={
                "messages": [{"role": "user", "content": f"Reply with the message '{USER_MESSAGE_RESPONSE}'."}],
                "stream_tokens": True,
            },
        )
        assert stream_response.status_code == 200, f"Failed to send message: {stream_response.text}"
        # Parse SSE response and accumulate messages
        # NOTE(review): the SSE body is fully buffered here (no httpx streaming mode)
        # and the raw text is handed to accumulate_chunks — confirm that helper
        # accepts a str as well as an async stream.
        messages = await accumulate_chunks(stream_response.text)
        print("MESSAGES:", messages)
        # Verify the response contains expected message types
        assert_greeting_response(messages, model_handle, model_settings, streaming=True, token_streaming=True)
        # Verify the conversation can be retrieved
        retrieve_response = await http_client.get(f"/v1/conversations/{conversation['id']}")
        assert retrieve_response.status_code == 200, f"Failed to retrieve conversation: {retrieve_response.text}"
        retrieved_conversation = retrieve_response.json()
        assert retrieved_conversation["id"] == conversation["id"]
        print("RETRIEVED CONVERSATION:", retrieved_conversation)
        # Verify conversations can be listed for the agent
        list_response = await http_client.get("/v1/conversations/", params={"agent_id": agent_state.id})
        assert list_response.status_code == 200, f"Failed to list conversations: {list_response.text}"
        conversations_list = list_response.json()
        assert any(c["id"] == conversation["id"] for c in conversations_list)
        # Verify messages can be listed from the conversation
        messages_response = await http_client.get(f"/v1/conversations/{conversation['id']}/messages")
        assert messages_response.status_code == 200, f"Failed to list conversation messages: {messages_response.text}"
        conversation_messages = messages_response.json()
        print("CONVERSATION MESSAGES:", conversation_messages)
        # Verify we have at least the user message and assistant message
        assert len(conversation_messages) >= 2, f"Expected at least 2 messages, got {len(conversation_messages)}"
        # Check message types are present
        message_types = [msg.get("message_type") for msg in conversation_messages]
        assert "user_message" in message_types, f"Expected user_message in {message_types}"
        assert "assistant_message" in message_types, f"Expected assistant_message in {message_types}"
@pytest.mark.parametrize(
    "model_config",
    TESTED_MODEL_CONFIGS,
    ids=[handle for handle, _ in TESTED_MODEL_CONFIGS],
)
@pytest.mark.asyncio(loop_scope="function")
async def test_conversation_non_streaming_raw_http(
    disable_e2b_api_key: Any,
    client: AsyncLetta,
    server_url: str,
    agent_state: AgentState,
    model_config: Tuple[str, dict],
) -> None:
    """
    Test conversation-based non-streaming functionality using raw HTTP requests.
    This test verifies that:
    1. A conversation can be created for an agent
    2. Messages can be sent to the conversation without streaming (streaming=False)
    3. The JSON response contains the expected message types
    """
    # Local import: httpx is only needed by the raw-HTTP tests in this module.
    import httpx
    model_handle, model_settings = model_config
    # Point the agent at the parametrized model before exercising conversations.
    agent_state = await client.agents.update(agent_id=agent_state.id, model=model_handle, model_settings=model_settings)
    async with httpx.AsyncClient(base_url=server_url, timeout=60.0) as http_client:
        # Create a conversation for the agent
        create_response = await http_client.post(
            "/v1/conversations/",
            params={"agent_id": agent_state.id},
            json={},
        )
        assert create_response.status_code == 200, f"Failed to create conversation: {create_response.text}"
        conversation = create_response.json()
        assert conversation["id"] is not None
        assert conversation["agent_id"] == agent_state.id
        # Send a message to the conversation using NON-streaming mode
        # NOTE(review): this request uses a "streaming" field while the streaming
        # test uses "stream_tokens" — confirm the endpoint accepts both names.
        response = await http_client.post(
            f"/v1/conversations/{conversation['id']}/messages",
            json={
                "messages": [{"role": "user", "content": f"Reply with the message '{USER_MESSAGE_RESPONSE}'."}],
                "streaming": False,  # Non-streaming mode
            },
        )
        assert response.status_code == 200, f"Failed to send message: {response.text}"
        # Parse JSON response (LettaResponse)
        result = response.json()
        assert "messages" in result, f"Expected 'messages' in response: {result}"
        messages = result["messages"]
        # Verify the response contains expected message types
        assert len(messages) > 0, "Expected at least one message in response"
        message_types = [msg.get("message_type") for msg in messages]
        assert "assistant_message" in message_types, f"Expected assistant_message in {message_types}"
@pytest.mark.skipif(
    os.getenv("LLM_CONFIG_FILE", "").startswith(("ollama", "vllm", "lmstudio")),
    reason="Structured output not supported on self-hosted providers in CI",
)
@pytest.mark.parametrize(
    "model_handle,provider_type",
    [
        ("openai/gpt-4o", "openai"),
        ("openai/gpt-5", "openai"),
        # ("anthropic/claude-sonnet-4-5-20250929", "anthropic"),
    ],
)
@pytest.mark.asyncio(loop_scope="function")
async def test_json_schema_response_format(
    disable_e2b_api_key: Any,
    client: AsyncLetta,
    model_handle: str,
    provider_type: str,
) -> None:
    """Verify JsonSchemaResponseFormat yields schema-conforming assistant replies.

    Covers: creating an agent whose model_settings carry a json_schema
    response_format, sending it a question, and checking that the assistant
    message parses as JSON with every field the schema requires.
    """
    # Structured-output schema the model response must conform to.
    capital_schema = {
        "name": "capital_response",
        "strict": True,
        "schema": {
            "type": "object",
            "properties": {
                "response": {"type": "string", "description": "The answer to the question"},
                "justification": {"type": "string", "description": "Why this is the answer"},
            },
            "required": ["response", "justification"],
            "additionalProperties": False,
        },
    }
    # Pick the provider-specific settings class; both accept the same response_format.
    settings_cls = OpenAIModelSettings if provider_type == "openai" else AnthropicModelSettings
    model_settings = settings_cls(
        provider_type=provider_type,
        response_format=JsonSchemaResponseFormat(type="json_schema", json_schema=capital_schema),
    )
    # Create agent with structured output configuration.
    agent_state = await client.agents.create(
        name=f"test_structured_agent_{model_handle.replace('/', '_')}",
        model=model_handle,
        model_settings=model_settings,
        embedding="openai/text-embedding-3-small",
        agent_type="letta_v1_agent",
    )
    try:
        message_response = await client.agents.messages.create(
            agent_id=agent_state.id, messages=[MessageCreateParam(role="user", content="What is the capital of France?")]
        )
        assert len(message_response.messages) > 0, "Should have received at least one message"
        # Locate the first assistant message in the reply.
        assistant_message = next((m for m in message_response.messages if isinstance(m, AssistantMessage)), None)
        assert assistant_message is not None, "Should have received an AssistantMessage"
        # Content must be valid JSON carrying both required schema fields.
        payload = json.loads(assistant_message.content)
        assert "response" in payload, "JSON should contain 'response' field"
        assert "justification" in payload, "JSON should contain 'justification' field"
        assert isinstance(payload["response"], str), "'response' field should be a string"
        assert isinstance(payload["justification"], str), "'justification' field should be a string"
        assert len(payload["response"]) > 0, "'response' field should not be empty"
        assert len(payload["justification"]) > 0, "'justification' field should not be empty"
    finally:
        # Cleanup
        await client.agents.delete(agent_state.id)
# Large memory block to exceed OpenAI's 1024 token caching threshold.
# This ensures the system prompt is large enough for OpenAI to cache it.
# (The base persona below is concatenated with four repeated "Section N" blobs.)
_LARGE_PERSONA_BLOCK = """
You are an advanced AI assistant with extensive knowledge across multiple domains.
# Core Capabilities
## Technical Knowledge
- Software Engineering: Expert in Python, JavaScript, TypeScript, Go, Rust, and many other languages
- System Design: Deep understanding of distributed systems, microservices, and cloud architecture
- DevOps: Proficient in Docker, Kubernetes, CI/CD pipelines, and infrastructure as code
- Databases: Experience with SQL (PostgreSQL, MySQL) and NoSQL (MongoDB, Redis, Cassandra) databases
- Machine Learning: Knowledge of neural networks, transformers, and modern ML frameworks
## Problem Solving Approach
When tackling problems, you follow a structured methodology:
1. Understand the requirements thoroughly
2. Break down complex problems into manageable components
3. Consider multiple solution approaches
4. Evaluate trade-offs between different options
5. Implement solutions with clean, maintainable code
6. Test thoroughly and iterate based on feedback
## Communication Style
- Clear and concise explanations
- Use examples and analogies when helpful
- Adapt technical depth to the audience
- Ask clarifying questions when requirements are ambiguous
- Provide context and rationale for recommendations
# Domain Expertise
## Web Development
You have deep knowledge of:
- Frontend: React, Vue, Angular, Next.js, modern CSS frameworks
- Backend: Node.js, Express, FastAPI, Django, Flask
- API Design: REST, GraphQL, gRPC
- Authentication: OAuth, JWT, session management
- Performance: Caching strategies, CDNs, lazy loading
## Data Engineering
You understand:
- ETL pipelines and data transformation
- Data warehousing concepts (Snowflake, BigQuery, Redshift)
- Stream processing (Kafka, Kinesis)
- Data modeling and schema design
- Data quality and validation
## Cloud Platforms
You're familiar with:
- AWS: EC2, S3, Lambda, RDS, DynamoDB, CloudFormation
- GCP: Compute Engine, Cloud Storage, Cloud Functions, BigQuery
- Azure: Virtual Machines, Blob Storage, Azure Functions
- Serverless architectures and best practices
- Cost optimization strategies
## Security
You consider:
- Common vulnerabilities (OWASP Top 10)
- Secure coding practices
- Encryption and key management
- Access control and authorization patterns
- Security audit and compliance requirements
# Interaction Principles
## Helpfulness
- Provide actionable guidance
- Share relevant resources and documentation
- Offer multiple approaches when appropriate
- Point out potential pitfalls and edge cases
## Accuracy
- Verify information before sharing
- Acknowledge uncertainty when appropriate
- Correct mistakes promptly
- Stay up-to-date with best practices
## Efficiency
- Get to the point quickly
- Avoid unnecessary verbosity
- Focus on what's most relevant
- Provide code examples when they clarify concepts
""" + "\n\n".join(
    [
        f"Section {i + 1}: "
        + """
You have deep expertise in software development, including but not limited to:
- Programming languages: Python, JavaScript, TypeScript, Java, C++, Rust, Go, Swift, Kotlin, Ruby, PHP, Scala
- Web frameworks: React, Vue, Angular, Django, Flask, FastAPI, Express, Next.js, Nuxt, SvelteKit, Remix, Astro
- Databases: PostgreSQL, MySQL, MongoDB, Redis, Cassandra, DynamoDB, ElasticSearch, Neo4j, InfluxDB, TimescaleDB
- Cloud platforms: AWS (EC2, S3, Lambda, ECS, EKS, RDS), GCP (Compute Engine, Cloud Run, GKE), Azure (VMs, Functions, AKS)
- DevOps tools: Docker, Kubernetes, Terraform, Ansible, Jenkins, GitHub Actions, GitLab CI, CircleCI, ArgoCD
- Testing frameworks: pytest, Jest, Mocha, JUnit, unittest, Cypress, Playwright, Selenium, TestNG, RSpec
- Architecture patterns: Microservices, Event-driven, Serverless, Monolithic, CQRS, Event Sourcing, Hexagonal
- API design: REST, GraphQL, gRPC, WebSockets, Server-Sent Events, tRPC, JSON-RPC
"""
        # Repeat the section four times to guarantee the prompt exceeds 1024 tokens.
        for i in range(4)
    ]
)
# Models that support prompt_cache_retention="24h":
# gpt-4.1, gpt-5 family (but not gpt-5-mini).
_PROMPT_CACHE_RETENTION_PREFIXES = ("gpt-4.1", "gpt-5")
# "gpt-5-mini" also starts with "gpt-5", so a bare prefix match would wrongly
# include it; list it explicitly to honour the support matrix above.
_PROMPT_CACHE_EXCLUDED_MODELS = ("gpt-5-mini",)


def _supports_prompt_cache_retention(handle: str, settings: dict) -> bool:
    """Return True if *handle* names an OpenAI model that supports prompt_cache_retention="24h"."""
    if settings.get("provider_type") != "openai":
        return False
    model_name = handle.split("/")[-1]
    if model_name in _PROMPT_CACHE_EXCLUDED_MODELS:
        return False
    # str.startswith accepts a tuple of prefixes, covering the whole family in one call.
    return model_name.startswith(_PROMPT_CACHE_RETENTION_PREFIXES)


PROMPT_CACHE_MODEL_CONFIGS: List[Tuple[str, dict]] = [
    (handle, settings) for handle, settings in TESTED_MODEL_CONFIGS if _supports_prompt_cache_retention(handle, settings)
]
@pytest.mark.skip(reason="the prompt caching is flaky")
@pytest.mark.parametrize(
    "model_config",
    PROMPT_CACHE_MODEL_CONFIGS,
    ids=[handle for handle, _ in PROMPT_CACHE_MODEL_CONFIGS],
)
@pytest.mark.asyncio(loop_scope="function")
async def test_openai_prompt_cache_integration(
    disable_e2b_api_key: Any,
    client: AsyncLetta,
    model_config: Tuple[str, dict],
) -> None:
    """
    Integration test verifying OpenAI prompt caching works end-to-end.
    Tests models that support prompt_cache_retention="24h".
    Validates that this field is accepted by OpenAI's API and produce cache hits.
    Strategy:
    1. Create an agent with a large persona block (>1024 tokens, OpenAI's caching threshold)
    2. Send message 1 -> primes the cache (cached_input_tokens should be 0 or small)
    3. Send message 2 -> should hit the cache (cached_input_tokens > 0)
    We rely on OpenAI's default prefix-hash routing (no prompt_cache_key) since each
    agent has a unique system prompt, providing natural cache affinity.
    """
    from letta_client.types import CreateBlockParam
    model_handle, model_settings = model_config
    # NOTE(review): include_base_tools=False presumably keeps the prompt prefix
    # deterministic across the two requests — confirm base tools would otherwise
    # perturb the system prompt.
    agent = await client.agents.create(
        name=f"prompt-cache-test-{uuid.uuid4().hex[:8]}",
        agent_type="letta_v1_agent",
        model=model_handle,
        model_settings=model_settings,
        embedding="openai/text-embedding-3-small",
        include_base_tools=False,
        memory_blocks=[
            CreateBlockParam(
                label="persona",
                value=_LARGE_PERSONA_BLOCK,
            )
        ],
    )
    try:
        # Message 1: Prime the cache. First request typically has cached_input_tokens=0.
        response1 = await client.agents.messages.create(
            agent_id=agent.id,
            messages=[MessageCreateParam(role="user", content="Hello! Please introduce yourself briefly.")],
        )
        assert response1.usage is not None, "First message should return usage data"
        assert response1.usage.prompt_tokens > 0, "First message should have prompt_tokens > 0"
        logger.info(
            f"[{model_handle}] Message 1 usage: "
            f"prompt={response1.usage.prompt_tokens}, "
            f"completion={response1.usage.completion_tokens}, "
            f"cached_input={response1.usage.cached_input_tokens}"
        )
        # Verify we exceeded the 1024 token threshold for OpenAI caching
        total_input_tokens = response1.usage.prompt_tokens + (response1.usage.cached_input_tokens or 0)
        assert total_input_tokens >= 1024, f"Total input tokens ({total_input_tokens}) must be >= 1024 for OpenAI caching to activate"
        # Message 2: Should hit the cache thanks to prefix-hash routing.
        response2 = await client.agents.messages.create(
            agent_id=agent.id,
            messages=[MessageCreateParam(role="user", content="What are your main areas of expertise?")],
        )
        assert response2.usage is not None, "Second message should return usage data"
        assert response2.usage.prompt_tokens > 0, "Second message should have prompt_tokens > 0"
        logger.info(
            f"[{model_handle}] Message 2 usage: "
            f"prompt={response2.usage.prompt_tokens}, "
            f"completion={response2.usage.completion_tokens}, "
            f"cached_input={response2.usage.cached_input_tokens}"
        )
        # CRITICAL: The second message should show cached_input_tokens > 0.
        # This proves that prompt_cache_retention is being sent correctly
        # and OpenAI is caching the prompt prefix.
        cached_tokens = response2.usage.cached_input_tokens
        assert cached_tokens is not None and cached_tokens > 0, (
            f"[{model_handle}] Expected cached_input_tokens > 0 on second message, got {cached_tokens}. "
            "This means prompt caching is not working (cache miss occurred)."
        )
        # Cache hit ratio should be significant (most of the system prompt should be cached)
        total_input_msg2 = response2.usage.prompt_tokens + (response2.usage.cached_input_tokens or 0)
        cache_hit_ratio = cached_tokens / total_input_msg2 if total_input_msg2 > 0 else 0
        logger.info(f"[{model_handle}] Cache hit ratio: {cache_hit_ratio:.2%}")
        assert cache_hit_ratio >= 0.20, (
            f"[{model_handle}] Expected cache hit ratio >= 20%, got {cache_hit_ratio:.2%}. The large persona block should be mostly cached."
        )
    finally:
        # Always delete the agent, even when an assertion above failed.
        await client.agents.delete(agent.id)
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/integration_test_send_message_v2.py",
"license": "Apache License 2.0",
"lines": 1197,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/managers/test_agent_manager.py | import time
import uuid
from datetime import datetime, timezone
from unittest.mock import patch
import pytest
# Import shared fixtures and constants from conftest
from conftest import (
CREATE_DELAY_SQLITE,
DEFAULT_EMBEDDING_CONFIG,
USING_SQLITE,
)
from sqlalchemy import func, select
from letta.constants import (
LOCAL_ONLY_MULTI_AGENT_TOOLS,
MULTI_AGENT_TOOLS,
)
from letta.errors import LettaAgentNotFoundError
from letta.orm.file import FileContent as FileContentModel
from letta.schemas.agent import CreateAgent, InternalTemplateAgentCreate, UpdateAgent
from letta.schemas.block import CreateBlock
from letta.schemas.embedding_config import EmbeddingConfig
from letta.schemas.enums import (
MessageRole,
)
from letta.schemas.letta_message_content import TextContent
from letta.schemas.letta_stop_reason import StopReasonType
from letta.schemas.llm_config import LLMConfig
from letta.schemas.message import MessageCreate
from letta.schemas.source import Source as PydanticSource
from letta.schemas.tool_rule import InitToolRule
from letta.server.db import db_registry
from letta.server.server import SyncServer
from letta.services.helpers.agent_manager_helper import calculate_base_tools, calculate_multi_agent_tools, validate_agent_exists_async
from letta.services.summarizer.summarizer_config import CompactionSettings
from letta.settings import settings
from letta.utils import calculate_file_defaults_based_on_context_window
from tests.helpers.utils import comprehensive_agent_checks, validate_context_window_overview
# ======================================================================================================================
# Helper Functions
# ======================================================================================================================
async def _count_file_content_rows(session, file_id: str) -> int:
    """Return the number of FileContent rows stored for *file_id*."""
    count_stmt = (
        select(func.count())
        .select_from(FileContentModel)
        .where(FileContentModel.file_id == file_id)
    )
    return (await session.execute(count_stmt)).scalar_one()
# ======================================================================================================================
# AgentManager Tests - Basic
# ======================================================================================================================
@pytest.mark.asyncio
async def test_validate_agent_exists_async(server: SyncServer, comprehensive_test_agent_fixture, default_user):
    """Test the validate_agent_exists_async helper function.

    The helper must be a no-op for an existing agent and raise
    LettaAgentNotFoundError for an unknown agent id. The asyncio marker was
    missing here even though every other async test in this module carries it.
    """
    created_agent, _ = comprehensive_test_agent_fixture
    # test with valid agent
    async with db_registry.async_session() as session:
        # should not raise exception
        await validate_agent_exists_async(session, created_agent.id, default_user)
    # test with non-existent agent
    async with db_registry.async_session() as session:
        with pytest.raises(LettaAgentNotFoundError):
            await validate_agent_exists_async(session, "non-existent-id", default_user)
@pytest.mark.asyncio
async def test_create_get_list_agent(server: SyncServer, comprehensive_test_agent_fixture, default_user):
    """Exercise the full create / get / list-by-name / list / delete agent lifecycle."""
    created_agent, create_agent_request = comprehensive_test_agent_fixture
    # The freshly created agent must satisfy every field check.
    comprehensive_agent_checks(created_agent, create_agent_request, actor=default_user)

    # Fetch by id and re-run the same checks.
    fetched = await server.agent_manager.get_agent_by_id_async(agent_id=created_agent.id, actor=default_user)
    comprehensive_agent_checks(fetched, create_agent_request, actor=default_user)

    # Filtering by name must return the same agent.
    by_name = await server.agent_manager.list_agents_async(name=created_agent.name, actor=default_user)
    comprehensive_agent_checks(by_name[0], create_agent_request, actor=default_user)

    # An unfiltered listing contains exactly the one agent we created.
    all_agents = await server.agent_manager.list_agents_async(actor=default_user)
    assert len(all_agents) == 1
    comprehensive_agent_checks(all_agents[0], create_agent_request, actor=default_user)

    # After deletion the listing is empty again.
    await server.agent_manager.delete_agent_async(fetched.id, default_user)
    remaining = await server.agent_manager.list_agents_async(actor=default_user)
    assert len(remaining) == 0
@pytest.mark.asyncio
async def test_create_agent_include_base_tools(server: SyncServer, default_user):
    """Test that include_base_tools=True attaches exactly the v2 base tool set."""
    # Upsert base tools
    await server.tool_manager.upsert_base_tools_async(actor=default_user)
    memory_blocks = [CreateBlock(label="human", value="TestUser"), CreateBlock(label="persona", value="I am a test assistant")]
    create_agent_request = CreateAgent(
        name="test_default_source_agent",
        agent_type="memgpt_v2_agent",
        system="test system",
        memory_blocks=memory_blocks,
        llm_config=LLMConfig.default_config("gpt-4o-mini"),
        embedding_config=EmbeddingConfig.default_config(provider="openai"),
        include_base_tools=True,
    )
    # Create the agent
    created_agent = await server.agent_manager.create_agent_async(
        create_agent_request,
        actor=default_user,
    )
    # Assert the tools exist
    tool_names = [t.name for t in created_agent.tools]
    # memgpt_v2_agent -> compare against the v2 base tool set.
    expected_tools = calculate_base_tools(is_v2=True)
    assert sorted(tool_names) == sorted(expected_tools)
@pytest.mark.asyncio
async def test_create_agent_base_tool_rules_excluded_providers(server: SyncServer, default_user):
    """Test that include_base_tool_rules is overridden to False for excluded providers.

    NOTE(review): the request below sets include_base_tool_rules=False explicitly,
    so as written this verifies the explicit opt-out path rather than a
    server-side override for excluded providers — confirm the intent.
    """
    # Upsert base tools
    await server.tool_manager.upsert_base_tools_async(actor=default_user)
    memory_blocks = [CreateBlock(label="human", value="TestUser"), CreateBlock(label="persona", value="I am a test assistant")]
    # Test with excluded provider (openai)
    create_agent_request = CreateAgent(
        name="test_excluded_provider_agent",
        agent_type="memgpt_v2_agent",
        system="test system",
        memory_blocks=memory_blocks,
        llm_config=LLMConfig.default_config("gpt-4o-mini"),  # This has model_endpoint_type="openai"
        embedding_config=EmbeddingConfig.default_config(provider="openai"),
        include_base_tool_rules=False,
    )
    # Create the agent
    created_agent = await server.agent_manager.create_agent_async(
        create_agent_request,
        actor=default_user,
    )
    # Assert that no base tool rules were added. (Stray debug print removed.)
    assert created_agent.tool_rules is None or len(created_agent.tool_rules) == 0
@pytest.mark.asyncio
async def test_create_agent_base_tool_rules_non_excluded_providers(server: SyncServer, default_user):
    """Test that include_base_tool_rules is NOT overridden for non-excluded providers."""
    # Base tools must exist before rules can be attached to them.
    await server.tool_manager.upsert_base_tools_async(actor=default_user)
    blocks = [CreateBlock(label="human", value="TestUser"), CreateBlock(label="persona", value="I am a test assistant")]
    # "together" is not an excluded provider, so the flag should survive creation.
    together_config = LLMConfig(
        model="llama-3.1-8b-instruct",
        model_endpoint_type="together",  # Model doesn't match EXCLUDE_MODEL_KEYWORDS_FROM_BASE_TOOL_RULES
        model_endpoint="https://api.together.xyz",
        context_window=8192,
    )
    request = CreateAgent(
        name="test_non_excluded_provider_agent",
        agent_type="memgpt_v2_agent",
        system="test system",
        memory_blocks=blocks,
        llm_config=together_config,
        embedding_config=EmbeddingConfig.default_config(provider="openai"),
        include_base_tool_rules=True,  # Should remain True
    )
    agent = await server.agent_manager.create_agent_async(request, actor=default_user)
    # The flag stayed True, so base tool rules must have been attached.
    assert agent.tool_rules is not None
    assert len(agent.tool_rules) > 0
@pytest.mark.asyncio
async def test_create_agent_with_model_handle_uses_correct_llm_config(server: SyncServer, default_user):
    """When CreateAgent.model is provided, ensure the correct handle is used to resolve llm_config.
    This verifies that the model handle passed by the client is forwarded into
    SyncServer.get_llm_config_from_handle_async and that the resulting AgentState
    carries an llm_config with the same handle.
    """
    # Track the arguments used to resolve the LLM config
    captured_kwargs: dict = {}
    async def fake_get_llm_config_from_handle_async(self, actor, **kwargs):  # type: ignore[override]
        # Stub resolver: records the kwargs the server passes and echoes the
        # requested handle back in a minimal LLMConfig.
        from letta.schemas.llm_config import LLMConfig as PydanticLLMConfig
        captured_kwargs.update(kwargs)
        handle = kwargs["handle"]
        # Return a minimal but valid LLMConfig with the requested handle
        return PydanticLLMConfig(
            model="test-model-name",
            model_endpoint_type="openai",
            model_endpoint="https://api.openai.com/v1",
            context_window=8192,
            handle=handle,
        )
    model_handle = "openai/gpt-4o-mini"
    # Patch SyncServer.get_llm_config_from_handle_async so we don't depend on provider DB state
    with patch.object(SyncServer, "get_llm_config_from_handle_async", new=fake_get_llm_config_from_handle_async):
        created_agent = await server.create_agent_async(
            request=CreateAgent(
                name="agent_with_model_handle",
                agent_type="memgpt_v2_agent",
                # Use new model handle field instead of llm_config
                model=model_handle,
                embedding_config=EmbeddingConfig.default_config(provider="openai"),
                memory_blocks=[],
                include_base_tools=False,
            ),
            actor=default_user,
        )
    # Ensure we resolved the config using the provided handle
    assert captured_kwargs["handle"] == model_handle
    # And that the resulting agent's llm_config reflects the same handle
    assert created_agent.llm_config is not None
    assert created_agent.llm_config.handle == model_handle
@pytest.mark.asyncio
async def test_compaction_settings_model_uses_separate_llm_config_for_summarization(server: SyncServer, default_user):
    """When compaction_settings.model differs from the agent model, use a separate llm_config.

    This test exercises the summarization helpers directly to avoid external
    provider dependencies. It verifies that CompactionSettings.model controls
    the LLMConfig used for the summarizer request.

    (A previously-present list of PydanticMessage objects was constructed and
    never used — dead code removed, along with the now-unused local imports.)
    """
    from letta.schemas.agent import AgentState as PydanticAgentState
    from letta.schemas.enums import AgentType
    from letta.schemas.memory import Memory
    from letta.schemas.model import OpenAIModelSettings, OpenAIReasoning
    from letta.services.summarizer.compact import build_summarizer_llm_config
    await server.init_async(init_with_default_org_and_user=True)
    # Base agent LLM config
    base_llm_config = LLMConfig.default_config("gpt-4o-mini")
    assert base_llm_config.model == "gpt-4o-mini"
    # Configure compaction to use a different summarizer model (!= default openai summarizer model)
    summarizer_handle = "openai/gpt-5-nano"
    summarizer_model_settings = OpenAIModelSettings(
        max_output_tokens=1234,
        temperature=0.1,
        reasoning=OpenAIReasoning(reasoning_effort="high"),
        response_format=None,
    )
    summarizer_config = CompactionSettings(
        model=summarizer_handle,
        model_settings=summarizer_model_settings,
        prompt="You are a summarizer.",
        clip_chars=2000,
        mode="all",
        sliding_window_percentage=0.3,
    )
    # Build a minimal AgentState for LettaAgentV3 using the base llm_config
    agent_state = PydanticAgentState(
        id="agent-test-compaction-llm-config",
        name="test-agent",
        system="You are a helpful assistant.",
        agent_type=AgentType.letta_v1_agent,
        llm_config=base_llm_config,
        embedding_config=EmbeddingConfig.default_config(provider="openai"),
        model=None,
        embedding=None,
        model_settings=None,
        compaction_settings=summarizer_config,
        response_format=None,
        description=None,
        metadata=None,
        memory=Memory(blocks=[]),
        blocks=[],
        tools=[],
        sources=[],
        tags=[],
        tool_exec_environment_variables=[],
        secrets=[],
        project_id=None,
        template_id=None,
        base_template_id=None,
        deployment_id=None,
        entity_id=None,
        identity_ids=[],
        identities=[],
        message_ids=[],
        message_buffer_autoclear=False,
        enable_sleeptime=None,
        multi_agent_group=None,
        managed_group=None,
        last_run_completion=None,
        last_run_duration_ms=None,
        last_stop_reason=None,
        timezone="UTC",
        max_files_open=None,
        per_file_view_window_char_limit=None,
        hidden=None,
        created_by_id=None,
        last_updated_by_id=None,
        created_at=None,
        updated_at=None,
        tool_rules=None,
    )
    # Use the shared function to derive summarizer llm_config
    summarizer_llm_config = await build_summarizer_llm_config(
        agent_llm_config=agent_state.llm_config,
        summarizer_config=agent_state.compaction_settings,
        actor=default_user,
    )
    # Agent model remains the base model
    assert agent_state.llm_config.model == "gpt-4o-mini"
    # Summarizer config should use the handle/model from compaction_settings
    assert summarizer_llm_config.handle == summarizer_handle
    assert summarizer_llm_config.model == "gpt-5-nano"
    # And should reflect overrides from model_settings
    assert summarizer_llm_config.max_tokens == 1234
    assert summarizer_llm_config.temperature == 0.1
@pytest.mark.asyncio
async def test_create_agent_sets_default_compaction_model_anthropic(server: SyncServer, default_user):
    """When no compaction_settings provided for Anthropic agent, default haiku model should be set."""
    from letta.schemas.agent import CreateAgent
    from letta.schemas.enums import ProviderType
    from letta.services.summarizer.summarizer_config import get_default_summarizer_model
    await server.init_async(init_with_default_org_and_user=True)
    # Base tools must be present before agent creation.
    await server.tool_manager.upsert_base_tools_async(actor=default_user)
    # Deliberately omit compaction_settings so the server applies its default.
    request = CreateAgent(
        name="test-default-compaction-anthropic",
        model="anthropic/claude-sonnet-4-5-20250929",
    )
    agent = await server.create_agent_async(request, actor=default_user)
    # The provider-specific default summarizer model (haiku) must be filled in.
    compaction = agent.compaction_settings
    assert compaction is not None
    assert compaction.model == get_default_summarizer_model(ProviderType.anthropic)
@pytest.mark.asyncio
async def test_create_agent_sets_default_compaction_model_openai(server: SyncServer, default_user):
    """When no compaction_settings provided for OpenAI agent, default gpt-5-mini model should be set."""
    from letta.schemas.agent import CreateAgent
    await server.init_async(init_with_default_org_and_user=True)
    # Base tools must be present before agent creation.
    await server.tool_manager.upsert_base_tools_async(actor=default_user)
    # Deliberately omit compaction_settings so the server applies its default.
    request = CreateAgent(
        name="test-default-compaction-openai",
        model="openai/gpt-4o-mini",
    )
    agent = await server.create_agent_async(request, actor=default_user)
    # OpenAI agents default to the gpt-5-mini summarizer model.
    compaction = agent.compaction_settings
    assert compaction is not None
    assert compaction.model == "openai/gpt-5-mini"
@pytest.mark.asyncio
async def test_create_agent_preserves_compaction_settings_when_model_set(server: SyncServer, default_user):
    """When compaction_settings.model is already set, it should not be overwritten."""
    from letta.schemas.agent import CreateAgent
    from letta.schemas.model import OpenAIModelSettings, OpenAIReasoning
    from letta.services.summarizer.summarizer_config import CompactionSettings

    await server.init_async(init_with_default_org_and_user=True)
    # Base tools must exist before the agent is created.
    await server.tool_manager.upsert_base_tools_async(actor=default_user)

    summarizer_handle = "gpt-4o-mini"
    explicit_settings = CompactionSettings(
        model=summarizer_handle,
        model_settings=OpenAIModelSettings(max_output_tokens=1234, temperature=0.1, reasoning=OpenAIReasoning(reasoning_effort="high")),
        prompt="You are a summarizer.",
        clip_chars=2000,
        mode="all",
        sliding_window_percentage=0.3,
    )
    # Create the agent with an explicitly chosen compaction model.
    created = await server.create_agent_async(
        CreateAgent(
            name="test-preserve-compaction",
            model="openai/gpt-5.2-codex",
            compaction_settings=explicit_settings,
        ),
        actor=default_user,
    )

    # The custom model and mode must survive; no default override applies.
    assert created.compaction_settings is not None
    assert created.compaction_settings.model == summarizer_handle
    assert created.compaction_settings.mode == "all"
@pytest.mark.asyncio
async def test_calculate_multi_agent_tools(set_letta_environment):
    """Test that calculate_multi_agent_tools excludes local-only tools in production."""
    result = calculate_multi_agent_tools()
    local_only = set(LOCAL_ONLY_MULTI_AGENT_TOOLS)
    everything = set(MULTI_AGENT_TOOLS)
    if settings.environment == "prod":
        # Production drops the local-only subset entirely.
        assert result == everything - local_only, "Production should exclude local-only multi-agent tools"
        assert not (local_only & result), "Production should not include local-only tools"
        # Spot-check individual tools.
        assert "send_message_to_agent_and_wait_for_reply" in result, "Standard multi-agent tools should be in production"
        assert "send_message_to_agents_matching_tags" in result, "Standard multi-agent tools should be in production"
        assert "send_message_to_agent_async" not in result, "Local-only tools should not be in production"
    else:
        # Every multi-agent tool, local-only included, is available outside prod.
        assert result == everything, "Non-production should include all multi-agent tools"
        assert local_only.issubset(result), "Non-production should include local-only tools"
        # Spot-check individual tools.
        assert "send_message_to_agent_and_wait_for_reply" in result, "All multi-agent tools should be in non-production"
        assert "send_message_to_agents_matching_tags" in result, "All multi-agent tools should be in non-production"
        assert "send_message_to_agent_async" in result, "Local-only tools should be in non-production"
@pytest.mark.asyncio
async def test_upsert_base_tools_excludes_local_only_in_production(server: SyncServer, default_user, set_letta_environment):
    """Test that upsert_base_tools excludes local-only multi-agent tools in production.

    Note: the @pytest.mark.asyncio decorator was missing here while every sibling
    async test carries it; added for consistency (harmless under asyncio auto mode).
    """
    # Upsert all base tools
    tools = await server.tool_manager.upsert_base_tools_async(actor=default_user)
    tool_names = {tool.name for tool in tools}
    if settings.environment == "prod":
        # Production environment should exclude local-only multi-agent tools
        for local_only_tool in LOCAL_ONLY_MULTI_AGENT_TOOLS:
            assert local_only_tool not in tool_names, f"Local-only tool '{local_only_tool}' should not be upserted in production"
        # But should include standard multi-agent tools
        standard_multi_agent_tools = set(MULTI_AGENT_TOOLS) - set(LOCAL_ONLY_MULTI_AGENT_TOOLS)
        for standard_tool in standard_multi_agent_tools:
            assert standard_tool in tool_names, f"Standard multi-agent tool '{standard_tool}' should be upserted in production"
    else:
        # Non-production environment should include all multi-agent tools
        for tool in MULTI_AGENT_TOOLS:
            assert tool in tool_names, f"Multi-agent tool '{tool}' should be upserted in non-production"
@pytest.mark.asyncio
async def test_upsert_multi_agent_tools_only(server: SyncServer, default_user, set_letta_environment):
    """Test that upserting only multi-agent tools respects production filtering.

    Note: added the @pytest.mark.asyncio decorator that sibling async tests carry.
    """
    from letta.schemas.enums import ToolType

    # Upsert only multi-agent tools
    tools = await server.tool_manager.upsert_base_tools_async(actor=default_user, allowed_types={ToolType.LETTA_MULTI_AGENT_CORE})
    tool_names = {tool.name for tool in tools}
    if settings.environment == "prod":
        # Should only have non-local multi-agent tools
        expected_tools = set(MULTI_AGENT_TOOLS) - set(LOCAL_ONLY_MULTI_AGENT_TOOLS)
        assert tool_names == expected_tools, "Production multi-agent upsert should exclude local-only tools"
        assert "send_message_to_agent_async" not in tool_names, "Local-only async tool should not be upserted in production"
    else:
        # Should have all multi-agent tools
        assert tool_names == set(MULTI_AGENT_TOOLS), "Non-production multi-agent upsert should include all tools"
        assert "send_message_to_agent_async" in tool_names, "Local-only async tool should be upserted in non-production"
@pytest.mark.asyncio
async def test_create_agent_with_default_source(server: SyncServer, default_user, print_tool, default_block):
    """Test agent creation with include_default_source=True.

    Verifies that include_default_source=True attaches exactly one auto-created
    source (named after the agent, using the agent's embedding config), and that
    include_default_source=False attaches no sources at all.
    """
    memory_blocks = [CreateBlock(label="human", value="TestUser"), CreateBlock(label="persona", value="I am a test assistant")]
    create_agent_request = CreateAgent(
        name="test_default_source_agent",
        agent_type="memgpt_v2_agent",
        system="test system",
        memory_blocks=memory_blocks,
        llm_config=LLMConfig.default_config("gpt-4o-mini"),
        embedding_config=EmbeddingConfig.default_config(provider="openai"),
        block_ids=[default_block.id],
        tool_ids=[print_tool.id],
        include_default_source=True,  # This is the key field we're testing
        include_base_tools=False,
    )
    # Create the agent
    created_agent = await server.agent_manager.create_agent_async(
        create_agent_request,
        actor=default_user,
    )
    # Verify agent was created
    assert created_agent is not None
    assert created_agent.name == "test_default_source_agent"
    # Verify that a default source was created and attached
    attached_sources = await server.agent_manager.list_attached_sources_async(agent_id=created_agent.id, actor=default_user)
    # Should have exactly one source (the default one)
    assert len(attached_sources) == 1
    auto_default_source = attached_sources[0]
    # Verify the default source properties: named after the agent, embedding config inherited
    assert created_agent.name in auto_default_source.name
    assert auto_default_source.embedding_config.embedding_endpoint_type == "openai"
    # Test with include_default_source=False
    create_agent_request_no_source = CreateAgent(
        name="test_no_default_source_agent",
        agent_type="memgpt_v2_agent",
        system="test system",
        memory_blocks=memory_blocks,
        llm_config=LLMConfig.default_config("gpt-4o-mini"),
        embedding_config=EmbeddingConfig.default_config(provider="openai"),
        block_ids=[default_block.id],
        tool_ids=[print_tool.id],
        include_default_source=False,  # Explicitly set to False
        include_base_tools=False,
    )
    created_agent_no_source = await server.agent_manager.create_agent_async(
        create_agent_request_no_source,
        actor=default_user,
    )
    # Verify no sources are attached
    attached_sources_no_source = await server.agent_manager.list_attached_sources_async(
        agent_id=created_agent_no_source.id, actor=default_user
    )
    assert len(attached_sources_no_source) == 0
    # Clean up
    await server.agent_manager.delete_agent_async(created_agent.id, default_user)
    await server.agent_manager.delete_agent_async(created_agent_no_source.id, default_user)
@pytest.mark.asyncio
async def test_get_context_window_basic(
    server: SyncServer, comprehensive_test_agent_fixture, default_user, default_file, set_letta_environment
):
    """Attach a file to the fixture agent, validate the context window overview, then delete the agent.

    Note: added the @pytest.mark.asyncio decorator that sibling async tests carry.
    """
    # Test agent creation
    created_agent, _create_agent_request = comprehensive_test_agent_fixture
    # Attach a file
    assoc, _closed_files = await server.file_agent_manager.attach_file(
        agent_id=created_agent.id,
        file_id=default_file.id,
        file_name=default_file.file_name,
        source_id=default_file.source_id,
        actor=default_user,
        visible_content="hello",
        max_files_open=created_agent.max_files_open,
    )
    # Get context window and check for basic appearances
    context_window_overview = await server.agent_manager.get_context_window(agent_id=created_agent.id, actor=default_user)
    validate_context_window_overview(created_agent, context_window_overview, assoc)
    # Test deleting the agent
    await server.agent_manager.delete_agent_async(created_agent.id, default_user)
    list_agents = await server.agent_manager.list_agents_async(actor=default_user)
    assert len(list_agents) == 0
@pytest.mark.asyncio
async def test_create_agent_passed_in_initial_messages(server: SyncServer, default_user, default_block):
    """An explicit initial_message_sequence should appear right after the system message."""
    blocks = [CreateBlock(label="human", value="BananaBoy"), CreateBlock(label="persona", value="I am a helpful assistant")]
    request = CreateAgent(
        agent_type="memgpt_v2_agent",
        system="test system",
        memory_blocks=blocks,
        llm_config=LLMConfig.default_config("gpt-4o-mini"),
        embedding_config=EmbeddingConfig.default_config(provider="openai"),
        block_ids=[default_block.id],
        tags=["a", "b"],
        description="test_description",
        initial_message_sequence=[MessageCreate(role=MessageRole.user, content="hello world")],
        include_base_tools=False,
    )
    agent_state = await server.agent_manager.create_agent_async(request, actor=default_user)

    # System message plus the single passed-in message = 2 total.
    assert await server.message_manager.size_async(agent_id=agent_state.id, actor=default_user) == 2
    init_messages = await server.message_manager.get_messages_by_ids_async(message_ids=agent_state.message_ids, actor=default_user)
    # The system prompt and the first memory block value are rendered into message 0.
    assert request.system in init_messages[0].content[0].text
    assert request.memory_blocks[0].value in init_messages[0].content[0].text
    # Message 1 is the caller-provided initial message.
    assert request.initial_message_sequence[0].role == init_messages[1].role
    assert request.initial_message_sequence[0].content in init_messages[1].content[0].text
@pytest.mark.asyncio
async def test_create_agent_default_initial_message(server: SyncServer, default_user, default_block):
    """Without an explicit initial_message_sequence, the default bootstrap sequence (4 messages) is used."""
    blocks = [CreateBlock(label="human", value="BananaBoy"), CreateBlock(label="persona", value="I am a helpful assistant")]
    request = CreateAgent(
        agent_type="memgpt_v2_agent",
        system="test system",
        memory_blocks=blocks,
        llm_config=LLMConfig.default_config("gpt-4o-mini"),
        embedding_config=EmbeddingConfig.default_config(provider="openai"),
        block_ids=[default_block.id],
        tags=["a", "b"],
        description="test_description",
        include_base_tools=False,
    )
    agent_state = await server.agent_manager.create_agent_async(request, actor=default_user)

    # Default bootstrap yields 4 messages.
    assert await server.message_manager.size_async(agent_id=agent_state.id, actor=default_user) == 4
    init_messages = await server.message_manager.get_messages_by_ids_async(message_ids=agent_state.message_ids, actor=default_user)
    # The system prompt and the first memory block value are rendered into message 0.
    assert request.system in init_messages[0].content[0].text
    assert request.memory_blocks[0].value in init_messages[0].content[0].text
@pytest.mark.asyncio
async def test_create_agent_with_json_in_system_message(server: SyncServer, default_user, default_block):
    """A system prompt containing literal JSON braces must round-trip into the system message intact."""
    system_prompt = (
        "You are an expert teaching agent with encyclopedic knowledge. "
        "When you receive a topic, query the external database for more "
        "information. Format the queries as a JSON list of queries making "
        "sure to include your reasoning for that query, e.g. "
        "{'query1' : 'reason1', 'query2' : 'reason2'}"
    )
    request = CreateAgent(
        agent_type="memgpt_v2_agent",
        system=system_prompt,
        llm_config=LLMConfig.default_config("gpt-4o-mini"),
        embedding_config=EmbeddingConfig.default_config(provider="openai"),
        block_ids=[default_block.id],
        tags=["a", "b"],
        description="test_description",
        include_base_tools=False,
    )
    state = await server.agent_manager.create_agent_async(request, actor=default_user)
    assert state is not None

    # The first message id points at the rendered system message.
    first_message_id = state.message_ids[0]
    system_message = await server.message_manager.get_message_by_id_async(message_id=first_message_id, actor=default_user)
    # Both the raw prompt (braces intact) and the block value must appear.
    assert system_prompt in system_message.content[0].text
    assert default_block.value in system_message.content[0].text

    await server.agent_manager.delete_agent_async(agent_id=state.id, actor=default_user)
@pytest.mark.asyncio
async def test_update_agent(server: SyncServer, comprehensive_test_agent_fixture, other_tool, other_source, other_block, default_user):
    """Update every mutable agent field at once and verify the result round-trips.

    Also checks that updated_at advances past the pre-update timestamp.
    Note: added the @pytest.mark.asyncio decorator that sibling async tests carry.
    """
    agent, _ = comprehensive_test_agent_fixture
    update_agent_request = UpdateAgent(
        name="train_agent",
        description="train description",
        tool_ids=[other_tool.id],
        source_ids=[other_source.id],
        block_ids=[other_block.id],
        tool_rules=[InitToolRule(tool_name=other_tool.name)],
        tags=["c", "d"],
        system="train system",
        llm_config=LLMConfig.default_config("gpt-4o-mini"),
        embedding_config=EmbeddingConfig.default_config(model_name="letta"),
        message_ids=[f"message-{uuid.uuid4()}", f"message-{uuid.uuid4()}"],
        metadata={"train_key": "train_value"},
        tool_exec_environment_variables={"test_env_var_key_a": "a", "new_tool_exec_key": "n"},
        message_buffer_autoclear=False,
    )
    last_updated_timestamp = agent.updated_at
    updated_agent = await server.agent_manager.update_agent_async(agent.id, update_agent_request, actor=default_user)
    # Shared helper validates every field against the request.
    comprehensive_agent_checks(updated_agent, update_agent_request, actor=default_user)
    assert updated_agent.message_ids == update_agent_request.message_ids
    assert updated_agent.updated_at > last_updated_timestamp
@pytest.mark.asyncio
async def test_create_agent_with_compaction_settings(server: SyncServer, default_user, default_block):
    """Test that agents can be created with custom compaction_settings"""
    # Base tools must exist before the agent is created.
    await server.tool_manager.upsert_base_tools_async(actor=default_user)

    # Derive model settings from the default LLM config and build custom settings.
    llm_config = LLMConfig.default_config("gpt-4o-mini")
    custom_settings = CompactionSettings(
        model="openai/gpt-4o-mini",
        model_settings=llm_config._to_model_settings(),
        prompt="Custom summarization prompt",
        clip_chars=1500,
        mode="all",
        sliding_window_percentage=0.5,
    )
    request = CreateAgent(
        name="test_compaction_agent",
        agent_type="memgpt_v2_agent",
        system="test system",
        llm_config=llm_config,
        embedding_config=EmbeddingConfig.default_config(provider="openai"),
        block_ids=[default_block.id],
        include_base_tools=True,
        compaction_settings=custom_settings,
    )
    created = await server.agent_manager.create_agent_async(request, actor=default_user)

    # Everything we passed in must round-trip unchanged.
    stored = created.compaction_settings
    assert stored is not None
    assert stored.mode == "all"
    assert stored.clip_chars == 1500
    assert stored.sliding_window_percentage == 0.5
    assert stored.prompt == "Custom summarization prompt"

    # Clean up
    await server.agent_manager.delete_agent_async(agent_id=created.id, actor=default_user)
@pytest.mark.asyncio
async def test_update_agent_compaction_settings(server: SyncServer, comprehensive_test_agent_fixture, default_user):
    """Test that an agent's compaction_settings can be fully replaced via update."""
    agent, _ = comprehensive_test_agent_fixture
    # Create new compaction settings
    llm_config = LLMConfig.default_config("gpt-4o-mini")
    model_settings = llm_config._to_model_settings()
    new_compaction_settings = CompactionSettings(
        model="openai/gpt-4o-mini",
        model_settings=model_settings,
        prompt="Updated summarization prompt",
        prompt_acknowledgement=False,
        clip_chars=3000,
        mode="sliding_window",
        sliding_window_percentage=0.4,
    )
    # Update agent with compaction settings
    update_agent_request = UpdateAgent(
        compaction_settings=new_compaction_settings,
    )
    updated_agent = await server.agent_manager.update_agent_async(agent.id, update_agent_request, actor=default_user)
    # Verify compaction settings were updated correctly
    assert updated_agent.compaction_settings is not None
    assert updated_agent.compaction_settings.mode == "sliding_window"
    assert updated_agent.compaction_settings.clip_chars == 3000
    assert updated_agent.compaction_settings.sliding_window_percentage == 0.4
    assert updated_agent.compaction_settings.prompt == "Updated summarization prompt"
    # Identity comparison for booleans (fixes E712: `== False`).
    assert updated_agent.compaction_settings.prompt_acknowledgement is False
@pytest.mark.asyncio
async def test_update_agent_partial_compaction_settings(server: SyncServer, comprehensive_test_agent_fixture, default_user):
    """Test that an agent's compaction_settings can be upserted (partial update merges with existing)."""
    from letta.services.summarizer.summarizer_config import get_default_prompt_for_mode

    agent, _ = comprehensive_test_agent_fixture
    # Snapshot the existing settings so we can verify untouched fields survive.
    original_compaction_settings = agent.compaction_settings.model_copy()
    # Partial settings: only mode, prompt_acknowledgement, and clip_chars change.
    new_compaction_settings = CompactionSettings(
        mode="all",
        prompt_acknowledgement=True,
        clip_chars=3000,
    )
    # Update agent with compaction settings
    update_agent_request = UpdateAgent(
        compaction_settings=new_compaction_settings,
    )
    updated_agent = await server.agent_manager.update_agent_async(agent.id, update_agent_request, actor=default_user)
    # Untouched fields carry over from the original settings.
    assert updated_agent.compaction_settings is not None
    assert updated_agent.compaction_settings.model == original_compaction_settings.model
    assert updated_agent.compaction_settings.model_settings == original_compaction_settings.model_settings
    assert updated_agent.compaction_settings.sliding_window_percentage == original_compaction_settings.sliding_window_percentage
    # Changed fields reflect the update; the prompt resets to the new mode's default.
    assert updated_agent.compaction_settings.mode == "all"
    assert updated_agent.compaction_settings.clip_chars == 3000
    assert updated_agent.compaction_settings.prompt == get_default_prompt_for_mode("all")
    # Identity comparison for booleans (fixes E712: `== True`).
    assert updated_agent.compaction_settings.prompt_acknowledgement is True
@pytest.mark.asyncio
async def test_update_agent_partial_compaction_settings_same_mode(server: SyncServer, comprehensive_test_agent_fixture, default_user):
    """Test that if the mode stays the same without a prompt passed in, the prompt is not updated."""
    agent, _ = comprehensive_test_agent_fixture

    # First set a custom prompt under sliding_window mode.
    first_update = UpdateAgent(
        compaction_settings=CompactionSettings(mode="sliding_window", prompt="This is a fake prompt."),
    )
    agent_after_first = await server.agent_manager.update_agent_async(agent.id, first_update, actor=default_user)
    assert agent_after_first.compaction_settings is not None
    assert agent_after_first.compaction_settings.prompt == "This is a fake prompt."

    # Snapshot the settings before the second (prompt-less) update.
    before_second = agent_after_first.compaction_settings.model_copy()

    # Second update keeps the same mode but only changes the model.
    second_update = UpdateAgent(
        compaction_settings=CompactionSettings(
            mode="sliding_window",
            model="openai/gpt-4o-mini",
        ),
    )
    final_agent = await server.agent_manager.update_agent_async(agent_after_first.id, second_update, actor=default_user)

    # Untouched fields carry over; model and mode reflect the second update.
    final_settings = final_agent.compaction_settings
    assert final_settings is not None
    assert final_settings.sliding_window_percentage == before_second.sliding_window_percentage
    assert final_settings.prompt == before_second.prompt
    assert final_settings.clip_chars == before_second.clip_chars
    assert final_settings.prompt_acknowledgement == before_second.prompt_acknowledgement
    assert final_settings.mode == "sliding_window"
    assert final_settings.model == "openai/gpt-4o-mini"
@pytest.mark.asyncio
async def test_agent_file_defaults_based_on_context_window(server: SyncServer, default_user, default_block):
    """Test that file-related defaults are set based on the model's context window size.

    Covers the small (8k), medium (32k), and large (128k) context-window tiers;
    for each, max_files_open must match the tier's expected value and
    per_file_view_window_char_limit must match
    calculate_file_defaults_based_on_context_window. Refactored from three
    copy-pasted stanzas into a table-driven loop (same names and expectations).
    """
    # (agent name, context window, expected max_files_open) per tier.
    cases = [
        ("test_agent_small_context", 8000, 3),
        ("test_agent_medium_context", 32000, 5),
        ("test_agent_large_context", 128000, 10),
    ]
    for name, context_window, expected_max_files in cases:
        llm_config = LLMConfig.default_config("gpt-4o-mini")
        llm_config.context_window = context_window
        create_agent_request = CreateAgent(
            name=name,
            agent_type="memgpt_v2_agent",
            llm_config=llm_config,
            embedding_config=EmbeddingConfig.default_config(provider="openai"),
            block_ids=[default_block.id],
            include_base_tools=False,
        )
        agent_state = await server.agent_manager.create_agent_async(
            create_agent_request,
            actor=default_user,
        )
        assert agent_state.max_files_open == expected_max_files, f"unexpected max_files_open for {context_window}-token window"
        expected_char_limit = calculate_file_defaults_based_on_context_window(llm_config.context_window)[1]
        assert agent_state.per_file_view_window_char_limit == expected_char_limit
        # Delete so the next iteration (and later tests) start clean.
        await server.agent_manager.delete_agent_async(agent_id=agent_state.id, actor=default_user)
@pytest.mark.asyncio
async def test_agent_file_defaults_explicit_values(server: SyncServer, default_user, default_block):
    """Test that explicitly set file-related values are respected"""
    llm_config = LLMConfig.default_config("gpt-4o-mini")
    # A 32k context would normally yield defaults of 5 files / 30k chars.
    llm_config.context_window = 32000
    request = CreateAgent(
        name="test_agent_explicit_values",
        agent_type="memgpt_v2_agent",
        llm_config=llm_config,
        embedding_config=EmbeddingConfig.default_config(provider="openai"),
        block_ids=[default_block.id],
        include_base_tools=False,
        max_files_open=20,  # explicit value
        per_file_view_window_char_limit=500_000,  # explicit value
    )
    state = await server.agent_manager.create_agent_async(request, actor=default_user)

    # Explicit values win over context-window-derived defaults.
    assert state.max_files_open == 20
    assert state.per_file_view_window_char_limit == 500_000

    await server.agent_manager.delete_agent_async(agent_id=state.id, actor=default_user)
@pytest.mark.asyncio
async def test_update_agent_file_fields(server: SyncServer, comprehensive_test_agent_fixture, default_user):
    """Test updating file-related fields on an existing agent"""
    agent, _ = comprehensive_test_agent_fixture
    # Apply new file limits via a partial update.
    result = await server.agent_manager.update_agent_async(
        agent.id,
        UpdateAgent(max_files_open=15, per_file_view_window_char_limit=150_000),
        actor=default_user,
    )
    assert result.max_files_open == 15
    assert result.per_file_view_window_char_limit == 150_000
@pytest.mark.asyncio
async def test_update_agent_last_stop_reason(server: SyncServer, comprehensive_test_agent_fixture, default_user):
    """Test updating last_stop_reason field on an existing agent"""
    agent, _ = comprehensive_test_agent_fixture
    assert agent.last_stop_reason is None

    # end_turn, with completion timestamp and duration.
    updated = await server.agent_manager.update_agent_async(
        agent.id,
        UpdateAgent(
            last_stop_reason=StopReasonType.end_turn,
            last_run_completion=datetime.now(timezone.utc),
            last_run_duration_ms=1500,
        ),
        actor=default_user,
    )
    assert updated.last_stop_reason == StopReasonType.end_turn
    assert updated.last_run_completion is not None
    assert updated.last_run_duration_ms == 1500

    # error, with a new duration.
    updated = await server.agent_manager.update_agent_async(
        agent.id,
        UpdateAgent(
            last_stop_reason=StopReasonType.error,
            last_run_completion=datetime.now(timezone.utc),
            last_run_duration_ms=2500,
        ),
        actor=default_user,
    )
    assert updated.last_stop_reason == StopReasonType.error
    assert updated.last_run_duration_ms == 2500

    # requires_approval, stop reason only.
    updated = await server.agent_manager.update_agent_async(
        agent.id,
        UpdateAgent(last_stop_reason=StopReasonType.requires_approval),
        actor=default_user,
    )
    assert updated.last_stop_reason == StopReasonType.requires_approval
# ======================================================================================================================
# AgentManager Tests - Listing
# ======================================================================================================================
@pytest.mark.asyncio
async def test_list_agents_select_fields_empty(server: SyncServer, comprehensive_test_agent_fixture, default_user):
    """An empty include_relationships list should skip loading all relationships."""
    _created_agent, _create_agent_request = comprehensive_test_agent_fixture
    # Request agents while explicitly asking for zero relationship fields.
    agents = await server.agent_manager.list_agents_async(actor=default_user, include_relationships=[])
    assert len(agents) >= 1

    listed = agents[0]
    # Scalar fields still come back...
    assert listed.id is not None
    assert listed.name is not None
    # ...but no relationships were hydrated.
    assert len(listed.tools) == 0
    assert len(listed.tags) == 0
@pytest.mark.asyncio
async def test_list_agents_select_fields_none(server: SyncServer, comprehensive_test_agent_fixture, default_user):
    """include_relationships=None should fall back to loading all relationships."""
    # Create an agent using the comprehensive fixture.
    _created_agent, _create_agent_request = comprehensive_test_agent_fixture
    # List agents with include_relationships=None (unlike [], which suppresses loading).
    agents = await server.agent_manager.list_agents_async(actor=default_user, include_relationships=None)
    # Assert that the agent is returned and basic fields are present.
    assert len(agents) >= 1
    agent = agents[0]
    assert agent.id is not None
    assert agent.name is not None
    # With None, relationships ARE loaded (the fixture attaches tools and tags).
    assert len(agent.tools) > 0
    assert len(agent.tags) > 0
@pytest.mark.asyncio
async def test_list_agents_select_fields_specific(server: SyncServer, comprehensive_test_agent_fixture, default_user):
    """Requesting a subset of relationship fields loads exactly those fields."""
    _created_agent, _create_agent_request = comprehensive_test_agent_fixture
    # Ask for only tools and tags.
    agents = await server.agent_manager.list_agents_async(actor=default_user, include_relationships=["tools", "tags"])
    assert len(agents) >= 1

    listed = agents[0]
    # The requested relationships are hydrated (the fixture attaches both)...
    assert listed.tools
    assert sorted(listed.tags) == ["a", "b"]
    # ...while unrequested ones (memory blocks) stay empty.
    assert not listed.memory.blocks
@pytest.mark.asyncio
async def test_list_agents_select_fields_invalid(server: SyncServer, comprehensive_test_agent_fixture, default_user):
    """Unknown names passed to include_relationships should be silently ignored."""
    _created_agent, _create_agent_request = comprehensive_test_agent_fixture
    # Provide field names that are not recognized.
    invalid_fields = ["foobar", "nonexistent_field"]
    # The expectation is that these fields are simply ignored.
    agents = await server.agent_manager.list_agents_async(actor=default_user, include_relationships=invalid_fields)
    assert len(agents) >= 1
    agent = agents[0]
    # Verify that standard fields are still present.
    assert agent.id is not None
    assert agent.name is not None
@pytest.mark.asyncio
async def test_list_agents_select_fields_duplicates(server: SyncServer, comprehensive_test_agent_fixture, default_user):
    """Duplicate relationship names in include_relationships must not break the query."""
    _created_agent, _create_agent_request = comprehensive_test_agent_fixture
    agents = await server.agent_manager.list_agents_async(
        actor=default_user, include_relationships=["tools", "tools", "tags", "tags"]
    )
    assert len(agents) >= 1
    listed = agents[0]
    # Repeated fields behave the same as listing each once.
    assert isinstance(listed.tools, list)
    assert isinstance(listed.tags, list)
@pytest.mark.asyncio
async def test_list_agents_select_fields_mixed(server: SyncServer, comprehensive_test_agent_fixture, default_user):
    """A mix of valid and unknown relationship names loads the valid ones and ignores the rest."""
    _created_agent, _create_agent_request = comprehensive_test_agent_fixture
    agents = await server.agent_manager.list_agents_async(
        actor=default_user, include_relationships=["tools", "invalid_field"]
    )
    assert len(agents) >= 1
    listed = agents[0]
    # The recognized field is hydrated.
    assert listed.tools
    # The unknown name is silently ignored and never becomes an attribute.
    assert not hasattr(listed, "invalid_field")
@pytest.mark.asyncio
async def test_list_agents_ascending(server: SyncServer, default_user):
    """Agents listed with ascending=True should come back oldest-first."""
    # Create two agents with known names
    await server.agent_manager.create_agent_async(
        agent_create=CreateAgent(
            name="agent_oldest",
            agent_type="memgpt_v2_agent",
            llm_config=LLMConfig.default_config("gpt-4o-mini"),
            embedding_config=EmbeddingConfig.default_config(provider="openai"),
            memory_blocks=[],
            include_base_tools=False,
        ),
        actor=default_user,
    )
    # Sleep between creates on SQLite — presumably so the two agents get
    # distinct creation timestamps under SQLite's coarser clock; verify.
    if USING_SQLITE:
        time.sleep(CREATE_DELAY_SQLITE)
    await server.agent_manager.create_agent_async(
        agent_create=CreateAgent(
            name="agent_newest",
            agent_type="memgpt_v2_agent",
            llm_config=LLMConfig.default_config("gpt-4o-mini"),
            embedding_config=EmbeddingConfig.default_config(provider="openai"),
            memory_blocks=[],
            include_base_tools=False,
        ),
        actor=default_user,
    )
    # Oldest must precede newest in ascending order.
    agents = await server.agent_manager.list_agents_async(actor=default_user, ascending=True)
    names = [agent.name for agent in agents]
    assert names.index("agent_oldest") < names.index("agent_newest")
@pytest.mark.asyncio
async def test_list_agents_descending(server: SyncServer, default_user):
    """Agents listed with ascending=False should come back newest-first."""
    # Create two agents with known names
    await server.agent_manager.create_agent_async(
        agent_create=CreateAgent(
            name="agent_oldest",
            agent_type="memgpt_v2_agent",
            llm_config=LLMConfig.default_config("gpt-4o-mini"),
            embedding_config=EmbeddingConfig.default_config(provider="openai"),
            memory_blocks=[],
            include_base_tools=False,
        ),
        actor=default_user,
    )
    # Sleep between creates on SQLite — presumably so the two agents get
    # distinct creation timestamps under SQLite's coarser clock; verify.
    if USING_SQLITE:
        time.sleep(CREATE_DELAY_SQLITE)
    await server.agent_manager.create_agent_async(
        agent_create=CreateAgent(
            name="agent_newest",
            agent_type="memgpt_v2_agent",
            llm_config=LLMConfig.default_config("gpt-4o-mini"),
            embedding_config=EmbeddingConfig.default_config(provider="openai"),
            memory_blocks=[],
            include_base_tools=False,
        ),
        actor=default_user,
    )
    # Newest must precede oldest in descending order.
    agents = await server.agent_manager.list_agents_async(actor=default_user, ascending=False)
    names = [agent.name for agent in agents]
    assert names.index("agent_newest") < names.index("agent_oldest")
@pytest.mark.asyncio
async def test_list_agents_by_last_stop_reason(server: SyncServer, default_user):
    """Filtering list_agents_async by last_stop_reason returns only matching agents."""

    async def _make_agent(agent_name):
        # Create a minimal v2 agent owned by default_user.
        return await server.agent_manager.create_agent_async(
            agent_create=CreateAgent(
                name=agent_name,
                agent_type="memgpt_v2_agent",
                llm_config=LLMConfig.default_config("gpt-4o-mini"),
                embedding_config=EmbeddingConfig.default_config(provider="openai"),
                memory_blocks=[],
                include_base_tools=False,
            ),
            actor=default_user,
        )

    # One agent stamped requires_approval
    approval_agent = await _make_agent("agent_requires_approval")
    await server.agent_manager.update_agent_async(
        agent_id=approval_agent.id,
        agent_update=UpdateAgent(last_stop_reason=StopReasonType.requires_approval),
        actor=default_user,
    )

    # One agent stamped error
    errored_agent = await _make_agent("agent_error")
    await server.agent_manager.update_agent_async(
        agent_id=errored_agent.id,
        agent_update=UpdateAgent(last_stop_reason=StopReasonType.error),
        actor=default_user,
    )

    # One agent left without any stop reason
    await _make_agent("agent_no_stop_reason")

    # requires_approval filter matches exactly one agent
    matches = await server.agent_manager.list_agents_async(
        actor=default_user, last_stop_reason=StopReasonType.requires_approval.value
    )
    assert {a.name for a in matches} == {"agent_requires_approval"}

    # error filter matches exactly one agent
    matches = await server.agent_manager.list_agents_async(actor=default_user, last_stop_reason=StopReasonType.error.value)
    assert {a.name for a in matches} == {"agent_error"}

    # No filter: all three agents are present
    everything = await server.agent_manager.list_agents_async(actor=default_user)
    assert {"agent_requires_approval", "agent_error", "agent_no_stop_reason"} <= {a.name for a in everything}
@pytest.mark.asyncio
async def test_count_agents_with_filters(server: SyncServer, default_user):
    """Test count_agents_async with stop-reason, tag, name, and query_text filters."""

    async def _spawn(agent_name, agent_tags, stop_reason=None):
        # Create a minimal agent; optionally stamp its last_stop_reason afterwards.
        agent = await server.agent_manager.create_agent_async(
            agent_create=CreateAgent(
                name=agent_name,
                agent_type="memgpt_v2_agent",
                llm_config=LLMConfig.default_config("gpt-4o-mini"),
                embedding_config=EmbeddingConfig.default_config(provider="openai"),
                memory_blocks=[],
                include_base_tools=False,
                tags=agent_tags,
            ),
            actor=default_user,
        )
        if stop_reason is not None:
            await server.agent_manager.update_agent_async(
                agent_id=agent.id,
                agent_update=UpdateAgent(last_stop_reason=stop_reason),
                actor=default_user,
            )
        return agent

    await _spawn("agent_requires_approval", ["inbox", "test"], StopReasonType.requires_approval)
    await _spawn("agent_error", ["error", "test"], StopReasonType.error)
    await _spawn("agent_completed", ["completed"], StopReasonType.end_turn)
    await _spawn("agent_no_stop_reason", ["test"])

    count_agents = server.agent_manager.count_agents_async

    # No filters: at least the four agents created above
    assert await count_agents(actor=default_user) >= 4

    # Per-stop-reason counts (requires_approval is the "inbox" use case)
    assert await count_agents(actor=default_user, last_stop_reason=StopReasonType.requires_approval.value) == 1
    assert await count_agents(actor=default_user, last_stop_reason=StopReasonType.error.value) == 1
    assert await count_agents(actor=default_user, last_stop_reason=StopReasonType.end_turn.value) == 1

    # Tag filters: any-match vs match_all_tags
    assert await count_agents(actor=default_user, tags=["test"]) == 3
    assert await count_agents(actor=default_user, tags=["inbox", "test"], match_all_tags=True) == 1

    # Name and free-text filters
    assert await count_agents(actor=default_user, name="agent_requires_approval") == 1
    assert await count_agents(actor=default_user, query_text="error") >= 1

    # Combined: stop reason + tag
    assert await count_agents(actor=default_user, last_stop_reason=StopReasonType.requires_approval.value, tags=["inbox"]) == 1
@pytest.mark.asyncio
async def test_list_agents_ordering_and_pagination(server: SyncServer, default_user):
    """Verify ascending/descending ordering plus before/after cursor pagination."""
    creation_order = ["alpha_agent", "beta_agent", "gamma_agent"]
    id_by_name = {}
    for agent_name in creation_order:
        created = await server.agent_manager.create_agent_async(
            agent_create=CreateAgent(
                name=agent_name,
                agent_type="memgpt_v2_agent",
                memory_blocks=[],
                llm_config=LLMConfig.default_config("gpt-4o-mini"),
                embedding_config=EmbeddingConfig.default_config(provider="openai"),
                include_base_tools=False,
            ),
            actor=default_user,
        )
        id_by_name[created.name] = created.id
        # SQLite timestamps are coarse; space out creations so ordering is deterministic.
        if USING_SQLITE:
            time.sleep(CREATE_DELAY_SQLITE)

    # Oldest-to-newest ordering
    ascending_names = [a.name for a in await server.agent_manager.list_agents_async(actor=default_user, ascending=True)]
    assert ascending_names.index("alpha_agent") < ascending_names.index("beta_agent") < ascending_names.index("gamma_agent")

    # Newest-to-oldest ordering
    descending_names = [a.name for a in await server.agent_manager.list_agents_async(actor=default_user, ascending=False)]
    assert descending_names.index("gamma_agent") < descending_names.index("beta_agent") < descending_names.index("alpha_agent")

    # "after" cursor in ascending order excludes the cursor agent itself
    page = await server.agent_manager.list_agents_async(actor=default_user, after=id_by_name["alpha_agent"], ascending=True)
    assert [a.name for a in page] == ["beta_agent", "gamma_agent"]

    # "before" cursor in ascending order excludes the cursor agent itself
    page = await server.agent_manager.list_agents_async(actor=default_user, before=id_by_name["gamma_agent"], ascending=True)
    assert [a.name for a in page] == ["alpha_agent", "beta_agent"]

    # "after" cursor in descending order walks newest-to-oldest
    page = await server.agent_manager.list_agents_async(actor=default_user, after=id_by_name["gamma_agent"], ascending=False)
    assert [a.name for a in page] == ["beta_agent", "alpha_agent"]

    # "before" cursor in descending order
    page = await server.agent_manager.list_agents_async(actor=default_user, before=id_by_name["alpha_agent"], ascending=False)
    assert [a.name for a in page] == ["gamma_agent", "beta_agent"]
# ======================================================================================================================
# AgentManager Tests - Environment Variable Encryption
# ======================================================================================================================
@pytest.fixture
def encryption_key():
    """Ensure settings.encryption_key is set for the test, restoring the original afterwards.

    Fix: the restore now runs inside a ``finally`` block, so the original key is
    put back even if an exception is thrown into the fixture generator during
    teardown (the old code would leak the test key in that case).
    """
    original_key = settings.encryption_key
    # Set a test encryption key if not already set
    if not settings.encryption_key:
        settings.encryption_key = "test-encryption-key-32-bytes!!"
    try:
        yield settings.encryption_key
    finally:
        # Restore original
        settings.encryption_key = original_key
@pytest.mark.asyncio
async def test_agent_environment_variables_encrypt_on_create(server: SyncServer, default_user, encryption_key):
    """Creating an agent with secrets must store encrypted values in the database."""
    from letta.orm.sandbox_config import AgentEnvironmentVariable as AgentEnvironmentVariableModel
    from letta.schemas.secret import Secret

    expected_plaintext = {
        "API_KEY": "sk-test-secret-12345",
        "DATABASE_URL": "postgres://user:pass@localhost/db",
    }
    created_agent = await server.agent_manager.create_agent_async(
        CreateAgent(
            name="test-agent-with-secrets",
            agent_type="memgpt_v2_agent",
            llm_config=LLMConfig.default_config("gpt-4o-mini"),
            embedding_config=DEFAULT_EMBEDDING_CONFIG,
            include_base_tools=False,
            secrets=dict(expected_plaintext),
        ),
        actor=default_user,
    )

    # The returned agent exposes each secret as an object wrapping a Secret value
    assert created_agent.secrets is not None
    assert len(created_agent.secrets) == 2
    for secret_obj in created_agent.secrets:
        assert secret_obj.key in ["API_KEY", "DATABASE_URL"]
        assert secret_obj.value_enc is not None
        assert isinstance(secret_obj.value_enc, Secret)

    # DB rows hold ciphertext strings that round-trip back to the plaintext
    async with db_registry.async_session() as session:
        result = await session.execute(
            select(AgentEnvironmentVariableModel).where(AgentEnvironmentVariableModel.agent_id == created_agent.id)
        )
        rows = list(result.scalars().all())
        assert len(rows) == 2
        for row in rows:
            assert row.value_enc is not None
            assert isinstance(row.value_enc, str)
            if row.key in expected_plaintext:
                assert Secret.from_encrypted(row.value_enc).get_plaintext() == expected_plaintext[row.key]
@pytest.mark.asyncio
async def test_agent_environment_variables_decrypt_on_read(server: SyncServer, default_user, encryption_key):
    """Test that reading an agent deserializes secrets correctly to AgentEnvironmentVariable objects.

    Fix: removed assertions that were duplicated verbatim (isinstance(value_enc, Secret)
    and the get_plaintext() round-trip were each checked twice).
    """
    from letta.schemas.environment_variables import AgentEnvironmentVariable
    from letta.schemas.secret import Secret

    # Create agent with a single secret
    agent_create = CreateAgent(
        name="test-agent-read-secrets",
        agent_type="memgpt_v2_agent",
        llm_config=LLMConfig.default_config("gpt-4o-mini"),
        embedding_config=DEFAULT_EMBEDDING_CONFIG,
        include_base_tools=False,
        secrets={
            "TEST_KEY": "test-value-67890",
        },
    )
    created_agent = await server.agent_manager.create_agent_async(agent_create, actor=default_user)
    agent_id = created_agent.id

    # Read the agent back through the manager
    retrieved_agent = await server.agent_manager.get_agent_by_id_async(agent_id=agent_id, actor=default_user)

    # Secrets are deserialized to AgentEnvironmentVariable objects
    assert retrieved_agent.secrets is not None
    assert len(retrieved_agent.secrets) == 1
    secret_obj = retrieved_agent.secrets[0]
    assert isinstance(secret_obj, AgentEnvironmentVariable)
    assert secret_obj.key == "TEST_KEY"
    assert secret_obj.value == "test-value-67890"
    # value_enc is a Secret wrapper (not a raw string) that decrypts to the original value
    assert secret_obj.value_enc is not None
    assert isinstance(secret_obj.value_enc, Secret)
    assert secret_obj.value_enc.get_plaintext() == "test-value-67890"
@pytest.mark.asyncio
async def test_agent_environment_variables_update_encryption(server: SyncServer, default_user, encryption_key):
    """Updating agent secrets must replace and encrypt the new values."""
    from letta.orm.sandbox_config import AgentEnvironmentVariable as AgentEnvironmentVariableModel
    from letta.schemas.secret import Secret

    # Start from an agent that has a single secret
    created_agent = await server.agent_manager.create_agent_async(
        CreateAgent(
            name="test-agent-update-secrets",
            agent_type="memgpt_v2_agent",
            llm_config=LLMConfig.default_config("gpt-4o-mini"),
            embedding_config=DEFAULT_EMBEDDING_CONFIG,
            include_base_tools=False,
            secrets={
                "INITIAL_KEY": "initial-value",
            },
        ),
        actor=default_user,
    )
    agent_id = created_agent.id

    # Replace the secret set entirely with two new entries
    expected_plaintext = {
        "UPDATED_KEY": "updated-value-abc",
        "NEW_KEY": "new-value-xyz",
    }
    updated_agent = await server.agent_manager.update_agent_async(
        agent_id=agent_id, agent_update=UpdateAgent(secrets=dict(expected_plaintext)), actor=default_user
    )
    assert updated_agent.secrets is not None
    assert len(updated_agent.secrets) == 2

    # The DB rows must hold ciphertext that decrypts to the new values only
    async with db_registry.async_session() as session:
        result = await session.execute(select(AgentEnvironmentVariableModel).where(AgentEnvironmentVariableModel.agent_id == agent_id))
        rows = list(result.scalars().all())
        assert len(rows) == 2
        for row in rows:
            assert row.value_enc is not None
            decrypted = Secret.from_encrypted(row.value_enc).get_plaintext()
            if row.key not in expected_plaintext:
                pytest.fail(f"Unexpected key: {row.key}")
            assert decrypted == expected_plaintext[row.key]
@pytest.mark.asyncio
async def test_agent_secrets_clear_with_empty_dict(server: SyncServer, default_user, encryption_key):
    """Updating secrets with an empty dict wipes every stored secret."""
    from letta.orm.sandbox_config import AgentEnvironmentVariable as AgentEnvironmentVariableModel

    # Agent starts out with two secrets
    created_agent = await server.agent_manager.create_agent_async(
        CreateAgent(
            name="test-agent-clear-secrets",
            agent_type="memgpt_v2_agent",
            llm_config=LLMConfig.default_config("gpt-4o-mini"),
            embedding_config=DEFAULT_EMBEDDING_CONFIG,
            include_base_tools=False,
            secrets={
                "SECRET_KEY_1": "secret-value-1",
                "SECRET_KEY_2": "secret-value-2",
            },
        ),
        actor=default_user,
    )
    agent_id = created_agent.id
    assert created_agent.secrets is not None
    assert len(created_agent.secrets) == 2

    # An explicit empty dict removes everything
    updated_agent = await server.agent_manager.update_agent_async(
        agent_id=agent_id, agent_update=UpdateAgent(secrets={}), actor=default_user
    )
    assert updated_agent.secrets is not None
    assert len(updated_agent.secrets) == 0

    # No environment-variable rows remain in the database
    async with db_registry.async_session() as session:
        result = await session.execute(select(AgentEnvironmentVariableModel).where(AgentEnvironmentVariableModel.agent_id == agent_id))
        assert list(result.scalars().all()) == []
@pytest.mark.asyncio
async def test_agent_state_schema_unchanged(server: SyncServer):
    """
    Test that the AgentState pydantic schema structure has not changed.

    This test validates all fields including nested pydantic objects to ensure
    the schema remains stable across changes.

    Fixes: the ``check_type_match`` helper was previously re-defined inside the
    field-validation loop on every iteration — it is now defined once; the
    ``typing`` import was hoisted to the top of the function; and the eight
    identical nested-schema field comparisons were deduplicated into a single
    local helper.
    """
    import typing

    from letta.schemas.agent import AgentState, AgentType
    from letta.schemas.block import Block
    from letta.schemas.embedding_config import EmbeddingConfig
    from letta.schemas.environment_variables import AgentEnvironmentVariable
    from letta.schemas.group import Group
    from letta.schemas.letta_message import ApprovalRequestMessage
    from letta.schemas.llm_config import LLMConfig
    from letta.schemas.memory import Memory
    from letta.schemas.model import ModelSettingsUnion
    from letta.schemas.response_format import ResponseFormatUnion
    from letta.schemas.source import Source
    from letta.schemas.tool import Tool
    from letta.services.summarizer.summarizer_config import CompactionSettings

    def check_type_match(annotation, expected):
        """Return True if *annotation* is compatible with the *expected* type."""
        origin = typing.get_origin(annotation)
        args = typing.get_args(annotation)
        # Direct match
        if annotation == expected:
            return True
        # Handle list type (List[X] should match list)
        if expected is list and origin is list:
            return True
        # Handle dict type (Dict[X, Y] should match dict)
        if expected is dict and origin is dict:
            return True
        # Handle Optional types
        if origin is typing.Union:
            # Check if expected type is in the union
            if expected in args:
                return True
            # Handle list case within Union (e.g., Union[List[X], None])
            if expected is list:
                for arg in args:
                    if typing.get_origin(arg) is list:
                        return True
            # Handle dict case within Union
            if expected is dict:
                for arg in args:
                    if typing.get_origin(arg) is dict:
                        return True
            # Handle Annotated types within Union (e.g., Union[Annotated[...], None])
            # This checks if any of the union args is an Annotated type that matches expected
            for arg in args:
                if typing.get_origin(arg) is typing.Annotated:
                    # For Annotated types, compare the first argument (the actual type)
                    annotated_args = typing.get_args(arg)
                    if annotated_args and annotated_args[0] == expected:
                        return True
        return False

    def assert_model_fields(model_name, model_cls, expected_fields):
        """Fail the test if *model_cls*'s declared fields differ from *expected_fields*."""
        actual_fields = set(model_cls.model_fields.keys())
        if actual_fields != expected_fields:
            pytest.fail(
                f"{model_name} schema changed. Expected fields: {expected_fields}, "
                f"Got: {actual_fields}. Schema changes must be intentional."
            )

    # Define the expected schema structure
    expected_schema = {
        # Core identification
        "id": str,
        "name": str,
        # Tool rules
        "tool_rules": (list, type(None)),
        # In-context memory
        "message_ids": (list, type(None)),
        # System prompt
        "system": str,
        # Agent configuration
        "agent_type": AgentType,
        # LLM information
        "llm_config": LLMConfig,
        "compaction_settings": CompactionSettings,
        "model": str,
        "embedding": str,
        "embedding_config": EmbeddingConfig,
        "model_settings": (ModelSettingsUnion, type(None)),
        "response_format": (ResponseFormatUnion, type(None)),
        # State fields
        "description": (str, type(None)),
        "metadata": (dict, type(None)),
        # Memory and tools
        "memory": Memory,  # deprecated
        "blocks": list,
        "tools": list,
        "sources": list,
        "tags": list,
        "tool_exec_environment_variables": list,  # deprecated
        "secrets": list,
        # Project and template fields
        "project_id": (str, type(None)),
        "template_id": (str, type(None)),
        "base_template_id": (str, type(None)),
        "deployment_id": (str, type(None)),
        "entity_id": (str, type(None)),
        "identity_ids": list,
        "identities": list,
        "pending_approval": (ApprovalRequestMessage, type(None)),
        # Advanced configuration
        "message_buffer_autoclear": bool,
        "enable_sleeptime": (bool, type(None)),
        # Multi-agent
        "multi_agent_group": (Group, type(None)),  # deprecated
        "managed_group": (Group, type(None)),
        # Run metrics
        "last_run_completion": (datetime, type(None)),
        "last_run_duration_ms": (int, type(None)),
        "last_stop_reason": (StopReasonType, type(None)),
        # Timezone
        "timezone": (str, type(None)),
        # File controls
        "max_files_open": (int, type(None)),
        "per_file_view_window_char_limit": (int, type(None)),
        # Indexing controls
        "hidden": (bool, type(None)),
        # Metadata fields (from OrmMetadataBase)
        "created_by_id": (str, type(None)),
        "last_updated_by_id": (str, type(None)),
        "created_at": (datetime, type(None)),
        "updated_at": (datetime, type(None)),
    }

    # Get the actual schema fields from AgentState
    agent_state_fields = AgentState.model_fields
    actual_field_names = set(agent_state_fields.keys())
    expected_field_names = set(expected_schema.keys())

    # Check for added fields
    added_fields = actual_field_names - expected_field_names
    if added_fields:
        pytest.fail(
            f"New fields detected in AgentState schema: {sorted(added_fields)}. "
            "This test must be updated to include these fields, and the schema change must be intentional."
        )
    # Check for removed fields
    removed_fields = expected_field_names - actual_field_names
    if removed_fields:
        pytest.fail(
            f"Fields removed from AgentState schema: {sorted(removed_fields)}. "
            "This test must be updated to remove these fields, and the schema change must be intentional."
        )

    # Validate field types
    for field_name, expected_type in expected_schema.items():
        annotation = agent_state_fields[field_name].annotation
        # Handle tuple of expected types (Optional)
        if isinstance(expected_type, tuple):
            valid = any(check_type_match(annotation, exp_t) for exp_t in expected_type)
            if not valid:
                pytest.fail(
                    f"Field '{field_name}' type changed. Expected one of {expected_type}, "
                    f"but got {annotation}. Schema changes must be intentional."
                )
        else:
            # Single expected type
            valid = check_type_match(annotation, expected_type)
            if not valid:
                pytest.fail(
                    f"Field '{field_name}' type changed. Expected {expected_type}, "
                    f"but got {annotation}. Schema changes must be intentional."
                )

    # Validate nested object schemas — each model's field set is pinned exactly
    assert_model_fields("Memory", Memory, {"agent_type", "git_enabled", "blocks", "file_blocks", "prompt_template"})
    assert_model_fields(
        "Block",
        Block,
        {
            "id",
            "value",
            "limit",
            "project_id",
            "template_name",
            "is_template",
            "template_id",
            "base_template_id",
            "deployment_id",
            "entity_id",
            "preserve_on_migration",
            "label",
            "read_only",
            "description",
            "metadata",
            "hidden",
            "created_by_id",
            "last_updated_by_id",
            "tags",
        },
    )
    assert_model_fields(
        "Tool",
        Tool,
        {
            "id",
            "tool_type",
            "description",
            "source_type",
            "name",
            "tags",
            "source_code",
            "json_schema",
            "args_json_schema",
            "return_char_limit",
            "pip_requirements",
            "npm_requirements",
            "default_requires_approval",
            "enable_parallel_execution",
            "created_by_id",
            "last_updated_by_id",
            "metadata_",
            "project_id",
        },
    )
    assert_model_fields(
        "Source",
        Source,
        {
            "id",
            "name",
            "description",
            "instructions",
            "metadata",
            "embedding_config",
            "organization_id",
            "vector_db_provider",
            "created_by_id",
            "last_updated_by_id",
            "created_at",
            "updated_at",
        },
    )
    assert_model_fields(
        "LLMConfig",
        LLMConfig,
        {
            "model",
            "display_name",
            "model_endpoint_type",
            "model_endpoint",
            "provider_name",
            "provider_category",
            "model_wrapper",
            "context_window",
            "put_inner_thoughts_in_kwargs",
            "handle",
            "temperature",
            "max_tokens",
            "enable_reasoner",
            "reasoning_effort",
            "effort",
            "response_format",
            "max_reasoning_tokens",
            "frequency_penalty",
            "compatibility_type",
            "verbosity",
            "tier",
            "parallel_tool_calls",
            "strict",
            "return_logprobs",
            "top_logprobs",
            "return_token_ids",
        },
    )
    assert_model_fields(
        "EmbeddingConfig",
        EmbeddingConfig,
        {
            "embedding_endpoint_type",
            "embedding_endpoint",
            "embedding_model",
            "embedding_dim",
            "embedding_chunk_size",
            "handle",
            "batch_size",
            "azure_endpoint",
            "azure_version",
            "azure_deployment",
        },
    )
    assert_model_fields(
        "AgentEnvironmentVariable",
        AgentEnvironmentVariable,
        {
            "id",
            "key",
            "value",
            "description",
            "organization_id",
            "value_enc",
            "agent_id",
            # From OrmMetadataBase
            "created_by_id",
            "last_updated_by_id",
            "created_at",
            "updated_at",
        },
    )
    assert_model_fields(
        "Group",
        Group,
        {
            "id",
            "manager_type",
            "agent_ids",
            "description",
            "project_id",
            "template_id",
            "base_template_id",
            "deployment_id",
            "shared_block_ids",
            "manager_agent_id",
            "termination_token",
            "max_turns",
            "sleeptime_agent_frequency",
            "turns_counter",
            "last_processed_message_id",
            "max_message_buffer_length",
            "min_message_buffer_length",
            "hidden",
        },
    )
@pytest.mark.asyncio
async def test_agent_state_relationship_loads(server: SyncServer, default_user, print_tool, default_block):
    """Verify include_relationships / include control which agent relations get loaded.

    Fix: added the missing @pytest.mark.asyncio marker. Every other async test in
    this file carries it; without it, pytest-asyncio in strict mode never runs
    this coroutine test.
    """
    memory_blocks = [CreateBlock(label="human", value="TestUser"), CreateBlock(label="persona", value="I am a test assistant")]
    create_agent_request = CreateAgent(
        name="test_default_source_agent",
        agent_type="memgpt_v2_agent",
        system="test system",
        memory_blocks=memory_blocks,
        llm_config=LLMConfig.default_config("gpt-4o-mini"),
        embedding_config=EmbeddingConfig.default_config(provider="openai"),
        block_ids=[default_block.id],
        tool_ids=[print_tool.id],
        include_default_source=True,
        include_base_tools=False,
        tags=["test_tag"],
    )
    # Create the agent
    created_agent = await server.agent_manager.create_agent_async(
        create_agent_request,
        actor=default_user,
    )

    # Legacy default: all relationships are loaded
    agent_state = await server.agent_manager.get_agent_by_id_async(
        agent_id=created_agent.id,
        actor=default_user,
    )
    assert agent_state.blocks
    assert agent_state.sources
    assert agent_state.tags
    assert agent_state.tools

    # Explicit empty include_relationships suppresses all relations
    agent_state = await server.agent_manager.get_agent_by_id_async(
        agent_id=created_agent.id,
        actor=default_user,
        include_relationships=[],
    )
    assert not agent_state.blocks
    assert not agent_state.sources
    assert not agent_state.tags
    assert not agent_state.tools

    # Selective include_relationships
    # Note: tags are always loaded alongside memory (needed for git_enabled)
    agent_state = await server.agent_manager.get_agent_by_id_async(
        agent_id=created_agent.id,
        actor=default_user,
        include_relationships=["memory", "sources"],
    )
    assert agent_state.blocks
    assert agent_state.sources
    assert agent_state.tags  # tags loaded with memory for git_enabled
    assert not agent_state.tools

    # "include" paths override an empty include_relationships
    agent_state = await server.agent_manager.get_agent_by_id_async(
        agent_id=created_agent.id,
        actor=default_user,
        include_relationships=[],
        include=["agent.blocks", "agent.sources"],
    )
    assert agent_state.blocks
    assert agent_state.sources
    assert agent_state.tags  # tags loaded with blocks for git_enabled
    assert not agent_state.tools
@pytest.mark.asyncio
async def test_create_template_agent_with_files_from_sources(server: SyncServer, default_user, print_tool, default_block):
    """Test that agents created from templates properly attach files from their sources.

    Fix: added the missing @pytest.mark.asyncio marker. Every other async test in
    this file carries it; without it, pytest-asyncio in strict mode never runs
    this coroutine test.
    """
    from letta.schemas.file import FileMetadata as PydanticFileMetadata

    memory_blocks = [CreateBlock(label="human", value="TestUser"), CreateBlock(label="persona", value="I am a test assistant")]

    # Create a source with files
    source = await server.source_manager.create_source(
        source=PydanticSource(
            name="test_template_source",
            embedding_config=EmbeddingConfig.default_config(provider="openai"),
        ),
        actor=default_user,
    )

    # Create files in the source
    file1_metadata = PydanticFileMetadata(
        file_name="template_file_1.txt",
        organization_id=default_user.organization_id,
        source_id=source.id,
    )
    await server.file_manager.create_file(file_metadata=file1_metadata, actor=default_user, text="content for file 1")
    file2_metadata = PydanticFileMetadata(
        file_name="template_file_2.txt",
        organization_id=default_user.organization_id,
        source_id=source.id,
    )
    await server.file_manager.create_file(file_metadata=file2_metadata, actor=default_user, text="content for file 2")

    # Create agent using InternalTemplateAgentCreate with the source
    create_agent_request = InternalTemplateAgentCreate(
        name="test_template_agent_with_files",
        agent_type="memgpt_v2_agent",
        system="test system",
        memory_blocks=memory_blocks,
        llm_config=LLMConfig.default_config("gpt-4o-mini"),
        embedding_config=EmbeddingConfig.default_config(provider="openai"),
        block_ids=[default_block.id],
        tool_ids=[print_tool.id],
        source_ids=[source.id],  # Attach the source with files
        include_base_tools=False,
        base_template_id="base_template_123",
        template_id="template_456",
        deployment_id="deployment_789",
        entity_id="entity_012",
    )
    # Create the agent
    created_agent = await server.agent_manager.create_agent_async(
        create_agent_request,
        actor=default_user,
    )
    # Verify agent was created
    assert created_agent is not None
    assert created_agent.name == "test_template_agent_with_files"

    # Verify that the source is attached
    attached_sources = await server.agent_manager.list_attached_sources_async(agent_id=created_agent.id, actor=default_user)
    assert len(attached_sources) == 1
    assert attached_sources[0].id == source.id

    # Verify that files from the source are attached to the agent
    attached_files = await server.file_agent_manager.list_files_for_agent(
        created_agent.id, per_file_view_window_char_limit=created_agent.per_file_view_window_char_limit, actor=default_user
    )
    # Should have both files attached
    assert len(attached_files) == 2
    attached_file_names = {f.file_name for f in attached_files}
    assert "template_file_1.txt" in attached_file_names
    assert "template_file_2.txt" in attached_file_names

    # Verify files are properly linked to the source
    for attached_file in attached_files:
        assert attached_file.source_id == source.id

    # Clean up
    await server.agent_manager.delete_agent_async(created_agent.id, default_user)
    await server.source_manager.delete_source(source.id, default_user)
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/managers/test_agent_manager.py",
"license": "Apache License 2.0",
"lines": 1865,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/managers/test_agent_tag_manager.py | import asyncio
import time
import pytest
# Import shared fixtures and constants from conftest
from conftest import (
CREATE_DELAY_SQLITE,
USING_SQLITE,
)
from letta.schemas.agent import CreateAgent, UpdateAgent
from letta.schemas.embedding_config import EmbeddingConfig
from letta.schemas.llm_config import LLMConfig
from letta.schemas.organization import Organization as PydanticOrganization
from letta.schemas.user import User as PydanticUser
from letta.server.server import SyncServer
# ======================================================================================================================
# AgentManager Tests - Tags Relationship
# ======================================================================================================================
@pytest.mark.asyncio
async def test_list_agents_matching_all_tags(server: SyncServer, default_user, agent_with_tags):
    """Only agents carrying every tag in ``match_all`` are returned."""
    matches = await server.agent_manager.list_agents_matching_tags_async(
        actor=default_user,
        match_all=["primary_agent", "benefit_1"],
        match_some=[],
    )
    # agent1 and agent3 are the only fixture agents tagged with both values
    assert len(matches) == 2
    assert {agent.name for agent in matches} == {"agent1", "agent3"}
@pytest.mark.asyncio
async def test_list_agents_matching_some_tags(server: SyncServer, default_user, agent_with_tags):
    """``match_some`` requires only one of the listed tags to be present."""
    matches = await server.agent_manager.list_agents_matching_tags_async(
        actor=default_user,
        match_all=["primary_agent"],
        match_some=["benefit_1", "benefit_2"],
    )
    # every fixture agent has "primary_agent" plus at least one benefit tag
    assert len(matches) == 3
    assert {agent.name for agent in matches} == {"agent1", "agent2", "agent3"}
@pytest.mark.asyncio
async def test_list_agents_matching_all_and_some_tags(server: SyncServer, default_user, agent_with_tags):
    """Combining ``match_all`` with ``match_some`` intersects both filters."""
    matches = await server.agent_manager.list_agents_matching_tags_async(
        actor=default_user,
        match_all=["primary_agent", "benefit_1"],
        match_some=["benefit_2", "nonexistent"],
    )
    # agent3 is the only fixture satisfying both constraints at once
    assert [agent.name for agent in matches] == ["agent3"]
@pytest.mark.asyncio
async def test_list_agents_matching_no_tags(server: SyncServer, default_user, agent_with_tags):
    """A ``match_all`` tag carried by nobody excludes every agent."""
    matches = await server.agent_manager.list_agents_matching_tags_async(
        actor=default_user,
        match_all=["primary_agent", "nonexistent_tag"],
        match_some=["benefit_1", "benefit_2"],
    )
    # "nonexistent_tag" is unused, so the conjunction filters everything out
    assert not matches
@pytest.mark.asyncio
async def test_list_agents_by_tags_match_all(server: SyncServer, sarah_agent, charles_agent, default_user):
    """Test listing agents that have ALL specified tags."""
    # Give the two fixture agents overlapping tag sets
    tagging = (
        (sarah_agent, ["test", "production", "gpt4"]),
        (charles_agent, ["test", "development", "gpt4"]),
    )
    for agent, tag_list in tagging:
        await server.agent_manager.update_agent_async(agent.id, UpdateAgent(tags=tag_list), actor=default_user)
    # Tags shared by both agents -> both returned
    found = await server.agent_manager.list_agents_async(actor=default_user, tags=["test", "gpt4"], match_all_tags=True)
    assert len(found) == 2
    assert {a.id for a in found} == {sarah_agent.id, charles_agent.id}
    # "production" narrows the conjunction down to sarah_agent alone
    found = await server.agent_manager.list_agents_async(actor=default_user, tags=["test", "production"], match_all_tags=True)
    assert len(found) == 1
    assert found[0].id == sarah_agent.id
@pytest.mark.asyncio
async def test_list_agents_by_tags_match_any(server: SyncServer, sarah_agent, charles_agent, default_user):
    """Test listing agents that have ANY of the specified tags."""
    # Tag the two fixture agents with disjoint tag sets
    await server.agent_manager.update_agent_async(sarah_agent.id, UpdateAgent(tags=["production", "gpt4"]), actor=default_user)
    await server.agent_manager.update_agent_async(charles_agent.id, UpdateAgent(tags=["development", "gpt3"]), actor=default_user)
    # A disjunctive query spanning both tag sets returns both agents
    found = await server.agent_manager.list_agents_async(actor=default_user, tags=["production", "development"], match_all_tags=False)
    assert len(found) == 2
    assert {a.id for a in found} == {sarah_agent.id, charles_agent.id}
    # Only sarah_agent carries "production"; the second tag matches nobody
    found = await server.agent_manager.list_agents_async(actor=default_user, tags=["production", "nonexistent"], match_all_tags=False)
    assert len(found) == 1
    assert found[0].id == sarah_agent.id
@pytest.mark.asyncio
async def test_list_agents_by_tags_no_matches(server: SyncServer, sarah_agent, charles_agent, default_user):
    """Test listing agents when no tags match."""
    # Tag the fixtures so the store is non-empty
    await server.agent_manager.update_agent_async(sarah_agent.id, UpdateAgent(tags=["production", "gpt4"]), actor=default_user)
    await server.agent_manager.update_agent_async(charles_agent.id, UpdateAgent(tags=["development", "gpt3"]), actor=default_user)
    # Unknown tags yield an empty result under both matching modes
    for match_all in (True, False):
        found = await server.agent_manager.list_agents_async(
            actor=default_user, tags=["nonexistent1", "nonexistent2"], match_all_tags=match_all
        )
        assert len(found) == 0
@pytest.mark.asyncio
async def test_list_agents_by_tags_with_other_filters(server: SyncServer, sarah_agent, charles_agent, default_user):
    """Test combining tag search with other filters."""
    # Rename and retag both fixtures; they end up sharing the "production" tag
    await server.agent_manager.update_agent_async(
        sarah_agent.id, UpdateAgent(name="production_agent", tags=["production", "gpt4"]), actor=default_user
    )
    await server.agent_manager.update_agent_async(
        charles_agent.id, UpdateAgent(name="test_agent", tags=["production", "gpt3"]), actor=default_user
    )
    # The name filter disambiguates between the two tag matches
    found = await server.agent_manager.list_agents_async(
        actor=default_user, tags=["production"], match_all_tags=True, name="production_agent"
    )
    assert len(found) == 1
    assert found[0].id == sarah_agent.id
@pytest.mark.asyncio
async def test_list_agents_by_tags_pagination(server: SyncServer, default_user, default_organization):
    """Test pagination when listing agents by tags.

    Creates two agents sharing the "pagination_test" tag, then walks the
    tag-filtered listing forward with ``after`` and back with ``before``,
    one record per page.
    """
    # Create first agent
    agent1 = await server.agent_manager.create_agent_async(
        agent_create=CreateAgent(
            name="agent1",
            agent_type="memgpt_v2_agent",
            tags=["pagination_test", "tag1"],
            llm_config=LLMConfig.default_config("gpt-4o-mini"),
            embedding_config=EmbeddingConfig.default_config(provider="openai"),
            memory_blocks=[],
            include_base_tools=False,
        ),
        actor=default_user,
    )
    if USING_SQLITE:
        # SQLite timestamp resolution can collide; distinct created_at values
        # are required for a deterministic pagination order.
        time.sleep(CREATE_DELAY_SQLITE)  # Ensure distinct created_at timestamps
    # Create second agent
    agent2 = await server.agent_manager.create_agent_async(
        agent_create=CreateAgent(
            name="agent2",
            agent_type="memgpt_v2_agent",
            tags=["pagination_test", "tag2"],
            llm_config=LLMConfig.default_config("gpt-4o-mini"),
            embedding_config=EmbeddingConfig.default_config(provider="openai"),
            memory_blocks=[],
            include_base_tools=False,
        ),
        actor=default_user,
    )
    # Get first page (limit=1 forces one agent per page)
    first_page = await server.agent_manager.list_agents_async(actor=default_user, tags=["pagination_test"], match_all_tags=True, limit=1)
    assert len(first_page) == 1
    first_agent_id = first_page[0].id
    # Get second page using cursor; it must not repeat the first record
    second_page = await server.agent_manager.list_agents_async(
        actor=default_user, tags=["pagination_test"], match_all_tags=True, after=first_agent_id, limit=1
    )
    assert len(second_page) == 1
    assert second_page[0].id != first_agent_id
    # Get previous page using before; this should recover the first record
    prev_page = await server.agent_manager.list_agents_async(
        actor=default_user, tags=["pagination_test"], match_all_tags=True, before=second_page[0].id, limit=1
    )
    assert len(prev_page) == 1
    assert prev_page[0].id == first_agent_id
    # Verify we got both agents with no duplicates across the two pages
    all_ids = {first_page[0].id, second_page[0].id}
    assert len(all_ids) == 2
    assert agent1.id in all_ids
    assert agent2.id in all_ids
@pytest.mark.asyncio
async def test_list_agents_query_text_pagination(server: SyncServer, default_user, default_organization):
    """Test listing agents with query text filtering and pagination.

    Creates three agents — two whose name/description match "search agent"
    and one that does not — then checks query_text filtering, forward
    cursor pagination, and a combined before/after window.
    """
    # Create test agents with specific names and descriptions
    agent1 = await server.agent_manager.create_agent_async(
        agent_create=CreateAgent(
            name="Search Agent One",
            agent_type="memgpt_v2_agent",
            memory_blocks=[],
            description="This is a search agent for testing",
            llm_config=LLMConfig.default_config("gpt-4o-mini"),
            embedding_config=EmbeddingConfig.default_config(provider="openai"),
            include_base_tools=False,
        ),
        actor=default_user,
    )
    # at least 1 second to force unique timestamps in sqlite for deterministic pagination assertions
    await asyncio.sleep(1.1)
    agent2 = await server.agent_manager.create_agent_async(
        agent_create=CreateAgent(
            name="Search Agent Two",
            agent_type="memgpt_v2_agent",
            memory_blocks=[],
            description="Another search agent for testing",
            llm_config=LLMConfig.default_config("gpt-4o-mini"),
            embedding_config=EmbeddingConfig.default_config(provider="openai"),
            include_base_tools=False,
        ),
        actor=default_user,
    )
    # at least 1 second to force unique timestamps in sqlite for deterministic pagination assertions
    await asyncio.sleep(1.1)
    agent3 = await server.agent_manager.create_agent_async(
        agent_create=CreateAgent(
            name="Different Agent",
            agent_type="memgpt_v2_agent",
            memory_blocks=[],
            description="This is a different agent",
            llm_config=LLMConfig.default_config("gpt-4o-mini"),
            embedding_config=EmbeddingConfig.default_config(provider="openai"),
            include_base_tools=False,
        ),
        actor=default_user,
    )
    # Test query text filtering: "search agent" matches agent1/agent2 only
    search_results = await server.agent_manager.list_agents_async(actor=default_user, query_text="search agent")
    assert len(search_results) == 2
    search_agent_ids = {agent.id for agent in search_results}
    assert agent1.id in search_agent_ids
    assert agent2.id in search_agent_ids
    assert agent3.id not in search_agent_ids
    different_results = await server.agent_manager.list_agents_async(actor=default_user, query_text="different agent")
    assert len(different_results) == 1
    assert different_results[0].id == agent3.id
    # Test pagination with query text
    first_page = await server.agent_manager.list_agents_async(actor=default_user, query_text="search agent", limit=1)
    assert len(first_page) == 1
    first_agent_id = first_page[0].id
    # Get second page using cursor; it must not repeat the first record
    second_page = await server.agent_manager.list_agents_async(actor=default_user, query_text="search agent", after=first_agent_id, limit=1)
    assert len(second_page) == 1
    assert second_page[0].id != first_agent_id
    # Test before and after combined: the open interval (first, third)
    # should contain exactly the middle agent
    all_agents = await server.agent_manager.list_agents_async(actor=default_user, query_text="agent")
    assert len(all_agents) == 3
    first_agent, second_agent, third_agent = all_agents
    middle_agent = await server.agent_manager.list_agents_async(
        actor=default_user, query_text="search agent", before=third_agent.id, after=first_agent.id
    )
    assert len(middle_agent) == 1
    assert middle_agent[0].id == second_agent.id
    # Verify we got both search agents with no duplicates
    all_ids = {first_page[0].id, second_page[0].id}
    assert len(all_ids) == 2
    assert all_ids == {agent1.id, agent2.id}
@pytest.mark.asyncio
async def test_list_tags(server: SyncServer, default_user, default_organization):
    """Test listing tags functionality.

    Covers alphabetical ordering, limit, ``after`` cursor, substring
    search, and organization scoping of ``list_tags_async``.
    """
    # Create multiple agents with different tags
    agents = []
    tags = ["alpha", "beta", "gamma", "delta", "epsilon"]
    # Create agents with different combinations of tags
    for i in range(3):
        agent = await server.agent_manager.create_agent_async(
            actor=default_user,
            agent_create=CreateAgent(
                name="tag_agent_" + str(i),
                agent_type="memgpt_v2_agent",
                memory_blocks=[],
                llm_config=LLMConfig.default_config("gpt-4o-mini"),
                embedding_config=EmbeddingConfig.default_config(provider="openai"),
                tags=tags[i : i + 3],  # Each agent gets 3 consecutive tags
                include_base_tools=False,
            ),
        )
        agents.append(agent)
    # Test basic listing - should return all unique tags in alphabetical order
    all_tags = await server.agent_manager.list_tags_async(actor=default_user)
    assert all_tags == sorted(tags[:5])  # All tags should be present and sorted
    # Test pagination with limit
    limited_tags = await server.agent_manager.list_tags_async(actor=default_user, limit=2)
    assert limited_tags == tags[:2]  # Should return first 2 tags ("alpha", "beta" sort first)
    # Test pagination with cursor: "after" is exclusive and applies to the sorted order
    cursor_tags = await server.agent_manager.list_tags_async(actor=default_user, after="beta")
    assert cursor_tags == ["delta", "epsilon", "gamma"]  # Tags after "beta"
    # Test text search (substring match)
    search_tags = await server.agent_manager.list_tags_async(actor=default_user, query_text="ta")
    assert search_tags == ["beta", "delta"]  # Only tags containing "ta"
    # Test with non-matching search
    no_match_tags = await server.agent_manager.list_tags_async(actor=default_user, query_text="xyz")
    assert no_match_tags == []  # Should return empty list
    # Test with different organization: tags must be org-scoped
    other_org = await server.organization_manager.create_organization_async(pydantic_org=PydanticOrganization(name="Other Org"))
    other_user = await server.user_manager.create_actor_async(PydanticUser(name="Other User", organization_id=other_org.id))
    # Other org's tags should be empty
    other_org_tags = await server.agent_manager.list_tags_async(actor=other_user)
    assert other_org_tags == []
    # Cleanup
    for agent in agents:
        await server.agent_manager.delete_agent_async(agent.id, actor=default_user)
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/managers/test_agent_tag_manager.py",
"license": "Apache License 2.0",
"lines": 283,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/managers/test_archive_manager.py | import uuid
import pytest
# Import shared fixtures and constants from conftest
from conftest import (
DEFAULT_EMBEDDING_CONFIG,
)
from letta.errors import LettaAgentNotFoundError
from letta.orm.errors import NoResultFound
from letta.schemas.agent import CreateAgent
from letta.schemas.embedding_config import EmbeddingConfig
from letta.schemas.llm_config import LLMConfig
from letta.schemas.passage import Passage as PydanticPassage
from letta.server.server import SyncServer
# ======================================================================================================================
# Archive Manager Tests
# ======================================================================================================================
@pytest.mark.asyncio
async def test_archive_manager_delete_archive_async(server: SyncServer, default_user):
    """Test the delete_archive_async function."""
    # Create an archive and confirm it is readable before deleting it
    doomed = await server.archive_manager.create_archive_async(
        name="test_archive_to_delete",
        description="This archive will be deleted",
        embedding_config=DEFAULT_EMBEDDING_CONFIG,
        actor=default_user,
    )
    fetched = await server.archive_manager.get_archive_by_id_async(archive_id=doomed.id, actor=default_user)
    assert fetched.id == doomed.id
    await server.archive_manager.delete_archive_async(archive_id=doomed.id, actor=default_user)
    # Any lookup of the deleted archive must now fail
    with pytest.raises(Exception):
        await server.archive_manager.get_archive_by_id_async(archive_id=doomed.id, actor=default_user)
@pytest.mark.asyncio
async def test_archive_manager_get_agents_for_archive_async(server: SyncServer, default_user, sarah_agent):
    """Test getting all agents that have access to an archive."""
    shared = await server.archive_manager.create_archive_async(
        name="shared_archive",
        description="Archive shared by multiple agents",
        embedding_config=DEFAULT_EMBEDDING_CONFIG,
        actor=default_user,
    )
    # A second agent so the archive can actually be shared
    extra_agent = await server.agent_manager.create_agent_async(
        agent_create=CreateAgent(
            name="test_agent_2",
            agent_type="memgpt_v2_agent",
            memory_blocks=[],
            llm_config=LLMConfig.default_config("gpt-4o-mini"),
            embedding_config=EmbeddingConfig.default_config(provider="openai"),
            include_base_tools=False,
        ),
        actor=default_user,
    )
    # Attach one owner and one non-owner
    await server.archive_manager.attach_agent_to_archive_async(
        agent_id=sarah_agent.id, archive_id=shared.id, is_owner=True, actor=default_user
    )
    await server.archive_manager.attach_agent_to_archive_async(
        agent_id=extra_agent.id, archive_id=shared.id, is_owner=False, actor=default_user
    )
    # Both attachments must be reported regardless of ownership
    linked = await server.archive_manager.get_agents_for_archive_async(archive_id=shared.id, actor=default_user)
    assert len(linked) == 2
    assert {a.id for a in linked} == {sarah_agent.id, extra_agent.id}
    # Cleanup
    await server.agent_manager.delete_agent_async(extra_agent.id, actor=default_user)
    await server.archive_manager.delete_archive_async(shared.id, actor=default_user)
@pytest.mark.asyncio
async def test_archive_manager_race_condition_handling(server: SyncServer, default_user, sarah_agent):
    """Test that the race condition fix in get_or_create_default_archive_for_agent_async works.

    Simulates a concurrent request winning the create-and-attach race: our
    attach attempt raises IntegrityError after the "other" request has
    already attached an archive, and the manager must fall back to the
    existing archive instead of failing or double-attaching.
    """
    from unittest.mock import patch
    from sqlalchemy.exc import IntegrityError
    agent = await server.agent_manager.create_agent_async(
        agent_create=CreateAgent(
            name="test_agent_race_condition",
            agent_type="memgpt_v2_agent",
            memory_blocks=[],
            llm_config=LLMConfig.default_config("gpt-4o-mini"),
            embedding_config=EmbeddingConfig.default_config(provider="openai"),
            include_base_tools=False,
        ),
        actor=default_user,
    )
    # Record every archive the manager creates during the patched call
    created_archives = []
    original_create = server.archive_manager.create_archive_async
    async def track_create(*args, **kwargs):
        result = await original_create(*args, **kwargs)
        created_archives.append(result)
        return result
    # First, create an archive that will be attached by a "concurrent" request
    concurrent_archive = await server.archive_manager.create_archive_async(
        name=f"{agent.name}'s Archive",
        description="Default archive created automatically",
        embedding_config=DEFAULT_EMBEDDING_CONFIG,
        actor=default_user,
    )
    call_count = 0
    original_attach = server.archive_manager.attach_agent_to_archive_async
    async def failing_attach(*args, **kwargs):
        # First call: emulate losing the race — the concurrent request's
        # attach succeeds, then our own attempt hits the unique constraint.
        nonlocal call_count
        call_count += 1
        if call_count == 1:
            # Simulate another request already attached the agent to an archive
            await original_attach(agent_id=agent.id, archive_id=concurrent_archive.id, is_owner=True, actor=default_user)
            # Now raise the IntegrityError as if our attempt failed
            raise IntegrityError("duplicate key value violates unique constraint", None, None)
        # This shouldn't be called since we already have an archive
        raise Exception("Should not reach here")
    with patch.object(server.archive_manager, "create_archive_async", side_effect=track_create):
        with patch.object(server.archive_manager, "attach_agent_to_archive_async", side_effect=failing_attach):
            archive = await server.archive_manager.get_or_create_default_archive_for_agent_async(agent_state=agent, actor=default_user)
    # The manager must recover by returning the concurrently-attached archive
    assert archive is not None
    assert archive.id == concurrent_archive.id  # Should return the existing archive
    assert archive.name == f"{agent.name}'s Archive"
    # One archive was created in our attempt (but then deleted)
    assert len(created_archives) == 1
    # Verify only one archive is attached to the agent
    archive_ids = await server.agent_manager.get_agent_archive_ids_async(agent_id=agent.id, actor=default_user)
    assert len(archive_ids) == 1
    assert archive_ids[0] == concurrent_archive.id
    # Cleanup
    await server.agent_manager.delete_agent_async(agent.id, actor=default_user)
    await server.archive_manager.delete_archive_async(concurrent_archive.id, actor=default_user)
@pytest.mark.asyncio
async def test_archive_manager_get_agent_from_passage_async(server: SyncServer, default_user, sarah_agent):
    """Test getting the agent ID that owns a passage through its archive."""
    default_archive = await server.archive_manager.get_or_create_default_archive_for_agent_async(
        agent_state=sarah_agent, actor=default_user
    )
    owned_passage = await server.passage_manager.create_agent_passage_async(
        PydanticPassage(
            text="Test passage for agent ownership",
            archive_id=default_archive.id,
            organization_id=default_user.organization_id,
            embedding=[0.1],
            embedding_config=DEFAULT_EMBEDDING_CONFIG,
        ),
        actor=default_user,
    )
    # The passage resolves back to sarah_agent via its archive
    owner_id = await server.archive_manager.get_agent_from_passage_async(passage_id=owned_passage.id, actor=default_user)
    assert owner_id == sarah_agent.id
    # A passage in an archive with no attached agents resolves to None
    orphan_archive = await server.archive_manager.create_archive_async(
        name="orphan_archive", description="Archive with no agents", embedding_config=DEFAULT_EMBEDDING_CONFIG, actor=default_user
    )
    orphan_passage = await server.passage_manager.create_agent_passage_async(
        PydanticPassage(
            text="Orphan passage",
            archive_id=orphan_archive.id,
            organization_id=default_user.organization_id,
            embedding=[0.1],
            embedding_config=DEFAULT_EMBEDDING_CONFIG,
        ),
        actor=default_user,
    )
    owner_id = await server.archive_manager.get_agent_from_passage_async(passage_id=orphan_passage.id, actor=default_user)
    assert owner_id is None
    # Cleanup
    await server.passage_manager.delete_passage_by_id_async(owned_passage.id, actor=default_user)
    await server.passage_manager.delete_passage_by_id_async(orphan_passage.id, actor=default_user)
    await server.archive_manager.delete_archive_async(orphan_archive.id, actor=default_user)
@pytest.mark.asyncio
async def test_archive_manager_create_archive_async(server: SyncServer, default_user):
    """Test creating a new archive with various parameters."""
    # With both a name and a description
    with_desc = await server.archive_manager.create_archive_async(
        name="test_archive_basic", description="Test archive description", embedding_config=DEFAULT_EMBEDDING_CONFIG, actor=default_user
    )
    assert with_desc.id is not None
    assert with_desc.name == "test_archive_basic"
    assert with_desc.description == "Test archive description"
    assert with_desc.organization_id == default_user.organization_id
    # The description is optional and defaults to None
    without_desc = await server.archive_manager.create_archive_async(
        name="test_archive_no_desc", embedding_config=DEFAULT_EMBEDDING_CONFIG, actor=default_user
    )
    assert without_desc.name == "test_archive_no_desc"
    assert without_desc.description is None
    assert without_desc.organization_id == default_user.organization_id
    # Cleanup
    for archive in (with_desc, without_desc):
        await server.archive_manager.delete_archive_async(archive.id, actor=default_user)
@pytest.mark.asyncio
async def test_archive_manager_get_archive_by_id_async(server: SyncServer, default_user):
    """Test retrieving an archive by its ID."""
    created = await server.archive_manager.create_archive_async(
        name="test_get_by_id", description="Archive to test get_by_id", embedding_config=DEFAULT_EMBEDDING_CONFIG, actor=default_user
    )
    # Round-trip: every persisted field comes back unchanged
    fetched = await server.archive_manager.get_archive_by_id_async(archive_id=created.id, actor=default_user)
    assert fetched.id == created.id
    assert fetched.name == "test_get_by_id"
    assert fetched.description == "Archive to test get_by_id"
    assert fetched.organization_id == default_user.organization_id
    # cleanup
    await server.archive_manager.delete_archive_async(created.id, actor=default_user)
    # A random, never-created ID must raise
    with pytest.raises(Exception):
        await server.archive_manager.get_archive_by_id_async(archive_id=str(uuid.uuid4()), actor=default_user)
@pytest.mark.asyncio
async def test_archive_manager_update_archive_async(server: SyncServer, default_user):
    """Test updating archive name and description."""
    archive = await server.archive_manager.create_archive_async(
        name="original_name", description="original description", embedding_config=DEFAULT_EMBEDDING_CONFIG, actor=default_user
    )
    # Updating only the name leaves the description untouched
    result = await server.archive_manager.update_archive_async(archive_id=archive.id, name="updated_name", actor=default_user)
    assert result.id == archive.id
    assert (result.name, result.description) == ("updated_name", "original description")
    # Updating only the description leaves the name untouched
    result = await server.archive_manager.update_archive_async(
        archive_id=archive.id, description="updated description", actor=default_user
    )
    assert (result.name, result.description) == ("updated_name", "updated description")
    # Both fields at once
    result = await server.archive_manager.update_archive_async(
        archive_id=archive.id, name="final_name", description="final description", actor=default_user
    )
    assert (result.name, result.description) == ("final_name", "final description")
    # Re-read to confirm the updates were persisted
    persisted = await server.archive_manager.get_archive_by_id_async(archive_id=archive.id, actor=default_user)
    assert (persisted.name, persisted.description) == ("final_name", "final description")
    # cleanup
    await server.archive_manager.delete_archive_async(archive.id, actor=default_user)
@pytest.mark.asyncio
async def test_archive_manager_list_archives_async(server: SyncServer, default_user, sarah_agent):
    """Test listing archives with various filters and pagination."""
    created = []
    for i in range(5):
        created.append(
            await server.archive_manager.create_archive_async(
                name=f"list_test_archive_{i}",
                description=f"Description {i}",
                embedding_config=DEFAULT_EMBEDDING_CONFIG,
                actor=default_user,
            )
        )
    # Plain listing returns at least what we created
    listed = await server.archive_manager.list_archives_async(actor=default_user, limit=10)
    assert len(listed) >= 5
    # limit caps the page size exactly
    listed = await server.archive_manager.list_archives_async(actor=default_user, limit=3)
    assert len(listed) == 3
    # Exact-name filter returns the single match
    listed = await server.archive_manager.list_archives_async(actor=default_user, name="list_test_archive_2")
    assert len(listed) == 1
    assert listed[0].name == "list_test_archive_2"
    # agent_id filter only sees archives the agent is attached to
    await server.archive_manager.attach_agent_to_archive_async(
        agent_id=sarah_agent.id, archive_id=created[0].id, is_owner=True, actor=default_user
    )
    listed = await server.archive_manager.list_archives_async(actor=default_user, agent_id=sarah_agent.id)
    assert len(listed) >= 1
    assert created[0].id in [a.id for a in listed]
    # Cursor pagination: the second page never repeats the cursor row
    everything = await server.archive_manager.list_archives_async(actor=default_user, limit=100)
    if len(everything) > 2:
        page_one = await server.archive_manager.list_archives_async(actor=default_user, limit=2)
        page_two = await server.archive_manager.list_archives_async(actor=default_user, after=page_one[-1].id, limit=2)
        assert len(page_two) <= 2
        assert page_one[-1].id not in [a.id for a in page_two]
    # Cleanup
    for archive in created:
        await server.archive_manager.delete_archive_async(archive.id, actor=default_user)
@pytest.mark.asyncio
async def test_archive_manager_attach_agent_to_archive_async(server: SyncServer, default_user, sarah_agent):
    """Test attaching agents to archives with ownership settings."""
    # Two archives plus an extra agent to attach
    primary = await server.archive_manager.create_archive_async(
        name="archive_for_attachment_1", embedding_config=DEFAULT_EMBEDDING_CONFIG, actor=default_user
    )
    secondary = await server.archive_manager.create_archive_async(
        name="archive_for_attachment_2", embedding_config=DEFAULT_EMBEDDING_CONFIG, actor=default_user
    )
    extra_agent = await server.agent_manager.create_agent_async(
        agent_create=CreateAgent(
            name="test_attach_agent",
            agent_type="memgpt_v2_agent",
            memory_blocks=[],
            llm_config=LLMConfig.default_config("gpt-4o-mini"),
            embedding_config=EmbeddingConfig.default_config(provider="openai"),
            include_base_tools=False,
        ),
        actor=default_user,
    )
    # Owner attachment is visible via get_agents_for_archive_async
    await server.archive_manager.attach_agent_to_archive_async(
        agent_id=sarah_agent.id, archive_id=primary.id, is_owner=True, actor=default_user
    )
    members = await server.archive_manager.get_agents_for_archive_async(archive_id=primary.id, actor=default_user)
    assert sarah_agent.id in [a.id for a in members]
    # Attach a second agent as non-owner
    await server.archive_manager.attach_agent_to_archive_async(
        agent_id=extra_agent.id, archive_id=primary.id, is_owner=False, actor=default_user
    )
    members = await server.archive_manager.get_agents_for_archive_async(archive_id=primary.id, actor=default_user)
    assert len(members) == 2
    assert extra_agent.id in [a.id for a in members]
    # Re-attaching with a different is_owner updates in place — no duplicate row
    await server.archive_manager.attach_agent_to_archive_async(
        agent_id=extra_agent.id, archive_id=primary.id, is_owner=True, actor=default_user
    )
    members = await server.archive_manager.get_agents_for_archive_async(archive_id=primary.id, actor=default_user)
    assert len(members) == 2
    # Cleanup
    await server.agent_manager.delete_agent_async(extra_agent.id, actor=default_user)
    await server.archive_manager.delete_archive_async(primary.id, actor=default_user)
    await server.archive_manager.delete_archive_async(secondary.id, actor=default_user)
@pytest.mark.asyncio
async def test_archive_manager_detach_agent_from_archive_async(server: SyncServer, default_user):
    """Test detaching agents from archives.

    Attaches two agents to one archive, detaches them one at a time, and
    verifies detach is idempotent (repeating it does not raise).
    """
    # create archive and agents
    archive = await server.archive_manager.create_archive_async(
        name="archive_for_detachment",
        description="Test archive for detachment",
        embedding_config=DEFAULT_EMBEDDING_CONFIG,
        actor=default_user,
    )
    agent1 = await server.agent_manager.create_agent_async(
        agent_create=CreateAgent(
            name="test_detach_agent_1",
            agent_type="memgpt_v2_agent",
            memory_blocks=[],
            llm_config=LLMConfig.default_config("gpt-4o-mini"),
            embedding_config=EmbeddingConfig.default_config(provider="openai"),
            include_base_tools=False,
        ),
        actor=default_user,
    )
    agent2 = await server.agent_manager.create_agent_async(
        agent_create=CreateAgent(
            name="test_detach_agent_2",
            agent_type="memgpt_v2_agent",
            memory_blocks=[],
            llm_config=LLMConfig.default_config("gpt-4o-mini"),
            embedding_config=EmbeddingConfig.default_config(provider="openai"),
            include_base_tools=False,
        ),
        actor=default_user,
    )
    # attach both agents (one owner, one non-owner)
    await server.archive_manager.attach_agent_to_archive_async(agent_id=agent1.id, archive_id=archive.id, is_owner=True, actor=default_user)
    await server.archive_manager.attach_agent_to_archive_async(
        agent_id=agent2.id, archive_id=archive.id, is_owner=False, actor=default_user
    )
    # verify both are attached
    agents = await server.archive_manager.get_agents_for_archive_async(archive_id=archive.id, actor=default_user)
    assert len(agents) == 2
    agent_ids = [a.id for a in agents]
    assert agent1.id in agent_ids
    assert agent2.id in agent_ids
    # detach agent1
    await server.archive_manager.detach_agent_from_archive_async(agent_id=agent1.id, archive_id=archive.id, actor=default_user)
    # verify only agent2 remains
    agents = await server.archive_manager.get_agents_for_archive_async(archive_id=archive.id, actor=default_user)
    assert len(agents) == 1
    agent_ids = [a.id for a in agents]
    assert agent2.id in agent_ids
    assert agent1.id not in agent_ids
    # test idempotency - detach agent1 again (should not error)
    await server.archive_manager.detach_agent_from_archive_async(agent_id=agent1.id, archive_id=archive.id, actor=default_user)
    # verify still only agent2
    agents = await server.archive_manager.get_agents_for_archive_async(archive_id=archive.id, actor=default_user)
    assert len(agents) == 1
    assert agent2.id in [a.id for a in agents]
    # detach agent2
    await server.archive_manager.detach_agent_from_archive_async(agent_id=agent2.id, archive_id=archive.id, actor=default_user)
    # verify archive has no agents
    agents = await server.archive_manager.get_agents_for_archive_async(archive_id=archive.id, actor=default_user)
    assert len(agents) == 0
    # cleanup
    await server.agent_manager.delete_agent_async(agent1.id, actor=default_user)
    await server.agent_manager.delete_agent_async(agent2.id, actor=default_user)
    await server.archive_manager.delete_archive_async(archive.id, actor=default_user)
@pytest.mark.asyncio
async def test_archive_manager_attach_detach_idempotency(server: SyncServer, default_user):
    """Test that attach and detach operations are idempotent."""
    archive_mgr = server.archive_manager
    agent_mgr = server.agent_manager
    # Build one archive and one agent to exercise repeated attach/detach calls.
    archive = await archive_mgr.create_archive_async(
        name="idempotency_test_archive", embedding_config=DEFAULT_EMBEDDING_CONFIG, actor=default_user
    )
    agent = await agent_mgr.create_agent_async(
        agent_create=CreateAgent(
            name="idempotency_test_agent",
            agent_type="memgpt_v2_agent",
            memory_blocks=[],
            llm_config=LLMConfig.default_config("gpt-4o-mini"),
            embedding_config=EmbeddingConfig.default_config(provider="openai"),
            include_base_tools=False,
        ),
        actor=default_user,
    )
    # Attaching twice with identical arguments must not create a duplicate row.
    for _ in range(2):
        await archive_mgr.attach_agent_to_archive_async(
            agent_id=agent.id, archive_id=archive.id, is_owner=False, actor=default_user
        )
    attached = await archive_mgr.get_agents_for_archive_async(archive_id=archive.id, actor=default_user)
    assert len(attached) == 1
    assert agent.id in {a.id for a in attached}
    # Re-attaching with a different ownership flag updates in place rather than inserting.
    await archive_mgr.attach_agent_to_archive_async(agent_id=agent.id, archive_id=archive.id, is_owner=True, actor=default_user)
    attached = await archive_mgr.get_agents_for_archive_async(archive_id=archive.id, actor=default_user)
    assert len(attached) == 1
    # Detaching an agent that was never attached must be a no-op, not an error.
    non_existent_agent = await agent_mgr.create_agent_async(
        agent_create=CreateAgent(
            name="never_attached_agent",
            agent_type="memgpt_v2_agent",
            memory_blocks=[],
            llm_config=LLMConfig.default_config("gpt-4o-mini"),
            embedding_config=EmbeddingConfig.default_config(provider="openai"),
            include_base_tools=False,
        ),
        actor=default_user,
    )
    await archive_mgr.detach_agent_from_archive_async(agent_id=non_existent_agent.id, archive_id=archive.id, actor=default_user)
    # The originally attached agent is untouched by the no-op detach.
    attached = await archive_mgr.get_agents_for_archive_async(archive_id=archive.id, actor=default_user)
    assert len(attached) == 1
    assert agent.id in {a.id for a in attached}
    # Tear down everything this test created.
    await agent_mgr.delete_agent_async(agent.id, actor=default_user)
    await agent_mgr.delete_agent_async(non_existent_agent.id, actor=default_user)
    await archive_mgr.delete_archive_async(archive.id, actor=default_user)
@pytest.mark.asyncio
async def test_archive_manager_detach_with_multiple_archives(server: SyncServer, default_user):
    """Test detaching an agent from one archive doesn't affect others."""
    archive_mgr = server.archive_manager
    agent_mgr = server.agent_manager

    def _spec(name):
        # Minimal agent spec; base tools are skipped so the test DB needs none.
        return CreateAgent(
            name=name,
            agent_type="memgpt_v2_agent",
            memory_blocks=[],
            llm_config=LLMConfig.default_config("gpt-4o-mini"),
            embedding_config=EmbeddingConfig.default_config(provider="openai"),
            include_base_tools=False,
        )

    archive1 = await archive_mgr.create_archive_async(
        name="multi_archive_1", embedding_config=DEFAULT_EMBEDDING_CONFIG, actor=default_user
    )
    archive2 = await archive_mgr.create_archive_async(
        name="multi_archive_2", embedding_config=DEFAULT_EMBEDDING_CONFIG, actor=default_user
    )
    agent1 = await agent_mgr.create_agent_async(agent_create=_spec("multi_test_agent_1"), actor=default_user)
    agent2 = await agent_mgr.create_agent_async(agent_create=_spec("multi_test_agent_2"), actor=default_user)
    # The unique constraint limits each agent to a single archive, so pair them up
    # (agent1 -> archive1, agent2 -> archive2) rather than cross-attaching.
    await archive_mgr.attach_agent_to_archive_async(agent_id=agent1.id, archive_id=archive1.id, is_owner=True, actor=default_user)
    await archive_mgr.attach_agent_to_archive_async(agent_id=agent2.id, archive_id=archive2.id, is_owner=True, actor=default_user)
    # Confirm each agent landed in its own archive.
    members1 = await archive_mgr.get_agents_for_archive_async(archive_id=archive1.id, actor=default_user)
    members2 = await archive_mgr.get_agents_for_archive_async(archive_id=archive2.id, actor=default_user)
    assert agent1.id in {a.id for a in members1}
    assert agent2.id in {a.id for a in members2}
    # Removing agent1 from archive1 must leave archive2's membership intact.
    await archive_mgr.detach_agent_from_archive_async(agent_id=agent1.id, archive_id=archive1.id, actor=default_user)
    members1 = await archive_mgr.get_agents_for_archive_async(archive_id=archive1.id, actor=default_user)
    assert agent1.id not in {a.id for a in members1}
    assert len(members1) == 0
    members2 = await archive_mgr.get_agents_for_archive_async(archive_id=archive2.id, actor=default_user)
    assert agent2.id in {a.id for a in members2}
    assert len(members2) == 1
    # Tear down.
    await agent_mgr.delete_agent_async(agent1.id, actor=default_user)
    await agent_mgr.delete_agent_async(agent2.id, actor=default_user)
    await archive_mgr.delete_archive_async(archive1.id, actor=default_user)
    await archive_mgr.delete_archive_async(archive2.id, actor=default_user)
@pytest.mark.asyncio
async def test_archive_manager_detach_deleted_agent(server: SyncServer, default_user):
    """Test behavior when detaching a deleted agent."""
    archive_mgr = server.archive_manager
    archive = await archive_mgr.create_archive_async(
        name="test_deleted_agent_archive", embedding_config=DEFAULT_EMBEDDING_CONFIG, actor=default_user
    )
    # Create an agent and attach it, then delete the agent out from under the archive.
    agent = await server.agent_manager.create_agent_async(
        agent_create=CreateAgent(
            name="agent_to_be_deleted",
            agent_type="memgpt_v2_agent",
            memory_blocks=[],
            llm_config=LLMConfig.default_config("gpt-4o-mini"),
            embedding_config=EmbeddingConfig.default_config(provider="openai"),
            include_base_tools=False,
        ),
        actor=default_user,
    )
    await archive_mgr.attach_agent_to_archive_async(agent_id=agent.id, archive_id=archive.id, is_owner=True, actor=default_user)
    # Keep the id around: we still need it after the agent row is gone.
    deleted_agent_id = agent.id
    # Deleting the agent cascades away the archive relationship (ondelete="CASCADE").
    await server.agent_manager.delete_agent_async(agent.id, actor=default_user)
    remaining = await archive_mgr.get_agents_for_archive_async(archive_id=archive.id, actor=default_user)
    assert len(remaining) == 0
    # Detaching the now-deleted agent raises.
    # 2025-10-27: used to be idempotent (no error) but now we raise an error
    with pytest.raises(LettaAgentNotFoundError):
        await archive_mgr.detach_agent_from_archive_async(agent_id=deleted_agent_id, archive_id=archive.id, actor=default_user)
    # Tear down.
    await archive_mgr.delete_archive_async(archive.id, actor=default_user)
@pytest.mark.asyncio
async def test_archive_manager_cascade_delete_on_archive_deletion(server: SyncServer, default_user):
    """Test that deleting an archive cascades to delete relationships in archives_agents table.

    Deleting the archive must remove only the join rows: the agents themselves
    survive and are simply left with no archives attached.
    """
    # create archive
    archive = await server.archive_manager.create_archive_async(
        name="archive_to_be_deleted",
        description="This archive will be deleted to test CASCADE",
        embedding_config=DEFAULT_EMBEDDING_CONFIG,
        actor=default_user,
    )
    # create multiple agents and attach them to the archive
    agent1 = await server.agent_manager.create_agent_async(
        agent_create=CreateAgent(
            name="cascade_test_agent_1",
            agent_type="memgpt_v2_agent",
            memory_blocks=[],
            llm_config=LLMConfig.default_config("gpt-4o-mini"),
            embedding_config=EmbeddingConfig.default_config(provider="openai"),
            include_base_tools=False,
        ),
        actor=default_user,
    )
    agent2 = await server.agent_manager.create_agent_async(
        agent_create=CreateAgent(
            name="cascade_test_agent_2",
            agent_type="memgpt_v2_agent",
            memory_blocks=[],
            llm_config=LLMConfig.default_config("gpt-4o-mini"),
            embedding_config=EmbeddingConfig.default_config(provider="openai"),
            include_base_tools=False,
        ),
        actor=default_user,
    )
    # attach both agents to the archive (one owner, one non-owner)
    await server.archive_manager.attach_agent_to_archive_async(agent_id=agent1.id, archive_id=archive.id, is_owner=True, actor=default_user)
    await server.archive_manager.attach_agent_to_archive_async(
        agent_id=agent2.id, archive_id=archive.id, is_owner=False, actor=default_user
    )
    # verify both agents are attached before the delete
    agents = await server.archive_manager.get_agents_for_archive_async(archive_id=archive.id, actor=default_user)
    assert len(agents) == 2
    agent_ids = [a.id for a in agents]
    assert agent1.id in agent_ids
    assert agent2.id in agent_ids
    # save archive id for the post-delete lookup
    archive_id = archive.id
    # delete the archive (should cascade delete the relationships)
    await server.archive_manager.delete_archive_async(archive.id, actor=default_user)
    # verify archive is deleted; a missing archive surfaces as NoResultFound, the
    # same contract the other missing-archive tests in this file assert. The
    # previous `pytest.raises(Exception)` was too broad: it would also pass on
    # unrelated failures (e.g. a typo'd method name raising AttributeError).
    with pytest.raises(NoResultFound):
        await server.archive_manager.get_archive_by_id_async(archive_id=archive_id, actor=default_user)
    # verify agents still exist but have no archives attached
    # (agents should NOT be deleted, only the relationships)
    agent1_still_exists = await server.agent_manager.get_agent_by_id_async(agent1.id, actor=default_user)
    assert agent1_still_exists is not None
    assert agent1_still_exists.id == agent1.id
    agent2_still_exists = await server.agent_manager.get_agent_by_id_async(agent2.id, actor=default_user)
    assert agent2_still_exists is not None
    assert agent2_still_exists.id == agent2.id
    # verify agents no longer have any archives
    agent1_archives = await server.agent_manager.get_agent_archive_ids_async(agent_id=agent1.id, actor=default_user)
    assert len(agent1_archives) == 0
    agent2_archives = await server.agent_manager.get_agent_archive_ids_async(agent_id=agent2.id, actor=default_user)
    assert len(agent2_archives) == 0
    # cleanup agents
    await server.agent_manager.delete_agent_async(agent1.id, actor=default_user)
    await server.agent_manager.delete_agent_async(agent2.id, actor=default_user)
@pytest.mark.asyncio
async def test_archive_manager_list_agents_with_pagination(server: SyncServer, default_user):
    """Test listing agents for an archive with pagination support."""
    archive_mgr = server.archive_manager
    archive = await archive_mgr.create_archive_async(
        name="pagination_test_archive",
        description="Archive for testing pagination",
        embedding_config=DEFAULT_EMBEDDING_CONFIG,
        actor=default_user,
    )
    # Create five agents and attach each one; only the first becomes owner.
    created = []
    for i in range(5):
        new_agent = await server.agent_manager.create_agent_async(
            agent_create=CreateAgent(
                name=f"pagination_test_agent_{i}",
                agent_type="memgpt_v2_agent",
                memory_blocks=[],
                llm_config=LLMConfig.default_config("gpt-4o-mini"),
                embedding_config=EmbeddingConfig.default_config(provider="openai"),
                include_base_tools=False,
            ),
            actor=default_user,
        )
        created.append(new_agent)
        await archive_mgr.attach_agent_to_archive_async(
            agent_id=new_agent.id, archive_id=archive.id, is_owner=(i == 0), actor=default_user
        )
    # A generous limit returns the full membership.
    everyone = await archive_mgr.get_agents_for_archive_async(archive_id=archive.id, actor=default_user, limit=10)
    assert len(everyone) == 5
    everyone_ids = [a.id for a in everyone]
    assert all(agent.id in everyone_ids for agent in created)
    # A smaller limit truncates the result.
    limited = await archive_mgr.get_agents_for_archive_async(archive_id=archive.id, actor=default_user, limit=3)
    assert len(limited) == 3
    # Pagination parameters are accepted and return a subset of the full listing.
    page = await archive_mgr.get_agents_for_archive_async(archive_id=archive.id, actor=default_user, limit=2)
    assert len(page) == 2
    assert all(a.id in everyone_ids for a in page)
    # Both sort directions return the complete membership.
    forward = await archive_mgr.get_agents_for_archive_async(
        archive_id=archive.id, actor=default_user, ascending=True, limit=10
    )
    assert len(forward) == 5
    backward = await archive_mgr.get_agents_for_archive_async(
        archive_id=archive.id, actor=default_user, ascending=False, limit=10
    )
    assert len(backward) == 5
    # The two orderings are permutations of each other.
    assert {a.id for a in forward} == {a.id for a in backward}
    # Tear down.
    for agent in created:
        await server.agent_manager.delete_agent_async(agent.id, actor=default_user)
    await archive_mgr.delete_archive_async(archive.id, actor=default_user)
@pytest.mark.asyncio
async def test_archive_manager_get_default_archive_for_agent_async(server: SyncServer, default_user):
    """Test getting default archive for an agent."""
    archive_mgr = server.archive_manager
    # Start with an agent that has no archive at all.
    agent = await server.agent_manager.create_agent_async(
        agent_create=CreateAgent(
            name="test_default_archive_agent",
            agent_type="memgpt_v2_agent",
            memory_blocks=[],
            llm_config=LLMConfig.default_config("gpt-4o-mini"),
            embedding_config=EmbeddingConfig.default_config(provider="openai"),
            include_base_tools=False,
        ),
        actor=default_user,
    )
    # Without an attached archive the lookup reports None.
    assert await archive_mgr.get_default_archive_for_agent_async(agent_id=agent.id, actor=default_user) is None
    # After attaching an archive the lookup resolves to it.
    created_archive = await archive_mgr.create_archive_async(
        name="default_archive", embedding_config=DEFAULT_EMBEDDING_CONFIG, actor=default_user
    )
    await archive_mgr.attach_agent_to_archive_async(
        agent_id=agent.id, archive_id=created_archive.id, is_owner=True, actor=default_user
    )
    resolved = await archive_mgr.get_default_archive_for_agent_async(agent_id=agent.id, actor=default_user)
    assert resolved is not None
    assert resolved.id == created_archive.id
    # Tear down.
    await server.agent_manager.delete_agent_async(agent.id, actor=default_user)
    await archive_mgr.delete_archive_async(created_archive.id, actor=default_user)
@pytest.mark.asyncio
async def test_archive_manager_get_or_set_vector_db_namespace_async(server: SyncServer, default_user):
    """Test getting or setting vector database namespace for an archive."""
    archive_mgr = server.archive_manager
    archive = await archive_mgr.create_archive_async(
        name="test_vector_namespace", embedding_config=DEFAULT_EMBEDDING_CONFIG, actor=default_user
    )
    # The first call yields a non-empty namespace that embeds the archive id.
    first = await archive_mgr.get_or_set_vector_db_namespace_async(archive_id=archive.id)
    assert first is not None
    assert archive.id in first
    # Repeated calls are stable: the same namespace comes back every time.
    second = await archive_mgr.get_or_set_vector_db_namespace_async(archive_id=archive.id)
    assert first == second
    # Tear down.
    await archive_mgr.delete_archive_async(archive.id, actor=default_user)
@pytest.mark.asyncio
async def test_archive_manager_get_agents_with_include_parameter(server: SyncServer, default_user):
    """Test getting agents for an archive with include parameter to load relationships.

    Without `include`, relationship fields (tools, tags) come back as empty
    lists; passing `include=["agent.<rel>"]` loads the named relationship(s).
    """
    # create an archive
    archive = await server.archive_manager.create_archive_async(
        name="test_include_archive",
        description="Test archive for include parameter",
        embedding_config=DEFAULT_EMBEDDING_CONFIG,
        actor=default_user,
    )
    # create agent without base tools (to avoid needing tools in test DB)
    agent = await server.agent_manager.create_agent_async(
        agent_create=CreateAgent(
            name="test_include_agent",
            agent_type="memgpt_v2_agent",
            memory_blocks=[],
            llm_config=LLMConfig.default_config("gpt-4o-mini"),
            embedding_config=EmbeddingConfig.default_config(provider="openai"),
            include_base_tools=False,
        ),
        actor=default_user,
    )
    # attach agent to archive
    await server.archive_manager.attach_agent_to_archive_async(agent_id=agent.id, archive_id=archive.id, is_owner=True, actor=default_user)
    # test without include parameter (default - no relationships loaded)
    agents_no_include = await server.archive_manager.get_agents_for_archive_async(archive_id=archive.id, actor=default_user)
    assert len(agents_no_include) == 1
    # By default, tools should be empty list (not loaded)
    assert agents_no_include[0].tools == []
    # By default, tags should also be empty (not loaded)
    assert agents_no_include[0].tags == []
    # test with include parameter to load tags
    agents_with_tags = await server.archive_manager.get_agents_for_archive_async(
        archive_id=archive.id, actor=default_user, include=["agent.tags"]
    )
    assert len(agents_with_tags) == 1
    # With include, tags should be loaded (as a list, even if empty)
    assert isinstance(agents_with_tags[0].tags, list)
    # test with include parameter to load blocks
    agents_with_blocks = await server.archive_manager.get_agents_for_archive_async(
        archive_id=archive.id, actor=default_user, include=["agent.blocks"]
    )
    assert len(agents_with_blocks) == 1
    # With include, blocks should be loaded
    assert isinstance(agents_with_blocks[0].blocks, list)
    # NOTE(review): a previous `len(blocks) >= 0` assertion here was vacuously
    # true (len() is never negative) and was removed; whether memory_blocks=[]
    # yields default blocks is not established by this test.
    # test with multiple includes
    agents_with_multiple = await server.archive_manager.get_agents_for_archive_async(
        archive_id=archive.id, actor=default_user, include=["agent.tags", "agent.blocks", "agent.tools"]
    )
    assert len(agents_with_multiple) == 1
    # All requested relationships should be loaded
    assert isinstance(agents_with_multiple[0].tags, list)
    assert isinstance(agents_with_multiple[0].blocks, list)
    assert isinstance(agents_with_multiple[0].tools, list)
    # cleanup
    await server.agent_manager.delete_agent_async(agent.id, actor=default_user)
    await server.archive_manager.delete_archive_async(archive.id, actor=default_user)
@pytest.mark.asyncio
async def test_archive_manager_delete_passage_from_archive_async(server: SyncServer, default_user):
    """Test deleting a passage from an archive."""
    archive_mgr = server.archive_manager
    passage_mgr = server.passage_manager
    archive = await archive_mgr.create_archive_async(
        name="test_passage_deletion_archive",
        description="Archive for testing passage deletion",
        embedding_config=DEFAULT_EMBEDDING_CONFIG,
        actor=default_user,
    )

    async def _add_passage(text, embedding):
        # Store one passage in the archive under the default embedding config.
        return await passage_mgr.create_agent_passage_async(
            PydanticPassage(
                text=text,
                archive_id=archive.id,
                organization_id=default_user.organization_id,
                embedding=embedding,
                embedding_config=DEFAULT_EMBEDDING_CONFIG,
            ),
            actor=default_user,
        )

    passage1 = await _add_passage("First test passage", [0.1, 0.2])
    passage2 = await _add_passage("Second test passage", [0.3, 0.4])
    # Both passages are retrievable and scoped to the archive.
    fetched1 = await passage_mgr.get_agent_passage_by_id_async(passage_id=passage1.id, actor=default_user)
    assert fetched1.id == passage1.id
    assert fetched1.archive_id == archive.id
    fetched2 = await passage_mgr.get_agent_passage_by_id_async(passage_id=passage2.id, actor=default_user)
    assert fetched2.id == passage2.id
    # Remove only the first passage via the archive manager.
    await archive_mgr.delete_passage_from_archive_async(archive_id=archive.id, passage_id=passage1.id, actor=default_user)
    # The deleted passage is gone...
    with pytest.raises(NoResultFound):
        await passage_mgr.get_agent_passage_by_id_async(passage_id=passage1.id, actor=default_user)
    # ...while its sibling survives.
    fetched2 = await passage_mgr.get_agent_passage_by_id_async(passage_id=passage2.id, actor=default_user)
    assert fetched2.id == passage2.id
    # Tear down.
    await passage_mgr.delete_agent_passage_by_id_async(passage2.id, actor=default_user)
    await archive_mgr.delete_archive_async(archive.id, actor=default_user)
@pytest.mark.asyncio
async def test_archive_manager_delete_passage_from_wrong_archive(server: SyncServer, default_user):
    """Test that deleting a passage from the wrong archive raises an error."""
    archive_mgr = server.archive_manager
    passage_mgr = server.passage_manager
    archive1 = await archive_mgr.create_archive_async(
        name="archive_1", embedding_config=DEFAULT_EMBEDDING_CONFIG, actor=default_user
    )
    archive2 = await archive_mgr.create_archive_async(
        name="archive_2", embedding_config=DEFAULT_EMBEDDING_CONFIG, actor=default_user
    )
    # The passage lives in archive1 only.
    passage = await passage_mgr.create_agent_passage_async(
        PydanticPassage(
            text="Passage in archive 1",
            archive_id=archive1.id,
            organization_id=default_user.organization_id,
            embedding=[0.1, 0.2],
            embedding_config=DEFAULT_EMBEDDING_CONFIG,
        ),
        actor=default_user,
    )
    # Deleting through the other archive is rejected with a ValueError.
    with pytest.raises(ValueError, match="does not belong to archive"):
        await archive_mgr.delete_passage_from_archive_async(archive_id=archive2.id, passage_id=passage.id, actor=default_user)
    # The failed delete left the passage in place.
    survivor = await passage_mgr.get_agent_passage_by_id_async(passage_id=passage.id, actor=default_user)
    assert survivor.id == passage.id
    # Tear down.
    await passage_mgr.delete_agent_passage_by_id_async(passage.id, actor=default_user)
    await archive_mgr.delete_archive_async(archive1.id, actor=default_user)
    await archive_mgr.delete_archive_async(archive2.id, actor=default_user)
@pytest.mark.asyncio
async def test_archive_manager_delete_nonexistent_passage(server: SyncServer, default_user):
    """Test that deleting a non-existent passage raises an error."""
    archive = await server.archive_manager.create_archive_async(
        name="test_nonexistent_passage_archive", embedding_config=DEFAULT_EMBEDDING_CONFIG, actor=default_user
    )
    # A well-formed but unknown passage id must surface as NoResultFound.
    missing_passage_id = f"passage-{uuid.uuid4()}"
    with pytest.raises(NoResultFound):
        await server.archive_manager.delete_passage_from_archive_async(
            archive_id=archive.id, passage_id=missing_passage_id, actor=default_user
        )
    # Tear down.
    await server.archive_manager.delete_archive_async(archive.id, actor=default_user)
@pytest.mark.asyncio
async def test_archive_manager_delete_passage_from_nonexistent_archive(server: SyncServer, default_user):
    """Test that deleting a passage from a non-existent archive raises an error."""
    archive_mgr = server.archive_manager
    passage_mgr = server.passage_manager
    # Set up a real archive holding one passage.
    archive = await archive_mgr.create_archive_async(
        name="temp_archive", embedding_config=DEFAULT_EMBEDDING_CONFIG, actor=default_user
    )
    passage = await passage_mgr.create_agent_passage_async(
        PydanticPassage(
            text="Test passage",
            archive_id=archive.id,
            organization_id=default_user.organization_id,
            embedding=[0.1, 0.2],
            embedding_config=DEFAULT_EMBEDDING_CONFIG,
        ),
        actor=default_user,
    )
    # A well-formed but unknown archive id must surface as NoResultFound.
    missing_archive_id = f"archive-{uuid.uuid4()}"
    with pytest.raises(NoResultFound):
        await archive_mgr.delete_passage_from_archive_async(
            archive_id=missing_archive_id, passage_id=passage.id, actor=default_user
        )
    # The failed delete left the passage in place.
    survivor = await passage_mgr.get_agent_passage_by_id_async(passage_id=passage.id, actor=default_user)
    assert survivor.id == passage.id
    # Tear down.
    await passage_mgr.delete_agent_passage_by_id_async(passage.id, actor=default_user)
    await archive_mgr.delete_archive_async(archive.id, actor=default_user)
@pytest.mark.asyncio
async def test_archive_manager_create_passage_in_archive_async(server: SyncServer, default_user):
    """Test creating a passage in an archive."""
    archive_mgr = server.archive_manager
    archive = await archive_mgr.create_archive_async(
        name="test_passage_creation_archive",
        description="Archive for testing passage creation",
        embedding_config=DEFAULT_EMBEDDING_CONFIG,
        actor=default_user,
    )
    # Create a passage through the archive manager's convenience API.
    created_passage = await archive_mgr.create_passage_in_archive_async(
        archive_id=archive.id,
        text="This is a test passage for creation",
        actor=default_user,
    )
    # The returned passage is populated and scoped to the archive and org.
    assert created_passage.id is not None
    assert created_passage.text == "This is a test passage for creation"
    assert created_passage.archive_id == archive.id
    assert created_passage.organization_id == default_user.organization_id
    # A round-trip read returns the same record.
    round_trip = await server.passage_manager.get_agent_passage_by_id_async(passage_id=created_passage.id, actor=default_user)
    assert round_trip.id == created_passage.id
    assert round_trip.text == created_passage.text
    assert round_trip.archive_id == archive.id
    # Tear down.
    await server.passage_manager.delete_agent_passage_by_id_async(created_passage.id, actor=default_user)
    await archive_mgr.delete_archive_async(archive.id, actor=default_user)
@pytest.mark.asyncio
async def test_archive_manager_create_passage_with_metadata_and_tags(server: SyncServer, default_user):
    """Test creating a passage with metadata and tags."""
    archive = await server.archive_manager.create_archive_async(
        name="test_passage_metadata_archive",
        embedding_config=DEFAULT_EMBEDDING_CONFIG,
        actor=default_user,
    )
    expected_metadata = {"source": "unit_test", "version": 1}
    expected_tags = ["test", "archive", "passage"]
    created_passage = await server.archive_manager.create_passage_in_archive_async(
        archive_id=archive.id,
        text="Passage with metadata and tags",
        metadata=expected_metadata,
        tags=expected_tags,
        actor=default_user,
    )
    # Metadata round-trips exactly; tag ordering is not guaranteed, so compare sets.
    assert created_passage.metadata == expected_metadata
    assert set(created_passage.tags) == set(expected_tags)
    # The same holds after re-reading from storage.
    stored = await server.passage_manager.get_agent_passage_by_id_async(passage_id=created_passage.id, actor=default_user)
    assert stored.metadata == expected_metadata
    assert set(stored.tags) == set(expected_tags)
    # Tear down.
    await server.passage_manager.delete_agent_passage_by_id_async(created_passage.id, actor=default_user)
    await server.archive_manager.delete_archive_async(archive.id, actor=default_user)
@pytest.mark.asyncio
async def test_archive_manager_create_passage_in_nonexistent_archive(server: SyncServer, default_user):
    """Test that creating a passage in a non-existent archive raises an error."""
    # A well-formed but unknown archive id must be rejected with NoResultFound.
    missing_archive_id = f"archive-{uuid.uuid4()}"
    with pytest.raises(NoResultFound):
        await server.archive_manager.create_passage_in_archive_async(
            archive_id=missing_archive_id,
            text="This should fail",
            actor=default_user,
        )
@pytest.mark.asyncio
async def test_archive_manager_create_passage_inherits_embedding_config(server: SyncServer, default_user):
    """Test that created passages inherit the archive's embedding configuration."""
    # Give the archive a specific embedding configuration to inherit from.
    expected_config = EmbeddingConfig.default_config(provider="openai")
    archive = await server.archive_manager.create_archive_async(
        name="test_embedding_inheritance_archive",
        embedding_config=expected_config,
        actor=default_user,
    )
    created_passage = await server.archive_manager.create_passage_in_archive_async(
        archive_id=archive.id,
        text="Test passage for embedding config inheritance",
        actor=default_user,
    )
    # The passage takes its embedding settings from the owning archive.
    inherited = created_passage.embedding_config
    assert inherited is not None
    assert inherited.embedding_endpoint_type == expected_config.embedding_endpoint_type
    assert inherited.embedding_model == expected_config.embedding_model
    assert inherited.embedding_dim == expected_config.embedding_dim
    # Tear down.
    await server.passage_manager.delete_agent_passage_by_id_async(created_passage.id, actor=default_user)
    await server.archive_manager.delete_archive_async(archive.id, actor=default_user)
@pytest.mark.asyncio
async def test_archive_manager_create_multiple_passages_in_archive(server: SyncServer, default_user):
    """Test creating multiple passages in the same archive."""
    archive = await server.archive_manager.create_archive_async(
        name="test_multiple_passages_archive",
        embedding_config=DEFAULT_EMBEDDING_CONFIG,
        actor=default_user,
    )
    # Populate the archive with three distinct passages.
    passages = [
        await server.archive_manager.create_passage_in_archive_async(
            archive_id=archive.id,
            text=f"Test passage number {i}",
            metadata={"index": i},
            tags=[f"passage_{i}"],
            actor=default_user,
        )
        for i in range(3)
    ]
    # Each passage kept its own text, metadata, and tag, and all share the archive.
    for i, passage in enumerate(passages):
        assert passage.text == f"Test passage number {i}"
        assert passage.metadata["index"] == i
        assert f"passage_{i}" in passage.tags
        assert passage.archive_id == archive.id
    # Tear down.
    for passage in passages:
        await server.passage_manager.delete_agent_passage_by_id_async(passage.id, actor=default_user)
    await server.archive_manager.delete_archive_async(archive.id, actor=default_user)
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/managers/test_archive_manager.py",
"license": "Apache License 2.0",
"lines": 997,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/managers/test_block_manager.py | import logging
import random
import string
import time
import uuid
import pytest
# Import shared fixtures and constants from conftest
from conftest import (
CREATE_DELAY_SQLITE,
USING_SQLITE,
)
from sqlalchemy.orm.exc import StaleDataError
from letta.errors import LettaInvalidArgumentError
from letta.orm import Block
from letta.orm.block_history import BlockHistory
from letta.orm.errors import NoResultFound, UniqueConstraintViolationError
from letta.schemas.agent import CreateAgent
from letta.schemas.block import Block as PydanticBlock, BlockUpdate
from letta.schemas.embedding_config import EmbeddingConfig
from letta.schemas.enums import (
ActorType,
)
from letta.schemas.llm_config import LLMConfig
from letta.schemas.user import User as PydanticUser
from letta.server.db import db_registry
from letta.server.server import SyncServer
from letta.services.block_manager import BlockManager
from tests.utils import random_string
# ======================================================================================================================
# AgentManager Tests - Blocks Relationship
# ======================================================================================================================
@pytest.mark.asyncio
async def test_attach_block(server: SyncServer, sarah_agent, default_block, default_user):
    """Test attaching a block to an agent."""
    await server.agent_manager.attach_block_async(agent_id=sarah_agent.id, block_id=default_block.id, actor=default_user)
    # The agent's memory now contains exactly the attached block.
    refreshed = await server.agent_manager.get_agent_by_id_async(sarah_agent.id, actor=default_user)
    assert len(refreshed.memory.blocks) == 1
    attached = refreshed.memory.blocks[0]
    assert attached.id == default_block.id
    assert attached.label == default_block.label
# Test should work with both SQLite and PostgreSQL
@pytest.mark.asyncio
async def test_attach_block_duplicate_label(server: SyncServer, sarah_agent, default_block, other_block, default_user):
    """Test attempting to attach a block with a duplicate label."""
    # Give both blocks the same label so the second attach collides.
    for block in (default_block, other_block):
        await server.block_manager.update_block_async(block.id, BlockUpdate(label="same_label"), actor=default_user)
    # The first attach succeeds.
    await server.agent_manager.attach_block_async(agent_id=sarah_agent.id, block_id=default_block.id, actor=default_user)
    # A second block carrying the same label violates the uniqueness constraint.
    with pytest.raises(UniqueConstraintViolationError):
        await server.agent_manager.attach_block_async(agent_id=sarah_agent.id, block_id=other_block.id, actor=default_user)
@pytest.mark.asyncio
async def test_detach_block(server: SyncServer, sarah_agent, default_block, default_user):
    """Detaching a block removes it from the agent but leaves the block record intact."""
    manager = server.agent_manager
    await manager.attach_block_async(agent_id=sarah_agent.id, block_id=default_block.id, actor=default_user)
    await manager.detach_block_async(agent_id=sarah_agent.id, block_id=default_block.id, actor=default_user)
    # The agent should no longer reference the block...
    refreshed = await manager.get_agent_by_id_async(sarah_agent.id, actor=default_user)
    assert len(refreshed.memory.blocks) == 0
    # ...but the block itself must survive detachment.
    surviving = await server.block_manager.get_block_by_id_async(block_id=default_block.id, actor=default_user)
    assert surviving
@pytest.mark.asyncio
async def test_detach_nonexistent_block(server: SyncServer, sarah_agent, default_user):
    """Detaching a block that was never attached should raise NoResultFound."""
    with pytest.raises(NoResultFound):
        await server.agent_manager.detach_block_async(agent_id=sarah_agent.id, block_id="nonexistent-block-id", actor=default_user)
@pytest.mark.asyncio
async def test_update_block_label(server: SyncServer, sarah_agent, default_block, default_user):
    """Renaming a block's label is reflected through the agent relationship."""
    await server.agent_manager.attach_block_async(agent_id=sarah_agent.id, block_id=default_block.id, actor=default_user)
    renamed = "new_label"
    await server.block_manager.update_block_async(default_block.id, BlockUpdate(label=renamed), actor=default_user)
    # The agent's view of the block must carry the new label.
    refreshed = await server.agent_manager.get_agent_by_id_async(sarah_agent.id, actor=default_user)
    linked = refreshed.memory.blocks[0]
    assert linked.id == default_block.id
    assert linked.label == renamed
@pytest.mark.asyncio
async def test_update_block_label_multiple_agents(server: SyncServer, sarah_agent, charles_agent, default_block, default_user):
    """A label rename on a shared block must propagate to every agent it is attached to."""
    # Share one block across two agents.
    for agent in (sarah_agent, charles_agent):
        await server.agent_manager.attach_block_async(agent_id=agent.id, block_id=default_block.id, actor=default_user)
    renamed = "new_label"
    await server.block_manager.update_block_async(default_block.id, BlockUpdate(label=renamed), actor=default_user)
    # Each agent's relationship to the block should reflect the rename.
    for agent_id in (sarah_agent.id, charles_agent.id):
        refreshed = await server.agent_manager.get_agent_by_id_async(agent_id, actor=default_user)
        # Find our specific block by ID among the agent's memory blocks.
        target = next(b for b in refreshed.memory.blocks if b.id == default_block.id)
        assert target.label == renamed
@pytest.mark.asyncio
async def test_get_block_with_label(server: SyncServer, sarah_agent, default_block, default_user):
    """An attached block can be looked up on the agent by its label."""
    await server.agent_manager.attach_block_async(agent_id=sarah_agent.id, block_id=default_block.id, actor=default_user)
    found = await server.agent_manager.get_block_with_label_async(
        agent_id=sarah_agent.id, block_label=default_block.label, actor=default_user
    )
    assert found.id == default_block.id
    assert found.label == default_block.label
@pytest.mark.asyncio
async def test_refresh_memory_async(server: SyncServer, default_user):
    """refresh_memory_async should surface out-of-band block edits without changing block count."""
    test_block = await server.block_manager.create_or_update_block_async(
        PydanticBlock(label="test", value="test", limit=1000),
        actor=default_user,
    )
    human_block = await server.block_manager.create_or_update_block_async(
        PydanticBlock(label="human", value="name: caren", limit=1000),
        actor=default_user,
    )
    # Build an agent that starts with both blocks in core memory.
    agent = await server.agent_manager.create_agent_async(
        CreateAgent(
            name="test",
            agent_type="memgpt_v2_agent",
            llm_config=LLMConfig.default_config("gpt-4o-mini"),
            embedding_config=EmbeddingConfig.default_config(provider="openai"),
            include_base_tools=False,
            block_ids=[test_block.id, human_block.id],
        ),
        actor=default_user,
    )
    # Edit the first block behind the agent's back.
    await server.block_manager.update_block_async(
        block_id=test_block.id,
        block_update=BlockUpdate(value="test2"),
        actor=default_user,
    )
    assert len(agent.memory.blocks) == 2
    agent = await server.agent_manager.refresh_memory_async(agent_state=agent, actor=default_user)
    # Refresh keeps both blocks and picks up the new value.
    assert len(agent.memory.blocks) == 2
    assert any(b.value == "test2" for b in agent.memory.blocks)
# ======================================================================================================================
# Block Manager Tests - Basic
# ======================================================================================================================
@pytest.mark.asyncio
async def test_create_block(server: SyncServer, default_user):
    """Creating a block should persist every supplied field verbatim."""
    manager = BlockManager()
    spec = PydanticBlock(
        label="human",
        is_template=True,
        value="Sample content",
        template_name="sample_template_name",
        template_id="sample_template",
        description="A test block",
        limit=1000,
        metadata={"example": "data"},
    )
    created = await manager.create_or_update_block_async(spec, actor=default_user)
    # Every field set on the spec must round-trip unchanged.
    for field in ("label", "is_template", "value", "template_name", "template_id", "description", "limit", "metadata"):
        assert getattr(created, field) == getattr(spec, field)
@pytest.mark.asyncio
async def test_batch_create_blocks_async(server: SyncServer, default_user):
    """Test batch creating multiple blocks at once.

    Fix: added the missing @pytest.mark.asyncio marker — every sibling async test
    in this module carries it, and without it pytest-asyncio never awaits this
    coroutine, so the test silently does nothing.
    """
    block_manager = BlockManager()
    # create multiple test blocks
    blocks_data = []
    for i in range(5):
        block = PydanticBlock(
            label=f"test_block_{i}",
            is_template=False,
            value=f"Content for block {i}",
            description=f"Test block {i} for batch operations",
            limit=1000 + i * 100,  # varying limits
            metadata={"index": i, "batch": "test"},
        )
        blocks_data.append(block)
    # batch create all blocks at once
    created_blocks = await block_manager.batch_create_blocks_async(blocks_data, default_user)
    # verify all blocks were created
    assert len(created_blocks) == 5
    assert all(b.label.startswith("test_block_") for b in created_blocks)
    # verify block properties were preserved
    for i, block in enumerate(created_blocks):
        assert block.label == f"test_block_{i}"
        assert block.value == f"Content for block {i}"
        assert block.description == f"Test block {i} for batch operations"
        assert block.limit == 1000 + i * 100
        assert block.metadata["index"] == i
        assert block.metadata["batch"] == "test"
        assert block.id is not None  # should have generated ids
        # blocks have organization_id at the orm level, not in the pydantic model
    # verify blocks can be retrieved individually
    for created_block in created_blocks:
        retrieved = await block_manager.get_block_by_id_async(created_block.id, default_user)
        assert retrieved.id == created_block.id
        assert retrieved.label == created_block.label
        assert retrieved.value == created_block.value
    # test with empty list
    empty_result = await block_manager.batch_create_blocks_async([], default_user)
    assert empty_result == []
    # test creating blocks with same labels (should create separate blocks since no unique constraint)
    duplicate_blocks = [
        PydanticBlock(label="duplicate_label", value="Block 1"),
        PydanticBlock(label="duplicate_label", value="Block 2"),
        PydanticBlock(label="duplicate_label", value="Block 3"),
    ]
    created_duplicates = await block_manager.batch_create_blocks_async(duplicate_blocks, default_user)
    assert len(created_duplicates) == 3
    assert all(b.label == "duplicate_label" for b in created_duplicates)
    # all should have different ids
    ids = [b.id for b in created_duplicates]
    assert len(set(ids)) == 3  # all unique ids
    # but different values
    values = [b.value for b in created_duplicates]
    assert set(values) == {"Block 1", "Block 2", "Block 3"}
@pytest.mark.asyncio
async def test_get_blocks(server, default_user):
    """Blocks can be listed wholesale or filtered down by label."""
    manager = BlockManager()
    # Seed one block per label.
    await manager.create_or_update_block_async(PydanticBlock(label="human", value="Block 1"), actor=default_user)
    await manager.create_or_update_block_async(PydanticBlock(label="persona", value="Block 2"), actor=default_user)
    # Unfiltered listing returns both.
    assert len(await manager.get_blocks_async(actor=default_user)) == 2
    # Each label filter returns exactly its single match.
    for wanted in ("human", "persona"):
        matches = await manager.get_blocks_async(actor=default_user, label=wanted)
        assert len(matches) == 1
        assert matches[0].label == wanted
@pytest.mark.asyncio
async def test_get_blocks_comprehensive(server, default_user, other_user_different_org):
    """Listing and label filtering must be scoped to the caller's organization."""

    def make_label(prefix="label"):
        return f"{prefix}_{''.join(random.choices(string.ascii_lowercase, k=6))}"

    def make_value():
        return "".join(random.choices(string.ascii_letters + string.digits, k=12))

    manager = BlockManager()
    # Seed 10 blocks for the default user and 3 for a user in a different org.
    mine = []
    for _ in range(10):
        label, value = make_label("default"), make_value()
        await manager.create_or_update_block_async(PydanticBlock(label=label, value=value), actor=default_user)
        mine.append((label, value))
    theirs = []
    for _ in range(3):
        label, value = make_label("other"), make_value()
        await manager.create_or_update_block_async(PydanticBlock(label=label, value=value), actor=other_user_different_org)
        theirs.append((label, value))
    # default_user sees exactly their own 10 blocks.
    visible = await manager.get_blocks_async(actor=default_user)
    assert len(visible) == 10
    visible_labels = {b.label for b in visible}
    for label, _ in mine:
        assert label in visible_labels
    # Per-label filtering returns each block with its value intact.
    for label, value in mine:
        hits = await manager.get_blocks_async(actor=default_user, label=label)
        assert len(hits) == 1
        assert hits[0].label == label
        assert hits[0].value == value
    # The other-org user sees only their own 3 blocks.
    others_visible = await manager.get_blocks_async(actor=other_user_different_org)
    assert len(others_visible) == 3
    others_labels = {b.label for b in others_visible}
    for label, _ in theirs:
        assert label in others_labels
    # Cross-org label lookups come back empty in both directions.
    for label, _ in mine:
        assert (await manager.get_blocks_async(actor=other_user_different_org, label=label)) == []
    for label, _ in theirs:
        assert (await manager.get_blocks_async(actor=default_user, label=label)) == []
# ======================================================================================================================
# BlockManager Pagination Tests
# ======================================================================================================================
@pytest.mark.asyncio
async def test_get_blocks_pagination_with_after_cursor(server, default_user):
    """Walk forward through blocks with the 'after' cursor and verify full coverage."""
    manager = BlockManager()
    # Unique label keeps this test's rows isolated from the rest of the suite.
    marker = f"pagination_after_{uuid.uuid4().hex[:8]}"
    created = []
    for i in range(5):
        # SQLite timestamps are coarse; spacing creations keeps ordering deterministic.
        if USING_SQLITE and i > 0:
            time.sleep(CREATE_DELAY_SQLITE)
        created.append(
            await manager.create_or_update_block_async(PydanticBlock(label=marker, value=f"Block {i}"), actor=default_user)
        )
    # Page 1 (ascending = oldest first): the two oldest blocks.
    first = await manager.get_blocks_async(actor=default_user, label=marker, limit=2, ascending=True)
    assert [b.id for b in first] == [created[0].id, created[1].id]
    # Page 2: resume after the last item of page 1.
    second = await manager.get_blocks_async(actor=default_user, label=marker, limit=2, after=first[-1].id, ascending=True)
    assert [b.id for b in second] == [created[2].id, created[3].id]
    # Page 3: the single remaining block.
    third = await manager.get_blocks_async(actor=default_user, label=marker, limit=2, after=second[-1].id, ascending=True)
    assert [b.id for b in third] == [created[4].id]
    # Every created block appears exactly once across the pages.
    assert {b.id for b in first + second + third} == {b.id for b in created}
@pytest.mark.asyncio
async def test_get_blocks_pagination_with_before_cursor(server, default_user):
    """Test cursor-based pagination using the 'before' parameter.

    'before' restricts results to items sorted ahead of the cursor, still returned
    from the start of that subset — a filter, not a "previous page" operation
    (which would require reversing the sort order).
    """
    manager = BlockManager()
    # Unique label keeps this test's rows isolated from the rest of the suite.
    marker = f"pagination_before_{uuid.uuid4().hex[:8]}"
    created = []
    for i in range(5):
        # Space out creations so timestamps stay distinct on SQLite.
        if USING_SQLITE and i > 0:
            time.sleep(CREATE_DELAY_SQLITE)
        created.append(
            await manager.create_or_update_block_async(PydanticBlock(label=marker, value=f"Block {i}"), actor=default_user)
        )
    # before=block[4] limits us to blocks 0-3; limit=2 yields [0, 1].
    first = await manager.get_blocks_async(actor=default_user, label=marker, limit=2, before=created[4].id, ascending=True)
    assert [b.id for b in first] == [created[0].id, created[1].id]
    # Combining 'after' and 'before' windows in on blocks [2, 3].
    second = await manager.get_blocks_async(
        actor=default_user, label=marker, limit=2, after=first[-1].id, before=created[4].id, ascending=True
    )
    assert [b.id for b in second] == [created[2].id, created[3].id]
@pytest.mark.asyncio
async def test_get_blocks_pagination_descending_order(server, default_user):
    """With ascending=False, pages come back newest-first and cursors walk backwards."""
    manager = BlockManager()
    # Unique label keeps this test's rows isolated from the rest of the suite.
    marker = f"pagination_desc_{uuid.uuid4().hex[:8]}"
    created = []
    for i in range(4):
        # Space out creations so timestamps stay distinct on SQLite.
        if USING_SQLITE and i > 0:
            time.sleep(CREATE_DELAY_SQLITE)
        created.append(
            await manager.create_or_update_block_async(PydanticBlock(label=marker, value=f"Block {i}"), actor=default_user)
        )
    # Newest two first.
    first = await manager.get_blocks_async(actor=default_user, label=marker, limit=2, ascending=False)
    assert [b.id for b in first] == [created[3].id, created[2].id]
    # Continue past the cursor to reach the oldest two.
    second = await manager.get_blocks_async(actor=default_user, label=marker, limit=2, after=first[-1].id, ascending=False)
    assert [b.id for b in second] == [created[1].id, created[0].id]
@pytest.mark.asyncio
async def test_get_blocks_pagination_all_blocks_found(server, default_user):
    """Exhaustive pagination must visit every block exactly once with no gaps."""
    manager = BlockManager()
    # Unique label keeps this test's rows isolated from the rest of the suite.
    marker = f"pagination_allfound_{uuid.uuid4().hex[:8]}"
    created = []
    for i in range(10):
        # Space out creations so timestamps stay distinct on SQLite.
        if USING_SQLITE and i > 0:
            time.sleep(CREATE_DELAY_SQLITE)
        created.append(
            await manager.create_or_update_block_async(PydanticBlock(label=marker, value=f"Block {i}"), actor=default_user)
        )
    # Walk pages of 3 until a page comes back empty.
    seen = []
    cursor = None
    pages = 0
    while page := await manager.get_blocks_async(actor=default_user, label=marker, limit=3, after=cursor, ascending=True):
        seen.extend(page)
        cursor = page[-1].id
        pages += 1
    # 10 blocks at page size 3 -> 3 + 3 + 3 + 1.
    assert pages == 4, f"Expected 4 pages but got {pages}"
    assert len(seen) == 10, f"Expected 10 blocks but found {len(seen)}"
    # All created block IDs should be present.
    assert {b.id for b in seen} == {b.id for b in created}, "Not all blocks were found through pagination"
@pytest.mark.asyncio
async def test_get_agents_for_block_pagination(server: SyncServer, sarah_agent, charles_agent, default_user):
    """Agents attached to a block can be paged through one at a time."""
    manager = BlockManager()
    # Unique label keeps this test's block isolated from the rest of the suite.
    marker = f"pagination_agents_{uuid.uuid4().hex[:8]}"
    shared = await manager.create_or_update_block_async(
        PydanticBlock(label=marker, value="Shared block content"), actor=default_user
    )
    # Both agents share the block.
    for agent in (sarah_agent, charles_agent):
        await server.agent_manager.attach_block_async(agent_id=agent.id, block_id=shared.id, actor=default_user)
    # One agent per page.
    first = await manager.get_agents_for_block_async(block_id=shared.id, actor=default_user, limit=1, ascending=True)
    assert len(first) == 1
    second = await manager.get_agents_for_block_async(
        block_id=shared.id, actor=default_user, limit=1, after=first[0].id, ascending=True
    )
    assert len(second) == 1
    # Together the two pages cover both distinct agents.
    assert first[0].id != second[0].id
    assert {first[0].id, second[0].id} == {sarah_agent.id, charles_agent.id}
@pytest.mark.asyncio
async def test_update_block(server: SyncServer, default_user):
    """Updating value and description persists both fields."""
    manager = BlockManager()
    created = await manager.create_or_update_block_async(PydanticBlock(label="persona", value="Original Content"), actor=default_user)
    await manager.update_block_async(
        block_id=created.id,
        block_update=BlockUpdate(value="Updated Content", description="Updated description"),
        actor=default_user,
    )
    # Reload and check both updated fields round-tripped.
    reloaded = await manager.get_block_by_id_async(actor=default_user, block_id=created.id)
    assert reloaded.value == "Updated Content"
    assert reloaded.description == "Updated description"
@pytest.mark.asyncio
async def test_update_block_limit(server: SyncServer, default_user):
    """Oversized values are rejected unless the limit is raised in the same update."""
    manager = BlockManager()
    created = await manager.create_or_update_block_async(
        PydanticBlock(label="persona", value="Original Content", limit=20000), actor=default_user
    )
    big_value = "Updated Content" * 2000
    # Writing past the current limit must fail.
    with pytest.raises(LettaInvalidArgumentError):
        await manager.update_block_async(
            block_id=created.id,
            block_update=BlockUpdate(value=big_value, description="Updated description"),
            actor=default_user,
        )
    # Raising the limit alongside the value succeeds.
    await manager.update_block_async(
        block_id=created.id,
        block_update=BlockUpdate(value=big_value, description="Updated description", limit=len(big_value)),
        actor=default_user,
    )
    reloaded = await manager.get_block_by_id_async(actor=default_user, block_id=created.id)
    assert reloaded.value == big_value
    assert reloaded.description == "Updated description"
@pytest.mark.asyncio
async def test_update_block_limit_does_not_reset(server: SyncServer, default_user):
    """A value exactly at the existing limit is accepted without resending the limit."""
    manager = BlockManager()
    payload = "Updated Content" * 2000
    # Create the block with a limit that exactly fits the future payload.
    created = await manager.create_or_update_block_async(
        PydanticBlock(label="persona", value="Original Content", limit=len(payload)), actor=default_user
    )
    # Update the value only; the stored limit must still apply (and not reset).
    await manager.update_block_async(block_id=created.id, block_update=BlockUpdate(value=payload), actor=default_user)
    reloaded = await manager.get_block_by_id_async(actor=default_user, block_id=created.id)
    assert reloaded.value == payload
@pytest.mark.asyncio
async def test_update_nonexistent_block(server: SyncServer, default_user):
    """Updating a missing block raises NoResultFound (surfaced to clients as a 404)."""
    manager = BlockManager()
    missing_id = "block-7d73d0a7-6e86-4db7-b53a-411c11ed958a"
    with pytest.raises(NoResultFound):
        await manager.update_block_async(block_id=missing_id, block_update=BlockUpdate(value="Updated Content"), actor=default_user)
@pytest.mark.asyncio
async def test_delete_block(server: SyncServer, default_user):
    """A deleted block disappears from listings."""
    manager = BlockManager()
    created = await manager.create_or_update_block_async(PydanticBlock(label="human", value="Sample content"), actor=default_user)
    await manager.delete_block_async(block_id=created.id, actor=default_user)
    # Nothing should remain for this user.
    assert len(await manager.get_blocks_async(actor=default_user)) == 0
@pytest.mark.asyncio
async def test_delete_block_detaches_from_agent(server: SyncServer, sarah_agent, default_user):
    """Deleting a block also severs its attachment to any agent."""
    created = await server.block_manager.create_or_update_block_async(
        PydanticBlock(label="human", value="Sample content"), actor=default_user
    )
    agent_state = await server.agent_manager.attach_block_async(agent_id=sarah_agent.id, block_id=created.id, actor=default_user)
    # Sanity check: attachment went through.
    assert created.id in {b.id for b in agent_state.memory.blocks}
    await server.block_manager.delete_block_async(block_id=created.id, actor=default_user)
    # Gone from the org's block listing...
    assert len(await server.block_manager.get_blocks_async(actor=default_user)) == 0
    # ...and gone from the agent's memory as well.
    agent_state = await server.agent_manager.get_agent_by_id_async(agent_id=sarah_agent.id, actor=default_user)
    assert created.id not in {b.id for b in agent_state.memory.blocks}
@pytest.mark.asyncio
async def test_get_agents_for_block(server: SyncServer, sarah_agent, charles_agent, default_user):
    """get_agents_for_block_async returns every agent the block is attached to."""
    shared = await server.block_manager.create_or_update_block_async(
        PydanticBlock(label="alien", value="Sample content"), actor=default_user
    )
    sarah_agent = await server.agent_manager.attach_block_async(agent_id=sarah_agent.id, block_id=shared.id, actor=default_user)
    charles_agent = await server.agent_manager.attach_block_async(agent_id=charles_agent.id, block_id=shared.id, actor=default_user)
    # Both agents now carry the block.
    assert shared.id in {b.id for b in sarah_agent.memory.blocks}
    assert shared.id in {b.id for b in charles_agent.memory.blocks}
    # Reverse lookup finds exactly those two agents.
    linked = await server.block_manager.get_agents_for_block_async(block_id=shared.id, actor=default_user)
    assert len(linked) == 2
    assert {a.id for a in linked} == {sarah_agent.id, charles_agent.id}
@pytest.mark.asyncio
async def test_batch_create_multiple_blocks(server: SyncServer, default_user):
    """Batch creation persists each distinct block with its own id and value."""
    manager = BlockManager()
    count = 10
    # Prepare distinct specs, then create them all in one call.
    specs = [PydanticBlock(label=f"batch_label_{i}", value=f"batch_value_{i}") for i in range(count)]
    created = await manager.batch_create_blocks_async(specs, actor=default_user)
    assert len(created) == count
    # Index by label for easy lookup.
    by_label = {blk.label: blk for blk in created}
    # Each label/value pair round-trips, and every block got an id.
    for i in range(count):
        label = f"batch_label_{i}"
        assert label in by_label, f"Missing label: {label}"
        assert by_label[label].value == f"batch_value_{i}"
        assert by_label[label].id is not None
    # The new blocks are also visible via the listing API.
    listed_labels = {blk.label for blk in await manager.get_blocks_async(actor=default_user)}
    assert {f"batch_label_{i}" for i in range(count)}.issubset(listed_labels)
@pytest.mark.asyncio
async def test_bulk_update_skips_missing_and_truncates_then_returns_none(server: SyncServer, default_user: PydanticUser, caplog):
    """Bulk update should skip unknown ids, truncate over-limit values, and return None by default.

    Fix: added the missing @pytest.mark.asyncio marker — without it pytest-asyncio
    never awaits this coroutine, so the test silently does nothing.
    """
    mgr = BlockManager()
    # create one block with a small limit so truncation triggers
    b = await mgr.create_or_update_block_async(
        PydanticBlock(label="human", value="orig", limit=5),
        actor=default_user,
    )
    # prepare updates: one real id with an over‐limit value, plus one missing id
    long_val = random_string(10)  # length > limit==5
    updates = {
        b.id: long_val,
        "nonexistent-id": "whatever",
    }
    caplog.set_level(logging.WARNING)
    result = await mgr.bulk_update_block_values_async(updates, actor=default_user)
    # default return_hydrated=False → should be None
    assert result is None
    # warnings should mention skipping the missing ID and truncation
    assert "skipping during bulk update" in caplog.text
    assert "truncating" in caplog.text
    # confirm the value was truncated to `limit` characters
    reloaded = await mgr.get_block_by_id_async(actor=default_user, block_id=b.id)
    assert len(reloaded.value) == 5
    assert reloaded.value == long_val[:5]
@pytest.mark.asyncio
@pytest.mark.skip(reason="TODO: implement for async")
async def test_bulk_update_return_hydrated_true(server: SyncServer, default_user: PydanticUser):
    """With return_hydrated=True, bulk update returns the updated block schemas.

    Fix: added the missing @pytest.mark.asyncio marker so that, once the skip is
    lifted, the coroutine is actually awaited instead of silently not running.
    """
    mgr = BlockManager()
    # create a block
    b = await mgr.create_or_update_block_async(
        PydanticBlock(label="persona", value="foo", limit=20),
        actor=default_user,
    )
    updates = {b.id: "new-val"}
    updated = await mgr.bulk_update_block_values_async(updates, actor=default_user, return_hydrated=True)
    # with return_hydrated=True, we get back a list of schemas
    assert isinstance(updated, list) and len(updated) == 1
    assert updated[0].id == b.id
    assert updated[0].value == "new-val"
@pytest.mark.asyncio
async def test_bulk_update_respects_org_scoping(
    server: SyncServer, default_user: PydanticUser, other_user_different_org: PydanticUser, caplog
):
    """Bulk updates must only touch blocks owned by the caller's organization.

    Fix: added the missing @pytest.mark.asyncio marker — without it pytest-asyncio
    never awaits this coroutine, so its assertions never execute.
    """
    mgr = BlockManager()
    # one block in each org
    mine = await mgr.create_or_update_block_async(
        PydanticBlock(label="human", value="mine", limit=100),
        actor=default_user,
    )
    theirs = await mgr.create_or_update_block_async(
        PydanticBlock(label="human", value="theirs", limit=100),
        actor=other_user_different_org,
    )
    updates = {
        mine.id: "updated-mine",
        theirs.id: "updated-theirs",
    }
    caplog.set_level(logging.WARNING)
    await mgr.bulk_update_block_values_async(updates, actor=default_user)
    # mine should be updated...
    reloaded_mine = await mgr.get_block_by_id_async(actor=default_user, block_id=mine.id)
    assert reloaded_mine.value == "updated-mine"
    # ...theirs should remain untouched
    reloaded_theirs = await mgr.get_block_by_id_async(actor=other_user_different_org, block_id=theirs.id)
    assert reloaded_theirs.value == "theirs"
    # warning should mention skipping the other-org ID
    assert "skipping during bulk update" in caplog.text
# ======================================================================================================================
# Block Manager Tests - Checkpointing
# ======================================================================================================================
@pytest.mark.asyncio
async def test_checkpoint_creates_history(server: SyncServer, default_user):
    """
    Checkpointing a block writes exactly one BlockHistory row and points the
    block's current_history_entry_id at it.
    """
    manager = BlockManager()
    original_value = "Initial block content"
    created = await manager.create_or_update_block_async(
        PydanticBlock(label="test_checkpoint", value=original_value), actor=default_user
    )
    # Act: checkpoint it.
    await manager.checkpoint_block_async(block_id=created.id, actor=default_user)
    # Inspect the history table directly via the ORM.
    async with db_registry.async_session() as session:
        from sqlalchemy import select

        result = await session.execute(select(BlockHistory).filter(BlockHistory.block_id == created.id))
        rows = list(result.scalars().all())
        assert len(rows) == 1, "Exactly one history entry should be created"
        entry = rows[0]
        # Fetch the ORM block for internal pointer checks.
        db_block = await session.get(Block, created.id)
        # First checkpoint: sequence 1, value snapshot, attributed to the user.
        assert entry.sequence_number == 1
        assert entry.value == original_value
        assert entry.actor_type == ActorType.LETTA_USER
        assert entry.actor_id == default_user.id
        assert db_block.current_history_entry_id == entry.id
@pytest.mark.asyncio
async def test_multiple_checkpoints(server: SyncServer, default_user):
    """Two checkpoints around an edit yield sequenced history entries, with the block on the latest."""
    manager = BlockManager()
    created = await manager.create_or_update_block_async(PydanticBlock(label="test_multi_checkpoint", value="v1"), actor=default_user)
    # Checkpoint v1, edit the content to v2, then checkpoint again.
    await manager.checkpoint_block_async(block_id=created.id, actor=default_user)
    edited = PydanticBlock(**created.model_dump())
    edited.value = "v2"
    await manager.create_or_update_block_async(edited, actor=default_user)
    await manager.checkpoint_block_async(block_id=created.id, actor=default_user)
    async with db_registry.async_session() as session:
        from sqlalchemy import select

        stmt = select(BlockHistory).filter(BlockHistory.block_id == created.id).order_by(BlockHistory.sequence_number.asc())
        result = await session.execute(stmt)
        entries = list(result.scalars().all())
        assert len(entries) == 2, "Should have two history entries"
        # Entries record (sequence, value) snapshots in creation order.
        assert [(e.sequence_number, e.value) for e in entries] == [(1, "v1"), (2, "v2")]
        # The block must now reference the newest entry.
        db_block = await session.get(Block, created.id)
        assert db_block.current_history_entry_id == entries[1].id
@pytest.mark.asyncio
async def test_checkpoint_with_agent_id(server: SyncServer, default_user, sarah_agent):
    """
    Passing agent_id to checkpoint_block attributes the history entry to the
    agent (actor_type=LETTA_AGENT, actor_id=<agent.id>).
    """
    manager = BlockManager()
    created = await manager.create_or_update_block_async(
        PydanticBlock(label="test_agent_checkpoint", value="Agent content"), actor=default_user
    )
    # Checkpoint on behalf of the agent.
    await manager.checkpoint_block_async(block_id=created.id, actor=default_user, agent_id=sarah_agent.id)
    # Verify attribution on the single history row.
    async with db_registry.async_session() as session:
        from sqlalchemy import select

        result = await session.execute(select(BlockHistory).filter(BlockHistory.block_id == created.id))
        entry = result.scalar_one()
        assert entry.actor_type == ActorType.LETTA_AGENT
        assert entry.actor_id == sarah_agent.id
@pytest.mark.asyncio
async def test_checkpoint_with_no_state_change(server: SyncServer, default_user):
    """
    Checkpointing twice with no intervening edit records one entry per call
    (current policy: identical snapshots are not deduplicated).
    """
    manager = BlockManager()
    created = await manager.create_or_update_block_async(PydanticBlock(label="test_no_change", value="original"), actor=default_user)
    # Two back-to-back checkpoints with nothing changed in between.
    for _ in range(2):
        await manager.checkpoint_block_async(block_id=created.id, actor=default_user)
    async with db_registry.async_session() as session:
        from sqlalchemy import select

        result = await session.execute(select(BlockHistory).filter(BlockHistory.block_id == created.id))
        entries = list(result.scalars().all())
        assert len(entries) == 2
@pytest.mark.asyncio
async def test_checkpoint_concurrency_stale(server: SyncServer, default_user):
    """
    Optimistic-locking check for checkpoint_block_async: two sessions preload
    the same block, the first checkpoint bumps the row version, and the second
    (now stale) checkpoint must fail with StaleDataError.
    """
    block_manager = BlockManager()
    # create block
    block = await block_manager.create_or_update_block_async(
        PydanticBlock(label="test_stale_checkpoint", value="hello"), actor=default_user
    )
    # session1 loads a copy of the row
    async with db_registry.async_session() as s1:
        block_s1 = await s1.get(Block, block.id)  # version=1
    # session2 loads its own copy of the same row
    async with db_registry.async_session() as s2:
        block_s2 = await s2.get(Block, block.id)  # also version=1
    # session1 checkpoint => version=2
    async with db_registry.async_session() as s1:
        # re-attach the detached instance to the fresh session before use
        block_s1 = await s1.merge(block_s1)
        await block_manager.checkpoint_block_async(
            block_id=block_s1.id,
            actor=default_user,
            use_preloaded_block=block_s1,  # let manager use the object in memory
        )
        # commits inside checkpoint_block => version goes to 2
    # session2 tries to checkpoint => sees old version=1 => stale error
    with pytest.raises(StaleDataError):
        async with db_registry.async_session() as s2:
            block_s2 = await s2.merge(block_s2)
            await block_manager.checkpoint_block_async(
                block_id=block_s2.id,
                actor=default_user,
                use_preloaded_block=block_s2,
            )
@pytest.mark.asyncio
async def test_checkpoint_no_future_states(server: SyncServer, default_user):
    """
    Ensures that if the block is already at the highest sequence,
    creating a new checkpoint does NOT delete anything.
    """
    mgr = BlockManager()

    # seq=1: "v1"
    blk = await mgr.create_or_update_block_async(PydanticBlock(label="no_future_test", value="v1"), actor=default_user)
    await mgr.checkpoint_block_async(block_id=blk.id, actor=default_user)

    # seq=2: "v2"
    edit = PydanticBlock(**blk.model_dump())
    edit.value = "v2"
    await mgr.create_or_update_block_async(edit, actor=default_user)
    await mgr.checkpoint_block_async(block_id=blk.id, actor=default_user)

    # seq=3: checkpoint again without edits; nothing "future" exists to truncate.
    await mgr.checkpoint_block_async(block_id=blk.id, actor=default_user)

    async with db_registry.async_session() as session:
        from sqlalchemy import select

        stmt = select(BlockHistory).filter(BlockHistory.block_id == blk.id).order_by(BlockHistory.sequence_number.asc())
        rows = list((await session.execute(stmt)).scalars().all())

        # All three checkpoints survive, in order; no row was deleted.
        assert len(rows) == 3
        assert rows[0].value == "v1"
        assert rows[1].value == "v2"
        assert rows[2].sequence_number == 3
# ======================================================================================================================
# Block Manager Tests - Undo
# ======================================================================================================================
@pytest.mark.asyncio
async def test_undo_checkpoint_block(server: SyncServer, default_user):
    """
    Verifies that we can undo to the previous checkpoint:
    1) Create a block and checkpoint -> sequence_number=1
    2) Update block content and checkpoint -> sequence_number=2
    3) Undo -> should revert block to sequence_number=1's content
    """
    mgr = BlockManager()
    v1_text = "Version 1 content"

    # seq=1
    blk = await mgr.create_or_update_block_async(PydanticBlock(label="undo_test", value=v1_text), actor=default_user)
    await mgr.checkpoint_block_async(block_id=blk.id, actor=default_user)

    # seq=2 with updated content
    edit = PydanticBlock(**blk.model_dump())
    edit.value = "Version 2 content"
    await mgr.create_or_update_block_async(edit, actor=default_user)
    await mgr.checkpoint_block_async(block_id=blk.id, actor=default_user)

    # Undo back to seq=1 and verify the content reverted.
    reverted = await mgr.undo_checkpoint_block(block_id=blk.id, actor=default_user)
    assert reverted.value == v1_text, "Block should revert to version 1 content"
    assert reverted.label == "undo_test", "Label should also revert if changed (or remain the same if unchanged)"
# @pytest.mark.asyncio
# async def test_checkpoint_deletes_future_states_after_undo(server: SyncServer, default_user):
# """
# Verifies that once we've undone to an earlier checkpoint, creating a new
# checkpoint removes any leftover 'future' states that existed beyond that sequence.
# """
# block_manager = BlockManager()
#
# # 1) Create block
# block_init = PydanticBlock(label="test_truncation", value="v1")
# block_v1 = await block_manager.create_or_update_block_async(block_init, actor=default_user)
# # Checkpoint => seq=1
# await block_manager.checkpoint_block_async(block_id=block_v1.id, actor=default_user)
#
# # 2) Update to "v2", checkpoint => seq=2
# block_v2 = PydanticBlock(**block_v1.model_dump())
# block_v2.value = "v2"
# await block_manager.create_or_update_block_async(block_v2, actor=default_user)
# await block_manager.checkpoint_block_async(block_id=block_v1.id, actor=default_user)
#
# # 3) Update to "v3", checkpoint => seq=3
# block_v3 = PydanticBlock(**block_v1.model_dump())
# block_v3.value = "v3"
# await block_manager.create_or_update_block_async(block_v3, actor=default_user)
# await block_manager.checkpoint_block_async(block_id=block_v1.id, actor=default_user)
#
# # We now have three states in history: seq=1 (v1), seq=2 (v2), seq=3 (v3).
#
# # Undo from seq=3 -> seq=2
# block_undo_1 = await block_manager.undo_checkpoint_block(block_v1.id, actor=default_user)
# assert block_undo_1.value == "v2"
#
# # Undo from seq=2 -> seq=1
# block_undo_2 = await block_manager.undo_checkpoint_block(block_v1.id, actor=default_user)
# assert block_undo_2.value == "v1"
#
# # 4) Now we are at seq=1. If we checkpoint again, we should remove the old seq=2,3
# # because the new code truncates future states beyond seq=1.
#
# # Let's do a new edit: "v1.5"
# block_v1_5 = PydanticBlock(**block_undo_2.model_dump())
# block_v1_5.value = "v1.5"
# await block_manager.create_or_update_block_async(block_v1_5, actor=default_user)
#
# # 5) Checkpoint => new seq=2, removing the old seq=2 and seq=3
# await block_manager.checkpoint_block_async(block_id=block_v1.id, actor=default_user)
#
# async with db_registry.async_session() as session:
# # Let's see which BlockHistory rows remain
# from sqlalchemy import select
#
# stmt = select(BlockHistory).filter(BlockHistory.block_id == block_v1.id).order_by(BlockHistory.sequence_number.asc())
# result = await session.execute(stmt)
# history_entries = list(result.scalars().all())
#
# # We expect two rows: seq=1 => "v1", seq=2 => "v1.5"
# assert len(history_entries) == 2, f"Expected 2 entries, got {len(history_entries)}"
# assert history_entries[0].sequence_number == 1
# assert history_entries[0].value == "v1"
# assert history_entries[1].sequence_number == 2
# assert history_entries[1].value == "v1.5"
#
# # No row should contain "v2" or "v3"
# existing_values = {h.value for h in history_entries}
# assert "v2" not in existing_values, "Old seq=2 should have been removed."
# assert "v3" not in existing_values, "Old seq=3 should have been removed."
@pytest.mark.asyncio
async def test_undo_no_history(server: SyncServer, default_user):
    """
    If a block has never been checkpointed (no current_history_entry_id),
    undo_checkpoint_block should raise a LettaInvalidArgumentError.
    """
    mgr = BlockManager()

    # A block that was never checkpointed has no prior state to restore.
    blk = await mgr.create_or_update_block_async(PydanticBlock(label="no_history_test", value="initial"), actor=default_user)

    with pytest.raises(LettaInvalidArgumentError):
        await mgr.undo_checkpoint_block(block_id=blk.id, actor=default_user)
@pytest.mark.asyncio
async def test_undo_first_checkpoint(server: SyncServer, default_user):
    """
    If the block is at the first checkpoint (sequence_number=1),
    undo should fail because there's no prior checkpoint.
    """
    mgr = BlockManager()

    # Create the block and take the very first checkpoint (seq=1).
    blk = await mgr.create_or_update_block_async(PydanticBlock(label="first_checkpoint", value="Version1"), actor=default_user)
    await mgr.checkpoint_block_async(block_id=blk.id, actor=default_user)

    # There is nothing before seq=1, so undo must refuse.
    with pytest.raises(LettaInvalidArgumentError):
        await mgr.undo_checkpoint_block(block_id=blk.id, actor=default_user)
@pytest.mark.asyncio
async def test_undo_multiple_checkpoints(server: SyncServer, default_user):
    """
    Tests multiple checkpoints in a row, then undo repeatedly
    from seq=3 -> seq=2 -> seq=1, verifying each revert.
    """
    mgr = BlockManager()

    # seq=1 with "v1"
    blk = await mgr.create_or_update_block_async(PydanticBlock(label="multi_checkpoint", value="v1"), actor=default_user)
    await mgr.checkpoint_block_async(block_id=blk.id, actor=default_user)

    # seq=2 ("v2") and seq=3 ("v3")
    for version in ("v2", "v3"):
        edit = PydanticBlock(**blk.model_dump())
        edit.value = version
        await mgr.create_or_update_block_async(edit, actor=default_user)
        await mgr.checkpoint_block_async(block_id=blk.id, actor=default_user)

    # Walk back down the history one checkpoint at a time.
    for expected in ("v2", "v1"):
        undone = await mgr.undo_checkpoint_block(blk.id, actor=default_user)
        assert undone.value == expected

    # seq=1 is the earliest state; a further undo must fail.
    with pytest.raises(LettaInvalidArgumentError):
        await mgr.undo_checkpoint_block(blk.id, actor=default_user)
@pytest.mark.asyncio
async def test_undo_concurrency_stale(server: SyncServer, default_user):
    """
    Demonstrate concurrency: both sessions start with the block at seq=2,
    one session undoes first -> block now seq=1, version increments,
    the other session tries to undo with stale data -> StaleDataError.
    """
    block_manager = BlockManager()
    # 1) create block
    block_data = PydanticBlock(label="concurrency_undo", value="v1")
    block_v1 = await block_manager.create_or_update_block_async(block_data, actor=default_user)
    # checkpoint => seq=1
    await block_manager.checkpoint_block_async(block_v1.id, actor=default_user)
    # 2) update to v2
    block_data_v2 = PydanticBlock(**block_v1.model_dump())
    block_data_v2.value = "v2"
    await block_manager.create_or_update_block_async(block_data_v2, actor=default_user)
    # checkpoint => seq=2
    await block_manager.checkpoint_block_async(block_v1.id, actor=default_user)
    # Now block is at seq=2
    # session1 preloads the block (detached once the session closes)
    async with db_registry.async_session() as s1:
        block_s1 = await s1.get(Block, block_v1.id)  # version=? let's say 2 in memory
    # session2 also preloads the block, independently
    async with db_registry.async_session() as s2:
        block_s2 = await s2.get(Block, block_v1.id)  # also version=2
    # Session1 -> undo to seq=1
    await block_manager.undo_checkpoint_block(
        block_id=block_v1.id,
        actor=default_user,
        use_preloaded_block=block_s1,  # stale object from session1
    )
    # This commits first => block now points to seq=1, version increments
    # Session2 tries the same undo, but its preloaded copy still carries the old version
    with pytest.raises(StaleDataError):
        await block_manager.undo_checkpoint_block(
            block_id=block_v1.id, actor=default_user, use_preloaded_block=block_s2
        )  # also seq=2 in memory
# ======================================================================================================================
# Block Manager Tests - Redo
# ======================================================================================================================
@pytest.mark.asyncio
async def test_redo_checkpoint_block(server: SyncServer, default_user):
    """
    1) Create a block with value v1 -> checkpoint => seq=1
    2) Update to v2 -> checkpoint => seq=2
    3) Update to v3 -> checkpoint => seq=3
    4) Undo once (seq=3 -> seq=2)
    5) Redo once (seq=2 -> seq=3)
    """
    mgr = BlockManager()

    # seq=1 with "v1"
    blk = await mgr.create_or_update_block_async(PydanticBlock(label="redo_test", value="v1"), actor=default_user)
    await mgr.checkpoint_block_async(block_id=blk.id, actor=default_user)

    # seq=2 ("v2") and seq=3 ("v3")
    for version in ("v2", "v3"):
        edit = PydanticBlock(**blk.model_dump())
        edit.value = version
        await mgr.create_or_update_block_async(edit, actor=default_user)
        await mgr.checkpoint_block_async(block_id=blk.id, actor=default_user)

    # One step back, then one step forward.
    undone = await mgr.undo_checkpoint_block(blk.id, actor=default_user)
    assert undone.value == "v2", "After undo, block should revert to v2"
    redone = await mgr.redo_checkpoint_block(blk.id, actor=default_user)
    assert redone.value == "v3", "After redo, block should go back to v3"
@pytest.mark.asyncio
async def test_redo_no_history(server: SyncServer, default_user):
    """
    If a block has no current_history_entry_id (never checkpointed),
    then redo_checkpoint_block should raise LettaInvalidArgumentError.
    """
    mgr = BlockManager()

    # Never checkpointed -> there is no history position to advance from.
    blk = await mgr.create_or_update_block_async(PydanticBlock(label="redo_no_history", value="v0"), actor=default_user)

    with pytest.raises(LettaInvalidArgumentError):
        await mgr.redo_checkpoint_block(blk.id, actor=default_user)
@pytest.mark.asyncio
async def test_redo_at_highest_checkpoint(server: SyncServer, default_user):
    """
    If the block is at the maximum sequence number, there's no higher checkpoint to move to.
    redo_checkpoint_block should raise LettaInvalidArgumentError.
    """
    mgr = BlockManager()

    # seq=1
    blk = await mgr.create_or_update_block_async(PydanticBlock(label="redo_highest", value="v1"), actor=default_user)
    await mgr.checkpoint_block_async(blk.id, actor=default_user)

    # seq=2 — now the current AND highest checkpoint
    edit = PydanticBlock(**blk.model_dump())
    edit.value = "v2"
    await mgr.create_or_update_block_async(edit, actor=default_user)
    await mgr.checkpoint_block_async(blk.id, actor=default_user)

    # No seq=3 exists, so redo must refuse.
    with pytest.raises(LettaInvalidArgumentError):
        await mgr.redo_checkpoint_block(blk.id, actor=default_user)
@pytest.mark.asyncio
async def test_redo_after_multiple_undo(server: SyncServer, default_user):
    """
    1) Create and checkpoint versions: v1 -> seq=1, v2 -> seq=2, v3 -> seq=3, v4 -> seq=4
    2) Undo thrice => from seq=4 to seq=1
    3) Redo thrice => from seq=1 back to seq=4
    """
    mgr = BlockManager()

    # seq=1 with "v1"
    blk = await mgr.create_or_update_block_async(PydanticBlock(label="redo_multi", value="v1"), actor=default_user)
    await mgr.checkpoint_block_async(blk.id, actor=default_user)

    # seq=2..4 with "v2".."v4"
    for version in ("v2", "v3", "v4"):
        edit = PydanticBlock(**blk.model_dump())
        edit.value = version
        await mgr.create_or_update_block_async(edit, actor=default_user)
        await mgr.checkpoint_block_async(blk.id, actor=default_user)

    # Walk back: seq=4 -> 3 -> 2 -> 1.
    for expected_value in ("v3", "v2", "v1"):
        undone = await mgr.undo_checkpoint_block(blk.id, actor=default_user)
        assert undone.value == expected_value, f"Undo should get us back to {expected_value}"

    # And forward again: seq=1 -> 2 -> 3 -> 4.
    for expected_value in ("v2", "v3", "v4"):
        redone = await mgr.redo_checkpoint_block(blk.id, actor=default_user)
        assert redone.value == expected_value, f"Redo should get us forward to {expected_value}"
@pytest.mark.asyncio
async def test_redo_concurrency_stale(server: SyncServer, default_user):
    """
    Optimistic-locking check for redo: after an undo leaves a known future
    state (seq=3) in history, two sessions preload the block at seq=2; the
    first redo commits and bumps the row version, so the second, stale redo
    must raise StaleDataError.
    """
    block_manager = BlockManager()
    # 1) Create block => checkpoint => seq=1
    block = await block_manager.create_or_update_block_async(PydanticBlock(label="redo_concurrency", value="v1"), actor=default_user)
    await block_manager.checkpoint_block_async(block.id, actor=default_user)
    # 2) Another edit => checkpoint => seq=2
    block_v2 = PydanticBlock(**block.model_dump())
    block_v2.value = "v2"
    await block_manager.create_or_update_block_async(block_v2, actor=default_user)
    await block_manager.checkpoint_block_async(block.id, actor=default_user)
    # 3) Another edit => checkpoint => seq=3
    block_v3 = PydanticBlock(**block.model_dump())
    block_v3.value = "v3"
    await block_manager.create_or_update_block_async(block_v3, actor=default_user)
    await block_manager.checkpoint_block_async(block.id, actor=default_user)
    # Now the block is at seq=3 in the DB
    # 4) Undo from seq=3 -> seq=2 so that we have a known future state at seq=3
    undone_block = await block_manager.undo_checkpoint_block(block.id, actor=default_user)
    assert undone_block.value == "v2"
    # At this point the block is physically at seq=2 in DB,
    # but there's a valid row for seq=3 in block_history (the 'v3' state).
    # 5) Simulate concurrency: two sessions each read the block at seq=2
    async with db_registry.async_session() as s1:
        block_s1 = await s1.get(Block, block.id)
    async with db_registry.async_session() as s2:
        block_s2 = await s2.get(Block, block.id)
    # 6) Session1 redoes to seq=3 first -> success
    await block_manager.redo_checkpoint_block(block_id=block.id, actor=default_user, use_preloaded_block=block_s1)
    # commits => block is now seq=3 in DB, version increments
    # 7) Session2 tries to do the same from stale version
    # => we expect StaleDataError, because the second session is using
    # an out-of-date version of the block
    with pytest.raises(StaleDataError):
        await block_manager.redo_checkpoint_block(block_id=block.id, actor=default_user, use_preloaded_block=block_s2)
# ======================================================================================================================
# Block Tags Tests
# ======================================================================================================================
@pytest.mark.asyncio
async def test_block_tags_create_and_update(server: SyncServer, default_user):
    """Test creating a block with tags and updating tags"""
    from letta.schemas.block import BlockUpdate

    mgr = BlockManager()

    # Tags supplied at creation time should round-trip.
    created = await mgr.create_or_update_block_async(
        PydanticBlock(
            label="test_tags",
            value="Block with tags",
            tags=["tag1", "tag2", "important"],
        ),
        actor=default_user,
    )
    assert set(created.tags) == {"tag1", "tag2", "important"}

    # A tag update replaces the previous tag set wholesale.
    updated = await mgr.update_block_async(
        block_id=created.id,
        block_update=BlockUpdate(tags=["tag1", "new_tag"]),
        actor=default_user,
    )
    assert set(updated.tags) == {"tag1", "new_tag"}

    # An empty list clears all tags.
    cleared = await mgr.update_block_async(
        block_id=created.id,
        block_update=BlockUpdate(tags=[]),
        actor=default_user,
    )
    assert cleared.tags == []
@pytest.mark.asyncio
async def test_block_tags_filter_any(server: SyncServer, default_user):
    """Test filtering blocks by tags (match ANY)"""
    mgr = BlockManager()

    # Three blocks with partially overlapping tag sets.
    created = []
    for label, value, tags in [("b1", "v1", ["alpha", "beta"]), ("b2", "v2", ["beta", "gamma"]), ("b3", "v3", ["delta"])]:
        created.append(await mgr.create_or_update_block_async(PydanticBlock(label=label, value=value, tags=tags), actor=default_user))
    b1, b2, b3 = created

    # "beta" matches b1 and b2 only.
    found = {b.id for b in await mgr.get_blocks_async(actor=default_user, tags=["beta"], match_all_tags=False)}
    assert b1.id in found
    assert b2.id in found
    assert b3.id not in found

    # "alpha" OR "delta" matches b1 and b3 only.
    found = {b.id for b in await mgr.get_blocks_async(actor=default_user, tags=["alpha", "delta"], match_all_tags=False)}
    assert b1.id in found
    assert b2.id not in found
    assert b3.id in found
@pytest.mark.asyncio
async def test_block_tags_filter_all(server: SyncServer, default_user):
    """Test filtering blocks by tags (match ALL)"""
    mgr = BlockManager()

    # Nested tag sets: {x,y,z} > {x,y} > {x}.
    created = []
    for label, value, tags in [("b1", "v1", ["x", "y", "z"]), ("b2", "v2", ["x", "y"]), ("b3", "v3", ["x"])]:
        created.append(await mgr.create_or_update_block_async(PydanticBlock(label=label, value=value, tags=tags), actor=default_user))
    b1, b2, b3 = created

    # ALL of {x, y}: b1 and b2 qualify.
    found = {b.id for b in await mgr.get_blocks_async(actor=default_user, tags=["x", "y"], match_all_tags=True)}
    assert b1.id in found
    assert b2.id in found
    assert b3.id not in found

    # ALL of {x, y, z}: only b1 qualifies.
    found = {b.id for b in await mgr.get_blocks_async(actor=default_user, tags=["x", "y", "z"], match_all_tags=True)}
    assert b1.id in found
    assert b2.id not in found
    assert b3.id not in found
@pytest.mark.asyncio
async def test_block_tags_count(server: SyncServer, default_user):
    """Test counting blocks with tag filters"""
    mgr = BlockManager()

    # Two blocks share the "count_test" tag; one carries only unrelated tags.
    for label, value, tags in [
        ("c1", "v1", ["count_test", "a"]),
        ("c2", "v2", ["count_test", "b"]),
        ("c3", "v3", ["other"]),
    ]:
        await mgr.create_or_update_block_async(PydanticBlock(label=label, value=value, tags=tags), actor=default_user)

    # ANY-match on "count_test" sees both tagged blocks.
    assert await mgr.count_blocks_async(actor=default_user, tags=["count_test"], match_all_tags=False) == 2
    # ALL-match on "count_test" + "a" narrows to a single block.
    assert await mgr.count_blocks_async(actor=default_user, tags=["count_test", "a"], match_all_tags=True) == 1
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/managers/test_block_manager.py",
"license": "Apache License 2.0",
"lines": 1224,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/managers/test_file_manager.py | import asyncio
import time
import pytest
# Import shared fixtures and constants from conftest
from conftest import (
CREATE_DELAY_SQLITE,
USING_SQLITE,
)
from letta.schemas.file import FileMetadata as PydanticFileMetadata
# ======================================================================================================================
# FileAgent Tests
# ======================================================================================================================
@pytest.mark.asyncio
async def test_attach_creates_association(server, default_user, sarah_agent, default_file):
    """Attaching a file creates an open association and a matching memory file block."""
    assoc, _ = await server.file_agent_manager.attach_file(
        agent_id=sarah_agent.id,
        file_id=default_file.id,
        file_name=default_file.file_name,
        source_id=default_file.source_id,
        actor=default_user,
        visible_content="hello",
        max_files_open=sarah_agent.max_files_open,
    )

    # The association reflects the attach parameters.
    assert assoc.file_id == default_file.id
    assert assoc.is_open is True
    assert assoc.visible_content == "hello"

    # The agent's memory gains exactly one file block mirroring the association.
    refreshed = await server.agent_manager.get_agent_by_id_async(agent_id=sarah_agent.id, actor=default_user)
    blocks = refreshed.memory.file_blocks
    assert len(blocks) == 1
    assert blocks[0].value == assoc.visible_content
    assert blocks[0].label == default_file.file_name
@pytest.mark.asyncio
async def test_attach_is_idempotent(server, default_user, sarah_agent, default_file):
    """Attaching the same file twice updates the existing association in place (same id, new params)."""
    # Fix: this async test was missing @pytest.mark.asyncio, unlike its siblings in
    # this file — without it (absent asyncio auto mode) pytest never awaits the coroutine.
    a1, _closed_files = await server.file_agent_manager.attach_file(
        agent_id=sarah_agent.id,
        file_id=default_file.id,
        file_name=default_file.file_name,
        source_id=default_file.source_id,
        actor=default_user,
        visible_content="first",
        max_files_open=sarah_agent.max_files_open,
    )
    # second attach with different params
    a2, _closed_files = await server.file_agent_manager.attach_file(
        agent_id=sarah_agent.id,
        file_id=default_file.id,
        file_name=default_file.file_name,
        source_id=default_file.source_id,
        actor=default_user,
        is_open=False,
        visible_content="second",
        max_files_open=sarah_agent.max_files_open,
    )
    # Same association row, updated fields.
    assert a1.id == a2.id
    assert a2.is_open is False
    assert a2.visible_content == "second"
    # The agent's memory block reflects the latest (closed) state: label kept, value emptied.
    sarah_agent = await server.agent_manager.get_agent_by_id_async(agent_id=sarah_agent.id, actor=default_user)
    file_blocks = sarah_agent.memory.file_blocks
    assert len(file_blocks) == 1
    assert file_blocks[0].value == ""  # not open
    assert file_blocks[0].label == default_file.file_name
@pytest.mark.asyncio
async def test_update_file_agent(server, file_attachment, default_user):
    """update_file_agent_by_id persists is_open and visible_content changes."""
    # Fix: added the missing @pytest.mark.asyncio marker (sibling async tests carry it;
    # without it, absent asyncio auto mode, the coroutine is never awaited).
    updated = await server.file_agent_manager.update_file_agent_by_id(
        agent_id=file_attachment.agent_id,
        file_id=file_attachment.file_id,
        actor=default_user,
        is_open=False,
        visible_content="updated",
    )
    assert updated.is_open is False
    assert updated.visible_content == "updated"
@pytest.mark.asyncio
async def test_update_file_agent_by_file_name(server, file_attachment, default_user):
    """update_file_agent_by_name persists changes and leaves line-range fields at their None defaults."""
    # Fix: added the missing @pytest.mark.asyncio marker for consistency with the
    # decorated async tests in this file.
    updated = await server.file_agent_manager.update_file_agent_by_name(
        agent_id=file_attachment.agent_id,
        file_name=file_attachment.file_name,
        actor=default_user,
        is_open=False,
        visible_content="updated",
    )
    assert updated.is_open is False
    assert updated.visible_content == "updated"
    assert updated.start_line is None  # start_line should default to None
    assert updated.end_line is None  # end_line should default to None
@pytest.mark.asyncio
async def test_file_agent_line_tracking(server, default_user, sarah_agent, default_source):
    """Test that line information is captured when opening files with line ranges"""
    from letta.schemas.file import FileMetadata as PydanticFileMetadata

    # A five-line file gives us a meaningful range to open.
    body = "line 1\nline 2\nline 3\nline 4\nline 5"
    meta = PydanticFileMetadata(
        file_name="test_lines.txt",
        organization_id=default_user.organization_id,
        source_id=default_source.id,
    )
    created = await server.file_manager.create_file(file_metadata=meta, actor=default_user, text=body)

    # Open lines [2, 4) — start is 1-indexed, end is exclusive.
    _closed, _already_open, prev_ranges = await server.file_agent_manager.enforce_max_open_files_and_open(
        agent_id=sarah_agent.id,
        file_id=created.id,
        file_name=created.file_name,
        source_id=created.source_id,
        actor=default_user,
        visible_content="2: line 2\n3: line 3",
        max_files_open=sarah_agent.max_files_open,
        start_line=2,  # 1-indexed
        end_line=4,  # exclusive
    )

    # The stored association records the opened range.
    fa = await server.file_agent_manager.get_file_agent_by_id(
        agent_id=sarah_agent.id,
        file_id=created.id,
        actor=default_user,
    )
    assert fa.start_line == 2
    assert fa.end_line == 4
    assert prev_ranges == {}  # No previous range since it wasn't open before

    # Re-open without a range: line info clears and the old range is reported back.
    _closed, _already_open, prev_ranges = await server.file_agent_manager.enforce_max_open_files_and_open(
        agent_id=sarah_agent.id,
        file_id=created.id,
        file_name=created.file_name,
        source_id=created.source_id,
        actor=default_user,
        visible_content="full file content",
        max_files_open=sarah_agent.max_files_open,
        start_line=None,
        end_line=None,
    )

    fa = await server.file_agent_manager.get_file_agent_by_id(
        agent_id=sarah_agent.id,
        file_id=created.id,
        actor=default_user,
    )
    assert fa.start_line is None
    assert fa.end_line is None
    assert prev_ranges == {created.file_name: (2, 4)}  # Should capture the previous range
@pytest.mark.asyncio
async def test_mark_access(server, file_attachment, default_user):
    """mark_access advances the association's last_accessed_at timestamp."""
    # Fix: added the missing @pytest.mark.asyncio marker for consistency with the
    # decorated async tests in this file.
    old_ts = file_attachment.last_accessed_at
    # Sleep long enough for the stored timestamp to visibly advance
    # (SQLite needs a coarser delay than the in-process async sleep).
    if USING_SQLITE:
        time.sleep(CREATE_DELAY_SQLITE)
    else:
        await asyncio.sleep(0.01)
    await server.file_agent_manager.mark_access(
        agent_id=file_attachment.agent_id,
        file_id=file_attachment.file_id,
        actor=default_user,
    )
    refreshed = await server.file_agent_manager.get_file_agent_by_id(
        agent_id=file_attachment.agent_id,
        file_id=file_attachment.file_id,
        actor=default_user,
    )
    assert refreshed.last_accessed_at > old_ts
@pytest.mark.asyncio
async def test_list_files_and_agents(
    server,
    default_user,
    sarah_agent,
    charles_agent,
    default_file,
    another_file,
):
    """Listing helpers see attachments from both directions (files-per-agent and agents-per-file)."""
    # Fix: added the missing @pytest.mark.asyncio marker for consistency with the
    # decorated async tests in this file.
    # default_file ↔ charles (open)
    await server.file_agent_manager.attach_file(
        agent_id=charles_agent.id,
        file_id=default_file.id,
        file_name=default_file.file_name,
        source_id=default_file.source_id,
        actor=default_user,
        max_files_open=charles_agent.max_files_open,
    )
    # default_file ↔ sarah (open)
    await server.file_agent_manager.attach_file(
        agent_id=sarah_agent.id,
        file_id=default_file.id,
        file_name=default_file.file_name,
        source_id=default_file.source_id,
        actor=default_user,
        max_files_open=sarah_agent.max_files_open,
    )
    # another_file ↔ sarah (closed)
    await server.file_agent_manager.attach_file(
        agent_id=sarah_agent.id,
        file_id=another_file.id,
        file_name=another_file.file_name,
        source_id=another_file.source_id,
        actor=default_user,
        is_open=False,
        max_files_open=sarah_agent.max_files_open,
    )
    # Sarah sees both files; the open-only filter hides the closed one.
    files_for_sarah = await server.file_agent_manager.list_files_for_agent(
        sarah_agent.id, per_file_view_window_char_limit=sarah_agent.per_file_view_window_char_limit, actor=default_user
    )
    assert {f.file_id for f in files_for_sarah} == {default_file.id, another_file.id}
    open_only = await server.file_agent_manager.list_files_for_agent(
        sarah_agent.id, per_file_view_window_char_limit=sarah_agent.per_file_view_window_char_limit, actor=default_user, is_open_only=True
    )
    assert {f.file_id for f in open_only} == {default_file.id}
    # Both agents are listed for the shared file.
    agents_for_default = await server.file_agent_manager.list_agents_for_file(default_file.id, actor=default_user)
    assert {a.agent_id for a in agents_for_default} == {sarah_agent.id, charles_agent.id}
    # Memory file blocks mirror the attachments: two for sarah, one (closed => empty value) for charles.
    sarah_agent = await server.agent_manager.get_agent_by_id_async(agent_id=sarah_agent.id, actor=default_user)
    file_blocks = sarah_agent.memory.file_blocks
    assert len(file_blocks) == 2
    charles_agent = await server.agent_manager.get_agent_by_id_async(agent_id=charles_agent.id, actor=default_user)
    file_blocks = charles_agent.memory.file_blocks
    assert len(file_blocks) == 1
    assert file_blocks[0].value == ""
    assert file_blocks[0].label == default_file.file_name
@pytest.mark.asyncio
async def test_list_files_for_agent_paginated_basic(
    server,
    default_user,
    sarah_agent,
    default_source,
):
    """Walk two pages of an agent's attachments and check cursor handoff."""
    # Attach five freshly created files to sarah.
    for idx in range(5):
        meta = PydanticFileMetadata(
            file_name=f"paginated_file_{idx}.txt",
            source_id=default_source.id,
            organization_id=default_user.organization_id,
        )
        created = await server.file_manager.create_file(meta, actor=default_user)
        await server.file_agent_manager.attach_file(
            agent_id=sarah_agent.id,
            file_id=created.id,
            file_name=created.file_name,
            source_id=created.source_id,
            actor=default_user,
            max_files_open=sarah_agent.max_files_open,
        )

    # First page holds three rows and signals more to come.
    first_page, first_cursor, first_has_more = await server.file_agent_manager.list_files_for_agent_paginated(
        agent_id=sarah_agent.id,
        actor=default_user,
        limit=3,
    )
    assert len(first_page) == 3
    assert first_has_more is True
    assert first_cursor is not None

    # Second page resumes from the cursor and holds the remaining two rows.
    second_page, second_cursor, second_has_more = await server.file_agent_manager.list_files_for_agent_paginated(
        agent_id=sarah_agent.id,
        actor=default_user,
        cursor=first_cursor,
        limit=3,
    )
    assert len(second_page) == 2
    assert second_has_more is False
    assert second_cursor is not None

    # The two pages must not share any association rows.
    assert {fa.id for fa in first_page}.isdisjoint({fa.id for fa in second_page})
@pytest.mark.asyncio
async def test_list_files_for_agent_paginated_filter_open(
    server,
    default_user,
    sarah_agent,
    default_source,
):
    """Pagination restricted to open files via the is_open=True filter."""
    # Attach five files; indices 0-2 start open, 3-4 start closed.
    for idx in range(5):
        meta = PydanticFileMetadata(
            file_name=f"filter_file_{idx}.txt",
            source_id=default_source.id,
            organization_id=default_user.organization_id,
        )
        created = await server.file_manager.create_file(meta, actor=default_user)
        await server.file_agent_manager.attach_file(
            agent_id=sarah_agent.id,
            file_id=created.id,
            file_name=created.file_name,
            source_id=created.source_id,
            actor=default_user,
            is_open=idx < 3,
            max_files_open=sarah_agent.max_files_open,
        )

    # A generous limit should return every open file in one page.
    open_page, _cursor, more = await server.file_agent_manager.list_files_for_agent_paginated(
        agent_id=sarah_agent.id,
        actor=default_user,
        is_open=True,
        limit=10,
    )
    assert len(open_page) == 3
    assert more is False
    assert all(row.is_open for row in open_page)
@pytest.mark.asyncio
async def test_list_files_for_agent_paginated_filter_closed(
    server,
    default_user,
    sarah_agent,
    default_source,
):
    """Paginate through only the closed files via the is_open=False filter."""
    # Six attachments: indices 0-1 open, 2-5 closed.
    for idx in range(6):
        meta = PydanticFileMetadata(
            file_name=f"closed_file_{idx}.txt",
            source_id=default_source.id,
            organization_id=default_user.organization_id,
        )
        created = await server.file_manager.create_file(meta, actor=default_user)
        await server.file_agent_manager.attach_file(
            agent_id=sarah_agent.id,
            file_id=created.id,
            file_name=created.file_name,
            source_id=created.source_id,
            actor=default_user,
            is_open=idx < 2,
            max_files_open=sarah_agent.max_files_open,
        )

    # First page: two of the four closed files, with more remaining.
    first_page, first_cursor, first_more = await server.file_agent_manager.list_files_for_agent_paginated(
        agent_id=sarah_agent.id,
        actor=default_user,
        is_open=False,
        limit=2,
    )
    assert len(first_page) == 2
    assert first_more is True
    assert all(not row.is_open for row in first_page)

    # Second page: the remaining two closed files, ending pagination.
    second_page, _second_cursor, second_more = await server.file_agent_manager.list_files_for_agent_paginated(
        agent_id=sarah_agent.id,
        actor=default_user,
        is_open=False,
        cursor=first_cursor,
        limit=3,
    )
    assert len(second_page) == 2
    assert second_more is False
    assert all(not row.is_open for row in second_page)
@pytest.mark.asyncio
async def test_list_files_for_agent_paginated_empty(
    server,
    default_user,
    charles_agent,
):
    """An agent with no attachments yields an empty page, no cursor, no more."""
    # charles_agent has nothing attached in this test.
    rows, next_cursor, more = await server.file_agent_manager.list_files_for_agent_paginated(
        agent_id=charles_agent.id,
        actor=default_user,
        limit=10,
    )
    assert not rows
    assert next_cursor is None
    assert more is False
@pytest.mark.asyncio
async def test_list_files_for_agent_paginated_large_limit(
    server,
    default_user,
    sarah_agent,
    default_source,
):
    """A limit larger than the row count returns everything in one page."""
    # Attach three files.
    for idx in range(3):
        meta = PydanticFileMetadata(
            file_name=f"all_files_{idx}.txt",
            source_id=default_source.id,
            organization_id=default_user.organization_id,
        )
        created = await server.file_manager.create_file(meta, actor=default_user)
        await server.file_agent_manager.attach_file(
            agent_id=sarah_agent.id,
            file_id=created.id,
            file_name=created.file_name,
            source_id=created.source_id,
            actor=default_user,
            max_files_open=sarah_agent.max_files_open,
        )

    # Ask for far more rows than exist.
    everything, tail_cursor, more = await server.file_agent_manager.list_files_for_agent_paginated(
        agent_id=sarah_agent.id,
        actor=default_user,
        limit=100,
    )
    assert len(everything) == 3
    assert more is False
    # The cursor still points at the final row even when nothing follows it.
    assert tail_cursor is not None
@pytest.mark.asyncio
async def test_detach_file(server, file_attachment, default_user):
    """Detaching removes the association; a follow-up lookup returns None."""
    await server.file_agent_manager.detach_file(
        agent_id=file_attachment.agent_id,
        file_id=file_attachment.file_id,
        actor=default_user,
    )

    lookup = await server.file_agent_manager.get_file_agent_by_id(
        agent_id=file_attachment.agent_id,
        file_id=file_attachment.file_id,
        actor=default_user,
    )
    assert lookup is None
@pytest.mark.asyncio
async def test_detach_file_bulk(
    server,
    default_user,
    sarah_agent,
    charles_agent,
    default_source,
):
    """Test bulk deletion of multiple agent-file associations.

    Covers three cases: targeted bulk detach across two agents, the
    empty-input no-op, and idempotency when the associations were already
    removed. (Added the @pytest.mark.asyncio marker for consistency with the
    sibling async tests above; without it the test is silently skipped under
    pytest-asyncio strict mode.)
    """
    # Create multiple files
    files = []
    for i in range(3):
        file_metadata = PydanticFileMetadata(
            file_name=f"test_file_{i}.txt",
            source_id=default_source.id,
            organization_id=default_user.organization_id,
        )
        file = await server.file_manager.create_file(file_metadata, actor=default_user)
        files.append(file)

    # Attach all files to both agents
    for file in files:
        await server.file_agent_manager.attach_file(
            agent_id=sarah_agent.id,
            file_id=file.id,
            file_name=file.file_name,
            source_id=file.source_id,
            actor=default_user,
            max_files_open=sarah_agent.max_files_open,
        )
        await server.file_agent_manager.attach_file(
            agent_id=charles_agent.id,
            file_id=file.id,
            file_name=file.file_name,
            source_id=file.source_id,
            actor=default_user,
            max_files_open=charles_agent.max_files_open,
        )

    # Verify all files are attached to both agents
    sarah_files = await server.file_agent_manager.list_files_for_agent(
        sarah_agent.id, per_file_view_window_char_limit=sarah_agent.per_file_view_window_char_limit, actor=default_user
    )
    charles_files = await server.file_agent_manager.list_files_for_agent(
        charles_agent.id, per_file_view_window_char_limit=charles_agent.per_file_view_window_char_limit, actor=default_user
    )
    assert len(sarah_files) == 3
    assert len(charles_files) == 3

    # Test 1: Bulk delete specific files from specific agents
    agent_file_pairs = [
        (sarah_agent.id, files[0].id),  # Remove file 0 from sarah
        (sarah_agent.id, files[1].id),  # Remove file 1 from sarah
        (charles_agent.id, files[1].id),  # Remove file 1 from charles
    ]
    deleted_count = await server.file_agent_manager.detach_file_bulk(agent_file_pairs=agent_file_pairs, actor=default_user)
    assert deleted_count == 3

    # Verify the correct files were deleted
    sarah_files = await server.file_agent_manager.list_files_for_agent(
        sarah_agent.id, per_file_view_window_char_limit=sarah_agent.per_file_view_window_char_limit, actor=default_user
    )
    charles_files = await server.file_agent_manager.list_files_for_agent(
        charles_agent.id, per_file_view_window_char_limit=charles_agent.per_file_view_window_char_limit, actor=default_user
    )
    # Sarah should only have file 2 left
    assert len(sarah_files) == 1
    assert sarah_files[0].file_id == files[2].id
    # Charles should have files 0 and 2 left
    assert len(charles_files) == 2
    charles_file_ids = {f.file_id for f in charles_files}
    assert charles_file_ids == {files[0].id, files[2].id}

    # Test 2: Empty list should return 0 and not fail
    deleted_count = await server.file_agent_manager.detach_file_bulk(agent_file_pairs=[], actor=default_user)
    assert deleted_count == 0

    # Test 3: Attempting to delete already deleted associations should return 0
    agent_file_pairs = [
        (sarah_agent.id, files[0].id),  # Already deleted
        (sarah_agent.id, files[1].id),  # Already deleted
    ]
    deleted_count = await server.file_agent_manager.detach_file_bulk(agent_file_pairs=agent_file_pairs, actor=default_user)
    assert deleted_count == 0
async def test_org_scoping(
    server,
    default_user,
    other_user_different_org,
    sarah_agent,
    default_file,
):
    """Attachments must be invisible to actors from a different organization."""
    # Attach on behalf of default_user's org.
    await server.file_agent_manager.attach_file(
        agent_id=sarah_agent.id,
        file_id=default_file.id,
        file_name=default_file.file_name,
        source_id=default_file.source_id,
        actor=default_user,
        max_files_open=sarah_agent.max_files_open,
    )

    # An actor from another org sees no attachments at all.
    visible = await server.file_agent_manager.list_files_for_agent(
        sarah_agent.id, per_file_view_window_char_limit=sarah_agent.per_file_view_window_char_limit, actor=other_user_different_org
    )
    assert visible == []
# ======================================================================================================================
# LRU File Management Tests
# ======================================================================================================================
async def test_mark_access_bulk(server, default_user, sarah_agent, default_source):
    """Test that mark_access_bulk updates last_accessed_at for multiple files.

    Attaches three files, snapshots their timestamps, bulk-marks two of them,
    and checks that exactly the marked subset advanced while the unmarked file
    kept its original last_accessed_at.

    NOTE(review): unlike the paginated tests above, this async test carries no
    @pytest.mark.asyncio marker — presumably the suite runs with asyncio
    auto mode; confirm in the pytest config.
    """
    import time

    # Create multiple files and attach them
    files = []
    for i in range(3):
        file_metadata = PydanticFileMetadata(
            file_name=f"test_file_{i}.txt",
            organization_id=default_user.organization_id,
            source_id=default_source.id,
        )
        file = await server.file_manager.create_file(file_metadata=file_metadata, actor=default_user, text=f"test content {i}")
        files.append(file)

    # Attach all files (they'll be open by default)
    attached_files = []
    for file in files:
        file_agent, _closed_files = await server.file_agent_manager.attach_file(
            agent_id=sarah_agent.id,
            file_id=file.id,
            file_name=file.file_name,
            source_id=file.source_id,
            actor=default_user,
            visible_content=f"content for {file.file_name}",
            max_files_open=sarah_agent.max_files_open,
        )
        attached_files.append(file_agent)

    # Get initial timestamps (re-fetched so we compare persisted values)
    initial_times = {}
    for file_agent in attached_files:
        fa = await server.file_agent_manager.get_file_agent_by_id(agent_id=sarah_agent.id, file_id=file_agent.file_id, actor=default_user)
        initial_times[fa.file_name] = fa.last_accessed_at

    # Wait a moment to ensure timestamp difference
    # (just over 1s — presumably the timestamp has second-level resolution; TODO confirm)
    time.sleep(1.1)

    # Use mark_access_bulk on subset of files (first and third only)
    file_names_to_mark = [files[0].file_name, files[2].file_name]
    await server.file_agent_manager.mark_access_bulk(agent_id=sarah_agent.id, file_names=file_names_to_mark, actor=default_user)

    # Check that only marked files have updated timestamps
    for i, file in enumerate(files):
        fa = await server.file_agent_manager.get_file_agent_by_id(agent_id=sarah_agent.id, file_id=file.id, actor=default_user)
        if file.file_name in file_names_to_mark:
            assert fa.last_accessed_at > initial_times[file.file_name], f"File {file.file_name} should have updated timestamp"
        else:
            assert fa.last_accessed_at == initial_times[file.file_name], f"File {file.file_name} should not have updated timestamp"
async def test_lru_eviction_on_attach(server, default_user, sarah_agent, default_source):
    """Test that attaching files beyond max_files_open triggers LRU eviction.

    Attaches max_files_open + 2 files one at a time (with small sleeps so
    last-accessed ordering is unambiguous) and verifies that the two oldest
    attachments are reported closed while the newest max_files_open stay open.
    """
    import time

    # Use the agent's configured max_files_open
    max_files_open = sarah_agent.max_files_open

    # Create more files than the limit
    files = []
    for i in range(max_files_open + 2):  # e.g., 7 files for max_files_open=5
        file_metadata = PydanticFileMetadata(
            file_name=f"lru_test_file_{i}.txt",
            organization_id=default_user.organization_id,
            source_id=default_source.id,
        )
        file = await server.file_manager.create_file(file_metadata=file_metadata, actor=default_user, text=f"test content {i}")
        files.append(file)

    # Attach files one by one with small delays to ensure different timestamps
    attached_files = []
    all_closed_files = []
    for i, file in enumerate(files):
        if i > 0:
            time.sleep(0.1)  # Small delay to ensure different timestamps
        file_agent, closed_files = await server.file_agent_manager.attach_file(
            agent_id=sarah_agent.id,
            file_id=file.id,
            file_name=file.file_name,
            source_id=file.source_id,
            actor=default_user,
            visible_content=f"content for {file.file_name}",
            max_files_open=sarah_agent.max_files_open,
        )
        attached_files.append(file_agent)
        all_closed_files.extend(closed_files)

        # Check that we never exceed max_files_open (invariant holds after every attach)
        open_files = await server.file_agent_manager.list_files_for_agent(
            sarah_agent.id,
            per_file_view_window_char_limit=sarah_agent.per_file_view_window_char_limit,
            actor=default_user,
            is_open_only=True,
        )
        assert len(open_files) <= max_files_open, f"Should never exceed {max_files_open} open files"

    # Should have closed exactly 2 files (e.g., 7 - 5 = 2 for max_files_open=5)
    expected_closed_count = len(files) - max_files_open
    assert len(all_closed_files) == expected_closed_count, (
        f"Should have closed {expected_closed_count} files, but closed: {all_closed_files}"
    )

    # Check that the oldest files were closed (first N files attached)
    expected_closed = [files[i].file_name for i in range(expected_closed_count)]
    assert set(all_closed_files) == set(expected_closed), f"Wrong files closed. Expected {expected_closed}, got {all_closed_files}"

    # Check that exactly max_files_open files are open
    open_files = await server.file_agent_manager.list_files_for_agent(
        sarah_agent.id, per_file_view_window_char_limit=sarah_agent.per_file_view_window_char_limit, actor=default_user, is_open_only=True
    )
    assert len(open_files) == max_files_open

    # Check that the most recently attached files are still open
    open_file_names = {f.file_name for f in open_files}
    expected_open = {files[i].file_name for i in range(expected_closed_count, len(files))}  # last max_files_open files
    assert open_file_names == expected_open
async def test_lru_eviction_on_open_file(server, default_user, sarah_agent, default_source):
    """Test that opening a file beyond max_files_open triggers LRU eviction.

    Fills the agent to the open-file cap, attaches one extra file in the
    closed state, then opens it via enforce_max_open_files_and_open and
    verifies the single oldest open file was evicted in its place.
    """
    import time

    max_files_open = sarah_agent.max_files_open

    # Create files equal to the limit
    files = []
    for i in range(max_files_open + 1):  # 6 files for max_files_open=5
        file_metadata = PydanticFileMetadata(
            file_name=f"open_test_file_{i}.txt",
            organization_id=default_user.organization_id,
            source_id=default_source.id,
        )
        file = await server.file_manager.create_file(file_metadata=file_metadata, actor=default_user, text=f"test content {i}")
        files.append(file)

    # Attach first max_files_open files
    for i in range(max_files_open):
        time.sleep(0.1)  # Small delay for different timestamps
        await server.file_agent_manager.attach_file(
            agent_id=sarah_agent.id,
            file_id=files[i].id,
            file_name=files[i].file_name,
            source_id=files[i].source_id,
            actor=default_user,
            visible_content=f"content for {files[i].file_name}",
            max_files_open=sarah_agent.max_files_open,
        )

    # Attach the last file as closed (so it doesn't trigger eviction yet)
    await server.file_agent_manager.attach_file(
        agent_id=sarah_agent.id,
        file_id=files[-1].id,
        file_name=files[-1].file_name,
        source_id=files[-1].source_id,
        actor=default_user,
        is_open=False,
        visible_content=f"content for {files[-1].file_name}",
        max_files_open=sarah_agent.max_files_open,
    )

    # All files should be attached but only max_files_open should be open
    all_files = await server.file_agent_manager.list_files_for_agent(
        sarah_agent.id, per_file_view_window_char_limit=sarah_agent.per_file_view_window_char_limit, actor=default_user
    )
    open_files = await server.file_agent_manager.list_files_for_agent(
        sarah_agent.id, per_file_view_window_char_limit=sarah_agent.per_file_view_window_char_limit, actor=default_user, is_open_only=True
    )
    assert len(all_files) == max_files_open + 1
    assert len(open_files) == max_files_open

    # Wait a moment (so the open below is strictly newer than prior accesses)
    time.sleep(0.1)

    # Now "open" the last file using the efficient method
    closed_files, _was_already_open, _ = await server.file_agent_manager.enforce_max_open_files_and_open(
        agent_id=sarah_agent.id,
        file_id=files[-1].id,
        file_name=files[-1].file_name,
        source_id=files[-1].source_id,
        actor=default_user,
        visible_content="updated content",
        max_files_open=sarah_agent.max_files_open,
    )

    # Should have closed 1 file (the oldest one)
    assert len(closed_files) == 1, f"Should have closed 1 file, got: {closed_files}"
    assert closed_files[0] == files[0].file_name, f"Should have closed oldest file {files[0].file_name}"

    # Check that exactly max_files_open files are still open
    open_files = await server.file_agent_manager.list_files_for_agent(
        sarah_agent.id, per_file_view_window_char_limit=sarah_agent.per_file_view_window_char_limit, actor=default_user, is_open_only=True
    )
    assert len(open_files) == max_files_open

    # Check that the newly opened file is open and the oldest is closed
    last_file_agent = await server.file_agent_manager.get_file_agent_by_id(
        agent_id=sarah_agent.id, file_id=files[-1].id, actor=default_user
    )
    first_file_agent = await server.file_agent_manager.get_file_agent_by_id(
        agent_id=sarah_agent.id, file_id=files[0].id, actor=default_user
    )
    assert last_file_agent.is_open is True, "Last file should be open"
    assert first_file_agent.is_open is False, "First file should be closed"
async def test_lru_no_eviction_when_reopening_same_file(server, default_user, sarah_agent, default_source):
    """Test that reopening an already open file doesn't trigger unnecessary eviction.

    Fills the agent exactly to max_files_open, then re-opens a file that is
    already open; no eviction should occur and the open set must be unchanged.
    """
    import time

    max_files_open = sarah_agent.max_files_open

    # Create files equal to the limit
    files = []
    for i in range(max_files_open):
        file_metadata = PydanticFileMetadata(
            file_name=f"reopen_test_file_{i}.txt",
            organization_id=default_user.organization_id,
            source_id=default_source.id,
        )
        file = await server.file_manager.create_file(file_metadata=file_metadata, actor=default_user, text=f"test content {i}")
        files.append(file)

    # Attach all files (they'll be open)
    for i, file in enumerate(files):
        time.sleep(0.1)  # Small delay for different timestamps
        await server.file_agent_manager.attach_file(
            agent_id=sarah_agent.id,
            file_id=file.id,
            file_name=file.file_name,
            source_id=file.source_id,
            actor=default_user,
            visible_content=f"content for {file.file_name}",
            max_files_open=sarah_agent.max_files_open,
        )

    # All files should be open
    open_files = await server.file_agent_manager.list_files_for_agent(
        sarah_agent.id, per_file_view_window_char_limit=sarah_agent.per_file_view_window_char_limit, actor=default_user, is_open_only=True
    )
    assert len(open_files) == max_files_open
    initial_open_names = {f.file_name for f in open_files}

    # Wait a moment
    time.sleep(0.1)

    # "Reopen" the last file (which is already open)
    closed_files, was_already_open, _ = await server.file_agent_manager.enforce_max_open_files_and_open(
        agent_id=sarah_agent.id,
        file_id=files[-1].id,
        file_name=files[-1].file_name,
        source_id=files[-1].source_id,
        actor=default_user,
        visible_content="updated content",
        max_files_open=sarah_agent.max_files_open,
    )

    # Should not have closed any files since we're within the limit
    assert len(closed_files) == 0, f"Should not have closed any files when reopening, got: {closed_files}"
    assert was_already_open is True, "File should have been detected as already open"

    # All the same files should still be open
    open_files = await server.file_agent_manager.list_files_for_agent(
        sarah_agent.id, per_file_view_window_char_limit=sarah_agent.per_file_view_window_char_limit, actor=default_user, is_open_only=True
    )
    assert len(open_files) == max_files_open
    final_open_names = {f.file_name for f in open_files}
    assert initial_open_names == final_open_names, "Same files should remain open"
async def test_last_accessed_at_updates_correctly(server, default_user, sarah_agent, default_source):
    """Test that last_accessed_at is updated in the correct scenarios.

    Exercises three mutation paths in sequence — update_file_agent_by_id,
    update_file_agent_by_name, and mark_access — asserting each one advances
    the association's last_accessed_at beyond the previous value.
    """
    import time

    # Create and attach a file
    file_metadata = PydanticFileMetadata(
        file_name="timestamp_test.txt",
        organization_id=default_user.organization_id,
        source_id=default_source.id,
    )
    file = await server.file_manager.create_file(file_metadata=file_metadata, actor=default_user, text="test content")
    file_agent, _closed_files = await server.file_agent_manager.attach_file(
        agent_id=sarah_agent.id,
        file_id=file.id,
        file_name=file.file_name,
        source_id=file.source_id,
        actor=default_user,
        visible_content="initial content",
        max_files_open=sarah_agent.max_files_open,
    )
    initial_time = file_agent.last_accessed_at

    # Sleep just over a second so the next timestamp is strictly greater
    # (presumably second-level timestamp resolution — TODO confirm)
    time.sleep(1.1)

    # Test update_file_agent_by_id updates timestamp
    updated_agent = await server.file_agent_manager.update_file_agent_by_id(
        agent_id=sarah_agent.id, file_id=file.id, actor=default_user, visible_content="updated content"
    )
    assert updated_agent.last_accessed_at > initial_time, "update_file_agent_by_id should update timestamp"

    time.sleep(1.1)
    prev_time = updated_agent.last_accessed_at

    # Test update_file_agent_by_name updates timestamp
    updated_agent2 = await server.file_agent_manager.update_file_agent_by_name(
        agent_id=sarah_agent.id, file_name=file.file_name, actor=default_user, is_open=False
    )
    assert updated_agent2.last_accessed_at > prev_time, "update_file_agent_by_name should update timestamp"

    time.sleep(1.1)
    prev_time = updated_agent2.last_accessed_at

    # Test mark_access updates timestamp (re-fetch to read the persisted value)
    await server.file_agent_manager.mark_access(agent_id=sarah_agent.id, file_id=file.id, actor=default_user)
    final_agent = await server.file_agent_manager.get_file_agent_by_id(agent_id=sarah_agent.id, file_id=file.id, actor=default_user)
    assert final_agent.last_accessed_at > prev_time, "mark_access should update timestamp"
async def test_attach_files_bulk_basic(server, default_user, sarah_agent, default_source):
    """Test basic functionality of attach_files_bulk method.

    Creates three files, bulk-attaches them, and verifies they are all open
    with exactly the visible content supplied in the content map.
    """
    # Create multiple files
    files = []
    for i in range(3):
        file_metadata = PydanticFileMetadata(
            file_name=f"bulk_test_{i}.txt",
            organization_id=default_user.organization_id,
            source_id=default_source.id,
        )
        file = await server.file_manager.create_file(file_metadata=file_metadata, actor=default_user, text=f"content {i}")
        files.append(file)

    # Create visible content map
    visible_content_map = {f"bulk_test_{i}.txt": f"visible content {i}" for i in range(3)}

    # Bulk attach files
    closed_files = await server.file_agent_manager.attach_files_bulk(
        agent_id=sarah_agent.id,
        files_metadata=files,
        visible_content_map=visible_content_map,
        actor=default_user,
        max_files_open=sarah_agent.max_files_open,
    )

    # Should not close any files since we're under the limit
    assert closed_files == []

    # Verify all files are attached and open
    attached_files = await server.file_agent_manager.list_files_for_agent(
        sarah_agent.id, per_file_view_window_char_limit=sarah_agent.per_file_view_window_char_limit, actor=default_user, is_open_only=True
    )
    assert len(attached_files) == 3
    attached_file_names = {f.file_name for f in attached_files}
    expected_names = {f"bulk_test_{i}.txt" for i in range(3)}
    assert attached_file_names == expected_names

    # Verify visible content is set correctly for every file, independent of
    # list order. (The previous enumerate-based check only asserted when the
    # returned order happened to match creation order, silently skipping the
    # content assertions otherwise.)
    actual_content = {f.file_name: f.visible_content for f in attached_files}
    assert actual_content == visible_content_map
async def test_attach_files_bulk_deduplication(server, default_user, sarah_agent, default_source):
    """attach_files_bulk collapses entries that share a single file name."""
    # Two distinct file records that carry the same name.
    duplicates = []
    for body in ("content 1", "content 2"):
        meta = PydanticFileMetadata(
            file_name="duplicate_test.txt",
            organization_id=default_user.organization_id,
            source_id=default_source.id,
        )
        duplicates.append(await server.file_manager.create_file(file_metadata=meta, actor=default_user, text=body))

    # Bulk attach both; the manager should deduplicate by name.
    await server.file_agent_manager.attach_files_bulk(
        agent_id=sarah_agent.id,
        files_metadata=duplicates,
        visible_content_map={"duplicate_test.txt": "visible content"},
        actor=default_user,
        max_files_open=sarah_agent.max_files_open,
    )

    # Exactly one association should survive.
    attached = await server.file_agent_manager.list_files_for_agent(
        sarah_agent.id, per_file_view_window_char_limit=sarah_agent.per_file_view_window_char_limit, actor=default_user
    )
    assert len(attached) == 1
    assert attached[0].file_name == "duplicate_test.txt"
async def test_attach_files_bulk_lru_eviction(server, default_user, sarah_agent, default_source):
    """Test that attach_files_bulk properly handles LRU eviction without duplicates.

    Fills the agent to max_files_open with individually attached files (using
    small sleeps to fix LRU ordering), then bulk-attaches three new files and
    verifies the three oldest were evicted exactly once each.
    """
    import time

    max_files_open = sarah_agent.max_files_open

    # First, fill up to the max with individual files
    existing_files = []
    for i in range(max_files_open):
        file_metadata = PydanticFileMetadata(
            file_name=f"existing_{i}.txt",
            organization_id=default_user.organization_id,
            source_id=default_source.id,
        )
        file = await server.file_manager.create_file(file_metadata=file_metadata, actor=default_user, text=f"existing {i}")
        existing_files.append(file)
        time.sleep(0.05)  # Small delay for different timestamps
        await server.file_agent_manager.attach_file(
            agent_id=sarah_agent.id,
            file_id=file.id,
            file_name=file.file_name,
            source_id=file.source_id,
            actor=default_user,
            visible_content=f"existing content {i}",
            max_files_open=sarah_agent.max_files_open,
        )

    # Verify we're at the limit
    open_files = await server.file_agent_manager.list_files_for_agent(
        sarah_agent.id, per_file_view_window_char_limit=sarah_agent.per_file_view_window_char_limit, actor=default_user, is_open_only=True
    )
    assert len(open_files) == max_files_open

    # Now bulk attach 3 new files (should trigger LRU eviction)
    new_files = []
    for i in range(3):
        file_metadata = PydanticFileMetadata(
            file_name=f"new_bulk_{i}.txt",
            organization_id=default_user.organization_id,
            source_id=default_source.id,
        )
        file = await server.file_manager.create_file(file_metadata=file_metadata, actor=default_user, text=f"new content {i}")
        new_files.append(file)

    visible_content_map = {f"new_bulk_{i}.txt": f"new visible {i}" for i in range(3)}

    # Bulk attach should evict oldest files
    closed_files = await server.file_agent_manager.attach_files_bulk(
        agent_id=sarah_agent.id,
        files_metadata=new_files,
        visible_content_map=visible_content_map,
        actor=default_user,
        max_files_open=sarah_agent.max_files_open,
    )

    # Should have closed exactly 3 files (oldest ones)
    assert len(closed_files) == 3

    # CRITICAL: Verify no duplicates in closed_files list
    assert len(closed_files) == len(set(closed_files)), f"Duplicate file names in closed_files: {closed_files}"

    # Verify expected files were closed (oldest 3)
    expected_closed = {f"existing_{i}.txt" for i in range(3)}
    actual_closed = set(closed_files)
    assert actual_closed == expected_closed

    # Verify we still have exactly max_files_open files open
    open_files_after = await server.file_agent_manager.list_files_for_agent(
        sarah_agent.id, per_file_view_window_char_limit=sarah_agent.per_file_view_window_char_limit, actor=default_user, is_open_only=True
    )
    assert len(open_files_after) == max_files_open

    # Verify the new files are open
    open_file_names = {f.file_name for f in open_files_after}
    for i in range(3):
        assert f"new_bulk_{i}.txt" in open_file_names
async def test_attach_files_bulk_mixed_existing_new(server, default_user, sarah_agent, default_source):
    """Bulk attach handles a mix of an already-attached (closed) file and new files."""
    # Attach one file individually first, starting in the closed state.
    existing_meta = PydanticFileMetadata(
        file_name="existing_file.txt",
        organization_id=default_user.organization_id,
        source_id=default_source.id,
    )
    pre_attached = await server.file_manager.create_file(file_metadata=existing_meta, actor=default_user, text="existing")
    await server.file_agent_manager.attach_file(
        agent_id=sarah_agent.id,
        file_id=pre_attached.id,
        file_name=pre_attached.file_name,
        source_id=pre_attached.source_id,
        actor=default_user,
        visible_content="old content",
        is_open=False,  # Start as closed
        max_files_open=sarah_agent.max_files_open,
    )

    # Two brand-new files.
    fresh = []
    for idx in range(2):
        meta = PydanticFileMetadata(
            file_name=f"new_file_{idx}.txt",
            organization_id=default_user.organization_id,
            source_id=default_source.id,
        )
        fresh.append(await server.file_manager.create_file(file_metadata=meta, actor=default_user, text=f"new {idx}"))

    # Bulk attach the existing file together with the new ones.
    closed = await server.file_agent_manager.attach_files_bulk(
        agent_id=sarah_agent.id,
        files_metadata=[pre_attached, *fresh],
        visible_content_map={
            "existing_file.txt": "updated content",
            "new_file_0.txt": "new content 0",
            "new_file_1.txt": "new content 1",
        },
        actor=default_user,
        max_files_open=sarah_agent.max_files_open,
    )
    # Nothing should have been evicted.
    assert closed == []

    # All three files should now be open.
    open_rows = await server.file_agent_manager.list_files_for_agent(
        sarah_agent.id, per_file_view_window_char_limit=sarah_agent.per_file_view_window_char_limit, actor=default_user, is_open_only=True
    )
    assert len(open_rows) == 3

    # The pre-attached file was reopened and its visible content refreshed.
    refreshed = await server.file_agent_manager.get_file_agent_by_file_name(
        agent_id=sarah_agent.id, file_name="existing_file.txt", actor=default_user
    )
    assert refreshed.is_open is True
    assert refreshed.visible_content == "updated content"
async def test_attach_files_bulk_empty_list(server, default_user, sarah_agent):
    """Bulk attaching an empty batch is a no-op that closes nothing."""
    closed = await server.file_agent_manager.attach_files_bulk(
        agent_id=sarah_agent.id, files_metadata=[], visible_content_map={}, actor=default_user, max_files_open=sarah_agent.max_files_open
    )
    assert closed == []

    # Nothing should have been attached either.
    rows = await server.file_agent_manager.list_files_for_agent(
        sarah_agent.id, per_file_view_window_char_limit=sarah_agent.per_file_view_window_char_limit, actor=default_user
    )
    assert not rows
async def test_attach_files_bulk_oversized_bulk(server, default_user, sarah_agent, default_source):
    """Bulk attaching more files than max_files_open allows evicts the excess."""
    cap = sarah_agent.max_files_open
    total = cap + 3  # three more than the open-file cap

    # Build the oversized batch.
    batch = []
    for idx in range(total):
        meta = PydanticFileMetadata(
            file_name=f"oversized_{idx}.txt",
            organization_id=default_user.organization_id,
            source_id=default_source.id,
        )
        batch.append(await server.file_manager.create_file(file_metadata=meta, actor=default_user, text=f"oversized {idx}"))

    content_map = {f"oversized_{idx}.txt": f"oversized visible {idx}" for idx in range(total)}

    # Attach everything at once.
    closed = await server.file_agent_manager.attach_files_bulk(
        agent_id=sarah_agent.id,
        files_metadata=batch,
        visible_content_map=content_map,
        actor=default_user,
        max_files_open=sarah_agent.max_files_open,
    )

    # Exactly the excess was closed, with no duplicate names reported.
    assert len(closed) == 3
    assert len(closed) == len(set(closed)), f"Duplicate file names in closed_files: {closed}"

    # Open count sits at the cap.
    open_rows = await server.file_agent_manager.list_files_for_agent(
        sarah_agent.id, per_file_view_window_char_limit=sarah_agent.per_file_view_window_char_limit, actor=default_user, is_open_only=True
    )
    assert len(open_rows) == cap

    # Every file remains attached (some open, some closed).
    all_rows = await server.file_agent_manager.list_files_for_agent(
        sarah_agent.id, per_file_view_window_char_limit=sarah_agent.per_file_view_window_char_limit, actor=default_user
    )
    assert len(all_rows) == cap + 3
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/managers/test_file_manager.py",
"license": "Apache License 2.0",
"lines": 1002,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/managers/test_group_manager.py | import pytest
# Import shared fixtures and constants from conftest
from letta.schemas.embedding_config import EmbeddingConfig
from letta.schemas.llm_config import LLMConfig
from letta.server.server import SyncServer
@pytest.mark.asyncio
async def test_create_internal_template_objects(server: SyncServer, default_user):
"""Test creating agents, groups, and blocks with template-related fields."""
from letta.schemas.agent import InternalTemplateAgentCreate
from letta.schemas.block import Block, InternalTemplateBlockCreate
from letta.schemas.group import InternalTemplateGroupCreate, RoundRobinManager
base_template_id = "base_123"
template_id = "template_456"
deployment_id = "deploy_789"
entity_id = "entity_012"
# Create agent with template fields (use sarah_agent as base, then create new one)
agent = await server.agent_manager.create_agent_async(
InternalTemplateAgentCreate(
name="template-agent",
agent_type="memgpt_v2_agent",
base_template_id=base_template_id,
template_id=template_id,
deployment_id=deployment_id,
entity_id=entity_id,
llm_config=LLMConfig.default_config("gpt-4o-mini"),
embedding_config=EmbeddingConfig.default_config(provider="openai"),
include_base_tools=False,
),
actor=default_user,
)
# Verify agent template fields
assert agent.base_template_id == base_template_id
assert agent.template_id == template_id
assert agent.deployment_id == deployment_id
assert agent.entity_id == entity_id
# Create block with template fields
block_create = InternalTemplateBlockCreate(
label="template_block",
value="Test block",
base_template_id=base_template_id,
template_id=template_id,
deployment_id=deployment_id,
entity_id=entity_id,
)
block = await server.block_manager.create_or_update_block_async(Block(**block_create.model_dump()), actor=default_user)
# Verify block template fields
assert block.base_template_id == base_template_id
assert block.template_id == template_id
assert block.deployment_id == deployment_id
assert block.entity_id == entity_id
# Create group with template fields (no entity_id for groups)
group = await server.group_manager.create_group_async(
InternalTemplateGroupCreate(
agent_ids=[agent.id],
description="Template group",
base_template_id=base_template_id,
template_id=template_id,
deployment_id=deployment_id,
manager_config=RoundRobinManager(),
),
actor=default_user,
)
# Verify group template fields and basic functionality
assert group.description == "Template group"
assert agent.id in group.agent_ids
assert group.base_template_id == base_template_id
assert group.template_id == template_id
assert group.deployment_id == deployment_id
# Clean up
await server.group_manager.delete_group_async(group.id, actor=default_user)
await server.block_manager.delete_block_async(block.id, actor=default_user)
await server.agent_manager.delete_agent_async(agent.id, actor=default_user)
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/managers/test_group_manager.py",
"license": "Apache License 2.0",
"lines": 72,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/managers/test_identity_manager.py | import pytest
# Import shared fixtures and constants from conftest
from letta.orm.errors import UniqueConstraintViolationError
from letta.schemas.agent import CreateAgent, UpdateAgent
from letta.schemas.block import Block as PydanticBlock
from letta.schemas.embedding_config import EmbeddingConfig
from letta.schemas.identity import IdentityCreate, IdentityProperty, IdentityPropertyType, IdentityType, IdentityUpdate, IdentityUpsert
from letta.schemas.llm_config import LLMConfig
from letta.server.server import SyncServer
from letta.services.block_manager import BlockManager
# ======================================================================================================================
# Identity Manager Tests
# ======================================================================================================================
@pytest.mark.asyncio
async def test_create_and_upsert_identity(server: SyncServer, default_user):
identity_create = IdentityCreate(
identifier_key="1234",
name="caren",
identity_type=IdentityType.user,
properties=[
IdentityProperty(key="email", value="caren@letta.com", type=IdentityPropertyType.string),
IdentityProperty(key="age", value=28, type=IdentityPropertyType.number),
],
)
identity = await server.identity_manager.create_identity_async(identity_create, actor=default_user)
# Assertions to ensure the created identity matches the expected values
assert identity.identifier_key == identity_create.identifier_key
assert identity.name == identity_create.name
assert identity.identity_type == identity_create.identity_type
assert identity.properties == identity_create.properties
assert identity.agent_ids == []
assert identity.project_id is None
with pytest.raises(UniqueConstraintViolationError):
await server.identity_manager.create_identity_async(
IdentityCreate(identifier_key="1234", name="sarah", identity_type=IdentityType.user),
actor=default_user,
)
identity_create.properties = [IdentityProperty(key="age", value=29, type=IdentityPropertyType.number)]
identity = await server.identity_manager.upsert_identity_async(
identity=IdentityUpsert(**identity_create.model_dump()), actor=default_user
)
identity = await server.identity_manager.get_identity_async(identity_id=identity.id, actor=default_user)
assert len(identity.properties) == 1
assert identity.properties[0].key == "age"
assert identity.properties[0].value == 29
await server.identity_manager.delete_identity_async(identity_id=identity.id, actor=default_user)
async def test_get_identities(server, default_user):
# Create identities to retrieve later
user = await server.identity_manager.create_identity_async(
IdentityCreate(name="caren", identifier_key="1234", identity_type=IdentityType.user), actor=default_user
)
org = await server.identity_manager.create_identity_async(
IdentityCreate(name="letta", identifier_key="0001", identity_type=IdentityType.org), actor=default_user
)
# Retrieve identities by different filters
all_identities, _, _ = await server.identity_manager.list_identities_async(actor=default_user)
assert len(all_identities) == 2
user_identities, _, _ = await server.identity_manager.list_identities_async(actor=default_user, identity_type=IdentityType.user)
assert len(user_identities) == 1
assert user_identities[0].name == user.name
org_identities, _, _ = await server.identity_manager.list_identities_async(actor=default_user, identity_type=IdentityType.org)
assert len(org_identities) == 1
assert org_identities[0].name == org.name
await server.identity_manager.delete_identity_async(identity_id=user.id, actor=default_user)
await server.identity_manager.delete_identity_async(identity_id=org.id, actor=default_user)
@pytest.mark.asyncio
async def test_update_identity(server: SyncServer, sarah_agent, charles_agent, default_user):
identity = await server.identity_manager.create_identity_async(
IdentityCreate(name="caren", identifier_key="1234", identity_type=IdentityType.user), actor=default_user
)
# Update identity fields
update_data = IdentityUpdate(
agent_ids=[sarah_agent.id, charles_agent.id],
properties=[IdentityProperty(key="email", value="caren@letta.com", type=IdentityPropertyType.string)],
)
await server.identity_manager.update_identity_async(identity_id=identity.id, identity=update_data, actor=default_user)
# Retrieve the updated identity
updated_identity = await server.identity_manager.get_identity_async(identity_id=identity.id, actor=default_user)
# Assertions to verify the update
assert updated_identity.agent_ids.sort() == update_data.agent_ids.sort()
assert updated_identity.properties == update_data.properties
agent_state = await server.agent_manager.get_agent_by_id_async(agent_id=sarah_agent.id, actor=default_user)
assert identity.id in agent_state.identity_ids
agent_state = await server.agent_manager.get_agent_by_id_async(agent_id=charles_agent.id, actor=default_user)
assert identity.id in agent_state.identity_ids
await server.identity_manager.delete_identity_async(identity_id=identity.id, actor=default_user)
@pytest.mark.asyncio
async def test_attach_detach_identity_from_agent(server: SyncServer, sarah_agent, default_user):
# Create an identity
identity = await server.identity_manager.create_identity_async(
IdentityCreate(name="caren", identifier_key="1234", identity_type=IdentityType.user), actor=default_user
)
agent_state = await server.agent_manager.update_agent_async(
agent_id=sarah_agent.id, agent_update=UpdateAgent(identity_ids=[identity.id]), actor=default_user
)
# Check that identity has been attached
assert identity.id in agent_state.identity_ids
# Now attempt to delete the identity
await server.identity_manager.delete_identity_async(identity_id=identity.id, actor=default_user)
# Verify that the identity was deleted
identities, _, _ = await server.identity_manager.list_identities_async(actor=default_user)
assert len(identities) == 0
# Check that block has been detached too
agent_state = await server.agent_manager.get_agent_by_id_async(agent_id=sarah_agent.id, actor=default_user)
assert identity.id not in agent_state.identity_ids
@pytest.mark.asyncio
async def test_get_set_agents_for_identities(server: SyncServer, sarah_agent, charles_agent, default_user):
identity = await server.identity_manager.create_identity_async(
IdentityCreate(name="caren", identifier_key="1234", identity_type=IdentityType.user, agent_ids=[sarah_agent.id, charles_agent.id]),
actor=default_user,
)
agent_with_identity = await server.create_agent_async(
CreateAgent(
agent_type="memgpt_v2_agent",
memory_blocks=[],
llm_config=LLMConfig.default_config("gpt-4o-mini"),
embedding_config=EmbeddingConfig.default_config(provider="openai"),
identity_ids=[identity.id],
include_base_tools=False,
),
actor=default_user,
)
agent_without_identity = await server.create_agent_async(
CreateAgent(
agent_type="memgpt_v2_agent",
memory_blocks=[],
llm_config=LLMConfig.default_config("gpt-4o-mini"),
embedding_config=EmbeddingConfig.default_config(provider="openai"),
include_base_tools=False,
),
actor=default_user,
)
# Get the agents for identity id
agent_states = await server.agent_manager.list_agents_async(identity_id=identity.id, actor=default_user)
assert len(agent_states) == 3
# Check all agents are in the list
agent_state_ids = [a.id for a in agent_states]
assert sarah_agent.id in agent_state_ids
assert charles_agent.id in agent_state_ids
assert agent_with_identity.id in agent_state_ids
assert agent_without_identity.id not in agent_state_ids
# Get the agents for identifier key
agent_states = await server.agent_manager.list_agents_async(identifier_keys=[identity.identifier_key], actor=default_user)
assert len(agent_states) == 3
# Check all agents are in the list
agent_state_ids = [a.id for a in agent_states]
assert sarah_agent.id in agent_state_ids
assert charles_agent.id in agent_state_ids
assert agent_with_identity.id in agent_state_ids
assert agent_without_identity.id not in agent_state_ids
# Delete new agents
await server.agent_manager.delete_agent_async(agent_id=agent_with_identity.id, actor=default_user)
await server.agent_manager.delete_agent_async(agent_id=agent_without_identity.id, actor=default_user)
# Get the agents for identity id
agent_states = await server.agent_manager.list_agents_async(identity_id=identity.id, actor=default_user)
assert len(agent_states) == 2
# Check only initial agents are in the list
agent_state_ids = [a.id for a in agent_states]
assert sarah_agent.id in agent_state_ids
assert charles_agent.id in agent_state_ids
await server.identity_manager.delete_identity_async(identity_id=identity.id, actor=default_user)
@pytest.mark.asyncio
async def test_upsert_properties(server: SyncServer, default_user):
identity_create = IdentityCreate(
identifier_key="1234",
name="caren",
identity_type=IdentityType.user,
properties=[
IdentityProperty(key="email", value="caren@letta.com", type=IdentityPropertyType.string),
IdentityProperty(key="age", value=28, type=IdentityPropertyType.number),
],
)
identity = await server.identity_manager.create_identity_async(identity_create, actor=default_user)
properties = [
IdentityProperty(key="email", value="caren@gmail.com", type=IdentityPropertyType.string),
IdentityProperty(key="age", value="28", type=IdentityPropertyType.string),
IdentityProperty(key="test", value=123, type=IdentityPropertyType.number),
]
updated_identity = await server.identity_manager.upsert_identity_properties_async(
identity_id=identity.id,
properties=properties,
actor=default_user,
)
assert updated_identity.properties == properties
await server.identity_manager.delete_identity_async(identity_id=identity.id, actor=default_user)
@pytest.mark.asyncio
async def test_attach_detach_identity_from_block(server: SyncServer, default_block, default_user):
# Create an identity
identity = await server.identity_manager.create_identity_async(
IdentityCreate(name="caren", identifier_key="1234", identity_type=IdentityType.user, block_ids=[default_block.id]),
actor=default_user,
)
# Check that identity has been attached
blocks = await server.block_manager.get_blocks_async(identity_id=identity.id, actor=default_user)
assert len(blocks) == 1 and blocks[0].id == default_block.id
# Now attempt to delete the identity
await server.identity_manager.delete_identity_async(identity_id=identity.id, actor=default_user)
# Verify that the identity was deleted
identities, _, _ = await server.identity_manager.list_identities_async(actor=default_user)
assert len(identities) == 0
# Check that block has been detached too
blocks = await server.block_manager.get_blocks_async(identity_id=identity.id, actor=default_user)
assert len(blocks) == 0
@pytest.mark.asyncio
async def test_get_set_blocks_for_identities(server: SyncServer, default_block, default_user):
block_manager = BlockManager()
block_with_identity = await block_manager.create_or_update_block_async(
PydanticBlock(label="persona", value="Original Content"), actor=default_user
)
block_without_identity = await block_manager.create_or_update_block_async(
PydanticBlock(label="user", value="Original Content"), actor=default_user
)
identity = await server.identity_manager.create_identity_async(
IdentityCreate(
name="caren", identifier_key="1234", identity_type=IdentityType.user, block_ids=[default_block.id, block_with_identity.id]
),
actor=default_user,
)
# Get the blocks for identity id
blocks = await server.block_manager.get_blocks_async(identity_id=identity.id, actor=default_user)
assert len(blocks) == 2
# Check blocks are in the list
block_ids = [b.id for b in blocks]
assert default_block.id in block_ids
assert block_with_identity.id in block_ids
assert block_without_identity.id not in block_ids
# Get the blocks for identifier key
blocks = await server.block_manager.get_blocks_async(identifier_keys=[identity.identifier_key], actor=default_user)
assert len(blocks) == 2
# Check blocks are in the list
block_ids = [b.id for b in blocks]
assert default_block.id in block_ids
assert block_with_identity.id in block_ids
assert block_without_identity.id not in block_ids
# Delete new agents
await server.block_manager.delete_block_async(block_id=block_with_identity.id, actor=default_user)
await server.block_manager.delete_block_async(block_id=block_without_identity.id, actor=default_user)
# Get the blocks for identity id
blocks = await server.block_manager.get_blocks_async(identity_id=identity.id, actor=default_user)
assert len(blocks) == 1
# Check only initial block in the list
block_ids = [b.id for b in blocks]
assert default_block.id in block_ids
assert block_with_identity.id not in block_ids
assert block_without_identity.id not in block_ids
await server.identity_manager.delete_identity_async(identity_id=identity.id, actor=default_user)
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/managers/test_identity_manager.py",
"license": "Apache License 2.0",
"lines": 246,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/managers/test_job_manager.py | from datetime import datetime
import pytest
# Import shared fixtures and constants from conftest
from letta.errors import LettaInvalidArgumentError
from letta.schemas.enums import (
JobStatus,
JobType,
)
from letta.schemas.job import Job as PydanticJob, JobUpdate
from letta.server.server import SyncServer
# ======================================================================================================================
# JobManager Tests
# ======================================================================================================================
@pytest.mark.asyncio
async def test_create_job(server: SyncServer, default_user):
"""Test creating a job."""
job_data = PydanticJob(
status=JobStatus.created,
metadata={"type": "test"},
)
created_job = await server.job_manager.create_job_async(pydantic_job=job_data, actor=default_user)
# Assertions to ensure the created job matches the expected values
assert created_job.user_id == default_user.id
assert created_job.status == JobStatus.created
assert created_job.metadata == {"type": "test"}
@pytest.mark.asyncio
async def test_get_job_by_id(server: SyncServer, default_user):
"""Test fetching a job by ID."""
# Create a job
job_data = PydanticJob(
status=JobStatus.created,
metadata={"type": "test"},
)
created_job = await server.job_manager.create_job_async(pydantic_job=job_data, actor=default_user)
# Fetch the job by ID
fetched_job = await server.job_manager.get_job_by_id_async(created_job.id, actor=default_user)
# Assertions to ensure the fetched job matches the created job
assert fetched_job.id == created_job.id
assert fetched_job.status == JobStatus.created
assert fetched_job.metadata == {"type": "test"}
@pytest.mark.asyncio
async def test_list_jobs(server: SyncServer, default_user):
"""Test listing jobs."""
# Create multiple jobs
for i in range(3):
job_data = PydanticJob(
status=JobStatus.created,
metadata={"type": f"test-{i}"},
)
await server.job_manager.create_job_async(pydantic_job=job_data, actor=default_user)
# List jobs
jobs = await server.job_manager.list_jobs_async(actor=default_user)
# Assertions to check that the created jobs are listed
assert len(jobs) == 3
assert all(job.user_id == default_user.id for job in jobs)
assert all(job.metadata["type"].startswith("test") for job in jobs)
@pytest.mark.asyncio
async def test_list_jobs_with_metadata(server: SyncServer, default_user):
for i in range(3):
job_data = PydanticJob(status=JobStatus.created, metadata={"source_id": f"source-test-{i}"})
await server.job_manager.create_job_async(pydantic_job=job_data, actor=default_user)
jobs = await server.job_manager.list_jobs_async(actor=default_user, source_id="source-test-2")
assert len(jobs) == 1
assert jobs[0].metadata["source_id"] == "source-test-2"
@pytest.mark.asyncio
async def test_update_job_by_id(server: SyncServer, default_user):
"""Test updating a job by its ID."""
# Create a job
job_data = PydanticJob(
status=JobStatus.created,
metadata={"type": "test"},
)
created_job = await server.job_manager.create_job_async(pydantic_job=job_data, actor=default_user)
assert created_job.metadata == {"type": "test"}
# Update the job
update_data = JobUpdate(status=JobStatus.completed, metadata={"type": "updated"})
updated_job = await server.job_manager.update_job_by_id_async(created_job.id, update_data, actor=default_user)
# Assertions to ensure the job was updated
assert updated_job.status == JobStatus.completed
assert updated_job.metadata == {"type": "updated"}
assert updated_job.completed_at is not None
@pytest.mark.asyncio
async def test_delete_job_by_id(server: SyncServer, default_user):
"""Test deleting a job by its ID."""
# Create a job
job_data = PydanticJob(
status=JobStatus.created,
metadata={"type": "test"},
)
created_job = await server.job_manager.create_job_async(pydantic_job=job_data, actor=default_user)
# Delete the job
await server.job_manager.delete_job_by_id_async(created_job.id, actor=default_user)
# List jobs to ensure the job was deleted
jobs = await server.job_manager.list_jobs_async(actor=default_user)
assert len(jobs) == 0
@pytest.mark.asyncio
async def test_update_job_auto_complete(server: SyncServer, default_user):
"""Test that updating a job's status to 'completed' automatically sets completed_at."""
# Create a job
job_data = PydanticJob(
status=JobStatus.created,
metadata={"type": "test"},
)
created_job = await server.job_manager.create_job_async(pydantic_job=job_data, actor=default_user)
# Update the job's status to 'completed'
update_data = JobUpdate(status=JobStatus.completed)
updated_job = await server.job_manager.update_job_by_id_async(created_job.id, update_data, actor=default_user)
# Assertions to check that completed_at was set
assert updated_job.status == JobStatus.completed
assert updated_job.completed_at is not None
@pytest.mark.asyncio
async def test_get_job_not_found(server: SyncServer, default_user):
"""Test fetching a non-existent job."""
non_existent_job_id = "nonexistent-id"
with pytest.raises(LettaInvalidArgumentError):
await server.job_manager.get_job_by_id_async(non_existent_job_id, actor=default_user)
@pytest.mark.asyncio
async def test_delete_job_not_found(server: SyncServer, default_user):
"""Test deleting a non-existent job."""
non_existent_job_id = "nonexistent-id"
with pytest.raises(LettaInvalidArgumentError):
await server.job_manager.delete_job_by_id_async(non_existent_job_id, actor=default_user)
@pytest.mark.asyncio
async def test_list_jobs_pagination(server: SyncServer, default_user):
"""Test listing jobs with pagination."""
# Create multiple jobs
for i in range(10):
job_data = PydanticJob(
status=JobStatus.created,
metadata={"type": f"test-{i}"},
)
await server.job_manager.create_job_async(pydantic_job=job_data, actor=default_user)
# List jobs with a limit
jobs = await server.job_manager.list_jobs_async(actor=default_user, limit=5)
assert len(jobs) == 5
assert all(job.user_id == default_user.id for job in jobs)
# Test cursor-based pagination
first_page = await server.job_manager.list_jobs_async(actor=default_user, limit=3, ascending=True) # [J0, J1, J2]
assert len(first_page) == 3
assert first_page[0].created_at <= first_page[1].created_at <= first_page[2].created_at
last_page = await server.job_manager.list_jobs_async(actor=default_user, limit=3, ascending=False) # [J9, J8, J7]
assert len(last_page) == 3
assert last_page[0].created_at >= last_page[1].created_at >= last_page[2].created_at
first_page_ids = set(job.id for job in first_page)
last_page_ids = set(job.id for job in last_page)
assert first_page_ids.isdisjoint(last_page_ids)
# Test middle page using both before and after
middle_page = await server.job_manager.list_jobs_async(
actor=default_user, before=last_page[-1].id, after=first_page[-1].id, ascending=True
) # [J3, J4, J5, J6]
assert len(middle_page) == 4 # Should include jobs between first and second page
head_tail_jobs = first_page_ids.union(last_page_ids)
assert all(job.id not in head_tail_jobs for job in middle_page)
# Test descending order
middle_page_desc = await server.job_manager.list_jobs_async(
actor=default_user, before=last_page[-1].id, after=first_page[-1].id, ascending=False
) # [J6, J5, J4, J3]
assert len(middle_page_desc) == 4
assert middle_page_desc[0].id == middle_page[-1].id
assert middle_page_desc[1].id == middle_page[-2].id
assert middle_page_desc[2].id == middle_page[-3].id
assert middle_page_desc[3].id == middle_page[-4].id
# BONUS
job_7 = last_page[-1].id
earliest_jobs = await server.job_manager.list_jobs_async(actor=default_user, ascending=False, before=job_7)
assert len(earliest_jobs) == 7
assert all(j.id not in last_page_ids for j in earliest_jobs)
assert all(earliest_jobs[i].created_at >= earliest_jobs[i + 1].created_at for i in range(len(earliest_jobs) - 1))
@pytest.mark.asyncio
async def test_list_jobs_by_status(server: SyncServer, default_user):
"""Test listing jobs filtered by status."""
# Create multiple jobs with different statuses
job_data_created = PydanticJob(
status=JobStatus.created,
metadata={"type": "test-created"},
)
job_data_in_progress = PydanticJob(
status=JobStatus.running,
metadata={"type": "test-running"},
)
job_data_completed = PydanticJob(
status=JobStatus.completed,
metadata={"type": "test-completed"},
)
await server.job_manager.create_job_async(pydantic_job=job_data_created, actor=default_user)
await server.job_manager.create_job_async(pydantic_job=job_data_in_progress, actor=default_user)
await server.job_manager.create_job_async(pydantic_job=job_data_completed, actor=default_user)
# List jobs filtered by status
created_jobs = await server.job_manager.list_jobs_async(actor=default_user, statuses=[JobStatus.created])
in_progress_jobs = await server.job_manager.list_jobs_async(actor=default_user, statuses=[JobStatus.running])
completed_jobs = await server.job_manager.list_jobs_async(actor=default_user, statuses=[JobStatus.completed])
# Assertions
assert len(created_jobs) == 1
assert created_jobs[0].metadata["type"] == job_data_created.metadata["type"]
assert len(in_progress_jobs) == 1
assert in_progress_jobs[0].metadata["type"] == job_data_in_progress.metadata["type"]
assert len(completed_jobs) == 1
assert completed_jobs[0].metadata["type"] == job_data_completed.metadata["type"]
@pytest.mark.asyncio
async def test_list_jobs_filter_by_type(server: SyncServer, default_user, default_job):
"""Test that list_jobs correctly filters by job_type."""
# Create a run job
run_pydantic = PydanticJob(
user_id=default_user.id,
status=JobStatus.pending,
job_type=JobType.RUN,
)
run = await server.job_manager.create_job_async(pydantic_job=run_pydantic, actor=default_user)
# List only regular jobs
jobs = await server.job_manager.list_jobs_async(actor=default_user)
assert len(jobs) == 1
assert jobs[0].id == default_job.id
# List only run jobs
jobs = await server.job_manager.list_jobs_async(actor=default_user, job_type=JobType.RUN)
assert len(jobs) == 1
assert jobs[0].id == run.id
async def test_e2e_job_callback(monkeypatch, server: SyncServer, default_user):
"""Test that job callbacks are properly dispatched when a job is completed."""
captured = {}
# Create a simple mock for the async HTTP client
class MockAsyncResponse:
status_code = 202
async def mock_post(url, json, timeout):
captured["url"] = url
captured["json"] = json
return MockAsyncResponse()
class MockAsyncClient:
async def __aenter__(self):
return self
async def __aexit__(self, *args):
pass
async def post(self, url, json, timeout):
return await mock_post(url, json, timeout)
# Patch the AsyncClient
import letta.services.job_manager as job_manager_module
monkeypatch.setattr(job_manager_module, "AsyncClient", MockAsyncClient)
job_in = PydanticJob(status=JobStatus.created, metadata={"foo": "bar"}, callback_url="http://example.test/webhook/jobs")
created = await server.job_manager.create_job_async(pydantic_job=job_in, actor=default_user)
assert created.callback_url == "http://example.test/webhook/jobs"
# Update the job status to completed, which should trigger the callback
update = JobUpdate(status=JobStatus.completed)
updated = await server.job_manager.update_job_by_id_async(created.id, update, actor=default_user)
# Verify the callback was triggered with the correct parameters
assert captured["url"] == created.callback_url, "Callback URL doesn't match"
assert captured["json"]["job_id"] == created.id, "Job ID in callback doesn't match"
assert captured["json"]["status"] == JobStatus.completed.value, "Job status in callback doesn't match"
# Verify the completed_at timestamp is reasonable
actual_dt = datetime.fromisoformat(captured["json"]["completed_at"]).replace(tzinfo=None)
assert abs((actual_dt - updated.completed_at).total_seconds()) < 1, "Timestamp difference is too large"
assert isinstance(updated.callback_sent_at, datetime)
assert updated.callback_status_code == 202
# ======================================================================================================================
# JobManager Tests - Messages
# ======================================================================================================================
@pytest.mark.asyncio
async def test_record_ttft(server: SyncServer, default_user):
"""Test recording time to first token for a job."""
# Create a job
job_data = PydanticJob(
status=JobStatus.created,
metadata={"type": "test_timing"},
)
created_job = await server.job_manager.create_job_async(pydantic_job=job_data, actor=default_user)
# Record TTFT
ttft_ns = 1_500_000_000 # 1.5 seconds in nanoseconds
await server.job_manager.record_ttft(created_job.id, ttft_ns, default_user)
# Fetch the job and verify TTFT was recorded
updated_job = await server.job_manager.get_job_by_id_async(created_job.id, default_user)
assert updated_job.ttft_ns == ttft_ns
@pytest.mark.asyncio
async def test_record_response_duration(server: SyncServer, default_user):
"""Test recording total response duration for a job."""
# Create a job
job_data = PydanticJob(
status=JobStatus.created,
metadata={"type": "test_timing"},
)
created_job = await server.job_manager.create_job_async(pydantic_job=job_data, actor=default_user)
# Record response duration
duration_ns = 5_000_000_000 # 5 seconds in nanoseconds
await server.job_manager.record_response_duration(created_job.id, duration_ns, default_user)
# Fetch the job and verify duration was recorded
updated_job = await server.job_manager.get_job_by_id_async(created_job.id, default_user)
assert updated_job.total_duration_ns == duration_ns
@pytest.mark.asyncio
async def test_record_timing_metrics_together(server: SyncServer, default_user):
"""Test recording both TTFT and response duration for a job."""
# Create a job
job_data = PydanticJob(
status=JobStatus.created,
metadata={"type": "test_timing_combined"},
)
created_job = await server.job_manager.create_job_async(pydantic_job=job_data, actor=default_user)
# Record both metrics
ttft_ns = 2_000_000_000 # 2 seconds in nanoseconds
duration_ns = 8_500_000_000 # 8.5 seconds in nanoseconds
await server.job_manager.record_ttft(created_job.id, ttft_ns, default_user)
await server.job_manager.record_response_duration(created_job.id, duration_ns, default_user)
# Fetch the job and verify both metrics were recorded
updated_job = await server.job_manager.get_job_by_id_async(created_job.id, default_user)
assert updated_job.ttft_ns == ttft_ns
assert updated_job.total_duration_ns == duration_ns
@pytest.mark.asyncio
async def test_record_timing_invalid_job(server: SyncServer, default_user):
"""Test recording timing metrics for non-existent job raises LettaInvalidArgumentError."""
# Try to record TTFT for non-existent job - should raise LettaInvalidArgumentError
with pytest.raises(LettaInvalidArgumentError):
await server.job_manager.record_ttft("nonexistent_job_id", 1_000_000_000, default_user)
# Try to record response duration for non-existent job - should raise LettaInvalidArgumentError
with pytest.raises(LettaInvalidArgumentError):
await server.job_manager.record_response_duration("nonexistent_job_id", 2_000_000_000, default_user)
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/managers/test_job_manager.py",
"license": "Apache License 2.0",
"lines": 311,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/managers/test_mcp_manager.py | import uuid
from unittest.mock import AsyncMock, patch
import pytest
# Import shared fixtures and constants from conftest
from letta.constants import (
MCP_TOOL_TAG_NAME_PREFIX,
)
from letta.functions.mcp_client.types import MCPTool
from letta.schemas.enums import (
ToolType,
)
from letta.server.db import db_registry
from letta.settings import settings
# ======================================================================================================================
# MCPManager Tests
# ======================================================================================================================
@pytest.mark.asyncio
@patch("letta.services.mcp_manager.MCPManager.get_mcp_client")
async def test_create_mcp_server(mock_get_client, server, default_user):
    """Create stdio and SSE MCP servers, then list, execute, and persist a tool via a mocked client."""
    from letta.schemas.mcp import MCPServer, MCPServerType, SSEServerConfig, StdioServerConfig
    from letta.settings import tool_settings

    # Not applicable when MCP servers are sourced from local config files.
    if tool_settings.mcp_read_from_config:
        return
    # Stub the MCP client: a single advertised tool plus a canned execution result.
    fake_client = AsyncMock()
    fake_client.connect_to_server = AsyncMock()
    fake_client.list_tools = AsyncMock(
        return_value=[
            MCPTool(
                name="get_simple_price",
                inputSchema={
                    "type": "object",
                    "properties": {
                        "ids": {"type": "string"},
                        "vs_currencies": {"type": "string"},
                        "include_market_cap": {"type": "boolean"},
                        "include_24hr_vol": {"type": "boolean"},
                        "include_24hr_change": {"type": "boolean"},
                    },
                    "required": ["ids", "vs_currencies"],
                    "additionalProperties": False,
                },
            )
        ]
    )
    fake_client.execute_tool = AsyncMock(
        return_value=(
            '{"bitcoin": {"usd": 50000, "usd_market_cap": 900000000000, "usd_24h_vol": 30000000000, "usd_24h_change": 2.5}}',
            True,
        )
    )
    mock_get_client.return_value = fake_client
    # --- stdio-backed server ---
    stdio_config = StdioServerConfig(
        server_name="test_server", type=MCPServerType.STDIO, command="echo 'test'", args=["arg1", "arg2"], env={"ENV1": "value1"}
    )
    stdio_server = MCPServer(server_name="test_server", server_type=MCPServerType.STDIO, stdio_config=stdio_config)
    created_server = await server.mcp_manager.create_or_update_mcp_server(stdio_server, actor=default_user)
    print(created_server)
    assert created_server.server_name == stdio_config.server_name
    assert created_server.server_type == stdio_config.type
    # --- SSE-backed server ---
    mcp_server_name = "coingecko"
    server_url = "https://mcp.api.coingecko.com/sse"
    SSEServerConfig(server_name=mcp_server_name, server_url=server_url)  # exercises config validation only
    sse_server = MCPServer(server_name=mcp_server_name, server_type=MCPServerType.SSE, server_url=server_url)
    created_server = await server.mcp_manager.create_or_update_mcp_server(sse_server, actor=default_user)
    print(created_server)
    assert created_server.server_name == mcp_server_name
    assert created_server.server_type == MCPServerType.SSE
    # The registry should now hold at least the servers created above.
    servers = await server.mcp_manager.list_mcp_servers(actor=default_user)
    print(servers)
    assert len(servers) > 0, "No MCP servers found"
    # Tool listing is routed through the mocked client.
    tools = await server.mcp_manager.list_mcp_server_tools(created_server.server_name, actor=default_user)
    print(tools)
    # Execute the mocked tool against the SSE server.
    tool_name = "get_simple_price"
    tool_args = {
        "ids": "bitcoin",
        "vs_currencies": "usd",
        "include_market_cap": True,
        "include_24hr_vol": True,
        "include_24hr_change": True,
    }
    result = await server.mcp_manager.execute_mcp_server_tool(
        created_server.server_name, tool_name=tool_name, tool_args=tool_args, actor=default_user, environment_variables={}
    )
    print(result)
    # Persist the tool and confirm it is tagged with its origin server.
    tool = await server.mcp_manager.add_tool_from_mcp_server(created_server.server_name, tool_name, actor=default_user)
    print(tool)
    assert tool.name == tool_name
    assert f"mcp:{created_server.server_name}" in tool.tags, f"Expected tag {f'mcp:{created_server.server_name}'}, got {tool.tags}"
    print("TAGS", tool.tags)
@patch("letta.services.mcp_manager.MCPManager.get_mcp_client")
async def test_create_mcp_server_with_tools(mock_get_client, server, default_user):
"""Test that creating an MCP server automatically syncs and persists its tools."""
from letta.functions.mcp_client.types import MCPToolHealth
from letta.schemas.mcp import MCPServer, MCPServerType
from letta.settings import tool_settings
if tool_settings.mcp_read_from_config:
return
# Create mock tools with different health statuses
mock_tools = [
MCPTool(
name="valid_tool_1",
description="A valid tool",
inputSchema={
"type": "object",
"properties": {
"param1": {"type": "string"},
},
"required": ["param1"],
},
health=MCPToolHealth(status="VALID", reasons=[]),
),
MCPTool(
name="valid_tool_2",
description="Another valid tool",
inputSchema={
"type": "object",
"properties": {
"param2": {"type": "number"},
},
},
health=MCPToolHealth(status="VALID", reasons=[]),
),
MCPTool(
name="invalid_tool",
description="An invalid tool that should be skipped",
inputSchema={
"type": "invalid_type", # Invalid schema
},
health=MCPToolHealth(status="INVALID", reasons=["Invalid schema type"]),
),
MCPTool(
name="warning_tool",
description="A tool with warnings but should still be persisted",
inputSchema={
"type": "object",
"properties": {},
},
health=MCPToolHealth(status="WARNING", reasons=["No properties defined"]),
),
]
# Create mock client
mock_client = AsyncMock()
mock_client.connect_to_server = AsyncMock()
mock_client.list_tools = AsyncMock(return_value=mock_tools)
mock_client.cleanup = AsyncMock()
mock_get_client.return_value = mock_client
# Create MCP server config
server_name = f"test_server_{uuid.uuid4().hex[:8]}"
server_url = "https://test-with-tools.example.com/sse"
mcp_server = MCPServer(server_name=server_name, server_type=MCPServerType.SSE, server_url=server_url)
# Create server with tools using the new method
created_server = await server.mcp_manager.create_mcp_server_with_tools(mcp_server, actor=default_user)
# Verify server was created
assert created_server.server_name == server_name
assert created_server.server_type == MCPServerType.SSE
assert created_server.server_url == server_url
# Verify tools were persisted (all except the invalid one)
# Get all tools and filter by checking metadata
all_tools = await server.tool_manager.list_tools_async(
actor=default_user, names=["valid_tool_1", "valid_tool_2", "warning_tool", "invalid_tool"]
)
# Filter tools that belong to our MCP server
persisted_tools = [
tool
for tool in all_tools
if tool.metadata_
and MCP_TOOL_TAG_NAME_PREFIX in tool.metadata_
and tool.metadata_[MCP_TOOL_TAG_NAME_PREFIX].get("server_name") == server_name
]
# Should have 3 tools (2 valid + 1 warning, but not the invalid one)
assert len(persisted_tools) == 3, f"Expected 3 tools, got {len(persisted_tools)}"
# Check tool names
tool_names = {tool.name for tool in persisted_tools}
assert "valid_tool_1" in tool_names
assert "valid_tool_2" in tool_names
assert "warning_tool" in tool_names
assert "invalid_tool" not in tool_names # Invalid tool should be filtered out
# Verify each tool has correct metadata
for tool in persisted_tools:
assert tool.metadata_ is not None
assert MCP_TOOL_TAG_NAME_PREFIX in tool.metadata_
assert tool.metadata_[MCP_TOOL_TAG_NAME_PREFIX]["server_name"] == server_name
assert tool.metadata_[MCP_TOOL_TAG_NAME_PREFIX]["server_id"] == created_server.id
assert tool.tool_type == ToolType.EXTERNAL_MCP
# Clean up - delete the server
await server.mcp_manager.delete_mcp_server_by_id(created_server.id, actor=default_user)
# Verify tools were also deleted (cascade) by trying to get them again
remaining_tools = await server.tool_manager.list_tools_async(actor=default_user, names=["valid_tool_1", "valid_tool_2", "warning_tool"])
# Filter to see if any still belong to our deleted server
remaining_mcp_tools = [
tool
for tool in remaining_tools
if tool.metadata_
and MCP_TOOL_TAG_NAME_PREFIX in tool.metadata_
and tool.metadata_[MCP_TOOL_TAG_NAME_PREFIX].get("server_name") == server_name
]
assert len(remaining_mcp_tools) == 0, "Tools should be deleted when server is deleted"
@pytest.mark.asyncio
@patch("letta.services.mcp_manager.MCPManager.get_mcp_client")
async def test_complex_schema_normalization(mock_get_client, server, default_user):
    """Test that complex MCP schemas with nested objects are normalized and accepted.

    Fix: server creation now happens *before* the try block. Previously, if
    ``create_mcp_server_with_tools`` raised, the ``finally`` clause referenced
    the never-assigned ``created_server`` and masked the real failure with a
    NameError. Also replaces ``== False`` with the idiomatic ``is False``.
    """
    from letta.functions.mcp_client.types import MCPTool, MCPToolHealth
    from letta.schemas.mcp import MCPServer, MCPServerType
    from letta.settings import tool_settings

    if tool_settings.mcp_read_from_config:
        return
    # Create mock tools with complex schemas that would normally be INVALID
    # These schemas have: nested $defs, $ref references, missing additionalProperties
    mock_tools = [
        # 1. Nested object with $ref (like create_person)
        MCPTool(
            name="create_person",
            description="Create a person with nested address",
            inputSchema={
                "$defs": {
                    "Address": {
                        "type": "object",
                        "properties": {
                            "street": {"type": "string"},
                            "city": {"type": "string"},
                            "zip_code": {"type": "string"},
                        },
                        "required": ["street", "city", "zip_code"],
                    },
                    "Person": {
                        "type": "object",
                        "properties": {
                            "name": {"type": "string"},
                            "age": {"type": "integer"},
                            "address": {"$ref": "#/$defs/Address"},
                        },
                        "required": ["name", "age"],
                    },
                },
                "type": "object",
                "properties": {"person": {"$ref": "#/$defs/Person"}},
                "required": ["person"],
            },
            health=MCPToolHealth(
                status="INVALID",
                reasons=["root: 'additionalProperties' not explicitly set", "root.properties.person: Missing 'type'"],
            ),
        ),
        # 2. List of objects (like manage_tasks)
        MCPTool(
            name="manage_tasks",
            description="Manage multiple tasks",
            inputSchema={
                "$defs": {
                    "TaskItem": {
                        "type": "object",
                        "properties": {
                            "title": {"type": "string"},
                            "priority": {"type": "integer", "default": 1},
                            "completed": {"type": "boolean", "default": False},
                            "tags": {"type": "array", "items": {"type": "string"}},
                        },
                        "required": ["title"],
                    }
                },
                "type": "object",
                "properties": {
                    "tasks": {
                        "type": "array",
                        "items": {"$ref": "#/$defs/TaskItem"},
                    }
                },
                "required": ["tasks"],
            },
            health=MCPToolHealth(
                status="INVALID",
                reasons=["root: 'additionalProperties' not explicitly set", "root.properties.tasks.items: Missing 'type'"],
            ),
        ),
        # 3. Complex filter object with optional fields
        MCPTool(
            name="search_with_filters",
            description="Search with complex filters",
            inputSchema={
                "$defs": {
                    "SearchFilter": {
                        "type": "object",
                        "properties": {
                            "keywords": {"type": "array", "items": {"type": "string"}},
                            "min_score": {"type": "number"},
                            "categories": {"type": "array", "items": {"type": "string"}},
                        },
                        "required": ["keywords"],
                    }
                },
                "type": "object",
                "properties": {
                    "query": {"type": "string"},
                    "filters": {"$ref": "#/$defs/SearchFilter"},
                },
                "required": ["query", "filters"],
            },
            health=MCPToolHealth(
                status="INVALID",
                reasons=["root: 'additionalProperties' not explicitly set", "root.properties.filters: Missing 'type'"],
            ),
        ),
    ]
    # Create mock client
    mock_client = AsyncMock()
    mock_client.connect_to_server = AsyncMock()
    mock_client.list_tools = AsyncMock(return_value=mock_tools)
    mock_client.cleanup = AsyncMock()
    mock_get_client.return_value = mock_client
    # Create MCP server
    server_name = f"test_complex_schema_{uuid.uuid4().hex[:8]}"
    server_url = "https://test-complex.example.com/sse"
    mcp_server = MCPServer(server_name=server_name, server_type=MCPServerType.SSE, server_url=server_url)
    # Create server (this will auto-sync tools) OUTSIDE the try block so a
    # creation failure cannot trigger a NameError in the finally clause.
    created_server = await server.mcp_manager.create_mcp_server_with_tools(mcp_server, actor=default_user)
    try:
        assert created_server.server_name == server_name
        # Now attempt to add each tool - they should be normalized from INVALID to acceptable
        # The normalization happens in add_tool_from_mcp_server
        # Test 1: create_person should normalize successfully
        person_tool = await server.mcp_manager.add_tool_from_mcp_server(server_name, "create_person", actor=default_user)
        assert person_tool is not None
        assert person_tool.name == "create_person"
        # Verify the schema has additionalProperties set
        assert person_tool.json_schema["parameters"]["additionalProperties"] is False
        # Verify nested $defs have additionalProperties
        if "$defs" in person_tool.json_schema["parameters"]:
            for def_name, def_schema in person_tool.json_schema["parameters"]["$defs"].items():
                if def_schema.get("type") == "object":
                    assert "additionalProperties" in def_schema, f"$defs.{def_name} missing additionalProperties after normalization"
        # Test 2: manage_tasks should normalize successfully
        tasks_tool = await server.mcp_manager.add_tool_from_mcp_server(server_name, "manage_tasks", actor=default_user)
        assert tasks_tool is not None
        assert tasks_tool.name == "manage_tasks"
        # Verify array items have explicit type
        tasks_prop = tasks_tool.json_schema["parameters"]["properties"]["tasks"]
        assert "items" in tasks_prop
        assert "type" in tasks_prop["items"], "Array items should have explicit type after normalization"
        # Test 3: search_with_filters should normalize successfully
        search_tool = await server.mcp_manager.add_tool_from_mcp_server(server_name, "search_with_filters", actor=default_user)
        assert search_tool is not None
        assert search_tool.name == "search_with_filters"
        # Verify all tools were persisted
        all_tools = await server.tool_manager.list_tools_async(
            actor=default_user, names=["create_person", "manage_tasks", "search_with_filters"]
        )
        # Filter to tools from our MCP server
        mcp_tools = [
            tool
            for tool in all_tools
            if tool.metadata_
            and MCP_TOOL_TAG_NAME_PREFIX in tool.metadata_
            and tool.metadata_[MCP_TOOL_TAG_NAME_PREFIX].get("server_name") == server_name
        ]
        # All 3 complex schema tools should have been normalized and persisted
        assert len(mcp_tools) == 3, f"Expected 3 normalized tools, got {len(mcp_tools)}"
        # Verify they all have the correct MCP metadata
        for tool in mcp_tools:
            assert tool.tool_type == ToolType.EXTERNAL_MCP
            assert f"mcp:{server_name}" in tool.tags
    finally:
        # Clean up
        await server.mcp_manager.delete_mcp_server_by_id(created_server.id, actor=default_user)
@patch("letta.services.mcp_manager.MCPManager.get_mcp_client")
async def test_create_mcp_server_with_tools_connection_failure(mock_get_client, server, default_user):
"""Test that MCP server creation succeeds even when tool sync fails (optimistic approach)."""
from letta.schemas.mcp import MCPServer, MCPServerType
from letta.settings import tool_settings
if tool_settings.mcp_read_from_config:
return
# Create mock client that fails to connect
mock_client = AsyncMock()
mock_client.connect_to_server = AsyncMock(side_effect=Exception("Connection failed"))
mock_client.cleanup = AsyncMock()
mock_get_client.return_value = mock_client
# Create MCP server config
server_name = f"test_server_fail_{uuid.uuid4().hex[:8]}"
server_url = "https://test-fail.example.com/sse"
mcp_server = MCPServer(server_name=server_name, server_type=MCPServerType.SSE, server_url=server_url)
# Create server with tools - should succeed despite connection failure
created_server = await server.mcp_manager.create_mcp_server_with_tools(mcp_server, actor=default_user)
# Verify server was created successfully
assert created_server.server_name == server_name
assert created_server.server_type == MCPServerType.SSE
assert created_server.server_url == server_url
# Verify no tools were persisted (due to connection failure)
# Try to get tools by the names we would have expected
all_tools = await server.tool_manager.list_tools_async(
actor=default_user,
names=["tool1", "tool2", "tool3"], # Generic names since we don't know what tools would have been listed
)
# Filter to see if any belong to our server (there shouldn't be any)
persisted_tools = [
tool
for tool in all_tools
if tool.metadata_
and MCP_TOOL_TAG_NAME_PREFIX in tool.metadata_
and tool.metadata_[MCP_TOOL_TAG_NAME_PREFIX].get("server_name") == server_name
]
assert len(persisted_tools) == 0, "No tools should be persisted when connection fails"
# Clean up
await server.mcp_manager.delete_mcp_server_by_id(created_server.id, actor=default_user)
@pytest.mark.asyncio
async def test_get_mcp_servers_by_ids(server, default_user):
    """Test bulk fetching of MCP servers by id: full set, subset, empty list, and mixed valid/invalid ids.

    Fixes: adds the ``@pytest.mark.asyncio`` marker that was missing from this
    coroutine test, and deletes the created servers in a ``finally`` block so
    they no longer leak into subsequent tests.
    """
    from letta.schemas.mcp import MCPServer, MCPServerType, SSEServerConfig, StdioServerConfig
    from letta.settings import tool_settings

    if tool_settings.mcp_read_from_config:
        return
    # Create multiple MCP servers for testing
    servers_data = [
        {
            "name": "test_server_1",
            "config": StdioServerConfig(
                server_name="test_server_1", type=MCPServerType.STDIO, command="echo 'test1'", args=["arg1"], env={"ENV1": "value1"}
            ),
            "type": MCPServerType.STDIO,
        },
        {
            "name": "test_server_2",
            "config": SSEServerConfig(server_name="test_server_2", server_url="https://test2.example.com/sse"),
            "type": MCPServerType.SSE,
        },
        {
            "name": "test_server_3",
            "config": SSEServerConfig(server_name="test_server_3", server_url="https://test3.example.com/mcp"),
            "type": MCPServerType.STREAMABLE_HTTP,
        },
    ]
    created_servers = []
    try:
        for server_data in servers_data:
            if server_data["type"] == MCPServerType.STDIO:
                mcp_server = MCPServer(server_name=server_data["name"], server_type=server_data["type"], stdio_config=server_data["config"])
            else:
                mcp_server = MCPServer(
                    server_name=server_data["name"], server_type=server_data["type"], server_url=server_data["config"].server_url
                )
            created = await server.mcp_manager.create_or_update_mcp_server(mcp_server, actor=default_user)
            created_servers.append(created)
        # Test fetching multiple servers by IDs
        server_ids = [s.id for s in created_servers]
        fetched_servers = await server.mcp_manager.get_mcp_servers_by_ids(server_ids, actor=default_user)
        assert len(fetched_servers) == len(created_servers)
        fetched_ids = {s.id for s in fetched_servers}
        expected_ids = {s.id for s in created_servers}
        assert fetched_ids == expected_ids
        # Test fetching subset of servers
        subset_ids = server_ids[:2]
        subset_servers = await server.mcp_manager.get_mcp_servers_by_ids(subset_ids, actor=default_user)
        assert len(subset_servers) == 2
        assert all(s.id in subset_ids for s in subset_servers)
        # Test fetching with empty list
        empty_result = await server.mcp_manager.get_mcp_servers_by_ids([], actor=default_user)
        assert empty_result == []
        # Test fetching with non-existent ID mixed with valid IDs
        mixed_ids = [server_ids[0], "non-existent-id", server_ids[1]]
        mixed_result = await server.mcp_manager.get_mcp_servers_by_ids(mixed_ids, actor=default_user)
        # Should only return the existing servers
        assert len(mixed_result) == 2
        assert all(s.id in server_ids for s in mixed_result)
        # Test that servers from different organizations are not returned
        # This would require creating another user/org, but for now we'll just verify
        # that the function respects the actor's organization
        all_servers = await server.mcp_manager.list_mcp_servers(actor=default_user)
        all_server_ids = [s.id for s in all_servers]
        bulk_fetched = await server.mcp_manager.get_mcp_servers_by_ids(all_server_ids, actor=default_user)
        # All fetched servers should belong to the same organization
        assert all(s.organization_id == default_user.organization_id for s in bulk_fetched)
    finally:
        # Clean up every server this test managed to create
        for created in created_servers:
            await server.mcp_manager.delete_mcp_server_by_id(created.id, actor=default_user)
# Additional MCPManager OAuth session tests
@pytest.mark.asyncio
async def test_mcp_server_deletion_cascades_oauth_sessions(server, default_organization, default_user):
    """Deleting an MCP server deletes associated OAuth sessions (same user + URL)."""
    from letta.schemas.mcp import MCPOAuthSessionCreate, MCPServer as PydanticMCPServer, MCPServerType

    test_server_url = "https://test.example.com/mcp"
    # Seed three orphaned sessions (no server id) sharing the user and URL.
    seeded_session_ids: list[str] = []
    for idx in range(3):
        seeded = await server.mcp_manager.create_oauth_session(
            MCPOAuthSessionCreate(
                server_url=test_server_url,
                server_name=f"test_mcp_server_{idx}",
                user_id=default_user.id,
                organization_id=default_organization.id,
            ),
            actor=default_user,
        )
        seeded_session_ids.append(seeded.id)
    # Register a server at the same URL, then remove it through the manager.
    victim = await server.mcp_manager.create_mcp_server(
        PydanticMCPServer(
            server_name=f"test_mcp_server_{str(uuid.uuid4().hex[:8])}",  # ensure unique name
            server_type=MCPServerType.SSE,
            server_url=test_server_url,
            organization_id=default_organization.id,
        ),
        actor=default_user,
    )
    await server.mcp_manager.delete_mcp_server_by_id(victim.id, actor=default_user)
    # Every seeded session must have been cascaded away.
    for sid in seeded_session_ids:
        remaining = await server.mcp_manager.get_oauth_session_by_id(sid, actor=default_user)
        assert remaining is None, f"OAuth session {sid} should be deleted"
@pytest.mark.asyncio
async def test_oauth_sessions_with_different_url_persist(server, default_organization, default_user):
    """Sessions with different URL should not be deleted when deleting the server for another URL."""
    from letta.schemas.mcp import MCPOAuthSessionCreate, MCPServer as PydanticMCPServer, MCPServerType

    target_url = "https://test.example.com/mcp"
    unrelated_url = "https://other.example.com/mcp"
    # A session tied to an unrelated URL must survive the deletion below.
    unrelated_session = await server.mcp_manager.create_oauth_session(
        MCPOAuthSessionCreate(
            server_url=unrelated_url,
            server_name="standalone_oauth",
            user_id=default_user.id,
            organization_id=default_organization.id,
        ),
        actor=default_user,
    )
    # Register and immediately remove a server at the target URL.
    doomed = await server.mcp_manager.create_mcp_server(
        PydanticMCPServer(
            server_name=f"test_mcp_server_{str(uuid.uuid4().hex[:8])}",
            server_type=MCPServerType.SSE,
            server_url=target_url,
            organization_id=default_organization.id,
        ),
        actor=default_user,
    )
    await server.mcp_manager.delete_mcp_server_by_id(doomed.id, actor=default_user)
    # The unrelated session must remain retrievable.
    survivor = await server.mcp_manager.get_oauth_session_by_id(unrelated_session.id, actor=default_user)
    assert survivor is not None, "OAuth session with different URL should persist"
@pytest.mark.asyncio
async def test_mcp_server_creation_links_orphaned_sessions(server, default_organization, default_user):
    """Creating a server should link any existing orphaned sessions (same user + URL)."""
    from letta.schemas.mcp import MCPOAuthSessionCreate, MCPServer as PydanticMCPServer, MCPServerType

    shared_url = "https://test-atomic-create.example.com/mcp"
    # Seed orphaned sessions (no server_id) for the same user + URL.
    orphan_ids: list[str] = []
    for idx in range(3):
        orphan = await server.mcp_manager.create_oauth_session(
            MCPOAuthSessionCreate(
                server_url=shared_url,
                server_name=f"atomic_session_{idx}",
                user_id=default_user.id,
                organization_id=default_organization.id,
            ),
            actor=default_user,
        )
        orphan_ids.append(orphan.id)
    # Creating a server at that URL should adopt the orphans.
    adopted_server = await server.mcp_manager.create_mcp_server(
        PydanticMCPServer(
            server_name=f"test_atomic_server_{str(uuid.uuid4().hex[:8])}",
            server_type=MCPServerType.SSE,
            server_url=shared_url,
            organization_id=default_organization.id,
        ),
        actor=default_user,
    )
    # The sessions stay retrievable through the manager API after linking.
    for oid in orphan_ids:
        assert await server.mcp_manager.get_oauth_session_by_id(oid, actor=default_user) is not None
    # Indirect verification: deleting the server removes sessions for that URL+user.
    await server.mcp_manager.delete_mcp_server_by_id(adopted_server.id, actor=default_user)
    for oid in orphan_ids:
        assert await server.mcp_manager.get_oauth_session_by_id(oid, actor=default_user) is None
@pytest.mark.asyncio
async def test_mcp_server_delete_removes_all_sessions_for_url_and_user(server, default_organization, default_user):
    """Deleting a server removes both linked and orphaned sessions for same user+URL."""
    from letta.schemas.mcp import MCPOAuthSessionCreate, MCPServer as PydanticMCPServer, MCPServerType

    shared_url = "https://test-atomic-cleanup.example.com/mcp"
    # An orphaned session (never linked to any server) at the shared URL.
    orphan = await server.mcp_manager.create_oauth_session(
        MCPOAuthSessionCreate(
            server_url=shared_url,
            server_name="orphaned",
            user_id=default_user.id,
            organization_id=default_organization.id,
        ),
        actor=default_user,
    )
    # A server registered at the same URL.
    victim = await server.mcp_manager.create_mcp_server(
        PydanticMCPServer(
            server_name=f"cleanup_server_{str(uuid.uuid4().hex[:8])}",
            server_type=MCPServerType.SSE,
            server_url=shared_url,
            organization_id=default_organization.id,
        ),
        actor=default_user,
    )
    # Removing the server must also sweep away the orphaned session.
    await server.mcp_manager.delete_mcp_server_by_id(victim.id, actor=default_user)
    assert await server.mcp_manager.get_oauth_session_by_id(orphan.id, actor=default_user) is None
@pytest.mark.asyncio
async def test_mcp_server_resync_tools(server, default_user, default_organization):
    """Test that resyncing MCP server tools correctly handles added, deleted, and updated tools.

    Cleanup: drops the redundant function-local ``from unittest.mock import
    AsyncMock, patch`` (both names are already imported at module scope) and
    replaces the try/``assert False``/except idiom with ``pytest.raises``.
    """
    from letta.functions.mcp_client.types import MCPTool, MCPToolHealth
    from letta.schemas.mcp import MCPServer as PydanticMCPServer, MCPServerType
    from letta.schemas.tool import ToolCreate

    # Create MCP server
    mcp_server = await server.mcp_manager.create_mcp_server(
        PydanticMCPServer(
            server_name=f"test_resync_{uuid.uuid4().hex[:8]}",
            server_type=MCPServerType.SSE,
            server_url="https://test-resync.example.com/mcp",
            organization_id=default_organization.id,
        ),
        actor=default_user,
    )
    mcp_server_id = mcp_server.id
    try:
        # Create initial persisted tools (simulating previously added tools)
        # Use sync method like in the existing mcp_tool fixture
        tool1_create = ToolCreate.from_mcp(
            mcp_server_name=mcp_server.server_name,
            mcp_tool=MCPTool(
                name="tool1",
                description="Tool 1",
                inputSchema={"type": "object", "properties": {"param1": {"type": "string"}}},
            ),
        )
        tool1 = await server.tool_manager.create_or_update_mcp_tool_async(
            tool_create=tool1_create,
            mcp_server_name=mcp_server.server_name,
            mcp_server_id=mcp_server_id,
            actor=default_user,
        )
        tool2_create = ToolCreate.from_mcp(
            mcp_server_name=mcp_server.server_name,
            mcp_tool=MCPTool(
                name="tool2",
                description="Tool 2 to be deleted",
                inputSchema={"type": "object", "properties": {"param2": {"type": "number"}}},
            ),
        )
        tool2 = await server.tool_manager.create_or_update_mcp_tool_async(
            tool_create=tool2_create,
            mcp_server_name=mcp_server.server_name,
            mcp_server_id=mcp_server_id,
            actor=default_user,
        )
        # Mock the list_mcp_server_tools to return updated tools from server
        # tool1 is updated, tool2 is deleted, tool3 is added
        updated_tools = [
            MCPTool(
                name="tool1",
                description="Tool 1 Updated",
                inputSchema={"type": "object", "properties": {"param1": {"type": "string"}, "param1b": {"type": "boolean"}}},
                health=MCPToolHealth(status="VALID", reasons=[]),
            ),
            MCPTool(
                name="tool3",
                description="Tool 3 New",
                inputSchema={"type": "object", "properties": {"param3": {"type": "array"}}},
                health=MCPToolHealth(status="VALID", reasons=[]),
            ),
        ]
        with patch.object(server.mcp_manager, "list_mcp_server_tools", new_callable=AsyncMock) as mock_list_tools:
            mock_list_tools.return_value = updated_tools
            # Run resync
            result = await server.mcp_manager.resync_mcp_server_tools(
                mcp_server_name=mcp_server.server_name,
                actor=default_user,
            )
            # Verify the resync result
            assert len(result.deleted) == 1
            assert "tool2" in result.deleted
            assert len(result.updated) == 1
            assert "tool1" in result.updated
            assert len(result.added) == 1
            assert "tool3" in result.added
            # Verify tool2 was actually deleted: fetching it must now fail
            with pytest.raises(Exception):
                await server.tool_manager.get_tool_by_id_async(tool_id=tool2.id, actor=default_user)
            # Verify tool1 was updated with new schema
            updated_tool1 = await server.tool_manager.get_tool_by_id_async(tool_id=tool1.id, actor=default_user)
            assert "param1b" in updated_tool1.json_schema["parameters"]["properties"]
            # Verify tool3 was added
            tools = await server.tool_manager.list_tools_async(actor=default_user, names=["tool3"])
            assert len(tools) == 1
            assert tools[0].name == "tool3"
    finally:
        # Clean up
        await server.mcp_manager.delete_mcp_server_by_id(mcp_server_id, actor=default_user)
# ======================================================================================================================
# MCPManager Tests - Encryption
# ======================================================================================================================
@pytest.fixture
def encryption_key():
    """Ensure an encryption key is configured for the duration of a test."""
    saved_key = settings.encryption_key
    # Install a deterministic test key only when none is configured.
    if not saved_key:
        settings.encryption_key = "test-encryption-key-32-bytes!!"
    yield settings.encryption_key
    # Teardown: put back whatever was configured before the test ran.
    settings.encryption_key = saved_key
@pytest.mark.asyncio
async def test_mcp_server_token_encryption_on_create(server, default_user, encryption_key):
    """Test that creating an MCP server encrypts the token in the database."""
    from letta.functions.mcp_client.types import MCPServerType
    from letta.orm.mcp_server import MCPServer as MCPServerModel
    from letta.schemas.mcp import MCPServer
    from letta.schemas.secret import Secret

    plaintext_token = "sk-test-secret-token-12345"
    created = await server.mcp_manager.create_mcp_server(
        MCPServer(
            server_name="test-encrypted-server",
            server_type=MCPServerType.STREAMABLE_HTTP,
            server_url="https://api.example.com/mcp",
            token=plaintext_token,
        ),
        actor=default_user,
    )
    try:
        assert created is not None
        assert created.server_name == "test-encrypted-server"
        # No dual-write: the plaintext column stays empty.
        assert created.token is None
        # The encrypted field is a Secret and round-trips to the original value.
        assert created.token_enc is not None
        assert isinstance(created.token_enc, Secret)
        assert created.token_enc.get_plaintext() == plaintext_token
        # Inspect the raw ORM row: stored ciphertext must differ from (and exceed) the plaintext.
        async with db_registry.async_session() as session:
            row = await MCPServerModel.read_async(
                db_session=session,
                identifier=created.id,
                actor=default_user,
            )
            assert row.token_enc is not None
            assert row.token_enc != plaintext_token
            assert len(row.token_enc) > len(plaintext_token)
    finally:
        # Clean up
        await server.mcp_manager.delete_mcp_server_by_id(created.id, actor=default_user)
@pytest.mark.asyncio
async def test_mcp_server_token_decryption_on_read(server, default_user, encryption_key):
    """Test that reading an MCP server decrypts the token correctly."""
    from letta.functions.mcp_client.types import MCPServerType
    from letta.schemas.mcp import MCPServer
    from letta.schemas.secret import Secret

    secret_value = "sk-test-decrypt-token-67890"
    created = await server.mcp_manager.create_mcp_server(
        MCPServer(
            server_name="test-decrypt-server",
            server_type=MCPServerType.STREAMABLE_HTTP,
            server_url="https://api.example.com/mcp",
            token=secret_value,
        ),
        actor=default_user,
    )
    server_id = created.id
    try:
        # Round-trip: fetch the server back and inspect its token fields.
        fetched = await server.mcp_manager.get_mcp_server_by_id_async(server_id, actor=default_user)
        # Plaintext column stays unset (no dual-write).
        assert fetched.token is None
        # Encrypted field decrypts to the original value.
        assert fetched.token_enc is not None
        assert fetched.token_enc.get_plaintext() == secret_value
        # The Secret-returning getter yields the same plaintext.
        token_secret = fetched.get_token_secret()
        assert isinstance(token_secret, Secret)
        assert token_secret.get_plaintext() == secret_value
    finally:
        # Clean up
        await server.mcp_manager.delete_mcp_server_by_id(server_id, actor=default_user)
@pytest.mark.asyncio
async def test_mcp_server_custom_headers_encryption(server, default_user, encryption_key):
    """Test that custom headers are encrypted as JSON strings.

    Cleanup: hoists the twice-duplicated function-local ``import json`` into a
    single import and drops a repeated ``get_custom_headers_dict`` assertion.
    """
    import json

    from letta.functions.mcp_client.types import MCPServerType
    from letta.orm.mcp_server import MCPServer as MCPServerModel
    from letta.schemas.mcp import MCPServer
    from letta.schemas.secret import Secret

    # Create MCP server with custom headers
    custom_headers = {"Authorization": "Bearer token123", "X-API-Key": "secret-key-456"}
    mcp_server = MCPServer(
        server_name="test-headers-server",
        server_type=MCPServerType.STREAMABLE_HTTP,
        server_url="https://api.example.com/mcp",
        custom_headers=custom_headers,
    )
    created_server = await server.mcp_manager.create_mcp_server(mcp_server, actor=default_user)
    try:
        # Verify plaintext custom_headers field is NOT set (no dual-write)
        assert created_server.custom_headers is None
        # Verify custom_headers are accessible via the decrypting convenience method
        assert created_server.get_custom_headers_dict() == custom_headers
        # Verify custom_headers_enc is a Secret object (stores JSON string)
        assert created_server.custom_headers_enc is not None
        assert isinstance(created_server.custom_headers_enc, Secret)
        # Verify the getter method returns a Secret wrapping a JSON string
        headers_secret = created_server.get_custom_headers_secret()
        assert isinstance(headers_secret, Secret)
        json_str = headers_secret.get_plaintext()
        assert json_str is not None
        assert json.loads(json_str) == custom_headers
        # Read from DB to verify encryption at rest
        async with db_registry.async_session() as session:
            server_orm = await MCPServerModel.read_async(
                db_session=session,
                identifier=created_server.id,
                actor=default_user,
            )
            # Verify encrypted column contains encrypted JSON string
            assert server_orm.custom_headers_enc is not None
            # Decrypt and verify it's valid JSON matching original headers
            decrypted_json = Secret.from_encrypted(server_orm.custom_headers_enc).get_plaintext()
            assert json.loads(decrypted_json) == custom_headers
    finally:
        # Clean up
        await server.mcp_manager.delete_mcp_server_by_id(created_server.id, actor=default_user)
@pytest.mark.asyncio
async def test_oauth_session_tokens_encryption(server, default_user, encryption_key):
    """Test that OAuth session tokens are encrypted in the database."""
    from letta.orm.mcp_oauth import MCPOAuth as MCPOAuthModel
    from letta.schemas.mcp import MCPOAuthSessionCreate, MCPOAuthSessionUpdate
    from letta.schemas.secret import Secret

    # Start a fresh OAuth session for the test server.
    oauth_session = await server.mcp_manager.create_oauth_session(
        MCPOAuthSessionCreate(
            server_url="https://oauth.example.com",
            server_name="test-oauth-server",
            organization_id=default_user.organization_id,
            user_id=default_user.id,
        ),
        actor=default_user,
    )
    session_id = oauth_session.id
    # Map each token attribute to the plaintext value we expect to round-trip.
    expected_tokens = {
        "access_token": "access-token-abc123",
        "refresh_token": "refresh-token-xyz789",
        "client_secret": "client-secret-def456",
        "authorization_code": "auth-code-ghi012",
    }
    try:
        # Attach the OAuth tokens to the session.
        updated = await server.mcp_manager.update_oauth_session(
            session_id, MCPOAuthSessionUpdate(**expected_tokens), actor=default_user
        )
        for field, plaintext in expected_tokens.items():
            # Plaintext accessors surface the exact values we wrote...
            assert getattr(updated, field) == plaintext
            # ...and each corresponding *_enc field is wrapped in a Secret.
            assert isinstance(getattr(updated, f"{field}_enc"), Secret)
        # Re-read the raw ORM row to confirm every ciphertext column decrypts correctly.
        async with db_registry.async_session() as db_session:
            oauth_row = await MCPOAuthModel.read_async(
                db_session=db_session,
                identifier=session_id,
                actor=default_user,
            )
            for field, plaintext in expected_tokens.items():
                ciphertext = getattr(oauth_row, f"{field}_enc")
                assert ciphertext is not None
                assert Secret.from_encrypted(ciphertext).get_plaintext() == plaintext
    finally:
        # Clean up
        await server.mcp_manager.delete_oauth_session(session_id, actor=default_user)
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/managers/test_mcp_manager.py",
"license": "Apache License 2.0",
"lines": 889,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/managers/test_message_manager.py | import uuid
import pytest
# Import shared fixtures and constants from conftest
from openai.types.chat.chat_completion_message_tool_call import ChatCompletionMessageToolCall as OpenAIToolCall, Function as OpenAIFunction
from letta.orm.errors import UniqueConstraintViolationError
from letta.schemas.enums import (
MessageRole,
)
from letta.schemas.letta_message import UpdateAssistantMessage, UpdateReasoningMessage, UpdateSystemMessage, UpdateUserMessage
from letta.schemas.letta_message_content import TextContent
from letta.schemas.message import Message as PydanticMessage, MessageUpdate
from letta.server.server import SyncServer
# ======================================================================================================================
# AgentManager Tests - Messages Relationship
# ======================================================================================================================
@pytest.mark.asyncio
async def test_reset_messages_no_messages(server: SyncServer, sarah_agent, default_user):
    """
    Test that resetting messages on an agent clears message_ids down to only the
    system message, while the message rows themselves remain in the database.
    """
    original_ids = sarah_agent.message_ids
    assert len(original_ids) == 4
    # Reset messages
    reset_agent = await server.agent_manager.reset_messages_async(agent_id=sarah_agent.id, actor=default_user)
    # Context is trimmed to just the system message, which keeps its original id.
    assert len(reset_agent.message_ids) == 1
    assert reset_agent.message_ids[0] == original_ids[0]
    # Rows were only removed from context, not deleted from the database.
    assert await server.message_manager.size_async(agent_id=sarah_agent.id, actor=default_user) == 4
@pytest.mark.asyncio
async def test_reset_messages_default_messages(server: SyncServer, sarah_agent, default_user):
    """
    Test that resetting with add_default_initial_messages=True replaces the
    context with fresh default messages while the old rows remain in the database.
    """
    og_ids = sarah_agent.message_ids
    assert len(og_ids) == 4
    # Reset, asking for the default initial messages to be re-seeded.
    reset_agent = await server.agent_manager.reset_messages_async(
        agent_id=sarah_agent.id, actor=default_user, add_default_initial_messages=True
    )
    new_ids = reset_agent.message_ids
    assert len(new_ids) == 4
    # The system message survives with its id; the other three are brand-new.
    assert new_ids[0] == og_ids[0]
    for idx in (1, 2, 3):
        assert new_ids[idx] != og_ids[idx]
    # Old messages (4) + new default messages (3) = 7 rows total in the database
    # (the system message is preserved, so 4 old + 3 new non-system = 7).
    assert await server.message_manager.size_async(agent_id=sarah_agent.id, actor=default_user) == 7
@pytest.mark.asyncio
async def test_reset_messages_with_existing_messages(server: SyncServer, sarah_agent, default_user):
    """
    Test that resetting messages on an agent with actual messages
    clears message_ids but keeps messages in the database.
    """
    # 1. Create a user and an assistant message for the agent.
    # The returned messages aren't needed later, only their DB side effect,
    # so the previous unused `msg1`/`msg2` locals were dropped.
    await server.message_manager.create_many_messages_async(
        [
            PydanticMessage(
                agent_id=sarah_agent.id,
                role="user",
                content=[TextContent(text="Hello, Sarah!")],
            ),
        ],
        actor=default_user,
    )
    await server.message_manager.create_many_messages_async(
        [
            PydanticMessage(
                agent_id=sarah_agent.id,
                role="assistant",
                content=[TextContent(text="Hello, user!")],
            ),
        ],
        actor=default_user,
    )
    # Verify the messages were created
    agent_before = await server.agent_manager.get_agent_by_id_async(sarah_agent.id, default_user)
    # Still 4: creating a message does not automatically add it to the in-context message ids
    assert len(agent_before.message_ids) == 4
    assert await server.message_manager.size_async(agent_id=sarah_agent.id, actor=default_user) == 6
    # 2. Reset all messages
    reset_agent = await server.agent_manager.reset_messages_async(agent_id=sarah_agent.id, actor=default_user)
    # 3. Verify the agent now has only the system message in context
    assert len(reset_agent.message_ids) == 1
    # 4. Verify the messages still exist in the database (only cleared from context)
    assert await server.message_manager.size_async(agent_id=sarah_agent.id, actor=default_user) == 6
@pytest.mark.asyncio
async def test_reset_messages_idempotency(server: SyncServer, sarah_agent, default_user):
    """
    Test that calling reset_messages multiple times has no adverse effect.
    """
    # Setup: actually delete everything but the system message from the DB.
    await server.message_manager.delete_messages_by_ids_async(message_ids=sarah_agent.message_ids[1:], actor=default_user)
    # Seed a single user message.
    await server.message_manager.create_many_messages_async(
        [
            PydanticMessage(
                agent_id=sarah_agent.id,
                role="user",
                content=[TextContent(text="Hello, Sarah!")],
            ),
        ],
        actor=default_user,
    )
    # Resetting twice should converge to the same state each time:
    # one in-context id, and two DB rows (system + the seeded user message).
    for _ in range(2):
        reset_agent = await server.agent_manager.reset_messages_async(agent_id=sarah_agent.id, actor=default_user)
        assert len(reset_agent.message_ids) == 1
        assert await server.message_manager.size_async(agent_id=sarah_agent.id, actor=default_user) == 2
@pytest.mark.asyncio
async def test_reset_messages_preserves_system_message_id(server: SyncServer, sarah_agent, default_user):
    """
    Test that resetting messages preserves the original system message ID.
    """
    # Capture the system message id before mutating anything.
    agent_snapshot = await server.agent_manager.get_agent_by_id_async(sarah_agent.id, default_user)
    system_msg_id = agent_snapshot.message_ids[0]
    # Add a user message so the reset has something to clear.
    await server.message_manager.create_many_messages_async(
        [
            PydanticMessage(
                agent_id=sarah_agent.id,
                role="user",
                content=[TextContent(text="Hello!")],
            ),
        ],
        actor=default_user,
    )
    # Reset messages
    reset_agent = await server.agent_manager.reset_messages_async(agent_id=sarah_agent.id, actor=default_user)
    # Only the original system message remains in context, with its id intact.
    assert len(reset_agent.message_ids) == 1
    assert reset_agent.message_ids[0] == system_msg_id
    # And that message still exists in the database with the system role.
    persisted = await server.message_manager.get_message_by_id_async(message_id=system_msg_id, actor=default_user)
    assert persisted.role == "system"
@pytest.mark.asyncio
async def test_reset_messages_preserves_system_message_content(server: SyncServer, sarah_agent, default_user):
    """
    Test that resetting messages preserves the original system message content.
    """
    # Snapshot the system message before the reset.
    agent = await server.agent_manager.get_agent_by_id_async(sarah_agent.id, default_user)
    before = await server.message_manager.get_message_by_id_async(
        message_id=agent.message_ids[0], actor=default_user
    )
    # Add a throwaway user message, then reset.
    await server.message_manager.create_many_messages_async(
        [
            PydanticMessage(
                agent_id=sarah_agent.id,
                role="user",
                content=[TextContent(text="Hello!")],
            ),
        ],
        actor=default_user,
    )
    reset_agent = await server.agent_manager.reset_messages_async(agent_id=sarah_agent.id, actor=default_user)
    # The surviving context message is the very same system message, unmodified.
    after = await server.message_manager.get_message_by_id_async(
        message_id=reset_agent.message_ids[0], actor=default_user
    )
    assert after.content == before.content
    assert after.role == "system"
    assert after.id == before.id
@pytest.mark.asyncio
async def test_modify_letta_message(server: SyncServer, sarah_agent, default_user):
    """
    Test updating each Letta message type (user, system, reasoning, assistant)
    via update_message_by_letta_message_async and verifying the change persists.
    """
    import json

    messages = await server.message_manager.list_messages(agent_id=sarah_agent.id, actor=default_user)
    letta_messages = PydanticMessage.to_letta_messages_from_list(messages=messages)
    system_message = next(msg for msg in letta_messages if msg.message_type == "system_message")
    assistant_message = next(msg for msg in letta_messages if msg.message_type == "assistant_message")
    user_message = next(msg for msg in letta_messages if msg.message_type == "user_message")
    reasoning_message = next(msg for msg in letta_messages if msg.message_type == "reasoning_message")

    # user message
    update_user_message = UpdateUserMessage(content="Hello, Sarah!")
    original_user_message = await server.message_manager.get_message_by_id_async(message_id=user_message.id, actor=default_user)
    assert original_user_message.content[0].text != update_user_message.content
    await server.message_manager.update_message_by_letta_message_async(
        message_id=user_message.id, letta_message_update=update_user_message, actor=default_user
    )
    updated_user_message = await server.message_manager.get_message_by_id_async(message_id=user_message.id, actor=default_user)
    assert updated_user_message.content[0].text == update_user_message.content

    # system message
    update_system_message = UpdateSystemMessage(content="You are a friendly assistant!")
    original_system_message = await server.message_manager.get_message_by_id_async(message_id=system_message.id, actor=default_user)
    assert original_system_message.content[0].text != update_system_message.content
    await server.message_manager.update_message_by_letta_message_async(
        message_id=system_message.id, letta_message_update=update_system_message, actor=default_user
    )
    updated_system_message = await server.message_manager.get_message_by_id_async(message_id=system_message.id, actor=default_user)
    assert updated_system_message.content[0].text == update_system_message.content

    # reasoning message
    update_reasoning_message = UpdateReasoningMessage(reasoning="I am thinking")
    original_reasoning_message = await server.message_manager.get_message_by_id_async(message_id=reasoning_message.id, actor=default_user)
    assert original_reasoning_message.content[0].text != update_reasoning_message.reasoning
    await server.message_manager.update_message_by_letta_message_async(
        message_id=reasoning_message.id, letta_message_update=update_reasoning_message, actor=default_user
    )
    updated_reasoning_message = await server.message_manager.get_message_by_id_async(message_id=reasoning_message.id, actor=default_user)
    assert updated_reasoning_message.content[0].text == update_reasoning_message.reasoning

    # assistant message: the content lives inside the send_message tool call arguments
    def parse_send_message(tool_call):
        """Extract the 'message' argument from a send_message tool call."""
        return json.loads(tool_call.function.arguments)["message"]

    update_assistant_message = UpdateAssistantMessage(content="I am an agent!")
    original_assistant_message = await server.message_manager.get_message_by_id_async(message_id=assistant_message.id, actor=default_user)
    assert parse_send_message(original_assistant_message.tool_calls[0]) != update_assistant_message.content
    await server.message_manager.update_message_by_letta_message_async(
        message_id=assistant_message.id, letta_message_update=update_assistant_message, actor=default_user
    )
    updated_assistant_message = await server.message_manager.get_message_by_id_async(message_id=assistant_message.id, actor=default_user)
    assert parse_send_message(updated_assistant_message.tool_calls[0]) == update_assistant_message.content
    # TODO: tool calls/responses
@pytest.mark.asyncio
async def test_message_create(server: SyncServer, hello_world_message_fixture, default_user):
    """Test creating a message using hello_world_message_fixture fixture"""
    created = hello_world_message_fixture
    assert created.id is not None
    assert created.content[0].text == "Hello, world!"
    assert created.role == "user"
    # Round-trip: the persisted row should match the fixture exactly.
    fetched = await server.message_manager.get_message_by_id_async(
        created.id,
        actor=default_user,
    )
    assert fetched is not None
    assert fetched.id == created.id
    assert fetched.content[0].text == created.content[0].text
    assert fetched.role == created.role
@pytest.mark.asyncio
async def test_message_get_by_id(server: SyncServer, hello_world_message_fixture, default_user):
    """Test retrieving a message by ID"""
    fixture_msg = hello_world_message_fixture
    fetched = await server.message_manager.get_message_by_id_async(fixture_msg.id, actor=default_user)
    # The lookup returns the same message with identical id and text.
    assert fetched is not None
    assert fetched.id == fixture_msg.id
    assert fetched.content[0].text == fixture_msg.content[0].text
@pytest.mark.asyncio
async def test_message_update(server: SyncServer, hello_world_message_fixture, default_user, other_user):
    """Test updating a message"""
    new_text = "Updated text"
    # Perform the update as other_user so we can verify audit attribution below.
    updated = await server.message_manager.update_message_by_id_async(
        hello_world_message_fixture.id, MessageUpdate(content=new_text), actor=other_user
    )
    assert updated is not None
    assert updated.content[0].text == new_text
    fetched = await server.message_manager.get_message_by_id_async(hello_world_message_fixture.id, actor=default_user)
    assert fetched.content[0].text == new_text
    # ORM audit metadata: creator stays default_user, last updater is other_user.
    assert fetched.created_by_id == default_user.id
    assert fetched.last_updated_by_id == other_user.id
@pytest.mark.asyncio
async def test_message_delete(server: SyncServer, hello_world_message_fixture, default_user):
    """Test deleting a message"""
    target_id = hello_world_message_fixture.id
    await server.message_manager.delete_message_by_id_async(target_id, actor=default_user)
    # A deleted message should no longer be retrievable.
    assert await server.message_manager.get_message_by_id_async(target_id, actor=default_user) is None
@pytest.mark.asyncio
async def test_message_conversation_id_persistence(server: SyncServer, sarah_agent, default_user):
    """Test that conversation_id is properly persisted and retrieved from DB to Pydantic object"""
    from letta.schemas.conversation import CreateConversation
    from letta.services.conversation_manager import ConversationManager

    conversation_manager = ConversationManager()

    async def _create_and_fetch(message):
        """Persist a single message, then re-read it by id to exercise ORM->Pydantic conversion."""
        created_batch = await server.message_manager.create_many_messages_async([message], actor=default_user)
        assert len(created_batch) == 1
        created = created_batch[0]
        fetched = await server.message_manager.get_message_by_id_async(created.id, actor=default_user)
        assert fetched is not None
        assert fetched.id == created.id
        return created, fetched

    # Case 1: no conversation_id (the default / backward-compat path) stays None end to end.
    created, fetched = await _create_and_fetch(
        PydanticMessage(
            agent_id=sarah_agent.id,
            role=MessageRole.user,
            content=[TextContent(text="Test message without conversation")],
        )
    )
    assert created.conversation_id is None
    assert fetched.conversation_id is None

    # Case 2: a real conversation id is preserved through create and read.
    conversation = await conversation_manager.create_conversation(
        agent_id=sarah_agent.id,
        conversation_create=CreateConversation(summary="Test conversation"),
        actor=default_user,
    )
    created, fetched = await _create_and_fetch(
        PydanticMessage(
            agent_id=sarah_agent.id,
            role=MessageRole.user,
            content=[TextContent(text="Test message with conversation")],
            conversation_id=conversation.id,
        )
    )
    assert created.conversation_id == conversation.id
    assert fetched.conversation_id == conversation.id

    # Case 3: the field is actually declared on the Pydantic model.
    assert hasattr(fetched, "conversation_id")
@pytest.mark.asyncio
async def test_message_size(server: SyncServer, hello_world_message_fixture, default_user):
    """Test counting messages with filters"""
    base_message = hello_world_message_fixture
    # Seed four extra messages on the same agent with the same role.
    extra = [
        PydanticMessage(
            agent_id=base_message.agent_id,
            role=base_message.role,
            content=[TextContent(text=f"Test message {i}")],
        )
        for i in range(4)
    ]
    await server.message_manager.create_many_messages_async(extra, actor=default_user)

    size = server.message_manager.size_async
    # Total user messages: login message + base message + 4 seeded = 6.
    # TODO: change login message to be a system not user message
    assert await size(actor=default_user, role=MessageRole.user) == 6
    # Filtering by the agent id yields the same count.
    assert await size(actor=default_user, agent_id=base_message.agent_id, role=MessageRole.user) == 6
    # Filtering by role alone matches too.
    assert await size(actor=default_user, role=base_message.role) == 6
    # A nonexistent agent id matches nothing.
    assert await size(actor=default_user, agent_id="non-existent", role=MessageRole.user) == 0
async def create_test_messages(server: SyncServer, base_message: PydanticMessage, default_user) -> list[PydanticMessage]:
    """Helper: persist four messages mirroring base_message's agent and role, and return them."""
    batch = [
        PydanticMessage(
            agent_id=base_message.agent_id,
            role=base_message.role,
            content=[TextContent(text=f"Test message {i}")],
        )
        for i in range(4)
    ]
    await server.message_manager.create_many_messages_async(batch, actor=default_user)
    return batch
@pytest.mark.asyncio
async def test_get_messages_by_ids(server: SyncServer, hello_world_message_fixture, default_user, sarah_agent):
    """Test fetching a batch of messages by their ids."""
    # NOTE: the previous docstring ("Test basic message listing with limit") was a
    # copy-paste from the listing test and did not describe this test.
    messages = await create_test_messages(server, hello_world_message_fixture, default_user)
    message_ids = [m.id for m in messages]
    results = await server.message_manager.get_messages_by_ids_async(message_ids=message_ids, actor=default_user)
    # The same set of ids comes back (order is not guaranteed).
    assert sorted(message_ids) == sorted([r.id for r in results])
@pytest.mark.asyncio
async def test_message_listing_basic(server: SyncServer, hello_world_message_fixture, default_user, sarah_agent):
    """Test basic message listing with limit"""
    await create_test_messages(server, hello_world_message_fixture, default_user)
    page = await server.message_manager.list_user_messages_for_agent_async(agent_id=sarah_agent.id, limit=3, actor=default_user)
    # The limit caps the page size even though more messages exist.
    assert len(page) == 3
@pytest.mark.asyncio
async def test_message_listing_cursor(server: SyncServer, hello_world_message_fixture, default_user, sarah_agent):
    """Test cursor-based pagination functionality"""
    await create_test_messages(server, hello_world_message_fixture, default_user)
    list_msgs = server.message_manager.list_user_messages_for_agent_async
    # Sanity check: six user messages exist in total.
    assert await server.message_manager.size_async(actor=default_user, role=MessageRole.user) == 6
    # First page of three.
    page_one = await list_msgs(agent_id=sarah_agent.id, actor=default_user, limit=3)
    assert len(page_one) == 3
    # Second page resumes after the last id of the first page.
    page_two = await list_msgs(agent_id=sarah_agent.id, actor=default_user, after=page_one[-1].id, limit=3)
    assert len(page_two) == 3  # the remaining three messages
    # No id appears on both pages.
    assert {m.id for m in page_one}.isdisjoint({m.id for m in page_two})
    # A window bounded on both sides returns the three messages strictly between the cursors.
    window = await list_msgs(agent_id=sarah_agent.id, actor=default_user, before=page_two[1].id, after=page_one[0].id)
    assert len(window) == 3
    assert window[0].id == page_one[1].id
    assert window[1].id == page_one[-1].id
    assert window[-1].id == page_two[0].id
    # The same window in descending order is simply reversed.
    window_desc = await list_msgs(
        agent_id=sarah_agent.id, actor=default_user, before=page_two[1].id, after=page_one[0].id, ascending=False
    )
    assert len(window_desc) == 3
    assert window_desc[0].id == page_two[0].id
    assert window_desc[1].id == page_one[-1].id
    assert window_desc[-1].id == page_one[1].id
@pytest.mark.asyncio
async def test_message_listing_filtering(server: SyncServer, hello_world_message_fixture, default_user, sarah_agent):
    """Test filtering messages by agent ID"""
    await create_test_messages(server, hello_world_message_fixture, default_user)
    results = await server.message_manager.list_user_messages_for_agent_async(agent_id=sarah_agent.id, actor=default_user, limit=10)
    # login message + base message + 4 seeded messages
    assert len(results) == 6
    # Every returned message belongs to the fixture's agent.
    for msg in results:
        assert msg.agent_id == hello_world_message_fixture.agent_id
@pytest.mark.asyncio
async def test_message_listing_text_search(server: SyncServer, hello_world_message_fixture, default_user, sarah_agent):
    """Test searching messages by text content"""
    await create_test_messages(server, hello_world_message_fixture, default_user)
    # A query matching the seeded messages returns exactly those four.
    hits = await server.message_manager.list_user_messages_for_agent_async(
        agent_id=sarah_agent.id, actor=default_user, query_text="Test message", limit=10
    )
    assert len(hits) == 4
    assert all("Test message" in m.content[0].text for m in hits)
    # A query matching nothing returns an empty page.
    misses = await server.message_manager.list_user_messages_for_agent_async(
        agent_id=sarah_agent.id, actor=default_user, query_text="Letta", limit=10
    )
    assert len(misses) == 0
@pytest.mark.asyncio
async def test_create_many_messages_async_basic(server: SyncServer, sarah_agent, default_user):
    """Test basic batch creation of messages"""
    batch = [
        PydanticMessage(
            agent_id=sarah_agent.id,
            role=MessageRole.user,
            content=[TextContent(text=f"Test message {i}")],
            name=None,
            tool_calls=None,
            tool_call_id=None,
        )
        for i in range(5)
    ]
    created = await server.message_manager.create_many_messages_async(pydantic_msgs=batch, actor=default_user)
    assert len(created) == 5
    # Each created message gets an id and keeps its text and agent binding.
    for i, msg in enumerate(created):
        assert msg.id is not None
        assert msg.content[0].text == f"Test message {i}"
        assert msg.agent_id == sarah_agent.id
@pytest.mark.asyncio
async def test_create_many_messages_async_allow_partial_false(server: SyncServer, sarah_agent, default_user):
    """Test that allow_partial=False (default) fails on duplicate IDs"""
    manager = server.message_manager
    # Persist one message to obtain a real id.
    seeded = await manager.create_many_messages_async(
        pydantic_msgs=[
            PydanticMessage(
                agent_id=sarah_agent.id,
                role=MessageRole.user,
                content=[TextContent(text="Initial message")],
            )
        ],
        actor=default_user,
    )
    assert len(seeded) == 1
    # Re-inserting the same id must raise when partial inserts are disallowed.
    clash = PydanticMessage(
        id=seeded[0].id,
        agent_id=sarah_agent.id,
        role=MessageRole.user,
        content=[TextContent(text="Duplicate message")],
    )
    with pytest.raises(UniqueConstraintViolationError):
        await manager.create_many_messages_async(pydantic_msgs=[clash], actor=default_user, allow_partial=False)
@pytest.mark.asyncio
async def test_create_many_messages_async_allow_partial_true_some_duplicates(server: SyncServer, sarah_agent, default_user):
    """Test that allow_partial=True handles partial duplicates correctly"""
    manager = server.message_manager
    # Seed three messages so their ids already exist.
    seeded = await manager.create_many_messages_async(
        pydantic_msgs=[
            PydanticMessage(
                agent_id=sarah_agent.id,
                role=MessageRole.user,
                content=[TextContent(text=f"Existing message {i}")],
            )
            for i in range(3)
        ],
        actor=default_user,
    )
    assert len(seeded) == 3
    existing_ids = [m.id for m in seeded]
    # Build a batch of three duplicates followed by three brand-new messages.
    batch = [
        PydanticMessage(
            id=m.id,
            agent_id=sarah_agent.id,
            role=MessageRole.user,
            content=m.content,
        )
        for m in seeded
    ] + [
        PydanticMessage(
            agent_id=sarah_agent.id,
            role=MessageRole.user,
            content=[TextContent(text=f"New message {i}")],
        )
        for i in range(3, 6)
    ]
    result = await manager.create_many_messages_async(pydantic_msgs=batch, actor=default_user, allow_partial=True)
    # All six come back: the three pre-existing plus the three new ones.
    assert len(result) == 6
    returned_ids = {m.id for m in result}
    for existing_id in existing_ids:
        assert existing_id in returned_ids
@pytest.mark.asyncio
async def test_create_many_messages_async_allow_partial_true_all_duplicates(server: SyncServer, sarah_agent, default_user):
    """Test that allow_partial=True handles all duplicates correctly"""
    manager = server.message_manager
    seeded = await manager.create_many_messages_async(
        pydantic_msgs=[
            PydanticMessage(
                agent_id=sarah_agent.id,
                role=MessageRole.user,
                content=[TextContent(text=f"Message {i}")],
            )
            for i in range(3)
        ],
        actor=default_user,
    )
    assert len(seeded) == 3
    # Re-submit exact duplicates of everything we just created.
    duplicates = [
        PydanticMessage(
            id=m.id,
            agent_id=sarah_agent.id,
            role=MessageRole.user,
            content=m.content,
        )
        for m in seeded
    ]
    result = await manager.create_many_messages_async(pydantic_msgs=duplicates, actor=default_user, allow_partial=True)
    # The originals are returned unchanged, in order.
    assert len(result) == 3
    for i, (returned, original) in enumerate(zip(result, seeded)):
        assert returned.id == original.id
        assert returned.content[0].text == f"Message {i}"
@pytest.mark.asyncio
async def test_create_many_messages_async_empty_list(server: SyncServer, default_user):
    """Test that empty list returns empty list"""
    # An empty batch should be a no-op yielding an empty list.
    assert await server.message_manager.create_many_messages_async(pydantic_msgs=[], actor=default_user) == []
@pytest.mark.asyncio
async def test_check_existing_message_ids(server: SyncServer, sarah_agent, default_user):
    """Test the check_existing_message_ids convenience function"""
    manager = server.message_manager
    created = await manager.create_many_messages_async(
        pydantic_msgs=[
            PydanticMessage(
                agent_id=sarah_agent.id,
                role=MessageRole.user,
                content=[TextContent(text=f"Message {i}")],
            )
            for i in range(3)
        ],
        actor=default_user,
    )
    known_ids = [m.id for m in created]
    # Mix in ids that were never persisted.
    phantom_ids = [f"message-{uuid.uuid4().hex[:8]}" for _ in range(3)]
    found = await manager.check_existing_message_ids(message_ids=known_ids + phantom_ids, actor=default_user)
    # Only the persisted ids are reported as existing.
    assert found == set(known_ids)
    for phantom_id in phantom_ids:
        assert phantom_id not in found
@pytest.mark.asyncio
async def test_filter_existing_messages(server: SyncServer, sarah_agent, default_user):
    """Test the filter_existing_messages helper function"""
    manager = server.message_manager
    # Persist three messages whose ids will count as "existing".
    persisted = await manager.create_many_messages_async(
        pydantic_msgs=[
            PydanticMessage(
                agent_id=sarah_agent.id,
                role=MessageRole.user,
                content=[TextContent(text=f"Existing {i}")],
            )
            for i in range(3)
        ],
        actor=default_user,
    )
    # Rebuild pydantic copies of the persisted messages plus three unseen ones.
    known_copies = [
        PydanticMessage(
            id=m.id,
            agent_id=sarah_agent.id,
            role=MessageRole.user,
            content=m.content,
        )
        for m in persisted
    ]
    unseen = [
        PydanticMessage(
            agent_id=sarah_agent.id,
            role=MessageRole.user,
            content=[TextContent(text=f"New {i}")],
        )
        for i in range(3)
    ]
    new_filtered, existing_filtered = await manager.filter_existing_messages(messages=known_copies + unseen, actor=default_user)
    # The helper splits the batch cleanly: three existing, three new.
    assert len(new_filtered) == 3
    assert len(existing_filtered) == 3
    existing_filtered_ids = {m.id for m in existing_filtered}
    for m in persisted:
        assert m.id in existing_filtered_ids
    for m in new_filtered:
        assert m.id not in existing_filtered_ids
@pytest.mark.asyncio
async def test_create_many_messages_async_with_turbopuffer(server: SyncServer, sarah_agent, default_user):
    """Test batch creation with turbopuffer embedding (if enabled)"""
    message_manager = server.message_manager
    # build the batch up-front instead of appending in a loop
    batch = [
        PydanticMessage(
            agent_id=sarah_agent.id,
            role=MessageRole.user,
            content=[TextContent(text=f"Important information about topic {i}")],
        )
        for i in range(3)
    ]
    # strict_mode exercises the embedding path; project/template ids pass through
    persisted = await message_manager.create_many_messages_async(
        pydantic_msgs=batch, actor=default_user, strict_mode=True, project_id="test_project", template_id="test_template"
    )
    assert len(persisted) == 3
    for persisted_msg in persisted:
        # every stored message gets an id and keeps its agent linkage
        assert persisted_msg.id is not None
        assert persisted_msg.agent_id == sarah_agent.id
# ======================================================================================================================
# Pydantic Object Tests - Tool Call Message Conversion
# ======================================================================================================================
@pytest.mark.asyncio
async def test_convert_tool_call_messages_no_assistant_mode(server: SyncServer, sarah_agent, default_user):
    """Test that when assistant mode is off, all tool calls go into a single ToolCallMessage"""
    # assistant message carrying three distinct tool calls, including send_message
    calls = [
        OpenAIToolCall(
            id="call_1", type="function", function=OpenAIFunction(name="archival_memory_insert", arguments='{"content": "test memory 1"}')
        ),
        OpenAIToolCall(
            id="call_2", type="function", function=OpenAIFunction(name="conversation_search", arguments='{"query": "test search"}')
        ),
        OpenAIToolCall(id="call_3", type="function", function=OpenAIFunction(name="send_message", arguments='{"message": "Hello there!"}')),
    ]
    source_message = PydanticMessage(
        agent_id=sarah_agent.id,
        role=MessageRole.assistant,
        content=[TextContent(text="Let me help you with that.")],
        tool_calls=calls,
    )
    # assistant mode off: everything collapses into one tool-call message
    # (output is reversed by default: tool call first, then reasoning)
    converted = source_message.to_letta_messages(use_assistant_message=False)
    assert len(converted) == 2
    assert converted[0].message_type == "tool_call_message"
    assert converted[1].message_type == "reasoning_message"
    tool_call_msg = converted[0]
    # new-style plural field carries every call
    assert tool_call_msg.tool_calls is not None
    assert len(tool_call_msg.tool_calls) == 3
    # deprecated singular field mirrors the first call for backwards compatibility
    assert tool_call_msg.tool_call is not None
    assert tool_call_msg.tool_call.name == "archival_memory_insert"
    assert tool_call_msg.tool_call.tool_call_id == "call_1"
    # all three tool names must survive the conversion
    observed_names = {tc.name for tc in tool_call_msg.tool_calls}
    assert "archival_memory_insert" in observed_names
    assert "conversation_search" in observed_names
    assert "send_message" in observed_names
@pytest.mark.asyncio
async def test_convert_tool_call_messages_with_assistant_mode(server: SyncServer, sarah_agent, default_user):
    """Test that with assistant mode, send_message becomes AssistantMessage and others are grouped.

    The send_message call sits between two other tool calls, so it splits the
    conversion into two separate tool-call batches around one assistant message.
    """
    # create a message with tool calls including send_message
    tool_calls = [
        OpenAIToolCall(
            id="call_1", type="function", function=OpenAIFunction(name="archival_memory_insert", arguments='{"content": "test memory 1"}')
        ),
        OpenAIToolCall(id="call_2", type="function", function=OpenAIFunction(name="send_message", arguments='{"message": "Hello there!"}')),
        OpenAIToolCall(
            id="call_3", type="function", function=OpenAIFunction(name="conversation_search", arguments='{"query": "test search"}')
        ),
    ]
    message = PydanticMessage(
        agent_id=sarah_agent.id,
        role=MessageRole.assistant,
        content=[TextContent(text="Let me help you with that.")],
        tool_calls=tool_calls,
    )
    # convert with assistant mode (reverse=True by default)
    letta_messages = message.to_letta_messages(use_assistant_message=True)
    # should have 4 messages in reverse order:
    # conversation_search tool call, assistant message, archival_memory tool call, reasoning
    assert len(letta_messages) == 4
    assert letta_messages[0].message_type == "tool_call_message"
    assert letta_messages[1].message_type == "assistant_message"
    assert letta_messages[2].message_type == "tool_call_message"
    assert letta_messages[3].message_type == "reasoning_message"
    # check first tool call message (actually the last in forward order) has conversation_search
    first_tool_msg = letta_messages[0]
    assert len(first_tool_msg.tool_calls) == 1
    assert first_tool_msg.tool_calls[0].name == "conversation_search"
    assert first_tool_msg.tool_call.name == "conversation_search"  # backwards compat
    # check assistant message content (extracted from send_message's "message" argument)
    assistant_msg = letta_messages[1]
    assert assistant_msg.content == "Hello there!"
    # check last tool call message (actually the first in forward order) has archival_memory_insert
    last_tool_msg = letta_messages[2]
    assert len(last_tool_msg.tool_calls) == 1
    assert last_tool_msg.tool_calls[0].name == "archival_memory_insert"
    assert last_tool_msg.tool_call.name == "archival_memory_insert"  # backwards compat
@pytest.mark.asyncio
async def test_convert_tool_call_messages_multiple_non_assistant_tools(server: SyncServer, sarah_agent, default_user):
    """Test that multiple non-assistant tools are batched together until assistant tool is reached"""
    # three non-assistant tools followed by a single send_message
    calls = [
        OpenAIToolCall(
            id="call_1", type="function", function=OpenAIFunction(name="archival_memory_insert", arguments='{"content": "memory 1"}')
        ),
        OpenAIToolCall(
            id="call_2", type="function", function=OpenAIFunction(name="conversation_search", arguments='{"query": "search 1"}')
        ),
        OpenAIToolCall(
            id="call_3", type="function", function=OpenAIFunction(name="archival_memory_search", arguments='{"query": "archive search"}')
        ),
        OpenAIToolCall(
            id="call_4", type="function", function=OpenAIFunction(name="send_message", arguments='{"message": "Results found!"}')
        ),
    ]
    source_message = PydanticMessage(
        agent_id=sarah_agent.id,
        role=MessageRole.assistant,
        content=[TextContent(text="Processing...")],
        tool_calls=calls,
    )
    # assistant mode on; conversion output is reversed by default
    converted = source_message.to_letta_messages(use_assistant_message=True)
    # reversed order: assistant message, one batched tool-call message, reasoning
    assert len(converted) == 3
    assert converted[0].message_type == "assistant_message"
    assert converted[1].message_type == "tool_call_message"
    assert converted[2].message_type == "reasoning_message"
    batched = converted[1]
    # all three non-assistant tools were grouped into a single message
    assert len(batched.tool_calls) == 3
    observed_names = [tc.name for tc in batched.tool_calls]
    assert "archival_memory_insert" in observed_names
    assert "conversation_search" in observed_names
    assert "archival_memory_search" in observed_names
    # deprecated singular field still exposes the first batched call
    assert batched.tool_call.name == "archival_memory_insert"
    # send_message became the assistant message content
    assert converted[0].content == "Results found!"
@pytest.mark.asyncio
async def test_convert_single_tool_call_both_fields(server: SyncServer, sarah_agent, default_user):
    """Test that a single tool call is written to both tool_call and tool_calls fields"""
    source_message = PydanticMessage(
        agent_id=sarah_agent.id,
        role=MessageRole.assistant,
        content=[TextContent(text="Saving to memory...")],
        tool_calls=[
            OpenAIToolCall(
                id="call_1",
                type="function",
                function=OpenAIFunction(name="archival_memory_insert", arguments='{"content": "single tool call"}'),
            ),
        ],
    )
    # assistant mode off: expect [tool_call, reasoning] (reversed ordering)
    without_assistant = source_message.to_letta_messages(use_assistant_message=False)
    assert len(without_assistant) == 2
    tool_msg = without_assistant[0]
    # deprecated singular field is populated ...
    assert tool_msg.tool_call is not None
    assert tool_msg.tool_call.name == "archival_memory_insert"
    # ... and so is the new plural field, with the same single entry
    assert tool_msg.tool_calls is not None
    assert len(tool_msg.tool_calls) == 1
    assert tool_msg.tool_calls[0].name == "archival_memory_insert"
    assert tool_msg.tool_calls[0].tool_call_id == "call_1"
    # assistant mode on: a non-assistant tool still yields a tool-call message
    with_assistant = source_message.to_letta_messages(use_assistant_message=True)
    assert len(with_assistant) == 2
    tool_msg_assist = with_assistant[0]
    # both fields remain populated in assistant mode too
    assert tool_msg_assist.tool_call is not None
    assert tool_msg_assist.tool_calls is not None
    assert len(tool_msg_assist.tool_calls) == 1
@pytest.mark.asyncio
async def test_convert_tool_calls_only_assistant_tools(server: SyncServer, sarah_agent, default_user):
    """Test that only send_message tools are converted to AssistantMessages"""
    # two consecutive send_message calls and nothing else
    calls = [
        OpenAIToolCall(
            id="call_1", type="function", function=OpenAIFunction(name="send_message", arguments='{"message": "First message"}')
        ),
        OpenAIToolCall(
            id="call_2", type="function", function=OpenAIFunction(name="send_message", arguments='{"message": "Second message"}')
        ),
    ]
    source_message = PydanticMessage(
        agent_id=sarah_agent.id,
        role=MessageRole.assistant,
        content=[TextContent(text="Sending messages...")],
        tool_calls=calls,
    )
    # assistant mode on; conversion output is reversed by default
    converted = source_message.to_letta_messages(use_assistant_message=True)
    # reversed output: both assistant messages precede the reasoning message
    assert len(converted) == 3
    assert converted[0].message_type == "assistant_message"
    assert converted[1].message_type == "assistant_message"
    assert converted[2].message_type == "reasoning_message"
    # contents appear in reverse of their original call order
    assert converted[0].content == "Second message"
    assert converted[1].content == "First message"
@pytest.mark.asyncio
async def test_convert_assistant_message_with_dict_content(server: SyncServer, sarah_agent, default_user):
    """Test that send_message with dict content is properly serialized to JSON string
    Regression test for bug where dict content like {'tofu': 1, 'mofu': 1, 'bofu': 1}
    caused pydantic validation error because AssistantMessage.content expects a string.
    """
    import json
    # Test case 1: Simple dict as message content
    tool_calls = [
        OpenAIToolCall(
            id="call_1",
            type="function",
            function=OpenAIFunction(name="send_message", arguments='{"message": {"tofu": 1, "mofu": 1, "bofu": 1}}'),
        ),
    ]
    message = PydanticMessage(
        agent_id=sarah_agent.id,
        role=MessageRole.assistant,
        content=[TextContent(text="Sending structured data...")],
        tool_calls=tool_calls,
    )
    # convert with assistant mode - should not raise validation error
    letta_messages = message.to_letta_messages(use_assistant_message=True)
    # reversed output: assistant message first, then reasoning
    assert len(letta_messages) == 2
    assert letta_messages[0].message_type == "assistant_message"
    assert letta_messages[1].message_type == "reasoning_message"
    # check that dict was serialized to JSON string
    assistant_msg = letta_messages[0]
    assert isinstance(assistant_msg.content, str)
    # verify the JSON-serialized content can be parsed back
    parsed_content = json.loads(assistant_msg.content)
    assert parsed_content == {"tofu": 1, "mofu": 1, "bofu": 1}
    # Test case 2: Nested dict with various types
    tool_calls_nested = [
        OpenAIToolCall(
            id="call_2",
            type="function",
            function=OpenAIFunction(
                name="send_message",
                arguments='{"message": {"status": "success", "data": {"count": 42, "items": ["a", "b"]}, "meta": null}}',
            ),
        ),
    ]
    message_nested = PydanticMessage(
        agent_id=sarah_agent.id,
        role=MessageRole.assistant,
        content=[TextContent(text="Sending complex data...")],
        tool_calls=tool_calls_nested,
    )
    letta_messages_nested = message_nested.to_letta_messages(use_assistant_message=True)
    assistant_msg_nested = letta_messages_nested[0]
    assert isinstance(assistant_msg_nested.content, str)
    # nested structures (including JSON null) must round-trip through json
    parsed_nested = json.loads(assistant_msg_nested.content)
    assert parsed_nested == {"status": "success", "data": {"count": 42, "items": ["a", "b"]}, "meta": None}
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/managers/test_message_manager.py",
"license": "Apache License 2.0",
"lines": 845,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/managers/test_organization_manager.py | import pytest
# Import shared fixtures and constants from conftest
from letta.constants import (
DEFAULT_ORG_NAME,
)
from letta.schemas.organization import Organization as PydanticOrganization, OrganizationUpdate
from letta.server.server import SyncServer
# ======================================================================================================================
# Organization Manager Tests
# ======================================================================================================================
@pytest.mark.asyncio
async def test_list_organizations(server: SyncServer):
    """Create an org, confirm it is listed, then delete it and confirm the list is empty."""
    org_name = "test"
    created = await server.organization_manager.create_organization_async(pydantic_org=PydanticOrganization(name=org_name))
    listed = await server.organization_manager.list_organizations_async()
    # the freshly created org is the only one listed
    assert len(listed) == 1
    assert listed[0].name == org_name
    # clean up and verify the deletion is reflected in the listing
    await server.organization_manager.delete_organization_by_id_async(created.id)
    assert len(await server.organization_manager.list_organizations_async()) == 0
@pytest.mark.asyncio
async def test_create_default_organization(server: SyncServer):
    """The default organization must be retrievable under its well-known name."""
    await server.organization_manager.create_default_organization_async()
    default_org = await server.organization_manager.get_default_organization_async()
    assert default_org.name == DEFAULT_ORG_NAME
@pytest.mark.asyncio
async def test_update_organization_name(server: SyncServer):
    """Renaming an organization by id should return the updated name."""
    original_name = "a"
    renamed = "b"
    org = await server.organization_manager.create_organization_async(pydantic_org=PydanticOrganization(name=original_name))
    assert org.name == original_name
    # rename and confirm the manager echoes the new name back
    org = await server.organization_manager.update_organization_name_using_id_async(org_id=org.id, name=renamed)
    assert org.name == renamed
@pytest.mark.asyncio
async def test_update_organization_privileged_tools(server: SyncServer):
    """Toggling privileged_tools via OrganizationUpdate should be reflected on the org.

    Fixed non-idiomatic ``== False`` / ``== True`` comparisons (PEP 8 / E712):
    booleans are asserted directly.
    """
    org_name = "test"
    org = await server.organization_manager.create_organization_async(pydantic_org=PydanticOrganization(name=org_name))
    # new organizations start without privileged tools
    assert not org.privileged_tools
    org = await server.organization_manager.update_organization_async(org_id=org.id, org_update=OrganizationUpdate(privileged_tools=True))
    # the flag flips after the update
    assert org.privileged_tools
@pytest.mark.asyncio
async def test_list_organizations_pagination(server: SyncServer):
    """Cursor-based pagination visits each org exactly once, then runs dry."""
    await server.organization_manager.create_organization_async(pydantic_org=PydanticOrganization(name="a"))
    await server.organization_manager.create_organization_async(pydantic_org=PydanticOrganization(name="b"))
    first_page = await server.organization_manager.list_organizations_async(limit=1)
    assert len(first_page) == 1
    second_page = await server.organization_manager.list_organizations_async(after=first_page[0].id, limit=1)
    assert len(second_page) == 1
    # the cursor must not return the same org twice
    assert second_page[0].name != first_page[0].name
    # paging past the last org yields an empty result
    third_page = await server.organization_manager.list_organizations_async(after=second_page[0].id, limit=1)
    assert len(third_page) == 0
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/managers/test_organization_manager.py",
"license": "Apache License 2.0",
"lines": 53,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/managers/test_run_manager.py | import uuid
from datetime import datetime, timedelta, timezone
from unittest.mock import AsyncMock, patch
import pytest
# Import shared fixtures and constants from conftest
from openai.types.chat.chat_completion_message_tool_call import ChatCompletionMessageToolCall as OpenAIToolCall, Function as OpenAIFunction
from letta.errors import LettaInvalidArgumentError
from letta.orm.errors import NoResultFound
from letta.schemas.enums import (
MessageRole,
RunStatus,
)
from letta.schemas.job import LettaRequestConfig
from letta.schemas.letta_message_content import TextContent
from letta.schemas.letta_stop_reason import StopReasonType
from letta.schemas.message import Message, Message as PydanticMessage, ToolReturn
from letta.schemas.openai.chat_completion_response import UsageStatistics
from letta.schemas.run import Run as PydanticRun, RunUpdate
from letta.schemas.user import User as PydanticUser
from letta.server.server import SyncServer
from letta.services.step_manager import FeedbackType
# ======================================================================================================================
# RunManager Tests
# ======================================================================================================================
@pytest.mark.asyncio
async def test_create_run(server: SyncServer, sarah_agent, default_user):
    """Test creating a run."""
    created = await server.run_manager.create_run(
        pydantic_run=PydanticRun(metadata={"type": "test"}, agent_id=sarah_agent.id),
        actor=default_user,
    )
    # the persisted run reflects what was requested plus server-side defaults
    assert created.agent_id == sarah_agent.id
    assert created.created_at
    assert created.status == RunStatus.created
    assert created.metadata == {"type": "test"}
@pytest.mark.asyncio
async def test_get_run_by_id(server: SyncServer, sarah_agent, default_user):
    """Test fetching a run by ID."""
    created = await server.run_manager.create_run(
        pydantic_run=PydanticRun(metadata={"type": "test"}, agent_id=sarah_agent.id),
        actor=default_user,
    )
    # a round trip through get_run_by_id returns the same run unchanged
    fetched = await server.run_manager.get_run_by_id(created.id, actor=default_user)
    assert fetched.id == created.id
    assert fetched.status == RunStatus.created
    assert fetched.metadata == {"type": "test"}
@pytest.mark.asyncio
async def test_list_runs(server: SyncServer, sarah_agent, default_user):
    """Test listing runs."""
    # create three runs with distinguishable metadata tags
    for i in range(3):
        await server.run_manager.create_run(
            pydantic_run=PydanticRun(metadata={"type": f"test-{i}"}, agent_id=sarah_agent.id),
            actor=default_user,
        )
    listed = await server.run_manager.list_runs(actor=default_user)
    # all three runs come back, each tied to the agent and tagged as expected
    assert len(listed) == 3
    assert all(r.agent_id == sarah_agent.id for r in listed)
    assert all(r.metadata["type"].startswith("test") for r in listed)
@pytest.mark.asyncio
async def test_list_runs_with_metadata(server: SyncServer, sarah_agent, default_user):
    """Status filtering returns only matching runs; no filter returns everything."""
    # create three runs, marking exactly the second one completed
    for i in range(3):
        run = await server.run_manager.create_run(pydantic_run=PydanticRun(agent_id=sarah_agent.id), actor=default_user)
        if i == 1:
            await server.run_manager.update_run_by_id_async(run.id, RunUpdate(status=RunStatus.completed), actor=default_user)
    completed_only = await server.run_manager.list_runs(actor=default_user, statuses=[RunStatus.completed])
    assert len(completed_only) == 1
    assert completed_only[0].status == RunStatus.completed
    # without a status filter all three runs come back
    assert len(await server.run_manager.list_runs(actor=default_user)) == 3
@pytest.mark.asyncio
async def test_update_run_by_id(server: SyncServer, sarah_agent, default_user):
    """Test updating a run by its ID."""
    created = await server.run_manager.create_run(
        pydantic_run=PydanticRun(metadata={"type": "test"}, agent_id=sarah_agent.id),
        actor=default_user,
    )
    # flipping the status to completed must be reflected in the returned run
    updated = await server.run_manager.update_run_by_id_async(created.id, RunUpdate(status=RunStatus.completed), actor=default_user)
    assert updated.status == RunStatus.completed
@pytest.mark.asyncio
async def test_update_run_metadata_persistence(server: SyncServer, sarah_agent, default_user):
    """Test that metadata is properly persisted when updating a run.

    Metadata updates must merge with (not overwrite) existing keys, and the
    merged result must survive a fresh fetch from the database.
    """
    # Create a run with initial metadata
    run_data = PydanticRun(
        metadata={"type": "test", "initial": "value"},
        agent_id=sarah_agent.id,
    )
    created_run = await server.run_manager.create_run(pydantic_run=run_data, actor=default_user)
    # Verify initial metadata
    assert created_run.metadata == {"type": "test", "initial": "value"}
    # Update the run with error metadata (simulating what happens in streaming service)
    error_data = {
        "error": {"type": "llm_timeout", "message": "The LLM request timed out. Please try again.", "detail": "Timeout after 30s"}
    }
    updated_run = await server.run_manager.update_run_by_id_async(
        created_run.id,
        RunUpdate(status=RunStatus.failed, stop_reason=StopReasonType.llm_api_error, metadata=error_data),
        actor=default_user,
    )
    # Verify metadata was properly updated (metadata should merge, not overwrite)
    assert updated_run.status == RunStatus.failed
    assert updated_run.stop_reason == StopReasonType.llm_api_error
    assert updated_run.metadata["type"] == "test"
    assert updated_run.metadata["initial"] == "value"
    assert "error" in updated_run.metadata
    assert updated_run.metadata["error"]["type"] == "llm_timeout"
    # Fetch the run again to ensure it's persisted in DB
    fetched_run = await server.run_manager.get_run_by_id(created_run.id, actor=default_user)
    assert fetched_run.metadata["type"] == "test"
    assert fetched_run.metadata["initial"] == "value"
    assert "error" in fetched_run.metadata
    assert fetched_run.metadata["error"]["type"] == "llm_timeout"
@pytest.mark.asyncio
async def test_update_run_updates_agent_last_stop_reason(server: SyncServer, sarah_agent, default_user):
    """Test that completing a run updates the agent's last_stop_reason."""
    # fetch the agent first (it starts with no last_stop_reason)
    await server.agent_manager.get_agent_by_id_async(agent_id=sarah_agent.id, actor=default_user)
    # complete a first run with end_turn and confirm it propagates to the agent
    first_run = await server.run_manager.create_run(pydantic_run=PydanticRun(agent_id=sarah_agent.id), actor=default_user)
    await server.run_manager.update_run_by_id_async(
        first_run.id, RunUpdate(status=RunStatus.completed, stop_reason=StopReasonType.end_turn), actor=default_user
    )
    agent_after_first = await server.agent_manager.get_agent_by_id_async(agent_id=sarah_agent.id, actor=default_user)
    assert agent_after_first.last_stop_reason == StopReasonType.end_turn
    # fail a second run with error and confirm the agent tracks the newest reason
    second_run = await server.run_manager.create_run(pydantic_run=PydanticRun(agent_id=sarah_agent.id), actor=default_user)
    await server.run_manager.update_run_by_id_async(
        second_run.id, RunUpdate(status=RunStatus.failed, stop_reason=StopReasonType.error), actor=default_user
    )
    agent_after_second = await server.agent_manager.get_agent_by_id_async(agent_id=sarah_agent.id, actor=default_user)
    assert agent_after_second.last_stop_reason == StopReasonType.error
@pytest.mark.asyncio
async def test_delete_run_by_id(server: SyncServer, sarah_agent, default_user):
    """Test deleting a run by its ID.

    Deleting must make a subsequent lookup raise NoResultFound and remove the
    run from listings.
    """
    # Create a run
    run_data = PydanticRun(
        metadata={"type": "test"},
        agent_id=sarah_agent.id,
    )
    created_run = await server.run_manager.create_run(pydantic_run=run_data, actor=default_user)
    # Delete the run
    await server.run_manager.delete_run(created_run.id, actor=default_user)
    # Fetching the deleted run must fail
    with pytest.raises(NoResultFound):
        await server.run_manager.get_run_by_id(created_run.id, actor=default_user)
    # List runs to ensure the run was deleted
    runs = await server.run_manager.list_runs(actor=default_user)
    assert len(runs) == 0
@pytest.mark.asyncio
async def test_update_run_auto_complete(server: SyncServer, default_user, sarah_agent):
    """Test that updating a run's status to 'completed' automatically sets completed_at."""
    created = await server.run_manager.create_run(
        pydantic_run=PydanticRun(metadata={"type": "test"}, agent_id=sarah_agent.id),
        actor=default_user,
    )
    # a fresh run has no completion timestamp yet
    assert created.completed_at is None
    updated = await server.run_manager.update_run_by_id_async(created.id, RunUpdate(status=RunStatus.completed), actor=default_user)
    # completing the run stamps completed_at automatically
    assert updated.completed_at is not None
    assert isinstance(updated.completed_at, datetime)
@pytest.mark.asyncio
async def test_get_run_not_found(server: SyncServer, default_user):
    """Test fetching a non-existent run."""
    # looking up an unknown id must raise rather than return None
    with pytest.raises(LettaInvalidArgumentError):
        await server.run_manager.get_run_by_id("nonexistent-id", actor=default_user)
@pytest.mark.asyncio
async def test_delete_run_not_found(server: SyncServer, default_user):
    """Test deleting a non-existent run."""
    # deleting an unknown id must raise rather than silently succeed
    with pytest.raises(LettaInvalidArgumentError):
        await server.run_manager.delete_run("nonexistent-id", actor=default_user)
@pytest.mark.asyncio
async def test_list_runs_pagination(server: SyncServer, sarah_agent, default_user):
    """Test listing runs with pagination.

    Covers limit-only listing, ascending/descending ordering, and the
    "before" cursor in descending order (the UI's "Next" behavior), which
    previously returned overlapping pages.
    """
    # Create multiple runs
    for i in range(10):
        run_data = PydanticRun(agent_id=sarah_agent.id)
        await server.run_manager.create_run(pydantic_run=run_data, actor=default_user)
    # List runs with a limit
    runs = await server.run_manager.list_runs(actor=default_user, limit=5)
    assert len(runs) == 5
    assert all(run.agent_id == sarah_agent.id for run in runs)
    # Test cursor-based pagination
    first_page = await server.run_manager.list_runs(actor=default_user, limit=3, ascending=True)
    assert len(first_page) == 3
    assert first_page[0].created_at <= first_page[1].created_at <= first_page[2].created_at
    last_page = await server.run_manager.list_runs(actor=default_user, limit=3, ascending=False)
    assert len(last_page) == 3
    assert last_page[0].created_at >= last_page[1].created_at >= last_page[2].created_at
    first_page_ids = set(run.id for run in first_page)
    last_page_ids = set(run.id for run in last_page)
    assert first_page_ids.isdisjoint(last_page_ids)
    # Test pagination with "before" cursor in descending order (UI's default behavior)
    # This is the critical scenario that was broken - clicking "Next" in the UI
    second_page_desc = await server.run_manager.list_runs(
        actor=default_user,
        before=last_page[-1].id,  # Use last (oldest) item from first page as cursor
        limit=3,
        ascending=False,
    )
    assert len(second_page_desc) == 3
    # CRITICAL: Verify no overlap with first page (this was the bug - there was overlap before)
    second_page_desc_ids = set(run.id for run in second_page_desc)
    assert second_page_desc_ids.isdisjoint(last_page_ids), "Second page should not overlap with first page"
    # Verify descending order is maintained
    assert second_page_desc[0].created_at >= second_page_desc[1].created_at >= second_page_desc[2].created_at
    # Verify second page contains older items than first page
    assert second_page_desc[0].created_at < last_page[-1].created_at
@pytest.mark.asyncio
async def test_list_runs_by_status(server: SyncServer, default_user, sarah_agent):
    """Test listing runs filtered by status."""
    # one run per status of interest, tagged through metadata for identification
    status_to_tag = {
        RunStatus.created: "test-created",
        RunStatus.running: "test-running",
        RunStatus.completed: "test-completed",
    }
    for status, tag in status_to_tag.items():
        await server.run_manager.create_run(
            pydantic_run=PydanticRun(status=status, metadata={"type": tag}, agent_id=sarah_agent.id),
            actor=default_user,
        )
    # each status filter must return exactly its own run
    for status, tag in status_to_tag.items():
        matches = await server.run_manager.list_runs(actor=default_user, statuses=[status])
        assert len(matches) == 1
        assert matches[0].metadata["type"] == tag
@pytest.mark.asyncio
async def test_list_runs_by_stop_reason(server: SyncServer, sarah_agent, default_user):
    """Test listing runs filtered by stop reason, background flag, and agent id.

    Fixed the non-idiomatic ``== True`` boolean comparison (PEP 8 / E712).
    """
    run_pydantic = PydanticRun(
        agent_id=sarah_agent.id,
        stop_reason=StopReasonType.requires_approval,
        background=True,
    )
    run = await server.run_manager.create_run(pydantic_run=run_pydantic, actor=default_user)
    assert run.stop_reason == StopReasonType.requires_approval
    assert run.background
    assert run.agent_id == sarah_agent.id
    # list runs by stop reason
    runs = await server.run_manager.list_runs(actor=default_user, stop_reason=StopReasonType.requires_approval)
    assert len(runs) == 1
    assert runs[0].id == run.id
    # list runs by background
    runs = await server.run_manager.list_runs(actor=default_user, background=True)
    assert len(runs) == 1
    assert runs[0].id == run.id
    # list runs by agent_id
    runs = await server.run_manager.list_runs(actor=default_user, agent_ids=[sarah_agent.id])
    assert len(runs) == 1
    assert runs[0].id == run.id
@pytest.mark.asyncio
async def test_list_runs_by_tools_used(server: SyncServer, sarah_agent, default_user):
    """Test listing runs filtered by tools used.

    Creates one run per tool (web_search / run_code), attaches a tool-call
    message to each, then completes both runs so tools_used is populated
    before exercising single- and multi-tool filters.
    """
    # Seed tools first
    from letta.services.tool_manager import ToolManager
    tool_manager = ToolManager()
    await tool_manager.upsert_base_tools_async(default_user)
    web_search_tool_id = await tool_manager.get_tool_id_by_name_async("web_search", default_user)
    run_code_tool_id = await tool_manager.get_tool_id_by_name_async("run_code", default_user)
    if not web_search_tool_id or not run_code_tool_id:
        pytest.skip("Required tools (web_search, run_code) are not available in the database")
    # Create run with web_search tool
    run_web = await server.run_manager.create_run(
        pydantic_run=PydanticRun(agent_id=sarah_agent.id),
        actor=default_user,
    )
    await server.message_manager.create_many_messages_async(
        [
            PydanticMessage(
                agent_id=sarah_agent.id,
                role=MessageRole.assistant,
                content=[TextContent(text="Using web search")],
                tool_calls=[
                    OpenAIToolCall(
                        id="call_web",
                        type="function",
                        function=OpenAIFunction(name="web_search", arguments="{}"),
                    )
                ],
                run_id=run_web.id,
            )
        ],
        actor=default_user,
    )
    # Create run with run_code tool
    run_code = await server.run_manager.create_run(
        pydantic_run=PydanticRun(agent_id=sarah_agent.id),
        actor=default_user,
    )
    await server.message_manager.create_many_messages_async(
        [
            PydanticMessage(
                agent_id=sarah_agent.id,
                role=MessageRole.assistant,
                content=[TextContent(text="Using run code")],
                tool_calls=[
                    OpenAIToolCall(
                        id="call_code",
                        type="function",
                        function=OpenAIFunction(name="run_code", arguments="{}"),
                    )
                ],
                run_id=run_code.id,
            )
        ],
        actor=default_user,
    )
    # Complete runs to populate tools_used
    await server.run_manager.update_run_by_id_async(
        run_web.id, RunUpdate(status=RunStatus.completed, stop_reason=StopReasonType.end_turn), actor=default_user
    )
    await server.run_manager.update_run_by_id_async(
        run_code.id, RunUpdate(status=RunStatus.completed, stop_reason=StopReasonType.end_turn), actor=default_user
    )
    # Test filtering by single tool
    runs_web = await server.run_manager.list_runs(
        actor=default_user,
        agent_id=sarah_agent.id,
        tools_used=[web_search_tool_id],
    )
    assert len(runs_web) == 1
    assert runs_web[0].id == run_web.id
    # Test filtering by multiple tools
    runs_multi = await server.run_manager.list_runs(
        actor=default_user,
        agent_id=sarah_agent.id,
        tools_used=[web_search_tool_id, run_code_tool_id],
    )
    assert len(runs_multi) == 2
    assert {r.id for r in runs_multi} == {run_web.id, run_code.id}
@pytest.mark.asyncio
async def test_list_runs_by_step_count(server: SyncServer, sarah_agent, default_user):
    """Test listing runs filtered by step count.

    Builds three runs with 0, 2, and 5 logged steps, completes each run so
    its metrics (including num_steps) are finalized, then exercises the EQ,
    GTE, and LTE comparison operators of ``list_runs``.
    """
    from letta.schemas.enums import ComparisonOperator

    # Create runs with different numbers of steps
    runs_data = []
    # Run with 0 steps
    run_0 = await server.run_manager.create_run(
        pydantic_run=PydanticRun(
            agent_id=sarah_agent.id,
            metadata={"steps": 0},
        ),
        actor=default_user,
    )
    runs_data.append((run_0, 0))
    # Run with 2 steps
    run_2 = await server.run_manager.create_run(
        pydantic_run=PydanticRun(
            agent_id=sarah_agent.id,
            metadata={"steps": 2},
        ),
        actor=default_user,
    )
    for i in range(2):
        await server.step_manager.log_step_async(
            agent_id=sarah_agent.id,
            provider_name="openai",
            provider_category="base",
            model="gpt-4o-mini",
            model_endpoint="https://api.openai.com/v1",
            context_window_limit=8192,
            usage=UsageStatistics(
                completion_tokens=100,
                prompt_tokens=50,
                total_tokens=150,
            ),
            run_id=run_2.id,
            actor=default_user,
            project_id=sarah_agent.project_id,
        )
    runs_data.append((run_2, 2))
    # Run with 5 steps
    run_5 = await server.run_manager.create_run(
        pydantic_run=PydanticRun(
            agent_id=sarah_agent.id,
            metadata={"steps": 5},
        ),
        actor=default_user,
    )
    for i in range(5):
        await server.step_manager.log_step_async(
            agent_id=sarah_agent.id,
            provider_name="openai",
            provider_category="base",
            model="gpt-4o-mini",
            model_endpoint="https://api.openai.com/v1",
            context_window_limit=8192,
            usage=UsageStatistics(
                completion_tokens=100,
                prompt_tokens=50,
                total_tokens=150,
            ),
            run_id=run_5.id,
            actor=default_user,
            project_id=sarah_agent.project_id,
        )
    runs_data.append((run_5, 5))
    # Update all runs to trigger metrics update; steps are logged above
    # BEFORE the terminal update so the completed metrics pick them up.
    for run, _ in runs_data:
        await server.run_manager.update_run_by_id_async(
            run.id,
            RunUpdate(status=RunStatus.completed, stop_reason=StopReasonType.end_turn),
            actor=default_user,
        )
    # Test EQ operator - exact match (only run_2 has exactly 2 steps)
    runs_eq_2 = await server.run_manager.list_runs(
        actor=default_user,
        agent_id=sarah_agent.id,
        step_count=2,
        step_count_operator=ComparisonOperator.EQ,
    )
    assert len(runs_eq_2) == 1
    assert runs_eq_2[0].id == run_2.id
    # Test GTE operator - greater than or equal (run_2 and run_5)
    runs_gte_2 = await server.run_manager.list_runs(
        actor=default_user,
        agent_id=sarah_agent.id,
        step_count=2,
        step_count_operator=ComparisonOperator.GTE,
    )
    assert len(runs_gte_2) == 2
    run_ids_gte = {run.id for run in runs_gte_2}
    assert run_2.id in run_ids_gte
    assert run_5.id in run_ids_gte
    # Test LTE operator - less than or equal (run_0 and run_2)
    runs_lte_2 = await server.run_manager.list_runs(
        actor=default_user,
        agent_id=sarah_agent.id,
        step_count=2,
        step_count_operator=ComparisonOperator.LTE,
    )
    assert len(runs_lte_2) == 2
    run_ids_lte = {run.id for run in runs_lte_2}
    assert run_0.id in run_ids_lte
    assert run_2.id in run_ids_lte
    # Test GTE with 0 - should return all runs
    runs_gte_0 = await server.run_manager.list_runs(
        actor=default_user,
        agent_id=sarah_agent.id,
        step_count=0,
        step_count_operator=ComparisonOperator.GTE,
    )
    assert len(runs_gte_0) == 3
    # Test LTE with 0 - should return only run with 0 steps
    runs_lte_0 = await server.run_manager.list_runs(
        actor=default_user,
        agent_id=sarah_agent.id,
        step_count=0,
        step_count_operator=ComparisonOperator.LTE,
    )
    assert len(runs_lte_0) == 1
    assert runs_lte_0[0].id == run_0.id
@pytest.mark.asyncio
async def test_list_runs_by_base_template_id(server: SyncServer, sarah_agent, default_user):
    """Listing runs filtered by template family returns the matching run."""
    family = "test-template-family"
    # Create a single run tagged with the template family.
    await server.run_manager.create_run(
        pydantic_run=PydanticRun(agent_id=sarah_agent.id, base_template_id=family),
        actor=default_user,
    )
    # Querying by that family should surface exactly one run.
    matching = await server.run_manager.list_runs(actor=default_user, template_family=family)
    assert len(matching) == 1
@pytest.mark.asyncio
async def test_e2e_run_callback(monkeypatch, server: SyncServer, default_user, sarah_agent):
    """Test that run callbacks are properly dispatched when a run is completed.

    Patches the AsyncClient used by the run manager so the callback POST is
    captured in-process, then verifies the payload, the completed_at
    timestamp, and the recorded callback status code.

    Fix: the ``@pytest.mark.asyncio`` decorator was missing; every other
    async test in this module carries it, and without it the coroutine test
    is not awaited under pytest-asyncio strict mode.
    """
    captured = {}

    # Create a simple mock for the async HTTP client
    class MockAsyncResponse:
        status_code = 202

    async def mock_post(url, json, timeout):
        captured["url"] = url
        captured["json"] = json
        return MockAsyncResponse()

    class MockAsyncClient:
        async def __aenter__(self):
            return self

        async def __aexit__(self, *args):
            pass

        async def post(self, url, json, timeout):
            return await mock_post(url, json, timeout)

    # Patch the AsyncClient in the run_manager module
    import letta.services.run_manager as run_manager_module

    monkeypatch.setattr(run_manager_module, "AsyncClient", MockAsyncClient)
    run_in = PydanticRun(
        status=RunStatus.created, metadata={"foo": "bar"}, agent_id=sarah_agent.id, callback_url="http://example.test/webhook/runs"
    )
    created = await server.run_manager.create_run(pydantic_run=run_in, actor=default_user)
    assert created.callback_url == "http://example.test/webhook/runs"
    # Update the run status to completed, which should trigger the callback
    updated = await server.run_manager.update_run_by_id_async(
        created.id, RunUpdate(status=RunStatus.completed, stop_reason=StopReasonType.end_turn), actor=default_user
    )
    # Verify the callback was triggered with the correct parameters
    assert captured["url"] == created.callback_url, "Callback URL doesn't match"
    assert captured["json"]["run_id"] == created.id, "Run ID in callback doesn't match"
    assert captured["json"]["status"] == RunStatus.completed.value, "Run status in callback doesn't match"
    # Verify the completed_at timestamp is reasonable.
    # Strip the timezone from the parsed callback timestamp so it can be
    # subtracted from updated.completed_at (naive here — TODO confirm the DB
    # returns a naive datetime for completed_at).
    actual_dt = datetime.fromisoformat(captured["json"]["completed_at"]).replace(tzinfo=None)
    assert abs((actual_dt - updated.completed_at).total_seconds()) < 1, "Timestamp difference is too large"
    assert isinstance(updated.callback_sent_at, datetime)
    assert updated.callback_status_code == 202
@pytest.mark.asyncio
async def test_run_callback_only_on_terminal_status(server: SyncServer, sarah_agent, default_user, monkeypatch):
    """
    Regression: ensure a non-terminal update (running) does NOT set completed_at or trigger callback,
    and that a subsequent terminal update (completed) does trigger the callback exactly once.
    """
    # Record every callback POST so invocations can be counted and inspected.
    calls = []

    class _FakeResponse:
        status_code = 202

    class _FakeAsyncClient:
        async def __aenter__(self):
            return self

        async def __aexit__(self, *exc_info):
            pass

        async def post(self, url, json, timeout):
            calls.append({"url": url, "json": json})
            return _FakeResponse()

    # Swap the run manager's HTTP client for the fake one.
    import letta.services.run_manager as run_manager_module

    monkeypatch.setattr(run_manager_module, "AsyncClient", _FakeAsyncClient)

    # Create a run carrying a callback URL.
    created = await server.run_manager.create_run(
        pydantic_run=PydanticRun(
            status=RunStatus.created,
            metadata={"foo": "bar"},
            agent_id=sarah_agent.id,
            callback_url="http://example.test/webhook/runs",
        ),
        actor=default_user,
    )
    assert created.callback_url == "http://example.test/webhook/runs"

    # 1) Non-terminal update (running): no completed_at, no callback fired.
    updated_running = await server.run_manager.update_run_by_id_async(created.id, RunUpdate(status=RunStatus.running), actor=default_user)
    assert updated_running.completed_at is None
    assert len(calls) == 0

    # 2) Terminal update (completed): exactly one callback with the expected payload.
    updated_completed = await server.run_manager.update_run_by_id_async(
        created.id, RunUpdate(status=RunStatus.completed, stop_reason=StopReasonType.end_turn), actor=default_user
    )
    assert len(calls) == 1
    assert calls[0]["url"] == created.callback_url
    assert calls[0]["json"]["run_id"] == created.id
    assert calls[0]["json"]["status"] == RunStatus.completed.value

    # completed_at should be set and align closely with the callback payload.
    assert updated_completed.completed_at is not None
    payload_dt = datetime.fromisoformat(calls[0]["json"]["completed_at"]).replace(tzinfo=None)
    assert abs((payload_dt - updated_completed.completed_at).total_seconds()) < 1
    assert isinstance(updated_completed.callback_sent_at, datetime)
    assert updated_completed.callback_status_code == 202
# ======================================================================================================================
# RunManager Tests - Messages
# ======================================================================================================================
@pytest.mark.asyncio
async def test_run_messages_pagination(server: SyncServer, default_run, default_user, sarah_agent):
    """Test pagination of run messages.

    Creates five messages M0..M4 on a fresh run, then verifies limit-based
    paging, ascending/descending cursor paging, combined before/after
    windows, and one-sided ``before`` cursors in both orders.
    """
    # create the run
    run_pydantic = PydanticRun(
        agent_id=sarah_agent.id,
        status=RunStatus.created,
        metadata={"foo": "bar"},
    )
    run = await server.run_manager.create_run(pydantic_run=run_pydantic, actor=default_user)
    assert run.status == RunStatus.created
    # Create multiple messages, remembering their ids in insertion order
    message_ids = []
    for i in range(5):
        message = PydanticMessage(
            agent_id=sarah_agent.id,
            role=MessageRole.user,
            content=[TextContent(text=f"Test message {i}")],
            run_id=run.id,
        )
        msg = await server.message_manager.create_many_messages_async([message], actor=default_user)
        message_ids.append(msg[0].id)
    # Test pagination with limit (default order yields the oldest two)
    messages = await server.message_manager.list_messages(
        run_id=run.id,
        actor=default_user,
        limit=2,
    )
    assert len(messages) == 2
    assert messages[0].id == message_ids[0]
    assert messages[1].id == message_ids[1]
    # Test pagination with cursor
    first_page = await server.message_manager.list_messages(
        run_id=run.id,
        actor=default_user,
        limit=2,
        ascending=True,  # [M0, M1]
    )
    assert len(first_page) == 2
    assert first_page[0].id == message_ids[0]
    assert first_page[1].id == message_ids[1]
    assert first_page[0].created_at <= first_page[1].created_at
    last_page = await server.message_manager.list_messages(
        run_id=run.id,
        actor=default_user,
        limit=2,
        ascending=False,  # [M4, M3]
    )
    assert len(last_page) == 2
    assert last_page[0].id == message_ids[4]
    assert last_page[1].id == message_ids[3]
    assert last_page[0].created_at >= last_page[1].created_at
    first_page_ids = set(msg.id for msg in first_page)
    last_page_ids = set(msg.id for msg in last_page)
    assert first_page_ids.isdisjoint(last_page_ids)
    # Test middle page using both before and after
    middle_page = await server.message_manager.list_messages(
        run_id=run.id,
        actor=default_user,
        before=last_page[-1].id,  # M3
        after=first_page[0].id,  # M0
        ascending=True,  # [M1, M2]
    )
    assert len(middle_page) == 2  # Should include message between first and last pages
    assert middle_page[0].id == message_ids[1]
    assert middle_page[1].id == message_ids[2]
    head_tail_msgs = first_page_ids.union(last_page_ids)
    assert middle_page[1].id not in head_tail_msgs
    assert middle_page[0].id in first_page_ids
    # Test descending order for middle page
    middle_page = await server.message_manager.list_messages(
        run_id=run.id,
        actor=default_user,
        before=last_page[-1].id,  # M3
        after=first_page[0].id,  # M0
        ascending=False,  # [M2, M1]
    )
    assert len(middle_page) == 2  # Should include message between first and last pages
    assert middle_page[0].id == message_ids[2]
    assert middle_page[1].id == message_ids[1]
    # Test getting earliest messages
    msg_3 = last_page[-1].id
    earliest_msgs = await server.message_manager.list_messages(
        run_id=run.id,
        actor=default_user,
        ascending=False,
        before=msg_3,  # Get messages before M3 in descending order
    )
    assert len(earliest_msgs) == 3  # Should get M2, M1, M0
    assert all(m.id not in last_page_ids for m in earliest_msgs)
    assert earliest_msgs[0].created_at > earliest_msgs[1].created_at > earliest_msgs[2].created_at
    # Test getting earliest messages with ascending order
    earliest_msgs_ascending = await server.message_manager.list_messages(
        run_id=run.id,
        actor=default_user,
        ascending=True,
        before=msg_3,  # Get messages before M3 in ascending order
    )
    assert len(earliest_msgs_ascending) == 3  # Should get M0, M1, M2
    assert all(m.id not in last_page_ids for m in earliest_msgs_ascending)
    assert earliest_msgs_ascending[0].created_at < earliest_msgs_ascending[1].created_at < earliest_msgs_ascending[2].created_at
@pytest.mark.asyncio
async def test_run_messages_ordering(server: SyncServer, default_run, default_user, sarah_agent):
    """Messages attached to a run come back sorted by created_at."""
    # Three timestamps one minute apart, oldest first.
    now = datetime.now(timezone.utc)
    stamps = [now - timedelta(minutes=2), now - timedelta(minutes=1), now]
    # create the run
    run = await server.run_manager.create_run(
        pydantic_run=PydanticRun(agent_id=sarah_agent.id),
        actor=default_user,
    )
    assert run.status == RunStatus.created
    # Insert one message per timestamp.
    for stamp in stamps:
        await server.message_manager.create_many_messages_async(
            [
                PydanticMessage(
                    role=MessageRole.user,
                    content=[TextContent(text="Test message")],
                    agent_id=sarah_agent.id,
                    created_at=stamp,
                    run_id=run.id,
                )
            ],
            actor=default_user,
        )
    # Default listing is chronological (ascending).
    listed = await server.message_manager.list_messages(
        run_id=run.id,
        actor=default_user,
    )
    assert len(listed) == 3
    assert listed[0].created_at < listed[1].created_at
    assert listed[1].created_at < listed[2].created_at
    # Explicit descending order reverses that.
    listed = await server.message_manager.list_messages(
        run_id=run.id,
        actor=default_user,
        ascending=False,
    )
    assert len(listed) == 3
    assert listed[0].created_at > listed[1].created_at
    assert listed[1].created_at > listed[2].created_at
@pytest.mark.asyncio
async def test_job_messages_empty(server: SyncServer, default_run, default_user):
    """A run with no messages yields an empty list."""
    fetched = await server.message_manager.list_messages(run_id=default_run.id, actor=default_user)
    assert len(fetched) == 0
@pytest.mark.asyncio
async def test_job_messages_filter(server: SyncServer, default_run, default_user, sarah_agent):
    """Test getting messages associated with a job.

    Attaches three messages (user, assistant, assistant-with-tool-call) to
    ``default_run`` and verifies full listing, role filtering, and limits.
    """
    # Create the run
    # NOTE(review): this freshly created run is never used below — all
    # messages and queries target default_run.id. Possibly the messages were
    # intended to use run.id; confirm intent before removing.
    run_pydantic = PydanticRun(
        agent_id=sarah_agent.id,
    )
    run = await server.run_manager.create_run(pydantic_run=run_pydantic, actor=default_user)
    assert run.status == RunStatus.created
    # Create test messages with different roles and tool calls
    messages = [
        PydanticMessage(
            role=MessageRole.user,
            content=[TextContent(text="Hello")],
            agent_id=sarah_agent.id,
            run_id=default_run.id,
        ),
        PydanticMessage(
            role=MessageRole.assistant,
            content=[TextContent(text="Hi there!")],
            agent_id=sarah_agent.id,
            run_id=default_run.id,
        ),
        PydanticMessage(
            role=MessageRole.assistant,
            content=[TextContent(text="Let me help you with that")],
            agent_id=sarah_agent.id,
            tool_calls=[
                OpenAIToolCall(
                    id="call_1",
                    type="function",
                    function=OpenAIFunction(
                        name="test_tool",
                        arguments='{"arg1": "value1"}',
                    ),
                )
            ],
            run_id=default_run.id,
        ),
    ]
    await server.message_manager.create_many_messages_async(messages, actor=default_user)
    # Test getting all messages
    all_messages = await server.message_manager.list_messages(
        run_id=default_run.id,
        actor=default_user,
    )
    assert len(all_messages) == 3
    # Test filtering by role (only the single user message should match)
    user_messages = await server.message_manager.list_messages(run_id=default_run.id, actor=default_user, roles=[MessageRole.user])
    assert len(user_messages) == 1
    assert user_messages[0].role == MessageRole.user
    # Test limit
    limited_messages = await server.message_manager.list_messages(run_id=default_run.id, actor=default_user, limit=2)
    assert len(limited_messages) == 2
@pytest.mark.asyncio
async def test_get_run_messages(server: SyncServer, default_user: PydanticUser, sarah_agent):
    """Test getting messages for a run with request config.

    With ``use_assistant_message=False`` the assistant tool-call messages are
    NOT collapsed into assistant messages: converting the four stored
    messages yields six Letta messages including explicit tool_call and
    reasoning messages.
    """
    # Create a run with custom request config
    run = await server.run_manager.create_run(
        pydantic_run=PydanticRun(
            agent_id=sarah_agent.id,
            status=RunStatus.created,
            request_config=LettaRequestConfig(
                use_assistant_message=False, assistant_message_tool_name="custom_tool", assistant_message_tool_kwarg="custom_arg"
            ),
        ),
        actor=default_user,
    )
    # Add some messages: even i -> tool return, odd i -> assistant tool call,
    # paired via a shared call_{i // 2} id.
    messages = []
    for i in range(4):
        if i % 2 == 0:
            # tool return message
            messages.append(
                PydanticMessage(
                    agent_id=sarah_agent.id,
                    role=MessageRole.tool,
                    content=[TextContent(text='{"status": "OK"}')],
                    tool_call_id=f"call_{i // 2}",
                    tool_returns=[
                        ToolReturn(
                            tool_call_id=f"call_{i // 2}",
                            status="success",
                            func_response='{"status": "OK", "message": "Tool executed successfully"}',
                        )
                    ],
                    run_id=run.id,
                )
            )
        else:
            # assistant message with tool call
            messages.append(
                PydanticMessage(
                    agent_id=sarah_agent.id,
                    role=MessageRole.assistant,
                    content=[TextContent(text=f"Test message {i}")],
                    tool_calls=[
                        {
                            "type": "function",
                            "id": f"call_{i // 2}",
                            "function": {"name": "custom_tool", "arguments": '{"custom_arg": "test"}'},
                        }
                    ],
                    run_id=run.id,
                )
            )
    await server.message_manager.create_many_messages_async(messages, actor=default_user)
    # Get messages and verify they're converted correctly
    result = await server.message_manager.list_messages(run_id=run.id, actor=default_user)
    result = Message.to_letta_messages_from_list(result)
    # Verify correct number of messages. Assistant messages should be parsed
    assert len(result) == 6
    # Verify assistant messages are parsed according to request config
    tool_call_messages = [msg for msg in result if msg.message_type == "tool_call_message"]
    reasoning_messages = [msg for msg in result if msg.message_type == "reasoning_message"]
    assert len(tool_call_messages) == 2
    assert len(reasoning_messages) == 2
    for msg in tool_call_messages:
        assert msg.tool_call is not None
        assert msg.tool_call.name == "custom_tool"
@pytest.mark.asyncio
async def test_get_run_messages_with_assistant_message(server: SyncServer, default_user: PydanticUser, sarah_agent):
    """Test getting messages for a run with request config.

    With ``use_assistant_message=True`` and a matching tool name/kwarg, the
    assistant tool calls to ``custom_tool`` ARE collapsed into assistant
    messages: converting the four stored messages yields four Letta messages
    (two assistant, two reasoning).
    """
    # Create a run with custom request config
    run = await server.run_manager.create_run(
        pydantic_run=PydanticRun(
            agent_id=sarah_agent.id,
            status=RunStatus.created,
            request_config=LettaRequestConfig(
                use_assistant_message=True, assistant_message_tool_name="custom_tool", assistant_message_tool_kwarg="custom_arg"
            ),
        ),
        actor=default_user,
    )
    # Add some messages: even i -> tool return, odd i -> assistant tool call,
    # paired via a shared call_{i // 2} id.
    messages = []
    for i in range(4):
        if i % 2 == 0:
            # tool return message
            messages.append(
                PydanticMessage(
                    agent_id=sarah_agent.id,
                    role=MessageRole.tool,
                    content=[TextContent(text='{"status": "OK"}')],
                    tool_call_id=f"call_{i // 2}",
                    tool_returns=[
                        ToolReturn(
                            tool_call_id=f"call_{i // 2}",
                            status="success",
                            func_response='{"status": "OK", "message": "Tool executed successfully"}',
                        )
                    ],
                    run_id=run.id,
                )
            )
        else:
            # assistant message with tool call
            messages.append(
                PydanticMessage(
                    agent_id=sarah_agent.id,
                    role=MessageRole.assistant,
                    content=[TextContent(text=f"Test message {i}")],
                    tool_calls=[
                        {
                            "type": "function",
                            "id": f"call_{i // 2}",
                            "function": {"name": "custom_tool", "arguments": '{"custom_arg": "test"}'},
                        }
                    ],
                    run_id=run.id,
                )
            )
    await server.message_manager.create_many_messages_async(messages, actor=default_user)
    # Get messages and verify they're converted correctly
    result = await server.message_manager.list_messages(run_id=run.id, actor=default_user)
    result = Message.to_letta_messages_from_list(
        result, assistant_message_tool_name="custom_tool", assistant_message_tool_kwarg="custom_arg"
    )
    # Verify correct number of messages. Assistant messages should be parsed
    assert len(result) == 4
    # Verify assistant messages are parsed according to request config
    assistant_messages = [msg for msg in result if msg.message_type == "assistant_message"]
    reasoning_messages = [msg for msg in result if msg.message_type == "reasoning_message"]
    assert len(assistant_messages) == 2
    assert len(reasoning_messages) == 2
    # Collapsed assistant content comes from the tool call's custom_arg value
    for msg in assistant_messages:
        assert msg.content == "test"
    # The original assistant text is surfaced as reasoning
    for msg in reasoning_messages:
        assert "Test message" in msg.reasoning
# ======================================================================================================================
# RunManager Tests - Usage Statistics
# ======================================================================================================================
@pytest.mark.asyncio
async def test_run_usage_stats_add_and_get(server: SyncServer, sarah_agent, default_run, default_user):
    """Logging a single step records usage stats retrievable via the run."""
    # Log one step with known token counts against the default run.
    await server.step_manager.log_step_async(
        agent_id=sarah_agent.id,
        provider_name="openai",
        provider_category="base",
        model="gpt-4o-mini",
        model_endpoint="https://api.openai.com/v1",
        context_window_limit=8192,
        run_id=default_run.id,
        usage=UsageStatistics(
            completion_tokens=100,
            prompt_tokens=50,
            total_tokens=150,
        ),
        actor=default_user,
        project_id=sarah_agent.project_id,
    )
    # The run-level usage should reflect exactly that single step.
    usage = await server.run_manager.get_run_usage(run_id=default_run.id, actor=default_user)
    assert usage.completion_tokens == 100
    assert usage.prompt_tokens == 50
    assert usage.total_tokens == 150
    # Exactly one step should be listed for the run.
    steps = await server.step_manager.list_steps_async(run_id=default_run.id, actor=default_user)
    assert len(steps) == 1
@pytest.mark.asyncio
async def test_run_usage_stats_get_no_stats(server: SyncServer, default_run, default_user):
    """A run with no logged steps reports zeroed usage and an empty step list."""
    usage = await server.run_manager.get_run_usage(run_id=default_run.id, actor=default_user)
    # All token counters default to zero when nothing has been logged.
    assert usage.completion_tokens == 0
    assert usage.prompt_tokens == 0
    assert usage.total_tokens == 0
    # And the run has no steps at all.
    steps = await server.step_manager.list_steps_async(run_id=default_run.id, actor=default_user)
    assert len(steps) == 0
@pytest.mark.asyncio
async def test_run_usage_stats_add_multiple(server: SyncServer, sarah_agent, default_run, default_user):
    """Test adding multiple usage statistics entries for a job.

    Logs two steps against the same run and verifies that the reported usage
    is the cumulative total across steps, that step listing works for both
    the run and the agent, and that ``has_feedback`` filtering behaves as
    expected.

    Fixes: comments previously claimed usage returns "the latest entry" —
    the assertions show it is cumulative (300 = 100+200, etc.); the local
    ``steps_without_feedback`` was misleadingly named (no feedback filter is
    applied — it lists ALL steps); removed a redundant re-assignment of
    ``step_manager``.
    """
    run_manager = server.run_manager
    step_manager = server.step_manager
    # Add first usage statistics entry
    await step_manager.log_step_async(
        agent_id=sarah_agent.id,
        provider_name="openai",
        provider_category="base",
        model="gpt-4o-mini",
        model_endpoint="https://api.openai.com/v1",
        context_window_limit=8192,
        usage=UsageStatistics(
            completion_tokens=100,
            prompt_tokens=50,
            total_tokens=150,
        ),
        actor=default_user,
        project_id=sarah_agent.project_id,
        run_id=default_run.id,
    )
    # Add second usage statistics entry
    await step_manager.log_step_async(
        agent_id=sarah_agent.id,
        provider_name="openai",
        provider_category="base",
        model="gpt-4o-mini",
        model_endpoint="https://api.openai.com/v1",
        context_window_limit=8192,
        usage=UsageStatistics(
            completion_tokens=200,
            prompt_tokens=100,
            total_tokens=300,
        ),
        actor=default_user,
        project_id=sarah_agent.project_id,
        run_id=default_run.id,
    )
    # Get usage statistics (aggregated across both steps)
    usage_stats = await run_manager.get_run_usage(run_id=default_run.id, actor=default_user)
    # Verify totals are cumulative: 100+200, 50+100, 150+300
    assert usage_stats.completion_tokens == 300
    assert usage_stats.prompt_tokens == 150
    assert usage_stats.total_tokens == 450
    assert usage_stats.step_count == 2
    # get steps for the run
    steps = await step_manager.list_steps_async(run_id=default_run.id, actor=default_user)
    assert len(steps) == 2
    # get agent steps
    steps = await step_manager.list_steps_async(agent_id=sarah_agent.id, actor=default_user)
    assert len(steps) == 2
    # Add feedback to first step
    await step_manager.add_feedback_async(step_id=steps[0].id, feedback=FeedbackType.POSITIVE, actor=default_user)
    # has_feedback=True should match only the step that received feedback
    steps_with_feedback = await step_manager.list_steps_async(agent_id=sarah_agent.id, has_feedback=True, actor=default_user)
    assert len(steps_with_feedback) == 1
    # Without a feedback filter, all steps are returned regardless of feedback
    all_steps = await step_manager.list_steps_async(agent_id=sarah_agent.id, actor=default_user)
    assert len(all_steps) == 2
@pytest.mark.asyncio
async def test_run_usage_stats_get_nonexistent_run(server: SyncServer, default_user):
    """Requesting usage for an unknown run id raises LettaInvalidArgumentError."""
    with pytest.raises(LettaInvalidArgumentError):
        await server.run_manager.get_run_usage(run_id="nonexistent_run", actor=default_user)
@pytest.mark.asyncio
async def test_get_run_request_config(server: SyncServer, sarah_agent, default_user):
    """A run's stored request config round-trips through get_run_request_config."""
    config_in = LettaRequestConfig(
        use_assistant_message=True, assistant_message_tool_name="send_message", assistant_message_tool_kwarg="message"
    )
    # Persist a run carrying the config.
    run = await server.run_manager.create_run(
        pydantic_run=PydanticRun(agent_id=sarah_agent.id, request_config=config_in),
        actor=default_user,
    )
    config_out = await server.run_manager.get_run_request_config(run.id, actor=default_user)
    assert config_out is not None
    # Every field must match what was stored.
    assert config_out.use_assistant_message == config_in.use_assistant_message
    assert config_out.assistant_message_tool_name == config_in.assistant_message_tool_name
    assert config_out.assistant_message_tool_kwarg == config_in.assistant_message_tool_kwarg
@pytest.mark.asyncio
async def test_get_run_request_config_none(server: SyncServer, sarah_agent, default_user):
    """A run created without a request config yields None on retrieval."""
    run = await server.run_manager.create_run(pydantic_run=PydanticRun(agent_id=sarah_agent.id), actor=default_user)
    assert await server.run_manager.get_run_request_config(run.id, actor=default_user) is None
@pytest.mark.asyncio
async def test_get_run_request_config_nonexistent_run(server: SyncServer, default_user):
    """Fetching the request config of an unknown run id raises LettaInvalidArgumentError."""
    with pytest.raises(LettaInvalidArgumentError):
        await server.run_manager.get_run_request_config("nonexistent_run", actor=default_user)
# ======================================================================================================================
# RunManager Tests - Run Metrics
# ======================================================================================================================
@pytest.mark.asyncio
async def test_run_metrics_creation(server: SyncServer, sarah_agent, default_user):
    """Test that run metrics are created when a run is created.

    Metrics share the run's id and start with run_start_ns set, run_ns unset,
    and zero steps.
    """
    # Create a run
    run_data = PydanticRun(
        metadata={"type": "test_metrics"},
        agent_id=sarah_agent.id,
    )
    created_run = await server.run_manager.create_run(pydantic_run=run_data, actor=default_user)
    # Get the run metrics
    metrics = await server.run_manager.get_run_metrics_async(run_id=created_run.id, actor=default_user)
    # Assertions
    assert metrics is not None
    assert metrics.id == created_run.id  # metrics keyed by the run id
    assert metrics.agent_id == sarah_agent.id
    assert metrics.organization_id == default_user.organization_id
    # project_id may be None or set from the agent
    assert metrics.run_start_ns is not None
    assert metrics.run_start_ns > 0
    assert metrics.run_ns is None  # Should be None until run completes
    assert metrics.num_steps is not None
    assert metrics.num_steps == 0  # Should be 0 initially
@pytest.mark.asyncio
async def test_run_metrics_timestamp_tracking(server: SyncServer, sarah_agent, default_user):
    """run_start_ns falls between wall-clock readings taken around run creation."""
    import time

    # Bracket the creation call with nanosecond wall-clock readings.
    lower_ns = int(time.time() * 1e9)
    created_run = await server.run_manager.create_run(
        pydantic_run=PydanticRun(metadata={"type": "test_timestamp"}, agent_id=sarah_agent.id),
        actor=default_user,
    )
    upper_ns = int(time.time() * 1e9)
    # Fetch metrics and check the start time lies within the bracketing window.
    metrics = await server.run_manager.get_run_metrics_async(run_id=created_run.id, actor=default_user)
    assert metrics.run_start_ns is not None
    assert lower_ns <= metrics.run_start_ns <= upper_ns, f"Expected {lower_ns} <= {metrics.run_start_ns} <= {upper_ns}"
@pytest.mark.asyncio
async def test_run_metrics_duration_calculation(server: SyncServer, sarah_agent, default_user):
    """Test that run duration (run_ns) is calculated when run completes.

    run_ns stays None while the run is open; after a terminal update it must
    be positive and at least as long as the sleep inserted before completion.
    """
    import asyncio

    # Create a run
    run_data = PydanticRun(
        metadata={"type": "test_duration"},
        agent_id=sarah_agent.id,
    )
    created_run = await server.run_manager.create_run(pydantic_run=run_data, actor=default_user)
    # Get initial metrics
    initial_metrics = await server.run_manager.get_run_metrics_async(run_id=created_run.id, actor=default_user)
    assert initial_metrics.run_ns is None  # Should be None initially
    assert initial_metrics.run_start_ns is not None
    # Wait a bit to ensure there's measurable duration
    await asyncio.sleep(0.1)  # Wait 100ms
    # Update the run to completed
    await server.run_manager.update_run_by_id_async(
        created_run.id, RunUpdate(status=RunStatus.completed, stop_reason=StopReasonType.end_turn), actor=default_user
    )
    # Get updated metrics
    final_metrics = await server.run_manager.get_run_metrics_async(run_id=created_run.id, actor=default_user)
    # Assertions
    assert final_metrics.run_ns is not None
    assert final_metrics.run_ns > 0
    # Duration should be at least 100ms (100_000_000 nanoseconds)
    assert final_metrics.run_ns >= 100_000_000, f"Expected run_ns >= 100_000_000, got {final_metrics.run_ns}"
    # Duration should be reasonable (less than 10 seconds)
    assert final_metrics.run_ns < 10_000_000_000, f"Expected run_ns < 10_000_000_000, got {final_metrics.run_ns}"
@pytest.mark.asyncio
async def test_run_metrics_num_steps_tracking(server: SyncServer, sarah_agent, default_user):
    """Test that num_steps is properly tracked in run metrics.

    Steps are logged BEFORE the terminal update, which is what refreshes
    num_steps in the metrics row.
    """
    # Create a run
    run_data = PydanticRun(
        metadata={"type": "test_num_steps"},
        agent_id=sarah_agent.id,
    )
    created_run = await server.run_manager.create_run(pydantic_run=run_data, actor=default_user)
    # Initial metrics should have 0 steps
    initial_metrics = await server.run_manager.get_run_metrics_async(run_id=created_run.id, actor=default_user)
    assert initial_metrics.num_steps == 0
    # Add some steps (token counts vary per step but are not asserted here)
    for i in range(3):
        await server.step_manager.log_step_async(
            agent_id=sarah_agent.id,
            provider_name="openai",
            provider_category="base",
            model="gpt-4o-mini",
            model_endpoint="https://api.openai.com/v1",
            context_window_limit=8192,
            usage=UsageStatistics(
                completion_tokens=100 + i * 10,
                prompt_tokens=50 + i * 5,
                total_tokens=150 + i * 15,
            ),
            run_id=created_run.id,
            actor=default_user,
            project_id=sarah_agent.project_id,
        )
    # Update the run to trigger metrics update
    await server.run_manager.update_run_by_id_async(
        created_run.id, RunUpdate(status=RunStatus.completed, stop_reason=StopReasonType.end_turn), actor=default_user
    )
    # Get updated metrics
    final_metrics = await server.run_manager.get_run_metrics_async(run_id=created_run.id, actor=default_user)
    # Verify num_steps was updated
    assert final_metrics.num_steps == 3
@pytest.mark.asyncio
async def test_run_metrics_not_found(server: SyncServer, default_user):
    """Fetching metrics for an unknown run id raises LettaInvalidArgumentError."""
    with pytest.raises(LettaInvalidArgumentError):
        await server.run_manager.get_run_metrics_async(run_id="nonexistent_run", actor=default_user)
@pytest.mark.asyncio
async def test_run_metrics_partial_update(server: SyncServer, sarah_agent, default_user):
    """Test that non-terminal updates don't calculate run_ns.

    After an update to ``running``, num_steps is refreshed but run_ns stays
    None — duration is only computed on terminal status.
    """
    # Create a run
    run_data = PydanticRun(
        metadata={"type": "test_partial"},
        agent_id=sarah_agent.id,
    )
    created_run = await server.run_manager.create_run(pydantic_run=run_data, actor=default_user)
    # Add a step
    await server.step_manager.log_step_async(
        agent_id=sarah_agent.id,
        provider_name="openai",
        provider_category="base",
        model="gpt-4o-mini",
        model_endpoint="https://api.openai.com/v1",
        context_window_limit=8192,
        usage=UsageStatistics(
            completion_tokens=100,
            prompt_tokens=50,
            total_tokens=150,
        ),
        run_id=created_run.id,
        actor=default_user,
        project_id=sarah_agent.project_id,
    )
    # Update to running (non-terminal)
    await server.run_manager.update_run_by_id_async(created_run.id, RunUpdate(status=RunStatus.running), actor=default_user)
    # Get metrics
    metrics = await server.run_manager.get_run_metrics_async(run_id=created_run.id, actor=default_user)
    # Verify run_ns is still None (not calculated for non-terminal updates)
    assert metrics.run_ns is None
    # But num_steps should be updated
    assert metrics.num_steps == 1
@pytest.mark.asyncio
async def test_run_metrics_integration_with_run_steps(server: SyncServer, sarah_agent, default_user):
    """Test integration between run metrics and run steps.

    Logs several steps against one run, verifies they are listed back, then
    completes the run and checks the metrics reflect the step count and that
    a total duration was computed.
    """
    # Create a run
    run_data = PydanticRun(
        metadata={"type": "test_integration"},
        agent_id=sarah_agent.id,
    )
    created_run = await server.run_manager.create_run(pydantic_run=run_data, actor=default_user)
    # Add multiple steps. The returned step objects are not needed here
    # (the original collected their IDs into a list that was never read).
    for _ in range(5):
        await server.step_manager.log_step_async(
            agent_id=sarah_agent.id,
            provider_name="openai",
            provider_category="base",
            model="gpt-4o-mini",
            model_endpoint="https://api.openai.com/v1",
            context_window_limit=8192,
            usage=UsageStatistics(
                completion_tokens=100,
                prompt_tokens=50,
                total_tokens=150,
            ),
            run_id=created_run.id,
            actor=default_user,
            project_id=sarah_agent.project_id,
        )
    # Get run steps
    run_steps = await server.run_manager.get_run_steps(run_id=created_run.id, actor=default_user)
    # Verify steps are returned correctly
    assert len(run_steps) == 5
    assert all(step.run_id == created_run.id for step in run_steps)
    # Update run to completed (terminal status triggers duration computation)
    await server.run_manager.update_run_by_id_async(
        created_run.id, RunUpdate(status=RunStatus.completed, stop_reason=StopReasonType.end_turn), actor=default_user
    )
    # Get final metrics
    metrics = await server.run_manager.get_run_metrics_async(run_id=created_run.id, actor=default_user)
    # Verify metrics reflect the steps
    assert metrics.num_steps == 5
    assert metrics.run_ns is not None
# TODO: add back once metrics are added
# @pytest.mark.asyncio
# async def test_record_ttft(server: SyncServer, default_user):
# """Test recording time to first token for a job."""
# # Create a job
# job_data = PydanticJob(
# status=RunStatus.created,
# metadata={"type": "test_timing"},
# )
# created_job = await server.job_manager.create_job_async(pydantic_job=job_data, actor=default_user)
#
# # Record TTFT
# ttft_ns = 1_500_000_000 # 1.5 seconds in nanoseconds
# await server.job_manager.record_ttft(created_job.id, ttft_ns, default_user)
#
# # Fetch the job and verify TTFT was recorded
# updated_job = await server.job_manager.get_job_by_id_async(created_job.id, default_user)
# assert updated_job.ttft_ns == ttft_ns
#
#
# @pytest.mark.asyncio
# async def test_record_response_duration(server: SyncServer, default_user):
# """Test recording total response duration for a job."""
# # Create a job
# job_data = PydanticJob(
# status=RunStatus.created,
# metadata={"type": "test_timing"},
# )
# created_job = await server.job_manager.create_job_async(pydantic_job=job_data, actor=default_user)
#
# # Record response duration
# duration_ns = 5_000_000_000 # 5 seconds in nanoseconds
# await server.job_manager.record_response_duration(created_job.id, duration_ns, default_user)
#
# # Fetch the job and verify duration was recorded
# updated_job = await server.job_manager.get_job_by_id_async(created_job.id, default_user)
# assert updated_job.total_duration_ns == duration_ns
#
#
# @pytest.mark.asyncio
# async def test_record_timing_metrics_together(server: SyncServer, default_user):
# """Test recording both TTFT and response duration for a job."""
# # Create a job
# job_data = PydanticJob(
# status=RunStatus.created,
# metadata={"type": "test_timing_combined"},
# )
# created_job = await server.job_manager.create_job_async(pydantic_job=job_data, actor=default_user)
#
# # Record both metrics
# ttft_ns = 2_000_000_000 # 2 seconds in nanoseconds
# duration_ns = 8_500_000_000 # 8.5 seconds in nanoseconds
#
# await server.job_manager.record_ttft(created_job.id, ttft_ns, default_user)
# await server.job_manager.record_response_duration(created_job.id, duration_ns, default_user)
#
# # Fetch the job and verify both metrics were recorded
# updated_job = await server.job_manager.get_job_by_id_async(created_job.id, default_user)
# assert updated_job.ttft_ns == ttft_ns
# assert updated_job.total_duration_ns == duration_ns
#
#
# @pytest.mark.asyncio
# async def test_record_timing_invalid_job(server: SyncServer, default_user):
# """Test recording timing metrics for non-existent job fails gracefully."""
# # Try to record TTFT for non-existent job - should not raise exception but log warning
# await server.job_manager.record_ttft("nonexistent_job_id", 1_000_000_000, default_user)
#
# # Try to record response duration for non-existent job - should not raise exception but log warning
# await server.job_manager.record_response_duration("nonexistent_job_id", 2_000_000_000, default_user)
#
# ======================================================================================================================
# convert_statuses_to_enum Tests
# ======================================================================================================================
def test_convert_statuses_to_enum_with_none():
    """A None input should pass straight through as None."""
    from letta.server.rest_api.routers.v1.runs import convert_statuses_to_enum

    assert convert_statuses_to_enum(None) is None
def test_convert_statuses_to_enum_with_single_status():
    """A one-element list maps to the matching enum member."""
    from letta.server.rest_api.routers.v1.runs import convert_statuses_to_enum

    converted = convert_statuses_to_enum(["completed"])
    assert converted == [RunStatus.completed]
    assert len(converted) == 1
def test_convert_statuses_to_enum_with_multiple_statuses():
    """Order and length are preserved when converting several statuses."""
    from letta.server.rest_api.routers.v1.runs import convert_statuses_to_enum

    converted = convert_statuses_to_enum(["created", "running", "completed"])
    assert converted == [RunStatus.created, RunStatus.running, RunStatus.completed]
    assert len(converted) == 3
def test_convert_statuses_to_enum_with_all_statuses():
    """Every known status string converts to its enum counterpart."""
    from letta.server.rest_api.routers.v1.runs import convert_statuses_to_enum

    names = ["created", "running", "completed", "failed", "cancelled"]
    expected = [RunStatus.created, RunStatus.running, RunStatus.completed, RunStatus.failed, RunStatus.cancelled]
    converted = convert_statuses_to_enum(names)
    assert converted == expected
    assert len(converted) == 5
def test_convert_statuses_to_enum_with_empty_list():
    """An empty list converts to an empty list."""
    from letta.server.rest_api.routers.v1.runs import convert_statuses_to_enum

    assert convert_statuses_to_enum([]) == []
def test_convert_statuses_to_enum_with_invalid_status():
    """Unknown status strings must be rejected with ValueError."""
    from letta.server.rest_api.routers.v1.runs import convert_statuses_to_enum

    with pytest.raises(ValueError):
        convert_statuses_to_enum(["invalid_status"])
@pytest.mark.asyncio
async def test_list_runs_with_multiple_statuses(server: SyncServer, sarah_agent, default_user):
    """list_runs should honor a filter containing several statuses at once."""
    # Seed one run per status of interest.
    seed = [
        (RunStatus.created, "created"),
        (RunStatus.running, "running"),
        (RunStatus.completed, "completed"),
        (RunStatus.failed, "failed"),
    ]
    for status, label in seed:
        await server.run_manager.create_run(
            pydantic_run=PydanticRun(
                status=status,
                agent_id=sarah_agent.id,
                metadata={"type": label},
            ),
            actor=default_user,
        )
    # Non-terminal statuses only.
    active_runs = await server.run_manager.list_runs(
        actor=default_user, statuses=[RunStatus.created, RunStatus.running], agent_id=sarah_agent.id
    )
    assert len(active_runs) == 2
    assert all(run.status in [RunStatus.created, RunStatus.running] for run in active_runs)
    # Terminal statuses only.
    terminal_runs = await server.run_manager.list_runs(
        actor=default_user, statuses=[RunStatus.completed, RunStatus.failed], agent_id=sarah_agent.id
    )
    assert len(terminal_runs) == 2
    assert all(run.status in [RunStatus.completed, RunStatus.failed] for run in terminal_runs)
@pytest.mark.asyncio
async def test_list_runs_with_no_status_filter_returns_all(server: SyncServer, sarah_agent, default_user):
    """Without a status filter, list_runs returns runs of every status."""
    all_statuses = [RunStatus.created, RunStatus.running, RunStatus.completed, RunStatus.failed, RunStatus.cancelled]
    # One run per status.
    for status in all_statuses:
        await server.run_manager.create_run(pydantic_run=PydanticRun(status=status, agent_id=sarah_agent.id), actor=default_user)
    # List without any status filter.
    listed = await server.run_manager.list_runs(actor=default_user, agent_id=sarah_agent.id)
    assert len(listed) >= 5
    # Every seeded status must appear in the result.
    seen = {run.status for run in listed}
    for status in all_statuses:
        assert status in seen
# ======================================================================================================================
# RunManager Tests - Duration Filtering
# ======================================================================================================================
@pytest.mark.asyncio
async def test_list_runs_by_duration_gt(server: SyncServer, sarah_agent, default_user):
    """Test listing runs filtered by duration greater than a threshold.

    Creates three completed runs with staggered wall-clock durations, then
    checks the `gt` duration filter excludes the fastest one. (The original
    accumulated the runs in a `runs_data` list that was never read.)
    """
    import asyncio

    async def _completed_run_after(delay_s: float, speed: str):
        """Create a run, wait, then complete it so a duration is recorded."""
        run = await server.run_manager.create_run(
            pydantic_run=PydanticRun(agent_id=sarah_agent.id, metadata={"speed": speed}),
            actor=default_user,
        )
        await asyncio.sleep(delay_s)
        await server.run_manager.update_run_by_id_async(
            run.id, RunUpdate(status=RunStatus.completed, stop_reason=StopReasonType.end_turn), actor=default_user
        )
        return run

    await _completed_run_after(0.05, "fast")  # ~50ms, below threshold
    run_medium = await _completed_run_after(0.15, "medium")  # ~150ms
    run_slow = await _completed_run_after(0.25, "slow")  # ~250ms
    # Filter runs with duration > 100ms (100,000,000 ns)
    filtered_runs = await server.run_manager.list_runs(
        actor=default_user,
        agent_id=sarah_agent.id,
        duration_filter={"value": 100_000_000, "operator": "gt"},
    )
    # The medium and slow runs exceed the threshold; the fast one should not.
    assert len(filtered_runs) >= 2
    run_ids = {run.id for run in filtered_runs}
    assert run_medium.id in run_ids
    assert run_slow.id in run_ids
@pytest.mark.asyncio
async def test_list_runs_by_duration_lt(server: SyncServer, sarah_agent, default_user):
    """Runs faster than a threshold should be returned by an 'lt' duration filter."""
    import asyncio

    # One quick run and one lengthy run, both driven to a terminal state so
    # their durations are recorded.
    quick_run = await server.run_manager.create_run(
        pydantic_run=PydanticRun(agent_id=sarah_agent.id, metadata={"speed": "fast"}),
        actor=default_user,
    )
    await asyncio.sleep(0.05)  # ~50ms
    await server.run_manager.update_run_by_id_async(
        quick_run.id, RunUpdate(status=RunStatus.completed, stop_reason=StopReasonType.end_turn), actor=default_user
    )
    lengthy_run = await server.run_manager.create_run(
        pydantic_run=PydanticRun(agent_id=sarah_agent.id, metadata={"speed": "slow"}),
        actor=default_user,
    )
    await asyncio.sleep(0.30)  # ~300ms
    await server.run_manager.update_run_by_id_async(
        lengthy_run.id, RunUpdate(status=RunStatus.completed, stop_reason=StopReasonType.end_turn), actor=default_user
    )
    # Pick a cutoff halfway between the two measured durations.
    quick_metrics = await server.run_manager.get_run_metrics_async(run_id=quick_run.id, actor=default_user)
    lengthy_metrics = await server.run_manager.get_run_metrics_async(run_id=lengthy_run.id, actor=default_user)
    cutoff = (quick_metrics.run_ns + lengthy_metrics.run_ns) // 2
    # Only the quick run falls under the cutoff.
    matching = await server.run_manager.list_runs(
        actor=default_user,
        agent_id=sarah_agent.id,
        duration_filter={"value": cutoff, "operator": "lt"},
    )
    matching_ids = [run.id for run in matching]
    assert len(matching) >= 1
    assert quick_run.id in matching_ids
    assert lengthy_run.id not in matching_ids
@pytest.mark.asyncio
async def test_list_runs_by_duration_percentile(server: SyncServer, sarah_agent, default_user):
    """Percentile filtering should surface the slowest tail of runs."""
    import asyncio

    # Create ten completed runs whose durations ramp from ~50ms to ~500ms.
    created_ids = []
    for index, duration_ms in enumerate([50, 100, 150, 200, 250, 300, 350, 400, 450, 500]):
        run = await server.run_manager.create_run(
            pydantic_run=PydanticRun(agent_id=sarah_agent.id, metadata={"index": index}),
            actor=default_user,
        )
        await asyncio.sleep(duration_ms / 1000.0)
        await server.run_manager.update_run_by_id_async(
            run.id, RunUpdate(status=RunStatus.completed, stop_reason=StopReasonType.end_turn), actor=default_user
        )
        created_ids.append(run.id)
    # Ask for runs above the 80th percentile (roughly the slowest 20%).
    slowest = await server.run_manager.list_runs(
        actor=default_user,
        agent_id=sarah_agent.id,
        duration_percentile=80,
    )
    # Approximately 20% of 10 runs, so at least two results.
    assert len(slowest) >= 2
    slowest_ids = {run.id for run in slowest}
    # The ~500ms run must be present ...
    assert created_ids[-1] in slowest_ids
    # ... and most of the result should come from the slowest three runs.
    assert len(slowest_ids & set(created_ids[-3:])) >= 2, "Expected at least 2 of the slowest 3 runs"
@pytest.mark.asyncio
async def test_list_runs_by_duration_with_order_by(server: SyncServer, sarah_agent, default_user):
    """Test listing runs ordered by duration in both directions.

    The original accumulated the created runs in a list that was never read,
    and duplicated the asc/desc verification; both are factored out here.
    """
    import asyncio

    # Create three completed runs with increasing durations.
    for i, duration_ms in enumerate([100, 200, 300]):
        run = await server.run_manager.create_run(
            pydantic_run=PydanticRun(agent_id=sarah_agent.id, metadata={"index": i}),
            actor=default_user,
        )
        await asyncio.sleep(duration_ms / 1000.0)
        await server.run_manager.update_run_by_id_async(
            run.id, RunUpdate(status=RunStatus.completed, stop_reason=StopReasonType.end_turn), actor=default_user
        )

    async def _first_three_durations(ascending: bool) -> list:
        """Return run_ns for the first three runs listed with the given order."""
        listed = await server.run_manager.list_runs(
            actor=default_user,
            agent_id=sarah_agent.id,
            order_by="duration",
            ascending=ascending,
        )
        assert len(listed) >= 3
        durations = []
        for run in listed[:3]:
            metrics = await server.run_manager.get_run_metrics_async(run_id=run.id, actor=default_user)
            durations.append(metrics.run_ns)
        return durations

    # Ascending: fastest to slowest.
    asc = await _first_three_durations(ascending=True)
    assert asc[0] <= asc[1] <= asc[2]
    # Descending: slowest to fastest.
    desc = await _first_three_durations(ascending=False)
    assert desc[0] >= desc[1] >= desc[2]
@pytest.mark.asyncio
async def test_list_runs_combined_duration_filter_and_percentile(server: SyncServer, sarah_agent, default_user):
    """Test combining duration filter with percentile filter.

    (The original accumulated the created runs in a `runs` list that was
    never read; that dead code is removed.)
    """
    import asyncio

    # Create runs with varied durations (~50ms up to ~400ms), all completed.
    for i, duration_ms in enumerate([50, 100, 150, 200, 250, 300, 350, 400]):
        run = await server.run_manager.create_run(
            pydantic_run=PydanticRun(agent_id=sarah_agent.id, metadata={"index": i}),
            actor=default_user,
        )
        await asyncio.sleep(duration_ms / 1000.0)
        await server.run_manager.update_run_by_id_async(
            run.id, RunUpdate(status=RunStatus.completed, stop_reason=StopReasonType.end_turn), actor=default_user
        )
    # Filter runs that are:
    # 1. In top 50% slowest (duration_percentile=50)
    # 2. AND greater than 200ms (duration_filter > 200_000_000 ns)
    filtered_runs = await server.run_manager.list_runs(
        actor=default_user,
        agent_id=sarah_agent.id,
        duration_percentile=50,
        duration_filter={"value": 200_000_000, "operator": "gt"},
    )
    # Should return runs that satisfy both conditions
    assert len(filtered_runs) >= 2
    # Verify every returned run meets the explicit duration criterion
    for run in filtered_runs:
        metrics = await server.run_manager.get_run_metrics_async(run_id=run.id, actor=default_user)
        assert metrics.run_ns > 200_000_000
@pytest.mark.asyncio
async def test_get_run_with_status_no_lettuce(server: SyncServer, sarah_agent, default_user):
    """Runs without Lettuce metadata are returned straight from the database."""
    # A plain run, no "lettuce" key in its metadata.
    created = await server.run_manager.create_run(
        pydantic_run=PydanticRun(metadata={"type": "test"}, agent_id=sarah_agent.id),
        actor=default_user,
    )
    fetched = await server.run_manager.get_run_with_status(run_id=created.id, actor=default_user)
    # The DB record comes back untouched.
    assert fetched.id == created.id
    assert fetched.status == RunStatus.created
    assert fetched.metadata == {"type": "test"}
@pytest.mark.asyncio
async def test_get_run_with_status_lettuce_success(server: SyncServer, sarah_agent, default_user, monkeypatch):
    """Test getting a run with Lettuce metadata and successful status fetch."""
    # NOTE(review): the `monkeypatch` fixture looks unused — patching is done
    # via unittest.mock.patch below; confirm whether it can be dropped.
    # Create a run with Lettuce metadata
    run_data = PydanticRun(
        metadata={"lettuce": True},
        agent_id=sarah_agent.id,
        status=RunStatus.running,
    )
    created_run = await server.run_manager.create_run(pydantic_run=run_data, actor=default_user)
    # Mock LettuceClient: `create` is the async factory, `get_status` reports "COMPLETED"
    mock_client = AsyncMock()
    mock_client.get_status = AsyncMock(return_value="COMPLETED")
    mock_lettuce_class = AsyncMock()
    mock_lettuce_class.create = AsyncMock(return_value=mock_client)
    # Patch LettuceClient where it's imported from
    with patch("letta.services.lettuce.LettuceClient", mock_lettuce_class):
        # Get run with status
        fetched_run = await server.run_manager.get_run_with_status(run_id=created_run.id, actor=default_user)
    # Verify status was updated from Lettuce ("COMPLETED" maps to RunStatus.completed)
    assert fetched_run.id == created_run.id
    assert fetched_run.status == RunStatus.completed
    mock_client.get_status.assert_called_once_with(run_id=created_run.id)
@pytest.mark.asyncio
async def test_get_run_with_status_lettuce_failure(server: SyncServer, sarah_agent, default_user, monkeypatch):
    """Test getting a run when Lettuce status fetch fails."""
    # NOTE(review): the `monkeypatch` fixture looks unused; confirm it can be dropped.
    # Create a run with Lettuce metadata
    run_data = PydanticRun(
        metadata={"lettuce": True},
        agent_id=sarah_agent.id,
        status=RunStatus.running,
    )
    created_run = await server.run_manager.create_run(pydantic_run=run_data, actor=default_user)
    # Mock LettuceClient to raise an exception from its async factory
    mock_lettuce_class = AsyncMock()
    mock_lettuce_class.create = AsyncMock(side_effect=Exception("Lettuce connection failed"))
    # Patch LettuceClient where it's imported from
    with patch("letta.services.lettuce.LettuceClient", mock_lettuce_class):
        # Get run with status - should gracefully handle error
        fetched_run = await server.run_manager.get_run_with_status(run_id=created_run.id, actor=default_user)
    # Verify run is returned with DB status (error was logged but not raised)
    assert fetched_run.id == created_run.id
    assert fetched_run.status == RunStatus.running  # Original status from DB
@pytest.mark.asyncio
async def test_get_run_with_status_lettuce_terminal_status(server: SyncServer, sarah_agent, default_user, monkeypatch):
    """Test that Lettuce status is not fetched for runs with terminal status."""
    # NOTE(review): the `monkeypatch` fixture looks unused; confirm it can be dropped.
    # Create a run with Lettuce metadata but terminal status
    run_data = PydanticRun(
        metadata={"lettuce": True},
        agent_id=sarah_agent.id,
        status=RunStatus.completed,
    )
    created_run = await server.run_manager.create_run(pydantic_run=run_data, actor=default_user)
    # Mock LettuceClient - should not be called for an already-terminal run
    mock_client = AsyncMock()
    mock_client.get_status = AsyncMock()
    mock_lettuce_class = AsyncMock()
    mock_lettuce_class.create = AsyncMock(return_value=mock_client)
    # Patch LettuceClient where it's imported from
    with patch("letta.services.lettuce.LettuceClient", mock_lettuce_class):
        # Get run with status
        fetched_run = await server.run_manager.get_run_with_status(run_id=created_run.id, actor=default_user)
    # Verify status remains unchanged and Lettuce was not called
    assert fetched_run.id == created_run.id
    assert fetched_run.status == RunStatus.completed
    mock_client.get_status.assert_not_called()
@pytest.mark.asyncio
async def test_get_run_with_status_not_found(server: SyncServer, default_user):
    """A well-formed but unknown run ID must raise NoResultFound."""
    missing_id = f"run-{uuid.uuid4()}"
    with pytest.raises(NoResultFound):
        await server.run_manager.get_run_with_status(run_id=missing_id, actor=default_user)
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/managers/test_run_manager.py",
"license": "Apache License 2.0",
"lines": 1739,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/managers/test_sandbox_manager.py | import time
import pytest
# Import shared fixtures and constants from conftest
from conftest import (
CREATE_DELAY_SQLITE,
USING_SQLITE,
)
from letta.constants import (
LETTA_TOOL_EXECUTION_DIR,
)
from letta.schemas.enums import (
SandboxType,
)
from letta.schemas.environment_variables import SandboxEnvironmentVariableCreate, SandboxEnvironmentVariableUpdate
from letta.schemas.sandbox_config import E2BSandboxConfig, LocalSandboxConfig, SandboxConfigCreate, SandboxConfigUpdate
from letta.server.server import SyncServer
from letta.settings import tool_settings
# ======================================================================================================================
# SandboxConfigManager Tests - Sandbox Configs
# ======================================================================================================================
@pytest.mark.asyncio
async def test_create_or_update_sandbox_config(server: SyncServer, default_user):
    """Creating an E2B sandbox config should persist type, config, and org."""
    request = SandboxConfigCreate(
        config=E2BSandboxConfig(),
    )
    created = await server.sandbox_config_manager.create_or_update_sandbox_config_async(request, actor=default_user)
    # The stored record mirrors the request and is owned by the actor's org.
    assert created.type == SandboxType.E2B
    assert created.get_e2b_config() == request.config
    assert created.organization_id == default_user.organization_id
@pytest.mark.asyncio
async def test_create_local_sandbox_config_defaults(server: SyncServer, default_user):
    """A local sandbox config created without overrides should use a default dir."""
    request = SandboxConfigCreate(
        config=LocalSandboxConfig(),
    )
    created = await server.sandbox_config_manager.create_or_update_sandbox_config_async(request, actor=default_user)
    assert created.type == SandboxType.LOCAL
    assert created.get_local_config() == request.config
    # The sandbox dir defaults to either the constant or the settings override.
    assert created.get_local_config().sandbox_dir in {LETTA_TOOL_EXECUTION_DIR, tool_settings.tool_exec_dir}
    assert created.organization_id == default_user.organization_id
@pytest.mark.asyncio
async def test_default_e2b_settings_sandbox_config(server: SyncServer, default_user):
    """The default E2B config should carry the expected timeout and template."""
    default_config = await server.sandbox_config_manager.get_or_create_default_sandbox_config_async(
        sandbox_type=SandboxType.E2B, actor=default_user
    )
    e2b = default_config.get_e2b_config()
    assert e2b.timeout == 5 * 60  # five minutes
    assert e2b.template == tool_settings.e2b_sandbox_template_id
@pytest.mark.asyncio
async def test_update_existing_sandbox_config(server: SyncServer, sandbox_config_fixture, default_user):
    """Updating an existing sandbox config should persist the new values."""
    new_config = E2BSandboxConfig(template="template_2", timeout=120)
    updated = await server.sandbox_config_manager.update_sandbox_config_async(
        sandbox_config_fixture.id, SandboxConfigUpdate(config=new_config), actor=default_user
    )
    # The serialized config dict reflects the update.
    assert updated.config["template"] == "template_2"
    assert updated.config["timeout"] == 120
@pytest.mark.asyncio
async def test_delete_sandbox_config(server: SyncServer, sandbox_config_fixture, default_user):
    """Deleting a sandbox config removes it from subsequent listings."""
    deleted = await server.sandbox_config_manager.delete_sandbox_config_async(sandbox_config_fixture.id, actor=default_user)
    assert deleted.id == sandbox_config_fixture.id
    # The deleted config must no longer be listed.
    remaining = await server.sandbox_config_manager.list_sandbox_configs_async(actor=default_user)
    assert all(config.id != sandbox_config_fixture.id for config in remaining)
@pytest.mark.asyncio
async def test_get_sandbox_config_by_type(server: SyncServer, sandbox_config_fixture, default_user):
    """Lookup by sandbox type should resolve to the fixture's config."""
    fetched = await server.sandbox_config_manager.get_sandbox_config_by_type_async(sandbox_config_fixture.type, actor=default_user)
    assert fetched.id == sandbox_config_fixture.id
    assert fetched.type == sandbox_config_fixture.type
@pytest.mark.asyncio
async def test_list_sandbox_configs(server: SyncServer, default_user):
    """Listing sandbox configs: plain listing, cursor pagination, and type filter."""
    # Creating multiple sandbox configs
    config_e2b_create = SandboxConfigCreate(
        config=E2BSandboxConfig(),
    )
    config_local_create = SandboxConfigCreate(
        config=LocalSandboxConfig(sandbox_dir=""),
    )
    config_e2b = await server.sandbox_config_manager.create_or_update_sandbox_config_async(config_e2b_create, actor=default_user)
    if USING_SQLITE:
        # Presumably keeps the two creation timestamps distinct so the cursor
        # pagination below is deterministic on SQLite — TODO confirm.
        time.sleep(CREATE_DELAY_SQLITE)
    config_local = await server.sandbox_config_manager.create_or_update_sandbox_config_async(config_local_create, actor=default_user)
    # List configs without pagination
    configs = await server.sandbox_config_manager.list_sandbox_configs_async(actor=default_user)
    assert len(configs) >= 2
    # List configs with pagination (page size 1, then the page after that cursor)
    paginated_configs = await server.sandbox_config_manager.list_sandbox_configs_async(actor=default_user, limit=1)
    assert len(paginated_configs) == 1
    next_page = await server.sandbox_config_manager.list_sandbox_configs_async(actor=default_user, after=paginated_configs[-1].id, limit=1)
    assert len(next_page) == 1
    assert next_page[0].id != paginated_configs[0].id
    # List configs using sandbox_type filter
    configs = await server.sandbox_config_manager.list_sandbox_configs_async(actor=default_user, sandbox_type=SandboxType.E2B)
    assert len(configs) == 1
    assert configs[0].id == config_e2b.id
    configs = await server.sandbox_config_manager.list_sandbox_configs_async(actor=default_user, sandbox_type=SandboxType.LOCAL)
    assert len(configs) == 1
    assert configs[0].id == config_local.id
# ======================================================================================================================
# SandboxConfigManager Tests - Environment Variables
# ======================================================================================================================
@pytest.mark.asyncio
async def test_create_sandbox_env_var(server: SyncServer, sandbox_config_fixture, default_user):
    """Creating an env var stores key, value, and owning organization."""
    request = SandboxEnvironmentVariableCreate(key="TEST_VAR", value="test_value", description="A test environment variable.")
    created = await server.sandbox_config_manager.create_sandbox_env_var_async(
        request, sandbox_config_id=sandbox_config_fixture.id, actor=default_user
    )
    assert created.key == request.key
    assert created.value == request.value
    assert created.organization_id == default_user.organization_id
@pytest.mark.asyncio
async def test_update_sandbox_env_var(server: SyncServer, sandbox_env_var_fixture, default_user):
    """Updating an env var's value should persist while keeping its identity."""
    updated = await server.sandbox_config_manager.update_sandbox_env_var_async(
        sandbox_env_var_fixture.id, SandboxEnvironmentVariableUpdate(value="updated_value"), actor=default_user
    )
    assert updated.value == "updated_value"
    assert updated.id == sandbox_env_var_fixture.id
@pytest.mark.asyncio
async def test_delete_sandbox_env_var(server: SyncServer, sandbox_config_fixture, sandbox_env_var_fixture, default_user):
    """Deleting an env var removes it from the config's listing."""
    deleted = await server.sandbox_config_manager.delete_sandbox_env_var_async(sandbox_env_var_fixture.id, actor=default_user)
    assert deleted.id == sandbox_env_var_fixture.id
    # The deleted variable must no longer be listed under its config.
    remaining = await server.sandbox_config_manager.list_sandbox_env_vars_async(
        sandbox_config_id=sandbox_config_fixture.id, actor=default_user
    )
    assert all(env_var.id != sandbox_env_var_fixture.id for env_var in remaining)
@pytest.mark.asyncio
async def test_list_sandbox_env_vars(server: SyncServer, sandbox_config_fixture, default_user):
    """Listing env vars for a sandbox config, with and without pagination."""
    # Creating multiple environment variables
    env_var_create_a = SandboxEnvironmentVariableCreate(key="VAR1", value="value1")
    env_var_create_b = SandboxEnvironmentVariableCreate(key="VAR2", value="value2")
    await server.sandbox_config_manager.create_sandbox_env_var_async(
        env_var_create_a, sandbox_config_id=sandbox_config_fixture.id, actor=default_user
    )
    if USING_SQLITE:
        # Presumably keeps the two creation timestamps distinct so the cursor
        # pagination below is deterministic on SQLite — TODO confirm.
        time.sleep(CREATE_DELAY_SQLITE)
    await server.sandbox_config_manager.create_sandbox_env_var_async(
        env_var_create_b, sandbox_config_id=sandbox_config_fixture.id, actor=default_user
    )
    # List env vars without pagination
    env_vars = await server.sandbox_config_manager.list_sandbox_env_vars_async(
        sandbox_config_id=sandbox_config_fixture.id, actor=default_user
    )
    assert len(env_vars) >= 2
    # List env vars with pagination (page size 1, then the page after that cursor)
    paginated_env_vars = await server.sandbox_config_manager.list_sandbox_env_vars_async(
        sandbox_config_id=sandbox_config_fixture.id, actor=default_user, limit=1
    )
    assert len(paginated_env_vars) == 1
    next_page = await server.sandbox_config_manager.list_sandbox_env_vars_async(
        sandbox_config_id=sandbox_config_fixture.id, actor=default_user, after=paginated_env_vars[-1].id, limit=1
    )
    assert len(next_page) == 1
    assert next_page[0].id != paginated_env_vars[0].id
@pytest.mark.asyncio
async def test_get_sandbox_env_var_by_key(server: SyncServer, sandbox_env_var_fixture, default_user):
    """Lookup by (key, sandbox_config_id) should resolve to the fixture's env var."""
    fetched = await server.sandbox_config_manager.get_sandbox_env_var_by_key_and_sandbox_config_id_async(
        sandbox_env_var_fixture.key, sandbox_env_var_fixture.sandbox_config_id, actor=default_user
    )
    assert fetched.id == sandbox_env_var_fixture.id
@pytest.mark.asyncio
async def test_gather_env_vars_layering(server: SyncServer, sandbox_config_fixture, default_user):
    """Test that _gather_env_vars properly layers env vars with correct priority.
    Priority order (later overrides earlier):
    1. Global sandbox env vars from DB (always included)
    2. Provided sandbox env vars (agent-scoped, override global on key collision)
    3. Agent state env vars
    4. Additional runtime env vars (highest priority)
    """
    from unittest.mock import MagicMock

    from letta.services.tool_sandbox.local_sandbox import AsyncToolSandboxLocal

    # Seed the DB with one global-only var plus one var per layer that should shadow it.
    seed_vars = {
        "GLOBAL_ONLY": "global_value",
        "OVERRIDE_BY_PROVIDED": "global_will_be_overridden",
        "OVERRIDE_BY_AGENT": "global_will_be_overridden_by_agent",
        "OVERRIDE_BY_ADDITIONAL": "global_will_be_overridden_by_additional",
    }
    for key, value in seed_vars.items():
        await server.sandbox_config_manager.create_sandbox_env_var_async(
            SandboxEnvironmentVariableCreate(key=key, value=value),
            sandbox_config_id=sandbox_config_fixture.id,
            actor=default_user,
        )

    # Agent-scoped vars supplied at sandbox construction time.
    provided_env_vars = {
        "OVERRIDE_BY_PROVIDED": "provided_value",
        "PROVIDED_ONLY": "provided_only_value",
    }

    # Mocked agent state contributing the third layer of vars.
    mock_agent_state = MagicMock()
    mock_agent_state.get_agent_env_vars_as_dict.return_value = {
        "OVERRIDE_BY_AGENT": "agent_value",
        "AGENT_ONLY": "agent_only_value",
    }

    # Highest-priority runtime overrides.
    additional_env_vars = {
        "OVERRIDE_BY_ADDITIONAL": "additional_value",
        "ADDITIONAL_ONLY": "additional_only_value",
    }

    sandbox = AsyncToolSandboxLocal(
        tool_name="test_tool",
        args={},
        user=default_user,
        tool_id="test-tool-id",
        sandbox_env_vars=provided_env_vars,
    )
    merged = await sandbox._gather_env_vars(
        agent_state=mock_agent_state,
        additional_env_vars=additional_env_vars,
        sbx_id=sandbox_config_fixture.id,
        is_local=False,  # avoid copying os.environ into the result
    )

    # Every layer's winner, plus the auto-injected LETTA tool id.
    expected_subset = {
        "GLOBAL_ONLY": "global_value",
        "OVERRIDE_BY_PROVIDED": "provided_value",
        "PROVIDED_ONLY": "provided_only_value",
        "OVERRIDE_BY_AGENT": "agent_value",
        "AGENT_ONLY": "agent_only_value",
        "OVERRIDE_BY_ADDITIONAL": "additional_value",
        "ADDITIONAL_ONLY": "additional_only_value",
        "LETTA_TOOL_ID": "test-tool-id",
    }
    for key, value in expected_subset.items():
        assert merged[key] == value
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/managers/test_sandbox_manager.py",
"license": "Apache License 2.0",
"lines": 242,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/managers/test_tool_manager.py | import uuid
import pytest
# Import shared fixtures and constants from conftest
from letta.constants import (
BASE_MEMORY_TOOLS,
BASE_SLEEPTIME_TOOLS,
BASE_TOOLS,
BASE_VOICE_SLEEPTIME_CHAT_TOOLS,
BASE_VOICE_SLEEPTIME_TOOLS,
BUILTIN_TOOLS,
FILES_TOOLS,
LETTA_TOOL_SET,
LOCAL_ONLY_MULTI_AGENT_TOOLS,
MCP_TOOL_TAG_NAME_PREFIX,
MULTI_AGENT_TOOLS,
)
from letta.errors import LettaAgentNotFoundError
from letta.functions.functions import parse_source_code
from letta.orm.errors import NoResultFound, UniqueConstraintViolationError
from letta.schemas.agent import CreateAgent
from letta.schemas.embedding_config import EmbeddingConfig
from letta.schemas.enums import (
ToolType,
)
from letta.schemas.llm_config import LLMConfig
from letta.schemas.pip_requirement import PipRequirement
from letta.schemas.tool import Tool as PydanticTool, ToolUpdate
from letta.server.db import db_registry
from letta.server.server import SyncServer
from letta.services.tool_schema_generator import generate_schema_for_tool_creation
from letta.settings import settings
# ======================================================================================================================
# AgentManager Tests - Tools Relationship
# ======================================================================================================================
@pytest.mark.asyncio
async def test_attach_tool(server: SyncServer, sarah_agent, print_tool, default_user):
    """Test attaching a tool to an agent."""
    am = server.agent_manager
    # Attach once and confirm the tool shows up on the agent.
    await am.attach_tool_async(agent_id=sarah_agent.id, tool_id=print_tool.id, actor=default_user)
    agent = await am.get_agent_by_id_async(agent_id=sarah_agent.id, actor=default_user)
    assert any(t.id == print_tool.id for t in agent.tools)
    # A second attach must be idempotent (no duplicate attachment rows).
    await am.attach_tool_async(agent_id=sarah_agent.id, tool_id=print_tool.id, actor=default_user)
    agent = await am.get_agent_by_id_async(agent_id=sarah_agent.id, actor=default_user)
    assert sum(1 for t in agent.tools if t.id == print_tool.id) == 1
@pytest.mark.asyncio
async def test_detach_tool(server: SyncServer, sarah_agent, print_tool, default_user):
    """Test detaching a tool from an agent."""
    am = server.agent_manager
    # Attach first and confirm the tool is present.
    await am.attach_tool_async(agent_id=sarah_agent.id, tool_id=print_tool.id, actor=default_user)
    agent = await am.get_agent_by_id_async(agent_id=sarah_agent.id, actor=default_user)
    assert any(t.id == print_tool.id for t in agent.tools)
    # Detach and confirm it is gone.
    await am.detach_tool_async(agent_id=sarah_agent.id, tool_id=print_tool.id, actor=default_user)
    agent = await am.get_agent_by_id_async(agent_id=sarah_agent.id, actor=default_user)
    assert all(t.id != print_tool.id for t in agent.tools)
    # Detaching an already-detached tool must be a harmless no-op.
    await am.detach_tool_async(agent_id=sarah_agent.id, tool_id=print_tool.id, actor=default_user)
@pytest.mark.asyncio
async def test_bulk_detach_tools(server: SyncServer, sarah_agent, print_tool, other_tool, default_user):
    """Test bulk detaching multiple tools from an agent."""
    am = server.agent_manager
    ids = [print_tool.id, other_tool.id]
    # Attach both tools up front and verify they landed.
    await am.bulk_attach_tools_async(agent_id=sarah_agent.id, tool_ids=ids, actor=default_user)
    agent = await am.get_agent_by_id_async(agent_id=sarah_agent.id, actor=default_user)
    attached = {t.id for t in agent.tools}
    assert set(ids) <= attached
    # Bulk detach and verify neither remains.
    await am.bulk_detach_tools_async(agent_id=sarah_agent.id, tool_ids=ids, actor=default_user)
    agent = await am.get_agent_by_id_async(agent_id=sarah_agent.id, actor=default_user)
    remaining = {t.id for t in agent.tools}
    assert not (set(ids) & remaining)
@pytest.mark.asyncio
async def test_bulk_detach_tools_partial(server: SyncServer, sarah_agent, print_tool, other_tool, default_user):
    """Test bulk detaching tools when some are not attached."""
    am = server.agent_manager
    # Only one of the two tools is attached beforehand.
    await am.attach_tool_async(agent_id=sarah_agent.id, tool_id=print_tool.id, actor=default_user)
    # Detach both (one attached, one not) — should not raise.
    await am.bulk_detach_tools_async(agent_id=sarah_agent.id, tool_ids=[print_tool.id, other_tool.id], actor=default_user)
    agent = await am.get_agent_by_id_async(agent_id=sarah_agent.id, actor=default_user)
    current = {t.id for t in agent.tools}
    assert print_tool.id not in current
    assert other_tool.id not in current
@pytest.mark.asyncio
async def test_bulk_detach_tools_empty_list(server: SyncServer, sarah_agent, print_tool, default_user):
    """Test bulk detaching empty list of tools."""
    am = server.agent_manager
    await am.attach_tool_async(agent_id=sarah_agent.id, tool_id=print_tool.id, actor=default_user)
    # An empty id list must leave existing attachments untouched.
    await am.bulk_detach_tools_async(agent_id=sarah_agent.id, tool_ids=[], actor=default_user)
    agent = await am.get_agent_by_id_async(agent_id=sarah_agent.id, actor=default_user)
    assert any(t.id == print_tool.id for t in agent.tools)
@pytest.mark.asyncio
async def test_bulk_detach_tools_idempotent(server: SyncServer, sarah_agent, print_tool, other_tool, default_user):
    """Test that bulk detach is idempotent."""
    am = server.agent_manager
    ids = [print_tool.id, other_tool.id]
    await am.bulk_attach_tools_async(agent_id=sarah_agent.id, tool_ids=ids, actor=default_user)
    # First detach removes everything; the second must be a silent no-op.
    for _ in range(2):
        await am.bulk_detach_tools_async(agent_id=sarah_agent.id, tool_ids=ids, actor=default_user)
        agent = await am.get_agent_by_id_async(agent_id=sarah_agent.id, actor=default_user)
        assert len(agent.tools) == 0
@pytest.mark.asyncio
async def test_bulk_detach_tools_nonexistent_agent(server: SyncServer, print_tool, other_tool, default_user):
    """Test bulk detaching tools from a nonexistent agent."""
    missing_agent = f"agent-{uuid.uuid4()}"
    # An unknown agent id must surface as a not-found error.
    with pytest.raises(LettaAgentNotFoundError):
        await server.agent_manager.bulk_detach_tools_async(
            agent_id=missing_agent, tool_ids=[print_tool.id, other_tool.id], actor=default_user
        )
@pytest.mark.asyncio
async def test_attach_tool_nonexistent_agent(server: SyncServer, print_tool, default_user):
    """Test attaching a tool to a nonexistent agent."""
    # FIX: this coroutine test was missing the @pytest.mark.asyncio marker that
    # every sibling test in this file carries; without it pytest-asyncio (in
    # strict mode) skips the coroutine instead of running it.
    with pytest.raises(LettaAgentNotFoundError):
        await server.agent_manager.attach_tool_async(agent_id=f"agent-{uuid.uuid4()}", tool_id=print_tool.id, actor=default_user)
@pytest.mark.asyncio
async def test_attach_tool_nonexistent_tool(server: SyncServer, sarah_agent, default_user):
    """Test attaching a nonexistent tool to an agent."""
    # FIX: restored the @pytest.mark.asyncio marker used by every sibling test;
    # without it pytest-asyncio (strict mode) would skip this coroutine.
    with pytest.raises(NoResultFound):
        await server.agent_manager.attach_tool_async(agent_id=sarah_agent.id, tool_id=f"tool-{uuid.uuid4()}", actor=default_user)
@pytest.mark.asyncio
async def test_detach_tool_nonexistent_agent(server: SyncServer, print_tool, default_user):
    """Test detaching a tool from a nonexistent agent."""
    # FIX: restored the @pytest.mark.asyncio marker used by every sibling test;
    # without it pytest-asyncio (strict mode) would skip this coroutine.
    with pytest.raises(LettaAgentNotFoundError):
        await server.agent_manager.detach_tool_async(agent_id=f"agent-{uuid.uuid4()}", tool_id=print_tool.id, actor=default_user)
@pytest.mark.asyncio
async def test_list_attached_tools(server: SyncServer, sarah_agent, print_tool, other_tool, default_user):
    """Test listing tools attached to an agent."""
    am = server.agent_manager
    # A fresh agent starts with no tools.
    agent = await am.get_agent_by_id_async(sarah_agent.id, actor=default_user)
    assert len(agent.tools) == 0
    # Attach two tools, one at a time.
    for tool_id in (print_tool.id, other_tool.id):
        await am.attach_tool_async(agent_id=sarah_agent.id, tool_id=tool_id, actor=default_user)
    # Exactly both tools should now be listed.
    agent = await am.get_agent_by_id_async(sarah_agent.id, actor=default_user)
    assert len(agent.tools) == 2
    assert {t.id for t in agent.tools} == {print_tool.id, other_tool.id}
@pytest.mark.asyncio
async def test_bulk_attach_tools(server: SyncServer, sarah_agent, print_tool, other_tool, default_user):
    """Test bulk attaching multiple tools to an agent."""
    am = server.agent_manager
    await am.bulk_attach_tools_async(agent_id=sarah_agent.id, tool_ids=[print_tool.id, other_tool.id], actor=default_user)
    # Both tools should now be present on the agent.
    agent = await am.get_agent_by_id_async(agent_id=sarah_agent.id, actor=default_user)
    present = {t.id for t in agent.tools}
    assert print_tool.id in present
    assert other_tool.id in present
@pytest.mark.asyncio
async def test_bulk_attach_tools_with_duplicates(server: SyncServer, sarah_agent, print_tool, other_tool, default_user):
    """Test bulk attaching tools handles duplicates correctly."""
    am = server.agent_manager
    # Pre-attach one tool so the bulk call overlaps existing state.
    await am.attach_tool_async(agent_id=sarah_agent.id, tool_id=print_tool.id, actor=default_user)
    await am.bulk_attach_tools_async(agent_id=sarah_agent.id, tool_ids=[print_tool.id, other_tool.id], actor=default_user)
    agent = await am.get_agent_by_id_async(agent_id=sarah_agent.id, actor=default_user)
    ids = [t.id for t in agent.tools]
    # Exactly the two tools, each attached once — no duplicates.
    assert len(ids) == 2
    assert print_tool.id in ids
    assert other_tool.id in ids
    assert len(set(ids)) == len(ids)
@pytest.mark.asyncio
async def test_bulk_attach_tools_empty_list(server: SyncServer, sarah_agent, default_user):
    """Test bulk attaching empty list of tools."""
    am = server.agent_manager
    # Attaching nothing should leave the agent without tools.
    await am.bulk_attach_tools_async(agent_id=sarah_agent.id, tool_ids=[], actor=default_user)
    agent = await am.get_agent_by_id_async(agent_id=sarah_agent.id, actor=default_user)
    assert len(agent.tools) == 0
@pytest.mark.asyncio
async def test_bulk_attach_tools_nonexistent_tool(server: SyncServer, sarah_agent, print_tool, default_user):
    """Test bulk attaching tools with a nonexistent tool ID."""
    am = server.agent_manager
    missing_id = "nonexistent-tool-id"
    # Mixing one valid and one unknown id must raise.
    with pytest.raises(NoResultFound) as exc_info:
        await am.bulk_attach_tools_async(agent_id=sarah_agent.id, tool_ids=[print_tool.id, missing_id], actor=default_user)
    # The error message should name the missing tool id.
    assert missing_id in str(exc_info.value)
    # The operation rolls back: not even the valid tool was attached.
    agent = await am.get_agent_by_id_async(agent_id=sarah_agent.id, actor=default_user)
    assert len(agent.tools) == 0
@pytest.mark.asyncio
async def test_bulk_attach_tools_nonexistent_agent(server: SyncServer, print_tool, other_tool, default_user):
    """Test bulk attaching tools to a nonexistent agent."""
    missing_agent = f"agent-{uuid.uuid4()}"
    # An unknown agent id must surface as a not-found error.
    with pytest.raises(LettaAgentNotFoundError):
        await server.agent_manager.bulk_attach_tools_async(
            agent_id=missing_agent, tool_ids=[print_tool.id, other_tool.id], actor=default_user
        )
@pytest.mark.asyncio
async def test_attach_missing_files_tools_async(server: SyncServer, sarah_agent, default_user):
    """Test attaching missing file tools to an agent."""
    # Make sure the file tools exist before trying to attach them.
    await server.tool_manager.upsert_base_tools_async(actor=default_user, allowed_types={ToolType.LETTA_FILES_CORE})
    before = await server.agent_manager.get_agent_by_id_async(agent_id=sarah_agent.id, actor=default_user)
    baseline_count = len(before.tools)
    after = await server.agent_manager.attach_missing_files_tools_async(agent_state=before, actor=default_user)
    # Every file tool should now be attached...
    attached_file_tools = {t.name for t in after.tools if t.tool_type == ToolType.LETTA_FILES_CORE}
    assert attached_file_tools == set(FILES_TOOLS)
    # ...and the total tool count grew by exactly that many.
    assert len(after.tools) == baseline_count + len(FILES_TOOLS)
@pytest.mark.asyncio
async def test_attach_missing_files_tools_async_partial(server: SyncServer, sarah_agent, default_user):
    """Test attaching missing file tools when some are already attached."""
    await server.tool_manager.upsert_base_tools_async(actor=default_user, allowed_types={ToolType.LETTA_FILES_CORE})
    # Find the file tools and manually attach just the first one.
    all_tools = await server.tool_manager.list_tools_async(actor=default_user)
    file_tools = [t for t in all_tools if t.tool_type == ToolType.LETTA_FILES_CORE and t.name in FILES_TOOLS]
    await server.agent_manager.attach_tool_async(agent_id=sarah_agent.id, tool_id=file_tools[0].id, actor=default_user)
    agent_state = await server.agent_manager.get_agent_by_id_async(agent_id=sarah_agent.id, actor=default_user)
    assert sum(1 for t in agent_state.tools if t.tool_type == ToolType.LETTA_FILES_CORE) == 1
    # Filling in the missing ones should yield the complete file-tool set.
    updated = await server.agent_manager.attach_missing_files_tools_async(agent_state=agent_state, actor=default_user)
    assert {t.name for t in updated.tools if t.tool_type == ToolType.LETTA_FILES_CORE} == set(FILES_TOOLS)
    # No tool appears twice.
    ids = [t.id for t in updated.tools]
    assert len(ids) == len(set(ids))
@pytest.mark.asyncio
async def test_attach_missing_files_tools_async_idempotent(server: SyncServer, sarah_agent, default_user):
    """Test that attach_missing_files_tools is idempotent."""
    await server.tool_manager.upsert_base_tools_async(actor=default_user, allowed_types={ToolType.LETTA_FILES_CORE})
    agent_state = await server.agent_manager.get_agent_by_id_async(agent_id=sarah_agent.id, actor=default_user)
    # First call attaches whatever is missing.
    once = await server.agent_manager.attach_missing_files_tools_async(agent_state=agent_state, actor=default_user)
    count_after_first = len(once.tools)
    # Second call must change nothing.
    twice = await server.agent_manager.attach_missing_files_tools_async(agent_state=once, actor=default_user)
    assert len(twice.tools) == count_after_first
    # The full file-tool set is still attached.
    assert {t.name for t in twice.tools if t.tool_type == ToolType.LETTA_FILES_CORE} == set(FILES_TOOLS)
@pytest.mark.asyncio
async def test_detach_all_files_tools_async(server: SyncServer, sarah_agent, default_user):
    """Test detaching all file tools from an agent."""
    await server.tool_manager.upsert_base_tools_async(actor=default_user, allowed_types={ToolType.LETTA_FILES_CORE})
    agent_state = await server.agent_manager.get_agent_by_id_async(agent_id=sarah_agent.id, actor=default_user)
    agent_state = await server.agent_manager.attach_missing_files_tools_async(agent_state=agent_state, actor=default_user)
    # Sanity: the full file-tool set is attached before detaching.
    n_file_tools = sum(1 for t in agent_state.tools if t.tool_type == ToolType.LETTA_FILES_CORE)
    assert n_file_tools == len(FILES_TOOLS)
    updated = await server.agent_manager.detach_all_files_tools_async(agent_state=agent_state, actor=default_user)
    # All file tools are gone.
    assert sum(1 for t in updated.tools if t.tool_type == ToolType.LETTA_FILES_CORE) == 0
    # The state was modified in place (no DB reload): same id, reduced count.
    assert updated.id == agent_state.id
    assert len(updated.tools) == len(agent_state.tools) - n_file_tools
@pytest.mark.asyncio
async def test_detach_all_files_tools_async_empty(server: SyncServer, sarah_agent, default_user):
    """Test detaching all file tools when no file tools are attached."""
    agent_state = await server.agent_manager.get_agent_by_id_async(agent_id=sarah_agent.id, actor=default_user)
    baseline = len(agent_state.tools)
    # Precondition: the agent carries no file tools.
    assert not any(t.tool_type == ToolType.LETTA_FILES_CORE for t in agent_state.tools)
    # Detaching with nothing to remove must be a no-op.
    updated = await server.agent_manager.detach_all_files_tools_async(agent_state=agent_state, actor=default_user)
    assert len(updated.tools) == baseline
    assert updated == agent_state  # no changes, so the same state comes back
@pytest.mark.asyncio
async def test_detach_all_files_tools_async_with_other_tools(server: SyncServer, sarah_agent, print_tool, default_user):
    """Test detaching all file tools preserves non-file tools."""
    await server.tool_manager.upsert_base_tools_async(actor=default_user, allowed_types={ToolType.LETTA_FILES_CORE})
    # Attach one non-file tool plus the full set of file tools.
    await server.agent_manager.attach_tool_async(agent_id=sarah_agent.id, tool_id=print_tool.id, actor=default_user)
    agent_state = await server.agent_manager.get_agent_by_id_async(agent_id=sarah_agent.id, actor=default_user)
    agent_state = await server.agent_manager.attach_missing_files_tools_async(agent_state=agent_state, actor=default_user)
    file_tools = [t for t in agent_state.tools if t.tool_type == ToolType.LETTA_FILES_CORE]
    assert len(file_tools) == len(FILES_TOOLS)
    assert any(t.id == print_tool.id for t in agent_state.tools)
    # Detach all file tools; the print tool must survive as the sole tool.
    updated = await server.agent_manager.detach_all_files_tools_async(agent_state=agent_state, actor=default_user)
    assert not [t for t in updated.tools if t.tool_type == ToolType.LETTA_FILES_CORE]
    assert [t.id for t in updated.tools] == [print_tool.id]
@pytest.mark.asyncio
async def test_detach_all_files_tools_async_idempotent(server: SyncServer, sarah_agent, default_user):
    """Test that detach_all_files_tools is idempotent."""
    await server.tool_manager.upsert_base_tools_async(actor=default_user, allowed_types={ToolType.LETTA_FILES_CORE})
    agent_state = await server.agent_manager.get_agent_by_id_async(agent_id=sarah_agent.id, actor=default_user)
    agent_state = await server.agent_manager.attach_missing_files_tools_async(agent_state=agent_state, actor=default_user)
    # First detach removes every file tool.
    agent_state = await server.agent_manager.detach_all_files_tools_async(agent_state=agent_state, actor=default_user)
    assert not any(t.tool_type == ToolType.LETTA_FILES_CORE for t in agent_state.tools)
    remaining = len(agent_state.tools)
    # Second detach is a no-op: still no file tools, same count.
    final_state = await server.agent_manager.detach_all_files_tools_async(agent_state=agent_state, actor=default_user)
    assert not any(t.tool_type == ToolType.LETTA_FILES_CORE for t in final_state.tools)
    assert len(final_state.tools) == remaining
@pytest.mark.asyncio
async def test_attach_tool_with_default_requires_approval(server: SyncServer, sarah_agent, bash_tool, default_user):
    """Test that attaching a tool with default requires_approval adds associated tool rule."""
    am = server.agent_manager
    # Attach twice: the first pass creates the rule, the second checks
    # idempotence (no duplicate tool attachment or approval rule).
    for _ in range(2):
        await am.attach_tool_async(agent_id=sarah_agent.id, tool_id=bash_tool.id, actor=default_user)
        agent = await am.get_agent_by_id_async(agent_id=sarah_agent.id, actor=default_user)
        assert sum(1 for t in agent.tools if t.id == bash_tool.id) == 1
        rules = [r for r in agent.tool_rules if r.tool_name == bash_tool.name]
        assert len(rules) == 1
        assert rules[0].type == "requires_approval"
@pytest.mark.asyncio
async def test_attach_tool_with_default_requires_approval_on_creation(server: SyncServer, bash_tool, default_user):
    """Test that attaching a tool with default requires_approval adds associated tool rule."""
    am = server.agent_manager

    def approval_rules(agent_state):
        # Approval rules associated with the bash tool on this agent state.
        return [r for r in agent_state.tool_rules if r.tool_name == bash_tool.name]

    # Creating the agent with the tool should also create the approval rule.
    agent = await am.create_agent_async(
        agent_create=CreateAgent(
            name="agent11",
            agent_type="memgpt_v2_agent",
            llm_config=LLMConfig.default_config("gpt-4o-mini"),
            embedding_config=EmbeddingConfig.default_config(provider="openai"),
            tools=[bash_tool.name],
            include_base_tools=False,
        ),
        actor=default_user,
    )
    assert bash_tool.id in [t.id for t in agent.tools]
    rules = approval_rules(agent)
    assert len(rules) == 1
    assert rules[0].type == "requires_approval"

    # Re-attaching must not duplicate the tool or the rule.
    await am.attach_tool_async(agent_id=agent.id, tool_id=bash_tool.id, actor=default_user)
    agent = await am.get_agent_by_id_async(agent_id=agent.id, actor=default_user)
    assert sum(1 for t in agent.tools if t.id == bash_tool.id) == 1
    rules = approval_rules(agent)
    assert len(rules) == 1
    assert rules[0].type == "requires_approval"

    # Turning approval off removes the rule but keeps the tool attached.
    await am.modify_approvals_async(agent_id=agent.id, tool_name=bash_tool.name, requires_approval=False, actor=default_user)
    agent = await am.get_agent_by_id_async(agent_id=agent.id, actor=default_user)
    assert sum(1 for t in agent.tools if t.id == bash_tool.id) == 1
    assert len(approval_rules(agent)) == 0

    # Turning approval back on restores the rule.
    await am.modify_approvals_async(agent_id=agent.id, tool_name=bash_tool.name, requires_approval=True, actor=default_user)
    agent = await am.get_agent_by_id_async(agent_id=agent.id, actor=default_user)
    assert sum(1 for t in agent.tools if t.id == bash_tool.id) == 1
    rules = approval_rules(agent)
    assert len(rules) == 1
    assert rules[0].type == "requires_approval"
# ======================================================================================================================
# ToolManager Tests
# ======================================================================================================================
@pytest.mark.asyncio
async def test_create_tool(server: SyncServer, print_tool, default_user, default_organization):
    """The fixture-created tool records its creator and is typed as a custom tool."""
    assert print_tool.created_by_id == default_user.id
    assert print_tool.tool_type == ToolType.CUSTOM
@pytest.mark.asyncio
async def test_create_mcp_tool(server: SyncServer, mcp_tool, default_user, default_organization):
    """The fixture-created MCP tool carries its server name and id in metadata."""
    assert mcp_tool.created_by_id == default_user.id
    assert mcp_tool.tool_type == ToolType.EXTERNAL_MCP
    mcp_meta = mcp_tool.metadata_[MCP_TOOL_TAG_NAME_PREFIX]
    assert mcp_meta["server_name"] == "test"
    assert mcp_meta["server_id"] == "test-server-id"
# Test should work with both SQLite and PostgreSQL
@pytest.mark.asyncio
async def test_create_tool_duplicate_name(server: SyncServer, print_tool, default_user, default_organization):
    """Creating a second tool with an existing name must violate the unique constraint."""
    duplicate = PydanticTool(**print_tool.model_dump(exclude=["id"]))
    with pytest.raises(UniqueConstraintViolationError):
        await server.tool_manager.create_tool_async(duplicate, actor=default_user)
@pytest.mark.asyncio
async def test_create_tool_requires_approval(server: SyncServer, bash_tool, default_user, default_organization):
    """The bash tool fixture is created with default_requires_approval enabled."""
    assert bash_tool.created_by_id == default_user.id
    assert bash_tool.tool_type == ToolType.CUSTOM
    # `is True` (not `== True`) pins the value to the boolean True rather than
    # accepting any truthy value (fixes flake8 E712 as well).
    assert bash_tool.default_requires_approval is True
@pytest.mark.asyncio
async def test_get_tool_by_id(server: SyncServer, print_tool, default_user):
    """Fetching a tool by id returns a record matching the created fixture."""
    fetched = await server.tool_manager.get_tool_by_id_async(print_tool.id, actor=default_user)
    # Every mirrored attribute must round-trip through the manager.
    for attr in ("id", "name", "description", "tags", "metadata_", "source_code", "source_type"):
        assert getattr(fetched, attr) == getattr(print_tool, attr)
    assert fetched.tool_type == ToolType.CUSTOM
@pytest.mark.asyncio
async def test_get_tool_with_actor(server: SyncServer, print_tool, default_user):
    """Fetching a tool by name under an actor returns the matching record."""
    fetched = await server.tool_manager.get_tool_by_name_async(print_tool.name, actor=default_user)
    assert fetched.created_by_id == default_user.id
    # Every mirrored attribute must round-trip through the manager.
    for attr in ("id", "name", "description", "tags", "source_code", "source_type"):
        assert getattr(fetched, attr) == getattr(print_tool, attr)
    assert fetched.tool_type == ToolType.CUSTOM
@pytest.mark.asyncio
async def test_list_tools(server: SyncServer, print_tool, default_user):
    """Listing tools (without upserting base tools) returns only the fixture tool."""
    tools = await server.tool_manager.list_tools_async(actor=default_user, upsert_base_tools=False)
    assert len(tools) == 1
    assert tools[0].id == print_tool.id
@pytest.mark.asyncio
async def test_list_tools_with_tool_types(server: SyncServer, default_user):
    """Test filtering tools by tool_types parameter."""

    # NOTE: the nested tool functions are kept intact — their source text is
    # captured by parse_source_code and stored on the created tools.
    def calculator_tool(a: int, b: int) -> int:
        """Add two numbers.
        Args:
            a: First number
            b: Second number
        Returns:
            Sum of a and b
        """
        return a + b

    def weather_tool(city: str) -> str:
        """Get weather for a city.
        Args:
            city: Name of the city
        Returns:
            Weather information
        """
        return f"Weather in {city}"

    async def make_custom(**kwargs):
        # Build a CUSTOM tool, generate its schema, and persist it.
        tool = PydanticTool(tool_type=ToolType.CUSTOM, source_type="python", **kwargs)
        tool.json_schema = generate_schema_for_tool_creation(tool)
        return await server.tool_manager.create_or_update_tool_async(tool, actor=default_user)

    await make_custom(name="calculator_tool", description="Math tool", source_code=parse_source_code(calculator_tool))
    # The second tool is intentionally created without an explicit name.
    await make_custom(description="Weather tool", source_code=parse_source_code(weather_tool))

    # Filter by a single tool type.
    tools = await server.tool_manager.list_tools_async(actor=default_user, tool_types=[ToolType.CUSTOM.value], upsert_base_tools=False)
    assert len(tools) == 2
    assert all(t.tool_type == ToolType.CUSTOM for t in tools)

    # Filter by multiple tool types — same result, only CUSTOM tools exist.
    tools = await server.tool_manager.list_tools_async(
        actor=default_user, tool_types=[ToolType.CUSTOM.value, ToolType.LETTA_CORE.value], upsert_base_tools=False
    )
    assert len(tools) == 2

    # A type with no members yields an empty list.
    tools = await server.tool_manager.list_tools_async(
        actor=default_user, tool_types=[ToolType.EXTERNAL_MCP.value], upsert_base_tools=False
    )
    assert len(tools) == 0
@pytest.mark.asyncio
async def test_list_tools_with_exclude_tool_types(server: SyncServer, default_user, print_tool):
    """Verify that exclude_tool_types removes matching tools from list results."""
    # the print_tool fixture already supplies one CUSTOM tool; add a second one
    def special_tool(msg: str) -> str:
        """Special tool.
        Args:
            msg: Message to return
        Returns:
            The message
        """
        return msg
    tool_stub = PydanticTool(
        source_type="python",
        source_code=parse_source_code(special_tool),
        name="special_tool",
        description="Special tool",
        tool_type=ToolType.CUSTOM,
    )
    tool_stub.json_schema = generate_schema_for_tool_creation(tool_stub)
    await server.tool_manager.create_or_update_tool_async(tool_stub, actor=default_user)
    # excluding EXTERNAL_MCP keeps both CUSTOM tools visible (none are MCP)
    listed = await server.tool_manager.list_tools_async(
        actor=default_user, exclude_tool_types=[ToolType.EXTERNAL_MCP.value], upsert_base_tools=False
    )
    assert len(listed) == 2  # print_tool and special_tool
    # excluding CUSTOM filters out everything we created
    listed = await server.tool_manager.list_tools_async(
        actor=default_user, exclude_tool_types=[ToolType.CUSTOM.value], upsert_base_tools=False
    )
    assert len(listed) == 0
@pytest.mark.asyncio
async def test_list_tools_with_names(server: SyncServer, default_user):
    """Test filtering tools by names parameter.

    Creates three tools, then lists with a single name, multiple names,
    and a non-existent name, checking exact-name matching each time.
    """
    # create tools with specific names
    def alpha_tool() -> str:
        """Alpha tool.
        Returns:
            Alpha string
        """
        return "alpha"
    def beta_tool() -> str:
        """Beta tool.
        Returns:
            Beta string
        """
        return "beta"
    def gamma_tool() -> str:
        """Gamma tool.
        Returns:
            Gamma string
        """
        return "gamma"
    alpha = PydanticTool(name="alpha_tool", description="Alpha", source_code=parse_source_code(alpha_tool), source_type="python")
    alpha.json_schema = generate_schema_for_tool_creation(alpha)
    alpha = await server.tool_manager.create_or_update_tool_async(alpha, actor=default_user)
    beta = PydanticTool(name="beta_tool", description="Beta", source_code=parse_source_code(beta_tool), source_type="python")
    beta.json_schema = generate_schema_for_tool_creation(beta)
    beta = await server.tool_manager.create_or_update_tool_async(beta, actor=default_user)
    gamma = PydanticTool(name="gamma_tool", description="Gamma", source_code=parse_source_code(gamma_tool), source_type="python")
    gamma.json_schema = generate_schema_for_tool_creation(gamma)
    gamma = await server.tool_manager.create_or_update_tool_async(gamma, actor=default_user)
    # test filtering by single name
    tools = await server.tool_manager.list_tools_async(actor=default_user, names=["alpha_tool"], upsert_base_tools=False)
    assert len(tools) == 1
    assert tools[0].name == "alpha_tool"
    # test filtering by multiple names
    tools = await server.tool_manager.list_tools_async(actor=default_user, names=["alpha_tool", "gamma_tool"], upsert_base_tools=False)
    assert len(tools) == 2
    assert set(t.name for t in tools) == {"alpha_tool", "gamma_tool"}
    # test filtering by non-existent name
    tools = await server.tool_manager.list_tools_async(actor=default_user, names=["non_existent_tool"], upsert_base_tools=False)
    assert len(tools) == 0
@pytest.mark.asyncio
async def test_list_tools_with_tool_ids(server: SyncServer, default_user):
    """Test filtering tools by tool_ids parameter.

    Creates three tools, then lists with one id, two ids, and an id that
    does not exist, checking exact-id matching each time.
    """
    # create multiple tools
    def tool1() -> str:
        """Tool 1.
        Returns:
            String 1
        """
        return "1"
    def tool2() -> str:
        """Tool 2.
        Returns:
            String 2
        """
        return "2"
    def tool3() -> str:
        """Tool 3.
        Returns:
            String 3
        """
        return "3"
    t1 = PydanticTool(name="tool1", description="First", source_code=parse_source_code(tool1), source_type="python")
    t1.json_schema = generate_schema_for_tool_creation(t1)
    t1 = await server.tool_manager.create_or_update_tool_async(t1, actor=default_user)
    t2 = PydanticTool(name="tool2", description="Second", source_code=parse_source_code(tool2), source_type="python")
    t2.json_schema = generate_schema_for_tool_creation(t2)
    t2 = await server.tool_manager.create_or_update_tool_async(t2, actor=default_user)
    t3 = PydanticTool(name="tool3", description="Third", source_code=parse_source_code(tool3), source_type="python")
    t3.json_schema = generate_schema_for_tool_creation(t3)
    t3 = await server.tool_manager.create_or_update_tool_async(t3, actor=default_user)
    # test filtering by single id
    tools = await server.tool_manager.list_tools_async(actor=default_user, tool_ids=[t1.id], upsert_base_tools=False)
    assert len(tools) == 1
    assert tools[0].id == t1.id
    # test filtering by multiple ids
    tools = await server.tool_manager.list_tools_async(actor=default_user, tool_ids=[t1.id, t3.id], upsert_base_tools=False)
    assert len(tools) == 2
    assert set(t.id for t in tools) == {t1.id, t3.id}
    # test filtering by non-existent id
    tools = await server.tool_manager.list_tools_async(actor=default_user, tool_ids=["non-existent-id"], upsert_base_tools=False)
    assert len(tools) == 0
@pytest.mark.asyncio
async def test_list_tools_with_search(server: SyncServer, default_user):
    """Test searching tools by partial name match.

    Based on the assertions below, search is expected to match substrings
    of the tool name and to be case-insensitive.
    """
    # create tools with searchable names
    def calculator_add() -> str:
        """Calculator add.
        Returns:
            Add operation
        """
        return "add"
    def calculator_subtract() -> str:
        """Calculator subtract.
        Returns:
            Subtract operation
        """
        return "subtract"
    def weather_forecast() -> str:
        """Weather forecast.
        Returns:
            Forecast data
        """
        return "forecast"
    calc_add = PydanticTool(
        name="calculator_add", description="Add numbers", source_code=parse_source_code(calculator_add), source_type="python"
    )
    calc_add.json_schema = generate_schema_for_tool_creation(calc_add)
    calc_add = await server.tool_manager.create_or_update_tool_async(calc_add, actor=default_user)
    calc_sub = PydanticTool(
        name="calculator_subtract", description="Subtract numbers", source_code=parse_source_code(calculator_subtract), source_type="python"
    )
    calc_sub.json_schema = generate_schema_for_tool_creation(calc_sub)
    calc_sub = await server.tool_manager.create_or_update_tool_async(calc_sub, actor=default_user)
    weather = PydanticTool(
        name="weather_forecast", description="Weather", source_code=parse_source_code(weather_forecast), source_type="python"
    )
    weather.json_schema = generate_schema_for_tool_creation(weather)
    weather = await server.tool_manager.create_or_update_tool_async(weather, actor=default_user)
    # test searching for "calculator" (should find both calculator tools)
    tools = await server.tool_manager.list_tools_async(actor=default_user, search="calculator", upsert_base_tools=False)
    assert len(tools) == 2
    assert all("calculator" in t.name for t in tools)
    # test case-insensitive search
    tools = await server.tool_manager.list_tools_async(actor=default_user, search="CALCULATOR", upsert_base_tools=False)
    assert len(tools) == 2
    # test partial match
    tools = await server.tool_manager.list_tools_async(actor=default_user, search="calc", upsert_base_tools=False)
    assert len(tools) == 2
    # test search with no matches
    tools = await server.tool_manager.list_tools_async(actor=default_user, search="nonexistent", upsert_base_tools=False)
    assert len(tools) == 0
@pytest.mark.asyncio
async def test_list_tools_return_only_letta_tools(server: SyncServer, default_user):
    """return_only_letta_tools restricts listing to the built-in letta_* tools."""
    # seed the org with the built-in Letta tools so the filter has something to return
    await server.tool_manager.upsert_base_tools_async(actor=default_user)
    # add one user-defined tool that the filter should exclude
    def custom_tool() -> str:
        """Custom tool.
        Returns:
            Custom string
        """
        return "custom"
    user_tool = PydanticTool(
        name="custom_tool",
        description="Custom",
        source_code=parse_source_code(custom_tool),
        source_type="python",
        tool_type=ToolType.CUSTOM,
    )
    user_tool.json_schema = generate_schema_for_tool_creation(user_tool)
    await server.tool_manager.create_or_update_tool_async(user_tool, actor=default_user)
    # unfiltered listing includes the custom tool alongside the Letta tools
    everything = await server.tool_manager.list_tools_async(actor=default_user, return_only_letta_tools=False, upsert_base_tools=False)
    assert len(everything) > 1
    assert any(t.name == "custom_tool" for t in everything)
    # filtered listing keeps only letta_* tool types and drops the custom tool
    letta_only = await server.tool_manager.list_tools_async(actor=default_user, return_only_letta_tools=True, upsert_base_tools=False)
    assert len(letta_only) > 0
    assert all(t.tool_type.value.startswith("letta_") for t in letta_only)
    assert all(t.name != "custom_tool" for t in letta_only)
@pytest.mark.asyncio
async def test_list_tools_combined_filters(server: SyncServer, default_user):
    """Test combining multiple filters.

    Exercises search + tool_types, names + tool_ids, and
    search + exclude_tool_types applied together (AND semantics, per the
    expected counts below).
    """
    # create various tools
    def calc_add() -> str:
        """Calculator add.
        Returns:
            Add result
        """
        return "add"
    def calc_multiply() -> str:
        """Calculator multiply.
        Returns:
            Multiply result
        """
        return "multiply"
    def weather_tool() -> str:
        """Weather tool.
        Returns:
            Weather data
        """
        return "weather"
    calc1 = PydanticTool(
        name="calc_add", description="Add", source_code=parse_source_code(calc_add), source_type="python", tool_type=ToolType.CUSTOM
    )
    calc1.json_schema = generate_schema_for_tool_creation(calc1)
    calc1 = await server.tool_manager.create_or_update_tool_async(calc1, actor=default_user)
    calc2 = PydanticTool(
        # name omitted here — presumably derived from the generated schema; TODO confirm
        description="Multiply",
        source_code=parse_source_code(calc_multiply),
        source_type="python",
        tool_type=ToolType.CUSTOM,
    )
    calc2.json_schema = generate_schema_for_tool_creation(calc2)
    calc2 = await server.tool_manager.create_or_update_tool_async(calc2, actor=default_user)
    weather = PydanticTool(
        name="weather_tool",
        description="Weather",
        source_code=parse_source_code(weather_tool),
        source_type="python",
        tool_type=ToolType.CUSTOM,
    )
    weather.json_schema = generate_schema_for_tool_creation(weather)
    weather = await server.tool_manager.create_or_update_tool_async(weather, actor=default_user)
    # combine search with tool_types
    tools = await server.tool_manager.list_tools_async(
        actor=default_user, search="calc", tool_types=[ToolType.CUSTOM.value], upsert_base_tools=False
    )
    assert len(tools) == 2
    assert all("calc" in t.name and t.tool_type == ToolType.CUSTOM for t in tools)
    # combine names with tool_ids
    tools = await server.tool_manager.list_tools_async(actor=default_user, names=["calc_add"], tool_ids=[calc1.id], upsert_base_tools=False)
    assert len(tools) == 1
    assert tools[0].id == calc1.id
    # combine search with exclude_tool_types
    tools = await server.tool_manager.list_tools_async(
        actor=default_user, search="cal", exclude_tool_types=[ToolType.EXTERNAL_MCP.value], upsert_base_tools=False
    )
    assert len(tools) == 2
@pytest.mark.asyncio
async def test_count_tools_async(server: SyncServer, default_user):
    """Test counting tools with various filters.

    Creates two CUSTOM tools plus the full Letta base-tool set, then checks
    count_tools_async with no filter, tool_types, search, names,
    return_only_letta_tools, and exclude_tool_types.
    """
    # create multiple tools
    def tool_a() -> str:
        """Tool A.
        Returns:
            String a
        """
        return "a"
    def tool_b() -> str:
        """Tool B.
        Returns:
            String b
        """
        return "b"
    def search_tool() -> str:
        """Search tool.
        Returns:
            Search result
        """
        return "search"
    ta = PydanticTool(
        name="tool_a", description="A", source_code=parse_source_code(tool_a), source_type="python", tool_type=ToolType.CUSTOM
    )
    ta.json_schema = generate_schema_for_tool_creation(ta)
    ta = await server.tool_manager.create_or_update_tool_async(ta, actor=default_user)
    tb = PydanticTool(
        name="tool_b", description="B", source_code=parse_source_code(tool_b), source_type="python", tool_type=ToolType.CUSTOM
    )
    tb.json_schema = generate_schema_for_tool_creation(tb)
    tb = await server.tool_manager.create_or_update_tool_async(tb, actor=default_user)
    # upsert base tools to ensure we have Letta tools for counting
    await server.tool_manager.upsert_base_tools_async(actor=default_user)
    # count all tools (should have 2 custom tools + letta tools)
    count = await server.tool_manager.count_tools_async(actor=default_user)
    assert count > 2  # at least our 2 custom tools + letta tools
    # count with tool_types filter
    count = await server.tool_manager.count_tools_async(actor=default_user, tool_types=[ToolType.CUSTOM.value])
    assert count == 2  # only our custom tools
    # count with search filter
    count = await server.tool_manager.count_tools_async(actor=default_user, search="tool")
    # should at least find our 2 tools (tool_a, tool_b)
    assert count >= 2
    # count with names filter
    count = await server.tool_manager.count_tools_async(actor=default_user, names=["tool_a", "tool_b"])
    assert count == 2
    # count with return_only_letta_tools
    count = await server.tool_manager.count_tools_async(actor=default_user, return_only_letta_tools=True)
    assert count > 0  # should have letta tools
    # count with exclude_tool_types (exclude all letta tool types)
    count = await server.tool_manager.count_tools_async(
        actor=default_user,
        exclude_tool_types=[
            ToolType.LETTA_CORE.value,
            ToolType.LETTA_MEMORY_CORE.value,
            ToolType.LETTA_MULTI_AGENT_CORE.value,
            ToolType.LETTA_SLEEPTIME_CORE.value,
            ToolType.LETTA_VOICE_SLEEPTIME_CORE.value,
            ToolType.LETTA_BUILTIN.value,
            ToolType.LETTA_FILES_CORE.value,
        ],
    )
    assert count == 2  # only our custom tools
@pytest.mark.asyncio
async def test_update_tool_by_id(server: SyncServer, print_tool, default_user):
    """Updating a tool by id persists new fields; updated_tool_type can override the tool type."""
    new_description = "updated_description"
    new_char_limit = 10000
    payload = ToolUpdate(description=new_description, return_char_limit=new_char_limit)
    # apply the update, then re-read the tool to confirm persistence
    await server.tool_manager.update_tool_by_id_async(print_tool.id, payload, actor=default_user)
    refreshed = await server.tool_manager.get_tool_by_id_async(print_tool.id, actor=default_user)
    assert refreshed.description == new_description
    assert refreshed.return_char_limit == new_char_limit
    assert refreshed.tool_type == ToolType.CUSTOM
    # Dangerous: updated_tool_type bypasses the usual safety and rewrites the tool type
    await server.tool_manager.update_tool_by_id_async(
        print_tool.id, payload, actor=default_user, updated_tool_type=ToolType.EXTERNAL_MCP
    )
    refreshed = await server.tool_manager.get_tool_by_id_async(print_tool.id, actor=default_user)
    assert refreshed.tool_type == ToolType.EXTERNAL_MCP
# @pytest.mark.asyncio
# async def test_update_tool_source_code_refreshes_schema_and_name(server: SyncServer, print_tool, default_user):
# def counter_tool(counter: int):
# """
# Args:
# counter (int): The counter to count to.
#
# Returns:
# bool: If it successfully counted to the counter.
# """
# for c in range(counter):
# print(c)
#
# return True
#
# # Test begins
# og_json_schema = print_tool.json_schema
#
# source_code = parse_source_code(counter_tool)
#
# # Create a ToolUpdate object to modify the tool's source_code
# tool_update = ToolUpdate(source_code=source_code)
#
# # Update the tool using the manager method
# await server.tool_manager.update_tool_by_id_async(print_tool.id, tool_update, actor=default_user)
#
# # Fetch the updated tool to verify the changes
# updated_tool = await server.tool_manager.get_tool_by_id_async(print_tool.id, actor=default_user)
#
# # Assertions to check if the update was successful, and json_schema is updated as well
# assert updated_tool.source_code == source_code
# assert updated_tool.json_schema != og_json_schema
#
# new_schema = derive_openai_json_schema(source_code=updated_tool.source_code)
# assert updated_tool.json_schema == new_schema
# assert updated_tool.tool_type == ToolType.CUSTOM
# @pytest.mark.asyncio
# async def test_update_tool_source_code_refreshes_schema_only(server: SyncServer, print_tool, default_user):
# def counter_tool(counter: int):
# """
# Args:
# counter (int): The counter to count to.
#
# Returns:
# bool: If it successfully counted to the counter.
# """
# for c in range(counter):
# print(c)
#
# return True
#
# # Test begins
# og_json_schema = print_tool.json_schema
#
# source_code = parse_source_code(counter_tool)
# name = "counter_tool"
#
# # Create a ToolUpdate object to modify the tool's source_code
# tool_update = ToolUpdate(source_code=source_code)
#
# # Update the tool using the manager method
# await server.tool_manager.update_tool_by_id_async(print_tool.id, tool_update, actor=default_user)
#
# # Fetch the updated tool to verify the changes
# updated_tool = await server.tool_manager.get_tool_by_id_async(print_tool.id, actor=default_user)
#
# # Assertions to check if the update was successful, and json_schema is updated as well
# assert updated_tool.source_code == source_code
# assert updated_tool.json_schema != og_json_schema
#
# new_schema = derive_openai_json_schema(source_code=updated_tool.source_code, name=updated_tool.name)
# assert updated_tool.json_schema == new_schema
# assert updated_tool.name == name
# assert updated_tool.tool_type == ToolType.CUSTOM
@pytest.mark.asyncio
async def test_update_tool_multi_user(server: SyncServer, print_tool, default_user, other_user):
    """An update performed by another user records that user as last updater while the creator is unchanged."""
    # update the tool as other_user, not as the creator
    await server.tool_manager.update_tool_by_id_async(
        print_tool.id, ToolUpdate(description="updated_description"), actor=other_user
    )
    # re-read and confirm the audit fields reflect both users
    refreshed = await server.tool_manager.get_tool_by_id_async(print_tool.id, actor=default_user)
    assert refreshed.created_by_id == default_user.id
    assert refreshed.last_updated_by_id == other_user.id
@pytest.mark.asyncio
async def test_delete_tool_by_id(server: SyncServer, print_tool, default_user):
    """Deleting the only tool leaves the listing empty."""
    await server.tool_manager.delete_tool_by_id_async(print_tool.id, actor=default_user)
    remaining = await server.tool_manager.list_tools_async(actor=default_user, upsert_base_tools=False)
    assert remaining == []
@pytest.mark.asyncio
async def test_upsert_base_tools(server: SyncServer, default_user):
    """Upserting base tools creates the full Letta tool set exactly once.

    Also checks that each returned tool carries the tool_type matching the
    set it belongs to, has no source_code, and has a json_schema.
    """
    tools = await server.tool_manager.upsert_base_tools_async(actor=default_user)
    # Calculate expected tools accounting for production filtering
    if settings.environment == "prod":
        expected_tool_names = sorted(LETTA_TOOL_SET - set(LOCAL_ONLY_MULTI_AGENT_TOOLS))
    else:
        expected_tool_names = sorted(LETTA_TOOL_SET)
    assert sorted([t.name for t in tools]) == expected_tool_names
    # Call it again to make sure it doesn't create duplicates
    tools = await server.tool_manager.upsert_base_tools_async(actor=default_user)
    assert sorted([t.name for t in tools]) == expected_tool_names
    # Confirm that the return tools have no source_code, but a json_schema
    for t in tools:
        # NOTE(review): membership is checked in this fixed order, so a name in
        # more than one set is classified by the first matching branch
        if t.name in BASE_TOOLS:
            assert t.tool_type == ToolType.LETTA_CORE
        elif t.name in BASE_MEMORY_TOOLS:
            assert t.tool_type == ToolType.LETTA_MEMORY_CORE
        elif t.name in MULTI_AGENT_TOOLS:
            assert t.tool_type == ToolType.LETTA_MULTI_AGENT_CORE
        elif t.name in BASE_SLEEPTIME_TOOLS:
            assert t.tool_type == ToolType.LETTA_SLEEPTIME_CORE
        elif t.name in BASE_VOICE_SLEEPTIME_TOOLS:
            assert t.tool_type == ToolType.LETTA_VOICE_SLEEPTIME_CORE
        elif t.name in BASE_VOICE_SLEEPTIME_CHAT_TOOLS:
            assert t.tool_type == ToolType.LETTA_VOICE_SLEEPTIME_CORE
        elif t.name in BUILTIN_TOOLS:
            assert t.tool_type == ToolType.LETTA_BUILTIN
        elif t.name in FILES_TOOLS:
            assert t.tool_type == ToolType.LETTA_FILES_CORE
        else:
            pytest.fail(f"The tool name is unrecognized as a base tool: {t.name}")
        assert t.source_code is None
        assert t.json_schema
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "tool_type,expected_names",
    [
        (ToolType.LETTA_CORE, BASE_TOOLS),
        (ToolType.LETTA_MEMORY_CORE, BASE_MEMORY_TOOLS),
        (ToolType.LETTA_MULTI_AGENT_CORE, MULTI_AGENT_TOOLS),
        (ToolType.LETTA_SLEEPTIME_CORE, BASE_SLEEPTIME_TOOLS),
        (ToolType.LETTA_VOICE_SLEEPTIME_CORE, sorted(set(BASE_VOICE_SLEEPTIME_TOOLS + BASE_VOICE_SLEEPTIME_CHAT_TOOLS) - {"send_message"})),
        (ToolType.LETTA_BUILTIN, BUILTIN_TOOLS),
        (ToolType.LETTA_FILES_CORE, FILES_TOOLS),
    ],
)
async def test_upsert_filtered_base_tools(server: SyncServer, default_user, tool_type, expected_names):
    """Upserting with allowed_types={tool_type} creates exactly that type's tool set.

    Multi-agent tools are additionally filtered in production, so the expected
    list drops LOCAL_ONLY_MULTI_AGENT_TOOLS when settings.environment == "prod".
    """
    tools = await server.tool_manager.upsert_base_tools_async(actor=default_user, allowed_types={tool_type})
    tool_names = sorted([t.name for t in tools])
    # Adjust expected names for multi-agent tools in production
    if tool_type == ToolType.LETTA_MULTI_AGENT_CORE and settings.environment == "prod":
        expected_sorted = sorted(set(expected_names) - set(LOCAL_ONLY_MULTI_AGENT_TOOLS))
    else:
        expected_sorted = sorted(expected_names)
    assert tool_names == expected_sorted
    assert all(t.tool_type == tool_type for t in tools)
@pytest.mark.asyncio
async def test_upsert_multiple_tool_types(server: SyncServer, default_user):
    """Upserting with several allowed types yields exactly the union of those tool sets."""
    allowed = {ToolType.LETTA_CORE, ToolType.LETTA_BUILTIN, ToolType.LETTA_FILES_CORE}
    tools = await server.tool_manager.upsert_base_tools_async(actor=default_user, allowed_types=allowed)
    tool_names = {t.name for t in tools}
    expected = set(BASE_TOOLS + BUILTIN_TOOLS + FILES_TOOLS)
    assert tool_names == expected
    assert all(t.tool_type in allowed for t in tools)
@pytest.mark.asyncio
async def test_upsert_base_tools_with_empty_type_filter(server: SyncServer, default_user):
    """An empty allowed_types set upserts nothing and returns an empty list."""
    tools = await server.tool_manager.upsert_base_tools_async(actor=default_user, allowed_types=set())
    assert tools == []
@pytest.mark.asyncio
async def test_bulk_upsert_tools_async(server: SyncServer, default_user):
    """Test bulk upserting multiple tools at once.

    Covers initial creation, re-upsert with modifications, the empty-list
    edge case, and auto-generation of a missing description from the
    function docstring.
    """
    # create multiple test tools
    tools_data = []
    for i in range(5):
        tool = PydanticTool(
            name=f"bulk_test_tool_{i}",
            description=f"Test tool {i} for bulk operations",
            tags=["bulk", "test"],
            source_code=f"def bulk_test_tool_{i}():\n '''Test tool {i} function'''\n return 'result_{i}'",
            source_type="python",
        )
        tools_data.append(tool)
    # initial bulk upsert - should create all tools
    created_tools = await server.tool_manager.bulk_upsert_tools_async(tools_data, default_user)
    assert len(created_tools) == 5
    assert all(t.name.startswith("bulk_test_tool_") for t in created_tools)
    assert all(t.description for t in created_tools)
    # verify all tools were created
    for i in range(5):
        tool = await server.tool_manager.get_tool_by_name_async(f"bulk_test_tool_{i}", default_user)
        assert tool is not None
        assert tool.description == f"Test tool {i} for bulk operations"
    # modify some tools and upsert again - should update existing tools
    tools_data[0].description = "Updated description for tool 0"
    tools_data[2].tags = ["bulk", "test", "updated"]
    updated_tools = await server.tool_manager.bulk_upsert_tools_async(tools_data, default_user)
    assert len(updated_tools) == 5
    # verify updates were applied
    tool_0 = await server.tool_manager.get_tool_by_name_async("bulk_test_tool_0", default_user)
    assert tool_0.description == "Updated description for tool 0"
    tool_2 = await server.tool_manager.get_tool_by_name_async("bulk_test_tool_2", default_user)
    assert "updated" in tool_2.tags
    # test with empty list
    empty_result = await server.tool_manager.bulk_upsert_tools_async([], default_user)
    assert empty_result == []
    # test with tools missing descriptions (should auto-generate from json schema)
    no_desc_tool = PydanticTool(
        name="no_description_tool",
        tags=["test"],
        source_code="def no_description_tool():\n '''This is a docstring description'''\n return 'result'",
        source_type="python",
    )
    result = await server.tool_manager.bulk_upsert_tools_async([no_desc_tool], default_user)
    assert len(result) == 1
    assert result[0].description is not None  # should be auto-generated from docstring
@pytest.mark.asyncio
async def test_bulk_upsert_tools_name_conflict(server: SyncServer, default_user):
    """Test bulk upserting tools handles name+org_id unique constraint correctly.

    A bulk upsert that reuses an existing tool name must update that tool in
    place (same id) rather than create a duplicate row.
    """
    # create a tool with a specific name
    original_tool = PydanticTool(
        name="unique_name_tool",
        description="Original description",
        tags=["original"],
        source_code="def unique_name_tool():\n '''Original function'''\n return 'original'",
        source_type="python",
    )
    # create it
    created = await server.tool_manager.create_tool_async(original_tool, default_user)
    original_id = created.id
    # now try to bulk upsert with same name but different id
    conflicting_tool = PydanticTool(
        name="unique_name_tool",  # same name
        description="Updated via bulk upsert",
        tags=["updated", "bulk"],
        source_code="def unique_name_tool():\n '''Updated function'''\n return 'updated'",
        source_type="python",
    )
    # bulk upsert should update the existing tool based on name conflict
    result = await server.tool_manager.bulk_upsert_tools_async([conflicting_tool], default_user)
    assert len(result) == 1
    assert result[0].name == "unique_name_tool"
    assert result[0].description == "Updated via bulk upsert"
    assert "updated" in result[0].tags
    assert "bulk" in result[0].tags
    # verify only one tool exists with this name
    all_tools = await server.tool_manager.list_tools_async(actor=default_user)
    tools_with_name = [t for t in all_tools if t.name == "unique_name_tool"]
    assert len(tools_with_name) == 1
    # the id should remain the same as the original
    assert tools_with_name[0].id == original_id
@pytest.mark.asyncio
async def test_bulk_upsert_tools_mixed_create_update(server: SyncServer, default_user):
    """Test bulk upserting with mix of new tools and updates to existing ones.

    Two of the payload entries match existing tools by name (and must update
    them in place), three are brand new, and one pre-existing tool is absent
    from the payload and must remain untouched.
    """
    # create some existing tools
    existing_tools = []
    for i in range(3):
        tool = PydanticTool(
            name=f"existing_tool_{i}",
            description=f"Existing tool {i}",
            tags=["existing"],
            source_code=f"def existing_tool_{i}():\n '''Existing {i}'''\n return 'existing_{i}'",
            source_type="python",
        )
        created = await server.tool_manager.create_tool_async(tool, default_user)
        existing_tools.append(created)
    # prepare bulk upsert with mix of updates and new tools
    bulk_tools = []
    # update existing tool 0 by name
    bulk_tools.append(
        PydanticTool(
            name="existing_tool_0",  # matches by name
            description="Updated existing tool 0",
            tags=["existing", "updated"],
            source_code="def existing_tool_0():\n '''Updated 0'''\n return 'updated_0'",
            source_type="python",
        )
    )
    # update existing tool 1 by name (since bulk upsert matches by name, not id)
    bulk_tools.append(
        PydanticTool(
            name="existing_tool_1",  # matches by name
            description="Updated existing tool 1",
            tags=["existing", "updated"],
            source_code="def existing_tool_1():\n '''Updated 1'''\n return 'updated_1'",
            source_type="python",
        )
    )
    # add completely new tools
    for i in range(3, 6):
        bulk_tools.append(
            PydanticTool(
                name=f"new_tool_{i}",
                description=f"New tool {i}",
                tags=["new"],
                source_code=f"def new_tool_{i}():\n '''New {i}'''\n return 'new_{i}'",
                source_type="python",
            )
        )
    # perform bulk upsert
    result = await server.tool_manager.bulk_upsert_tools_async(bulk_tools, default_user)
    assert len(result) == 5  # 2 updates + 3 new
    # verify updates
    tool_0 = await server.tool_manager.get_tool_by_name_async("existing_tool_0", default_user)
    assert tool_0.description == "Updated existing tool 0"
    assert "updated" in tool_0.tags
    assert tool_0.id == existing_tools[0].id  # id should remain same
    # verify tool 1 was updated
    tool_1 = await server.tool_manager.get_tool_by_id_async(existing_tools[1].id, default_user)
    assert tool_1.name == "existing_tool_1"  # name stays same
    assert tool_1.description == "Updated existing tool 1"
    assert "updated" in tool_1.tags
    # verify new tools were created
    for i in range(3, 6):
        new_tool = await server.tool_manager.get_tool_by_name_async(f"new_tool_{i}", default_user)
        assert new_tool is not None
        assert new_tool.description == f"New tool {i}"
        assert "new" in new_tool.tags
    # verify existing_tool_2 was not affected
    tool_2 = await server.tool_manager.get_tool_by_id_async(existing_tools[2].id, default_user)
    assert tool_2.name == "existing_tool_2"
    assert tool_2.description == "Existing tool 2"
    assert tool_2.tags == ["existing"]
@pytest.mark.asyncio
async def test_bulk_upsert_tools_override_existing_true(server: SyncServer, default_user):
    """Test bulk_upsert_tools_async with override_existing_tools=True (default behavior).

    A name-matching payload entry must overwrite the existing tool's fields
    while keeping the same tool id.
    """
    # create some existing tools
    existing_tool = PydanticTool(
        name="test_override_tool",
        description="Original description",
        tags=["original"],
        source_code="def test_override_tool():\n '''Original'''\n return 'original'",
        source_type="python",
    )
    created = await server.tool_manager.create_tool_async(existing_tool, default_user)
    original_id = created.id
    # prepare updated version of the tool
    updated_tool = PydanticTool(
        name="test_override_tool",
        description="Updated description",
        tags=["updated"],
        source_code="def test_override_tool():\n '''Updated'''\n return 'updated'",
        source_type="python",
    )
    # bulk upsert with override_existing_tools=True (default)
    result = await server.tool_manager.bulk_upsert_tools_async([updated_tool], default_user, override_existing_tools=True)
    assert len(result) == 1
    assert result[0].id == original_id  # id should remain the same
    assert result[0].description == "Updated description"  # description should be updated
    assert result[0].tags == ["updated"]  # tags should be updated
    # verify the tool was actually updated in the database
    fetched = await server.tool_manager.get_tool_by_id_async(original_id, default_user)
    assert fetched.description == "Updated description"
    assert fetched.tags == ["updated"]
@pytest.mark.asyncio
async def test_bulk_upsert_tools_override_existing_false(server: SyncServer, default_user):
    """Test bulk_upsert_tools_async with override_existing_tools=False (skip existing).

    A name-matching payload entry must be skipped: the existing tool is
    returned unmodified, with its original id, description, and tags.
    """
    # create some existing tools
    existing_tool = PydanticTool(
        name="test_no_override_tool",
        description="Original description",
        tags=["original"],
        source_code="def test_no_override_tool():\n '''Original'''\n return 'original'",
        source_type="python",
    )
    created = await server.tool_manager.create_tool_async(existing_tool, default_user)
    original_id = created.id
    # prepare updated version of the tool
    updated_tool = PydanticTool(
        name="test_no_override_tool",
        description="Should not be updated",
        tags=["should_not_update"],
        source_code="def test_no_override_tool():\n '''Should not update'''\n return 'should_not_update'",
        source_type="python",
    )
    # bulk upsert with override_existing_tools=False
    result = await server.tool_manager.bulk_upsert_tools_async([updated_tool], default_user, override_existing_tools=False)
    assert len(result) == 1
    assert result[0].id == original_id  # id should remain the same
    assert result[0].description == "Original description"  # description should NOT be updated
    assert result[0].tags == ["original"]  # tags should NOT be updated
    # verify the tool was NOT updated in the database
    fetched = await server.tool_manager.get_tool_by_id_async(original_id, default_user)
    assert fetched.description == "Original description"
    assert fetched.tags == ["original"]
@pytest.mark.asyncio
async def test_bulk_upsert_tools_override_mixed_scenario(server: SyncServer, default_user):
    """Test bulk_upsert_tools_async with override_existing_tools=False in mixed create/update scenario.

    Existing tools named in the payload are returned untouched while new
    names are still created.
    """
    # create some existing tools
    existing_tools = []
    for i in range(2):
        tool = PydanticTool(
            name=f"mixed_existing_{i}",
            description=f"Original {i}",
            tags=["original"],
            source_code=f"def mixed_existing_{i}():\n '''Original {i}'''\n return 'original_{i}'",
            source_type="python",
        )
        created = await server.tool_manager.create_tool_async(tool, default_user)
        existing_tools.append(created)
    # prepare bulk tools: 2 updates (that should be skipped) + 3 new creations
    bulk_tools = []
    # these should be skipped when override_existing_tools=False
    for i in range(2):
        bulk_tools.append(
            PydanticTool(
                name=f"mixed_existing_{i}",
                description=f"Should not update {i}",
                tags=["should_not_update"],
                source_code=f"def mixed_existing_{i}():\n '''Should not update {i}'''\n return 'should_not_update_{i}'",
                source_type="python",
            )
        )
    # these should be created
    for i in range(3):
        bulk_tools.append(
            PydanticTool(
                name=f"mixed_new_{i}",
                description=f"New tool {i}",
                tags=["new"],
                source_code=f"def mixed_new_{i}():\n '''New {i}'''\n return 'new_{i}'",
                source_type="python",
            )
        )
    # bulk upsert with override_existing_tools=False
    result = await server.tool_manager.bulk_upsert_tools_async(bulk_tools, default_user, override_existing_tools=False)
    assert len(result) == 5  # 2 existing (not updated) + 3 new
    # verify existing tools were NOT updated
    for i in range(2):
        tool = await server.tool_manager.get_tool_by_name_async(f"mixed_existing_{i}", default_user)
        assert tool.description == f"Original {i}"  # should remain original
        assert tool.tags == ["original"]  # should remain original
        assert tool.id == existing_tools[i].id  # id should remain same
    # verify new tools were created
    for i in range(3):
        new_tool = await server.tool_manager.get_tool_by_name_async(f"mixed_new_{i}", default_user)
        assert new_tool is not None
        assert new_tool.description == f"New tool {i}"
        assert new_tool.tags == ["new"]
@pytest.mark.asyncio
async def test_create_tool_with_pip_requirements(server: SyncServer, default_user, default_organization):
    """Creating a tool with pip_requirements should persist each name/version pair."""

    def test_tool_with_deps():
        """
        A test tool with pip dependencies.
        Returns:
            str: Hello message.
        """
        return "hello"

    # Create pip requirements
    pip_reqs = [
        PipRequirement(name="requests", version="2.28.0"),
        PipRequirement(name="numpy"),  # No version specified
    ]
    # Set up tool details
    source_code = parse_source_code(test_tool_with_deps)
    source_type = "python"
    description = "A test tool with pip dependencies"
    tags = ["test"]
    metadata = {"test": "pip_requirements"}
    tool = PydanticTool(
        description=description, tags=tags, source_code=source_code, source_type=source_type, metadata_=metadata, pip_requirements=pip_reqs
    )
    # Name and schema are derived from the parsed function, not passed explicitly.
    derived_json_schema = generate_schema_for_tool_creation(tool)
    derived_name = derived_json_schema["name"]
    tool.json_schema = derived_json_schema
    tool.name = derived_name
    created_tool = await server.tool_manager.create_or_update_tool_async(tool, actor=default_user)
    # Assertions: both requirements persist; version survives only where one was given.
    assert created_tool.pip_requirements is not None
    assert len(created_tool.pip_requirements) == 2
    assert created_tool.pip_requirements[0].name == "requests"
    assert created_tool.pip_requirements[0].version == "2.28.0"
    assert created_tool.pip_requirements[1].name == "numpy"
    assert created_tool.pip_requirements[1].version is None
async def test_create_tool_without_pip_requirements(server: SyncServer, print_tool):
    """A tool created without pip requirements exposes the field as None (not [])."""
    assert print_tool.pip_requirements is None
async def test_update_tool_pip_requirements(server: SyncServer, print_tool, default_user):
    """Attaching pip requirements via ToolUpdate persists each name/version pair."""
    requirements = [
        PipRequirement(name="pandas", version="1.5.0"),
        PipRequirement(name="sumy"),
    ]
    await server.tool_manager.update_tool_by_id_async(
        print_tool.id, ToolUpdate(pip_requirements=requirements), actor=default_user
    )
    # Re-read the tool and compare what was stored against what we sent.
    refreshed = await server.tool_manager.get_tool_by_id_async(print_tool.id, actor=default_user)
    assert refreshed.pip_requirements is not None
    stored = [(req.name, req.version) for req in refreshed.pip_requirements]
    assert stored == [("pandas", "1.5.0"), ("sumy", None)]
async def test_update_tool_clear_pip_requirements(server: SyncServer, default_user, default_organization):
    """An explicit empty pip_requirements list in ToolUpdate clears the stored list."""

    def test_tool_clear_deps():
        """
        A test tool to clear dependencies.
        Returns:
            str: Hello message.
        """
        return "hello"

    # Create a tool with pip requirements
    pip_reqs = [PipRequirement(name="requests")]
    # Set up tool details
    source_code = parse_source_code(test_tool_clear_deps)
    source_type = "python"
    description = "A test tool to clear dependencies"
    tags = ["test"]
    metadata = {"test": "clear_deps"}
    tool = PydanticTool(
        description=description, tags=tags, source_code=source_code, source_type=source_type, metadata_=metadata, pip_requirements=pip_reqs
    )
    derived_json_schema = generate_schema_for_tool_creation(tool)
    derived_name = derived_json_schema["name"]
    tool.json_schema = derived_json_schema
    tool.name = derived_name
    created_tool = await server.tool_manager.create_or_update_tool_async(tool, actor=default_user)
    # Verify it has requirements
    assert created_tool.pip_requirements is not None
    assert len(created_tool.pip_requirements) == 1
    # Clear the requirements: [] is a real update, distinct from "field not set"
    tool_update = ToolUpdate(pip_requirements=[])
    await server.tool_manager.update_tool_by_id_async(created_tool.id, tool_update, actor=default_user)
    # Fetch the updated tool
    updated_tool = await server.tool_manager.get_tool_by_id_async(created_tool.id, actor=default_user)
    # Assertions: stored as an empty list, not None
    assert updated_tool.pip_requirements == []
async def test_pip_requirements_roundtrip(server: SyncServer, default_user, default_organization):
    """Pip requirements survive a create → fetch round-trip across version formats."""

    def roundtrip_test_tool():
        """
        Test pip requirements roundtrip.
        Returns:
            str: Test message.
        """
        return "test"

    # Expected (name -> version) pairs: pinned, short, pre-release, and unpinned.
    expected_versions = {
        "requests": "2.28.0",
        "flask": "2.0",
        "django": "4.1.0-beta",
        "numpy": None,
    }
    requirements = [PipRequirement(name=pkg, version=ver) for pkg, ver in expected_versions.items()]
    tool = PydanticTool(
        description="Test pip requirements roundtrip",
        tags=["test"],
        source_code=parse_source_code(roundtrip_test_tool),
        source_type="python",
        metadata_={"test": "roundtrip"},
        pip_requirements=requirements,
    )
    schema = generate_schema_for_tool_creation(tool)
    tool.json_schema = schema
    tool.name = schema["name"]
    created = await server.tool_manager.create_or_update_tool_async(tool, actor=default_user)
    # Round-trip through the database and compare against the expected mapping.
    fetched = await server.tool_manager.get_tool_by_id_async(created.id, actor=default_user)
    assert fetched.pip_requirements is not None
    assert {req.name: req.version for req in fetched.pip_requirements} == expected_versions
async def test_update_default_requires_approval(server: SyncServer, bash_tool, default_user):
    """Toggling default_requires_approval via ToolUpdate persists, then reverts cleanly.

    Uses `is False` / `is True` identity checks (instead of `==`) so a
    truthy-but-wrong stored value (e.g. 1 or "true") cannot slip through.
    """
    # Update field
    tool_update = ToolUpdate(default_requires_approval=False)
    await server.tool_manager.update_tool_by_id_async(bash_tool.id, tool_update, actor=default_user)
    # Fetch the updated tool
    updated_tool = await server.tool_manager.get_tool_by_id_async(bash_tool.id, actor=default_user)
    # Assertions
    assert updated_tool.default_requires_approval is False
    # Revert update so the shared bash_tool fixture keeps its original state
    tool_update = ToolUpdate(default_requires_approval=True)
    await server.tool_manager.update_tool_by_id_async(bash_tool.id, tool_update, actor=default_user)
    # Fetch the updated tool
    updated_tool = await server.tool_manager.get_tool_by_id_async(bash_tool.id, actor=default_user)
    # Assertions
    assert updated_tool.default_requires_approval is True
# ======================================================================================================================
# ToolManager Schema tests
# ======================================================================================================================
async def test_create_tool_with_json_schema(server: SyncServer, default_user, default_organization):
    """Test that json_schema is used when provided at creation."""
    source_code = """
def test_function(arg1: str) -> str:
    return arg1
"""
    json_schema = {
        "name": "test_function",
        "description": "A test function",
        "parameters": {"type": "object", "properties": {"arg1": {"type": "string"}}, "required": ["arg1"]},
    }
    created = await server.tool_manager.create_tool_async(
        PydanticTool(
            name="test_function",
            tool_type=ToolType.CUSTOM,
            source_code=source_code,
            json_schema=json_schema,
        ),
        default_user,
    )
    # The explicit schema is stored verbatim and drives name/description.
    assert created.name == "test_function"
    assert created.description == "A test function"
    assert created.json_schema == json_schema
async def test_create_tool_with_args_json_schema(server: SyncServer, default_user, default_organization):
    """Test that schema is generated from args_json_schema at creation."""
    tool_manager = server.tool_manager
    source_code = """
def test_function(arg1: str, arg2: int) -> str:
    '''This is a test function'''
    return f"{arg1} {arg2}"
"""
    args_json_schema = {
        "type": "object",
        "properties": {
            "arg1": {"type": "string", "description": "First argument"},
            "arg2": {"type": "integer", "description": "Second argument"},
        },
        "required": ["arg1", "arg2"],
    }
    tool = PydanticTool(
        name="test_function",
        tool_type=ToolType.CUSTOM,
        source_code=source_code,
        args_json_schema=args_json_schema,
    )
    created_tool = await tool_manager.create_or_update_tool_async(tool, default_user)
    # A full json_schema should be synthesized: name from the tool, description
    # from the function docstring, parameters from args_json_schema.
    assert created_tool.json_schema is not None
    assert created_tool.json_schema["name"] == "test_function"
    assert created_tool.json_schema["description"] == "This is a test function"
    assert "parameters" in created_tool.json_schema
    assert created_tool.json_schema["parameters"]["properties"]["arg1"]["type"] == "string"
    assert created_tool.json_schema["parameters"]["properties"]["arg2"]["type"] == "integer"
async def test_create_tool_with_docstring_no_schema(server: SyncServer, default_user, default_organization):
    """Test that schema is generated from docstring when no schema provided."""
    tool_manager = server.tool_manager
    source_code = """
def test_function(arg1: str, arg2: int = 5) -> str:
    '''
    This is a test function
    Args:
        arg1: First argument
        arg2: Second argument
    Returns:
        A string result
    '''
    return f"{arg1} {arg2}"
"""
    tool = PydanticTool(
        name="test_function",
        tool_type=ToolType.CUSTOM,
        source_code=source_code,
    )
    created_tool = await tool_manager.create_or_update_tool_async(tool, default_user)
    # With neither json_schema nor args_json_schema supplied, the function
    # docstring is the only source of metadata for schema generation.
    assert created_tool.json_schema is not None
    assert created_tool.json_schema["name"] == "test_function"
    assert "This is a test function" in created_tool.json_schema["description"]
    assert "parameters" in created_tool.json_schema
async def test_create_tool_with_docstring_and_args_schema(server: SyncServer, default_user, default_organization):
    """Test that args_json_schema takes precedence over docstring."""
    source_code = """
def test_function(old_arg: str) -> str:
    '''Old docstring that should be overridden'''
    return old_arg
"""
    args_json_schema = {
        "type": "object",
        "properties": {"new_arg": {"type": "string", "description": "New argument from schema"}},
        "required": ["new_arg"],
    }
    created = await server.tool_manager.create_or_update_tool_async(
        PydanticTool(
            name="test_function",
            tool_type=ToolType.CUSTOM,
            source_code=source_code,
            args_json_schema=args_json_schema,
        ),
        default_user,
    )
    schema = created.json_schema
    assert schema is not None
    assert schema["name"] == "test_function"
    # Description still comes from the function docstring...
    assert schema["description"] == "Old docstring that should be overridden"
    # ...while the parameter set is taken wholesale from args_json_schema.
    params = schema["parameters"]["properties"]
    assert "new_arg" in params
    assert "old_arg" not in params
async def test_error_no_docstring_or_schema(server: SyncServer, default_user, default_organization):
    """Test error when no docstring or schema provided (minimal function)."""
    tool_manager = server.tool_manager
    # Minimal function with no docstring and no schema: there is nothing to
    # derive description/parameters from, so creation is expected to raise.
    source_code = """
def test_function():
    pass
"""
    tool = PydanticTool(
        name="test_function",
        tool_type=ToolType.CUSTOM,
        source_code=source_code,
    )
    with pytest.raises(ValueError):
        await tool_manager.create_or_update_tool_async(tool, default_user)
async def test_error_on_create_tool_with_name_conflict(server: SyncServer, default_user, default_organization):
    """Test error when json_schema name conflicts with function name."""
    source_code = """
def test_function(arg1: str) -> str:
    return arg1
"""
    # JSON schema whose name deliberately disagrees with the tool name
    conflicting_schema = {
        "name": "different_name",
        "description": "A test function",
        "parameters": {"type": "object", "properties": {"arg1": {"type": "string"}}, "required": ["arg1"]},
    }
    # Creation succeeds despite the mismatch - the explicit tool name wins.
    created = await server.tool_manager.create_tool_async(
        PydanticTool(
            name="test_function",
            tool_type=ToolType.CUSTOM,
            source_code=source_code,
            json_schema=conflicting_schema,
        ),
        default_user,
    )
    assert created.name == "test_function"
async def test_update_tool_with_json_schema(server: SyncServer, default_user, default_organization):
    """Test update with a new json_schema."""
    tool_manager = server.tool_manager
    # Create initial tool
    source_code = """
def test_function() -> str:
    return "hello"
"""
    tool = PydanticTool(
        name="test_update_json_schema",
        tool_type=ToolType.CUSTOM,
        source_code=source_code,
        json_schema={"name": "test_update_json_schema", "description": "Original"},
    )
    created_tool = await tool_manager.create_tool_async(tool, default_user)
    # Update with new json_schema; the stored schema should be replaced verbatim.
    new_schema = {
        "name": "test_update_json_schema",
        "description": "Updated description",
        "parameters": {"type": "object", "properties": {"new_arg": {"type": "string"}}, "required": ["new_arg"]},
    }
    update = ToolUpdate(json_schema=new_schema)
    updated_tool = await tool_manager.update_tool_by_id_async(created_tool.id, update, default_user)
    assert updated_tool.json_schema == new_schema
    assert updated_tool.json_schema["description"] == "Updated description"
async def test_update_tool_with_args_json_schema(server: SyncServer, default_user, default_organization):
    """Test update with args_json_schema."""
    tool_manager = server.tool_manager
    # Create initial tool
    source_code = """
def test_function() -> str:
    '''Original function'''
    return "hello"
"""
    tool = PydanticTool(
        name="test_function",
        tool_type=ToolType.CUSTOM,
        source_code=source_code,
    )
    created_tool = await tool_manager.create_or_update_tool_async(tool, default_user)
    # Update with args_json_schema; source_code changes too so the new
    # docstring ('Updated function') is available as the description.
    new_source_code = """
def test_function(new_arg: str) -> str:
    '''Updated function'''
    return new_arg
"""
    args_json_schema = {
        "type": "object",
        "properties": {"new_arg": {"type": "string", "description": "New argument"}},
        "required": ["new_arg"],
    }
    update = ToolUpdate(source_code=new_source_code, args_json_schema=args_json_schema)
    updated_tool = await tool_manager.update_tool_by_id_async(created_tool.id, update, default_user)
    assert updated_tool.json_schema is not None
    assert updated_tool.json_schema["description"] == "Updated function"
    assert "new_arg" in updated_tool.json_schema["parameters"]["properties"]
async def test_update_tool_with_no_schema(server: SyncServer, default_user, default_organization):
    """Test update with no schema changes."""
    original_schema = {
        "name": "test_no_schema_update",
        "description": "Original description",
        "parameters": {"type": "object", "properties": {}},
    }
    created = await server.tool_manager.create_tool_async(
        PydanticTool(
            name="test_no_schema_update",
            tool_type=ToolType.CUSTOM,
            source_code="def test_function(): pass",
            json_schema=original_schema,
        ),
        default_user,
    )
    # A description-only update must leave the stored schema untouched.
    updated = await server.tool_manager.update_tool_by_id_async(
        created.id, ToolUpdate(description="New description"), default_user
    )
    assert updated.description == "New description"
    assert updated.json_schema == original_schema
async def test_update_tool_name(server: SyncServer, default_user, default_organization):
    """Test various name update scenarios."""
    # Seed a tool whose tool name and schema name agree.
    original_schema = {"name": "original_name", "description": "Test", "parameters": {"type": "object", "properties": {}}}
    created = await server.tool_manager.create_or_update_tool_async(
        PydanticTool(
            name="original_name",
            tool_type=ToolType.CUSTOM,
            source_code="def original_name(): pass",
            json_schema=original_schema,
        ),
        default_user,
    )
    assert created.name == "original_name"
    assert created.json_schema["name"] == "original_name"
    # Pushing a schema with a different name should rename the tool to match.
    renamed_schema = {"name": "matched_name", "description": "Test", "parameters": {"type": "object", "properties": {}}}
    renamed = await server.tool_manager.update_tool_by_id_async(created.id, ToolUpdate(json_schema=renamed_schema), default_user)
    assert renamed.name == "matched_name"
    assert renamed.json_schema["name"] == "matched_name"
@pytest.mark.asyncio
async def test_list_tools_with_project_id_filtering(server: SyncServer, default_user):
    """Test listing tools with project_id filtering - global vs project-scoped tools.

    Global tools (project_id=None) are visible from every project; a
    project-scoped tool is visible only when its own project_id is requested.
    """
    # Create separate functions for each tool (name must match function name)
    def global_tool_func() -> str:
        """A global tool with no project_id.
        Returns:
            str: Test result
        """
        return "global_result"

    def project_a_tool_func() -> str:
        """A tool scoped to project A.
        Returns:
            str: Test result
        """
        return "project_a_result"

    def project_b_tool_func() -> str:
        """A tool scoped to project B.
        Returns:
            str: Test result
        """
        return "project_b_result"

    # Create a global tool (project_id = None)
    global_tool = PydanticTool(
        name="global_tool_func",
        description="A global tool with no project_id",
        source_code=parse_source_code(global_tool_func),
        source_type="python",
        tool_type=ToolType.CUSTOM,
        project_id=None,  # Global tool
    )
    global_tool.json_schema = generate_schema_for_tool_creation(global_tool)
    global_tool = await server.tool_manager.create_or_update_tool_async(global_tool, actor=default_user)
    # Create a tool scoped to project_a (random project id avoids cross-test collisions)
    project_a_id = f"project-{uuid.uuid4()}"
    project_a_tool = PydanticTool(
        name="project_a_tool_func",
        description="A tool scoped to project A",
        source_code=parse_source_code(project_a_tool_func),
        source_type="python",
        tool_type=ToolType.CUSTOM,
        project_id=project_a_id,
    )
    project_a_tool.json_schema = generate_schema_for_tool_creation(project_a_tool)
    project_a_tool = await server.tool_manager.create_or_update_tool_async(project_a_tool, actor=default_user)
    # Create a tool scoped to project_b
    project_b_id = f"project-{uuid.uuid4()}"
    project_b_tool = PydanticTool(
        name="project_b_tool_func",
        description="A tool scoped to project B",
        source_code=parse_source_code(project_b_tool_func),
        source_type="python",
        tool_type=ToolType.CUSTOM,
        project_id=project_b_id,
    )
    project_b_tool.json_schema = generate_schema_for_tool_creation(project_b_tool)
    project_b_tool = await server.tool_manager.create_or_update_tool_async(project_b_tool, actor=default_user)
    # Test 1: When no project_id is provided, list ALL tools
    all_tools = await server.tool_manager.list_tools_async(actor=default_user, upsert_base_tools=False, project_id=None)
    all_tool_names = {t.name for t in all_tools}
    assert "global_tool_func" in all_tool_names
    assert "project_a_tool_func" in all_tool_names
    assert "project_b_tool_func" in all_tool_names
    # Test 2: When project_a_id is provided, list only global + project_a tools
    project_a_tools = await server.tool_manager.list_tools_async(actor=default_user, upsert_base_tools=False, project_id=project_a_id)
    project_a_tool_names = {t.name for t in project_a_tools}
    assert "global_tool_func" in project_a_tool_names  # Global tools should be included
    assert "project_a_tool_func" in project_a_tool_names  # Project A tool should be included
    assert "project_b_tool_func" not in project_a_tool_names  # Project B tool should NOT be included
    # Test 3: When project_b_id is provided, list only global + project_b tools
    project_b_tools = await server.tool_manager.list_tools_async(actor=default_user, upsert_base_tools=False, project_id=project_b_id)
    project_b_tool_names = {t.name for t in project_b_tools}
    assert "global_tool_func" in project_b_tool_names  # Global tools should be included
    assert "project_b_tool_func" in project_b_tool_names  # Project B tool should be included
    assert "project_a_tool_func" not in project_b_tool_names  # Project A tool should NOT be included
    # Test 4: When a non-existent project_id is provided, list only global tools
    non_existent_project_id = f"project-{uuid.uuid4()}"
    non_existent_project_tools = await server.tool_manager.list_tools_async(
        actor=default_user, upsert_base_tools=False, project_id=non_existent_project_id
    )
    non_existent_tool_names = {t.name for t in non_existent_project_tools}
    assert "global_tool_func" in non_existent_tool_names  # Global tools should be included
    assert "project_a_tool_func" not in non_existent_tool_names
    assert "project_b_tool_func" not in non_existent_tool_names
@pytest.mark.asyncio
async def test_count_tools_with_project_id_filtering(server: SyncServer, default_user):
    """Test counting tools with project_id filtering - global vs project-scoped tools.

    Seeds 2 global, 3 project-A, and 1 project-B tool. Counts use a
    search="count_" name filter so pre-existing tools do not skew the totals.
    """
    # Create separate functions for each tool
    def count_global_tool_0() -> str:
        """Global tool 0 for counting.
        Returns:
            str: Test result
        """
        return "count_result"

    def count_global_tool_1() -> str:
        """Global tool 1 for counting.
        Returns:
            str: Test result
        """
        return "count_result"

    def count_project_a_tool_0() -> str:
        """Project A tool 0 for counting.
        Returns:
            str: Test result
        """
        return "count_result"

    def count_project_a_tool_1() -> str:
        """Project A tool 1 for counting.
        Returns:
            str: Test result
        """
        return "count_result"

    def count_project_a_tool_2() -> str:
        """Project A tool 2 for counting.
        Returns:
            str: Test result
        """
        return "count_result"

    def count_project_b_tool() -> str:
        """Project B tool for counting.
        Returns:
            str: Test result
        """
        return "count_result"

    global_funcs = [count_global_tool_0, count_global_tool_1]
    project_a_funcs = [count_project_a_tool_0, count_project_a_tool_1, count_project_a_tool_2]
    # Create 2 global tools
    for tool_func in global_funcs:
        global_tool = PydanticTool(
            name=tool_func.__name__,
            description="Global tool for counting",
            source_code=parse_source_code(tool_func),
            source_type="python",
            tool_type=ToolType.CUSTOM,
            project_id=None,
        )
        global_tool.json_schema = generate_schema_for_tool_creation(global_tool)
        await server.tool_manager.create_or_update_tool_async(global_tool, actor=default_user)
    # Create 3 tools scoped to project_a
    project_a_id = f"project-{uuid.uuid4()}"
    for tool_func in project_a_funcs:
        project_a_tool = PydanticTool(
            name=tool_func.__name__,
            description="Project A tool for counting",
            source_code=parse_source_code(tool_func),
            source_type="python",
            tool_type=ToolType.CUSTOM,
            project_id=project_a_id,
        )
        project_a_tool.json_schema = generate_schema_for_tool_creation(project_a_tool)
        await server.tool_manager.create_or_update_tool_async(project_a_tool, actor=default_user)
    # Create 1 tool scoped to project_b
    project_b_id = f"project-{uuid.uuid4()}"
    project_b_tool_pydantic = PydanticTool(
        name="count_project_b_tool",
        description="Project B tool for counting",
        source_code=parse_source_code(count_project_b_tool),
        source_type="python",
        tool_type=ToolType.CUSTOM,
        project_id=project_b_id,
    )
    project_b_tool_pydantic.json_schema = generate_schema_for_tool_creation(project_b_tool_pydantic)
    await server.tool_manager.create_or_update_tool_async(project_b_tool_pydantic, actor=default_user)
    # Test 1: Count without project_id filter should count all custom tools we created (2 + 3 + 1 = 6)
    all_count = await server.tool_manager.count_tools_async(actor=default_user, tool_types=[ToolType.CUSTOM.value], search="count_")
    assert all_count == 6
    # Test 2: Count with project_a_id should count global + project_a tools (2 + 3 = 5)
    project_a_count = await server.tool_manager.count_tools_async(
        actor=default_user, tool_types=[ToolType.CUSTOM.value], search="count_", project_id=project_a_id
    )
    assert project_a_count == 5
    # Test 3: Count with project_b_id should count global + project_b tools (2 + 1 = 3)
    project_b_count = await server.tool_manager.count_tools_async(
        actor=default_user, tool_types=[ToolType.CUSTOM.value], search="count_", project_id=project_b_id
    )
    assert project_b_count == 3
    # Test 4: Count with non-existent project_id should only count global tools (2)
    non_existent_project_id = f"project-{uuid.uuid4()}"
    global_only_count = await server.tool_manager.count_tools_async(
        actor=default_user, tool_types=[ToolType.CUSTOM.value], search="count_", project_id=non_existent_project_id
    )
    assert global_only_count == 2
@pytest.mark.asyncio
async def test_list_tools_with_corrupted_tool(server: SyncServer, default_user, print_tool):
    """Test that list_tools still works even if there's a corrupted tool (missing json_schema) in the database.

    The corrupted row is inserted straight through the ORM session to bypass
    the validation that would normally reject a tool without a json_schema.
    """
    # First, verify we have a normal tool
    tools = await server.tool_manager.list_tools_async(actor=default_user, upsert_base_tools=False)
    initial_tool_count = len(tools)
    assert any(t.id == print_tool.id for t in tools)
    # Now insert a corrupted tool directly into the database (bypassing normal validation)
    # This simulates a tool that somehow got corrupted in the database
    from letta.orm.tool import Tool as ToolModel

    async with db_registry.async_session() as session:
        # Create a tool with corrupted ID format (bypassing validation)
        # This simulates a tool that somehow got corrupted in the database
        corrupted_tool = ToolModel(
            id=f"tool-corrupted-{uuid.uuid4()}",
            name="corrupted_tool",
            description="This tool has no json_schema",
            tool_type=ToolType.CUSTOM,
            source_code="def corrupted_tool(): pass",
            json_schema=None,  # Explicitly set to None to simulate corruption
            organization_id=default_user.organization_id,
            created_by_id=default_user.id,
            last_updated_by_id=default_user.id,
            tags=["corrupted"],
        )
        session.add(corrupted_tool)
        # context manager now handles commits
        # await session.commit()
        corrupted_tool_id = corrupted_tool.id
    # Now try to list tools - it should still work and not include the corrupted tool
    # The corrupted tool should be automatically excluded from results
    tools = await server.tool_manager.list_tools_async(actor=default_user, upsert_base_tools=False)
    # Verify listing still works
    assert len(tools) == initial_tool_count  # Corrupted tool should not be in the results
    assert any(t.id == print_tool.id for t in tools)  # Normal tool should still be there
    assert not any(t.id == corrupted_tool_id for t in tools)  # Corrupted tool should not be there
    # Verify the corrupted tool's name is not in the results
    assert not any(t.name == "corrupted_tool" for t in tools)
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/managers/test_tool_manager.py",
"license": "Apache License 2.0",
"lines": 1878,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/managers/test_user_manager.py | import pytest
# Import shared fixtures and constants from conftest
from letta.constants import (
DEFAULT_ORG_ID,
)
from letta.data_sources.redis_client import NoopAsyncRedisClient, get_redis_client
from letta.helpers.datetime_helpers import AsyncTimer
from letta.schemas.organization import Organization as PydanticOrganization
from letta.schemas.user import User as PydanticUser, UserUpdate
from letta.server.server import SyncServer
# ======================================================================================================================
# User Manager Tests
# ======================================================================================================================
@pytest.mark.asyncio
async def test_list_users(server: SyncServer):
    """Creating a user makes it listable; deleting it empties the listing again."""
    # Create default organization
    org = await server.organization_manager.create_default_organization_async()
    created = await server.user_manager.create_actor_async(PydanticUser(name="user", organization_id=org.id))
    listed = await server.user_manager.list_actors_async()
    assert [u.name for u in listed] == ["user"]
    # Delete it after
    await server.user_manager.delete_actor_by_id_async(created.id)
    assert len(await server.user_manager.list_actors_async()) == 0
@pytest.mark.asyncio
async def test_create_default_user(server: SyncServer):
    """The default actor can be created under the default org and fetched back by name."""
    default_org = await server.organization_manager.create_default_organization_async()
    await server.user_manager.create_default_actor_async(org_id=default_org.id)
    fetched = await server.user_manager.get_default_actor_async()
    assert fetched.name == server.user_manager.DEFAULT_USER_NAME
@pytest.mark.asyncio
async def test_update_user(server: SyncServer):
    """A user's name and organization_id can be updated independently of each other."""
    # Create default organization
    default_org = await server.organization_manager.create_default_organization_async()
    test_org = await server.organization_manager.create_organization_async(PydanticOrganization(name="test_org"))
    user_name_a = "a"
    user_name_b = "b"
    # Assert it's been created
    user = await server.user_manager.create_actor_async(PydanticUser(name=user_name_a, organization_id=default_org.id))
    assert user.name == user_name_a
    # Adjust name
    user = await server.user_manager.update_actor_async(UserUpdate(id=user.id, name=user_name_b))
    assert user.name == user_name_b
    assert user.organization_id == DEFAULT_ORG_ID  # org untouched by a name-only update
    # Adjust org id
    user = await server.user_manager.update_actor_async(UserUpdate(id=user.id, organization_id=test_org.id))
    assert user.name == user_name_b  # name untouched by an org-only update
    assert user.organization_id == test_org.id
@pytest.mark.asyncio
async def test_user_caching(server: SyncServer, default_user, performance_pct=0.4):
    """Redis-backed actor lookups are served from cache after the first miss.

    Added the `@pytest.mark.asyncio` marker for consistency with every other
    async test in this file. `performance_pct` is the fraction of the cold-call
    latency that each warm call must beat (warm < 40% of cold by default).
    """
    if isinstance(await get_redis_client(), NoopAsyncRedisClient):
        pytest.skip("redis not available")
    # Invalidate previous cache behavior.
    await server.user_manager._invalidate_actor_cache(default_user.id)
    before_stats = server.user_manager.get_actor_by_id_async.cache_stats
    before_cache_misses = before_stats.misses
    before_cache_hits = before_stats.hits
    # First call (expected to miss the cache)
    async with AsyncTimer() as timer:
        actor = await server.user_manager.get_actor_by_id_async(default_user.id)
    duration_first = timer.elapsed_ns
    print(f"Call 1: {duration_first:.2e}ns")
    assert actor.id == default_user.id
    assert duration_first > 0  # Sanity check: took non-zero time
    cached_hits = 10
    durations = []
    for i in range(cached_hits):
        async with AsyncTimer() as timer:
            actor_cached = await server.user_manager.get_actor_by_id_async(default_user.id)
        duration = timer.elapsed_ns
        durations.append(duration)
        print(f"Call {i + 2}: {duration:.2e}ns")
        assert actor_cached == actor
    # Every warm call must be substantially faster than the cold call.
    for d in durations:
        assert d < duration_first * performance_pct
    stats = server.user_manager.get_actor_by_id_async.cache_stats
    print(f"Before calls: {before_stats}")
    print(f"After calls: {stats}")
    # Assert cache stats
    assert stats.misses - before_cache_misses == 1
    assert stats.hits - before_cache_hits == cached_hits
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/managers/test_user_manager.py",
"license": "Apache License 2.0",
"lines": 81,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/mock_mcp_server.py | #!/usr/bin/env python3
"""
Simple MCP test server with basic and complex tools for testing purposes.
"""
import json
import logging
from typing import List, Optional
from mcp.server.fastmcp import FastMCP
from pydantic import BaseModel, Field
# Configure logging to stderr (not stdout for STDIO servers)
logging.basicConfig(level=logging.INFO)
# Initialize FastMCP server
mcp = FastMCP("test-server")
# Complex Pydantic models for testing
class Address(BaseModel):
    """An address with street, city, and zip code."""

    # `...` marks these as required in the generated JSON schema.
    street: str = Field(..., description="Street address")
    city: str = Field(..., description="City name")
    zip_code: str = Field(..., description="ZIP code")
    # Has a default, so callers may omit it.
    country: str = Field(default="USA", description="Country name")
class Person(BaseModel):
    """A person with name, age, and optional address."""

    name: str = Field(..., description="Person's full name")
    # ge/le bounds are enforced on validation and surface as schema constraints.
    age: int = Field(..., description="Person's age", ge=0, le=150)
    email: Optional[str] = Field(None, description="Email address")
    # Nested model field — exercises nested-object schemas in consumers of this mock.
    address: Optional[Address] = Field(None, description="Home address")
class TaskItem(BaseModel):
    """A task item with title, priority, and completion status."""

    title: str = Field(..., description="Task title")
    priority: int = Field(default=1, description="Priority level (1-5)", ge=1, le=5)
    completed: bool = Field(default=False, description="Whether the task is completed")
    # default_factory avoids sharing a single list instance across model instances.
    tags: List[str] = Field(default_factory=list, description="List of tags")
class SearchFilter(BaseModel):
    """Filter criteria for searching."""

    keywords: List[str] = Field(..., description="List of keywords to search for")
    # Optional float constrained to the closed interval [0.0, 1.0].
    min_score: Optional[float] = Field(None, description="Minimum score threshold", ge=0.0, le=1.0)
    categories: Optional[List[str]] = Field(None, description="Categories to filter by")
# Customer-reported schema models (matching mcp_schema.json pattern)
class Instantiation(BaseModel):
    """Instantiation object with optional node identifiers."""

    # model_config = ConfigDict(json_schema_extra={"additionalProperties": False})
    doid: Optional[str] = Field(None, description="DOID identifier")
    # camelCase field names intentionally mirror the customer-reported
    # mcp_schema.json pattern noted in the section comment above these models.
    nodeFamilyId: Optional[int] = Field(None, description="Node family ID")
    nodeTypeId: Optional[int] = Field(None, description="Node type ID")
    nodePositionId: Optional[int] = Field(None, description="Node position ID")
class InstantiationData(BaseModel):
    """Instantiation data with abstract and multiplicity flags.

    Mirrors the customer-reported schema: an optional flags pair plus an
    array of nested ``Instantiation`` objects.
    """

    # model_config = ConfigDict(json_schema_extra={"additionalProperties": False})
    isAbstract: Optional[bool] = Field(None, description="Whether the instantiation is abstract")
    isMultiplicity: Optional[bool] = Field(None, description="Whether the instantiation has multiplicity")
    # NOTE(review): default is None but the annotation is a non-Optional list;
    # presumably intentional to reproduce the customer schema exactly — confirm
    # before "fixing" to Optional[List[...]] or default_factory=list.
    instantiations: List[Instantiation] = Field(None, description="List of instantiations")
class ParameterPreset(BaseModel):
    """Parameter preset enum values.

    Declared to match the customer schema; no visible tool takes this type
    (``get_parameter_type_description`` accepts ``preset`` as a plain str).
    """

    value: str = Field(..., description="Preset value (a, b, c, e, f, g, h, i, d, l, s, m, z, o, u, unknown)")
@mcp.tool()
async def echo(message: str) -> str:
    """Echo back the provided message.
    Args:
        message: The message to echo back
    """
    # Prefix the payload and hand it straight back to the caller.
    reply = "Echo: {}".format(message)
    return reply
@mcp.tool()
async def add(a: float, b: float) -> str:
    """Add two numbers together.
    Args:
        a: First number
        b: Second number
    """
    # Render the whole equation so the client sees operands and result.
    total = a + b
    return "{} + {} = {}".format(a, b, total)
@mcp.tool()
async def multiply(a: float, b: float) -> str:
    """Multiply two numbers together.
    Args:
        a: First number
        b: Second number
    """
    # Render the whole equation so the client sees operands and result.
    product = a * b
    return "{} × {} = {}".format(a, b, product)
@mcp.tool()
async def reverse_string(text: str) -> str:
    """Reverse a string.
    Args:
        text: The string to reverse
    """
    # Walk the characters back-to-front and join them into a new string.
    return "".join(reversed(text))
# Complex tools using Pydantic models
@mcp.tool()
async def create_person(person: Person) -> str:
    """Create a person profile with nested address information.
    Args:
        person: Person object with name, age, optional email and address
    """
    # Accumulate newline-terminated fragments, then join once at the end.
    lines = ["Created person profile:\n"]
    lines.append(f" Name: {person.name}\n")
    lines.append(f" Age: {person.age}\n")
    if person.email:
        lines.append(f" Email: {person.email}\n")
    if person.address:
        lines.append(" Address:\n")
        lines.append(f" {person.address.street}\n")
        lines.append(f" {person.address.city}, {person.address.zip_code}\n")
        lines.append(f" {person.address.country}\n")
    return "".join(lines)
@mcp.tool()
async def manage_tasks(tasks: List[TaskItem]) -> str:
    """Manage multiple tasks with priorities and tags.
    Args:
        tasks: List of task items to manage
    """
    if not tasks:
        return "No tasks provided"
    # Build the report as a list of fragments and join once at the end.
    pieces = [f"Managing {len(tasks)} task(s):\n\n"]
    for index, task in enumerate(tasks, 1):
        marker = "✓" if task.completed else "○"
        pieces.append(f"{index}. [{marker}] {task.title}\n")
        pieces.append(f" Priority: {task.priority}/5\n")
        if task.tags:
            pieces.append(f" Tags: {', '.join(task.tags)}\n")
        pieces.append("\n")
    done = sum(1 for t in tasks if t.completed)
    pieces.append(f"Summary: {done}/{len(tasks)} completed")
    return "".join(pieces)
@mcp.tool()
async def search_with_filters(query: str, filters: SearchFilter) -> str:
    """Search with complex filter criteria including keywords and categories.
    Args:
        query: The main search query
        filters: Complex filter object with keywords, score threshold, and categories
    """
    # Header: the query followed by whichever filters were supplied.
    segments = [f"Search Query: '{query}'\n\n", "Filters Applied:\n"]
    segments.append(f" Keywords: {', '.join(filters.keywords)}\n")
    if filters.min_score is not None:
        segments.append(f" Minimum Score: {filters.min_score}\n")
    if filters.categories:
        segments.append(f" Categories: {', '.join(filters.categories)}\n")
    # Simulated search results (first keyword must exist — see SearchFilter).
    segments.append("\nFound 3 results matching criteria:\n")
    segments.append(f" 1. Result matching '{filters.keywords[0]}' (score: 0.95)\n")
    segments.append(f" 2. Result matching '{query}' (score: 0.87)\n")
    segments.append(" 3. Result matching multiple keywords (score: 0.82)\n")
    return "".join(segments)
@mcp.tool()
async def process_nested_data(data: dict) -> str:
    """Process arbitrary nested dictionary data.
    Args:
        data: Nested dictionary with arbitrary structure
    """

    def tally(node, depth=0):
        # Recursively count contained items and track the deepest level seen.
        total, deepest = 0, depth
        if isinstance(node, dict):
            children = node.values()
        elif isinstance(node, list):
            children = node
        else:
            children = ()  # leaves contribute nothing of their own
        for child in children:
            child_count, child_depth = tally(child, depth + 1)
            total += child_count + 1
            deepest = max(deepest, child_depth)
        return total, deepest

    report = "Processing nested data:\n"
    report += json.dumps(data, indent=2)
    report += "\n\nData structure stats:\n"
    report += f" Keys at root level: {len(data)}\n"
    total_items, max_depth = tally(data)
    report += f" Total nested items: {total_items}\n"
    report += f" Maximum nesting depth: {max_depth}\n"
    return report
@mcp.tool()
async def get_parameter_type_description(
    preset: str,
    instantiation_data: InstantiationData,
    connected_service_descriptor: Optional[str] = None,
) -> str:
    """Get parameter type description with complex nested structure.
    This tool matches the customer-reported schema pattern with:
    - Enum-like preset parameter
    - Optional string field
    - Optional nested object with arrays of objects
    Args:
        preset: The parameter preset (a, b, c, e, f, g, h, i, d, l, s, m, z, o, u, unknown)
        connected_service_descriptor: Connected service descriptor string, if available
        instantiation_data: Instantiation data dict with isAbstract, isMultiplicity, and instantiations list
    """
    # Assemble fragments and join once; order matches the rendered report.
    sections = [
        "Parameter Type Description\n",
        "=" * 50 + "\n\n",
        f"Preset: {preset}\n\n",
    ]
    if connected_service_descriptor:
        sections.append(f"Connected Service: {connected_service_descriptor}\n\n")
    if instantiation_data:
        sections.append("Instantiation Data:\n")
        sections.append(f" Is Abstract: {instantiation_data.isAbstract}\n")
        sections.append(f" Is Multiplicity: {instantiation_data.isMultiplicity}\n")
        sections.append(f" Instantiations: {instantiation_data.instantiations}\n")
    return "".join(sections)
def main():
    """Start the MCP test server on the STDIO transport."""
    transport = "stdio"
    mcp.run(transport=transport)
# Run the server when executed directly (e.g. spawned as an MCP STDIO subprocess).
if __name__ == "__main__":
    main()
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/mock_mcp_server.py",
"license": "Apache License 2.0",
"lines": 203,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/test_run_status_conversion.py | """
Unit tests for the convert_statuses_to_enum function in the runs API router.
These tests verify that status string conversion to RunStatus enums works correctly.
"""
import pytest
from letta.schemas.enums import RunStatus
from letta.server.rest_api.routers.v1.runs import convert_statuses_to_enum
def test_convert_statuses_to_enum_with_none():
    """A None input (no status filter requested) passes through as None."""
    assert convert_statuses_to_enum(None) is None
def test_convert_statuses_to_enum_with_single_status():
    """A single status string maps to a one-element list of enums."""
    converted = convert_statuses_to_enum(["completed"])
    assert len(converted) == 1
    assert converted == [RunStatus.completed]
def test_convert_statuses_to_enum_with_multiple_statuses():
    """Several status strings convert element-wise, preserving count."""
    converted = convert_statuses_to_enum(["created", "running", "completed"])
    assert len(converted) == 3
    assert converted == [RunStatus.created, RunStatus.running, RunStatus.completed]
def test_convert_statuses_to_enum_with_all_statuses():
    """Every supported status string converts to its enum counterpart."""
    names = ["created", "running", "completed", "failed", "cancelled"]
    expected = [RunStatus.created, RunStatus.running, RunStatus.completed, RunStatus.failed, RunStatus.cancelled]
    converted = convert_statuses_to_enum(names)
    assert converted == expected
    assert len(converted) == 5
def test_convert_statuses_to_enum_with_empty_list():
    """An empty list converts to an empty list (not None)."""
    assert convert_statuses_to_enum([]) == []
def test_convert_statuses_to_enum_with_invalid_status():
    """An unrecognized status string must raise ValueError."""
    bad_input = ["invalid_status"]
    with pytest.raises(ValueError):
        convert_statuses_to_enum(bad_input)
def test_convert_statuses_to_enum_preserves_order():
    """Conversion keeps the caller's ordering intact."""
    names = ["failed", "created", "completed", "running"]
    expected = [RunStatus.failed, RunStatus.created, RunStatus.completed, RunStatus.running]
    assert convert_statuses_to_enum(names) == expected
def test_convert_statuses_to_enum_with_duplicate_statuses():
    """Duplicates are converted one-for-one, not deduplicated."""
    converted = convert_statuses_to_enum(["completed", "completed", "running"])
    assert len(converted) == 3
    assert converted == [RunStatus.completed, RunStatus.completed, RunStatus.running]
def test_convert_statuses_to_enum_case_sensitivity():
    """Conversion is case-sensitive: only exact lowercase names are valid."""
    for variant in ("COMPLETED", "Completed"):
        with pytest.raises(ValueError):
            convert_statuses_to_enum([variant])
def test_convert_statuses_to_enum_with_mixed_valid_invalid():
    """One invalid entry fails the whole conversion — no partial results."""
    mixed = ["completed", "invalid", "running"]
    with pytest.raises(ValueError):
        convert_statuses_to_enum(mixed)
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/test_run_status_conversion.py",
"license": "Apache License 2.0",
"lines": 56,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/data/data_analysis.py | #!/usr/bin/env python3
"""
Data Analysis Module - Advanced statistical and machine learning operations
Contains various data processing and analysis functions for research purposes.
"""
import warnings
from dataclasses import dataclass
from datetime import datetime
from enum import Enum
from typing import Any, Dict, Optional

import numpy as np
import pandas as pd
class AnalysisType(Enum):
    """Enumeration of different analysis types."""

    DESCRIPTIVE = "descriptive"  # summary statistics (mean, std, skew, ...)
    CORRELATION = "correlation"  # pairwise variable correlation
    REGRESSION = "regression"  # declared but not used by any analyzer in this module
    CLUSTERING = "clustering"  # declared but not used by any analyzer in this module
    TIME_SERIES = "time_series"  # trend / seasonality analysis
@dataclass
class AnalysisResult:
    """Container for analysis results.

    Attributes:
        analysis_type: Which kind of analysis produced this result.
        timestamp: When the analysis ran.
        metrics: Computed metric values; StatisticalAnalyzer stores nested
            per-column dicts here, hence the Any value type.
        metadata: Free-form context about the analyzed input.
        success: False when the analysis failed (see error_message).
        error_message: Human-readable failure reason when success is False.
    """

    analysis_type: AnalysisType
    timestamp: datetime
    # Was Dict[str, float], but callers store nested dicts/lists in metrics.
    metrics: Dict[str, Any]
    # Was Dict[str, any]: `any` is the builtin function, not a type — use typing.Any.
    metadata: Dict[str, Any]
    success: bool = True
    error_message: Optional[str] = None
class DataPreprocessor:
    """
    Advanced data preprocessing utility class.
    Handles cleaning, transformation, and feature engineering.

    Every applied transformation is recorded (as a human-readable string)
    in ``self.transformations_applied`` for later inspection.
    """

    def __init__(self, missing_threshold: float = 0.5):
        # Columns whose missing-value ratio exceeds this threshold are dropped.
        self.missing_threshold = missing_threshold
        self.transformations_applied = []

    def clean_data(self, df: pd.DataFrame) -> pd.DataFrame:
        """
        Comprehensive data cleaning pipeline.
        Args:
            df: Input DataFrame to clean
        Returns:
            Cleaned DataFrame (drops sparse columns, imputes remaining
            missing values, removes duplicate rows)
        """
        original_shape = df.shape
        # Remove columns with excessive missing values
        missing_ratios = df.isnull().sum() / len(df)
        cols_to_drop = missing_ratios[missing_ratios > self.missing_threshold].index
        df_cleaned = df.drop(columns=cols_to_drop)
        if len(cols_to_drop) > 0:
            self.transformations_applied.append(f"Dropped {len(cols_to_drop)} columns")
        # Handle remaining missing values
        numeric_cols = df_cleaned.select_dtypes(include=[np.number]).columns
        categorical_cols = df_cleaned.select_dtypes(include=["object"]).columns
        # Fill numeric missing values with the column median.
        # NOTE: assign back instead of fillna(inplace=True) on a column slice —
        # chained-assignment inplace is deprecated in pandas 2.x and silently
        # ineffective under pandas 3 copy-on-write.
        for col in numeric_cols:
            if df_cleaned[col].isnull().any():
                median_value = df_cleaned[col].median()
                df_cleaned[col] = df_cleaned[col].fillna(median_value)
                self.transformations_applied.append(f"Filled {col} with median")
        # Fill categorical missing values with the column mode ("Unknown" if
        # the column has no mode, e.g. all values missing).
        for col in categorical_cols:
            if df_cleaned[col].isnull().any():
                mode_value = df_cleaned[col].mode().iloc[0] if not df_cleaned[col].mode().empty else "Unknown"
                df_cleaned[col] = df_cleaned[col].fillna(mode_value)
                self.transformations_applied.append(f"Filled {col} with mode")
        # Remove duplicates
        initial_rows = len(df_cleaned)
        df_cleaned = df_cleaned.drop_duplicates()
        duplicates_removed = initial_rows - len(df_cleaned)
        if duplicates_removed > 0:
            self.transformations_applied.append(f"Removed {duplicates_removed} duplicate rows")
        print(f"Data cleaning complete: {original_shape} -> {df_cleaned.shape}")
        return df_cleaned

    def engineer_features(self, df: pd.DataFrame) -> pd.DataFrame:
        """
        Create new features from existing data.
        Args:
            df: Input DataFrame
        Returns:
            DataFrame with engineered features (pairwise ratio/sum
            interactions and quantile-binned versions of numeric columns)
        """
        df_featured = df.copy()
        # Snapshot numeric columns BEFORE adding interactions so the binning
        # step below only bins the original columns, not the derived ones.
        numeric_cols = df_featured.select_dtypes(include=[np.number]).columns
        if len(numeric_cols) >= 2:
            # Create pairwise interaction features; the epsilon guards
            # against division by zero in the ratio.
            for i, col1 in enumerate(numeric_cols):
                for col2 in numeric_cols[i + 1 :]:
                    df_featured[f"{col1}_{col2}_ratio"] = df_featured[col1] / (df_featured[col2] + 1e-8)
                    df_featured[f"{col1}_{col2}_sum"] = df_featured[col1] + df_featured[col2]
            self.transformations_applied.append("Created interaction features")
        # Binning continuous variables
        for col in numeric_cols:
            if df_featured[col].nunique() > 10:  # Only bin if many unique values
                df_featured[f"{col}_binned"] = pd.qcut(df_featured[col], q=5, labels=False, duplicates="drop")
                self.transformations_applied.append(f"Binned {col}")
        return df_featured
class StatisticalAnalyzer:
    """
    Statistical analysis and hypothesis testing utilities.

    Both entry points are static, take a DataFrame, and never raise:
    failures are reported through ``AnalysisResult(success=False,
    error_message=...)`` instead.
    """

    @staticmethod
    def descriptive_statistics(df: pd.DataFrame) -> AnalysisResult:
        """
        Calculate comprehensive descriptive statistics.
        Args:
            df: Input DataFrame
        Returns:
            AnalysisResult with descriptive metrics
        """
        try:
            # Non-numeric columns are excluded from the metrics entirely.
            numeric_df = df.select_dtypes(include=[np.number])
            if numeric_df.empty:
                return AnalysisResult(
                    analysis_type=AnalysisType.DESCRIPTIVE,
                    timestamp=datetime.now(),
                    metrics={},
                    metadata={},
                    success=False,
                    error_message="No numeric columns found",
                )
            # NOTE(review): each value below is a per-column dict, not a float,
            # even though AnalysisResult.metrics is annotated with float values.
            metrics = {
                "mean_values": numeric_df.mean().to_dict(),
                "std_values": numeric_df.std().to_dict(),
                "median_values": numeric_df.median().to_dict(),
                "skewness": numeric_df.skew().to_dict(),
                "kurtosis": numeric_df.kurtosis().to_dict(),
                "correlation_with_target": None,  # Would need target column
            }
            metadata = {
                "total_rows": len(df),
                "total_columns": len(df.columns),
                "numeric_columns": len(numeric_df.columns),
                "missing_values": df.isnull().sum().to_dict(),
            }
            return AnalysisResult(analysis_type=AnalysisType.DESCRIPTIVE, timestamp=datetime.now(), metrics=metrics, metadata=metadata)
        except Exception as e:
            # Any pandas/numpy failure is converted into a failed result.
            return AnalysisResult(
                analysis_type=AnalysisType.DESCRIPTIVE,
                timestamp=datetime.now(),
                metrics={},
                metadata={},
                success=False,
                error_message=str(e),
            )

    @staticmethod
    def correlation_analysis(df: pd.DataFrame, method: str = "pearson") -> AnalysisResult:
        """
        Perform correlation analysis between variables.
        Args:
            df: Input DataFrame
            method: Correlation method ('pearson', 'spearman', 'kendall')
        Returns:
            AnalysisResult with correlation metrics
        """
        try:
            numeric_df = df.select_dtypes(include=[np.number])
            if len(numeric_df.columns) < 2:
                return AnalysisResult(
                    analysis_type=AnalysisType.CORRELATION,
                    timestamp=datetime.now(),
                    metrics={},
                    metadata={},
                    success=False,
                    error_message="Need at least 2 numeric columns for correlation",
                )
            corr_matrix = numeric_df.corr(method=method)
            # Find highest correlations (excluding diagonal)
            corr_pairs = []
            for i in range(len(corr_matrix.columns)):
                for j in range(i + 1, len(corr_matrix.columns)):
                    col1, col2 = corr_matrix.columns[i], corr_matrix.columns[j]
                    corr_value = corr_matrix.iloc[i, j]
                    if not np.isnan(corr_value):
                        # Strength is the absolute value: sign is discarded here.
                        corr_pairs.append((col1, col2, abs(corr_value)))
            # Sort by correlation strength
            corr_pairs.sort(key=lambda x: x[2], reverse=True)
            metrics = {
                "correlation_matrix": corr_matrix.to_dict(),
                "highest_correlations": corr_pairs[:10],  # Top 10
                "method_used": method,
            }
            metadata = {"variables_analyzed": list(numeric_df.columns), "total_pairs": len(corr_pairs)}
            return AnalysisResult(analysis_type=AnalysisType.CORRELATION, timestamp=datetime.now(), metrics=metrics, metadata=metadata)
        except Exception as e:
            return AnalysisResult(
                analysis_type=AnalysisType.CORRELATION,
                timestamp=datetime.now(),
                metrics={},
                metadata={},
                success=False,
                error_message=str(e),
            )
class TimeSeriesAnalyzer:
    """
    Time series analysis and forecasting utilities.

    Both analysis methods return plain dicts and degrade gracefully: on any
    exception they emit a warning and return ``{"error": ...}`` instead of
    raising.
    """

    def __init__(self, frequency: str = "D"):
        # frequency is kept for future use; the current analyses do not read it.
        self.frequency = frequency
        self.models_fitted = {}

    # Return annotations were Dict[str, any]: `any` is the builtin function,
    # not a type — corrected to typing.Any in both methods below.
    def detect_seasonality(self, series: pd.Series) -> Dict[str, Any]:
        """
        Detect seasonal patterns in time series data.
        Args:
            series: Time series data
        Returns:
            Dictionary with seasonality information, or {"error": ...} on failure
        """
        try:
            # Simple seasonality detection using autocorrelation, scanning up
            # to one year of lags (or half the series length, if shorter).
            autocorr_values = []
            for lag in range(1, min(len(series) // 2, 365)):
                if len(series) > lag:
                    autocorr = series.autocorr(lag=lag)
                    if not np.isnan(autocorr):
                        autocorr_values.append((lag, autocorr))
            # Lags with |autocorrelation| > 0.5 count as significant peaks.
            significant_lags = [(lag, corr) for lag, corr in autocorr_values if abs(corr) > 0.5]
            significant_lags.sort(key=lambda x: abs(x[1]), reverse=True)
            return {
                "seasonal_lags": significant_lags[:5],
                "strongest_seasonality": significant_lags[0] if significant_lags else None,
                "autocorrelation_values": autocorr_values,
            }
        except Exception as e:
            warnings.warn(f"Seasonality detection failed: {e}")
            return {"error": str(e)}

    def trend_analysis(self, series: pd.Series, window: int = 30) -> Dict[str, Any]:
        """
        Analyze trend patterns in time series.
        Args:
            series: Time series data
            window: Rolling window size for trend calculation
        Returns:
            Dictionary with trend information, or {"error": ...} on failure
        """
        try:
            # Calculate rolling statistics
            rolling_mean = series.rolling(window=window).mean()
            rolling_std = series.rolling(window=window).std()
            # Compare the average level of the first and last thirds of the
            # smoothed series to decide the overall direction.
            first_third = rolling_mean.iloc[: len(rolling_mean) // 3].mean()
            last_third = rolling_mean.iloc[-len(rolling_mean) // 3 :].mean()
            trend_direction = "increasing" if last_third > first_third else "decreasing"
            # Relative change, guarded against division by zero.
            trend_strength = abs(last_third - first_third) / first_third if first_third != 0 else 0
            return {
                "trend_direction": trend_direction,
                "trend_strength": trend_strength,
                "rolling_mean": rolling_mean.to_dict(),
                "rolling_std": rolling_std.to_dict(),
                "volatility": rolling_std.mean(),
            }
        except Exception as e:
            warnings.warn(f"Trend analysis failed: {e}")
            return {"error": str(e)}
def generate_sample_data(n_samples: int = 1000) -> pd.DataFrame:
    """
    Generate a reproducible sample dataset for testing analysis functions.
    Args:
        n_samples: Number of samples to generate
    Returns:
        Sample DataFrame (three numeric features, a category, a daily
        timestamp index column, and a fourth feature correlated with the
        first; ~5% of the numeric cells are set to NaN)
    """
    # Fixed seed makes every call deterministic. The order of the RNG calls
    # below is significant and must not change.
    np.random.seed(42)
    columns = {
        "feature_1": np.random.normal(100, 15, n_samples),
        "feature_2": np.random.exponential(2, n_samples),
        "feature_3": np.random.uniform(0, 100, n_samples),
        "category": np.random.choice(["A", "B", "C"], n_samples),
        "timestamp": pd.date_range("2023-01-01", periods=n_samples, freq="D"),
    }
    # feature_4 is deliberately correlated with feature_1 (plus noise).
    columns["feature_4"] = columns["feature_1"] * 0.7 + np.random.normal(0, 10, n_samples)
    # Knock out ~5% of rows: one numeric cell per selected row becomes NaN.
    nan_rows = np.random.choice(n_samples, size=int(0.05 * n_samples), replace=False)
    for row in nan_rows:
        target = np.random.choice(["feature_1", "feature_2", "feature_3"])
        columns[target][row] = np.nan
    return pd.DataFrame(columns)
def main():
    """Run a small end-to-end demonstration of the analysis pipeline."""
    print("=== Data Analysis Pipeline Demo ===")
    # Generate a deterministic sample dataset.
    frame = generate_sample_data(1000)
    print(f"Generated dataset with shape: {frame.shape}")
    # Preprocess: clean, then engineer derived features.
    preprocessor = DataPreprocessor(missing_threshold=0.1)
    cleaned = preprocessor.clean_data(frame)
    featured = preprocessor.engineer_features(cleaned)
    print(f"Applied transformations: {preprocessor.transformations_applied}")
    # Statistical analyses on the featured frame.
    analyzer = StatisticalAnalyzer()
    desc_result = analyzer.descriptive_statistics(featured)
    if desc_result.success:
        print(f"Descriptive analysis completed at {desc_result.timestamp}")
        print(f"Analyzed {desc_result.metadata['numeric_columns']} numeric columns")
    corr_result = analyzer.correlation_analysis(featured)
    if corr_result.success:
        print("Correlation analysis completed")
        print(f"Found {len(corr_result.metrics['highest_correlations'])} significant correlations")
    # Time-series analysis on feature_1 indexed by timestamp.
    ts_analyzer = TimeSeriesAnalyzer()
    series = cleaned.set_index("timestamp")["feature_1"]
    ts_analyzer.detect_seasonality(series)
    trend = ts_analyzer.trend_analysis(series)
    print(f"Time series trend: {trend.get('trend_direction', 'unknown')}")
    print(f"Volatility: {trend.get('volatility', 0):.2f}")
# Run the demo only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/data/data_analysis.py",
"license": "Apache License 2.0",
"lines": 314,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:.github/scripts/model-sweep/generate_model_sweep_markdown.py | #!/usr/bin/env python3
import json
import os
import sys
from collections import defaultdict
from datetime import datetime
def load_feature_mappings(config_file=None):
    """Load feature mappings from a JSON config file.

    Args:
        config_file: Path to the mappings file; defaults to
            feature_mappings.json next to this script.

    Returns:
        Parsed mapping of feature name -> list of test names.

    Exits the process with status 1 if the file is missing or invalid JSON.
    """
    if config_file is None:
        # Default to feature_mappings.json in the same directory as this script
        script_dir = os.path.dirname(os.path.abspath(__file__))
        config_file = os.path.join(script_dir, "feature_mappings.json")
    try:
        # Explicit UTF-8 so the read does not depend on the platform locale.
        with open(config_file, "r", encoding="utf-8") as f:
            return json.load(f)
    except FileNotFoundError:
        print(f"Error: Could not find feature mappings config file '{config_file}'")
        sys.exit(1)
    except json.JSONDecodeError:
        print(f"Error: Invalid JSON in feature mappings config file '{config_file}'")
        sys.exit(1)
def get_support_status(passed_tests, feature_tests):
    """Determine support status for a feature category.

    Args:
        passed_tests: Collection of test names that passed for the model.
        feature_tests: Test names mapped to this feature.

    Returns:
        "✅" full, "⚠️" partial, "❌" none, or "❓" when the feature has no
        (non-error) tests to judge by.
    """
    if not feature_tests:
        return "❓"  # Unknown - no tests for this feature
    # Error-path tests (*_error) are informational only and never count
    # toward support. (Removed a dead list comprehension that built and
    # discarded the error-test list.)
    non_error_tests = [test for test in feature_tests if not test.endswith("_error")]
    # If there are no non-error tests, only error tests, treat as unknown
    if not non_error_tests:
        return "❓"  # Only error tests available
    # Support is based only on non-error tests
    passed_non_error_tests = [test for test in non_error_tests if test in passed_tests]
    if len(passed_non_error_tests) == len(non_error_tests):
        return "✅"  # Full support
    if not passed_non_error_tests:
        return "❌"  # No support
    return "⚠️"  # Partial support
def categorize_tests(all_test_names, feature_mapping):
    """Bucket test names by the first feature whose pattern list contains them.

    Tests matching no feature are dropped; every feature key is present in
    the result even when its bucket is empty.
    """
    buckets = {feature: [] for feature in feature_mapping}
    for name in all_test_names:
        matched = next(
            (feature for feature, patterns in feature_mapping.items() if name in patterns),
            None,
        )
        if matched is not None:
            buckets[matched].append(name)
    return buckets
def calculate_support_score(feature_support, feature_order):
    """Calculate a numeric support score for ranking models.

    Earlier features in feature_order weigh more; features missing from the
    order get a flat 0.5 weight. Full support is worth 10 points, partial 5,
    none 1, unknown 0 — all scaled by the positional weight.
    """
    points = {"✅": 10, "⚠️": 5, "❌": 1}
    total = 0
    n_features = len(feature_order)
    for feature, status in feature_support.items():
        if feature in feature_order:
            weight = (n_features - feature_order.index(feature)) / n_features
        else:
            weight = 0.5  # Default weight for unmapped features
        total += points.get(status, 0) * weight
    return total
def calculate_provider_support_score(models_data, feature_order):
    """Average the per-model support scores for one provider (0 when empty)."""
    if not models_data:
        return 0
    scores = [model["support_score"] for model in models_data]
    return sum(scores) / len(scores)
def get_test_function_line_numbers(test_file_path):
    """Map test function names to their 1-based line numbers in a file.

    Args:
        test_file_path: Path to the Python source to scan.

    Returns:
        Dict of function name -> line number; empty when the file is missing.
    """
    test_line_numbers = {}
    try:
        # Iterate the file lazily (no readlines() materialization) and read
        # it as UTF-8 regardless of platform locale.
        with open(test_file_path, "r", encoding="utf-8") as f:
            for i, line in enumerate(f, 1):
                stripped = line.strip()
                # startswith implies the old '"def test_" in line' check.
                if stripped.startswith("def test_"):
                    # "def test_foo(bar):" -> "test_foo"
                    func_name = stripped[len("def ") :].split("(", 1)[0]
                    test_line_numbers[func_name] = i
    except FileNotFoundError:
        print(f"Warning: Could not find test file at {test_file_path}")
    return test_line_numbers
def get_github_repo_info():
    """Resolve the GitHub 'user/repo' slug from the git origin remote.

    Returns None for a non-GitHub-style remote URL, and the hard-coded
    fallback "letta-ai/letta" when git is unavailable or the remote lookup
    fails.
    """
    try:
        import subprocess

        proc = subprocess.run(
            ["git", "remote", "get-url", "origin"],
            capture_output=True,
            text=True,
            cwd=os.path.dirname(__file__),
        )
        if proc.returncode == 0:
            url = proc.stdout.strip()
            if "github.com" in url:
                if url.startswith("https://"):
                    # https://github.com/user/repo.git -> user/repo
                    return url.replace("https://github.com/", "").replace(".git", "")
                if url.startswith("git@"):
                    # git@github.com:user/repo.git -> user/repo
                    return url.split(":")[1].replace(".git", "")
                return None
    except Exception:
        pass
    # Default fallback
    return "letta-ai/letta"
def generate_test_details(model_info, feature_mapping):
    """Render per-feature pass/fail detail lines for one model.

    Each test links to its definition in model_sweep.py on GitHub when the
    local copy of that file yields a line number for it.
    """
    script_dir = os.path.dirname(os.path.abspath(__file__))
    line_numbers = get_test_function_line_numbers(os.path.join(script_dir, "model_sweep.py"))
    # Links always point at the main branch copy of the sweep script.
    base_github_url = "https://github.com/letta-ai/letta/blob/main/.github/scripts/model-sweep/model_sweep.py"
    lines = []
    for feature, tests in model_info["categorized_tests"].items():
        if not tests:
            continue
        lines.append(f"### {feature}")
        lines.append("")
        for test in sorted(tests):
            if test in model_info["passed_tests"]:
                marker = "✅"
            elif test in model_info["failed_tests"]:
                marker = "❌"
            else:
                marker = "❓"
            if test in line_numbers:
                link = f"{base_github_url}#L{line_numbers[test]}"
                lines.append(f"- {marker} [`{test}`]({link})")
            else:
                lines.append(f"- {marker} `{test}`")
        lines.append("")
    return lines
def calculate_column_widths(all_provider_data, feature_mapping):
    """Compute the widest cell needed per column across all providers.

    Feature columns only need their header width (status symbols are
    narrower), while model/context/last-scanned columns grow to fit data.
    """
    widths = {
        "model": len("Model"),
        "context_window": len("Context Window"),
        "last_scanned": len("Last Scanned"),
    }
    for feature in feature_mapping:
        widths[feature] = len(feature)
    for models in all_provider_data.values():
        for info in models:
            # Model names render wrapped in backticks, so measure that form.
            widths["model"] = max(widths["model"], len(f"`{info['name']}`"))
            # Context windows render with thousands separators.
            widths["context_window"] = max(widths["context_window"], len(f"{info['context_window']:,}"))
            widths["last_scanned"] = max(widths["last_scanned"], len(str(info["last_scanned"])))
    return widths
def process_model_sweep_report(input_file, output_file, config_file=None, debug=False):
    """Convert model sweep JSON data to an MDX report.

    Args:
        input_file: Path to the pytest-produced JSON report.
        output_file: Path the rendered MDX document is written to.
        config_file: Optional feature-mappings JSON path (defaults next to
            this script via load_feature_mappings).
        debug: Accepted for CLI compatibility; currently unused.

    Fixes vs. the previous version: the scan date now gets a correct ordinal
    suffix (strftime '%dth' produced e.g. "July 03th"), and both file handles
    use explicit UTF-8 (the report contains emoji status symbols, which would
    crash on a cp1252 default encoding).
    """
    # Load feature mappings from config file
    feature_mapping = load_feature_mappings(config_file)

    # Read the JSON data
    with open(input_file, "r", encoding="utf-8") as f:
        data = json.load(f)
    tests = data.get("tests", [])

    # Calculate summary statistics for the report header.
    providers = set(test["metadata"]["llm_config"]["provider_name"] for test in tests)
    models = set(test["metadata"]["llm_config"]["model"] for test in tests)
    total_tests = len(tests)

    # Human-friendly scan date with a correct ordinal suffix
    # (1st, 2nd, 3rd, 4th, ..., 11th-13th are always "th").
    now = datetime.now()
    day = now.day
    if 11 <= day % 100 <= 13:
        suffix = "th"
    else:
        suffix = {1: "st", 2: "nd", 3: "rd"}.get(day % 10, "th")
    scan_date = f"{now.strftime('%B')} {day}{suffix}, {now.strftime('%Y')}"

    # Start building the MDX
    mdx_lines = [
        "---",
        "title: Support Models",
        f"generated: {datetime.now().isoformat()}",
        "---",
        "",
        "# Supported Models",
        "",
        "## Overview",
        "",
        "Letta routinely runs automated scans against available providers and models. These are the results of the latest scan.",
        "",
        f"Ran {total_tests} tests against {len(models)} models across {len(providers)} providers on {scan_date}",
        "",
        "",
    ]

    # Group tests by provider
    provider_groups = defaultdict(list)
    for test in tests:
        provider_name = test["metadata"]["llm_config"]["provider_name"]
        provider_groups[provider_name].append(test)

    # Process all providers first to collect model data
    all_provider_data = {}
    provider_support_scores = {}
    for provider_name in provider_groups.keys():
        provider_tests = provider_groups[provider_name]
        # Group tests by model within this provider
        model_groups = defaultdict(list)
        for test in provider_tests:
            model_name = test["metadata"]["llm_config"]["model"]
            model_groups[model_name].append(test)
        # Process all models to calculate support scores for ranking
        model_data = []
        for model_name in model_groups.keys():
            model_tests = model_groups[model_name]
            # Extract unique test names for passed and failed tests
            passed_tests = set()
            failed_tests = set()
            all_test_names = set()
            for test in model_tests:
                # Extract test name from nodeid (split on :: and [)
                test_name = test["nodeid"].split("::")[1].split("[")[0]
                all_test_names.add(test_name)
                if test["outcome"] == "passed":
                    passed_tests.add(test_name)
                elif test["outcome"] == "failed":
                    failed_tests.add(test_name)
            # Categorize tests into features
            categorized_tests = categorize_tests(all_test_names, feature_mapping)
            # Determine support status for each feature
            feature_support = {}
            for feature_name in feature_mapping.keys():
                feature_support[feature_name] = get_support_status(passed_tests, categorized_tests[feature_name])
            # Get context window and last scanned time
            context_window = model_tests[0]["metadata"]["llm_config"]["context_window"]
            # Try to get time_last_scanned from metadata, fallback to current time
            try:
                last_scanned = model_tests[0]["metadata"].get(
                    "time_last_scanned", model_tests[0]["metadata"].get("timestamp", datetime.now().isoformat())
                )
                # Format timestamp if it's a full ISO string
                if "T" in str(last_scanned):
                    last_scanned = str(last_scanned).split("T")[0]  # Just the date part
            except Exception:
                last_scanned = "Unknown"
            # Calculate support score for ranking
            feature_order = list(feature_mapping.keys())
            support_score = calculate_support_score(feature_support, feature_order)
            # Store model data for sorting
            model_data.append(
                {
                    "name": model_name,
                    "feature_support": feature_support,
                    "context_window": context_window,
                    "last_scanned": last_scanned,
                    "support_score": support_score,
                    "failed_tests": failed_tests,
                    "passed_tests": passed_tests,
                    "categorized_tests": categorized_tests,
                }
            )
        # Sort models by support score (descending) then by name (ascending)
        model_data.sort(key=lambda x: (-x["support_score"], x["name"]))
        # Store provider data
        all_provider_data[provider_name] = model_data
        provider_support_scores[provider_name] = calculate_provider_support_score(model_data, list(feature_mapping.keys()))

    # Calculate column widths for consistent formatting (add details column)
    column_widths = calculate_column_widths(all_provider_data, feature_mapping)
    column_widths["details"] = len("Details")
    # Sort providers by support score (descending) then by name (ascending)
    sorted_providers = sorted(provider_support_scores.keys(), key=lambda x: (-provider_support_scores[x], x))

    # Generate tables for all providers first
    for provider_name in sorted_providers:
        model_data = all_provider_data[provider_name]
        # Create dynamic headers with proper padding and centering
        feature_names = list(feature_mapping.keys())
        # Build header row with left-aligned first column, centered others
        header_parts = [f"{'Model':<{column_widths['model']}}"]
        for feature in feature_names:
            header_parts.append(f"{feature:^{column_widths[feature]}}")
        header_parts.extend(
            [
                f"{'Context Window':^{column_widths['context_window']}}",
                f"{'Last Scanned':^{column_widths['last_scanned']}}",
                f"{'Details':^{column_widths['details']}}",
            ]
        )
        header_row = "| " + " | ".join(header_parts) + " |"
        # Build separator row with left-aligned first column, centered others
        separator_parts = [f"{'-' * column_widths['model']}"]
        for feature in feature_names:
            separator_parts.append(f":{'-' * (column_widths[feature] - 2)}:")
        separator_parts.extend(
            [
                f":{'-' * (column_widths['context_window'] - 2)}:",
                f":{'-' * (column_widths['last_scanned'] - 2)}:",
                f":{'-' * (column_widths['details'] - 2)}:",
            ]
        )
        separator_row = "|" + "|".join(separator_parts) + "|"
        # Add provider section without percentage
        mdx_lines.extend([f"## {provider_name}", "", header_row, separator_row])
        # Generate table rows for sorted models with proper padding
        for model_info in model_data:
            # Create anchor for model details
            model_anchor = model_info["name"].replace("/", "_").replace(":", "_").replace("-", "_").lower()
            details_anchor = f"{provider_name.lower().replace(' ', '_')}_{model_anchor}_details"
            # Build row with left-aligned first column, centered others
            row_parts = [f"`{model_info['name']}`".ljust(column_widths["model"])]
            for feature in feature_names:
                row_parts.append(f"{model_info['feature_support'][feature]:^{column_widths[feature]}}")
            row_parts.extend(
                [
                    f"{model_info['context_window']:,}".center(column_widths["context_window"]),
                    f"{model_info['last_scanned']}".center(column_widths["last_scanned"]),
                    f"[View](#{details_anchor})".center(column_widths["details"]),
                ]
            )
            row = "| " + " | ".join(row_parts) + " |"
            mdx_lines.append(row)
        # Add spacing between provider tables
        mdx_lines.extend(["", ""])

    # Add detailed test results section after all tables
    mdx_lines.extend(["---", "", "# Detailed Test Results", ""])
    for provider_name in sorted_providers:
        model_data = all_provider_data[provider_name]
        mdx_lines.extend([f"## {provider_name}", ""])
        for model_info in model_data:
            # Anchors must match the ones emitted in the tables above.
            model_anchor = model_info["name"].replace("/", "_").replace(":", "_").replace("-", "_").lower()
            details_anchor = f"{provider_name.lower().replace(' ', '_')}_{model_anchor}_details"
            mdx_lines.append(f'<a id="{details_anchor}"></a>')
            mdx_lines.append(f"### {model_info['name']}")
            mdx_lines.append("")
            # Add test details
            test_details = generate_test_details(model_info, feature_mapping)
            mdx_lines.extend(test_details)
        # Add spacing between providers in details section
        mdx_lines.extend(["", ""])

    # Write the MDX file (UTF-8: content includes emoji status symbols)
    with open(output_file, "w", encoding="utf-8") as f:
        f.write("\n".join(mdx_lines))
    print(f"Model sweep report saved to {output_file}")
def main():
    """CLI entry point: convert a model-sweep JSON report into an MDX report.

    Optional positional argv:
      1: input JSON filename (resolved relative to this script's directory)
      2: output MDX filename (resolved relative to this script's directory)
      3: config file path (used as-is; NOT resolved against the script dir)
      4: the literal "--debug" to enable debug output
    """
    input_file = "model_sweep_report.json"
    output_file = "model_sweep_report.mdx"
    config_file = None
    debug = False
    # Resolve filenames against this script's own directory so the tool works
    # regardless of the current working directory (hoisted: previously this
    # was computed identically in two branches).
    script_dir = os.path.dirname(os.path.abspath(__file__))
    if len(sys.argv) > 1:
        input_file = os.path.join(script_dir, sys.argv[1])
    if len(sys.argv) > 2:
        output_file = os.path.join(script_dir, sys.argv[2])
    if len(sys.argv) > 3:
        # NOTE(review): unlike input/output, the config path is not joined to
        # script_dir — confirm whether that asymmetry is intentional.
        config_file = sys.argv[3]
    if len(sys.argv) > 4 and sys.argv[4] == "--debug":
        debug = True
    try:
        process_model_sweep_report(input_file, output_file, config_file, debug)
    except FileNotFoundError:
        print(f"Error: Could not find input file '{input_file}'")
        sys.exit(1)
    except json.JSONDecodeError:
        print(f"Error: Invalid JSON in file '{input_file}'")
        sys.exit(1)
    except Exception as e:
        # Broad catch is deliberate: this is the CLI boundary — report and exit.
        print(f"Error: {e}")
        sys.exit(1)


if __name__ == "__main__":
    main()
| {
"repo_id": "letta-ai/letta",
"file_path": ".github/scripts/model-sweep/generate_model_sweep_markdown.py",
"license": "Apache License 2.0",
"lines": 396,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:.github/scripts/model-sweep/model_sweep.py | import base64
import json
import os
import time
import uuid
from typing import Any, Dict, List
import httpx
import pytest
from letta_client import Letta, MessageCreate, Run
from letta_client.core.api_error import ApiError
from letta_client.types import (
AssistantMessage,
Base64Image,
ImageContent,
LettaUsageStatistics,
ReasoningMessage,
TextContent,
ToolCallMessage,
ToolReturnMessage,
UrlImage,
UserMessage,
)
from letta.schemas.agent import AgentState
from letta.schemas.llm_config import LLMConfig
# ------------------------------
# Helper Functions and Constants
# ------------------------------
def get_llm_config(filename: str, llm_config_dir: str = "tests/configs/llm_model_configs") -> LLMConfig:
    """Load an LLMConfig from a JSON file located under *llm_config_dir*."""
    config_path = os.path.join(llm_config_dir, filename)
    with open(config_path, "r") as handle:
        return LLMConfig(**json.load(handle))
def roll_dice(num_sides: int) -> int:
    """
    Returns a random number between 1 and num_sides.
    Args:
        num_sides (int): The number of sides on the die.
    Returns:
        int: A random integer between 1 and num_sides, representing the die roll.
    """
    # NOTE: this docstring is parsed into the tool schema by
    # upsert_from_function, so its wording is preserved verbatim.
    from random import randint

    return randint(1, num_sides)
# One OTID is shared by every scripted user message in this module so tests can
# recognize their own messages when reading history back from the DB.
USER_MESSAGE_OTID = str(uuid.uuid4())
USER_MESSAGE_RESPONSE: str = "Teamwork makes the dream work"
# Prompt instructing the agent to reply via the send_message tool.
USER_MESSAGE_FORCE_REPLY: List[MessageCreate] = [
    MessageCreate(
        role="user",
        content=f"This is an automated test message. Call the send_message tool with the message '{USER_MESSAGE_RESPONSE}'.",
        otid=USER_MESSAGE_OTID,
    )
]
# Prompt instructing the agent to call the roll_dice tool.
USER_MESSAGE_ROLL_DICE: List[MessageCreate] = [
    MessageCreate(
        role="user",
        content="This is an automated test message. Call the roll_dice tool with 16 sides and tell me the outcome.",
        otid=USER_MESSAGE_OTID,
    )
]
URL_IMAGE = "https://upload.wikimedia.org/wikipedia/commons/a/a7/Camponotus_flavomarginatus_ant.jpg"
# Image delivered by URL reference.
USER_MESSAGE_URL_IMAGE: List[MessageCreate] = [
    MessageCreate(
        role="user",
        content=[
            ImageContent(source=UrlImage(url=URL_IMAGE)),
            TextContent(text="What is in this image?"),
        ],
        otid=USER_MESSAGE_OTID,
    )
]
# NOTE(review): this performs a network download at module import time; if the
# image host is unreachable, collecting this test module fails before any test
# runs — confirm this is acceptable for CI.
BASE64_IMAGE = base64.standard_b64encode(httpx.get(URL_IMAGE).content).decode("utf-8")
# Same image delivered inline as base64.
USER_MESSAGE_BASE64_IMAGE: List[MessageCreate] = [
    MessageCreate(
        role="user",
        content=[
            ImageContent(source=Base64Image(data=BASE64_IMAGE, media_type="image/jpeg")),
            TextContent(text="What is in this image?"),
        ],
        otid=USER_MESSAGE_OTID,
    )
]
# Default sweep of model config files (relative to tests/configs/llm_model_configs).
all_configs = [
    "openai-gpt-4o-mini.json",
    # "azure-gpt-4o-mini.json", # TODO: Re-enable on new agent loop
    "claude-3-5-sonnet.json",
    "claude-4-sonnet-extended.json",
    "claude-3-7-sonnet-extended.json",
    "gemini-1.5-pro.json",
    "gemini-2.5-flash-vertex.json",
    "gemini-2.5-pro-vertex.json",
    "together-qwen-2.5-72b-instruct.json",
    "ollama.json",
]
# Setting LLM_CONFIG_FILE narrows the sweep to a single config.
requested = os.getenv("LLM_CONFIG_FILE")
filenames = [requested] if requested else all_configs
TESTED_LLM_CONFIGS: List[LLMConfig] = [get_llm_config(fn) for fn in filenames]
def assert_greeting_with_assistant_message_response(
    messages: List[Any],
    streaming: bool = False,
    token_streaming: bool = False,
    from_db: bool = False,
) -> None:
    """
    Asserts that the messages list follows the expected sequence:
    ReasoningMessage -> AssistantMessage.

    ``from_db`` additionally expects the leading UserMessage; ``streaming``
    additionally expects a trailing LettaUsageStatistics entry.
    ``token_streaming`` relaxes the content check, since token chunks are not
    accumulated into full text.
    """
    # DB reads include the user message; streams append a usage-stats entry.
    expected_message_count = 3 if streaming or from_db else 2
    assert len(messages) == expected_message_count
    index = 0
    if from_db:
        assert isinstance(messages[index], UserMessage)
        assert messages[index].otid == USER_MESSAGE_OTID
        index += 1
    # Agent Step 1
    assert isinstance(messages[index], ReasoningMessage)
    # The last otid character encodes the message's position within its step.
    assert messages[index].otid and messages[index].otid[-1] == "0"
    index += 1
    assert isinstance(messages[index], AssistantMessage)
    if not token_streaming:
        assert USER_MESSAGE_RESPONSE in messages[index].content
    assert messages[index].otid and messages[index].otid[-1] == "1"
    index += 1
    if streaming:
        assert isinstance(messages[index], LettaUsageStatistics)
        assert messages[index].prompt_tokens > 0
        assert messages[index].completion_tokens > 0
        assert messages[index].total_tokens > 0
        assert messages[index].step_count > 0
def assert_greeting_without_assistant_message_response(
    messages: List[Any],
    streaming: bool = False,
    token_streaming: bool = False,
    from_db: bool = False,
) -> None:
    """
    Asserts that the messages list follows the expected sequence:
    ReasoningMessage -> ToolCallMessage -> ToolReturnMessage.

    Used when ``use_assistant_message=False``: the reply surfaces as a raw
    send_message tool call instead of an AssistantMessage.
    """
    # DB reads include the user message; streams append a usage-stats entry.
    expected_message_count = 4 if streaming or from_db else 3
    assert len(messages) == expected_message_count
    index = 0
    if from_db:
        assert isinstance(messages[index], UserMessage)
        assert messages[index].otid == USER_MESSAGE_OTID
        index += 1
    # Agent Step 1
    assert isinstance(messages[index], ReasoningMessage)
    assert messages[index].otid and messages[index].otid[-1] == "0"
    index += 1
    assert isinstance(messages[index], ToolCallMessage)
    assert messages[index].tool_call.name == "send_message"
    if not token_streaming:
        # Token streams deliver partial argument chunks, so skip content check.
        assert USER_MESSAGE_RESPONSE in messages[index].tool_call.arguments
    assert messages[index].otid and messages[index].otid[-1] == "1"
    index += 1
    # Agent Step 2
    assert isinstance(messages[index], ToolReturnMessage)
    assert messages[index].otid and messages[index].otid[-1] == "0"
    index += 1
    if streaming:
        assert isinstance(messages[index], LettaUsageStatistics)
def assert_tool_call_response(
    messages: List[Any],
    streaming: bool = False,
    from_db: bool = False,
) -> None:
    """
    Asserts that the messages list follows the expected sequence:
    ReasoningMessage -> ToolCallMessage -> ToolReturnMessage ->
    ReasoningMessage -> AssistantMessage.

    NOTE(review): ``streaming`` and ``from_db`` are treated as mutually
    exclusive here (the count expression picks 6 when both are set, which
    would not fit both extras) — callers never pass both.
    """
    expected_message_count = 6 if streaming else 7 if from_db else 5
    assert len(messages) == expected_message_count
    index = 0
    if from_db:
        assert isinstance(messages[index], UserMessage)
        assert messages[index].otid == USER_MESSAGE_OTID
        index += 1
    # Agent Step 1
    assert isinstance(messages[index], ReasoningMessage)
    assert messages[index].otid and messages[index].otid[-1] == "0"
    index += 1
    assert isinstance(messages[index], ToolCallMessage)
    assert messages[index].otid and messages[index].otid[-1] == "1"
    index += 1
    # Agent Step 2
    assert isinstance(messages[index], ToolReturnMessage)
    assert messages[index].otid and messages[index].otid[-1] == "0"
    index += 1
    # Hidden User Message (heartbeat) is persisted but not streamed.
    if from_db:
        assert isinstance(messages[index], UserMessage)
        assert "request_heartbeat=true" in messages[index].content
        index += 1
    # Agent Step 3
    assert isinstance(messages[index], ReasoningMessage)
    assert messages[index].otid and messages[index].otid[-1] == "0"
    index += 1
    assert isinstance(messages[index], AssistantMessage)
    assert messages[index].otid and messages[index].otid[-1] == "1"
    index += 1
    if streaming:
        assert isinstance(messages[index], LettaUsageStatistics)
def assert_image_input_response(
    messages: List[Any],
    streaming: bool = False,
    token_streaming: bool = False,
    from_db: bool = False,
) -> None:
    """
    Asserts that the messages list follows the expected sequence:
    ReasoningMessage -> AssistantMessage.

    Like the greeting variant, but makes no claim about the assistant's
    content (image descriptions are model-dependent).
    """
    expected_message_count = 3 if streaming or from_db else 2
    assert len(messages) == expected_message_count
    index = 0
    if from_db:
        assert isinstance(messages[index], UserMessage)
        assert messages[index].otid == USER_MESSAGE_OTID
        index += 1
    # Agent Step 1
    assert isinstance(messages[index], ReasoningMessage)
    assert messages[index].otid and messages[index].otid[-1] == "0"
    index += 1
    assert isinstance(messages[index], AssistantMessage)
    assert messages[index].otid and messages[index].otid[-1] == "1"
    index += 1
    if streaming:
        assert isinstance(messages[index], LettaUsageStatistics)
        assert messages[index].prompt_tokens > 0
        assert messages[index].completion_tokens > 0
        assert messages[index].total_tokens > 0
        assert messages[index].step_count > 0
def accumulate_chunks(chunks: List[Any]) -> List[Any]:
    """
    Accumulates chunks into a list of messages.

    Consecutive chunks sharing the same ``message_type`` form one message;
    the first chunk of each run is kept as its representative.
    """
    collapsed: List[Any] = []
    previous_type = None
    for chunk in chunks:
        if chunk.message_type != previous_type:
            # A new run begins: keep its first chunk as the message.
            collapsed.append(chunk)
        else:
            pass  # TODO: actually accumulate the chunks. For now we only care about the count
        previous_type = chunk.message_type
    return collapsed
def wait_for_run_completion(client: Letta, run_id: str, timeout: float = 30.0, interval: float = 0.5) -> Run:
    """Poll a run until it completes, raising on failure or timeout.

    Raises RuntimeError if the run reports "failed", TimeoutError if it has
    not completed within *timeout* seconds; other statuses keep polling.
    """
    deadline = time.time() + timeout
    while True:
        run = client.runs.retrieve(run_id)
        if run.status == "completed":
            return run
        if run.status == "failed":
            raise RuntimeError(f"Run {run_id} did not complete: status = {run.status}")
        if time.time() > deadline:
            raise TimeoutError(f"Run {run_id} did not complete within {timeout} seconds (last status: {run.status})")
        time.sleep(interval)
def assert_tool_response_dict_messages(messages: List[Dict[str, Any]]) -> None:
    """
    Asserts that a list of message dictionaries starts with the expected types.

    Expected order:
        1. reasoning_message
        2. assistant_message

    NOTE: the previous docstring described a five-entry reasoning/tool-call/
    tool-return sequence that this function never checked; only the two
    leading message types are validated.
    """
    assert isinstance(messages, list)
    assert messages[0]["message_type"] == "reasoning_message"
    assert messages[1]["message_type"] == "assistant_message"
# ------------------------------
# Test Cases
# ------------------------------
# def test_that_ci_workflow_works(
# disable_e2b_api_key: Any,
# client: Letta,
# agent_state: AgentState,
# llm_config: LLMConfig,
# json_metadata: pytest.FixtureRequest,
# ) -> None:
# """
# Tests that the CI workflow works.
# """
# json_metadata["test_type"] = "debug"
def test_greeting_with_assistant_message(
    disable_e2b_api_key: Any,
    client: Letta,
    agent_state: AgentState,
    llm_config: LLMConfig,
    json_metadata: pytest.FixtureRequest,
) -> None:
    """
    Tests sending a message with a synchronous client.
    Verifies that the response messages follow the expected order
    (reasoning -> assistant), both live and when read back from the DB.
    """
    json_metadata["llm_config"] = dict(llm_config)
    # Remember the newest message so DB assertions only see this test's output.
    last_message = client.agents.messages.list(agent_id=agent_state.id, limit=1)
    # Point the shared agent at the model under test.
    agent_state = client.agents.modify(agent_id=agent_state.id, llm_config=llm_config)
    response = client.agents.messages.create(
        agent_id=agent_state.id,
        messages=USER_MESSAGE_FORCE_REPLY,
    )
    assert_greeting_with_assistant_message_response(response.messages)
    messages_from_db = client.agents.messages.list(agent_id=agent_state.id, after=last_message[0].id)
    assert_greeting_with_assistant_message_response(messages_from_db, from_db=True)
def test_greeting_without_assistant_message(
    disable_e2b_api_key: Any,
    client: Letta,
    llm_config: LLMConfig,
    agent_state: AgentState,
    json_metadata: pytest.FixtureRequest,
) -> None:
    """
    Tests sending a message with a synchronous client.
    Verifies that with use_assistant_message=False the reply surfaces as a
    raw send_message tool call / tool return instead of an AssistantMessage.
    """
    json_metadata["llm_config"] = dict(llm_config)
    last_message = client.agents.messages.list(agent_id=agent_state.id, limit=1)
    agent_state = client.agents.modify(agent_id=agent_state.id, llm_config=llm_config)
    response = client.agents.messages.create(
        agent_id=agent_state.id,
        messages=USER_MESSAGE_FORCE_REPLY,
        use_assistant_message=False,
    )
    assert_greeting_without_assistant_message_response(response.messages)
    messages_from_db = client.agents.messages.list(agent_id=agent_state.id, after=last_message[0].id, use_assistant_message=False)
    assert_greeting_without_assistant_message_response(messages_from_db, from_db=True)
def test_tool_call(
    disable_e2b_api_key: Any,
    client: Letta,
    llm_config: LLMConfig,
    agent_state: AgentState,
    json_metadata: pytest.FixtureRequest,
) -> None:
    """
    Tests sending a message with a synchronous client.
    Verifies a full tool-call round trip: reasoning -> tool call ->
    tool return -> reasoning -> assistant reply.
    """
    json_metadata["llm_config"] = dict(llm_config)
    # Register roll_dice as a tool and attach it to the agent under test.
    dice_tool = client.tools.upsert_from_function(func=roll_dice)
    client.agents.tools.attach(agent_id=agent_state.id, tool_id=dice_tool.id)
    last_message = client.agents.messages.list(agent_id=agent_state.id, limit=1)
    agent_state = client.agents.modify(agent_id=agent_state.id, llm_config=llm_config)
    response = client.agents.messages.create(
        agent_id=agent_state.id,
        messages=USER_MESSAGE_ROLL_DICE,
    )
    assert_tool_call_response(response.messages)
    messages_from_db = client.agents.messages.list(agent_id=agent_state.id, after=last_message[0].id)
    assert_tool_call_response(messages_from_db, from_db=True)
def test_url_image_input(
    disable_e2b_api_key: Any,
    client: Letta,
    llm_config: LLMConfig,
    agent_state: AgentState,
    json_metadata: pytest.FixtureRequest,
) -> None:
    """
    Tests sending a message with a synchronous client.
    Verifies the model accepts an image passed by URL and replies with
    reasoning -> assistant.
    """
    json_metadata["llm_config"] = dict(llm_config)
    last_message = client.agents.messages.list(agent_id=agent_state.id, limit=1)
    agent_state = client.agents.modify(agent_id=agent_state.id, llm_config=llm_config)
    response = client.agents.messages.create(
        agent_id=agent_state.id,
        messages=USER_MESSAGE_URL_IMAGE,
    )
    assert_image_input_response(response.messages)
    messages_from_db = client.agents.messages.list(agent_id=agent_state.id, after=last_message[0].id)
    assert_image_input_response(messages_from_db, from_db=True)
def test_base64_image_input(
    disable_e2b_api_key: Any,
    client: Letta,
    llm_config: LLMConfig,
    agent_state: AgentState,
    json_metadata: pytest.FixtureRequest,
) -> None:
    """
    Tests sending a message with a synchronous client.
    Verifies the model accepts an image passed inline as base64 and replies
    with reasoning -> assistant.
    """
    json_metadata["llm_config"] = dict(llm_config)
    last_message = client.agents.messages.list(agent_id=agent_state.id, limit=1)
    agent_state = client.agents.modify(agent_id=agent_state.id, llm_config=llm_config)
    response = client.agents.messages.create(
        agent_id=agent_state.id,
        messages=USER_MESSAGE_BASE64_IMAGE,
    )
    assert_image_input_response(response.messages)
    messages_from_db = client.agents.messages.list(agent_id=agent_state.id, after=last_message[0].id)
    assert_image_input_response(messages_from_db, from_db=True)
def test_agent_loop_error(
    disable_e2b_api_key: Any,
    client: Letta,
    llm_config: LLMConfig,
    agent_state: AgentState,
    json_metadata: pytest.FixtureRequest,
) -> None:
    """
    Tests sending a message with a synchronous client.
    Verifies that no new messages are persisted on error.
    """
    json_metadata["llm_config"] = dict(llm_config)
    last_message = client.agents.messages.list(agent_id=agent_state.id, limit=1)
    # Save the tool set so it can be restored after the induced failure.
    tools = agent_state.tools
    # Stripping all tools makes the agent loop fail on the forced reply.
    agent_state = client.agents.modify(agent_id=agent_state.id, llm_config=llm_config, tool_ids=[])
    with pytest.raises(ApiError):
        client.agents.messages.create(
            agent_id=agent_state.id,
            messages=USER_MESSAGE_FORCE_REPLY,
        )
    # The failed request must not have persisted any messages.
    messages_from_db = client.agents.messages.list(agent_id=agent_state.id, after=last_message[0].id)
    assert len(messages_from_db) == 0
    client.agents.modify(agent_id=agent_state.id, tool_ids=[t.id for t in tools])
def test_step_streaming_greeting_with_assistant_message(
    disable_e2b_api_key: Any,
    client: Letta,
    llm_config: LLMConfig,
    agent_state: AgentState,
    json_metadata: pytest.FixtureRequest,
) -> None:
    """
    Tests sending a streaming message with a synchronous client.
    Checks that each chunk in the stream has the correct message types
    (step-level streaming: one chunk per message).
    """
    json_metadata["llm_config"] = dict(llm_config)
    last_message = client.agents.messages.list(agent_id=agent_state.id, limit=1)
    agent_state = client.agents.modify(agent_id=agent_state.id, llm_config=llm_config)
    response = client.agents.messages.create_stream(
        agent_id=agent_state.id,
        messages=USER_MESSAGE_FORCE_REPLY,
    )
    # Drain the stream, then collapse runs of same-typed chunks into messages.
    chunks = list(response)
    messages = accumulate_chunks(chunks)
    assert_greeting_with_assistant_message_response(messages, streaming=True)
    messages_from_db = client.agents.messages.list(agent_id=agent_state.id, after=last_message[0].id)
    assert_greeting_with_assistant_message_response(messages_from_db, from_db=True)
def test_step_streaming_greeting_without_assistant_message(
    disable_e2b_api_key: Any,
    client: Letta,
    llm_config: LLMConfig,
    agent_state: AgentState,
    json_metadata: pytest.FixtureRequest,
) -> None:
    """
    Tests sending a streaming message with a synchronous client.
    Checks the raw tool-call message shape (use_assistant_message=False)
    arrives correctly over step-level streaming.
    """
    json_metadata["llm_config"] = dict(llm_config)
    last_message = client.agents.messages.list(agent_id=agent_state.id, limit=1)
    agent_state = client.agents.modify(agent_id=agent_state.id, llm_config=llm_config)
    response = client.agents.messages.create_stream(
        agent_id=agent_state.id,
        messages=USER_MESSAGE_FORCE_REPLY,
        use_assistant_message=False,
    )
    chunks = list(response)
    messages = accumulate_chunks(chunks)
    assert_greeting_without_assistant_message_response(messages, streaming=True)
    messages_from_db = client.agents.messages.list(agent_id=agent_state.id, after=last_message[0].id, use_assistant_message=False)
    assert_greeting_without_assistant_message_response(messages_from_db, from_db=True)
def test_step_streaming_tool_call(
    disable_e2b_api_key: Any,
    client: Letta,
    llm_config: LLMConfig,
    agent_state: AgentState,
    json_metadata: pytest.FixtureRequest,
) -> None:
    """
    Tests sending a streaming message with a synchronous client.
    Checks a full tool-call round trip over step-level streaming.
    """
    json_metadata["llm_config"] = dict(llm_config)
    dice_tool = client.tools.upsert_from_function(func=roll_dice)
    agent_state = client.agents.tools.attach(agent_id=agent_state.id, tool_id=dice_tool.id)
    last_message = client.agents.messages.list(agent_id=agent_state.id, limit=1)
    agent_state = client.agents.modify(agent_id=agent_state.id, llm_config=llm_config)
    response = client.agents.messages.create_stream(
        agent_id=agent_state.id,
        messages=USER_MESSAGE_ROLL_DICE,
    )
    chunks = list(response)
    messages = accumulate_chunks(chunks)
    assert_tool_call_response(messages, streaming=True)
    messages_from_db = client.agents.messages.list(agent_id=agent_state.id, after=last_message[0].id)
    assert_tool_call_response(messages_from_db, from_db=True)
def test_step_stream_agent_loop_error(
    disable_e2b_api_key: Any,
    client: Letta,
    llm_config: LLMConfig,
    agent_state: AgentState,
    json_metadata: pytest.FixtureRequest,
) -> None:
    """
    Tests sending a message with a synchronous client.
    Verifies that no new messages are persisted on error.
    """
    json_metadata["llm_config"] = dict(llm_config)
    last_message = client.agents.messages.list(agent_id=agent_state.id, limit=1)
    tools = agent_state.tools
    # Stripping all tools makes the agent loop fail on the forced reply.
    agent_state = client.agents.modify(agent_id=agent_state.id, llm_config=llm_config, tool_ids=[])
    with pytest.raises(ApiError):
        response = client.agents.messages.create_stream(
            agent_id=agent_state.id,
            messages=USER_MESSAGE_FORCE_REPLY,
        )
        # The error may only surface while the stream is consumed.
        list(response)
    messages_from_db = client.agents.messages.list(agent_id=agent_state.id, after=last_message[0].id)
    assert len(messages_from_db) == 0
    client.agents.modify(agent_id=agent_state.id, tool_ids=[t.id for t in tools])
def test_token_streaming_greeting_with_assistant_message(
    disable_e2b_api_key: Any,
    client: Letta,
    llm_config: LLMConfig,
    agent_state: AgentState,
    json_metadata: pytest.FixtureRequest,
) -> None:
    """
    Tests sending a streaming message with a synchronous client.
    Checks the message-type sequence with token-level streaming
    (stream_tokens=True); content is not asserted since chunks are partial.
    """
    json_metadata["llm_config"] = dict(llm_config)
    last_message = client.agents.messages.list(agent_id=agent_state.id, limit=1)
    agent_state = client.agents.modify(agent_id=agent_state.id, llm_config=llm_config)
    response = client.agents.messages.create_stream(
        agent_id=agent_state.id,
        messages=USER_MESSAGE_FORCE_REPLY,
        stream_tokens=True,
    )
    chunks = list(response)
    messages = accumulate_chunks(chunks)
    assert_greeting_with_assistant_message_response(messages, streaming=True, token_streaming=True)
    messages_from_db = client.agents.messages.list(agent_id=agent_state.id, after=last_message[0].id)
    assert_greeting_with_assistant_message_response(messages_from_db, from_db=True)
def test_token_streaming_greeting_without_assistant_message(
    disable_e2b_api_key: Any,
    client: Letta,
    llm_config: LLMConfig,
    agent_state: AgentState,
    json_metadata: pytest.FixtureRequest,
) -> None:
    """
    Tests sending a streaming message with a synchronous client.
    Checks the raw tool-call message shape (use_assistant_message=False)
    with token-level streaming; content is not asserted.
    """
    json_metadata["llm_config"] = dict(llm_config)
    last_message = client.agents.messages.list(agent_id=agent_state.id, limit=1)
    agent_state = client.agents.modify(agent_id=agent_state.id, llm_config=llm_config)
    response = client.agents.messages.create_stream(
        agent_id=agent_state.id,
        messages=USER_MESSAGE_FORCE_REPLY,
        use_assistant_message=False,
        stream_tokens=True,
    )
    chunks = list(response)
    messages = accumulate_chunks(chunks)
    assert_greeting_without_assistant_message_response(messages, streaming=True, token_streaming=True)
    messages_from_db = client.agents.messages.list(agent_id=agent_state.id, after=last_message[0].id, use_assistant_message=False)
    assert_greeting_without_assistant_message_response(messages_from_db, from_db=True)
def test_token_streaming_tool_call(
    disable_e2b_api_key: Any,
    client: Letta,
    llm_config: LLMConfig,
    agent_state: AgentState,
    json_metadata: pytest.FixtureRequest,
) -> None:
    """
    Tests sending a streaming message with a synchronous client.
    Checks a full tool-call round trip with token-level streaming.
    """
    json_metadata["llm_config"] = dict(llm_config)
    dice_tool = client.tools.upsert_from_function(func=roll_dice)
    agent_state = client.agents.tools.attach(agent_id=agent_state.id, tool_id=dice_tool.id)
    last_message = client.agents.messages.list(agent_id=agent_state.id, limit=1)
    agent_state = client.agents.modify(agent_id=agent_state.id, llm_config=llm_config)
    response = client.agents.messages.create_stream(
        agent_id=agent_state.id,
        messages=USER_MESSAGE_ROLL_DICE,
        stream_tokens=True,
    )
    chunks = list(response)
    messages = accumulate_chunks(chunks)
    assert_tool_call_response(messages, streaming=True)
    messages_from_db = client.agents.messages.list(agent_id=agent_state.id, after=last_message[0].id)
    assert_tool_call_response(messages_from_db, from_db=True)
def test_token_streaming_agent_loop_error(
    disable_e2b_api_key: Any,
    client: Letta,
    llm_config: LLMConfig,
    agent_state: AgentState,
    json_metadata: pytest.FixtureRequest,
) -> None:
    """
    Tests sending a message with a synchronous client.
    Verifies that no new messages are persisted on error.
    """
    json_metadata["llm_config"] = dict(llm_config)
    last_message = client.agents.messages.list(agent_id=agent_state.id, limit=1)
    tools = agent_state.tools
    # Stripping all tools makes the agent loop fail on the forced reply.
    agent_state = client.agents.modify(agent_id=agent_state.id, llm_config=llm_config, tool_ids=[])
    try:
        response = client.agents.messages.create_stream(
            agent_id=agent_state.id,
            messages=USER_MESSAGE_FORCE_REPLY,
            stream_tokens=True,
        )
        list(response)
    except Exception:
        pass  # only some models throw an error TODO: make this consistent
    # Regardless of whether an error surfaced, nothing must be persisted.
    messages_from_db = client.agents.messages.list(agent_id=agent_state.id, after=last_message[0].id)
    assert len(messages_from_db) == 0
    client.agents.modify(agent_id=agent_state.id, tool_ids=[t.id for t in tools])
def test_async_greeting_with_assistant_message(
    disable_e2b_api_key: Any,
    client: Letta,
    llm_config: LLMConfig,
    agent_state: AgentState,
    json_metadata: pytest.FixtureRequest,
) -> None:
    """
    Tests sending a message as an asynchronous job using the synchronous client.
    Waits for job completion and asserts that the result messages are as expected.
    """
    json_metadata["llm_config"] = dict(llm_config)
    client.agents.modify(agent_id=agent_state.id, llm_config=llm_config)
    run = client.agents.messages.create_async(
        agent_id=agent_state.id,
        messages=USER_MESSAGE_FORCE_REPLY,
    )
    # Poll until the background run finishes (raises on failure/timeout).
    run = wait_for_run_completion(client, run.id)
    result = run.metadata.get("result")
    assert result is not None, "Run metadata missing 'result' key"
    messages = result["messages"]
    assert_tool_response_dict_messages(messages)
def test_auto_summarize(
    disable_e2b_api_key: Any,
    client: Letta,
    llm_config: LLMConfig,
    json_metadata: pytest.FixtureRequest,
) -> None:
    """Test that summarization is automatically triggered.

    Pins a tiny context window (3000 tokens) on a fresh agent, repeatedly
    sends a long message, and passes once the in-context message count stops
    growing — evidence the summarizer compacted the history.
    """
    json_metadata["llm_config"] = dict(llm_config)
    # pydantic prevents overriding the context_window on the passed LLMConfig,
    # so rebuild a config from its dump with the smaller window.
    new_llm_config = llm_config.model_dump()
    new_llm_config["context_window"] = 3000
    pinned_context_window_llm_config = LLMConfig(**new_llm_config)
    send_message_tool = client.tools.list(name="send_message")[0]
    temp_agent_state = client.agents.create(
        include_base_tools=False,
        tool_ids=[send_message_tool.id],
        llm_config=pinned_context_window_llm_config,
        embedding="letta/letta-free",
        tags=["supervisor"],
    )
    # Deliberately long prompt so each turn eats a big slice of the window.
    philosophical_question = """
    You know, sometimes I wonder if the entire structure of our lives is built on a series of unexamined assumptions we just silently agreed to somewhere along the way—like how we all just decided that five days a week of work and two days of “rest” constitutes balance, or how 9-to-5 became the default rhythm of a meaningful life, or even how the idea of “success” got boiled down to job titles and property ownership and productivity metrics on a LinkedIn profile, when maybe none of that is actually what makes a life feel full, or grounded, or real. And then there’s the weird paradox of ambition, how we're taught to chase it like a finish line that keeps moving, constantly redefining itself right as you’re about to grasp it—because even when you get the job, or the degree, or the validation, there's always something next, something more, like a treadmill with invisible settings you didn’t realize were turned up all the way.
    And have you noticed how we rarely stop to ask who set those definitions for us? Like was there ever a council that decided, yes, owning a home by thirty-five and retiring by sixty-five is the universal template for fulfillment? Or did it just accumulate like cultural sediment over generations, layered into us so deeply that questioning it feels uncomfortable, even dangerous? And isn’t it strange that we spend so much of our lives trying to optimize things—our workflows, our diets, our sleep, our morning routines—as though the point of life is to operate more efficiently rather than to experience it more richly? We build these intricate systems, these rulebooks for being a “high-functioning” human, but where in all of that is the space for feeling lost, for being soft, for wandering without a purpose just because it’s a sunny day and your heart is tugging you toward nowhere in particular?
    Sometimes I lie awake at night and wonder if all the noise we wrap around ourselves—notifications, updates, performance reviews, even our internal monologues—might be crowding out the questions we were meant to live into slowly, like how to love better, or how to forgive ourselves, or what the hell we’re even doing here in the first place. And when you strip it all down—no goals, no KPIs, no curated identity—what’s actually left of us? Are we just a sum of the roles we perform, or is there something quieter underneath that we've forgotten how to hear?
    And if there is something underneath all of it—something real, something worth listening to—then how do we begin to uncover it, gently, without rushing or reducing it to another task on our to-do list?
    """
    MAX_ATTEMPTS = 10
    prev_length = None
    for attempt in range(MAX_ATTEMPTS):
        client.agents.messages.create(
            agent_id=temp_agent_state.id,
            messages=[MessageCreate(role="user", content=philosophical_question)],
        )
        temp_agent_state = client.agents.retrieve(agent_id=temp_agent_state.id)
        message_ids = temp_agent_state.message_ids
        current_length = len(message_ids)
        print("LENGTH OF IN_CONTEXT_MESSAGES:", current_length)
        # A non-increasing count means older messages were summarized away.
        if prev_length is not None and current_length <= prev_length:
            # TODO: Add more stringent checks here
            print(f"Summarization was triggered, detected current_length {current_length} is at least prev_length {prev_length}.")
            break
        prev_length = current_length
    else:
        raise AssertionError("Summarization was not triggered after 10 messages")
| {
"repo_id": "letta-ai/letta",
"file_path": ".github/scripts/model-sweep/model_sweep.py",
"license": "Apache License 2.0",
"lines": 685,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:alembic/versions/0335b1eb9c40_add_batch_item_id_to_messages.py | """Add batch_item_id to messages
Revision ID: 0335b1eb9c40
Revises: 373dabcba6cf
Create Date: 2025-05-02 10:30:08.156190
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
from letta.settings import settings
# revision identifiers, used by Alembic.
revision: str = "0335b1eb9c40"  # this migration's id
down_revision: Union[str, None] = "373dabcba6cf"  # parent revision in the chain
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Add the nullable ``batch_item_id`` column to ``messages`` (Postgres only)."""
    # Skip this migration for SQLite
    if not settings.letta_pg_uri_no_default:
        return
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column("messages", sa.Column("batch_item_id", sa.String(), nullable=True))
    # ### end Alembic commands ###
def downgrade() -> None:
    """Drop the ``batch_item_id`` column from ``messages`` (Postgres only)."""
    # Skip this migration for SQLite
    if not settings.letta_pg_uri_no_default:
        return
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column("messages", "batch_item_id")
    # ### end Alembic commands ###
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/0335b1eb9c40_add_batch_item_id_to_messages.py",
"license": "Apache License 2.0",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/05c3bc564286_add_metrics_to_agent_loop_runs.py | """add metrics to agent loop runs
Revision ID: 05c3bc564286
Revises: d007f4ca66bf
Create Date: 2025-08-06 14:30:48.255538
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "05c3bc564286"  # this migration's id
down_revision: Union[str, None] = "d007f4ca66bf"  # parent revision in the chain
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Add nullable timing-metric columns (ttft_ns, total_duration_ns) to ``jobs``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column("jobs", sa.Column("ttft_ns", sa.BigInteger(), nullable=True))
    op.add_column("jobs", sa.Column("total_duration_ns", sa.BigInteger(), nullable=True))
    # ### end Alembic commands ###
def downgrade() -> None:
    """Drop the timing-metric columns from ``jobs`` (reverse order of upgrade)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column("jobs", "total_duration_ns")
    op.drop_column("jobs", "ttft_ns")
    # ### end Alembic commands ###
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/05c3bc564286_add_metrics_to_agent_loop_runs.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/068588268b02_add_vector_db_provider_to_archives_table.py | """Add vector_db_provider to archives table
Revision ID: 068588268b02
Revises: 887a4367b560
Create Date: 2025-08-27 13:16:29.428231
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
from letta.settings import settings
# revision identifiers, used by Alembic.
revision: str = "068588268b02"
down_revision: Union[str, None] = "887a4367b560"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Add a ``vector_db_provider`` column to ``archives``.

    PostgreSQL gets a proper enum type and a NOT NULL constraint; SQLite
    falls back to a plain string column that stays nullable, because SQLite
    cannot tighten a column to NOT NULL without rebuilding the table.
    """
    # Both branches backfill existing rows with the NATIVE provider.
    backfill_sql = "UPDATE archives SET vector_db_provider = 'NATIVE' WHERE vector_db_provider IS NULL"
    if settings.letta_pg_uri_no_default:
        # PostgreSQL: create the enum type first, add the column as nullable,
        # backfill, then tighten to NOT NULL.
        provider_enum = sa.Enum("NATIVE", "TPUF", name="vectordbprovider")
        provider_enum.create(op.get_bind(), checkfirst=True)
        op.add_column("archives", sa.Column("vector_db_provider", provider_enum, nullable=True))
        op.execute(backfill_sql)
        op.alter_column("archives", "vector_db_provider", nullable=False)
    else:
        # SQLite: string column, backfilled but intentionally left nullable
        # (ALTER TABLE limitation — see note above).
        op.add_column("archives", sa.Column("vector_db_provider", sa.String(), nullable=True))
        op.execute(backfill_sql)
def downgrade() -> None:
    """Drop ``archives.vector_db_provider`` and, on PostgreSQL, its enum type."""
    op.drop_column("archives", "vector_db_provider")
    if settings.letta_pg_uri_no_default:
        # The enum type only exists on PostgreSQL.
        sa.Enum("NATIVE", "TPUF", name="vectordbprovider").drop(op.get_bind(), checkfirst=True)
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/068588268b02_add_vector_db_provider_to_archives_table.py",
"license": "Apache License 2.0",
"lines": 44,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/06fbbf65d4f1_support_for_project_id_for_blocks_and_.py | """support for project_id for blocks and groups
Revision ID: 06fbbf65d4f1
Revises: f55542f37641
Create Date: 2025-07-21 15:07:32.133538
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "06fbbf65d4f1"
down_revision: Union[str, None] = "f55542f37641"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Add a nullable ``project_id`` column to the ``block`` and ``groups`` tables.

    The backfill statements below are intentionally left commented out: running
    them inside the migration would lock the tables against a live application.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column("block", sa.Column("project_id", sa.String(), nullable=True))
    op.add_column("groups", sa.Column("project_id", sa.String(), nullable=True))
    # NOTE: running the backfill on alembic will result in locking with running application.
    # This is okay if okay with downtime. Options also to do rolling migration or dynamic updates.
    # Backfill project_id for blocks table
    # Since all agents for a block have the same project_id, we can just grab the first one
    # op.execute(
    #     text(
    #         """
    # UPDATE block
    # SET project_id = (
    #     SELECT a.project_id
    #     FROM blocks_agents ba
    #     JOIN agents a ON ba.agent_id = a.id
    #     WHERE ba.block_id = block.id
    #     AND a.project_id IS NOT NULL
    #     LIMIT 1
    # )
    # """
    #     )
    # )
    # Backfill project_id for groups table
    # op.execute(
    #     text(
    #         """
    # UPDATE groups
    # SET project_id = (
    #     SELECT a.project_id
    #     FROM groups_agents ga
    #     JOIN agents a ON ga.agent_id = a.id
    #     WHERE ga.group_id = groups.id
    #     AND a.project_id IS NOT NULL
    #     LIMIT 1
    # )
    # """
    #     )
    # )
    # ### end Alembic commands ###
def downgrade() -> None:
    """Drop the ``project_id`` columns added by this revision."""
    # Reverse order of the upgrade.
    for table_name in ("groups", "block"):
        op.drop_column(table_name, "project_id")
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/06fbbf65d4f1_support_for_project_id_for_blocks_and_.py",
"license": "Apache License 2.0",
"lines": 58,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
letta-ai/letta:alembic/versions/0b496eae90de_add_file_agent_table.py | """Add file agent table
Revision ID: 0b496eae90de
Revises: 341068089f14
Create Date: 2025-06-02 15:14:33.730687
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
from letta.settings import settings
# revision identifiers, used by Alembic.
revision: str = "0b496eae90de"
down_revision: Union[str, None] = "341068089f14"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Create the ``files_agents`` association table (PostgreSQL only).

    The table records, per (file, agent) pair, whether the file is open and
    the slice of content currently visible to the agent, along with standard
    audit columns.
    """
    # Skip this migration for SQLite
    if not settings.letta_pg_uri_no_default:
        return
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        "files_agents",
        sa.Column("id", sa.String(), nullable=False),
        sa.Column("file_id", sa.String(), nullable=False),
        sa.Column("agent_id", sa.String(), nullable=False),
        sa.Column("is_open", sa.Boolean(), nullable=False),
        sa.Column("visible_content", sa.Text(), nullable=True),
        sa.Column("last_accessed_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False),
        sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=True),
        sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=True),
        sa.Column("is_deleted", sa.Boolean(), server_default=sa.text("FALSE"), nullable=False),
        sa.Column("_created_by_id", sa.String(), nullable=True),
        sa.Column("_last_updated_by_id", sa.String(), nullable=True),
        sa.Column("organization_id", sa.String(), nullable=False),
        # Deleting an agent or file cascades to its association rows.
        sa.ForeignKeyConstraint(["agent_id"], ["agents.id"], ondelete="CASCADE"),
        sa.ForeignKeyConstraint(["file_id"], ["files.id"], ondelete="CASCADE"),
        sa.ForeignKeyConstraint(
            ["organization_id"],
            ["organizations.id"],
        ),
        sa.PrimaryKeyConstraint("id", "file_id", "agent_id"),
    )
    op.create_index("ix_files_agents_file_id_agent_id", "files_agents", ["file_id", "agent_id"], unique=False)
    # ### end Alembic commands ###
def downgrade() -> None:
    """Drop the ``files_agents`` table and its lookup index (PostgreSQL only)."""
    # SQLite deployments never ran the upgrade, so there is nothing to undo.
    if settings.letta_pg_uri_no_default:
        op.drop_index("ix_files_agents_file_id_agent_id", table_name="files_agents")
        op.drop_table("files_agents")
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/0b496eae90de_add_file_agent_table.py",
"license": "Apache License 2.0",
"lines": 51,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/15b577c62f3f_add_hidden_property_to_agents.py | """Add hidden property to agents
Revision ID: 15b577c62f3f
Revises: 4c6c9ef0387d
Create Date: 2025-07-30 13:19:15.213121
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "15b577c62f3f"
down_revision: Union[str, None] = "4c6c9ef0387d"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Add a nullable ``hidden`` flag to ``agents`` and backfill template agents."""
    op.add_column("agents", sa.Column("hidden", sa.Boolean(), nullable=True))
    # Agents living in a "templates-*" project are hidden by default.
    op.get_bind().execute(sa.text("UPDATE agents SET hidden = true WHERE project_id LIKE 'templates-%'"))
def downgrade() -> None:
    """Remove the ``hidden`` column from ``agents``."""
    op.drop_column("agents", "hidden")
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/15b577c62f3f_add_hidden_property_to_agents.py",
"license": "Apache License 2.0",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/18e300709530_add_instructions_field_to_sources.py | """add instructions field to sources
Revision ID: 18e300709530
Revises: 878607e41ca4
Create Date: 2025-05-08 17:56:20.877183
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
from letta.settings import settings
# revision identifiers, used by Alembic.
revision: str = "18e300709530"
down_revision: Union[str, None] = "878607e41ca4"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Add a nullable ``instructions`` column to ``sources`` (PostgreSQL only)."""
    # SQLite deployments skip this migration entirely.
    if settings.letta_pg_uri_no_default:
        op.add_column("sources", sa.Column("instructions", sa.String(), nullable=True))
def downgrade() -> None:
    """Drop ``sources.instructions`` (PostgreSQL only)."""
    # SQLite deployments skip this migration entirely.
    if settings.letta_pg_uri_no_default:
        op.drop_column("sources", "instructions")
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/18e300709530_add_instructions_field_to_sources.py",
"license": "Apache License 2.0",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/18ff61fbc034_add_agent_id_index_to_mapping_tables.py | """add agent_id index to mapping tables
Revision ID: 18ff61fbc034
Revises: b888f21b151f
Create Date: 2025-09-10 19:16:39.118760
"""
from typing import Sequence, Union
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "18ff61fbc034"
down_revision: Union[str, None] = "b888f21b151f"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Create non-unique lookup indexes on the agent mapping tables."""
    index_specs = [
        ("ix_blocks_agents_block_id", "blocks_agents", ["block_id"]),
        ("ix_block_label", "block", ["label"]),
        ("ix_agents_organization_id", "agents", ["organization_id"]),
        ("ix_tools_agents_tool_id", "tools_agents", ["tool_id"]),
        ("ix_sources_agents_source_id", "sources_agents", ["source_id"]),
    ]
    for index_name, table_name, columns in index_specs:
        op.create_index(index_name, table_name, columns, unique=False)
def downgrade() -> None:
    """Drop the lookup indexes in reverse creation order."""
    index_specs = [
        ("ix_sources_agents_source_id", "sources_agents"),
        ("ix_tools_agents_tool_id", "tools_agents"),
        ("ix_agents_organization_id", "agents"),
        ("ix_block_label", "block"),
        ("ix_blocks_agents_block_id", "blocks_agents"),
    ]
    for index_name, table_name in index_specs:
        op.drop_index(index_name, table_name=table_name)
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/18ff61fbc034_add_agent_id_index_to_mapping_tables.py",
"license": "Apache License 2.0",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/1af251a42c06_fix_files_agents_constraints.py | """Fix files_agents constraints
Revision ID: 1af251a42c06
Revises: 51999513bcf1
Create Date: 2025-06-30 11:50:42.200885
"""
from typing import Sequence, Union
from alembic import op
from letta.settings import settings
# revision identifiers, used by Alembic.
revision: str = "1af251a42c06"
down_revision: Union[str, None] = "51999513bcf1"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Rename the ``files_agents`` indexes and unique constraints to shorter names (PostgreSQL only).

    Drops the old long-named index/constraint pairs and recreates them with
    the new naming scheme; the indexed/constrained column sets are unchanged.
    """
    # Skip this migration for SQLite
    if not settings.letta_pg_uri_no_default:
        return
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index("ix_files_agents_agent_file_name", table_name="files_agents")
    op.drop_index("ix_files_agents_file_id_agent_id", table_name="files_agents")
    op.drop_constraint("uq_files_agents_agent_file_name", "files_agents", type_="unique")
    op.drop_constraint("uq_files_agents_file_agent", "files_agents", type_="unique")
    op.create_index("ix_agent_filename", "files_agents", ["agent_id", "file_name"], unique=False)
    op.create_index("ix_file_agent", "files_agents", ["file_id", "agent_id"], unique=False)
    op.create_unique_constraint("uq_agent_filename", "files_agents", ["agent_id", "file_name"])
    op.create_unique_constraint("uq_file_agent", "files_agents", ["file_id", "agent_id"])
    # ### end Alembic commands ###
def downgrade() -> None:
    """Restore the original long-named ``files_agents`` indexes and constraints (PostgreSQL only)."""
    # Skip this migration for SQLite
    if not settings.letta_pg_uri_no_default:
        return
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_constraint("uq_file_agent", "files_agents", type_="unique")
    op.drop_constraint("uq_agent_filename", "files_agents", type_="unique")
    op.drop_index("ix_file_agent", table_name="files_agents")
    op.drop_index("ix_agent_filename", table_name="files_agents")
    op.create_unique_constraint("uq_files_agents_file_agent", "files_agents", ["file_id", "agent_id"], postgresql_nulls_not_distinct=False)
    op.create_unique_constraint(
        "uq_files_agents_agent_file_name", "files_agents", ["agent_id", "file_name"], postgresql_nulls_not_distinct=False
    )
    op.create_index("ix_files_agents_file_id_agent_id", "files_agents", ["file_id", "agent_id"], unique=False)
    op.create_index("ix_files_agents_agent_file_name", "files_agents", ["agent_id", "file_name"], unique=False)
    # ### end Alembic commands ###
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/1af251a42c06_fix_files_agents_constraints.py",
"license": "Apache License 2.0",
"lines": 43,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/1c6b6a38b713_add_pip_requirements_to_tools.py | """Add pip requirements to tools
Revision ID: 1c6b6a38b713
Revises: c96263433aef
Create Date: 2025-06-12 18:06:54.838510
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
from letta.settings import settings
# revision identifiers, used by Alembic.
revision: str = "1c6b6a38b713"
down_revision: Union[str, None] = "c96263433aef"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Add a nullable JSON ``pip_requirements`` column to ``tools`` (PostgreSQL only)."""
    # SQLite deployments skip this migration entirely.
    if settings.letta_pg_uri_no_default:
        op.add_column("tools", sa.Column("pip_requirements", sa.JSON(), nullable=True))
def downgrade() -> None:
    """Drop ``tools.pip_requirements`` (PostgreSQL only)."""
    # SQLite deployments skip this migration entirely.
    if settings.letta_pg_uri_no_default:
        op.drop_column("tools", "pip_requirements")
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/1c6b6a38b713_add_pip_requirements_to_tools.py",
"license": "Apache License 2.0",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/1dc0fee72dea_add_block_related_indexes.py | """add block-related indexes
Revision ID: 1dc0fee72dea
Revises: 18e300709530
Create Date: 2025-05-12 17:06:32.055091
"""
from typing import Sequence, Union
from alembic import op
from letta.settings import settings
# revision identifiers, used by Alembic.
revision: str = "1dc0fee72dea"
down_revision: Union[str, None] = "18e300709530"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Create composite lookup indexes on ``blocks_agents`` and ``agents_tags`` (PostgreSQL only)."""
    # Skip this migration for SQLite
    if not settings.letta_pg_uri_no_default:
        return
    # add index for blocks_agents table
    op.create_index("ix_blocks_agents_block_label_agent_id", "blocks_agents", ["block_label", "agent_id"], unique=False)
    # add index for just block_label
    op.create_index("ix_blocks_block_label", "blocks_agents", ["block_label"], unique=False)
    # add index for agent_tags for agent_id and tag
    op.create_index("ix_agents_tags_agent_id_tag", "agents_tags", ["agent_id", "tag"], unique=False)
def downgrade() -> None:
    """Drop the indexes created by this revision (PostgreSQL only)."""
    # Skip this migration for SQLite
    if not settings.letta_pg_uri_no_default:
        return
    op.drop_index("ix_blocks_agents_block_label_agent_id", table_name="blocks_agents")
    op.drop_index("ix_blocks_block_label", table_name="blocks_agents")
    op.drop_index("ix_agents_tags_agent_id_tag", table_name="agents_tags")
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/1dc0fee72dea_add_block_related_indexes.py",
"license": "Apache License 2.0",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/220856bbf43b_add_read_only_column.py | """add read-only column
Revision ID: 220856bbf43b
Revises: 1dc0fee72dea
Create Date: 2025-05-13 14:42:17.353614
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
from letta.settings import settings
# revision identifiers, used by Alembic.
revision: str = "220856bbf43b"
down_revision: Union[str, None] = "1dc0fee72dea"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Add a NOT NULL ``read_only`` boolean to ``block`` (PostgreSQL only).

    Added as nullable first, backfilled to False, then tightened to NOT NULL
    so existing rows do not violate the constraint.
    """
    # Skip this migration for SQLite
    if not settings.letta_pg_uri_no_default:
        return
    # add default value of `False`
    op.add_column("block", sa.Column("read_only", sa.Boolean(), nullable=True))
    op.execute(
        """
        UPDATE block
        SET read_only = False
        """
    )
    op.alter_column("block", "read_only", nullable=False)
def downgrade() -> None:
    """Drop ``block.read_only`` (PostgreSQL only)."""
    # SQLite deployments skip this migration entirely.
    if settings.letta_pg_uri_no_default:
        op.drop_column("block", "read_only")
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/220856bbf43b_add_read_only_column.py",
"license": "Apache License 2.0",
"lines": 32,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/2c059cad97cc_create_sqlite_baseline_schema.py | """create_sqlite_baseline_schema
Revision ID: 2c059cad97cc
Revises: 495f3f474131
Create Date: 2025-07-16 14:34:21.280233
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
from letta.settings import settings
# revision identifiers, used by Alembic.
revision: str = "2c059cad97cc"
down_revision: Union[str, None] = "495f3f474131"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
# Only run this migration for SQLite
if settings.letta_pg_uri_no_default:
return
# Create the exact schema that matches the current PostgreSQL state
# This is a snapshot of the schema at the time of this migration
# Based on the schema provided by Andy
# Organizations table
op.create_table(
"organizations",
sa.Column("id", sa.String(), nullable=False),
sa.Column("name", sa.String(), nullable=False),
sa.Column("created_at", sa.DateTime(timezone=True), nullable=True),
sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("(CURRENT_TIMESTAMP)"), nullable=True),
sa.Column("is_deleted", sa.Boolean(), server_default=sa.text("(FALSE)"), nullable=False),
sa.Column("_created_by_id", sa.String(), nullable=True),
sa.Column("_last_updated_by_id", sa.String(), nullable=True),
sa.Column("privileged_tools", sa.Boolean(), nullable=False),
sa.PrimaryKeyConstraint("id"),
)
# Agents table
op.create_table(
"agents",
sa.Column("id", sa.String(), nullable=False),
sa.Column("name", sa.String(), nullable=True),
sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("(CURRENT_TIMESTAMP)"), nullable=True),
sa.Column("description", sa.String(), nullable=True),
sa.Column("message_ids", sa.JSON(), nullable=True),
sa.Column("system", sa.String(), nullable=True),
sa.Column("agent_type", sa.String(), nullable=True),
sa.Column("llm_config", sa.JSON(), nullable=True),
sa.Column("embedding_config", sa.JSON(), nullable=True),
sa.Column("metadata_", sa.JSON(), nullable=True),
sa.Column("tool_rules", sa.JSON(), nullable=True),
sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("(CURRENT_TIMESTAMP)"), nullable=True),
sa.Column("is_deleted", sa.Boolean(), server_default=sa.text("(FALSE)"), nullable=False),
sa.Column("_created_by_id", sa.String(), nullable=True),
sa.Column("_last_updated_by_id", sa.String(), nullable=True),
sa.Column("organization_id", sa.String(), nullable=False),
sa.Column("project_id", sa.String(), nullable=True),
sa.Column("template_id", sa.String(), nullable=True),
sa.Column("base_template_id", sa.String(), nullable=True),
sa.Column("message_buffer_autoclear", sa.Boolean(), nullable=False),
sa.Column("enable_sleeptime", sa.Boolean(), nullable=True),
sa.Column("response_format", sa.JSON(), nullable=True),
sa.Column("last_run_completion", sa.DateTime(timezone=True), nullable=True),
sa.Column("last_run_duration_ms", sa.Integer(), nullable=True),
sa.Column("timezone", sa.String(), nullable=True),
sa.PrimaryKeyConstraint("id"),
sa.ForeignKeyConstraint(["organization_id"], ["organizations.id"]),
)
op.create_index("ix_agents_created_at", "agents", ["created_at", "id"])
# Block history table (created before block table so block can reference it)
op.create_table(
"block_history",
sa.Column("id", sa.String(), nullable=False),
sa.Column("description", sa.Text(), nullable=True),
sa.Column("label", sa.String(), nullable=False),
sa.Column("value", sa.Text(), nullable=False),
sa.Column("limit", sa.BigInteger(), nullable=False),
sa.Column("metadata_", sa.JSON(), nullable=True),
sa.Column("actor_type", sa.String(), nullable=True),
sa.Column("actor_id", sa.String(), nullable=True),
sa.Column("block_id", sa.String(), nullable=False),
sa.Column("sequence_number", sa.Integer(), nullable=False),
sa.Column("organization_id", sa.String(), nullable=False),
sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("(CURRENT_TIMESTAMP)"), nullable=True),
sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("(CURRENT_TIMESTAMP)"), nullable=True),
sa.Column("is_deleted", sa.Boolean(), server_default=sa.text("(FALSE)"), nullable=False),
sa.Column("_created_by_id", sa.String(), nullable=True),
sa.Column("_last_updated_by_id", sa.String(), nullable=True),
sa.PrimaryKeyConstraint("id"),
sa.ForeignKeyConstraint(["organization_id"], ["organizations.id"]),
# Note: block_id foreign key will be added later since block table doesn't exist yet
)
op.create_index("ix_block_history_block_id_sequence", "block_history", ["block_id", "sequence_number"], unique=True)
# Block table
op.create_table(
"block",
sa.Column("id", sa.String(), nullable=False),
sa.Column("value", sa.String(), nullable=False),
sa.Column("limit", sa.Integer(), nullable=False),
sa.Column("template_name", sa.String(), nullable=True),
sa.Column("label", sa.String(), nullable=False),
sa.Column("metadata_", sa.JSON(), nullable=True),
sa.Column("description", sa.String(), nullable=True),
sa.Column("is_template", sa.Boolean(), nullable=False),
sa.Column("organization_id", sa.String(), nullable=False),
sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("(CURRENT_TIMESTAMP)"), nullable=True),
sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("(CURRENT_TIMESTAMP)"), nullable=True),
sa.Column("is_deleted", sa.Boolean(), server_default=sa.text("(FALSE)"), nullable=False),
sa.Column("_created_by_id", sa.String(), nullable=True),
sa.Column("_last_updated_by_id", sa.String(), nullable=True),
sa.Column("current_history_entry_id", sa.String(), nullable=True),
sa.Column("version", sa.Integer(), server_default="1", nullable=False),
sa.Column("read_only", sa.Boolean(), nullable=False),
sa.Column("preserve_on_migration", sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint("id"),
sa.ForeignKeyConstraint(["organization_id"], ["organizations.id"]),
sa.ForeignKeyConstraint(["current_history_entry_id"], ["block_history.id"], name="fk_block_current_history_entry"),
sa.UniqueConstraint("id", "label", name="unique_block_id_label"),
)
op.create_index("created_at_label_idx", "block", ["created_at", "label"])
op.create_index("ix_block_current_history_entry_id", "block", ["current_history_entry_id"])
# Note: Foreign key constraint for block_history.block_id cannot be added in SQLite after table creation
# This will be enforced at the ORM level
# Sources table
op.create_table(
"sources",
sa.Column("id", sa.String(), nullable=False),
sa.Column("name", sa.String(), nullable=False),
sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("(CURRENT_TIMESTAMP)"), nullable=True),
sa.Column("embedding_config", sa.JSON(), nullable=False),
sa.Column("description", sa.String(), nullable=True),
sa.Column("metadata_", sa.JSON(), nullable=True),
sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("(CURRENT_TIMESTAMP)"), nullable=True),
sa.Column("is_deleted", sa.Boolean(), server_default=sa.text("(FALSE)"), nullable=False),
sa.Column("_created_by_id", sa.String(), nullable=True),
sa.Column("_last_updated_by_id", sa.String(), nullable=True),
sa.Column("organization_id", sa.String(), nullable=False),
sa.Column("instructions", sa.String(), nullable=True),
sa.PrimaryKeyConstraint("id"),
sa.ForeignKeyConstraint(["organization_id"], ["organizations.id"]),
sa.UniqueConstraint("name", "organization_id", name="uq_source_name_organization"),
)
op.create_index("source_created_at_id_idx", "sources", ["created_at", "id"])
# Files table
op.create_table(
"files",
sa.Column("id", sa.String(), nullable=False),
sa.Column("source_id", sa.String(), nullable=False),
sa.Column("file_name", sa.String(), nullable=True),
sa.Column("file_path", sa.String(), nullable=True),
sa.Column("file_type", sa.String(), nullable=True),
sa.Column("file_size", sa.Integer(), nullable=True),
sa.Column("file_creation_date", sa.String(), nullable=True),
sa.Column("file_last_modified_date", sa.String(), nullable=True),
sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("(CURRENT_TIMESTAMP)"), nullable=True),
sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("(CURRENT_TIMESTAMP)"), nullable=True),
sa.Column("is_deleted", sa.Boolean(), server_default=sa.text("(FALSE)"), nullable=False),
sa.Column("_created_by_id", sa.String(), nullable=True),
sa.Column("_last_updated_by_id", sa.String(), nullable=True),
sa.Column("organization_id", sa.String(), nullable=False),
sa.Column("processing_status", sa.String(), nullable=False),
sa.Column("error_message", sa.Text(), nullable=True),
sa.Column("original_file_name", sa.String(), nullable=True),
sa.Column("total_chunks", sa.Integer(), nullable=True),
sa.Column("chunks_embedded", sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint("id"),
sa.ForeignKeyConstraint(["source_id"], ["sources.id"], ondelete="CASCADE"),
sa.ForeignKeyConstraint(["organization_id"], ["organizations.id"]),
)
# Note: SQLite doesn't support expression indexes, so these are simplified
op.create_index("ix_files_org_created", "files", ["organization_id"])
op.create_index("ix_files_processing_status", "files", ["processing_status"])
op.create_index("ix_files_source_created", "files", ["source_id"])
# Users table
op.create_table(
"users",
sa.Column("id", sa.String(), nullable=False),
sa.Column("name", sa.String(), nullable=False),
sa.Column("created_at", sa.DateTime(timezone=True), nullable=True),
sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("(CURRENT_TIMESTAMP)"), nullable=True),
sa.Column("is_deleted", sa.Boolean(), server_default=sa.text("(FALSE)"), nullable=False),
sa.Column("_created_by_id", sa.String(), nullable=True),
sa.Column("_last_updated_by_id", sa.String(), nullable=True),
sa.Column("organization_id", sa.String(), nullable=False),
sa.PrimaryKeyConstraint("id"),
sa.ForeignKeyConstraint(["organization_id"], ["organizations.id"]),
)
# Jobs table
op.create_table(
"jobs",
sa.Column("id", sa.String(), nullable=False),
sa.Column("user_id", sa.String(), nullable=False),
sa.Column("status", sa.String(), nullable=False),
sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("(CURRENT_TIMESTAMP)"), nullable=True),
sa.Column("completed_at", sa.DateTime(timezone=True), nullable=True),
sa.Column("metadata_", sa.JSON(), nullable=True),
sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("(CURRENT_TIMESTAMP)"), nullable=True),
sa.Column("is_deleted", sa.Boolean(), server_default=sa.text("(FALSE)"), nullable=False),
sa.Column("_created_by_id", sa.String(), nullable=True),
sa.Column("_last_updated_by_id", sa.String(), nullable=True),
sa.Column("job_type", sa.String(), nullable=False),
sa.Column("request_config", sa.JSON(), nullable=True),
sa.Column("callback_url", sa.String(), nullable=True),
sa.Column("callback_sent_at", sa.DateTime(timezone=True), nullable=True),
sa.Column("callback_status_code", sa.Integer(), nullable=True),
sa.Column("callback_error", sa.String(), nullable=True),
sa.PrimaryKeyConstraint("id"),
sa.ForeignKeyConstraint(["user_id"], ["users.id"]),
)
op.create_index("ix_jobs_created_at", "jobs", ["created_at", "id"])
# Tools table
op.create_table(
"tools",
sa.Column("id", sa.String(), nullable=False),
sa.Column("name", sa.String(), nullable=False),
sa.Column("description", sa.String(), nullable=True),
sa.Column("source_type", sa.String(), nullable=False),
sa.Column("source_code", sa.String(), nullable=True),
sa.Column("json_schema", sa.JSON(), nullable=True),
sa.Column("tags", sa.JSON(), nullable=False),
sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("(CURRENT_TIMESTAMP)"), nullable=True),
sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("(CURRENT_TIMESTAMP)"), nullable=True),
sa.Column("is_deleted", sa.Boolean(), server_default=sa.text("(FALSE)"), nullable=False),
sa.Column("_created_by_id", sa.String(), nullable=True),
sa.Column("_last_updated_by_id", sa.String(), nullable=True),
sa.Column("organization_id", sa.String(), nullable=False),
sa.Column("return_char_limit", sa.Integer(), nullable=True),
sa.Column("tool_type", sa.String(), nullable=False),
sa.Column("args_json_schema", sa.JSON(), nullable=True),
sa.Column("metadata_", sa.JSON(), nullable=True),
sa.Column("pip_requirements", sa.JSON(), nullable=True),
sa.PrimaryKeyConstraint("id"),
sa.ForeignKeyConstraint(["organization_id"], ["organizations.id"]),
sa.UniqueConstraint("name", "organization_id", name="uix_name_organization"),
)
op.create_index("ix_tools_created_at_name", "tools", ["created_at", "name"])
# Additional tables based on Andy's schema
# Agents tags table
op.create_table(
"agents_tags",
sa.Column("agent_id", sa.String(), nullable=False),
sa.Column("tag", sa.String(), nullable=False),
sa.ForeignKeyConstraint(["agent_id"], ["agents.id"]),
sa.UniqueConstraint("agent_id", "tag", name="unique_agent_tag"),
)
op.create_index("ix_agents_tags_agent_id_tag", "agents_tags", ["agent_id", "tag"])
# Sandbox configs table
op.create_table(
"sandbox_configs",
sa.Column("id", sa.String(), nullable=False),
sa.Column("type", sa.String(), nullable=False), # sandboxtype in PG
sa.Column("config", sa.JSON(), nullable=False),
sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("(CURRENT_TIMESTAMP)"), nullable=True),
sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("(CURRENT_TIMESTAMP)"), nullable=True),
sa.Column("is_deleted", sa.Boolean(), server_default=sa.text("(FALSE)"), nullable=False),
sa.Column("_created_by_id", sa.String(), nullable=True),
sa.Column("_last_updated_by_id", sa.String(), nullable=True),
sa.Column("organization_id", sa.String(), nullable=False),
sa.PrimaryKeyConstraint("id"),
sa.ForeignKeyConstraint(["organization_id"], ["organizations.id"]),
sa.UniqueConstraint("type", "organization_id", name="uix_type_organization"),
)
# Sandbox environment variables table
op.create_table(
"sandbox_environment_variables",
sa.Column("id", sa.String(), nullable=False),
sa.Column("key", sa.String(), nullable=False),
sa.Column("value", sa.String(), nullable=False),
sa.Column("description", sa.String(), nullable=True),
sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("(CURRENT_TIMESTAMP)"), nullable=True),
sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("(CURRENT_TIMESTAMP)"), nullable=True),
sa.Column("is_deleted", sa.Boolean(), server_default=sa.text("(FALSE)"), nullable=False),
sa.Column("_created_by_id", sa.String(), nullable=True),
sa.Column("_last_updated_by_id", sa.String(), nullable=True),
sa.Column("organization_id", sa.String(), nullable=False),
sa.Column("sandbox_config_id", sa.String(), nullable=False),
sa.PrimaryKeyConstraint("id"),
sa.ForeignKeyConstraint(["organization_id"], ["organizations.id"]),
sa.ForeignKeyConstraint(["sandbox_config_id"], ["sandbox_configs.id"]),
sa.UniqueConstraint("key", "sandbox_config_id", name="uix_key_sandbox_config"),
)
# Blocks agents table
op.create_table(
"blocks_agents",
sa.Column("agent_id", sa.String(), nullable=False),
sa.Column("block_id", sa.String(), nullable=False),
sa.Column("block_label", sa.String(), nullable=False),
sa.ForeignKeyConstraint(["agent_id"], ["agents.id"]),
sa.ForeignKeyConstraint(["block_id", "block_label"], ["block.id", "block.label"], deferrable=True, initially="DEFERRED"),
sa.UniqueConstraint("agent_id", "block_label", name="unique_label_per_agent"),
sa.UniqueConstraint("agent_id", "block_id", name="unique_agent_block"),
)
op.create_index("ix_blocks_agents_block_label_agent_id", "blocks_agents", ["block_label", "agent_id"])
op.create_index("ix_blocks_block_label", "blocks_agents", ["block_label"])
# Tools agents table
op.create_table(
"tools_agents",
sa.Column("agent_id", sa.String(), nullable=False),
sa.Column("tool_id", sa.String(), nullable=False),
sa.ForeignKeyConstraint(["agent_id"], ["agents.id"], ondelete="CASCADE"),
sa.ForeignKeyConstraint(["tool_id"], ["tools.id"], ondelete="CASCADE"),
sa.UniqueConstraint("agent_id", "tool_id", name="unique_agent_tool"),
)
# Sources agents table
op.create_table(
"sources_agents",
sa.Column("agent_id", sa.String(), nullable=False),
sa.Column("source_id", sa.String(), nullable=False),
sa.ForeignKeyConstraint(["agent_id"], ["agents.id"], ondelete="CASCADE"),
sa.ForeignKeyConstraint(["source_id"], ["sources.id"], ondelete="CASCADE"),
sa.PrimaryKeyConstraint("agent_id", "source_id"),
)
# Agent passages table (using BLOB for vectors in SQLite)
op.create_table(
"agent_passages",
sa.Column("id", sa.String(), nullable=False),
sa.Column("text", sa.String(), nullable=False),
sa.Column("embedding_config", sa.JSON(), nullable=False),
sa.Column("metadata_", sa.JSON(), nullable=False),
sa.Column("embedding", sa.BLOB(), nullable=True), # CommonVector becomes BLOB in SQLite
sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("(CURRENT_TIMESTAMP)"), nullable=True),
sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("(CURRENT_TIMESTAMP)"), nullable=True),
sa.Column("is_deleted", sa.Boolean(), server_default=sa.text("(FALSE)"), nullable=False),
sa.Column("_created_by_id", sa.String(), nullable=True),
sa.Column("_last_updated_by_id", sa.String(), nullable=True),
sa.Column("organization_id", sa.String(), nullable=False),
sa.Column("agent_id", sa.String(), nullable=False),
sa.PrimaryKeyConstraint("id"),
sa.ForeignKeyConstraint(["organization_id"], ["organizations.id"]),
sa.ForeignKeyConstraint(["agent_id"], ["agents.id"], ondelete="CASCADE"),
)
# Note: agent_passages_org_idx is not created for SQLite as it's expected to be different
op.create_index("agent_passages_created_at_id_idx", "agent_passages", ["created_at", "id"])
op.create_index("ix_agent_passages_org_agent", "agent_passages", ["organization_id", "agent_id"])
# Source passages table (using BLOB for vectors in SQLite)
op.create_table(
"source_passages",
sa.Column("id", sa.String(), nullable=False),
sa.Column("text", sa.String(), nullable=False),
sa.Column("embedding_config", sa.JSON(), nullable=False),
sa.Column("metadata_", sa.JSON(), nullable=False),
sa.Column("embedding", sa.BLOB(), nullable=True), # CommonVector becomes BLOB in SQLite
sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("(CURRENT_TIMESTAMP)"), nullable=True),
sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("(CURRENT_TIMESTAMP)"), nullable=True),
sa.Column("is_deleted", sa.Boolean(), server_default=sa.text("(FALSE)"), nullable=False),
sa.Column("_created_by_id", sa.String(), nullable=True),
sa.Column("_last_updated_by_id", sa.String(), nullable=True),
sa.Column("organization_id", sa.String(), nullable=False),
sa.Column("file_id", sa.String(), nullable=True),
sa.Column("source_id", sa.String(), nullable=False),
sa.Column("file_name", sa.String(), nullable=False),
sa.PrimaryKeyConstraint("id"),
sa.ForeignKeyConstraint(["organization_id"], ["organizations.id"]),
sa.ForeignKeyConstraint(["file_id"], ["files.id"], ondelete="CASCADE"),
sa.ForeignKeyConstraint(["source_id"], ["sources.id"], ondelete="CASCADE"),
)
# Note: source_passages_org_idx is not created for SQLite as it's expected to be different
op.create_index("source_passages_created_at_id_idx", "source_passages", ["created_at", "id"])
# Message sequence is handled by the sequence_id field in messages table
# Messages table
op.create_table(
"messages",
sa.Column("id", sa.String(), nullable=False),
sa.Column("agent_id", sa.String(), nullable=False),
sa.Column("role", sa.String(), nullable=False),
sa.Column("text", sa.String(), nullable=True),
sa.Column("model", sa.String(), nullable=True),
sa.Column("name", sa.String(), nullable=True),
sa.Column("tool_calls", sa.JSON(), nullable=False),
sa.Column("tool_call_id", sa.String(), nullable=True),
sa.Column("created_at", sa.DateTime(timezone=True), nullable=True),
sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("(CURRENT_TIMESTAMP)"), nullable=True),
sa.Column("is_deleted", sa.Boolean(), server_default=sa.text("(FALSE)"), nullable=False),
sa.Column("_created_by_id", sa.String(), nullable=True),
sa.Column("_last_updated_by_id", sa.String(), nullable=True),
sa.Column("organization_id", sa.String(), nullable=False),
sa.Column("step_id", sa.String(), nullable=True),
sa.Column("otid", sa.String(), nullable=True),
sa.Column("tool_returns", sa.JSON(), nullable=True),
sa.Column("group_id", sa.String(), nullable=True),
sa.Column("content", sa.JSON(), nullable=True),
sa.Column("sequence_id", sa.BigInteger(), nullable=False),
sa.Column("sender_id", sa.String(), nullable=True),
sa.Column("batch_item_id", sa.String(), nullable=True),
sa.PrimaryKeyConstraint("id"),
sa.ForeignKeyConstraint(["organization_id"], ["organizations.id"]),
sa.ForeignKeyConstraint(["agent_id"], ["agents.id"], ondelete="CASCADE"),
sa.ForeignKeyConstraint(["step_id"], ["steps.id"], ondelete="SET NULL"),
sa.UniqueConstraint("sequence_id", name="uq_messages_sequence_id"),
)
op.create_index("ix_messages_agent_created_at", "messages", ["agent_id", "created_at"])
op.create_index("ix_messages_created_at", "messages", ["created_at", "id"])
op.create_index("ix_messages_agent_sequence", "messages", ["agent_id", "sequence_id"])
op.create_index("ix_messages_org_agent", "messages", ["organization_id", "agent_id"])
# Create sequence table for SQLite message sequence_id generation
op.create_table(
"message_sequence",
sa.Column("id", sa.Integer(), nullable=False),
sa.Column("next_val", sa.Integer(), nullable=False, server_default="1"),
sa.PrimaryKeyConstraint("id"),
)
# Initialize the sequence table with the next available sequence_id
op.execute("INSERT INTO message_sequence (id, next_val) VALUES (1, 1)")
# Now create the rest of the tables that might reference messages/steps
# Add missing tables and columns identified from alembic check
# Identities table
op.create_table(
"identities",
sa.Column("id", sa.String(), nullable=False),
sa.Column("identifier_key", sa.String(), nullable=False),
sa.Column("name", sa.String(), nullable=False),
sa.Column("identity_type", sa.String(), nullable=False),
sa.Column("project_id", sa.String(), nullable=True),
sa.Column("organization_id", sa.String(), nullable=False),
sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("(CURRENT_TIMESTAMP)"), nullable=True),
sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("(CURRENT_TIMESTAMP)"), nullable=True),
sa.Column("is_deleted", sa.Boolean(), server_default=sa.text("(FALSE)"), nullable=False),
sa.Column("_created_by_id", sa.String(), nullable=True),
sa.Column("_last_updated_by_id", sa.String(), nullable=True),
sa.Column("properties", sa.JSON(), nullable=False),
sa.PrimaryKeyConstraint("id"),
sa.ForeignKeyConstraint(["organization_id"], ["organizations.id"]),
sa.UniqueConstraint("identifier_key", "project_id", "organization_id", name="unique_identifier_key_project_id_organization_id"),
)
# MCP Server table
op.create_table(
"mcp_server",
sa.Column("id", sa.String(), nullable=False),
sa.Column("server_name", sa.String(), nullable=False),
sa.Column("server_type", sa.String(), nullable=False),
sa.Column("server_url", sa.String(), nullable=True),
sa.Column("stdio_config", sa.JSON(), nullable=True),
sa.Column("token", sa.String(), nullable=True),
sa.Column("custom_headers", sa.JSON(), nullable=True),
sa.Column("organization_id", sa.String(), nullable=False),
sa.Column("is_deleted", sa.Boolean(), server_default=sa.text("(FALSE)"), nullable=False),
sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("(CURRENT_TIMESTAMP)"), nullable=True),
sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("(CURRENT_TIMESTAMP)"), nullable=True),
sa.Column("_created_by_id", sa.String(), nullable=True),
sa.Column("_last_updated_by_id", sa.String(), nullable=True),
sa.Column("metadata_", sa.JSON(), nullable=True),
sa.PrimaryKeyConstraint("id"),
sa.ForeignKeyConstraint(["organization_id"], ["organizations.id"]),
sa.UniqueConstraint("server_name", "organization_id", name="uix_name_organization_mcp_server"),
)
# Providers table
op.create_table(
"providers",
sa.Column("id", sa.String(), nullable=False),
sa.Column("name", sa.String(), nullable=False),
sa.Column("api_key", sa.String(), nullable=True),
sa.Column("access_key", sa.String(), nullable=True),
sa.Column("region", sa.String(), nullable=True),
sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("(CURRENT_TIMESTAMP)"), nullable=True),
sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("(CURRENT_TIMESTAMP)"), nullable=True),
sa.Column("is_deleted", sa.Boolean(), server_default=sa.text("(FALSE)"), nullable=False),
sa.Column("_created_by_id", sa.String(), nullable=True),
sa.Column("_last_updated_by_id", sa.String(), nullable=True),
sa.Column("organization_id", sa.String(), nullable=False),
sa.Column("provider_type", sa.String(), nullable=True),
sa.Column("base_url", sa.String(), nullable=True),
sa.Column("provider_category", sa.String(), nullable=True),
sa.PrimaryKeyConstraint("id"),
sa.ForeignKeyConstraint(["organization_id"], ["organizations.id"]),
sa.UniqueConstraint("name", "organization_id", name="unique_name_organization_id"),
)
# Agent environment variables table
op.create_table(
"agent_environment_variables",
sa.Column("id", sa.String(), nullable=False),
sa.Column("key", sa.String(), nullable=False),
sa.Column("value", sa.String(), nullable=False),
sa.Column("description", sa.String(), nullable=True),
sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("(CURRENT_TIMESTAMP)"), nullable=True),
sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("(CURRENT_TIMESTAMP)"), nullable=True),
sa.Column("is_deleted", sa.Boolean(), server_default=sa.text("(FALSE)"), nullable=False),
sa.Column("_created_by_id", sa.String(), nullable=True),
sa.Column("_last_updated_by_id", sa.String(), nullable=True),
sa.Column("organization_id", sa.String(), nullable=False),
sa.Column("agent_id", sa.String(), nullable=False),
sa.PrimaryKeyConstraint("id"),
sa.ForeignKeyConstraint(["organization_id"], ["organizations.id"]),
sa.ForeignKeyConstraint(["agent_id"], ["agents.id"], ondelete="CASCADE"),
sa.UniqueConstraint("key", "agent_id", name="uix_key_agent"),
)
op.create_index("idx_agent_environment_variables_agent_id", "agent_environment_variables", ["agent_id"])
# Groups table
op.create_table(
"groups",
sa.Column("id", sa.String(), nullable=False),
sa.Column("description", sa.String(), nullable=False),
sa.Column("manager_type", sa.String(), nullable=False),
sa.Column("manager_agent_id", sa.String(), nullable=True),
sa.Column("termination_token", sa.String(), nullable=True),
sa.Column("max_turns", sa.Integer(), nullable=True),
sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("(CURRENT_TIMESTAMP)"), nullable=True),
sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("(CURRENT_TIMESTAMP)"), nullable=True),
sa.Column("is_deleted", sa.Boolean(), server_default=sa.text("(FALSE)"), nullable=False),
sa.Column("_created_by_id", sa.String(), nullable=True),
sa.Column("_last_updated_by_id", sa.String(), nullable=True),
sa.Column("organization_id", sa.String(), nullable=False),
sa.Column("agent_ids", sa.JSON(), nullable=False),
sa.Column("sleeptime_agent_frequency", sa.Integer(), nullable=True),
sa.Column("turns_counter", sa.Integer(), nullable=True),
sa.Column("last_processed_message_id", sa.String(), nullable=True),
sa.Column("max_message_buffer_length", sa.Integer(), nullable=True),
sa.Column("min_message_buffer_length", sa.Integer(), nullable=True),
sa.PrimaryKeyConstraint("id"),
sa.ForeignKeyConstraint(["organization_id"], ["organizations.id"]),
sa.ForeignKeyConstraint(["manager_agent_id"], ["agents.id"], ondelete="RESTRICT"),
)
# Steps table
op.create_table(
"steps",
sa.Column("id", sa.String(), nullable=False),
sa.Column("job_id", sa.String(), nullable=True),
sa.Column("completion_tokens", sa.Integer(), nullable=False, default=0),
sa.Column("prompt_tokens", sa.Integer(), nullable=False, default=0),
sa.Column("total_tokens", sa.Integer(), nullable=False, default=0),
sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("(CURRENT_TIMESTAMP)"), nullable=True),
sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("(CURRENT_TIMESTAMP)"), nullable=True),
sa.Column("is_deleted", sa.Boolean(), server_default=sa.text("(FALSE)"), nullable=False),
sa.Column("_created_by_id", sa.String(), nullable=True),
sa.Column("_last_updated_by_id", sa.String(), nullable=True),
sa.Column("origin", sa.String(), nullable=True),
sa.Column("organization_id", sa.String(), nullable=True),
sa.Column("provider_id", sa.String(), nullable=True),
sa.Column("provider_name", sa.String(), nullable=True),
sa.Column("model", sa.String(), nullable=True),
sa.Column("context_window_limit", sa.Integer(), nullable=True),
sa.Column("completion_tokens_details", sa.JSON(), nullable=True),
sa.Column("tags", sa.JSON(), nullable=True),
sa.Column("tid", sa.String(), nullable=True),
sa.Column("model_endpoint", sa.String(), nullable=True),
sa.Column("trace_id", sa.String(), nullable=True),
sa.Column("agent_id", sa.String(), nullable=True),
sa.Column("provider_category", sa.String(), nullable=True),
sa.Column("feedback", sa.String(), nullable=True),
sa.Column("project_id", sa.String(), nullable=True),
sa.PrimaryKeyConstraint("id"),
sa.ForeignKeyConstraint(["job_id"], ["jobs.id"], ondelete="SET NULL"),
sa.ForeignKeyConstraint(["organization_id"], ["organizations.id"], ondelete="RESTRICT"),
sa.ForeignKeyConstraint(["provider_id"], ["providers.id"], ondelete="RESTRICT"),
)
# Note: Foreign key constraint for block.current_history_entry_id -> block_history.id
# would need to be added here, but SQLite doesn't support ALTER TABLE ADD CONSTRAINT
# This will be handled by the ORM at runtime
# Add missing columns to existing tables
# All missing columns have been added to the table definitions above
# step_id was already added in the messages table creation above
# op.add_column('messages', sa.Column('step_id', sa.String(), nullable=True))
# op.create_foreign_key('fk_messages_step_id', 'messages', 'steps', ['step_id'], ['id'], ondelete='SET NULL')
# Add index to source_passages for file_id
op.create_index("source_passages_file_id_idx", "source_passages", ["file_id"])
# Unique constraint for sources was added during table creation above
# Create remaining association tables
# Identities agents table
op.create_table(
"identities_agents",
sa.Column("identity_id", sa.String(), nullable=False),
sa.Column("agent_id", sa.String(), nullable=False),
sa.ForeignKeyConstraint(["identity_id"], ["identities.id"], ondelete="CASCADE"),
sa.ForeignKeyConstraint(["agent_id"], ["agents.id"], ondelete="CASCADE"),
sa.PrimaryKeyConstraint("identity_id", "agent_id"),
)
# Identities blocks table
op.create_table(
"identities_blocks",
sa.Column("identity_id", sa.String(), nullable=False),
sa.Column("block_id", sa.String(), nullable=False),
sa.ForeignKeyConstraint(["identity_id"], ["identities.id"], ondelete="CASCADE"),
sa.ForeignKeyConstraint(["block_id"], ["block.id"], ondelete="CASCADE"),
sa.PrimaryKeyConstraint("identity_id", "block_id"),
)
# Files agents table
op.create_table(
"files_agents",
sa.Column("id", sa.String(), nullable=False),
sa.Column("file_id", sa.String(), nullable=False),
sa.Column("agent_id", sa.String(), nullable=False),
sa.Column("source_id", sa.String(), nullable=False),
sa.Column("is_open", sa.Boolean(), nullable=False),
sa.Column("visible_content", sa.Text(), nullable=True),
sa.Column("last_accessed_at", sa.DateTime(timezone=True), nullable=False),
sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("(CURRENT_TIMESTAMP)"), nullable=True),
sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("(CURRENT_TIMESTAMP)"), nullable=True),
sa.Column("is_deleted", sa.Boolean(), server_default=sa.text("(FALSE)"), nullable=False),
sa.Column("_created_by_id", sa.String(), nullable=True),
sa.Column("_last_updated_by_id", sa.String(), nullable=True),
sa.Column("organization_id", sa.String(), nullable=False),
sa.Column("file_name", sa.String(), nullable=False),
sa.PrimaryKeyConstraint("id", "file_id", "agent_id"),
sa.ForeignKeyConstraint(["file_id"], ["files.id"], ondelete="CASCADE"),
sa.ForeignKeyConstraint(["agent_id"], ["agents.id"], ondelete="CASCADE"),
sa.ForeignKeyConstraint(["source_id"], ["sources.id"], ondelete="CASCADE"),
sa.ForeignKeyConstraint(["organization_id"], ["organizations.id"]),
sa.UniqueConstraint("file_id", "agent_id", name="uq_file_agent"),
sa.UniqueConstraint("agent_id", "file_name", name="uq_agent_filename"),
)
op.create_index("ix_agent_filename", "files_agents", ["agent_id", "file_name"])
op.create_index("ix_file_agent", "files_agents", ["file_id", "agent_id"])
# Groups agents table
op.create_table(
"groups_agents",
sa.Column("group_id", sa.String(), nullable=False),
sa.Column("agent_id", sa.String(), nullable=False),
sa.ForeignKeyConstraint(["group_id"], ["groups.id"], ondelete="CASCADE"),
sa.ForeignKeyConstraint(["agent_id"], ["agents.id"], ondelete="CASCADE"),
sa.PrimaryKeyConstraint("group_id", "agent_id"),
)
# Groups blocks table
op.create_table(
"groups_blocks",
sa.Column("group_id", sa.String(), nullable=False),
sa.Column("block_id", sa.String(), nullable=False),
sa.ForeignKeyConstraint(["group_id"], ["groups.id"], ondelete="CASCADE"),
sa.ForeignKeyConstraint(["block_id"], ["block.id"], ondelete="CASCADE"),
sa.PrimaryKeyConstraint("group_id", "block_id"),
)
# LLM batch job table
op.create_table(
"llm_batch_job",
sa.Column("id", sa.String(), nullable=False),
sa.Column("status", sa.String(), nullable=False),
sa.Column("llm_provider", sa.String(), nullable=False),
sa.Column("create_batch_response", sa.JSON(), nullable=False),
sa.Column("latest_polling_response", sa.JSON(), nullable=True),
sa.Column("last_polled_at", sa.DateTime(timezone=True), nullable=True),
sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("(CURRENT_TIMESTAMP)"), nullable=True),
sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("(CURRENT_TIMESTAMP)"), nullable=True),
sa.Column("is_deleted", sa.Boolean(), server_default=sa.text("(FALSE)"), nullable=False),
sa.Column("_created_by_id", sa.String(), nullable=True),
sa.Column("_last_updated_by_id", sa.String(), nullable=True),
sa.Column("organization_id", sa.String(), nullable=False),
sa.Column("letta_batch_job_id", sa.String(), nullable=False),
sa.PrimaryKeyConstraint("id"),
sa.ForeignKeyConstraint(["organization_id"], ["organizations.id"]),
sa.ForeignKeyConstraint(["letta_batch_job_id"], ["jobs.id"], ondelete="CASCADE"),
)
op.create_index("ix_llm_batch_job_created_at", "llm_batch_job", ["created_at"])
op.create_index("ix_llm_batch_job_status", "llm_batch_job", ["status"])
# LLM batch items table
op.create_table(
"llm_batch_items",
sa.Column("id", sa.String(), nullable=False),
sa.Column("llm_config", sa.JSON(), nullable=False),
sa.Column("request_status", sa.String(), nullable=False),
sa.Column("step_status", sa.String(), nullable=False),
sa.Column("step_state", sa.JSON(), nullable=False),
sa.Column("batch_request_result", sa.JSON(), nullable=True),
sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("(CURRENT_TIMESTAMP)"), nullable=True),
sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("(CURRENT_TIMESTAMP)"), nullable=True),
sa.Column("is_deleted", sa.Boolean(), server_default=sa.text("(FALSE)"), nullable=False),
sa.Column("_created_by_id", sa.String(), nullable=True),
sa.Column("_last_updated_by_id", sa.String(), nullable=True),
sa.Column("organization_id", sa.String(), nullable=False),
sa.Column("agent_id", sa.String(), nullable=False),
sa.Column("llm_batch_id", sa.String(), nullable=False),
sa.PrimaryKeyConstraint("id"),
sa.ForeignKeyConstraint(["organization_id"], ["organizations.id"]),
sa.ForeignKeyConstraint(["agent_id"], ["agents.id"], ondelete="CASCADE"),
sa.ForeignKeyConstraint(["llm_batch_id"], ["llm_batch_job.id"], ondelete="CASCADE"),
)
op.create_index("ix_llm_batch_items_agent_id", "llm_batch_items", ["agent_id"])
op.create_index("ix_llm_batch_items_llm_batch_id", "llm_batch_items", ["llm_batch_id"])
op.create_index("ix_llm_batch_items_status", "llm_batch_items", ["request_status"])
# Job messages table
op.create_table(
"job_messages",
sa.Column("id", sa.Integer(), primary_key=True),
sa.Column("job_id", sa.String(), nullable=False),
sa.Column("message_id", sa.String(), nullable=False),
sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("(CURRENT_TIMESTAMP)"), nullable=True),
sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("(CURRENT_TIMESTAMP)"), nullable=True),
sa.Column("is_deleted", sa.Boolean(), server_default=sa.text("(FALSE)"), nullable=False),
sa.Column("_created_by_id", sa.String(), nullable=True),
sa.Column("_last_updated_by_id", sa.String(), nullable=True),
sa.ForeignKeyConstraint(["job_id"], ["jobs.id"], ondelete="CASCADE"),
sa.ForeignKeyConstraint(["message_id"], ["messages.id"], ondelete="CASCADE"),
sa.UniqueConstraint("job_id", "message_id", name="unique_job_message"),
)
# File contents table
op.create_table(
"file_contents",
sa.Column("file_id", sa.String(), nullable=False),
sa.Column("text", sa.Text(), nullable=False),
sa.Column("id", sa.String(), nullable=False),
sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("(CURRENT_TIMESTAMP)"), nullable=True),
sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("(CURRENT_TIMESTAMP)"), nullable=True),
sa.Column("is_deleted", sa.Boolean(), server_default=sa.text("(FALSE)"), nullable=False),
sa.Column("_created_by_id", sa.String(), nullable=True),
sa.Column("_last_updated_by_id", sa.String(), nullable=True),
sa.PrimaryKeyConstraint("file_id", "id"),
sa.ForeignKeyConstraint(["file_id"], ["files.id"], ondelete="CASCADE"),
sa.UniqueConstraint("file_id", name="uq_file_contents_file_id"),
)
# Provider traces table
op.create_table(
"provider_traces",
sa.Column("id", sa.String(), nullable=False),
sa.Column("request_json", sa.JSON(), nullable=False),
sa.Column("response_json", sa.JSON(), nullable=False),
sa.Column("step_id", sa.String(), nullable=True),
sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("(CURRENT_TIMESTAMP)"), nullable=True),
sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("(CURRENT_TIMESTAMP)"), nullable=True),
sa.Column("is_deleted", sa.Boolean(), server_default=sa.text("(FALSE)"), nullable=False),
sa.Column("_created_by_id", sa.String(), nullable=True),
sa.Column("_last_updated_by_id", sa.String(), nullable=True),
sa.Column("organization_id", sa.String(), nullable=False),
sa.PrimaryKeyConstraint("id"),
sa.ForeignKeyConstraint(["organization_id"], ["organizations.id"]),
)
op.create_index("ix_step_id", "provider_traces", ["step_id"])
# Complete the SQLite schema alignment by adding any remaining missing elements
try:
# Unique constraints for files_agents are already created with correct names in table definition above
# Foreign key for files_agents.source_id is already created in table definition above
# Foreign key for messages.step_id is already created in table definition above
pass
except Exception:
# Some operations may fail if the column/constraint already exists
# This is expected in some cases and we can continue
pass
# Note: The remaining alembic check differences are expected for SQLite:
# 1. Type differences (BLOB vs CommonVector) - Expected and handled by ORM
# 2. Foreign key constraint differences - SQLite handles these at runtime
# 3. Index differences - SQLite doesn't support all PostgreSQL index features
# 4. Some constraint naming differences - Cosmetic differences
#
# These differences do not affect functionality as the ORM handles the abstraction
# between SQLite and PostgreSQL appropriately.
def downgrade() -> None:
    """Reverse the SQLite baseline schema migration (not supported).

    PostgreSQL databases are untouched by this migration, so there is
    nothing to reverse for them. For SQLite the baseline cannot be rolled
    back; the recommended path is to start from a fresh database file.

    Raises:
        NotImplementedError: always, when running against SQLite.
    """
    # Only SQLite databases (no PG URI configured) ran the baseline upgrade;
    # PostgreSQL falls through and returns without doing anything.
    if not settings.letta_pg_uri_no_default:
        raise NotImplementedError("SQLite downgrade is not supported. Use a fresh database instead.")
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/2c059cad97cc_create_sqlite_baseline_schema.py",
"license": "Apache License 2.0",
"lines": 734,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:alembic/versions/341068089f14_add_preserve_on_migration_to_block.py | """add preserve_on_migration to block
Revision ID: 341068089f14
Revises: 348214cbc081
Create Date: 2025-05-29 10:39:44.494643
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
from letta.settings import settings
# revision identifiers, used by Alembic.
revision: str = "341068089f14"
down_revision: Union[str, None] = "348214cbc081"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Add the nullable ``preserve_on_migration`` boolean column to ``block``."""
    # Skip this migration for SQLite
    # (letta_pg_uri_no_default is falsy when no PostgreSQL URI is configured,
    # i.e. the server is running on SQLite, which this DDL does not target)
    if not settings.letta_pg_uri_no_default:
        return
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column("block", sa.Column("preserve_on_migration", sa.Boolean(), nullable=True))
    # ### end Alembic commands ###
def downgrade() -> None:
    """Drop the ``preserve_on_migration`` column from ``block`` (PostgreSQL only)."""
    # Skip this migration for SQLite
    # (mirrors the guard in upgrade(): only PostgreSQL ran the DDL, so only
    # PostgreSQL needs it reversed)
    if not settings.letta_pg_uri_no_default:
        return
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column("block", "preserve_on_migration")
    # ### end Alembic commands ###
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/341068089f14_add_preserve_on_migration_to_block.py",
"license": "Apache License 2.0",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/348214cbc081_add_org_agent_id_indices.py | """add org agent id indices
Revision ID: 348214cbc081
Revises: dd049fbec729
Create Date: 2025-05-28 22:43:18.509397
"""
from typing import Sequence, Union
from alembic import op
from letta.settings import settings
# revision identifiers, used by Alembic.
revision: str = "348214cbc081"
down_revision: Union[str, None] = "dd049fbec729"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Create composite (organization_id, agent_id) indices on ``agent_passages`` and ``messages``."""
    # Skip this migration for SQLite
    # (only PostgreSQL databases receive this DDL; SQLite is handled elsewhere)
    if not settings.letta_pg_uri_no_default:
        return
    # ### commands auto generated by Alembic - please adjust! ###
    # Non-unique indices to speed up org-scoped lookups by agent.
    op.create_index("ix_agent_passages_org_agent", "agent_passages", ["organization_id", "agent_id"], unique=False)
    op.create_index("ix_messages_org_agent", "messages", ["organization_id", "agent_id"], unique=False)
    # ### end Alembic commands ###
def downgrade() -> None:
    """Drop the (organization_id, agent_id) indices added by this revision (PostgreSQL only)."""
    # Skip this migration for SQLite
    # (mirrors the guard in upgrade(): the indices only exist on PostgreSQL)
    if not settings.letta_pg_uri_no_default:
        return
    # ### commands auto generated by Alembic - please adjust! ###
    # Dropped in reverse order of creation.
    op.drop_index("ix_messages_org_agent", table_name="messages")
    op.drop_index("ix_agent_passages_org_agent", table_name="agent_passages")
    # ### end Alembic commands ###
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/348214cbc081_add_org_agent_id_indices.py",
"license": "Apache License 2.0",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/373dabcba6cf_add_byok_fields_and_unique_constraint.py | """add byok fields and unique constraint
Revision ID: 373dabcba6cf
Revises: c56081a05371
Create Date: 2025-04-30 19:38:25.010856
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
from letta.settings import settings
# revision identifiers, used by Alembic.
revision: str = "373dabcba6cf"
down_revision: Union[str, None] = "c56081a05371"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Add BYOK columns to ``providers`` and enforce unique (name, organization_id)."""
    # Skip this migration for SQLite
    # (this DDL targets PostgreSQL only)
    if not settings.letta_pg_uri_no_default:
        return
    # ### commands auto generated by Alembic - please adjust! ###
    # New nullable columns so existing provider rows remain valid.
    op.add_column("providers", sa.Column("provider_type", sa.String(), nullable=True))
    op.add_column("providers", sa.Column("base_url", sa.String(), nullable=True))
    # Each organization may register a given provider name at most once.
    op.create_unique_constraint("unique_name_organization_id", "providers", ["name", "organization_id"])
    # ### end Alembic commands ###
def downgrade() -> None:
    """Remove the BYOK columns and unique constraint from ``providers`` (PostgreSQL only)."""
    # Skip this migration for SQLite
    # (mirrors the guard in upgrade(): only PostgreSQL ran the DDL)
    if not settings.letta_pg_uri_no_default:
        return
    # ### commands auto generated by Alembic - please adjust! ###
    # Reversed in the opposite order of upgrade(): constraint first, then columns.
    op.drop_constraint("unique_name_organization_id", "providers", type_="unique")
    op.drop_column("providers", "base_url")
    op.drop_column("providers", "provider_type")
    # ### end Alembic commands ###
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/373dabcba6cf_add_byok_fields_and_unique_constraint.py",
"license": "Apache License 2.0",
"lines": 32,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/4537f0996495_add_start_end_for_agent_file.py | """Add start end for agent file
Revision ID: 4537f0996495
Revises: 06fbbf65d4f1
Create Date: 2025-07-25 17:44:26.748765
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "4537f0996495"
down_revision: Union[str, None] = "06fbbf65d4f1"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Add per-agent file view range columns (start_line / end_line)."""
    for column_name in ("start_line", "end_line"):
        op.add_column("files_agents", sa.Column(column_name, sa.Integer(), nullable=True))
def downgrade() -> None:
    """Drop the file view range columns from files_agents."""
    for column_name in ("end_line", "start_line"):
        op.drop_column("files_agents", column_name)
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/4537f0996495_add_start_end_for_agent_file.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/46699adc71a7_add_unique_constraint_to_source_names_.py | """Add unique constraint to source names and also add original file name column
Revision ID: 46699adc71a7
Revises: 1af251a42c06
Create Date: 2025-07-01 13:30:48.279151
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
from letta.settings import settings
# revision identifiers, used by Alembic.
revision: str = "46699adc71a7"
down_revision: Union[str, None] = "1af251a42c06"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Add files.original_file_name and enforce unique source names per org.

    Duplicate (name, organization_id) sources are renamed with a numeric
    suffix before the unique constraint is created, so constraint creation
    cannot fail on pre-existing data.
    """
    # Skip this migration for SQLite
    if not settings.letta_pg_uri_no_default:
        return
    op.add_column("files", sa.Column("original_file_name", sa.String(), nullable=True))
    connection = op.get_bind()
    # Number duplicate (name, organization_id) rows by creation time; every
    # row past the first (rn > 1) must be renamed.
    result = connection.execute(
        sa.text(
            """
            WITH duplicates AS (
                SELECT name, organization_id,
                       ROW_NUMBER() OVER (PARTITION BY name, organization_id ORDER BY created_at) as rn,
                       id
                FROM sources
                WHERE (name, organization_id) IN (
                    SELECT name, organization_id
                    FROM sources
                    GROUP BY name, organization_id
                    HAVING COUNT(*) > 1
                )
            )
            SELECT id, name, rn
            FROM duplicates
            WHERE rn > 1
            """
        )
    )
    for source_id, original_name, duplicate_number in result:
        # Bug fix: the suffixed candidate may itself already be taken in the
        # same organization (e.g. a source literally named "foo_2"), which
        # would make create_unique_constraint below fail. Bump the suffix
        # until the candidate name is free.
        suffix = duplicate_number
        new_name = f"{original_name}_{suffix}"
        while connection.execute(
            sa.text(
                """
                SELECT 1 FROM sources
                WHERE name = :new_name
                  AND organization_id = (SELECT organization_id FROM sources WHERE id = :source_id)
                """
            ),
            {"new_name": new_name, "source_id": source_id},
        ).first():
            suffix += 1
            new_name = f"{original_name}_{suffix}"
        connection.execute(
            sa.text("UPDATE sources SET name = :new_name WHERE id = :source_id"), {"new_name": new_name, "source_id": source_id}
        )
    op.create_unique_constraint("uq_source_name_organization", "sources", ["name", "organization_id"])
def downgrade() -> None:
    """Undo the source-name uniqueness constraint and original_file_name."""
    # Postgres-only; SQLite never applied the upgrade.
    if settings.letta_pg_uri_no_default:
        op.drop_constraint("uq_source_name_organization", "sources", type_="unique")
        op.drop_column("files", "original_file_name")
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/46699adc71a7_add_unique_constraint_to_source_names_.py",
"license": "Apache License 2.0",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/47d2277e530d_add_total_chunks_and_chunks_embedded_to_.py | """Add total_chunks and chunks_embedded to files
Revision ID: 47d2277e530d
Revises: 56254216524f
Create Date: 2025-07-03 14:32:08.539280
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
from letta.settings import settings
# revision identifiers, used by Alembic.
revision: str = "47d2277e530d"
down_revision: Union[str, None] = "56254216524f"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Add embedding-progress counters (total_chunks, chunks_embedded) to files."""
    if not settings.letta_pg_uri_no_default:
        # Postgres-only migration.
        return
    for column_name in ("total_chunks", "chunks_embedded"):
        op.add_column("files", sa.Column(column_name, sa.Integer(), nullable=True))
def downgrade() -> None:
    """Drop the embedding-progress counters from files."""
    if not settings.letta_pg_uri_no_default:
        return
    for column_name in ("chunks_embedded", "total_chunks"):
        op.drop_column("files", column_name)
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/47d2277e530d_add_total_chunks_and_chunks_embedded_to_.py",
"license": "Apache License 2.0",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/495f3f474131_write_source_id_directly_to_files_agents.py | """Write source_id directly to files agents
Revision ID: 495f3f474131
Revises: 47d2277e530d
Create Date: 2025-07-10 17:14:45.154738
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
from letta.settings import settings
# revision identifiers, used by Alembic.
revision: str = "495f3f474131"
down_revision: Union[str, None] = "47d2277e530d"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Denormalize source_id onto files_agents and back-fill it from files."""
    # Skip this migration for SQLite
    if not settings.letta_pg_uri_no_default:
        return
    # Step 1: Add the column as nullable first so existing rows are accepted.
    op.add_column("files_agents", sa.Column("source_id", sa.String(), nullable=True))
    # Step 2: Backfill source_id from files table
    connection = op.get_bind()
    connection.execute(
        sa.text(
            """
            UPDATE files_agents
            SET source_id = files.source_id
            FROM files
            WHERE files_agents.file_id = files.id
            """
        )
    )
    # Step 3: Make the column NOT NULL now that it's populated
    op.alter_column("files_agents", "source_id", nullable=False)
    # Step 4: Add the foreign key with an EXPLICIT name.
    # Bug fix: the constraint name was None here and in downgrade();
    # op.drop_constraint(None, ...) raises in Alembic, which made this
    # migration irreversible.
    op.create_foreign_key(
        "fk_files_agents_source_id", "files_agents", "sources", ["source_id"], ["id"], ondelete="CASCADE"
    )


def downgrade() -> None:
    """Remove the denormalized source_id column and its foreign key.

    NOTE(review): databases migrated before the FK was named carry an
    auto-generated constraint name and need a manual drop instead.
    """
    # Skip this migration for SQLite
    if not settings.letta_pg_uri_no_default:
        return
    op.drop_constraint("fk_files_agents_source_id", "files_agents", type_="foreignkey")
    op.drop_column("files_agents", "source_id")
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/495f3f474131_write_source_id_directly_to_files_agents.py",
"license": "Apache License 2.0",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/4c6c9ef0387d_support_modal_sandbox_type.py | """support modal sandbox type
Revision ID: 4c6c9ef0387d
Revises: 4537f0996495
Create Date: 2025-07-29 15:10:08.996251
"""
from typing import Sequence, Union
from sqlalchemy import text
from alembic import op
from letta.settings import DatabaseChoice, settings
# revision identifiers, used by Alembic.
revision: str = "4c6c9ef0387d"
down_revision: Union[str, None] = "4537f0996495"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Extend the Postgres sandboxtype enum with the 'MODAL' value."""
    # SQLite stores these as plain strings, so only Postgres needs DDL.
    if settings.database_engine == DatabaseChoice.POSTGRES:
        op.execute("ALTER TYPE sandboxtype ADD VALUE 'MODAL' AFTER 'E2B'")
def downgrade() -> None:
    """Shrink the sandboxtype enum back to ('E2B', 'LOCAL').

    Postgres cannot drop a value from an enum, so a replacement enum is
    created and swapped in.

    Raises:
        RuntimeError: if sandbox_configs still contains rows whose "type"
            is not 'E2B' or 'LOCAL' (they must be handled manually first).
    """
    if settings.database_engine == DatabaseChoice.POSTGRES:
        connection = op.get_bind()
        conflict_count = connection.execute(
            text(
                """
                SELECT COUNT(*)
                FROM sandbox_configs
                WHERE "type" NOT IN ('E2B', 'LOCAL')
                """
            )
        ).scalar()
        if conflict_count:
            raise RuntimeError(
                (
                    "Cannot downgrade enum: Data conflicts are detected in sandbox_configs.sandboxtype.\n"
                    "Please manually handle these records before handling the downgrades.\n"
                    # Bug fix: report the plain count; previously the raw
                    # result Row tuple was interpolated, e.g. "(3,) invalid...".
                    f"{conflict_count} invalid sandboxtype values"
                )
            )
        # Postgres does not support dropping enum values. Create a new enum and swap them.
        op.execute("CREATE TYPE sandboxtype_old AS ENUM ('E2B', 'LOCAL')")
        op.execute('ALTER TABLE sandbox_configs ALTER COLUMN "type" TYPE sandboxtype_old USING "type"::text::sandboxtype_old')
        op.execute("DROP TYPE sandboxtype")
        op.execute("ALTER TYPE sandboxtype_old RENAME to sandboxtype")
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/4c6c9ef0387d_support_modal_sandbox_type.py",
"license": "Apache License 2.0",
"lines": 43,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/51999513bcf1_steps_feedback_field.py | """steps feedback field
Revision ID: 51999513bcf1
Revises: 61ee53ec45a5
Create Date: 2025-06-20 14:09:22.993263
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
from letta.settings import settings
# revision identifiers, used by Alembic.
revision: str = "51999513bcf1"
down_revision: Union[str, None] = "c7ac45f69849"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Add a free-form feedback column to steps."""
    # Postgres-only migration; SQLite is skipped.
    if settings.letta_pg_uri_no_default:
        op.add_column("steps", sa.Column("feedback", sa.String(), nullable=True))
def downgrade() -> None:
    """Drop the steps.feedback column."""
    if settings.letta_pg_uri_no_default:
        op.drop_column("steps", "feedback")
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/51999513bcf1_steps_feedback_field.py",
"license": "Apache License 2.0",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/54c76f7cabca_add_tags_to_passages_and_create_passage_.py | """Add tags to passages and create passage_tags junction table
Revision ID: 54c76f7cabca
Revises: c41c87205254
Create Date: 2025-08-28 15:13:01.549590
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
from letta.settings import settings
# revision identifiers, used by Alembic.
revision: str = "54c76f7cabca"
down_revision: Union[str, None] = "c41c87205254"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Create the passage_tags junction table plus JSON tags columns."""
    # Server-side timestamp default differs per backend:
    # SQLite uses CURRENT_TIMESTAMP, Postgres uses now().
    timestamp_default = (
        sa.text("now()") if settings.letta_pg_uri_no_default else sa.text("(CURRENT_TIMESTAMP)")
    )
    op.create_table(
        "passage_tags",
        sa.Column("id", sa.String(), nullable=False),
        sa.Column("tag", sa.String(), nullable=False),
        sa.Column("passage_id", sa.String(), nullable=False),
        sa.Column("archive_id", sa.String(), nullable=False),
        sa.Column("created_at", sa.DateTime(timezone=True), server_default=timestamp_default, nullable=True),
        sa.Column("updated_at", sa.DateTime(timezone=True), server_default=timestamp_default, nullable=True),
        sa.Column("is_deleted", sa.Boolean(), server_default=sa.text("FALSE"), nullable=False),
        sa.Column("_created_by_id", sa.String(), nullable=True),
        sa.Column("_last_updated_by_id", sa.String(), nullable=True),
        sa.Column("organization_id", sa.String(), nullable=False),
        sa.ForeignKeyConstraint(["archive_id"], ["archives.id"], ondelete="CASCADE"),
        sa.ForeignKeyConstraint(["organization_id"], ["organizations.id"]),
        sa.ForeignKeyConstraint(["passage_id"], ["archival_passages.id"], ondelete="CASCADE"),
        sa.PrimaryKeyConstraint("id"),
        sa.UniqueConstraint("passage_id", "tag", name="uq_passage_tag"),
    )
    # Lookup indexes for tag queries scoped by archive and organization.
    for index_name, index_columns in (
        ("ix_passage_tags_archive_id", ["archive_id"]),
        ("ix_passage_tags_archive_tag", ["archive_id", "tag"]),
        ("ix_passage_tags_org_archive", ["organization_id", "archive_id"]),
        ("ix_passage_tags_tag", ["tag"]),
    ):
        op.create_index(index_name, "passage_tags", index_columns, unique=False)
    for table_name in ("archival_passages", "source_passages"):
        op.add_column(table_name, sa.Column("tags", sa.JSON(), nullable=True))
def downgrade() -> None:
    """Drop the tags columns, the passage_tags indexes, and the table itself."""
    for table_name in ("source_passages", "archival_passages"):
        op.drop_column(table_name, "tags")
    for index_name in (
        "ix_passage_tags_tag",
        "ix_passage_tags_org_archive",
        "ix_passage_tags_archive_tag",
        "ix_passage_tags_archive_id",
    ):
        op.drop_index(index_name, table_name="passage_tags")
    op.drop_table("passage_tags")
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/54c76f7cabca_add_tags_to_passages_and_create_passage_.py",
"license": "Apache License 2.0",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/56254216524f_add_custom_headers_to_mcp_server.py | """add_custom_headers_to_mcp_server
Revision ID: 56254216524f
Revises: 60ed28ee7138
Create Date: 2025-07-02 14:08:59.163861
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
from letta.settings import settings
# revision identifiers, used by Alembic.
revision: str = "56254216524f"
down_revision: Union[str, None] = "60ed28ee7138"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Add a JSON custom_headers column to mcp_server."""
    # Postgres-only migration; SQLite is skipped.
    if settings.letta_pg_uri_no_default:
        op.add_column("mcp_server", sa.Column("custom_headers", sa.JSON(), nullable=True))
def downgrade() -> None:
    """Drop the mcp_server.custom_headers column."""
    if settings.letta_pg_uri_no_default:
        op.drop_column("mcp_server", "custom_headers")
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/56254216524f_add_custom_headers_to_mcp_server.py",
"license": "Apache License 2.0",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/5b804970e6a0_add_hidden_property_to_groups_and_blocks.py | """add_hidden_property_to_groups_and_blocks
Revision ID: 5b804970e6a0
Revises: ddb69be34a72
Create Date: 2025-09-03 22:19:03.825077
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "5b804970e6a0"
down_revision: Union[str, None] = "ddb69be34a72"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Add a nullable boolean 'hidden' flag to groups and block."""
    for table_name in ("groups", "block"):
        op.add_column(table_name, sa.Column("hidden", sa.Boolean(), nullable=True))
def downgrade() -> None:
    """Remove the 'hidden' flag from block and groups."""
    for table_name in ("block", "groups"):
        op.drop_column(table_name, "hidden")
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/5b804970e6a0_add_hidden_property_to_groups_and_blocks.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/5d27a719b24d_add_organization_id_to_jobs_model.py | """add organization id to jobs model
Revision ID: 5d27a719b24d
Revises: 18ff61fbc034
Create Date: 2025-09-10 23:01:45.214589
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "5d27a719b24d"
down_revision: Union[str, None] = "18ff61fbc034"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Add jobs.organization_id with a named FK to organizations."""
    # batch_alter_table keeps this portable to SQLite's limited ALTER TABLE.
    with op.batch_alter_table("jobs", schema=None) as batch:
        batch.add_column(sa.Column("organization_id", sa.String(), nullable=True))
        batch.create_foreign_key("fk_jobs_organization_id", "organizations", ["organization_id"], ["id"])
def downgrade() -> None:
    """Drop the jobs.organization_id column and its foreign key."""
    with op.batch_alter_table("jobs", schema=None) as batch:
        batch.drop_constraint("fk_jobs_organization_id", type_="foreignkey")
        batch.drop_column("organization_id")
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/5d27a719b24d_add_organization_id_to_jobs_model.py",
"license": "Apache License 2.0",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/5fb8bba2c373_add_step_metrics.py | """add_step_metrics
Revision ID: 5fb8bba2c373
Revises: f7f757414d20
Create Date: 2025-08-07 17:40:11.923402
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "5fb8bba2c373"
down_revision: Union[str, None] = "f7f757414d20"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Create step_metrics: per-step timing data keyed 1:1 to steps.id."""
    columns = [
        sa.Column("id", sa.String(), nullable=False),
        sa.Column("organization_id", sa.String(), nullable=True),
        sa.Column("provider_id", sa.String(), nullable=True),
        sa.Column("job_id", sa.String(), nullable=True),
        # Timings are stored in nanoseconds, hence BigInteger.
        sa.Column("llm_request_ns", sa.BigInteger(), nullable=True),
        sa.Column("tool_execution_ns", sa.BigInteger(), nullable=True),
        sa.Column("step_ns", sa.BigInteger(), nullable=True),
        sa.Column("base_template_id", sa.String(), nullable=True),
        sa.Column("template_id", sa.String(), nullable=True),
        sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=True),
        sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=True),
        sa.Column("is_deleted", sa.Boolean(), server_default=sa.text("FALSE"), nullable=False),
        sa.Column("_created_by_id", sa.String(), nullable=True),
        sa.Column("_last_updated_by_id", sa.String(), nullable=True),
        sa.Column("project_id", sa.String(), nullable=True),
        sa.Column("agent_id", sa.String(), nullable=False),
    ]
    constraints = [
        sa.ForeignKeyConstraint(["agent_id"], ["agents.id"], ondelete="CASCADE"),
        # The PK doubles as an FK: metrics rows share their id with steps.
        sa.ForeignKeyConstraint(["id"], ["steps.id"], ondelete="CASCADE"),
        sa.ForeignKeyConstraint(["job_id"], ["jobs.id"], ondelete="SET NULL"),
        sa.ForeignKeyConstraint(["organization_id"], ["organizations.id"], ondelete="RESTRICT"),
        sa.ForeignKeyConstraint(["provider_id"], ["providers.id"], ondelete="RESTRICT"),
        sa.PrimaryKeyConstraint("id"),
    ]
    op.create_table("step_metrics", *columns, *constraints)
def downgrade() -> None:
    """Drop step_metrics; all recorded per-step timings are lost."""
    op.drop_table("step_metrics")
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/5fb8bba2c373_add_step_metrics.py",
"license": "Apache License 2.0",
"lines": 45,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/60ed28ee7138_add_project_id_to_step_model.py | """add project id to step model
Revision ID: 60ed28ee7138
Revises: 46699adc71a7
Create Date: 2025-07-01 13:12:44.485233
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
from letta.settings import settings
# revision identifiers, used by Alembic.
revision: str = "60ed28ee7138"
down_revision: Union[str, None] = "46699adc71a7"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Add steps.project_id and back-fill it from the owning agent."""
    if not settings.letta_pg_uri_no_default:
        # Postgres-only migration.
        return
    op.add_column("steps", sa.Column("project_id", sa.String(), nullable=True))
    # Copy project_id across from agents for steps that have one.
    op.execute(
        """
        UPDATE steps
        SET project_id = agents.project_id
        FROM agents
        WHERE steps.agent_id = agents.id
        AND steps.agent_id IS NOT NULL
        AND agents.project_id IS NOT NULL
        """
    )
def downgrade() -> None:
    """Drop the steps.project_id column (back-filled data is discarded)."""
    if settings.letta_pg_uri_no_default:
        op.drop_column("steps", "project_id")
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/60ed28ee7138_add_project_id_to_step_model.py",
"license": "Apache License 2.0",
"lines": 38,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/614c4e53b66e_add_unique_constraint_to_file_id_and_.py | """Add unique constraint to file_id and agent_id on file_agent
Revision ID: 614c4e53b66e
Revises: 0b496eae90de
Create Date: 2025-06-02 17:03:58.879839
"""
from typing import Sequence, Union
from alembic import op
from letta.settings import settings
# revision identifiers, used by Alembic.
revision: str = "614c4e53b66e"
down_revision: Union[str, None] = "0b496eae90de"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Enforce one files_agents row per (file_id, agent_id) pair."""
    # Postgres-only migration; SQLite is skipped.
    if settings.letta_pg_uri_no_default:
        op.create_unique_constraint("uq_files_agents_file_agent", "files_agents", ["file_id", "agent_id"])
def downgrade() -> None:
    """Drop the (file_id, agent_id) uniqueness constraint."""
    if settings.letta_pg_uri_no_default:
        op.drop_constraint("uq_files_agents_file_agent", "files_agents", type_="unique")
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/614c4e53b66e_add_unique_constraint_to_file_id_and_.py",
"license": "Apache License 2.0",
"lines": 27,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/61ee53ec45a5_add_index_on_source_passages_for_files.py | """add index on source passages for files
Revision ID: 61ee53ec45a5
Revises: 9758adf8fdd3
Create Date: 2025-06-20 11:10:02.744914
"""
from typing import Sequence, Union
from alembic import op
from letta.settings import settings
# revision identifiers, used by Alembic.
revision: str = "61ee53ec45a5"
down_revision: Union[str, None] = "9758adf8fdd3"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Index source_passages.file_id for faster per-file passage lookups."""
    # Postgres-only migration; SQLite is skipped.
    if settings.letta_pg_uri_no_default:
        op.create_index("source_passages_file_id_idx", "source_passages", ["file_id"], unique=False)
def downgrade() -> None:
    """Drop the file_id index on source_passages."""
    if settings.letta_pg_uri_no_default:
        op.drop_index("source_passages_file_id_idx", table_name="source_passages")
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/61ee53ec45a5_add_index_on_source_passages_for_files.py",
"license": "Apache License 2.0",
"lines": 27,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/6c53224a7a58_add_provider_category_to_steps.py | """add provider category to steps
Revision ID: 6c53224a7a58
Revises: cc8dc340836d
Create Date: 2025-05-21 10:09:43.761669
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
from letta.settings import settings
# revision identifiers, used by Alembic.
revision: str = "6c53224a7a58"
down_revision: Union[str, None] = "cc8dc340836d"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Add a provider_category column to steps."""
    # Postgres-only migration; SQLite is skipped.
    if settings.letta_pg_uri_no_default:
        op.add_column("steps", sa.Column("provider_category", sa.String(), nullable=True))
def downgrade() -> None:
    """Drop the steps.provider_category column."""
    if settings.letta_pg_uri_no_default:
        op.drop_column("steps", "provider_category")
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/6c53224a7a58_add_provider_category_to_steps.py",
"license": "Apache License 2.0",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/74e860718e0d_add_archival_memory_sharing.py | """add archival memory sharing
Revision ID: 74e860718e0d
Revises: 15b577c62f3f
Create Date: 2025-07-30 16:15:49.424711
"""
import time
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# Import custom columns if needed
try:
from letta.orm.custom_columns import CommonVector, EmbeddingConfigColumn
except ImportError:
# For environments where these aren't available
EmbeddingConfigColumn = sa.JSON
CommonVector = sa.BLOB
# revision identifiers, used by Alembic.
revision: str = "74e860718e0d"
down_revision: Union[str, None] = "15b577c62f3f"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Replace per-agent archival passages with shareable archives.

    Steps (both dialects):
      1. Create the ``archives`` table (one archive per owning organization).
      2. Create the ``archives_agents`` join table; a unique constraint on
         ``agent_id`` currently limits each agent to a single archive.
      3. Backfill one default archive per agent that has passages.
      4. Re-key passages from ``agent_id`` to ``archive_id`` and turn
         ``agent_passages`` into ``archival_passages``.

    SQLite cannot ALTER columns/constraints in place, so it rebuilds the
    passages table through a temporary copy; PostgreSQL alters in place and
    backfills ``archive_id`` with batched UPDATEs.
    """
    # get database connection to check DB type
    bind = op.get_bind()
    is_sqlite = bind.dialect.name == "sqlite"
    # create new tables with appropriate defaults
    if is_sqlite:
        op.create_table(
            "archives",
            sa.Column("name", sa.String(), nullable=False),
            sa.Column("description", sa.String(), nullable=True),
            sa.Column("metadata_", sa.JSON(), nullable=True),
            sa.Column("id", sa.String(), nullable=False),
            sa.Column("created_at", sa.DateTime(timezone=True), nullable=True),
            sa.Column("updated_at", sa.DateTime(timezone=True), nullable=True),
            sa.Column("is_deleted", sa.Boolean(), server_default=sa.text("0"), nullable=False),
            sa.Column("_created_by_id", sa.String(), nullable=True),
            sa.Column("_last_updated_by_id", sa.String(), nullable=True),
            sa.Column("organization_id", sa.String(), nullable=False),
            sa.ForeignKeyConstraint(
                ["organization_id"],
                ["organizations.id"],
            ),
            sa.PrimaryKeyConstraint("id"),
        )
    else:
        # Check if archives table already exists (makes the migration
        # re-runnable on Postgres if a prior attempt partially completed)
        connection = op.get_bind()
        result = connection.execute(
            sa.text(
                """
            SELECT EXISTS (
                SELECT 1 FROM information_schema.tables
                WHERE table_schema = 'public' AND table_name = 'archives'
            )
        """
            )
        )
        archives_exists = result.scalar()
        if not archives_exists:
            op.create_table(
                "archives",
                sa.Column("name", sa.String(), nullable=False),
                sa.Column("description", sa.String(), nullable=True),
                sa.Column("metadata_", sa.JSON(), nullable=True),
                sa.Column("id", sa.String(), nullable=False),
                sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=True),
                sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=True),
                sa.Column("is_deleted", sa.Boolean(), server_default=sa.text("FALSE"), nullable=False),
                sa.Column("_created_by_id", sa.String(), nullable=True),
                sa.Column("_last_updated_by_id", sa.String(), nullable=True),
                sa.Column("organization_id", sa.String(), nullable=False),
                sa.ForeignKeyConstraint(
                    ["organization_id"],
                    ["organizations.id"],
                ),
                sa.PrimaryKeyConstraint("id"),
            )
    op.create_index("ix_archives_created_at", "archives", ["created_at", "id"], unique=False)
    op.create_index("ix_archives_organization_id", "archives", ["organization_id"], unique=False)
    if is_sqlite:
        op.create_table(
            "archives_agents",
            sa.Column("agent_id", sa.String(), nullable=False),
            sa.Column("archive_id", sa.String(), nullable=False),
            sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("datetime('now')"), nullable=False),
            sa.Column("is_owner", sa.Boolean(), nullable=False),
            sa.ForeignKeyConstraint(["agent_id"], ["agents.id"], ondelete="CASCADE"),
            sa.ForeignKeyConstraint(["archive_id"], ["archives.id"], ondelete="CASCADE"),
            sa.PrimaryKeyConstraint("agent_id", "archive_id"),
            # TODO: Remove this constraint when we support multiple archives per agent
            sa.UniqueConstraint("agent_id", name="unique_agent_archive"),
        )
    else:
        op.create_table(
            "archives_agents",
            sa.Column("agent_id", sa.String(), nullable=False),
            sa.Column("archive_id", sa.String(), nullable=False),
            sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=False),
            sa.Column("is_owner", sa.Boolean(), nullable=False),
            sa.ForeignKeyConstraint(["agent_id"], ["agents.id"], ondelete="CASCADE"),
            sa.ForeignKeyConstraint(["archive_id"], ["archives.id"], ondelete="CASCADE"),
            sa.PrimaryKeyConstraint("agent_id", "archive_id"),
            # TODO: Remove this constraint when we support multiple archives per agent
            sa.UniqueConstraint("agent_id", name="unique_agent_archive"),
        )
    if is_sqlite:
        # For SQLite
        # create temporary table to preserve existing agent_passages data
        # (soft-deleted rows are intentionally dropped here)
        op.execute(
            """
            CREATE TEMPORARY TABLE temp_agent_passages AS
            SELECT * FROM agent_passages WHERE is_deleted = 0;
        """
        )
        # create default archives and migrate data
        # First, create archives for each agent that has passages
        # NOTE: archives are matched back to agents by the generated name below,
        # so the name expression must stay identical across these statements.
        op.execute(
            """
            INSERT INTO archives (id, name, description, organization_id, created_at, updated_at, is_deleted)
            SELECT DISTINCT
                'archive-' || lower(hex(randomblob(16))),
                COALESCE(a.name, 'Agent ' || a.id) || '''s Archive',
                'Default archive created during migration',
                a.organization_id,
                datetime('now'),
                datetime('now'),
                0
            FROM temp_agent_passages ap
            JOIN agents a ON ap.agent_id = a.id;
        """
        )
        # create archives_agents relationships
        op.execute(
            """
            INSERT INTO archives_agents (agent_id, archive_id, is_owner, created_at)
            SELECT
                a.id as agent_id,
                ar.id as archive_id,
                1 as is_owner,
                datetime('now') as created_at
            FROM agents a
            JOIN archives ar ON ar.organization_id = a.organization_id
                AND ar.name = COALESCE(a.name, 'Agent ' || a.id) || '''s Archive'
            WHERE EXISTS (
                SELECT 1 FROM temp_agent_passages ap WHERE ap.agent_id = a.id
            );
        """
        )
        # drop the old agent_passages table
        op.drop_index("ix_agent_passages_org_agent", table_name="agent_passages")
        op.drop_table("agent_passages")
        # create the new archival_passages table with the new schema
        op.create_table(
            "archival_passages",
            sa.Column("text", sa.String(), nullable=False),
            sa.Column("embedding_config", EmbeddingConfigColumn, nullable=False),
            sa.Column("metadata_", sa.JSON(), nullable=False),
            sa.Column("embedding", CommonVector, nullable=True),  # SQLite uses CommonVector for embeddings
            sa.Column("id", sa.String(), nullable=False),
            sa.Column("created_at", sa.DateTime(timezone=True), nullable=True),
            sa.Column("updated_at", sa.DateTime(timezone=True), nullable=True),
            sa.Column("is_deleted", sa.Boolean(), server_default=sa.text("0"), nullable=False),
            sa.Column("_created_by_id", sa.String(), nullable=True),
            sa.Column("_last_updated_by_id", sa.String(), nullable=True),
            sa.Column("organization_id", sa.String(), nullable=False),
            sa.Column("archive_id", sa.String(), nullable=False),
            sa.ForeignKeyConstraint(
                ["organization_id"],
                ["organizations.id"],
            ),
            sa.ForeignKeyConstraint(["archive_id"], ["archives.id"], ondelete="CASCADE"),
            sa.PrimaryKeyConstraint("id"),
        )
        # migrate data from temp table to archival_passages with archive_id
        op.execute(
            """
            INSERT INTO archival_passages (
                id, text, embedding_config, metadata_, embedding,
                created_at, updated_at, is_deleted,
                _created_by_id, _last_updated_by_id,
                organization_id, archive_id
            )
            SELECT
                ap.id, ap.text, ap.embedding_config, ap.metadata_, ap.embedding,
                ap.created_at, ap.updated_at, ap.is_deleted,
                ap._created_by_id, ap._last_updated_by_id,
                ap.organization_id, ar.id as archive_id
            FROM temp_agent_passages ap
            JOIN agents a ON ap.agent_id = a.id
            JOIN archives ar ON ar.organization_id = a.organization_id
                AND ar.name = COALESCE(a.name, 'Agent ' || a.id) || '''s Archive';
        """
        )
        # drop temporary table
        op.execute("DROP TABLE temp_agent_passages;")
        # create indexes
        op.create_index("ix_archival_passages_archive_id", "archival_passages", ["archive_id"])
        op.create_index("ix_archival_passages_org_archive", "archival_passages", ["organization_id", "archive_id"])
        op.create_index("archival_passages_created_at_id_idx", "archival_passages", ["created_at", "id"])
    else:
        # PostgreSQL
        # add archive_id to agent_passages
        op.add_column("agent_passages", sa.Column("archive_id", sa.String(), nullable=True))
        # get connection for batch processing
        connection = op.get_bind()
        # get total count of agents with passages
        total_agents_result = connection.execute(
            sa.text(
                """
                SELECT COUNT(DISTINCT a.id)
                FROM agent_passages ap
                JOIN agents a ON ap.agent_id = a.id
                WHERE ap.is_deleted = FALSE
            """
            )
        )
        total_agents = total_agents_result.scalar()
        if total_agents > 0:
            print(f"Starting archival memory migration for {total_agents} agents...")
            start_time = time.time()
            batch_size = 1000
            # process agents one by one to maintain proper relationships
            offset = 0
            while offset < total_agents:
                # Get batch of agents that need archives
                # (the NOT EXISTS clause — not `offset` — drives pagination;
                # each pass picks up agents without an archives_agents row yet)
                batch_result = connection.execute(
                    sa.text(
                        """
                        SELECT DISTINCT a.id, a.name, a.organization_id
                        FROM agent_passages ap
                        JOIN agents a ON ap.agent_id = a.id
                        WHERE ap.is_deleted = FALSE
                        AND NOT EXISTS (
                            SELECT 1 FROM archives_agents aa
                            WHERE aa.agent_id = a.id
                        )
                        ORDER BY a.id
                        LIMIT :batch_size
                    """
                    ).bindparams(batch_size=batch_size)
                )
                agents_batch = batch_result.fetchall()
                if not agents_batch:
                    break  # No more agents to process
                batch_count = len(agents_batch)
                print(f"Processing batch of {batch_count} agents (offset: {offset})...")
                # Create archive and relationship for each agent
                for agent_id, agent_name, org_id in agents_batch:
                    try:
                        # Create archive
                        archive_result = connection.execute(
                            sa.text(
                                """
                                INSERT INTO archives (id, name, description, organization_id, created_at)
                                VALUES (
                                    'archive-' || gen_random_uuid(),
                                    :archive_name,
                                    'Default archive created during migration',
                                    :org_id,
                                    NOW()
                                )
                                RETURNING id
                            """
                            ).bindparams(archive_name=f"{agent_name or f'Agent {agent_id}'}'s Archive", org_id=org_id)
                        )
                        archive_id = archive_result.scalar()
                        # Create agent-archive relationship
                        connection.execute(
                            sa.text(
                                """
                                INSERT INTO archives_agents (agent_id, archive_id, is_owner, created_at)
                                VALUES (:agent_id, :archive_id, TRUE, NOW())
                            """
                            ).bindparams(agent_id=agent_id, archive_id=archive_id)
                        )
                    except Exception as e:
                        # best-effort: a failed agent is logged and skipped so one bad
                        # row does not abort the whole migration
                        print(f"Warning: Failed to create archive for agent {agent_id}: {e}")
                        # Continue with other agents
                offset += batch_count
            print("Archive creation completed. Starting archive_id updates...")
            # update agent_passages with archive_id in batches
            total_passages_result = connection.execute(
                sa.text(
                    """
                    SELECT COUNT(*)
                    FROM agent_passages ap
                    WHERE ap.archive_id IS NULL
                    AND ap.is_deleted = FALSE
                """
                )
            )
            total_passages = total_passages_result.scalar()
            if total_passages > 0:
                print(f"Updating archive_id for {total_passages} passages...")
                updated_passages = 0
                update_batch_size = 5000  # larger batch size for updates
                while updated_passages < total_passages:
                    print(
                        f"Updating passages {updated_passages + 1} to {min(updated_passages + update_batch_size, total_passages)} of {total_passages}..."
                    )
                    # Use connection.execute instead of op.execute to get rowcount
                    result = connection.execute(
                        sa.text(
                            """
                            UPDATE agent_passages ap
                            SET archive_id = aa.archive_id
                            FROM archives_agents aa
                            WHERE ap.agent_id = aa.agent_id
                            AND ap.archive_id IS NULL
                            AND ap.is_deleted = FALSE
                            AND ap.id IN (
                                SELECT id FROM agent_passages
                                WHERE archive_id IS NULL
                                AND is_deleted = FALSE
                                LIMIT :batch_size
                            )
                        """
                        ).bindparams(batch_size=update_batch_size)
                    )
                    rows_updated = result.rowcount
                    if rows_updated == 0:
                        break  # no more rows to update
                    updated_passages += rows_updated
                print(f"Archive_id update completed. Updated {updated_passages} passages.")
            elapsed_time = time.time() - start_time
            print(f"Data migration completed successfully in {elapsed_time:.2f} seconds.")
        else:
            print("No agents with passages found. Skipping data migration.")
        # schema changes
        # NOTE(review): alter_column to NOT NULL will fail if any live passage
        # still lacks an archive_id (e.g. an agent whose archive creation was
        # skipped above) — confirm this is the intended fail-fast behavior.
        op.alter_column("agent_passages", "archive_id", nullable=False)
        op.create_foreign_key("agent_passages_archive_id_fkey", "agent_passages", "archives", ["archive_id"], ["id"], ondelete="CASCADE")
        # drop old indexes and constraints
        op.drop_index("ix_agent_passages_org_agent", table_name="agent_passages")
        op.drop_index("agent_passages_org_idx", table_name="agent_passages")
        op.drop_index("agent_passages_created_at_id_idx", table_name="agent_passages")
        op.drop_constraint("agent_passages_agent_id_fkey", "agent_passages", type_="foreignkey")
        op.drop_column("agent_passages", "agent_id")
        # rename table and create new indexes
        op.rename_table("agent_passages", "archival_passages")
        op.create_index("ix_archival_passages_archive_id", "archival_passages", ["archive_id"])
        op.create_index("ix_archival_passages_org_archive", "archival_passages", ["organization_id", "archive_id"])
        op.create_index("archival_passages_org_idx", "archival_passages", ["organization_id"])
        op.create_index("archival_passages_created_at_id_idx", "archival_passages", ["created_at", "id"])
def downgrade() -> None:
    """Restore per-agent archival passages from the shared-archive schema.

    Passages are re-keyed back to ``agent_id`` through the owner row in
    ``archives_agents`` (``is_owner`` true); non-owner sharing links are
    discarded, then ``archives_agents`` and ``archives`` are dropped.
    SQLite rebuilds the table via a temporary copy; PostgreSQL renames and
    alters in place.
    """
    # Get database connection to check DB type
    bind = op.get_bind()
    is_sqlite = bind.dialect.name == "sqlite"
    if is_sqlite:
        # For SQLite, we need to migrate data back carefully
        # create temporary table to preserve existing archival_passages data
        op.execute(
            """
            CREATE TEMPORARY TABLE temp_archival_passages AS
            SELECT * FROM archival_passages WHERE is_deleted = 0;
        """
        )
        # drop the archival_passages table and indexes
        op.drop_index("ix_archival_passages_org_archive", table_name="archival_passages")
        op.drop_index("ix_archival_passages_archive_id", table_name="archival_passages")
        op.drop_index("archival_passages_created_at_id_idx", table_name="archival_passages")
        op.drop_table("archival_passages")
        # recreate agent_passages with old schema
        op.create_table(
            "agent_passages",
            sa.Column("text", sa.String(), nullable=False),
            sa.Column("embedding_config", EmbeddingConfigColumn, nullable=False),
            sa.Column("metadata_", sa.JSON(), nullable=False),
            sa.Column("embedding", CommonVector, nullable=True),  # SQLite uses CommonVector for embeddings
            sa.Column("id", sa.String(), nullable=False),
            sa.Column("created_at", sa.DateTime(timezone=True), nullable=True),
            sa.Column("updated_at", sa.DateTime(timezone=True), nullable=True),
            sa.Column("is_deleted", sa.Boolean(), server_default=sa.text("0"), nullable=False),
            sa.Column("_created_by_id", sa.String(), nullable=True),
            sa.Column("_last_updated_by_id", sa.String(), nullable=True),
            sa.Column("organization_id", sa.String(), nullable=False),
            sa.Column("agent_id", sa.String(), nullable=False),
            sa.ForeignKeyConstraint(
                ["organization_id"],
                ["organizations.id"],
            ),
            sa.ForeignKeyConstraint(["agent_id"], ["agents.id"], ondelete="CASCADE"),
            sa.PrimaryKeyConstraint("id"),
        )
        # restore data from archival_passages back to agent_passages
        # use the owner relationship from archives_agents to determine agent_id
        # (passages in archives with no owner row are silently dropped)
        op.execute(
            """
            INSERT INTO agent_passages (
                id, text, embedding_config, metadata_, embedding,
                created_at, updated_at, is_deleted,
                _created_by_id, _last_updated_by_id,
                organization_id, agent_id
            )
            SELECT
                ap.id, ap.text, ap.embedding_config, ap.metadata_, ap.embedding,
                ap.created_at, ap.updated_at, ap.is_deleted,
                ap._created_by_id, ap._last_updated_by_id,
                ap.organization_id, aa.agent_id
            FROM temp_archival_passages ap
            JOIN archives_agents aa ON ap.archive_id = aa.archive_id AND aa.is_owner = 1;
        """
        )
        # drop temporary table
        op.execute("DROP TABLE temp_archival_passages;")
        # create original indexes
        op.create_index("ix_agent_passages_org_agent", "agent_passages", ["organization_id", "agent_id"])
        op.create_index("agent_passages_org_idx", "agent_passages", ["organization_id"])
        op.create_index("agent_passages_created_at_id_idx", "agent_passages", ["created_at", "id"])
        # drop new tables for SQLite
        op.drop_table("archives_agents")
        op.drop_index("ix_archives_organization_id", table_name="archives")
        op.drop_index("ix_archives_created_at", table_name="archives")
        op.drop_table("archives")
    else:
        # PostgreSQL:
        # rename table back
        op.drop_index("ix_archival_passages_org_archive", table_name="archival_passages")
        op.drop_index("ix_archival_passages_archive_id", table_name="archival_passages")
        op.drop_index("archival_passages_org_idx", table_name="archival_passages")
        op.drop_index("archival_passages_created_at_id_idx", table_name="archival_passages")
        op.rename_table("archival_passages", "agent_passages")
        # add agent_id column back
        op.add_column("agent_passages", sa.Column("agent_id", sa.String(), nullable=True))
        # restore agent_id from archives_agents (use the owner relationship)
        op.execute(
            """
            UPDATE agent_passages ap
            SET agent_id = aa.agent_id
            FROM archives_agents aa
            WHERE ap.archive_id = aa.archive_id AND aa.is_owner = TRUE;
        """
        )
        # schema changes
        # NOTE(review): the NOT NULL alter fails if any passage's archive had
        # no owner row — such rows keep agent_id NULL after the UPDATE above.
        op.alter_column("agent_passages", "agent_id", nullable=False)
        op.create_foreign_key("agent_passages_agent_id_fkey", "agent_passages", "agents", ["agent_id"], ["id"], ondelete="CASCADE")
        # drop archive_id column and constraint
        op.drop_constraint("agent_passages_archive_id_fkey", "agent_passages", type_="foreignkey")
        op.drop_column("agent_passages", "archive_id")
        # restore original indexes
        op.create_index("ix_agent_passages_org_agent", "agent_passages", ["organization_id", "agent_id"])
        op.create_index("agent_passages_org_idx", "agent_passages", ["organization_id"])
        op.create_index("agent_passages_created_at_id_idx", "agent_passages", ["created_at", "id"])
        # drop new tables for PostgreSQL
        op.drop_table("archives_agents")
        op.drop_index("ix_archives_organization_id", table_name="archives")
        op.drop_index("ix_archives_created_at", table_name="archives")
        op.drop_table("archives")
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/74e860718e0d_add_archival_memory_sharing.py",
"license": "Apache License 2.0",
"lines": 448,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
letta-ai/letta:alembic/versions/750dd87faa12_add_build_request_latency_to_step_.py | """add build request latency to step metrics
Revision ID: 750dd87faa12
Revises: 5b804970e6a0
Create Date: 2025-09-06 14:28:32.119084
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "750dd87faa12"
down_revision: Union[str, None] = "5b804970e6a0"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Add nullable nanosecond-timestamp columns to ``step_metrics``."""
    for column_name in ("step_start_ns", "llm_request_start_ns"):
        op.add_column("step_metrics", sa.Column(column_name, sa.BigInteger(), nullable=True))
def downgrade() -> None:
    """Drop the nanosecond-timestamp columns added by this revision."""
    for column_name in ("step_start_ns", "llm_request_start_ns"):
        op.drop_column("step_metrics", column_name)
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/750dd87faa12_add_build_request_latency_to_step_.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/7f7933666957_add_stop_reason_to_jobs_table.py | """add stop_reason to jobs table
Revision ID: 7f7933666957
Revises: d06594144ef3
Create Date: 2025-09-16 13:20:42.368007
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "7f7933666957"
down_revision: Union[str, None] = "d06594144ef3"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Record why a job stopped: add the nullable ``jobs.stop_reason`` column."""
    stop_reason_column = sa.Column("stop_reason", sa.String(), nullable=True)
    op.add_column("jobs", stop_reason_column)
def downgrade() -> None:
    """Undo the upgrade by dropping ``jobs.stop_reason``."""
    op.drop_column("jobs", "stop_reason")
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/7f7933666957_add_stop_reason_to_jobs_table.py",
"license": "Apache License 2.0",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/878607e41ca4_add_provider_category.py | """add provider category
Revision ID: 878607e41ca4
Revises: 0335b1eb9c40
Create Date: 2025-05-06 12:10:25.751536
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
from letta.settings import settings
# revision identifiers, used by Alembic.
revision: str = "878607e41ca4"
down_revision: Union[str, None] = "0335b1eb9c40"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Add the nullable ``provider_category`` column to ``providers``."""
    # Postgres-only change; SQLite deployments skip it.
    if settings.letta_pg_uri_no_default:
        op.add_column("providers", sa.Column("provider_category", sa.String(), nullable=True))
def downgrade() -> None:
    """Drop ``providers.provider_category`` (Postgres-only, mirroring upgrade)."""
    if settings.letta_pg_uri_no_default:
        op.drop_column("providers", "provider_category")
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/878607e41ca4_add_provider_category.py",
"license": "Apache License 2.0",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/887a4367b560_convert_stop_reason_from_enum_to_string.py | """convert_stop_reason_from_enum_to_string
Revision ID: 887a4367b560
Revises: d5103ee17ed5
Create Date: 2025-08-27 16:34:45.605580
"""
from typing import Sequence, Union
from alembic import op
from letta.settings import settings
# revision identifiers, used by Alembic.
revision: str = "887a4367b560"
down_revision: Union[str, None] = "d5103ee17ed5"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Convert ``steps.stop_reason`` from a Postgres enum column to VARCHAR."""
    # Skip this migration for SQLite: it doesn't enforce column types strictly,
    # so the existing enum values will continue to work as strings.
    if not settings.letta_pg_uri_no_default:
        return
    op.execute(
        """
        ALTER TABLE steps
        ALTER COLUMN stop_reason TYPE VARCHAR
        USING stop_reason::VARCHAR
        """
    )
def downgrade() -> None:
    """Intentional no-op: restoring the enum column is not supported."""
    # This is a one-way migration as we can't easily recreate the enum type
    # If needed, you would need to create the enum type and cast back
    pass
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/887a4367b560_convert_stop_reason_from_enum_to_string.py",
"license": "Apache License 2.0",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/90fd814d0cda_add_callback_error_field_to_jobs.py | """Add callback error field to jobs
Revision ID: 90fd814d0cda
Revises: c0ef3ff26306
Create Date: 2025-06-16 13:04:53.496195
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
from letta.settings import settings
# revision identifiers, used by Alembic.
revision: str = "90fd814d0cda"
down_revision: Union[str, None] = "c0ef3ff26306"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Add ``jobs.callback_error`` for persisting callback failure text."""
    # Applied to PostgreSQL only; SQLite installs are skipped.
    if settings.letta_pg_uri_no_default:
        op.add_column("jobs", sa.Column("callback_error", sa.String(), nullable=True))
def downgrade() -> None:
    """Drop ``jobs.callback_error`` (PostgreSQL only, mirroring upgrade)."""
    if settings.letta_pg_uri_no_default:
        op.drop_column("jobs", "callback_error")
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/90fd814d0cda_add_callback_error_field_to_jobs.py",
"license": "Apache License 2.0",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/9556081ce65b_add_bedrock_creds_to_byok.py | """add bedrock creds to byok
Revision ID: 9556081ce65b
Revises: 90fd814d0cda
Create Date: 2025-06-18 11:15:39.461916
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
from letta.settings import settings
# revision identifiers, used by Alembic.
revision: str = "9556081ce65b"
down_revision: Union[str, None] = "90fd814d0cda"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Add AWS Bedrock credential columns to ``providers`` (Postgres only)."""
    # SQLite deployments skip this migration entirely.
    if settings.letta_pg_uri_no_default:
        for column_name in ("access_key", "region"):
            op.add_column("providers", sa.Column(column_name, sa.String(), nullable=True))
def downgrade() -> None:
    """Drop the Bedrock credential columns added by this revision (Postgres only)."""
    if settings.letta_pg_uri_no_default:
        # Drop in reverse order of creation.
        for column_name in ("region", "access_key"):
            op.drop_column("providers", column_name)
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/9556081ce65b_add_bedrock_creds_to_byok.py",
"license": "Apache License 2.0",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/9758adf8fdd3_add_run_completion_and_duration_to_.py | """add_run_completion_and_duration_to_agents_table
Revision ID: 9758adf8fdd3
Revises: 9556081ce65b
Create Date: 2025-06-18 18:22:31.135685
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
from letta.settings import settings
# revision identifiers, used by Alembic.
revision: str = "9758adf8fdd3"
down_revision: Union[str, None] = "9556081ce65b"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Track each agent's most recent run: completion timestamp and duration."""
    # Postgres-only migration; SQLite deployments are left untouched.
    if settings.letta_pg_uri_no_default:
        op.add_column("agents", sa.Column("last_run_completion", sa.DateTime(timezone=True), nullable=True))
        op.add_column("agents", sa.Column("last_run_duration_ms", sa.Integer(), nullable=True))
def downgrade() -> None:
    """Drop the last-run tracking columns (Postgres only, reverse order)."""
    if settings.letta_pg_uri_no_default:
        op.drop_column("agents", "last_run_duration_ms")
        op.drop_column("agents", "last_run_completion")
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/9758adf8fdd3_add_run_completion_and_duration_to_.py",
"license": "Apache License 2.0",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/9792f94e961d_add_file_processing_status_to_.py | """Add file processing status to FileMetadata and related indices
Revision ID: 9792f94e961d
Revises: cdd4a1c11aee
Create Date: 2025-06-05 18:51:57.022594
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
from letta.settings import settings
# revision identifiers, used by Alembic.
revision: str = "9792f94e961d"
down_revision: Union[str, None] = "cdd4a1c11aee"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Add processing-status tracking to ``files`` plus supporting indices.

    Postgres-only; SQLite deployments return immediately.
    """
    # Skip this migration for SQLite
    if not settings.letta_pg_uri_no_default:
        return
    # Step 1: Create constraint — each file_contents row maps to exactly one file
    op.create_unique_constraint("uq_file_contents_file_id", "file_contents", ["file_id"])
    # Step 2: Add processing_status as nullable first (existing rows stay valid)
    op.add_column("files", sa.Column("processing_status", sa.String(), nullable=True))
    op.add_column("files", sa.Column("error_message", sa.Text(), nullable=True))
    # Step 3: Backfill existing rows with 'completed'
    op.execute("UPDATE files SET processing_status = 'completed'")
    # Step 4: Make the column non-nullable now that it's backfilled
    op.alter_column("files", "processing_status", nullable=False)
    # Step 5: Create indices (newest-first listings per org/source, plus
    # lookups by processing state)
    op.create_index("ix_files_org_created", "files", ["organization_id", sa.literal_column("created_at DESC")], unique=False)
    op.create_index("ix_files_processing_status", "files", ["processing_status"], unique=False)
    op.create_index("ix_files_source_created", "files", ["source_id", sa.literal_column("created_at DESC")], unique=False)
def downgrade() -> None:
    """Reverse upgrade(): drop the indices, columns, and unique constraint."""
    # Skip this migration for SQLite
    if not settings.letta_pg_uri_no_default:
        return
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index("ix_files_source_created", table_name="files")
    op.drop_index("ix_files_processing_status", table_name="files")
    op.drop_index("ix_files_org_created", table_name="files")
    op.drop_column("files", "error_message")
    op.drop_column("files", "processing_status")
    op.drop_constraint("uq_file_contents_file_id", "file_contents", type_="unique")
    # ### end Alembic commands ###
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/9792f94e961d_add_file_processing_status_to_.py",
"license": "Apache License 2.0",
"lines": 43,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/9ecbdbaa409f_add_table_to_store_mcp_servers.py | """add table to store mcp servers
Revision ID: 9ecbdbaa409f
Revises: 6c53224a7a58
Create Date: 2025-05-21 15:25:12.483026
"""
from typing import Sequence, Union
import sqlalchemy as sa
import letta
from alembic import op
from letta.settings import settings
# revision identifiers, used by Alembic.
revision: str = "9ecbdbaa409f"
down_revision: Union[str, None] = "6c53224a7a58"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Create the ``mcp_server`` table for per-organization MCP server configs.

    Postgres-only; SQLite deployments return immediately.
    """
    # Skip this migration for SQLite
    if not settings.letta_pg_uri_no_default:
        return
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        "mcp_server",
        sa.Column("id", sa.String(), nullable=False),
        sa.Column("server_name", sa.String(), nullable=False),
        sa.Column("server_type", sa.String(), nullable=False),
        sa.Column("server_url", sa.String(), nullable=True),
        # Project-defined column type; NOTE(review): the file only does
        # `import letta`, so this assumes letta/__init__ exposes
        # letta.orm.custom_columns — confirm the package import chain.
        sa.Column("stdio_config", letta.orm.custom_columns.MCPStdioServerConfigColumn(), nullable=True),
        sa.Column("organization_id", sa.String(), nullable=False),
        sa.Column("is_deleted", sa.Boolean(), server_default=sa.text("FALSE"), nullable=False),
        sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=True),
        sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=True),
        sa.Column("_created_by_id", sa.String(), nullable=True),
        sa.Column("_last_updated_by_id", sa.String(), nullable=True),
        sa.Column("metadata_", sa.JSON(), nullable=True),
        sa.PrimaryKeyConstraint("id"),
        sa.ForeignKeyConstraint(
            ["organization_id"],
            ["organizations.id"],
        ),
        # Server names are unique within an organization.
        sa.UniqueConstraint("server_name", "organization_id", name="uix_name_organization_mcp_server"),
    )
def downgrade() -> None:
    """Drop the ``mcp_server`` table (Postgres only, mirroring upgrade)."""
    # Skip this migration for SQLite
    if not settings.letta_pg_uri_no_default:
        return
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table("mcp_server")
    # ### end Alembic commands ###
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/9ecbdbaa409f_add_table_to_store_mcp_servers.py",
"license": "Apache License 2.0",
"lines": 48,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/b888f21b151f_add_vector_db_provider_to_source.py | """Add vector db provider to source
Revision ID: b888f21b151f
Revises: 750dd87faa12
Create Date: 2025-09-08 14:49:58.846429
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
from letta.settings import settings
# revision identifiers, used by Alembic.
revision: str = "b888f21b151f"
down_revision: Union[str, None] = "750dd87faa12"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Add ``sources.vector_db_provider`` and backfill it.

    The backfill value depends on whether Pinecone is importable and fully
    configured at migration time: 'PINECONE' when usable, otherwise 'NATIVE'.
    On Postgres the column uses the existing ``vectordbprovider`` enum (with
    'PINECONE' appended); on SQLite it is a plain string column.
    """
    # determine backfill value based on current pinecone settings
    try:
        from pinecone import IndexEmbed, PineconeAsyncio  # noqa: F401
        pinecone_available = True
    except ImportError:
        pinecone_available = False
    # Pinecone is "usable" only when the package imports AND every related
    # setting (flag, API key, both index names) is truthy.
    use_pinecone = all(
        [
            pinecone_available,
            settings.enable_pinecone,
            settings.pinecone_api_key,
            settings.pinecone_agent_index,
            settings.pinecone_source_index,
        ]
    )
    if settings.letta_pg_uri_no_default:
        # commit required before altering enum in postgresql
        # (ALTER TYPE ... ADD VALUE cannot run inside a transaction block)
        connection = op.get_bind()
        connection.execute(sa.text("COMMIT"))
        connection.execute(sa.text("ALTER TYPE vectordbprovider ADD VALUE IF NOT EXISTS 'PINECONE'"))
        connection.execute(sa.text("COMMIT"))
        # create_type=False: reuse the existing Postgres enum, don't recreate it
        vectordbprovider = sa.Enum("NATIVE", "TPUF", "PINECONE", name="vectordbprovider", create_type=False)
        op.add_column("sources", sa.Column("vector_db_provider", vectordbprovider, nullable=True))
        if use_pinecone:
            op.execute("UPDATE sources SET vector_db_provider = 'PINECONE' WHERE vector_db_provider IS NULL")
        else:
            op.execute("UPDATE sources SET vector_db_provider = 'NATIVE' WHERE vector_db_provider IS NULL")
        # every row is backfilled above, so the column can now be NOT NULL
        op.alter_column("sources", "vector_db_provider", nullable=False)
    else:
        # SQLite branch: plain string column; note it is left nullable here,
        # unlike the Postgres branch above.
        op.add_column("sources", sa.Column("vector_db_provider", sa.String(), nullable=True))
        if use_pinecone:
            op.execute("UPDATE sources SET vector_db_provider = 'PINECONE' WHERE vector_db_provider IS NULL")
        else:
            op.execute("UPDATE sources SET vector_db_provider = 'NATIVE' WHERE vector_db_provider IS NULL")
def downgrade() -> None:
    """Drop the ``vector_db_provider`` column added by this revision."""
    op.drop_column("sources", "vector_db_provider")
    # enum type remains as postgresql doesn't support removing values
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/b888f21b151f_add_vector_db_provider_to_source.py",
"license": "Apache License 2.0",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/c0ef3ff26306_add_token_to_mcp_server.py | """add_token_to_mcp_server
Revision ID: c0ef3ff26306
Revises: 1c6b6a38b713
Create Date: 2025-06-14 14:59:53.835883
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
from letta.settings import settings
# revision identifiers, used by Alembic.
revision: str = "c0ef3ff26306"
down_revision: Union[str, None] = "1c6b6a38b713"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Add a nullable ``token`` column to ``mcp_server`` (PostgreSQL only)."""
    # Skip this migration for SQLite
    if not settings.letta_pg_uri_no_default:
        return
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column("mcp_server", sa.Column("token", sa.String(), nullable=True))
    # ### end Alembic commands ###
def downgrade() -> None:
    """Drop the ``token`` column from ``mcp_server`` (PostgreSQL only)."""
    # Skip this migration for SQLite
    if not settings.letta_pg_uri_no_default:
        return
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column("mcp_server", "token")
    # ### end Alembic commands ###
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/c0ef3ff26306_add_token_to_mcp_server.py",
"license": "Apache License 2.0",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/c41c87205254_add_default_requires_approval_field_on_.py | """add default requires approval field on tools
Revision ID: c41c87205254
Revises: 068588268b02
Create Date: 2025-08-28 13:17:51.636159
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "c41c87205254"
down_revision: Union[str, None] = "068588268b02"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Add a nullable ``default_requires_approval`` boolean column to ``tools``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column("tools", sa.Column("default_requires_approval", sa.Boolean(), nullable=True))
    # ### end Alembic commands ###
def downgrade() -> None:
    """Drop the ``default_requires_approval`` column from ``tools``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column("tools", "default_requires_approval")
    # ### end Alembic commands ###
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/c41c87205254_add_default_requires_approval_field_on_.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/c4eb5a907b38_add_file_controls_to_agent_state.py | """Add file controls to agent state
Revision ID: c4eb5a907b38
Revises: cce9a6174366
Create Date: 2025-07-21 15:56:57.413000
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "c4eb5a907b38"
down_revision: Union[str, None] = "cce9a6174366"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Add nullable file-control columns to ``agents``.

    ``max_files_open`` and ``per_file_view_window_char_limit`` are both
    plain nullable integers with no backfill.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column("agents", sa.Column("max_files_open", sa.Integer(), nullable=True))
    op.add_column("agents", sa.Column("per_file_view_window_char_limit", sa.Integer(), nullable=True))
    # ### end Alembic commands ###
def downgrade() -> None:
    """Drop the file-control columns from ``agents`` (reverse order of add)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column("agents", "per_file_view_window_char_limit")
    op.drop_column("agents", "max_files_open")
    # ### end Alembic commands ###
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/c4eb5a907b38_add_file_controls_to_agent_state.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/c56081a05371_add_buffer_length_min_max_for_voice_.py | """Add buffer length min max for voice sleeptime
Revision ID: c56081a05371
Revises: 28b8765bdd0a
Create Date: 2025-04-30 16:03:41.213750
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
from letta.settings import settings
# revision identifiers, used by Alembic.
revision: str = "c56081a05371"
down_revision: Union[str, None] = "28b8765bdd0a"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Add nullable message-buffer bound columns to ``groups`` (PostgreSQL only)."""
    # Skip this migration for SQLite
    if not settings.letta_pg_uri_no_default:
        return
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column("groups", sa.Column("max_message_buffer_length", sa.Integer(), nullable=True))
    op.add_column("groups", sa.Column("min_message_buffer_length", sa.Integer(), nullable=True))
    # ### end Alembic commands ###
def downgrade() -> None:
    """Drop the message-buffer bound columns from ``groups`` (PostgreSQL only)."""
    # Skip this migration for SQLite
    if not settings.letta_pg_uri_no_default:
        return
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column("groups", "min_message_buffer_length")
    op.drop_column("groups", "max_message_buffer_length")
    # ### end Alembic commands ###
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/c56081a05371_add_buffer_length_min_max_for_voice_.py",
"license": "Apache License 2.0",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/c7ac45f69849_add_timezone_to_agents_table.py | """Add timezone to agents table
Revision ID: c7ac45f69849
Revises: 61ee53ec45a5
Create Date: 2025-06-23 17:48:51.177458
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
from letta.settings import settings
# revision identifiers, used by Alembic.
revision: str = "c7ac45f69849"
down_revision: Union[str, None] = "61ee53ec45a5"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Add a nullable ``timezone`` string column to ``agents`` (PostgreSQL only).

    Note: ``default="UTC"`` is a client-side SQLAlchemy default, not a server
    default — existing rows are NOT backfilled and remain NULL.
    """
    # Skip this migration for SQLite
    if not settings.letta_pg_uri_no_default:
        return
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column("agents", sa.Column("timezone", sa.String(), nullable=True, default="UTC"))
    # ### end Alembic commands ###
def downgrade() -> None:
    """Drop the ``timezone`` column from ``agents`` (PostgreSQL only)."""
    # Skip this migration for SQLite
    if not settings.letta_pg_uri_no_default:
        return
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column("agents", "timezone")
    # ### end Alembic commands ###
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/c7ac45f69849_add_timezone_to_agents_table.py",
"license": "Apache License 2.0",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.