sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
letta-ai/letta:letta/schemas/providers/vllm.py | """
Note: this consolidates the vLLM provider for completions (deprecated by openai)
and chat completions. Support is provided primarily for the chat completions endpoint,
but to utilize the completions endpoint, set the proper `base_url` and
`default_prompt_formatter`.
"""
from typing import Literal
from pydantic import Field
from letta.schemas.embedding_config import EmbeddingConfig
from letta.schemas.enums import ProviderCategory, ProviderType
from letta.schemas.llm_config import LLMConfig
from letta.schemas.providers.base import Provider
class VLLMProvider(Provider):
    """Provider for a vLLM server exposing an OpenAI-compatible API.

    Supports the chat completions endpoint by default; to use the legacy
    /completions endpoint, set `base_url` and `default_prompt_formatter`.
    """

    provider_type: Literal[ProviderType.vllm] = Field(ProviderType.vllm, description="The type of the provider.")
    provider_category: ProviderCategory = Field(ProviderCategory.base, description="The category of the provider (base or byok)")
    base_url: str = Field(..., description="Base URL for the vLLM API.")
    api_key: str | None = Field(None, description="API key for the vLLM API.")
    default_prompt_formatter: str | None = Field(
        default=None, description="Default prompt formatter (aka model wrapper) to use on a /completions style API."
    )
    handle_base: str | None = Field(None, description="Custom handle base name for model handles (e.g., 'custom' instead of 'vllm').")

    async def list_llm_models_async(self) -> list[LLMConfig]:
        """List available LLM models by querying the server's /v1/models endpoint.

        Returns:
            One LLMConfig per model advertised by the vLLM server.
        """
        from letta.llm_api.openai import openai_get_model_list_async

        # Normalize the base URL so it ends in exactly one "/v1" segment.
        # Bug fix: the previous one-liner stripped trailing slashes before
        # appending but tested `endswith("/v1")` on the *unstripped* URL, so a
        # configured "http://host/v1/" became "http://host/v1/v1".
        base_url = self.base_url.rstrip("/")
        if not base_url.endswith("/v1"):
            base_url += "/v1"

        response = await openai_get_model_list_async(base_url, api_key=self.api_key)
        # Some servers return {"data": [...]}, others return the bare list.
        data = response.get("data", response)

        configs = []
        for model in data:
            model_name = model["id"]
            configs.append(
                LLMConfig(
                    model=model_name,
                    model_endpoint_type="openai",  # TODO (cliandy): this was previous vllm for the completions provider, why?
                    model_endpoint=base_url,
                    model_wrapper=self.default_prompt_formatter,
                    context_window=model["max_model_len"],
                    handle=self.get_handle(model_name, base_name=self.handle_base) if self.handle_base else self.get_handle(model_name),
                    max_tokens=self.get_default_max_output_tokens(model_name),
                    provider_name=self.name,
                    provider_category=self.provider_category,
                )
            )
        return configs

    async def list_embedding_models_async(self) -> list[EmbeddingConfig]:
        # Note: vLLM technically can support embedding models though may require multiple instances
        # for now, we will not support embedding models for vLLM.
        return []
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/schemas/providers/vllm.py",
"license": "Apache License 2.0",
"lines": 47,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:letta/schemas/providers/xai.py | from typing import Literal
from letta.log import get_logger
logger = get_logger(__name__)
from pydantic import Field
from letta.schemas.enums import ProviderCategory, ProviderType
from letta.schemas.llm_config import LLMConfig
from letta.schemas.providers.openai import OpenAIProvider
# Context window sizes per Grok model, hardcoded from https://docs.x.ai/docs/models
# because the xAI model-listing API does not report context lengths
# (see XAIProvider.get_model_context_window_size).
MODEL_CONTEXT_WINDOWS = {
    "grok-3-fast": 131_072,
    "grok-3": 131_072,
    "grok-3-mini": 131_072,
    "grok-3-mini-fast": 131_072,
    "grok-4-0709": 256_000,
    "grok-4-fast-reasoning": 2_000_000,
    "grok-4-fast-non-reasoning": 2_000_000,
    "grok-code-fast-1": 256_000
}
class XAIProvider(OpenAIProvider):
    """xAI/Grok provider. API reference: https://docs.x.ai/docs/api-reference"""

    provider_type: Literal[ProviderType.xai] = Field(ProviderType.xai, description="The type of the provider.")
    provider_category: ProviderCategory = Field(ProviderCategory.base, description="The category of the provider (base or byok)")
    api_key: str | None = Field(None, description="API key for the xAI/Grok API.", deprecated=True)
    base_url: str = Field("https://api.x.ai/v1", description="Base URL for the xAI/Grok API.")

    def get_model_context_window_size(self, model_name: str) -> int | None:
        """Return the context window size for `model_name`, or None if unknown.

        xAI doesn't return context window in the model listing, so this is
        hardcoded from https://docs.x.ai/docs/models.
        """
        return MODEL_CONTEXT_WINDOWS.get(model_name)

    async def list_llm_models_async(self) -> list[LLMConfig]:
        """List available Grok models via the OpenAI-compatible /models endpoint.

        Entries missing an "id" or with an undeterminable context window are
        skipped (with a warning) rather than failing the whole listing.
        """
        from letta.llm_api.openai import openai_get_model_list_async

        api_key = await self.api_key_enc.get_plaintext_async() if self.api_key_enc else None
        response = await openai_get_model_list_async(self.base_url, api_key=api_key)
        # Some servers return {"data": [...]}, others return the bare list.
        data = response.get("data", response)

        configs = []
        for model in data:
            # Robustness fix: this was an `assert`, which is stripped under
            # `python -O` and otherwise crashes the whole listing on a single
            # malformed entry; skip the bad entry instead.
            if "id" not in model:
                logger.warning(f"xAI/Grok model missing 'id' field, skipping: {model}")
                continue
            model_name = model["id"]

            # In case xAI starts supporting it in the future, prefer an
            # API-reported context length over the hardcoded table:
            if "context_length" in model:
                context_window_size = model["context_length"]
            else:
                context_window_size = self.get_model_context_window_size(model_name)

            if not context_window_size:
                logger.warning(f"Couldn't find context window size for model {model_name}")
                continue

            configs.append(
                LLMConfig(
                    model=model_name,
                    model_endpoint_type="xai",
                    model_endpoint=self.base_url,
                    context_window=context_window_size,
                    handle=self.get_handle(model_name),
                    max_tokens=self.get_default_max_output_tokens(model_name),
                    provider_name=self.name,
                    provider_category=self.provider_category,
                )
            )
        return configs
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/schemas/providers/xai.py",
"license": "Apache License 2.0",
"lines": 57,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:letta/schemas/secret.py | from typing import Any, Dict, Optional
from pydantic import BaseModel, ConfigDict, PrivateAttr
from pydantic_core import core_schema
from letta.helpers.crypto_utils import CryptoUtils
from letta.log import get_logger
from letta.utils import bounded_gather
logger = get_logger(__name__)
class Secret(BaseModel):
    """
    A wrapper class for encrypted credentials that keeps values encrypted in memory.

    This class ensures that sensitive data remains encrypted as much as possible
    while passing through the codebase, only decrypting when absolutely necessary.

    Usage:
        - Create from plaintext: Secret.from_plaintext(value)
        - Create from encrypted DB value: Secret.from_encrypted(encrypted_value)
        - Get encrypted for storage: secret.get_encrypted()
        - Get plaintext when needed: secret.get_plaintext()
    """

    # Store the encrypted value as a regular field.
    # NOTE: when no encryption key is configured, this holds the raw plaintext
    # (see from_plaintext / get_plaintext fallback paths below).
    encrypted_value: Optional[str] = None

    # Cache the decrypted value to avoid repeated decryption (not serialized for security)
    _plaintext_cache: Optional[str] = PrivateAttr(default=None)

    # Frozen: the public field is immutable after construction. The PrivateAttr
    # cache above can still be mutated (pydantic exempts private attributes).
    model_config = ConfigDict(frozen=True)

    @classmethod
    def from_plaintext(cls, value: Optional[str]) -> "Secret":
        """
        Create a Secret from a plaintext value, encrypting it if possible.

        If LETTA_ENCRYPTION_KEY is configured, the value is encrypted.
        If not, the plaintext value is stored directly in encrypted_value field.

        Args:
            value: The plaintext value to encrypt

        Returns:
            A Secret instance with the encrypted (or plaintext) value
        """
        if value is None:
            return cls.model_construct(encrypted_value=None)
        # Guard against double encryption - check if value is already encrypted
        if CryptoUtils.is_encrypted(value):
            logger.warning("Creating Secret from already-encrypted value. This can be dangerous.")
        # Try to encrypt, but fall back to storing plaintext if no encryption key
        try:
            encrypted = CryptoUtils.encrypt(value)
            return cls.model_construct(encrypted_value=encrypted)
        except ValueError as e:
            # No encryption key available, store as plaintext in the _enc column
            if "No encryption key configured" in str(e):
                logger.warning(
                    "No encryption key configured. Storing Secret value as plaintext in _enc column. "
                    "Set LETTA_ENCRYPTION_KEY environment variable to enable encryption."
                )
                instance = cls.model_construct(encrypted_value=value)
                instance._plaintext_cache = value  # Cache it since we know the plaintext
                return instance
            raise  # Re-raise if it's a different error

    @classmethod
    async def from_plaintext_async(cls, value: Optional[str]) -> "Secret":
        """
        Create a Secret from a plaintext value, encrypting it asynchronously.

        This async version runs encryption in a thread pool to avoid blocking
        the event loop during the CPU-intensive PBKDF2 key derivation (100-500ms).

        Use this method in all async contexts (FastAPI endpoints, async services, etc.)
        to avoid blocking the event loop.

        Args:
            value: The plaintext value to encrypt

        Returns:
            A Secret instance with the encrypted (or plaintext) value
        """
        if value is None:
            return cls.model_construct(encrypted_value=None)
        # Guard against double encryption - check if value is already encrypted
        if CryptoUtils.is_encrypted(value):
            logger.warning("Creating Secret from already-encrypted value. This can be dangerous.")
        # Try to encrypt asynchronously, but fall back to storing plaintext if no encryption key
        try:
            encrypted = await CryptoUtils.encrypt_async(value)
            return cls.model_construct(encrypted_value=encrypted)
        except ValueError as e:
            # No encryption key available, store as plaintext in the _enc column
            if "No encryption key configured" in str(e):
                logger.warning(
                    "No encryption key configured. Storing Secret value as plaintext in _enc column. "
                    "Set LETTA_ENCRYPTION_KEY environment variable to enable encryption."
                )
                instance = cls.model_construct(encrypted_value=value)
                instance._plaintext_cache = value  # Cache it since we know the plaintext
                return instance
            raise  # Re-raise if it's a different error

    @classmethod
    async def from_plaintexts_async(cls, values: dict[str, str], max_concurrency: int = 10) -> dict[str, "Secret"]:
        """
        Create multiple Secrets from plaintexts concurrently with bounded concurrency.

        Uses bounded_gather() to encrypt values in parallel while limiting
        concurrent operations to prevent overwhelming the event loop.

        Args:
            values: Dict of key -> plaintext value
            max_concurrency: Maximum number of concurrent encryption operations (default: 10)

        Returns:
            Dict of key -> Secret
        """
        if not values:
            return {}
        keys = list(values.keys())

        async def encrypt_one(key: str) -> "Secret":
            # One encryption task per key; bounded_gather throttles how many run at once.
            return await cls.from_plaintext_async(values[key])

        # bounded_gather preserves input order, so zip(keys, secrets) pairs correctly.
        secrets = await bounded_gather([encrypt_one(k) for k in keys], max_concurrency=max_concurrency)
        return dict(zip(keys, secrets))

    @classmethod
    def from_encrypted(cls, encrypted_value: Optional[str]) -> "Secret":
        """
        Create a Secret from an already encrypted value (read from DB).

        Args:
            encrypted_value: The encrypted value from the _enc column

        Returns:
            A Secret instance
        """
        return cls.model_construct(encrypted_value=encrypted_value)

    def get_encrypted(self) -> Optional[str]:
        """
        Get the encrypted value.

        Returns:
            The encrypted value, or None if the secret is empty
        """
        return self.encrypted_value

    def get_plaintext(self) -> Optional[str]:
        """
        Get the decrypted plaintext value (synchronous version).

        WARNING: This performs CPU-intensive PBKDF2 key derivation that can block for 100-500ms.
        Use get_plaintext_async() in async contexts to avoid blocking the event loop.

        This should only be called when the plaintext is actually needed,
        such as when making an external API call.

        If the value is encrypted, it will be decrypted. If the value is stored
        as plaintext (no encryption key was configured), it will be returned as-is.

        Returns:
            The decrypted plaintext value, or None if the secret is empty
        """
        if self.encrypted_value is None:
            return None
        # Use cached value if available
        if self._plaintext_cache is not None:
            return self._plaintext_cache
        # Try to decrypt
        try:
            plaintext = CryptoUtils.decrypt(self.encrypted_value)
            # Cache the decrypted value (PrivateAttr fields can be mutated even with frozen=True)
            self._plaintext_cache = plaintext
            return plaintext
        except ValueError as e:
            # CryptoUtils signals both "no key" and "bad data" via ValueError;
            # we distinguish the cases by message substring below.
            error_msg = str(e)
            # Handle missing encryption key - check if value is actually plaintext
            if "No encryption key configured" in error_msg:
                if CryptoUtils.is_encrypted(self.encrypted_value):
                    # Value was encrypted but we have no key - can't decrypt
                    logger.warning(
                        "Cannot decrypt Secret value - no encryption key configured. "
                        "The value was encrypted and requires the original key to decrypt."
                    )
                    return None
                else:
                    # Value is plaintext (stored when no key was available)
                    logger.debug("Secret value is plaintext (stored without encryption)")
                    self._plaintext_cache = self.encrypted_value
                    return self.encrypted_value
            # Handle decryption failure - check if value might be plaintext
            elif "Failed to decrypt data" in error_msg:
                if not CryptoUtils.is_encrypted(self.encrypted_value):
                    # It's plaintext that was stored when no key was available
                    logger.debug("Secret value appears to be plaintext (stored without encryption)")
                    self._plaintext_cache = self.encrypted_value
                    return self.encrypted_value
                # Otherwise, it's corrupted or wrong key
                logger.error("Failed to decrypt Secret value - data may be corrupted or wrong key")
                raise
            # Re-raise for other errors
            raise

    async def get_plaintext_async(self) -> Optional[str]:
        """
        Get the decrypted plaintext value (async version).

        Runs the CPU-intensive PBKDF2 key derivation in a thread pool to avoid
        blocking the event loop. This prevents the event loop freeze that occurs
        when decrypting secrets synchronously during HTTP request handling.

        This should be used in all async contexts (FastAPI endpoints, async services, etc.)
        to avoid blocking the event loop for 100-500ms per decryption.

        Returns:
            The decrypted plaintext value, or None if the secret is empty
        """
        if self.encrypted_value is None:
            return None
        # Use cached value if available
        if self._plaintext_cache is not None:
            return self._plaintext_cache
        # Try to decrypt (async)
        try:
            plaintext = await CryptoUtils.decrypt_async(self.encrypted_value)
            # Cache the decrypted value
            self._plaintext_cache = plaintext
            return plaintext
        except ValueError as e:
            # Same message-based dispatch as the sync get_plaintext() above;
            # keep the two branches in lockstep when editing either.
            error_msg = str(e)
            # Handle missing encryption key - check if value is actually plaintext
            if "No encryption key configured" in error_msg:
                if CryptoUtils.is_encrypted(self.encrypted_value):
                    logger.warning(
                        "Cannot decrypt Secret value - no encryption key configured. "
                        "The value was encrypted and requires the original key to decrypt."
                    )
                    return None
                else:
                    logger.debug("Secret value is plaintext (stored without encryption)")
                    self._plaintext_cache = self.encrypted_value
                    return self.encrypted_value
            # Handle decryption failure - check if value might be plaintext
            elif "Failed to decrypt data" in error_msg:
                if not CryptoUtils.is_encrypted(self.encrypted_value):
                    logger.debug("Secret value appears to be plaintext (stored without encryption)")
                    self._plaintext_cache = self.encrypted_value
                    return self.encrypted_value
                logger.error("Failed to decrypt Secret value - data may be corrupted or wrong key")
                raise
            # Re-raise for other errors
            raise

    def is_empty(self) -> bool:
        """Check if the secret is empty/None."""
        return self.encrypted_value is None

    def __str__(self) -> str:
        """String representation that doesn't expose the actual value."""
        if self.is_empty():
            return "<Secret: empty>"
        return "<Secret: ****>"

    def __repr__(self) -> str:
        """Representation that doesn't expose the actual value."""
        return self.__str__()

    def __eq__(self, other: Any) -> bool:
        """
        Compare two secrets by their plaintext values.

        Note: This decrypts both values, so use sparingly.
        """
        if not isinstance(other, Secret):
            return False
        return self.get_plaintext() == other.get_plaintext()

    @classmethod
    def __get_pydantic_core_schema__(cls, source_type: Any, handler) -> core_schema.CoreSchema:
        """
        Customize Pydantic's validation and serialization behavior for Secret fields.

        This allows Secret fields to automatically:
        - Deserialize: Convert encrypted strings from DB -> Secret objects
        - Serialize: Convert Secret objects -> encrypted strings for DB
        """

        def validate_secret(value: Any) -> "Secret":
            """Convert various input types to Secret objects."""
            if isinstance(value, Secret):
                return value
            elif isinstance(value, str):
                # String from DB is assumed to be encrypted
                return Secret.from_encrypted(value)
            elif isinstance(value, dict):
                # Dict might be from Pydantic serialization - check for encrypted_value key
                if "encrypted_value" in value:
                    # This is a serialized Secret being deserialized
                    return cls(**value)
                elif not value or value == {}:
                    # Empty dict means None
                    return Secret.from_plaintext(None)
                else:
                    raise ValueError(f"Cannot convert dict to Secret: {value}")
            elif value is None:
                return Secret.from_plaintext(None)
            else:
                raise ValueError(f"Cannot convert {type(value)} to Secret")

        def serialize_secret(secret: "Secret") -> Optional[str]:
            """Serialize Secret to encrypted string."""
            if secret is None:
                return None
            return secret.get_encrypted()

        # Validation pipeline: coerce to Secret, then assert the instance type.
        python_schema = core_schema.chain_schema(
            [
                core_schema.no_info_plain_validator_function(validate_secret),
                core_schema.is_instance_schema(cls),
            ]
        )
        # The same pipeline is used for both JSON and Python inputs; output is
        # always serialized back to the encrypted string (or None).
        return core_schema.json_or_python_schema(
            json_schema=python_schema,
            python_schema=python_schema,
            serialization=core_schema.plain_serializer_function_ser_schema(
                serialize_secret,
                when_used="always",
            ),
        )

    @classmethod
    def __get_pydantic_json_schema__(cls, core_schema: core_schema.CoreSchema, handler) -> Dict[str, Any]:
        """
        Define JSON schema representation for Secret fields.

        In JSON schema (OpenAPI docs), Secret fields appear as nullable strings.
        The actual encryption/decryption happens at runtime via __get_pydantic_core_schema__.

        Args:
            core_schema: The core schema for this type
            handler: Handler for generating JSON schema

        Returns:
            A JSON schema dict representing this type as a nullable string
        """
        # Return a simple string schema for JSON schema generation
        return {
            "type": "string",
            "nullable": True,
            "description": "Encrypted secret value (stored as encrypted string)",
        }
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/schemas/secret.py",
"license": "Apache License 2.0",
"lines": 305,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/schemas/source_metadata.py | from typing import List, Optional
from pydantic import Field
from letta.schemas.letta_base import LettaBase
class FileStats(LettaBase):
    """File statistics for metadata endpoint"""

    file_id: str = Field(..., description="Unique identifier of the file")
    file_name: str = Field(..., description="Name of the file")
    # None when the size is unavailable.
    file_size: Optional[int] = Field(None, description="Size of the file in bytes")
class SourceStats(LettaBase):
    """Aggregated metadata for a source.

    Note: the `source_id`/`source_name` fields are deprecated in favor of the
    folder terminology but are kept for backward compatibility.
    """

    source_id: str = Field(..., description="Deprecated: Use `folder_id` field instead. Unique identifier of the source", deprecated=True)
    source_name: str = Field(..., description="Deprecated: Use `folder_name` field instead. Name of the source", deprecated=True)
    file_count: int = Field(0, description="Number of files in the source")
    total_size: int = Field(0, description="Total size of all files in bytes")
    files: List[FileStats] = Field(default_factory=list, description="List of file statistics")
class OrganizationSourcesStats(LettaBase):
    """Complete metadata response for organization sources.

    Top-level totals aggregate across every entry in `sources`.
    """

    total_sources: int = Field(0, description="Total number of sources")
    total_files: int = Field(0, description="Total number of files across all sources")
    total_size: int = Field(0, description="Total size of all files in bytes")
    sources: List[SourceStats] = Field(default_factory=list, description="List of source metadata")
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/schemas/source_metadata.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:letta/schemas/step_metrics.py | from typing import Optional
from pydantic import Field
from letta.schemas.enums import PrimitiveType
from letta.schemas.letta_base import LettaBase
class StepMetricsBase(LettaBase):
    """Base class for step metrics; supplies the step ID prefix used by LettaBase."""

    __id_prefix__ = PrimitiveType.STEP.value
class StepMetrics(StepMetricsBase):
    """Timing and attribution metrics for a single agent step.

    The `id` matches the step's own ID (steps.id), so metrics share the step's
    primary key. All `*_ns` fields are expressed in nanoseconds.
    """

    id: str = Field(..., description="The id of the step this metric belongs to (matches steps.id).")
    organization_id: Optional[str] = Field(None, description="The unique identifier of the organization.")
    provider_id: Optional[str] = Field(None, description="The unique identifier of the provider.")
    run_id: Optional[str] = Field(None, description="The unique identifier of the run.")
    agent_id: Optional[str] = Field(None, description="The unique identifier of the agent.")
    step_start_ns: Optional[int] = Field(None, description="The timestamp of the start of the step in nanoseconds.")
    llm_request_start_ns: Optional[int] = Field(None, description="The timestamp of the start of the llm request in nanoseconds.")
    llm_request_ns: Optional[int] = Field(None, description="Time spent on LLM requests in nanoseconds.")
    tool_execution_ns: Optional[int] = Field(None, description="Time spent on tool execution in nanoseconds.")
    step_ns: Optional[int] = Field(None, description="Total time for the step in nanoseconds.")
    base_template_id: Optional[str] = Field(None, description="The base template ID that the step belongs to (cloud only).")
    template_id: Optional[str] = Field(None, description="The template ID that the step belongs to (cloud only).")
    project_id: Optional[str] = Field(None, description="The project that the step belongs to (cloud only).")
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/schemas/step_metrics.py",
"license": "Apache License 2.0",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:letta/server/rest_api/dependencies.py | from typing import TYPE_CHECKING, Optional
from fastapi import Header
from pydantic import BaseModel
from letta.errors import LettaInvalidArgumentError
from letta.otel.tracing import tracer
from letta.schemas.enums import PrimitiveType
from letta.schemas.provider_trace import BillingContext
from letta.validators import PRIMITIVE_ID_PATTERNS
if TYPE_CHECKING:
from letta.server.server import SyncServer
class ExperimentalParams(BaseModel):
    """Experimental parameters used across REST API endpoints.

    Each flag is populated from an "X-Experimental-*" header (see get_headers):
    True/False when the header was sent, None when it was absent.
    """

    message_async: Optional[bool] = None
    letta_v1_agent: Optional[bool] = None
    letta_v1_agent_message_async: Optional[bool] = None
    modal_sandbox: Optional[bool] = None
class HeaderParams(BaseModel):
    """Common header parameters used across REST API endpoints."""

    actor_id: Optional[str] = None  # from the "user_id" header; format-validated in get_headers
    user_agent: Optional[str] = None  # from "User-Agent"
    project_id: Optional[str] = None  # from "X-Project-Id"
    letta_source: Optional[str] = None  # from "X-Letta-Source"
    sdk_version: Optional[str] = None  # from "X-Stainless-Package-Version"
    experimental_params: Optional[ExperimentalParams] = None
    billing_context: Optional[BillingContext] = None  # set only when any X-Billing-* header is present
def get_headers(
    actor_id: Optional[str] = Header(None, alias="user_id"),
    user_agent: Optional[str] = Header(None, alias="User-Agent"),
    project_id: Optional[str] = Header(None, alias="X-Project-Id"),
    letta_source: Optional[str] = Header(None, alias="X-Letta-Source", include_in_schema=False),
    sdk_version: Optional[str] = Header(None, alias="X-Stainless-Package-Version", include_in_schema=False),
    message_async: Optional[str] = Header(None, alias="X-Experimental-Message-Async", include_in_schema=False),
    letta_v1_agent: Optional[str] = Header(None, alias="X-Experimental-Letta-V1-Agent", include_in_schema=False),
    letta_v1_agent_message_async: Optional[str] = Header(
        None, alias="X-Experimental-Letta-V1-Agent-Message-Async", include_in_schema=False
    ),
    modal_sandbox: Optional[str] = Header(None, alias="X-Experimental-Modal-Sandbox", include_in_schema=False),
    billing_plan_type: Optional[str] = Header(None, alias="X-Billing-Plan-Type", include_in_schema=False),
    billing_cost_source: Optional[str] = Header(None, alias="X-Billing-Cost-Source", include_in_schema=False),
    billing_customer_id: Optional[str] = Header(None, alias="X-Billing-Customer-Id", include_in_schema=False),
) -> HeaderParams:
    """Dependency injection function to extract common headers from requests."""

    def _tri_state(raw: Optional[str]) -> Optional[bool]:
        # Experimental flags are tri-state: header absent/empty -> None,
        # "true" -> True, any other value -> False.
        return (raw == "true") if raw else None

    with tracer.start_as_current_span("dependency.get_headers"):
        # Reject malformed user IDs early, before they reach any service layer.
        if actor_id is not None and PRIMITIVE_ID_PATTERNS[PrimitiveType.USER.value].match(actor_id) is None:
            raise LettaInvalidArgumentError(
                message=(f"Invalid user ID format: {actor_id}. Expected format: '{PrimitiveType.USER.value}-<uuid4>'"),
                argument_name="user_id",
            )

        # Only build a billing context when at least one billing header was sent.
        has_billing_headers = any([billing_plan_type, billing_cost_source, billing_customer_id])
        billing = (
            BillingContext(
                plan_type=billing_plan_type,
                cost_source=billing_cost_source,
                customer_id=billing_customer_id,
            )
            if has_billing_headers
            else None
        )

        return HeaderParams(
            actor_id=actor_id,
            user_agent=user_agent,
            project_id=project_id,
            letta_source=letta_source,
            sdk_version=sdk_version,
            experimental_params=ExperimentalParams(
                message_async=_tri_state(message_async),
                letta_v1_agent=_tri_state(letta_v1_agent),
                letta_v1_agent_message_async=_tri_state(letta_v1_agent_message_async),
                modal_sandbox=_tri_state(modal_sandbox),
            ),
            billing_context=billing,
        )
# TODO: why does this double up the interface?
async def get_letta_server() -> "SyncServer":
    """Dependency injection function returning the global server instance.

    The import is deferred to call time because the `server` object lives on
    the app module, which must be imported (and initialized) first.
    """
    with tracer.start_as_current_span("dependency.get_letta_server"):
        # Check if a global server is already instantiated
        from letta.server.rest_api.app import server

        # assert isinstance(server, SyncServer)
        return server
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/server/rest_api/dependencies.py",
"license": "Apache License 2.0",
"lines": 75,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:letta/server/rest_api/json_parser.py | import json
from abc import ABC, abstractmethod
from typing import Any
from pydantic_core import from_json
from letta.log import get_logger
logger = get_logger(__name__)
class JSONParser(ABC):
    """Abstract interface for parsers that turn a (possibly partial) JSON string into Python data."""

    @abstractmethod
    def parse(self, input_str: str) -> Any:
        raise NotImplementedError()
class PydanticJSONParser(JSONParser):
    """JSON parser backed by pydantic-core's `from_json`.

    https://docs.pydantic.dev/latest/concepts/json/#json-parsing

    When `strict` is True, partial JSON is not allowed. Compared with
    `OptimisticJSONParser`, this parser is more strict; on failure it falls
    back to `OptimisticJSONParser` before giving up entirely.

    Note: this will not partially parse strings, which may decrease parsing
    speed for message strings.
    """

    def __init__(self, strict=False):
        self.strict = strict

    def parse(self, input_str: str) -> Any:
        """Parse `input_str` into Python data; empty input yields {}."""
        if not input_str:
            return {}

        # Non-strict mode lets pydantic-core complete dangling strings.
        partial_mode = False if self.strict else "trailing-strings"
        try:
            return from_json(input_str, allow_partial=partial_mode)
        except Exception as e:
            logger.warning(f"PydanticJSONParser failed: {e} | input_str={input_str!r}, falling back to OptimisticJSONParser")
            try:
                return OptimisticJSONParser(strict=self.strict).parse(input_str)
            except Exception as fallback_e:
                logger.error(f"Both parsers failed. Pydantic: {e}, Optimistic: {fallback_e} | input_str={input_str!r}")
                raise fallback_e
class OptimisticJSONParser(JSONParser):
    """
    A JSON parser that attempts to parse a given string using `json.loads`,
    and if that fails, it parses as much valid JSON as possible while
    allowing extra tokens to remain. Those extra tokens can be retrieved
    from `self.last_parse_reminding`. If `strict` is False, the parser
    tries to tolerate incomplete strings and incomplete numbers.
    """

    def __init__(self, strict=False):
        # When True, incomplete strings/numbers raise instead of being
        # returned partially.
        self.strict = strict
        # Dispatch table: first character of the remaining input -> handler.
        # Every handler returns a (parsed_value, remaining_input) tuple.
        self.parsers = {
            " ": self._parse_space,
            "\r": self._parse_space,
            "\n": self._parse_space,
            "\t": self._parse_space,
            "[": self._parse_array,
            "{": self._parse_object,
            '"': self._parse_string,
            "t": self._parse_true,
            "f": self._parse_false,
            "T": self._parse_true,
            "F": self._parse_false,
            "n": self._parse_null,
        }
        # Register number parser for digits and signs
        for char in "0123456789.-":
            self.parsers[char] = self.parse_number
        # Leftover (unparsed) text from the most recent parse() call.
        self.last_parse_reminding = None
        self.on_extra_token = self._default_on_extra_token

    def _default_on_extra_token(self, text, data, reminding):
        # Default callback invoked when parse() leaves unconsumed input behind.
        print(f"Parsed JSON with extra tokens: {data}, remaining: {reminding}")

    def parse(self, input_str):
        """
        Try to parse the entire `input_str` as JSON. If parsing fails,
        attempts a partial parse, storing leftover text in
        `self.last_parse_reminding`. A callback (`on_extra_token`) is
        triggered if extra tokens remain.
        """
        if len(input_str) >= 1:
            try:
                return json.loads(input_str)
            except json.JSONDecodeError as decode_error:
                data, reminding = self._parse_any(input_str, decode_error)
                self.last_parse_reminding = reminding
                if self.on_extra_token and reminding:
                    self.on_extra_token(input_str, data, reminding)
                return data
        else:
            # Empty input parses as an empty object.
            return json.loads("{}")

    def _parse_any(self, input_str, decode_error):
        """Determine which parser to use based on the first character."""
        if not input_str:
            raise decode_error
        parser = self.parsers.get(input_str[0])
        if parser is None:
            # No handler for this character: re-raise the original error.
            raise decode_error
        return parser(input_str, decode_error)

    def _parse_space(self, input_str, decode_error):
        """Strip leading whitespace and parse again."""
        return self._parse_any(input_str.strip(), decode_error)

    def _parse_array(self, input_str, decode_error):
        """Parse a JSON array, returning the list and remaining string."""
        # Skip the '['
        input_str = input_str[1:]
        array_values = []
        input_str = input_str.strip()
        # Loop ends on ']' or when input is exhausted (truncated array).
        while input_str:
            if input_str[0] == "]":
                # Skip the ']'
                input_str = input_str[1:]
                break
            value, input_str = self._parse_any(input_str, decode_error)
            array_values.append(value)
            input_str = input_str.strip()
            if input_str.startswith(","):
                # Skip the ','
                input_str = input_str[1:].strip()
        return array_values, input_str

    def _parse_object(self, input_str, decode_error):
        """Parse a JSON object, returning the dict and remaining string."""
        # Skip the '{'
        input_str = input_str[1:]
        obj = {}
        input_str = input_str.strip()
        while input_str:
            if input_str[0] == "}":
                # Skip the '}'
                input_str = input_str[1:]
                break
            key, input_str = self._parse_any(input_str, decode_error)
            input_str = input_str.strip()
            # Truncated after the key: record it with a None value and stop.
            if not input_str or input_str[0] == "}":
                obj[key] = None
                break
            if input_str[0] != ":":
                raise decode_error
            # Skip ':'
            input_str = input_str[1:].strip()
            # "key:" followed by ',' or '}' (or nothing): value is missing.
            if not input_str or input_str[0] in ",}":
                obj[key] = None
                if input_str.startswith(","):
                    input_str = input_str[1:]
                break
            value, input_str = self._parse_any(input_str, decode_error)
            obj[key] = value
            input_str = input_str.strip()
            if input_str.startswith(","):
                # Skip the ','
                input_str = input_str[1:].strip()
        return obj, input_str

    def _parse_string(self, input_str, decode_error):
        """Parse a JSON string, respecting escaped quotes if present."""
        # NOTE(review): this scan treats any quote preceded by a backslash as
        # escaped, so a string ending in an escaped backslash (e.g. '"a\\\\"')
        # is mis-detected as unterminated — acceptable for streaming partials,
        # but not fully JSON-correct; confirm before reusing elsewhere.
        end = input_str.find('"', 1)
        while end != -1 and input_str[end - 1] == "\\":
            end = input_str.find('"', end + 1)
        if end == -1:
            # Incomplete string
            if not self.strict:
                return input_str[1:], ""  # Lenient mode returns partial string
            raise decode_error  # Raise error for incomplete string in strict mode
        str_val = input_str[: end + 1]
        input_str = input_str[end + 1 :]
        if not self.strict:
            # Lenient mode: strip quotes without decoding escape sequences.
            return str_val[1:-1], input_str
        return json.loads(str_val), input_str

    def parse_number(self, input_str, decode_error):
        """
        Parse a number (int or float). Allows digits, '.', '-', but
        doesn't fully validate complex exponents unless they appear
        before a non-number character.
        """
        idx = 0
        while idx < len(input_str) and input_str[idx] in "0123456789.-":
            idx += 1
        num_str = input_str[:idx]
        remainder = input_str[idx:]
        # If not strict, and it's only a sign or just '.', return as-is with empty remainder
        if not self.strict and (not num_str or num_str in {"-", "."}):
            return num_str, ""
        try:
            if num_str.endswith("."):
                # Trailing dot: treat "12." as the int 12.
                num = int(num_str[:-1])
            else:
                num = float(num_str) if any(c in num_str for c in ".eE") else int(num_str)
        except ValueError:
            raise decode_error
        return num, remainder

    def _parse_true(self, input_str, decode_error):
        """Parse a 'true' value."""
        # Accepts a leading 't'/'T' and consumes 4 characters ("true").
        if input_str.startswith(("t", "T")):
            return True, input_str[4:]
        raise decode_error

    def _parse_false(self, input_str, decode_error):
        """Parse a 'false' value."""
        # Accepts a leading 'f'/'F' and consumes 5 characters ("false").
        if input_str.startswith(("f", "F")):
            return False, input_str[5:]
        raise decode_error

    def _parse_null(self, input_str, decode_error):
        """Parse a 'null' value."""
        # Consumes 4 characters ("null").
        if input_str.startswith("n"):
            return None, input_str[4:]
        raise decode_error
# TODO: Keeping this around for posterity
# def main():
# test_string = '{"inner_thoughts":}'
#
# print(f"Testing string: {test_string!r}")
# print("=" * 50)
#
# print("OptimisticJSONParser (strict=False):")
# try:
# optimistic_parser = OptimisticJSONParser(strict=False)
# result = optimistic_parser.parse(test_string)
# print(f" Result: {result}")
# print(f" Remaining: {optimistic_parser.last_parse_reminding!r}")
# except Exception as e:
# print(f" Error: {e}")
#
# print()
#
# print("PydanticJSONParser (strict=False):")
# try:
# pydantic_parser = PydanticJSONParser(strict=False)
# result = pydantic_parser.parse(test_string)
# print(f" Result: {result}")
# except Exception as e:
# print(f" Error: {e}")
#
#
# if __name__ == "__main__":
# main()
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/server/rest_api/json_parser.py",
"license": "Apache License 2.0",
"lines": 223,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/server/rest_api/middleware/check_password.py | from starlette.middleware.base import BaseHTTPMiddleware
from starlette.responses import JSONResponse
class CheckPasswordMiddleware(BaseHTTPMiddleware):
    """Password-protect every endpoint except the health checks.

    Credentials may arrive either in the bare `X-BARE-PASSWORD` header
    (prefixed with "password ") or as a standard `Authorization: Bearer`
    token; anything else receives a 401.
    """

    def __init__(self, app, password: str):
        super().__init__(app)
        self.password = password

    async def dispatch(self, request, call_next):
        # Health probes must stay reachable without credentials.
        open_paths = {"/v1/health", "/v1/health/", "/latest/health/"}
        if request.url.path in open_paths:
            return await call_next(request)

        bare_header = request.headers.get("X-BARE-PASSWORD")
        auth_header = request.headers.get("Authorization")
        authorized = bare_header == f"password {self.password}" or auth_header == f"Bearer {self.password}"
        if not authorized:
            return JSONResponse(
                content={"detail": "Unauthorized"},
                status_code=401,
            )
        return await call_next(request)
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/server/rest_api/middleware/check_password.py",
"license": "Apache License 2.0",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:letta/server/rest_api/redis_stream_manager.py | """Redis stream manager for reading and writing SSE chunks with batching and TTL."""
import asyncio
import json
import time
from collections import defaultdict
from collections.abc import AsyncGenerator, AsyncIterator
from contextlib import aclosing
from typing import Dict, List, Optional
from letta.data_sources.redis_client import AsyncRedisClient
from letta.log import get_logger
from letta.schemas.enums import RunStatus
from letta.schemas.letta_message import LettaErrorMessage
from letta.schemas.letta_stop_reason import LettaStopReason, StopReasonType
from letta.schemas.run import RunUpdate
from letta.schemas.user import User
from letta.server.rest_api.streaming_response import RunCancelledException
from letta.services.run_manager import RunManager
from letta.utils import safe_create_task
# Module-level logger for the Redis SSE stream manager.
logger = get_logger(__name__)
class RedisSSEStreamWriter:
    """
    Efficiently writes SSE chunks to Redis streams with batching and TTL management.

    Features:
    - Batches writes using Redis pipelines for performance
    - Automatically sets/refreshes TTL on streams
    - Tracks sequential IDs for cursor-based recovery
    - Handles flush on size or time thresholds
    """

    def __init__(
        self,
        redis_client: AsyncRedisClient,
        flush_interval: float = 0.5,
        flush_size: int = 50,
        stream_ttl_seconds: int = 10800,  # 3 hours default
        max_stream_length: int = 10000,  # Max entries per stream
    ):
        """
        Initialize the Redis SSE stream writer.

        Args:
            redis_client: Redis client instance
            flush_interval: Seconds between automatic flushes
            flush_size: Number of chunks to buffer before flushing
            stream_ttl_seconds: TTL for streams in seconds (default: 3 hours)
            max_stream_length: Maximum entries per stream before trimming
        """
        self.redis = redis_client
        self.flush_interval = flush_interval
        self.flush_size = flush_size
        self.stream_ttl = stream_ttl_seconds
        self.max_stream_length = max_stream_length
        # Buffer for batching: run_id -> list of chunks
        self.buffer: Dict[str, List[Dict]] = defaultdict(list)
        # Track sequence IDs per run (starts at 1, increments per chunk)
        self.seq_counters: Dict[str, int] = defaultdict(lambda: 1)
        # Track last flush time per run. Defaults to 0.0, so the very first
        # chunk written for a run always triggers an immediate flush.
        self.last_flush: Dict[str, float] = defaultdict(float)
        # Background flush task (created lazily by start())
        self._flush_task = None
        self._running = False

    async def start(self):
        """Start the background flush task (idempotent)."""
        if not self._running:
            self._running = True
            self._flush_task = safe_create_task(self._periodic_flush(), label="redis_periodic_flush")

    async def stop(self):
        """Stop the background flush task and flush remaining data."""
        self._running = False
        if self._flush_task:
            self._flush_task.cancel()
            try:
                await self._flush_task
            except asyncio.CancelledError:
                pass
        # Drain any chunks still buffered for every run.
        for run_id in list(self.buffer.keys()):
            if self.buffer[run_id]:
                await self._flush_run(run_id)

    async def write_chunk(
        self,
        run_id: str,
        data: str,
        is_complete: bool = False,
    ) -> int:
        """
        Write an SSE chunk to the buffer for a specific run.

        Args:
            run_id: The run ID to write to
            data: SSE-formatted chunk data
            is_complete: Whether this is the final chunk

        Returns:
            The sequence ID assigned to this chunk
        """
        seq_id = self.seq_counters[run_id]
        self.seq_counters[run_id] += 1

        chunk = {
            "seq_id": seq_id,
            "data": data,
            "timestamp": int(time.time() * 1000),
        }

        if is_complete:
            chunk["complete"] = "true"

        self.buffer[run_id].append(chunk)

        # Flush on size threshold, terminal chunk, or flush-interval elapsed.
        should_flush = (
            len(self.buffer[run_id]) >= self.flush_size or is_complete or (time.time() - self.last_flush[run_id]) > self.flush_interval
        )

        if should_flush:
            await self._flush_run(run_id)

        return seq_id

    async def _flush_run(self, run_id: str):
        """Flush buffered chunks for a specific run to Redis.

        Writes all buffered chunks in a single (non-transactional) pipeline and
        refreshes the stream TTL. On failure, the chunks are prepended back to
        the buffer so the next flush retries them, and the error is re-raised.
        """
        if not self.buffer[run_id]:
            return

        # Swap the buffer out before the awaits so concurrent writes go to a fresh list.
        chunks = self.buffer[run_id]
        self.buffer[run_id] = []

        stream_key = f"sse:run:{run_id}"

        try:
            client = await self.redis.get_client()
            async with client.pipeline(transaction=False) as pipe:
                for chunk in chunks:
                    # approximate=True lets Redis trim lazily for performance.
                    await pipe.xadd(stream_key, chunk, maxlen=self.max_stream_length, approximate=True)
                await pipe.expire(stream_key, self.stream_ttl)
                await pipe.execute()

            self.last_flush[run_id] = time.time()
            logger.debug(f"Flushed {len(chunks)} chunks to Redis stream {stream_key}, seq_ids {chunks[0]['seq_id']}-{chunks[-1]['seq_id']}")

            # Terminal chunk flushed: drop all in-memory tracking for this run.
            if chunks[-1].get("complete") == "true":
                self._cleanup_run(run_id)
        except Exception as e:
            logger.error(f"Failed to flush chunks for run {run_id}: {e}")
            # Put chunks back in buffer to retry
            self.buffer[run_id] = chunks + self.buffer[run_id]
            raise

    async def _periodic_flush(self):
        """Background task to periodically flush buffers."""
        while self._running:
            try:
                await asyncio.sleep(self.flush_interval)

                # Check each run for time-based flush
                current_time = time.time()
                runs_to_flush = [
                    run_id
                    for run_id, last_flush in self.last_flush.items()
                    if (current_time - last_flush) > self.flush_interval and self.buffer[run_id]
                ]

                for run_id in runs_to_flush:
                    await self._flush_run(run_id)
            except asyncio.CancelledError:
                break
            except Exception as e:
                # Keep the loop alive; a single failed flush must not kill the task.
                logger.error(f"Error in periodic flush: {e}")

    def _cleanup_run(self, run_id: str):
        """Clean up tracking data for a completed run."""
        self.buffer.pop(run_id, None)
        self.seq_counters.pop(run_id, None)
        self.last_flush.pop(run_id, None)

    async def mark_complete(self, run_id: str):
        """Mark a stream as complete and flush."""
        # Add a [DONE] marker
        await self.write_chunk(run_id, "data: [DONE]\n\n", is_complete=True)
async def create_background_stream_processor(
    stream_generator: AsyncGenerator[str | bytes | tuple[str | bytes, int], None],
    redis_client: AsyncRedisClient,
    run_id: str,
    writer: Optional[RedisSSEStreamWriter] = None,
    run_manager: Optional[RunManager] = None,
    actor: Optional[User] = None,
    conversation_id: Optional[str] = None,
) -> None:
    """
    Process a stream in the background and store chunks to Redis.

    This function consumes the stream generator and writes all chunks
    to Redis for later retrieval. It also tracks terminal events
    ([DONE] / event: error) so it can synthesize a terminal when the
    upstream ends without one, and updates the run's final status.

    Args:
        stream_generator: The async generator yielding SSE chunks
        redis_client: Redis client instance
        run_id: The run ID to store chunks under
        writer: Optional pre-configured writer (creates new if not provided)
        run_manager: Optional run manager for updating run status
        actor: Optional actor for run status updates
        conversation_id: Optional conversation ID for releasing lock on terminal states
    """
    stop_reason = None
    saw_done = False
    saw_error = False
    error_metadata = None
    if writer is None:
        # Caller gave no writer: own its lifecycle (start here, stop in finally).
        writer = RedisSSEStreamWriter(redis_client)
        await writer.start()
        should_stop_writer = True
    else:
        should_stop_writer = False

    try:
        # Always close the upstream async generator so its `finally` blocks run.
        # (e.g., stream adapters may persist terminal error metadata on close)
        async with aclosing(stream_generator):
            async for chunk in stream_generator:
                # Chunks may arrive as (data, seq) tuples; only the data matters here.
                if isinstance(chunk, tuple):
                    chunk = chunk[0]
                # Track terminal events (check at line start to avoid false positives in message content)
                # NOTE(review): bytes chunks skip this detection entirely — terminal
                # markers are only recognized on str chunks.
                if isinstance(chunk, str):
                    if "\ndata: [DONE]" in chunk or chunk.startswith("data: [DONE]"):
                        saw_done = True
                    if "\nevent: error" in chunk or chunk.startswith("event: error"):
                        saw_error = True
                        # Best-effort extraction of the error payload so we can persist it on the run.
                        # Chunk format is typically: "event: error\ndata: {json}\n\n"
                        if saw_error and error_metadata is None:
                            try:
                                # Grab the first `data:` line after `event: error`
                                for line in chunk.splitlines():
                                    if line.startswith("data: "):
                                        maybe_json = line[len("data: ") :].strip()
                                        if maybe_json and maybe_json[0] in "[{":
                                            error_metadata = {"error": json.loads(maybe_json)}
                                        else:
                                            error_metadata = {"error": {"message": maybe_json}}
                                        break
                            except Exception:
                                # Don't let parsing failures interfere with streaming
                                error_metadata = {"error": {"message": "Failed to parse error payload from stream."}}
                is_done = saw_done or saw_error
                await writer.write_chunk(run_id=run_id, data=chunk, is_complete=is_done)
                if is_done:
                    break

                try:
                    # Extract stop_reason from stop_reason chunks
                    maybe_json_chunk = chunk.split("data: ")[1]
                    maybe_stop_reason = json.loads(maybe_json_chunk) if maybe_json_chunk and maybe_json_chunk[0] == "{" else None
                    if maybe_stop_reason and maybe_stop_reason.get("message_type") == "stop_reason":
                        stop_reason = maybe_stop_reason.get("stop_reason")
                except Exception:
                    # Non-JSON or non-stop_reason chunks are expected; ignore them.
                    pass

        # Stream ended naturally - check if we got a proper terminal
        if not saw_done and not saw_error:
            # Stream ended without terminal event - synthesize one
            logger.warning(
                f"Stream for run {run_id} ended without terminal event (no [DONE] or event:error). "
                f"Last stop_reason seen: {stop_reason}. Synthesizing terminal."
            )
            if stop_reason:
                # We have a stop_reason, send [DONE]
                await writer.write_chunk(run_id=run_id, data="data: [DONE]\n\n", is_complete=True)
                saw_done = True
            else:
                # No stop_reason and no terminal - this is an error condition
                error_message = LettaErrorMessage(
                    run_id=run_id,
                    error_type="stream_incomplete",
                    message="Stream ended unexpectedly without stop_reason.",
                    detail=None,
                )
                # Write error chunks to Redis instead of yielding (this is a background task, not a generator)
                await writer.write_chunk(
                    run_id=run_id,
                    data=f"data: {LettaStopReason(stop_reason=StopReasonType.error).model_dump_json()}\n\n",
                    is_complete=False,
                )
                await writer.write_chunk(
                    run_id=run_id, data=f"event: error\ndata: {error_message.model_dump_json()}\n\n", is_complete=False
                )
                await writer.write_chunk(run_id=run_id, data="data: [DONE]\n\n", is_complete=True)
                saw_error = True
                saw_done = True
                # Set a default stop_reason so run status can be mapped in finally
                stop_reason = StopReasonType.error.value

    except RunCancelledException:
        # Handle cancellation gracefully - don't write error chunk, cancellation event was already sent
        logger.info(f"Stream processing stopped due to cancellation for run {run_id}")
        # The cancellation event was already yielded by cancellation_aware_stream_wrapper
        # Write [DONE] marker to properly close the stream for clients reading from Redis
        await writer.write_chunk(run_id=run_id, data="data: [DONE]\n\n", is_complete=True)
        saw_done = True

    except Exception as e:
        logger.error(f"Error processing stream for run {run_id}: {e}")
        # Write error chunk
        stop_reason = StopReasonType.error.value
        error_message = LettaErrorMessage(
            run_id=run_id,
            error_type="internal_error",
            message="An unknown error occurred with the LLM streaming request.",
            detail=str(e),
        )
        await writer.write_chunk(
            run_id=run_id, data=f"data: {LettaStopReason(stop_reason=stop_reason).model_dump_json()}\n\n", is_complete=False
        )
        await writer.write_chunk(run_id=run_id, data=f"event: error\ndata: {error_message.model_dump_json()}\n\n", is_complete=False)
        await writer.write_chunk(run_id=run_id, data="data: [DONE]\n\n", is_complete=True)
        saw_error = True
        saw_done = True

        # Mark run as failed immediately
        if run_manager and actor:
            await run_manager.update_run_by_id_async(
                run_id=run_id,
                update=RunUpdate(status=RunStatus.failed, stop_reason=StopReasonType.error.value, metadata={"error": str(e)}),
                actor=actor,
                conversation_id=conversation_id,
            )

    finally:
        # Only stop the writer if this function created it.
        if should_stop_writer:
            await writer.stop()

        # Derive a final stop_reason if one wasn't observed explicitly
        final_stop_reason = stop_reason
        if final_stop_reason is None:
            if saw_error:
                final_stop_reason = StopReasonType.error.value
            elif saw_done:
                # Treat DONE without an explicit stop_reason as an error to avoid masking failures
                final_stop_reason = StopReasonType.error.value

        # Update run status to reflect terminal outcome
        if run_manager and actor and final_stop_reason:
            # Resolve stop_reason using canonical enum mapping to avoid drift.
            try:
                run_status = StopReasonType(final_stop_reason).run_status
            except ValueError:
                logger.warning(f"Unknown stop_reason '{final_stop_reason}' for run {run_id}, defaulting to completed")
                run_status = RunStatus.completed
            update_kwargs = {"status": run_status, "stop_reason": final_stop_reason}
            if run_status == RunStatus.failed and error_metadata is not None:
                update_kwargs["metadata"] = error_metadata
            await run_manager.update_run_by_id_async(
                run_id=run_id,
                update=RunUpdate(**update_kwargs),
                actor=actor,
                conversation_id=conversation_id,
            )

        # Belt-and-suspenders: always append a terminal [DONE] chunk to ensure clients terminate
        # Even if a previous chunk set `complete`, an extra [DONE] is harmless and ensures SDKs that
        # rely on explicit [DONE] will exit.
        logger.warning(
            "[Stream Finalizer] Appending forced [DONE] for run=%s (saw_error=%s, saw_done=%s, final_stop_reason=%s)",
            run_id,
            saw_error,
            saw_done,
            final_stop_reason,
        )
        try:
            await writer.mark_complete(run_id)
        except Exception as e:
            logger.warning(f"Failed to append terminal [DONE] for run {run_id}: {e}")
async def redis_sse_stream_generator(
    redis_client: AsyncRedisClient,
    run_id: str,
    starting_after: Optional[int] = None,
    poll_interval: float = 0.1,
    batch_size: int = 100,
) -> AsyncIterator[str]:
    """
    Generate SSE events from Redis stream chunks.

    This generator reads chunks stored in Redis streams and yields them as SSE events.
    It supports cursor-based recovery by allowing you to start from a specific seq_id.
    It returns once an entry flagged `complete` is seen; otherwise it polls forever.

    Args:
        redis_client: Redis client instance
        run_id: The run ID to read chunks for
        starting_after: Sequential ID (integer) to start reading from (default: None for beginning)
        poll_interval: Seconds to wait between polls when no new data (default: 0.1)
        batch_size: Number of entries to read per batch (default: 100)

    Yields:
        SSE-formatted chunks from the Redis stream
    """
    stream_key = f"sse:run:{run_id}"
    # "-" is XRANGE's minimal-ID sentinel: start from the beginning of the stream.
    last_redis_id = "-"
    cursor_seq_id = starting_after or 0

    logger.debug(f"Starting redis_sse_stream_generator for run_id={run_id}, stream_key={stream_key}")
    while True:
        entries = await redis_client.xrange(stream_key, start=last_redis_id, count=batch_size)

        if entries:
            yielded_any = False
            for entry_id, fields in entries:
                # XRANGE's start is inclusive, so skip the cursor entry itself.
                if entry_id == last_redis_id:
                    continue
                chunk_seq_id = int(fields.get("seq_id", 0))
                # Only yield chunks past the caller's resume cursor.
                if chunk_seq_id > cursor_seq_id:
                    data = fields.get("data", "")
                    if not data:
                        logger.debug(f"No data found for chunk {chunk_seq_id} in run {run_id}")
                        continue
                    # Backfill run_id/seq_id into payloads that were stored with nulls.
                    if '"run_id":null' in data:
                        data = data.replace('"run_id":null', f'"run_id":"{run_id}"')
                    if '"seq_id":null' in data:
                        data = data.replace('"seq_id":null', f'"seq_id":{chunk_seq_id}')
                    yield data
                    yielded_any = True
                # Terminal marker ends the stream regardless of the cursor.
                if fields.get("complete") == "true":
                    return
                last_redis_id = entry_id
            # Skipped a whole batch of pre-cursor entries: read the next batch
            # immediately instead of sleeping.
            if not yielded_any and len(entries) > 1:
                continue

        # Nothing new (empty read, or only the cursor entry came back): poll.
        if not entries or (len(entries) == 1 and entries[0][0] == last_redis_id):
            await asyncio.sleep(poll_interval)
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/server/rest_api/redis_stream_manager.py",
"license": "Apache License 2.0",
"lines": 387,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/server/rest_api/routers/v1/archives.py | from typing import Dict, List, Literal, Optional
from fastapi import APIRouter, Body, Depends, Query
from pydantic import BaseModel, Field
from letta import AgentState
from letta.schemas.agent import AgentRelationships
from letta.schemas.archive import Archive as PydanticArchive
from letta.schemas.embedding_config import EmbeddingConfig
from letta.schemas.passage import Passage
from letta.server.rest_api.dependencies import HeaderParams, get_headers, get_letta_server
from letta.server.server import SyncServer
from letta.settings import settings
from letta.validators import ArchiveId, PassageId
# All archive endpoints below are mounted under this prefix.
router = APIRouter(prefix="/archives", tags=["archives"])
class ArchiveCreateRequest(BaseModel):
    """Request model for creating an archive.

    Intentionally excludes vector_db_provider. These are derived internally (vector DB provider from env).
    """

    # Required display name for the new archive.
    name: str
    # Deprecated full config object; `embedding` (a handle string) is the replacement.
    embedding_config: Optional[EmbeddingConfig] = Field(
        None, description="Deprecated: Use `embedding` field instead. Embedding configuration for the archive", deprecated=True
    )
    embedding: Optional[str] = Field(None, description="Embedding model handle for the archive")
    description: Optional[str] = None
class ArchiveUpdateRequest(BaseModel):
    """Request model for updating an archive (partial).

    Supports updating only name and description.
    """

    # Optional new name for the archive.
    name: Optional[str] = None
    # Optional new description for the archive.
    description: Optional[str] = None
class PassageCreateRequest(BaseModel):
    """Request model for creating a passage in an archive."""

    text: str = Field(..., description="The text content of the passage")
    metadata: Optional[Dict] = Field(default=None, description="Optional metadata for the passage")
    tags: Optional[List[str]] = Field(default=None, description="Optional tags for categorizing the passage")
    # Accepted as a string (ISO 8601), not a datetime object.
    created_at: Optional[str] = Field(default=None, description="Optional creation datetime for the passage (ISO 8601 format)")
class PassageBatchCreateRequest(BaseModel):
    """Request model for creating multiple passages in an archive."""

    passages: List[PassageCreateRequest] = Field(..., description="Passages to create in the archive")
@router.post("/", response_model=PydanticArchive, operation_id="create_archive")
async def create_archive(
archive: ArchiveCreateRequest = Body(...),
server: "SyncServer" = Depends(get_letta_server),
headers: HeaderParams = Depends(get_headers),
):
"""
Create a new archive.
"""
actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
embedding_config = archive.embedding_config
if embedding_config is None:
embedding_handle = archive.embedding
if embedding_handle is None:
embedding_handle = settings.default_embedding_handle
# Only resolve embedding config if we have an embedding handle
if embedding_handle is not None:
embedding_config = await server.get_embedding_config_from_handle_async(
handle=embedding_handle,
actor=actor,
)
# Otherwise, embedding_config remains None (text search only)
return await server.archive_manager.create_archive_async(
name=archive.name,
embedding_config=embedding_config,
description=archive.description,
actor=actor,
)
@router.get("/", response_model=List[PydanticArchive], operation_id="list_archives")
async def list_archives(
before: Optional[str] = Query(
None,
description="Archive ID cursor for pagination. Returns archives that come before this archive ID in the specified sort order",
),
after: Optional[str] = Query(
None,
description="Archive ID cursor for pagination. Returns archives that come after this archive ID in the specified sort order",
),
limit: Optional[int] = Query(50, description="Maximum number of archives to return"),
order: Literal["asc", "desc"] = Query(
"desc", description="Sort order for archives by creation time. 'asc' for oldest first, 'desc' for newest first"
),
order_by: Literal["created_at"] = Query("created_at", description="Field to sort by"),
name: Optional[str] = Query(None, description="Filter by archive name (exact match)"),
agent_id: Optional[str] = Query(None, description="Only archives attached to this agent ID"),
server: "SyncServer" = Depends(get_letta_server),
headers: HeaderParams = Depends(get_headers),
):
"""
Get a list of all archives for the current organization with optional filters and pagination.
"""
actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
archives = await server.archive_manager.list_archives_async(
actor=actor,
before=before,
after=after,
limit=limit,
ascending=(order == "asc"),
name=name,
agent_id=agent_id,
)
return archives
@router.get("/{archive_id}", response_model=PydanticArchive, operation_id="retrieve_archive")
async def retrieve_archive(
archive_id: ArchiveId,
server: "SyncServer" = Depends(get_letta_server),
headers: HeaderParams = Depends(get_headers),
):
"""
Get a single archive by its ID.
"""
actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
return await server.archive_manager.get_archive_by_id_async(
archive_id=archive_id,
actor=actor,
)
@router.patch("/{archive_id}", response_model=PydanticArchive, operation_id="modify_archive")
async def modify_archive(
archive_id: ArchiveId,
archive: ArchiveUpdateRequest = Body(...),
server: "SyncServer" = Depends(get_letta_server),
headers: HeaderParams = Depends(get_headers),
):
"""
Update an existing archive's name and/or description.
"""
actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
return await server.archive_manager.update_archive_async(
archive_id=archive_id,
name=archive.name,
description=archive.description,
actor=actor,
)
@router.delete("/{archive_id}", status_code=204, operation_id="delete_archive")
async def delete_archive(
archive_id: ArchiveId,
server: "SyncServer" = Depends(get_letta_server),
headers: HeaderParams = Depends(get_headers),
):
"""
Delete an archive by its ID.
"""
actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
await server.archive_manager.delete_archive_async(
archive_id=archive_id,
actor=actor,
)
return None
@router.get("/{archive_id}/agents", response_model=List[AgentState], operation_id="list_agents_for_archive")
async def list_agents_for_archive(
archive_id: ArchiveId,
before: Optional[str] = Query(
None,
description="Agent ID cursor for pagination. Returns agents that come before this agent ID in the specified sort order",
),
after: Optional[str] = Query(
None,
description="Agent ID cursor for pagination. Returns agents that come after this agent ID in the specified sort order",
),
limit: Optional[int] = Query(50, description="Maximum number of agents to return"),
order: Literal["asc", "desc"] = Query(
"desc", description="Sort order for agents by creation time. 'asc' for oldest first, 'desc' for newest first"
),
include: List[AgentRelationships] = Query(
[],
description=("Specify which relational fields to include in the response. No relationships are included by default."),
),
server: "SyncServer" = Depends(get_letta_server),
headers: HeaderParams = Depends(get_headers),
):
"""
Get a list of agents that have access to an archive with pagination support.
"""
actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
return await server.archive_manager.get_agents_for_archive_async(
archive_id=archive_id,
actor=actor,
before=before,
after=after,
limit=limit,
include=include,
ascending=(order == "asc"),
)
@router.post("/{archive_id}/passages", response_model=Passage, operation_id="create_passage_in_archive")
async def create_passage_in_archive(
archive_id: ArchiveId,
passage: PassageCreateRequest = Body(...),
server: "SyncServer" = Depends(get_letta_server),
headers: HeaderParams = Depends(get_headers),
):
"""
Create a new passage in an archive.
This adds a passage to the archive and creates embeddings for vector storage.
"""
actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
return await server.archive_manager.create_passage_in_archive_async(
archive_id=archive_id,
text=passage.text,
metadata=passage.metadata,
tags=passage.tags,
created_at=passage.created_at,
actor=actor,
)
@router.post("/{archive_id}/passages/batch", response_model=List[Passage], operation_id="create_passages_in_archive")
async def create_passages_in_archive(
archive_id: ArchiveId,
payload: PassageBatchCreateRequest = Body(...),
server: "SyncServer" = Depends(get_letta_server),
headers: HeaderParams = Depends(get_headers),
):
"""
Create multiple passages in an archive.
This adds passages to the archive and creates embeddings for vector storage.
"""
actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
return await server.archive_manager.create_passages_in_archive_async(
archive_id=archive_id,
passages=[passage.model_dump() for passage in payload.passages],
actor=actor,
)
@router.delete("/{archive_id}/passages/{passage_id}", status_code=204, operation_id="delete_passage_from_archive")
async def delete_passage_from_archive(
archive_id: ArchiveId,
passage_id: PassageId,
server: "SyncServer" = Depends(get_letta_server),
headers: HeaderParams = Depends(get_headers),
):
"""
Delete a passage from an archive.
This permanently removes the passage from both the database and vector storage (if applicable).
"""
actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
await server.archive_manager.delete_passage_from_archive_async(
archive_id=archive_id,
passage_id=passage_id,
actor=actor,
)
return None
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/server/rest_api/routers/v1/archives.py",
"license": "Apache License 2.0",
"lines": 236,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/server/rest_api/routers/v1/folders.py | import asyncio
import mimetypes
import os
import tempfile
from pathlib import Path as PathLibPath
from typing import List, Literal, Optional
from fastapi import APIRouter, Depends, HTTPException, Query, UploadFile
from starlette import status
from starlette.responses import Response
import letta.constants as constants
from letta.errors import LettaInvalidArgumentError, LettaUnsupportedFileUploadError
from letta.helpers.pinecone_utils import (
delete_file_records_from_pinecone_index,
delete_source_records_from_pinecone_index,
should_use_pinecone,
)
from letta.helpers.tpuf_client import should_use_tpuf
from letta.log import get_logger
from letta.otel.tracing import trace_method
from letta.schemas.agent import AgentState
from letta.schemas.embedding_config import EmbeddingConfig
from letta.schemas.enums import DuplicateFileHandling, FileProcessingStatus
from letta.schemas.file import FileMetadata
from letta.schemas.folder import Folder
from letta.schemas.passage import Passage
from letta.schemas.source import Source, SourceCreate, SourceUpdate
from letta.schemas.source_metadata import OrganizationSourcesStats
from letta.schemas.user import User
from letta.server.rest_api.dependencies import HeaderParams, get_headers, get_letta_server
from letta.server.server import SyncServer
from letta.services.file_processor.embedder.openai_embedder import OpenAIEmbedder
from letta.services.file_processor.embedder.pinecone_embedder import PineconeEmbedder
from letta.services.file_processor.file_processor import FileProcessor
from letta.services.file_processor.file_types import get_allowed_media_types, get_extension_to_mime_type_map, register_mime_types
from letta.services.file_processor.parser.markitdown_parser import MarkitdownFileParser
from letta.services.file_processor.parser.mistral_parser import MistralFileParser
from letta.settings import settings
from letta.utils import safe_create_file_processing_task, safe_create_task, sanitize_filename
from letta.validators import FileId, FolderId
# Module-level logger for folder route handlers.
logger = get_logger(__name__)
# Register all supported file types with Python's mimetypes module
register_mime_types()
# All folder endpoints below are mounted under this prefix.
router = APIRouter(prefix="/folders", tags=["folders"])
@router.get("/count", response_model=int, operation_id="count_folders")
async def count_folders(
server: "SyncServer" = Depends(get_letta_server),
headers: HeaderParams = Depends(get_headers),
):
"""
Count all data folders created by a user.
"""
actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
return await server.source_manager.size_async(actor=actor)
@router.get("/{folder_id}", response_model=Folder, operation_id="retrieve_folder")
async def retrieve_folder(
folder_id: FolderId,
server: "SyncServer" = Depends(get_letta_server),
headers: HeaderParams = Depends(get_headers),
):
"""
Get a folder by ID
"""
actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
folder = await server.source_manager.get_source_by_id(source_id=folder_id, actor=actor)
return folder
@router.get("/name/{folder_name}", response_model=str, operation_id="get_folder_by_name", deprecated=True)
async def get_folder_by_name(
folder_name: str,
server: "SyncServer" = Depends(get_letta_server),
headers: HeaderParams = Depends(get_headers),
):
"""
**Deprecated**: Please use the list endpoint `GET /v1/folders?name=` instead.
Get a folder by name.
"""
actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
folder = await server.source_manager.get_source_by_name(source_name=folder_name, actor=actor)
return folder.id
@router.get("/metadata", response_model=OrganizationSourcesStats, operation_id="retrieve_metadata")
async def retrieve_metadata(
server: "SyncServer" = Depends(get_letta_server),
headers: HeaderParams = Depends(get_headers),
include_detailed_per_source_metadata: bool = False,
):
"""
Get aggregated metadata for all folders in an organization.
Returns structured metadata including:
- Total number of folders
- Total number of files across all folders
- Total size of all files
- Per-source breakdown with file details (file_name, file_size per file) if include_detailed_per_source_metadata is True
"""
actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
return await server.file_manager.get_organization_sources_metadata(
actor=actor, include_detailed_per_source_metadata=include_detailed_per_source_metadata
)
@router.get("/", response_model=List[Folder], operation_id="list_folders")
async def list_folders(
before: Optional[str] = Query(
None, description="Folder ID cursor for pagination. Returns folders that come before this folder ID in the specified sort order"
),
after: Optional[str] = Query(
None, description="Folder ID cursor for pagination. Returns folders that come after this folder ID in the specified sort order"
),
limit: Optional[int] = Query(50, description="Maximum number of folders to return"),
order: Literal["asc", "desc"] = Query(
"asc", description="Sort order for folders by creation time. 'asc' for oldest first, 'desc' for newest first"
),
order_by: Literal["created_at"] = Query("created_at", description="Field to sort by"),
name: Optional[str] = Query(None, description="Folder name to filter by"),
server: "SyncServer" = Depends(get_letta_server),
headers: HeaderParams = Depends(get_headers),
):
"""
List all data folders created by a user.
"""
actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
return await server.source_manager.list_sources(
actor=actor, before=before, after=after, limit=limit, ascending=(order == "asc"), name=name
)
@router.post("/", response_model=Folder, operation_id="create_folder")
async def create_folder(
    folder_create: SourceCreate,
    server: "SyncServer" = Depends(get_letta_server),
    headers: HeaderParams = Depends(get_headers),
):
    """
    Create a new data folder.
    """
    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)

    # TODO: need to asyncify this
    if not folder_create.embedding_config:
        # No explicit config supplied: resolve one from a handle, falling back to the
        # server-wide default handle when the request did not specify one.
        if not folder_create.embedding:
            if settings.default_embedding_handle is None:
                raise LettaInvalidArgumentError(
                    "Must specify either embedding or embedding_config in request", argument_name="default_embedding_handle"
                )
            folder_create.embedding = settings.default_embedding_handle
        chunk_size = folder_create.embedding_chunk_size or constants.DEFAULT_EMBEDDING_CHUNK_SIZE
        folder_create.embedding_config = await server.get_embedding_config_from_handle_async(
            handle=folder_create.embedding,
            embedding_chunk_size=chunk_size,
            actor=actor,
        )

    new_folder = Source(
        name=folder_create.name,
        embedding_config=folder_create.embedding_config,
        description=folder_create.description,
        instructions=folder_create.instructions,
        metadata=folder_create.metadata,
    )
    return await server.source_manager.create_source(source=new_folder, actor=actor)
@router.patch("/{folder_id}", response_model=Folder, operation_id="modify_folder")
async def modify_folder(
    folder: SourceUpdate,
    folder_id: FolderId,
    server: "SyncServer" = Depends(get_letta_server),
    headers: HeaderParams = Depends(get_headers),
):
    """
    Update the name or documentation of an existing data folder.
    """
    # TODO: allow updating the handle/embedding config
    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
    # Existence/ownership check first; a missing folder raises and surfaces as 404.
    await server.source_manager.get_source_by_id(source_id=folder_id, actor=actor)
    updated = await server.source_manager.update_source(source_id=folder_id, source_update=folder, actor=actor)
    return updated
@router.delete("/{folder_id}", response_model=None, operation_id="delete_folder")
async def delete_folder(
    folder_id: FolderId,
    server: "SyncServer" = Depends(get_letta_server),
    headers: HeaderParams = Depends(get_headers),
):
    """
    Delete a data folder.

    Cleanup order: vector-store passages (Turbopuffer or Pinecone), then each
    attached agent's context window and sleeptime block, then the source itself.
    """
    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
    # The folder object is needed below for its name (sleeptime block label).
    folder = await server.source_manager.get_source_by_id(source_id=folder_id, actor=actor)
    agent_states = await server.source_manager.list_attached_agents(source_id=folder_id, actor=actor)
    # Vector-store cleanup: Turbopuffer takes precedence over Pinecone.
    if should_use_tpuf():
        logger.info(f"Deleting folder {folder_id} from Turbopuffer")
        from letta.helpers.tpuf_client import TurbopufferClient

        tpuf_client = TurbopufferClient()
        await tpuf_client.delete_source_passages(source_id=folder_id, organization_id=actor.organization_id)
    elif should_use_pinecone():
        logger.info(f"Deleting folder {folder_id} from pinecone index")
        await delete_source_records_from_pinecone_index(source_id=folder_id, actor=actor)
    for agent_state in agent_states:
        # Query files_agents directly to get exactly what was attached to this agent
        file_ids = await server.file_agent_manager.get_file_ids_for_agent_by_source(
            agent_id=agent_state.id, source_id=folder_id, actor=actor
        )
        if file_ids:
            await server.remove_files_from_context_window(agent_state=agent_state, file_ids=file_ids, actor=actor)
        # Sleeptime agents keep a memory block labeled after the folder; remove it too.
        if agent_state.enable_sleeptime:
            block = await server.agent_manager.get_block_with_label_async(agent_id=agent_state.id, block_label=folder.name, actor=actor)
            if block:
                await server.block_manager.delete_block_async(block.id, actor)
    await server.delete_source(source_id=folder_id, actor=actor)
@router.post("/{folder_id}/upload", response_model=FileMetadata, operation_id="upload_file_to_folder")
async def upload_file_to_folder(
    file: UploadFile,
    folder_id: FolderId,
    duplicate_handling: DuplicateFileHandling = Query(DuplicateFileHandling.SUFFIX, description="How to handle duplicate filenames"),
    name: Optional[str] = Query(None, description="Optional custom name to override the uploaded file's name"),
    server: "SyncServer" = Depends(get_letta_server),
    headers: HeaderParams = Depends(get_headers),
):
    """
    Upload a file to a data folder.
    """
    # --- Content-type validation -------------------------------------------------
    # (A previously dead `mimetypes.guess_type(...)` expression whose result was
    # discarded has been removed here.)
    allowed_media_types = get_allowed_media_types()

    # Normalize incoming Content-Type header (strip charset or any parameters).
    raw_ct = file.content_type or ""
    media_type = raw_ct.split(";", 1)[0].strip().lower()

    # If client didn't supply a Content-Type or it's not one of the allowed types,
    # attempt to infer from filename extension.
    if media_type not in allowed_media_types and file.filename:
        guessed, _ = mimetypes.guess_type(file.filename)
        media_type = (guessed or "").lower()

        # Fall back to the project's extension map for types mimetypes doesn't know.
        if media_type not in allowed_media_types:
            ext = PathLibPath(file.filename).suffix.lower()
            ext_map = get_extension_to_mime_type_map()
            media_type = ext_map.get(ext, media_type)

    # If still not allowed, reject with 415.
    if media_type not in allowed_media_types:
        raise LettaUnsupportedFileUploadError(
            message=(
                f"Unsupported file type: {media_type or 'unknown'} "
                f"(filename: {file.filename}). "
                f"Supported types: PDF, text files (.txt, .md), JSON, and code files (.py, .js, .java, etc.)."
            ),
        )

    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)

    # Read file bytes once; reused for both the Lettuce path and local processing.
    file_bytes = await file.read()

    # If enabled, delegate to Temporal workflow (Lettuce) and return its result.
    if settings.use_lettuce_for_file_uploads:
        from letta.services.lettuce import LettuceClient

        lettuce_client = await LettuceClient.create()
        result = await lettuce_client.upload_file_to_folder(
            folder_id=folder_id,
            actor_id=actor.id,
            file_name=file.filename,
            content=file_bytes,
            content_type=raw_ct or None,
            duplicate_handling=duplicate_handling,
            override_name=name,
        )
        # A None result means Lettuce declined the upload; fall through to local processing.
        if result is not None:
            return result.file_metadata

    folder = await server.source_manager.get_source_by_id(source_id=folder_id, actor=actor)

    content = file_bytes
    file_size_mb = len(content) / (1024 * 1024)
    from letta.log import get_logger

    logger = get_logger(__name__)
    logger.info(f"File upload to folder: loaded {file_size_mb:.2f} MB into memory, filename: {file.filename}")

    # Store original filename and handle duplicate logic.
    # Use custom name if provided, otherwise use the uploaded file's name.
    # If custom name is provided, use it directly (it's just metadata, not a filesystem path);
    # otherwise, sanitize the uploaded filename for security.
    original_filename = name if name else sanitize_filename(file.filename)  # Basic sanitization only

    # Check whether a file with the same original name already exists in this folder.
    existing_file = await server.file_manager.get_file_by_original_name_and_source(
        original_filename=original_filename, source_id=folder_id, actor=actor
    )

    unique_filename = None
    if existing_file:
        # Duplicate found, handle based on strategy.
        if duplicate_handling == DuplicateFileHandling.ERROR:
            raise LettaInvalidArgumentError(
                message=f"File '{original_filename}' already exists in folder '{folder.name}'",
                argument_name="duplicate_handling",
            )
        elif duplicate_handling == DuplicateFileHandling.SKIP:
            # Return existing file metadata with custom header to indicate it was skipped
            response = Response(
                content=existing_file.model_dump_json(), media_type="application/json", headers={"X-Upload-Result": "skipped"}
            )
            return response
        elif duplicate_handling == DuplicateFileHandling.REPLACE:
            # Delete the existing file and reuse its name for the replacement.
            await server.file_manager.delete_file(file_id=existing_file.id, actor=actor)
            unique_filename = original_filename

    if not unique_filename:
        # For SUFFIX (the default), generate a unique filename (adds a suffix if needed).
        unique_filename = await server.file_manager.generate_unique_filename(
            original_filename=original_filename, source=folder, organization_id=actor.organization_id
        )

    # Create file metadata in PARSING state; background processing advances the status.
    file_metadata = FileMetadata(
        source_id=folder_id,
        file_name=unique_filename,
        original_file_name=original_filename,
        file_path=None,
        file_type=mimetypes.guess_type(original_filename)[0] or file.content_type or "unknown",
        file_size=file.size,  # may be None if the client did not send a length (was a redundant conditional)
        processing_status=FileProcessingStatus.PARSING,
    )
    file_metadata = await server.file_manager.create_file(file_metadata, actor=actor)

    # TODO: Do we need to pull in the full agent_states? Can probably simplify here right?
    agent_states = await server.source_manager.list_attached_agents(source_id=folder_id, actor=actor)

    # Use cloud processing for all files (simple files always, complex files with Mistral key)
    logger.info("Running experimental cloud based file processing...")
    safe_create_file_processing_task(
        load_file_to_source_cloud(server, agent_states, content, folder_id, actor, folder.embedding_config, file_metadata),
        file_metadata=file_metadata,
        server=server,
        actor=actor,
        logger=logger,
        label="file_processor.process",
    )
    safe_create_task(sleeptime_document_ingest_async(server, folder_id, actor), label="sleeptime_document_ingest_async")
    return file_metadata
@router.get("/{folder_id}/agents", response_model=List[str], operation_id="list_agents_for_folder")
async def list_agents_for_folder(
    folder_id: FolderId,
    before: Optional[str] = Query(
        None,
        description="Agent ID cursor for pagination. Returns agents that come before this agent ID in the specified sort order",
    ),
    after: Optional[str] = Query(
        None,
        description="Agent ID cursor for pagination. Returns agents that come after this agent ID in the specified sort order",
    ),
    limit: Optional[int] = Query(50, description="Maximum number of agents to return"),
    order: Literal["asc", "desc"] = Query(
        "desc", description="Sort order for agents by creation time. 'asc' for oldest first, 'desc' for newest first"
    ),
    order_by: Literal["created_at"] = Query("created_at", description="Field to sort by"),
    server: SyncServer = Depends(get_letta_server),
    headers: HeaderParams = Depends(get_headers),
):
    """
    Get all agent IDs that have the specified folder attached.
    """
    requesting_actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
    # Translate the public sort order into the manager's boolean flag.
    sort_ascending = order == "asc"
    return await server.source_manager.get_agents_for_source_id(
        source_id=folder_id,
        before=before,
        after=after,
        limit=limit,
        ascending=sort_ascending,
        actor=requesting_actor,
    )
@router.get("/{folder_id}/passages", response_model=List[Passage], operation_id="list_folder_passages")
async def list_folder_passages(
    folder_id: FolderId,
    before: Optional[str] = Query(
        None,
        description="Passage ID cursor for pagination. Returns passages that come before this passage ID in the specified sort order",
    ),
    after: Optional[str] = Query(
        None,
        description="Passage ID cursor for pagination. Returns passages that come after this passage ID in the specified sort order",
    ),
    limit: Optional[int] = Query(100, description="Maximum number of passages to return"),
    order: Literal["asc", "desc"] = Query(
        "desc", description="Sort order for passages by creation time. 'asc' for oldest first, 'desc' for newest first"
    ),
    order_by: Literal["created_at"] = Query("created_at", description="Field to sort by"),
    server: SyncServer = Depends(get_letta_server),
    headers: HeaderParams = Depends(get_headers),
):
    """
    List all passages associated with a data folder.
    """
    requesting_actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
    sort_ascending = order == "asc"
    return await server.agent_manager.query_source_passages_async(
        actor=requesting_actor,
        source_id=folder_id,
        before=before,
        after=after,
        limit=limit,
        ascending=sort_ascending,
    )
@router.get("/{folder_id}/files", response_model=List[FileMetadata], operation_id="list_files_for_folder")
async def list_files_for_folder(
    folder_id: FolderId,
    before: Optional[str] = Query(
        None,
        description="File ID cursor for pagination. Returns files that come before this file ID in the specified sort order",
    ),
    after: Optional[str] = Query(
        None,
        description="File ID cursor for pagination. Returns files that come after this file ID in the specified sort order",
    ),
    limit: Optional[int] = Query(1000, description="Maximum number of files to return"),
    order: Literal["asc", "desc"] = Query(
        "desc", description="Sort order for files by creation time. 'asc' for oldest first, 'desc' for newest first"
    ),
    order_by: Literal["created_at"] = Query("created_at", description="Field to sort by"),
    include_content: bool = Query(False, description="Whether to include full file content"),
    server: "SyncServer" = Depends(get_letta_server),
    headers: HeaderParams = Depends(get_headers),
):
    """
    List paginated files associated with a data folder.
    """
    requesting_actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
    sort_ascending = order == "asc"
    return await server.file_manager.list_files(
        source_id=folder_id,
        before=before,
        after=after,
        limit=limit,
        ascending=sort_ascending,
        actor=requesting_actor,
        include_content=include_content,
        strip_directory_prefix=True,  # TODO: Reconsider this. This is purely for aesthetics.
    )
@router.get("/{folder_id}/files/{file_id}", response_model=FileMetadata, operation_id="retrieve_file")
async def retrieve_file(
    folder_id: FolderId,
    file_id: FileId,
    include_content: bool = Query(False, description="Whether to include full file content"),
    server: "SyncServer" = Depends(get_letta_server),
    headers: HeaderParams = Depends(get_headers),
):
    """
    Retrieve a file from a folder by ID.
    """
    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
    # NoResultFound will propagate and be handled as 404 by the global exception handler
    file_metadata = await server.file_manager.get_file_by_id(
        file_id=file_id, actor=actor, include_content=include_content, strip_directory_prefix=True
    )
    # A file reached through the wrong folder's URL is reported as not found.
    if file_metadata.source_id == folder_id:
        return file_metadata
    raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"File with id={file_id} not found in folder {folder_id}")
# @router.get("/{folder_id}/files/{file_id}", response_model=FileMetadata, operation_id="get_file_metadata")
# async def get_file_metadata(
# folder_id: str,
# file_id: str,
# include_content: bool = Query(False, description="Whether to include full file content"),
# server: "SyncServer" = Depends(get_letta_server),
# headers: HeaderParams = Depends(get_headers),
# ):
# """
# Retrieve metadata for a specific file by its ID.
# """
# actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
#
# # Get file metadata using the file manager
# file_metadata = await server.file_manager.get_file_by_id(
# file_id=file_id, actor=actor, include_content=include_content, strip_directory_prefix=True
# )
#
# if not file_metadata:
# raise HTTPException(status_code=404, detail=f"File with id={file_id} not found.")
#
# # Verify the file belongs to the specified folder
# if file_metadata.source_id != folder_id:
# raise HTTPException(status_code=404, detail=f"File with id={file_id} not found in folder {folder_id}.")
#
# if should_use_pinecone() and file_metadata.processing_status == FileProcessingStatus.EMBEDDING:
# ids = await list_pinecone_index_for_files(file_id=file_id, actor=actor)
# logger.info(
# f"Embedded chunks {len(ids)}/{file_metadata.total_chunks} for {file_id} ({file_metadata.file_name}) in organization {actor.organization_id}"
# )
#
# if len(ids) != file_metadata.chunks_embedded or len(ids) == file_metadata.total_chunks:
# if len(ids) != file_metadata.total_chunks:
# file_status = file_metadata.processing_status
# else:
# file_status = FileProcessingStatus.COMPLETED
# try:
# file_metadata = await server.file_manager.update_file_status(
# file_id=file_metadata.id, actor=actor, chunks_embedded=len(ids), processing_status=file_status
# )
# except ValueError as e:
# # state transition was blocked - this is a race condition
# # log it but don't fail the request since we're just reading metadata
# logger.warning(f"Race condition detected in get_file_metadata: {str(e)}")
# # return the current file state without updating
#
# return file_metadata
# It's redundant to include /delete in the URL path: the HTTP verb DELETE already implies that action.
# It's still good practice, however, to return a status code indicating whether the deletion succeeded.
@router.delete("/{folder_id}/{file_id}", status_code=204, operation_id="delete_file_from_folder")
async def delete_file_from_folder(
    folder_id: FolderId,
    file_id: FileId,
    server: "SyncServer" = Depends(get_letta_server),
    headers: HeaderParams = Depends(get_headers),
):
    """
    Delete a file from a folder.

    Removes the file record, evicts it from attached agents' context windows,
    deletes its passages from the configured vector store (Turbopuffer or
    Pinecone), and re-triggers sleeptime ingestion in the background.
    """
    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
    # Delete the DB record first; the returned object carries the id used below.
    deleted_file = await server.file_manager.delete_file(file_id=file_id, actor=actor)
    await server.remove_file_from_context_windows(source_id=folder_id, file_id=deleted_file.id, actor=actor)
    # Vector-store cleanup: Turbopuffer takes precedence over Pinecone.
    if should_use_tpuf():
        logger.info(f"Deleting file {file_id} from Turbopuffer")
        from letta.helpers.tpuf_client import TurbopufferClient

        tpuf_client = TurbopufferClient()
        await tpuf_client.delete_file_passages(source_id=folder_id, file_id=file_id, organization_id=actor.organization_id)
    elif should_use_pinecone():
        logger.info(f"Deleting file {file_id} from pinecone index")
        await delete_file_records_from_pinecone_index(file_id=file_id, actor=actor)
    # Fire-and-forget: refresh sleeptime ingestion now that the file is gone.
    safe_create_task(sleeptime_document_ingest_async(server, folder_id, actor, clear_history=True), label="document_ingest_after_delete")
async def load_file_to_source_async(server: SyncServer, source_id: str, job_id: str, filename: str, bytes: bytes, actor: User):
    """Persist uploaded bytes to a temp file and hand the path to ``server.load_file_to_source``."""
    # NOTE: the parameter shadows the builtin `bytes`; alias it locally for clarity.
    payload = bytes
    # The temporary directory (and the file inside it) is removed when the context exits.
    with tempfile.TemporaryDirectory() as tmpdirname:
        file_path = os.path.join(tmpdirname, filename)

        # Write the file off the event loop to avoid blocking it.
        def _write_file():
            with open(file_path, "wb") as buffer:
                buffer.write(payload)

        await asyncio.to_thread(_write_file)

        # Pass the on-disk file to load_file_to_source while the temp dir still exists.
        await server.load_file_to_source(source_id, file_path, job_id, actor)
async def sleeptime_document_ingest_async(server: SyncServer, source_id: str, actor: User, clear_history: bool = False):
    """Run sleeptime document ingestion for every sleeptime-enabled agent attached to a source."""
    src = await server.source_manager.get_source_by_id(source_id=source_id, actor=actor)
    attached_agents = await server.source_manager.list_attached_agents(source_id=source_id, actor=actor)
    for agent_state in attached_agents:
        # Only sleeptime-enabled agents participate in ingestion.
        if not agent_state.enable_sleeptime:
            continue
        await server.sleeptime_document_ingest_async(agent_state, src, actor, clear_history)
@trace_method
async def load_file_to_source_cloud(
    server: SyncServer,
    agent_states: List[AgentState],
    content: bytes,
    source_id: str,
    actor: User,
    embedding_config: EmbeddingConfig,
    file_metadata: FileMetadata,
):
    """Parse, chunk, and embed an uploaded file with the configured parser and embedder."""
    # Prefer Mistral OCR parsing whenever an API key is configured; otherwise markitdown.
    file_parser = MistralFileParser() if settings.mistral_api_key else MarkitdownFileParser()

    # Embedder precedence: Turbopuffer, then Pinecone, then OpenAI.
    if should_use_tpuf():
        from letta.services.file_processor.embedder.turbopuffer_embedder import TurbopufferEmbedder

        embedder = TurbopufferEmbedder(embedding_config=embedding_config)
    elif should_use_pinecone():
        embedder = PineconeEmbedder(embedding_config=embedding_config)
    else:
        embedder = OpenAIEmbedder(embedding_config=embedding_config)

    processor = FileProcessor(file_parser=file_parser, embedder=embedder, actor=actor)
    await processor.process(agent_states=agent_states, source_id=source_id, content=content, file_metadata=file_metadata)
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/server/rest_api/routers/v1/folders.py",
"license": "Apache License 2.0",
"lines": 545,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/server/rest_api/routers/v1/internal_templates.py | from typing import List, Optional
from fastapi import APIRouter, Body, Depends, Query
from pydantic import BaseModel
from letta.schemas.agent import AgentState, InternalTemplateAgentCreate
from letta.schemas.block import Block, InternalTemplateBlockCreate
from letta.schemas.group import Group, InternalTemplateGroupCreate
from letta.server.rest_api.dependencies import HeaderParams, get_headers, get_letta_server
from letta.server.server import SyncServer
# Internal-only endpoints used by the template system; not part of the public API surface.
router = APIRouter(prefix="/_internal_templates", tags=["_internal_templates"])
@router.post("/groups", response_model=Group, operation_id="create_internal_template_group")
async def create_group(
    group: InternalTemplateGroupCreate = Body(...),
    server: "SyncServer" = Depends(get_letta_server),
    headers: HeaderParams = Depends(get_headers),
):
    """
    Create a new multi-agent group with the specified configuration.
    """
    # Resolve the acting user, then hand creation off to the group manager.
    acting_user = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
    return await server.group_manager.create_group_async(group, actor=acting_user)
@router.post("/agents", response_model=AgentState, operation_id="create_internal_template_agent")
async def create_agent(
    agent: InternalTemplateAgentCreate = Body(...),
    server: "SyncServer" = Depends(get_letta_server),
    headers: HeaderParams = Depends(get_headers),
):
    """
    Create a new agent with template-related fields.
    """
    acting_user = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
    # Default to ignore_invalid_tools=True for template-based agent creation
    return await server.agent_manager.create_agent_async(agent, actor=acting_user, ignore_invalid_tools=True)
@router.post("/blocks", response_model=Block, operation_id="create_internal_template_block")
async def create_block(
    block: InternalTemplateBlockCreate = Body(...),
    server: "SyncServer" = Depends(get_letta_server),
    headers: HeaderParams = Depends(get_headers),
):
    """
    Create a new block with template-related fields.
    """
    acting_user = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
    # Convert the template-create schema into a plain Block before persisting.
    new_block = Block(**block.model_dump())
    return await server.block_manager.create_or_update_block_async(new_block, actor=acting_user)
@router.post("/blocks/batch", response_model=List[Block], operation_id="create_internal_template_blocks_batch")
async def create_blocks_batch(
    blocks: List[InternalTemplateBlockCreate] = Body(...),
    server: "SyncServer" = Depends(get_letta_server),
    headers: HeaderParams = Depends(get_headers),
):
    """
    Create multiple blocks with template-related fields.
    """
    acting_user = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
    # Blocks are created sequentially, preserving the order of the request payload.
    return [
        await server.block_manager.create_or_update_block_async(Block(**block_create.model_dump()), actor=acting_user)
        for block_create in blocks
    ]
class DeploymentEntity(BaseModel):
    """A deployment entity."""

    # Primary identifier of the underlying block/agent/group row.
    id: str
    # One of "block", "agent", or "group".
    type: str
    # Human-readable name; groups report None, blocks fall back to template_name/label.
    name: Optional[str] = None
    description: Optional[str] = None
    # Deployment-template linkage fields, when present on the ORM row.
    entity_id: Optional[str] = None
    project_id: Optional[str] = None
class ListDeploymentEntitiesResponse(BaseModel):
    """Response model for listing deployment entities."""

    # Entities found for the deployment (possibly filtered by type).
    entities: List[DeploymentEntity] = []
    total_count: int
    deployment_id: str
    # Human-readable summary of the query result.
    message: str
class DeleteDeploymentResponse(BaseModel):
    """Response model for delete deployment operation."""

    # IDs of each entity kind that were successfully deleted.
    deleted_blocks: List[str] = []
    deleted_agents: List[str] = []
    deleted_groups: List[str] = []
    # Human-readable summary of the deletion outcome.
    message: str
@router.get("/deployment/{deployment_id}", response_model=ListDeploymentEntitiesResponse, operation_id="list_deployment_entities")
async def list_deployment_entities(
    deployment_id: str,
    server: "SyncServer" = Depends(get_letta_server),
    headers: HeaderParams = Depends(get_headers),
    entity_types: Optional[List[str]] = Query(None, description="Filter by entity types (block, agent, group)"),
):
    """
    List all entities (blocks, agents, groups) with the specified deployment_id.
    Optionally filter by entity types.
    """
    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
    entities = []

    # Parse entity_types filter - support both array and comma-separated string
    allowed_types = {"block", "agent", "group"}
    if entity_types is None:
        # If no filter specified, include all types
        types_to_include = allowed_types
    else:
        # Handle comma-separated strings in a single item
        if len(entity_types) == 1 and "," in entity_types[0]:
            entity_types = [t.strip() for t in entity_types[0].split(",")]
        # Validate and filter types
        types_to_include = {t.lower() for t in entity_types if t.lower() in allowed_types}
        if not types_to_include:
            types_to_include = allowed_types  # Default to all if invalid types provided

    # BUGFIX: these imports were previously nested inside the "block" branch, so a
    # request filtered to only agents and/or groups raised NameError on
    # `select`/`db_registry`. Hoist them so every branch can use them.
    from sqlalchemy import select

    from letta.server.db import db_registry

    # Query blocks if requested
    if "block" in types_to_include:
        from letta.orm.block import Block as BlockModel

        async with db_registry.async_session() as session:
            block_query = select(BlockModel).where(
                BlockModel.deployment_id == deployment_id, BlockModel.organization_id == actor.organization_id
            )
            result = await session.execute(block_query)
            blocks = result.scalars().all()

            for block in blocks:
                entities.append(
                    DeploymentEntity(
                        id=block.id,
                        type="block",
                        name=getattr(block, "template_name", None) or getattr(block, "label", None),
                        description=block.description,
                        entity_id=getattr(block, "entity_id", None),
                        project_id=getattr(block, "project_id", None),
                    )
                )

    # Query agents if requested
    if "agent" in types_to_include:
        from letta.orm.agent import Agent as AgentModel

        async with db_registry.async_session() as session:
            agent_query = select(AgentModel).where(
                AgentModel.deployment_id == deployment_id, AgentModel.organization_id == actor.organization_id
            )
            result = await session.execute(agent_query)
            agents = result.scalars().all()

            for agent in agents:
                entities.append(
                    DeploymentEntity(
                        id=agent.id,
                        type="agent",
                        name=agent.name,
                        description=agent.description,
                        entity_id=getattr(agent, "entity_id", None),
                        project_id=getattr(agent, "project_id", None),
                    )
                )

    # Query groups if requested
    if "group" in types_to_include:
        from letta.orm.group import Group as GroupModel

        async with db_registry.async_session() as session:
            group_query = select(GroupModel).where(
                GroupModel.deployment_id == deployment_id, GroupModel.organization_id == actor.organization_id
            )
            result = await session.execute(group_query)
            groups = result.scalars().all()

            for group in groups:
                entities.append(
                    DeploymentEntity(
                        id=group.id,
                        type="group",
                        name=None,  # Groups don't have a name field
                        description=group.description,
                        entity_id=getattr(group, "entity_id", None),
                        project_id=getattr(group, "project_id", None),
                    )
                )

    message = f"Found {len(entities)} entities for deployment {deployment_id}"
    if entity_types:
        message += f" (filtered by types: {', '.join(types_to_include)})"

    return ListDeploymentEntitiesResponse(entities=entities, total_count=len(entities), deployment_id=deployment_id, message=message)
@router.delete("/deployment/{deployment_id}", response_model=DeleteDeploymentResponse, operation_id="delete_deployment")
async def delete_deployment(
    deployment_id: str,
    server: "SyncServer" = Depends(get_letta_server),
    headers: HeaderParams = Depends(get_headers),
):
    """
    Delete all entities (blocks, agents, groups) with the specified deployment_id.
    Deletion order: blocks -> agents -> groups to maintain referential integrity.
    """
    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
    # IDs successfully deleted, reported back to the caller.
    deleted_blocks = []
    deleted_agents = []
    deleted_groups = []
    # First delete blocks
    from sqlalchemy import select

    from letta.orm.block import Block as BlockModel
    from letta.server.db import db_registry

    async with db_registry.async_session() as session:
        # Get all blocks with the deployment_id (scoped to the caller's organization)
        block_query = select(BlockModel).where(
            BlockModel.deployment_id == deployment_id, BlockModel.organization_id == actor.organization_id
        )
        result = await session.execute(block_query)
        blocks = result.scalars().all()
        for block in blocks:
            try:
                await server.block_manager.delete_block_async(block.id, actor)
                deleted_blocks.append(block.id)
            except Exception as e:
                # Continue deleting other blocks even if one fails
                # NOTE(review): failures go to stdout via print; consider a module logger.
                print(f"Failed to delete block {block.id}: {e}")
    # Then delete agents
    from letta.orm.agent import Agent as AgentModel

    async with db_registry.async_session() as session:
        # Get all agents with the deployment_id
        agent_query = select(AgentModel).where(
            AgentModel.deployment_id == deployment_id, AgentModel.organization_id == actor.organization_id
        )
        result = await session.execute(agent_query)
        agents = result.scalars().all()
        for agent in agents:
            try:
                await server.agent_manager.delete_agent_async(agent.id, actor)
                deleted_agents.append(agent.id)
            except Exception as e:
                # Continue deleting other agents even if one fails
                print(f"Failed to delete agent {agent.id}: {e}")
    # Finally delete groups
    from letta.orm.group import Group as GroupModel

    async with db_registry.async_session() as session:
        # Get all groups with the deployment_id
        group_query = select(GroupModel).where(
            GroupModel.deployment_id == deployment_id, GroupModel.organization_id == actor.organization_id
        )
        result = await session.execute(group_query)
        groups = result.scalars().all()
        for group in groups:
            try:
                await server.group_manager.delete_group_async(group.id, actor)
                deleted_groups.append(group.id)
            except Exception as e:
                # Continue deleting other groups even if one fails
                print(f"Failed to delete group {group.id}: {e}")
    total_deleted = len(deleted_blocks) + len(deleted_agents) + len(deleted_groups)
    message = f"Successfully deleted {total_deleted} entities from deployment {deployment_id}"
    return DeleteDeploymentResponse(
        deleted_blocks=deleted_blocks, deleted_agents=deleted_agents, deleted_groups=deleted_groups, message=message
    )
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/server/rest_api/routers/v1/internal_templates.py",
"license": "Apache License 2.0",
"lines": 241,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/server/rest_api/routers/v1/telemetry.py | from typing import Optional
from fastapi import APIRouter, Depends
from letta.schemas.provider_trace import ProviderTrace
from letta.server.rest_api.dependencies import HeaderParams, get_headers, get_letta_server
from letta.server.server import SyncServer
from letta.settings import settings
# All telemetry endpoints are mounted under /telemetry.
router = APIRouter(prefix="/telemetry", tags=["telemetry"])
@router.get("/{step_id}", response_model=Optional[ProviderTrace], operation_id="retrieve_provider_trace", deprecated=True)
async def retrieve_provider_trace(
    step_id: str,
    server: SyncServer = Depends(get_letta_server),
    headers: HeaderParams = Depends(get_headers),
):
    """
    **DEPRECATED**: Use `GET /steps/{step_id}/trace` instead.
    Retrieve provider trace by step ID.
    """
    # Tracing disabled: nothing to look up.
    if not settings.track_provider_trace:
        return None
    try:
        acting_user = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
        return await server.telemetry_manager.get_provider_trace_by_step_id_async(step_id=step_id, actor=acting_user)
    except Exception:
        # Best-effort endpoint (deprecated): swallow lookup failures and return None.
        return None
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/server/rest_api/routers/v1/telemetry.py",
"license": "Apache License 2.0",
"lines": 26,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:letta/server/rest_api/streaming_response.py | # Alternative implementation of StreamingResponse that allows for effectively
# stremaing HTTP trailers, as we cannot set codes after the initial response.
# Taken from: https://github.com/fastapi/fastapi/discussions/10138#discussioncomment-10377361
import asyncio
import json
import re
from collections.abc import AsyncIterator
from datetime import datetime, timezone
from typing import Dict, Optional
from uuid import uuid4
import anyio
from fastapi import HTTPException
from fastapi.responses import StreamingResponse
from starlette.types import Send
from letta.errors import LettaUnexpectedStreamCancellationError, PendingApprovalError
from letta.log import get_logger
from letta.schemas.enums import RunStatus
from letta.schemas.letta_message import LettaPing
from letta.schemas.user import User
from letta.server.rest_api.utils import capture_sentry_exception
from letta.services.run_manager import RunManager
from letta.settings import settings
from letta.utils import safe_create_task
logger = get_logger(__name__)
# Global registry of cancellation events per run_id
# Note: Events are small and we don't bother cleaning them up
_cancellation_events: Dict[str, asyncio.Event] = {}


def get_cancellation_event_for_run(run_id: str) -> asyncio.Event:
    """Return the cancellation event for *run_id*, creating it on first access."""
    # setdefault gives get-or-create semantics in a single dict operation.
    return _cancellation_events.setdefault(run_id, asyncio.Event())
class RunCancelledException(Exception):
"""Exception raised when a run is explicitly cancelled (not due to client timeout)"""
def __init__(self, run_id: str, message: str | None = None):
self.run_id = run_id
super().__init__(message or f"Run {run_id} was explicitly cancelled")
async def add_keepalive_to_stream(
    stream_generator: AsyncIterator[str | bytes],
    run_id: str,
    keepalive_interval: float = 30.0,
) -> AsyncIterator[str | bytes]:
    """
    Adds periodic keepalive messages to a stream to prevent connection timeouts.

    Sends a keepalive ping every `keepalive_interval` seconds, regardless of
    whether data is flowing. This ensures connections stay alive during long
    operations like tool execution.

    Args:
        stream_generator: The original stream generator to wrap
        run_id: ID of the run the stream belongs to; echoed in each ping message
        keepalive_interval: Seconds between keepalive messages (default: 30)

    Yields:
        Original stream chunks interspersed with keepalive messages
    """
    # Use a bounded queue to decouple reading from keepalive while preserving backpressure
    # A small maxsize prevents unbounded memory growth if the client is slow
    queue: asyncio.Queue = asyncio.Queue(maxsize=1)
    stream_exhausted = False
    # Last seq_id seen in the stream; included in pings so clients can track progress.
    last_seq_id = None

    async def stream_reader():
        """Read from the original stream and put items in the queue."""
        nonlocal stream_exhausted
        try:
            async for item in stream_generator:
                await queue.put(("data", item))
        finally:
            # Always enqueue the end marker, even if the generator raised or was cancelled,
            # so the consumer loop below can terminate.
            stream_exhausted = True
            await queue.put(("end", None))

    # Start the stream reader task
    reader_task = safe_create_task(stream_reader(), label="stream_reader")
    try:
        while True:
            try:
                # Wait for data with a timeout equal to keepalive interval
                msg_type, data = await asyncio.wait_for(queue.get(), timeout=keepalive_interval)
                if msg_type == "end":
                    # Stream finished
                    break
                elif msg_type == "data":
                    # Track seq_id from chunks for ping messages
                    if isinstance(data, str):
                        seq_id_match = re.search(r'"seq_id":(\d+)', data)  # Look for "seq_id":<number> pattern in the SSE chunk
                        if seq_id_match:
                            last_seq_id = int(seq_id_match.group(1))
                    yield data
            except asyncio.TimeoutError:
                # No data received within keepalive interval
                if not stream_exhausted:
                    # Send keepalive ping with the last seq_id to allow clients to track progress
                    yield f"data: {LettaPing(id=f'ping-{uuid4()}', date=datetime.now(timezone.utc), run_id=run_id, seq_id=last_seq_id).model_dump_json()}\n\n"
                else:
                    # Stream is done but queue might be processing
                    # Check if there's anything left
                    try:
                        msg_type, data = queue.get_nowait()
                        if msg_type == "end":
                            break
                        elif msg_type == "data":
                            yield data
                    except asyncio.QueueEmpty:
                        # Really done now
                        break
    finally:
        # Clean up the reader task
        reader_task.cancel()
        try:
            await reader_task
        except asyncio.CancelledError:
            # Expected: cancelling the reader is the normal shutdown path.
            pass
# TODO (cliandy) wrap this and handle types
async def cancellation_aware_stream_wrapper(
    stream_generator: AsyncIterator[str | bytes],
    run_manager: RunManager,
    run_id: str,
    actor: User,
    cancellation_check_interval: float = 0.5,
    cancellation_event: Optional[asyncio.Event] = None,
) -> AsyncIterator[str | bytes]:
    """
    Wraps a stream generator to provide real-time run cancellation checking.

    This wrapper periodically checks for run cancellation while streaming and
    can interrupt the stream at any point, not just at step boundaries.

    Args:
        stream_generator: The original stream generator to wrap
        run_manager: Run manager instance for checking run status
        run_id: ID of the run to monitor for cancellation
        actor: User/actor making the request
        cancellation_check_interval: How often to check for cancellation (seconds)
        cancellation_event: Optional shared event; set when cancellation is detected so
            other tasks watching the same run can react

    Yields:
        Stream chunks from the original generator until cancelled

    Raises:
        asyncio.CancelledError: If the run is cancelled during streaming
    """
    last_cancellation_check = asyncio.get_event_loop().time()
    try:
        async for chunk in stream_generator:
            # Check for cancellation periodically (not on every chunk for performance)
            current_time = asyncio.get_event_loop().time()
            if current_time - last_cancellation_check >= cancellation_check_interval:
                try:
                    run = await run_manager.get_run_by_id(run_id=run_id, actor=actor)
                    if run.status == RunStatus.cancelled:
                        logger.info(f"Stream cancelled for run {run_id}, interrupting stream")
                        # Signal cancellation via shared event if available
                        if cancellation_event:
                            cancellation_event.set()
                            logger.info(f"Set cancellation event for run {run_id}")
                        # Send cancellation event to client
                        stop_event = {"message_type": "stop_reason", "stop_reason": "cancelled"}
                        yield f"data: {json.dumps(stop_event)}\n\n"
                        # Inject exception INTO the generator so its except blocks can catch it
                        try:
                            await stream_generator.athrow(RunCancelledException(run_id, f"Run {run_id} was cancelled"))
                        except (StopAsyncIteration, RunCancelledException):
                            # Generator closed gracefully or raised the exception back
                            break
                except RunCancelledException:
                    # Re-raise cancellation immediately, don't catch it
                    raise
                except Exception as e:
                    # Log warning but don't fail the stream if cancellation check fails
                    logger.warning(f"Failed to check run cancellation for run {run_id}: {e}")
                last_cancellation_check = current_time
            yield chunk
    except RunCancelledException:
        # Don't re-raise - we already injected the exception into the generator
        # The generator has handled it and set its stream_was_cancelled flag
        logger.info(f"Stream for run {run_id} was explicitly cancelled and cleaned up")
        # Don't raise - let it exit gracefully
    except asyncio.CancelledError:
        # Re-raise CancelledError (likely client timeout) to ensure proper cleanup
        logger.info(f"Stream for run {run_id} was cancelled (likely client timeout) and cleaned up")
        raise
    except Exception as e:
        logger.error(f"Error in cancellation-aware stream wrapper for run {run_id}: {e}")
        raise
class StreamingResponseWithStatusCode(StreamingResponse):
    """
    Variation of StreamingResponse that can dynamically decide the HTTP status code,
    based on the return value of the content iterator (parameter `content`).

    Expects the content to yield either just str content as per the original `StreamingResponse`
    or else tuples of (`content`: `str`, `status_code`: `int`). The first chunk's status code
    (if given) becomes the response status; a non-2xx code yielded mid-stream aborts the stream.
    """

    body_iterator: AsyncIterator[str | bytes]
    # Set once `http.response.start` has been sent; the status code is frozen after that.
    response_started: bool = False
    # Whether the client is still connected; body sends are skipped after a disconnect.
    _client_connected: bool = True

    async def stream_response(self, send: Send) -> None:
        """Stream the body, optionally shielding the worker from client-driven cancellation."""
        if settings.use_asyncio_shield:
            try:
                # Shield so a client disconnect does not cancel the underlying work.
                await asyncio.shield(self._protected_stream_response(send))
            except asyncio.CancelledError:
                logger.info("Stream response was cancelled, but shielded task should continue")
            except (anyio.ClosedResourceError, anyio.BrokenResourceError):
                logger.info("Client disconnected, but shielded task should continue")
                self._client_connected = False
            except PendingApprovalError as e:
                # This is an expected error, don't log as error
                logger.info(f"Pending approval conflict in stream response: {e}")
                # Re-raise as HTTPException for proper client handling
                raise HTTPException(
                    status_code=409, detail={"code": "PENDING_APPROVAL", "message": str(e), "pending_request_id": e.pending_request_id}
                )
            except Exception as e:
                logger.error(f"Error in protected stream response: {e}")
                raise
        else:
            await self._protected_stream_response(send)

    async def _protected_stream_response(self, send: Send) -> None:
        """Drain `body_iterator` and relay chunks to the ASGI `send` callable.

        The first chunk (optionally a `(content, status_code)` tuple) determines the
        HTTP status; later non-2xx tuples abort the stream and are surfaced as an
        SSE error event (or a 500 start if the response has not begun).
        """
        more_body = True
        try:
            first_chunk = await self.body_iterator.__anext__()
            # FIX: lazy %-style logging args; passing the value with no placeholder in the
            # message raised a formatting error inside the logging module.
            logger.debug("stream_response first chunk: %s", first_chunk)
            if isinstance(first_chunk, tuple):
                first_chunk_content, self.status_code = first_chunk
            else:
                first_chunk_content = first_chunk
            if isinstance(first_chunk_content, str):
                first_chunk_content = first_chunk_content.encode(self.charset)
            try:
                await send(
                    {
                        "type": "http.response.start",
                        "status": self.status_code,
                        "headers": self.raw_headers,
                    }
                )
                self.response_started = True
                await send(
                    {
                        "type": "http.response.body",
                        "body": first_chunk_content,
                        "more_body": more_body,
                    }
                )
            except (anyio.ClosedResourceError, anyio.BrokenResourceError):
                logger.info("Client disconnected during initial response, continuing processing without sending more chunks")
                self._client_connected = False

            async for chunk in self.body_iterator:
                if isinstance(chunk, tuple):
                    content, status_code = chunk
                    if status_code // 100 != 2:
                        # An error occurred mid-stream
                        if not isinstance(content, bytes):
                            content = content.encode(self.charset)
                        more_body = False
                        raise Exception(f"An exception occurred mid-stream with status code {status_code} with content {content}")
                else:
                    content = chunk
                if isinstance(content, str):
                    content = content.encode(self.charset)
                more_body = True
                # Only attempt to send if client is still connected
                if self._client_connected:
                    try:
                        await send(
                            {
                                "type": "http.response.body",
                                "body": content,
                                "more_body": more_body,
                            }
                        )
                    except (anyio.ClosedResourceError, anyio.BrokenResourceError):
                        logger.info("Client disconnected, continuing processing without sending more data")
                        self._client_connected = False

        # Handle explicit run cancellations (should not throw error)
        except RunCancelledException as exc:
            logger.info(f"Stream was explicitly cancelled for run {exc.run_id}")
            # Handle explicit cancellation gracefully without error
            more_body = False
            cancellation_resp = {"message": "Run was cancelled"}
            cancellation_event = f"event: cancelled\ndata: {json.dumps(cancellation_resp)}\n\n".encode(self.charset)
            if not self.response_started:
                await send(
                    {
                        "type": "http.response.start",
                        "status": 200,  # Use 200 for graceful cancellation
                        "headers": self.raw_headers,
                    }
                )
                # NOTE(review): re-raising here skips sending the cancellation body below
                # on this path — looks intentional (propagate to the shield handler), confirm.
                raise
            if self._client_connected:
                try:
                    await send(
                        {
                            "type": "http.response.body",
                            "body": cancellation_event,
                            "more_body": more_body,
                        }
                    )
                except (anyio.ClosedResourceError, anyio.BrokenResourceError):
                    self._client_connected = False
            return

        # Handle client timeouts (should throw error to inform user)
        except asyncio.CancelledError as exc:
            logger.warning("Stream was terminated due to unexpected cancellation from server")
            # Handle unexpected cancellation with error
            more_body = False
            capture_sentry_exception(exc)
            raise LettaUnexpectedStreamCancellationError("Stream was terminated due to unexpected cancellation from server")

        except Exception as exc:
            logger.exception(f"Unhandled Streaming Error: {str(exc)}")
            more_body = False
            # error_resp = {"error": {"message": str(exc)}}
            error_resp = {"error": str(exc), "code": "INTERNAL_SERVER_ERROR"}
            error_event = f"event: error\ndata: {json.dumps(error_resp)}\n\n".encode(self.charset)
            # FIX: lazy %-style logging args (same issue as the first-chunk debug log).
            logger.debug("response_started: %s", self.response_started)
            if not self.response_started:
                await send(
                    {
                        "type": "http.response.start",
                        "status": 500,
                        "headers": self.raw_headers,
                    }
                )
                # NOTE(review): re-raising here means the error body below is never sent on
                # this path; only the 500 start reaches the client — confirm intended.
                raise
            if self._client_connected:
                try:
                    await send(
                        {
                            "type": "http.response.body",
                            "body": error_event,
                            "more_body": more_body,
                        }
                    )
                except (anyio.ClosedResourceError, anyio.BrokenResourceError):
                    self._client_connected = False
            capture_sentry_exception(exc)
            return

        # Normal completion: close the response body.
        if more_body and self._client_connected:
            try:
                await send({"type": "http.response.body", "body": b"", "more_body": False})
            except (anyio.ClosedResourceError, anyio.BrokenResourceError):
                self._client_connected = False
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/server/rest_api/streaming_response.py",
"license": "Apache License 2.0",
"lines": 332,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/services/agent_serialization_manager.py | import uuid
from datetime import datetime, timezone
from typing import Any, Dict, List, Optional
from letta.constants import MCP_TOOL_TAG_NAME_PREFIX
from letta.errors import (
AgentExportIdMappingError,
AgentExportProcessingError,
AgentFileExportError,
AgentFileImportError,
AgentNotFoundForExportError,
)
from letta.helpers.pinecone_utils import should_use_pinecone
from letta.helpers.tpuf_client import should_use_tpuf
from letta.log import get_logger
from letta.schemas.agent import AgentState, CreateAgent
from letta.schemas.agent_file import (
AgentFileSchema,
AgentSchema,
BlockSchema,
FileAgentSchema,
FileSchema,
GroupSchema,
ImportResult,
MCPServerSchema,
MessageSchema,
SkillSchema,
SourceSchema,
ToolSchema,
)
from letta.schemas.block import Block
from letta.schemas.embedding_config import EmbeddingConfig
from letta.schemas.enums import FileProcessingStatus
from letta.schemas.file import FileMetadata
from letta.schemas.group import Group, GroupCreate
from letta.schemas.llm_config import LLMConfig
from letta.schemas.mcp import MCPServer
from letta.schemas.message import Message
from letta.schemas.source import Source
from letta.schemas.tool import Tool
from letta.schemas.user import User
from letta.services.agent_manager import AgentManager
from letta.services.block_manager import BlockManager
from letta.services.file_manager import FileManager
from letta.services.file_processor.embedder.openai_embedder import OpenAIEmbedder
from letta.services.file_processor.embedder.pinecone_embedder import PineconeEmbedder
from letta.services.file_processor.file_processor import FileProcessor
from letta.services.file_processor.parser.markitdown_parser import MarkitdownFileParser
from letta.services.file_processor.parser.mistral_parser import MistralFileParser
from letta.services.files_agents_manager import FileAgentManager
from letta.services.group_manager import GroupManager
from letta.services.mcp_manager import MCPManager
from letta.services.message_manager import MessageManager
from letta.services.source_manager import SourceManager
from letta.services.tool_manager import ToolManager
from letta.settings import settings
from letta.utils import get_latest_alembic_revision, safe_create_task
logger = get_logger(__name__)
class AgentSerializationManager:
"""
Manages export and import of agent files between database and AgentFileSchema format.
Handles:
- ID mapping between database IDs and human-readable file IDs
- Coordination across multiple entity managers
- Transaction safety during imports
- Referential integrity validation
"""
def __init__(
self,
agent_manager: AgentManager,
tool_manager: ToolManager,
source_manager: SourceManager,
block_manager: BlockManager,
group_manager: GroupManager,
mcp_manager: MCPManager,
file_manager: FileManager,
file_agent_manager: FileAgentManager,
message_manager: MessageManager,
):
self.agent_manager = agent_manager
self.tool_manager = tool_manager
self.source_manager = source_manager
self.block_manager = block_manager
self.group_manager = group_manager
self.mcp_manager = mcp_manager
self.file_manager = file_manager
self.file_agent_manager = file_agent_manager
self.message_manager = message_manager
self.file_parser = MistralFileParser() if settings.mistral_api_key else MarkitdownFileParser()
# ID mapping state for export
self._db_to_file_ids: Dict[str, str] = {}
# Counters for generating Stripe-style IDs
self._id_counters: Dict[str, int] = {
AgentSchema.__id_prefix__: 0,
GroupSchema.__id_prefix__: 0,
BlockSchema.__id_prefix__: 0,
FileSchema.__id_prefix__: 0,
SourceSchema.__id_prefix__: 0,
ToolSchema.__id_prefix__: 0,
MessageSchema.__id_prefix__: 0,
FileAgentSchema.__id_prefix__: 0,
MCPServerSchema.__id_prefix__: 0,
}
def _reset_state(self):
"""Reset internal state for a new operation"""
self._db_to_file_ids.clear()
for key in self._id_counters:
self._id_counters[key] = 0
def _generate_file_id(self, entity_type: str) -> str:
"""Generate a Stripe-style ID for the given entity type"""
counter = self._id_counters[entity_type]
file_id = f"{entity_type}-{counter}"
self._id_counters[entity_type] += 1
return file_id
def _map_db_to_file_id(self, db_id: str, entity_type: str, allow_new: bool = True) -> str:
"""Map a database UUID to a file ID, creating if needed (export only)"""
if db_id in self._db_to_file_ids:
return self._db_to_file_ids[db_id]
if not allow_new:
raise AgentExportIdMappingError(db_id, entity_type)
file_id = self._generate_file_id(entity_type)
self._db_to_file_ids[db_id] = file_id
return file_id
def _extract_unique_tools(self, agent_states: List[AgentState]) -> List:
"""Extract unique tools across all agent states by ID"""
all_tools = []
for agent_state in agent_states:
if agent_state.tools:
all_tools.extend(agent_state.tools)
unique_tools = {}
for tool in all_tools:
unique_tools[tool.id] = tool
return sorted(unique_tools.values(), key=lambda x: x.name)
def _extract_unique_blocks(self, agent_states: List[AgentState]) -> List:
"""Extract unique blocks across all agent states by ID"""
all_blocks = []
for agent_state in agent_states:
if agent_state.memory and agent_state.memory.blocks:
all_blocks.extend(agent_state.memory.blocks)
unique_blocks = {}
for block in all_blocks:
unique_blocks[block.id] = block
return sorted(unique_blocks.values(), key=lambda x: x.label)
async def _extract_unique_sources_and_files_from_agents(
self, agent_states: List[AgentState], actor: User, files_agents_cache: dict | None = None
) -> tuple[List[Source], List[FileMetadata]]:
"""Extract unique sources and files from agent states using bulk operations"""
all_source_ids = set()
all_file_ids = set()
for agent_state in agent_states:
files_agents = await self.file_agent_manager.list_files_for_agent(
agent_id=agent_state.id,
actor=actor,
is_open_only=False,
return_as_blocks=False,
per_file_view_window_char_limit=agent_state.per_file_view_window_char_limit,
)
# cache the results for reuse during conversion
if files_agents_cache is not None:
files_agents_cache[agent_state.id] = files_agents
for file_agent in files_agents:
all_source_ids.add(file_agent.source_id)
all_file_ids.add(file_agent.file_id)
sources = await self.source_manager.get_sources_by_ids_async(list(all_source_ids), actor)
files = await self.file_manager.get_files_by_ids_async(list(all_file_ids), actor, include_content=True)
return sources, files
    async def _convert_agent_state_to_schema(
        self,
        agent_state: AgentState,
        actor: User,
        files_agents_cache: dict | None = None,
        scrub_messages: bool = False,
    ) -> AgentSchema:
        """Convert AgentState to AgentSchema with ID remapping.

        Args:
            agent_state: The agent to convert.
            actor: User performing the export (used for scoped lookups).
            files_agents_cache: Optional agent_id -> file/agent rows cache populated
                earlier in the export, to avoid duplicate queries.
            scrub_messages: If True, drop all messages and in-context IDs from the
                exported schema.

        Returns:
            AgentSchema with all referenced DB IDs remapped to file-local IDs.
        """
        agent_file_id = self._map_db_to_file_id(agent_state.id, AgentSchema.__id_prefix__)
        # use cached file-agent data if available, otherwise fetch
        if files_agents_cache is not None and agent_state.id in files_agents_cache:
            files_agents = files_agents_cache[agent_state.id]
        else:
            files_agents = await self.file_agent_manager.list_files_for_agent(
                agent_id=agent_state.id,
                actor=actor,
                is_open_only=False,
                return_as_blocks=False,
                per_file_view_window_char_limit=agent_state.per_file_view_window_char_limit,
            )
        agent_schema = await AgentSchema.from_agent_state(
            agent_state, message_manager=self.message_manager, files_agents=files_agents, actor=actor
        )
        agent_schema.id = agent_file_id
        # Handle message scrubbing
        if not scrub_messages:
            # Ensure all in-context messages are present before ID remapping.
            # AgentSchema.from_agent_state fetches a limited slice (~50) and may exclude messages still
            # referenced by in_context_message_ids. Fetch any missing in-context messages by ID so remapping succeeds.
            existing_msg_ids = {m.id for m in (agent_schema.messages or [])}
            in_context_ids = agent_schema.in_context_message_ids or []
            missing_in_context_ids = [mid for mid in in_context_ids if mid not in existing_msg_ids]
            if missing_in_context_ids:
                missing_msgs = await self.message_manager.get_messages_by_ids_async(message_ids=missing_in_context_ids, actor=actor)
                fetched_ids = {m.id for m in missing_msgs}
                not_found = [mid for mid in missing_in_context_ids if mid not in fetched_ids]
                if not_found:
                    # Surface a clear mapping error; handled upstream by the route/export wrapper.
                    raise AgentExportIdMappingError(db_id=not_found[0], entity_type=MessageSchema.__id_prefix__)
                for msg in missing_msgs:
                    agent_schema.messages.append(MessageSchema.from_message(msg))
        else:
            # Scrub all messages from export
            agent_schema.messages = []
            agent_schema.in_context_message_ids = []
        # wipe the values of tool_exec_environment_variables (they contain secrets)
        # Keys are preserved so the importer knows which variables must be re-supplied.
        agent_secrets = agent_schema.secrets or agent_schema.tool_exec_environment_variables
        if agent_secrets:
            agent_schema.tool_exec_environment_variables = {key: "" for key in agent_secrets}
            agent_schema.secrets = {key: "" for key in agent_secrets}
        if not scrub_messages:
            if agent_schema.messages:
                # Remap message IDs first; the in-context list below relies on these mappings.
                for message in agent_schema.messages:
                    message_file_id = self._map_db_to_file_id(message.id, MessageSchema.__id_prefix__)
                    message.id = message_file_id
                    message.agent_id = agent_file_id
            if agent_schema.in_context_message_ids:
                # allow_new=False: every in-context ID must already have been mapped above.
                agent_schema.in_context_message_ids = [
                    self._map_db_to_file_id(message_id, MessageSchema.__id_prefix__, allow_new=False)
                    for message_id in agent_schema.in_context_message_ids
                ]
        if agent_schema.tool_ids:
            agent_schema.tool_ids = [self._map_db_to_file_id(tool_id, ToolSchema.__id_prefix__) for tool_id in agent_schema.tool_ids]
        if agent_schema.source_ids:
            agent_schema.source_ids = [
                self._map_db_to_file_id(source_id, SourceSchema.__id_prefix__) for source_id in agent_schema.source_ids
            ]
        if agent_schema.block_ids:
            agent_schema.block_ids = [self._map_db_to_file_id(block_id, BlockSchema.__id_prefix__) for block_id in agent_schema.block_ids]
        if agent_schema.files_agents:
            for file_agent in agent_schema.files_agents:
                file_agent.file_id = self._map_db_to_file_id(file_agent.file_id, FileSchema.__id_prefix__)
                file_agent.source_id = self._map_db_to_file_id(file_agent.source_id, SourceSchema.__id_prefix__)
                file_agent.agent_id = agent_file_id
        if agent_schema.group_ids:
            agent_schema.group_ids = [self._map_db_to_file_id(group_id, GroupSchema.__id_prefix__) for group_id in agent_schema.group_ids]
        return agent_schema
def _convert_tool_to_schema(self, tool) -> ToolSchema:
"""Convert Tool to ToolSchema with ID remapping"""
tool_file_id = self._map_db_to_file_id(tool.id, ToolSchema.__id_prefix__, allow_new=False)
tool_schema = ToolSchema.from_tool(tool)
tool_schema.id = tool_file_id
return tool_schema
def _convert_block_to_schema(self, block) -> BlockSchema:
"""Convert Block to BlockSchema with ID remapping"""
block_file_id = self._map_db_to_file_id(block.id, BlockSchema.__id_prefix__, allow_new=False)
block_schema = BlockSchema.from_block(block)
block_schema.id = block_file_id
return block_schema
def _convert_source_to_schema(self, source) -> SourceSchema:
"""Convert Source to SourceSchema with ID remapping"""
source_file_id = self._map_db_to_file_id(source.id, SourceSchema.__id_prefix__, allow_new=False)
source_schema = SourceSchema.from_source(source)
source_schema.id = source_file_id
return source_schema
def _convert_file_to_schema(self, file_metadata) -> FileSchema:
"""Convert FileMetadata to FileSchema with ID remapping"""
file_file_id = self._map_db_to_file_id(file_metadata.id, FileSchema.__id_prefix__, allow_new=False)
file_schema = FileSchema.from_file_metadata(file_metadata)
file_schema.id = file_file_id
file_schema.source_id = self._map_db_to_file_id(file_metadata.source_id, SourceSchema.__id_prefix__, allow_new=False)
return file_schema
    async def _extract_unique_mcp_servers(self, tools: List, actor: User) -> List:
        """Extract unique MCP servers from tools based on metadata, using server_id if available, otherwise falling back to server_name.

        Lookups are best-effort: failures are logged as warnings and the
        offending servers are simply omitted rather than aborting the export.
        """
        mcp_server_ids = set()
        mcp_server_names = set()
        for tool in tools:
            # Check if tool has MCP metadata
            if tool.metadata_ and MCP_TOOL_TAG_NAME_PREFIX in tool.metadata_:
                mcp_metadata = tool.metadata_[MCP_TOOL_TAG_NAME_PREFIX]
                # TODO: @jnjpng clean this up once we fully migrate to server_id being the main identifier
                if "server_id" in mcp_metadata:
                    mcp_server_ids.add(mcp_metadata["server_id"])
                elif "server_name" in mcp_metadata:
                    mcp_server_names.add(mcp_metadata["server_name"])
        # Fetch MCP servers by ID
        mcp_servers = []
        fetched_server_ids = set()
        if mcp_server_ids:
            try:
                mcp_servers = await self.mcp_manager.get_mcp_servers_by_ids(list(mcp_server_ids), actor)
                fetched_server_ids.update([mcp_server.id for mcp_server in mcp_servers])
            except Exception as e:
                # Best-effort: a failed bulk lookup should not abort the export.
                logger.warning(f"Failed to fetch MCP servers by IDs {mcp_server_ids}: {e}")
        # Fetch MCP servers by name if not already fetched by ID
        if mcp_server_names:
            for server_name in mcp_server_names:
                try:
                    mcp_server = await self.mcp_manager.get_mcp_server(server_name, actor)
                    # De-duplicate against servers already fetched by ID.
                    if mcp_server and mcp_server.id not in fetched_server_ids:
                        mcp_servers.append(mcp_server)
                except Exception as e:
                    # Best-effort: skip names that cannot be resolved.
                    logger.warning(f"Failed to fetch MCP server by name {server_name}: {e}")
        return mcp_servers
def _convert_mcp_server_to_schema(self, mcp_server: MCPServer) -> MCPServerSchema:
"""Convert MCPServer to MCPServerSchema with ID remapping and auth scrubbing"""
try:
mcp_file_id = self._map_db_to_file_id(mcp_server.id, MCPServerSchema.__id_prefix__, allow_new=False)
mcp_schema = MCPServerSchema.from_mcp_server(mcp_server)
mcp_schema.id = mcp_file_id
return mcp_schema
except Exception as e:
logger.error(f"Failed to convert MCP server {mcp_server.id}: {e}")
raise
def _convert_group_to_schema(self, group: Group) -> GroupSchema:
"""Convert Group to GroupSchema with ID remapping"""
try:
group_file_id = self._map_db_to_file_id(group.id, GroupSchema.__id_prefix__, allow_new=False)
group_schema = GroupSchema.from_group(group)
group_schema.id = group_file_id
group_schema.agent_ids = [
self._map_db_to_file_id(agent_id, AgentSchema.__id_prefix__, allow_new=False) for agent_id in group_schema.agent_ids
]
if hasattr(group_schema.manager_config, "manager_agent_id"):
group_schema.manager_config.manager_agent_id = self._map_db_to_file_id(
group_schema.manager_config.manager_agent_id, AgentSchema.__id_prefix__, allow_new=False
)
return group_schema
except Exception as e:
logger.error(f"Failed to convert group {group.id}: {e}")
raise
async def export(
self,
agent_ids: List[str],
actor: User,
conversation_id: Optional[str] = None,
skills: Optional[List[SkillSchema]] = None,
scrub_messages: bool = False,
) -> AgentFileSchema:
"""
Export agents and their related entities to AgentFileSchema format.
Args:
agent_ids: List of agent UUIDs to export
conversation_id: Optional conversation ID. If provided, uses the conversation's
in-context message_ids instead of the agent's global message_ids.
skills: Optional list of skills to include in the export. Skills are resolved
client-side and passed as SkillSchema objects.
scrub_messages: If True, excludes all messages from the export. Useful for
sharing agent configs without conversation history.
Returns:
AgentFileSchema with all related entities
Raises:
AgentFileExportError: If export fails
"""
try:
self._reset_state()
agent_states = await self.agent_manager.get_agents_by_ids_async(agent_ids=agent_ids, actor=actor)
# If conversation_id is provided, override the agent's message_ids with conversation's
if conversation_id:
from letta.services.conversation_manager import ConversationManager
conversation_manager = ConversationManager()
conversation_message_ids = await conversation_manager.get_message_ids_for_conversation(
conversation_id=conversation_id,
actor=actor,
)
# Override message_ids for the first agent (conversation export is single-agent)
if agent_states:
agent_states[0].message_ids = conversation_message_ids
# Validate that all requested agents were found
if len(agent_states) != len(agent_ids):
found_ids = {agent.id for agent in agent_states}
missing_ids = [agent_id for agent_id in agent_ids if agent_id not in found_ids]
raise AgentNotFoundForExportError(missing_ids)
groups = []
group_agent_ids = []
for agent_state in agent_states:
if agent_state.multi_agent_group != None:
groups.append(agent_state.multi_agent_group)
group_agent_ids.extend(agent_state.multi_agent_group.agent_ids)
group_agent_ids = list(set(group_agent_ids) - set(agent_ids))
if group_agent_ids:
group_agent_states = await self.agent_manager.get_agents_by_ids_async(agent_ids=group_agent_ids, actor=actor)
if len(group_agent_states) != len(group_agent_ids):
found_ids = {agent.id for agent in group_agent_states}
missing_ids = [agent_id for agent_id in group_agent_ids if agent_id not in found_ids]
raise AgentFileExportError(f"The following agent IDs were not found: {missing_ids}")
agent_ids.extend(group_agent_ids)
agent_states.extend(group_agent_states)
# cache for file-agent relationships to avoid duplicate queries
files_agents_cache = {} # Maps agent_id to list of file_agent relationships
# Extract unique entities across all agents
tool_set = self._extract_unique_tools(agent_states)
block_set = self._extract_unique_blocks(agent_states)
# Extract MCP servers from tools BEFORE conversion (must be done before ID mapping)
mcp_server_set = await self._extract_unique_mcp_servers(tool_set, actor)
# Map MCP server IDs before converting schemas
for mcp_server in mcp_server_set:
self._map_db_to_file_id(mcp_server.id, MCPServerSchema.__id_prefix__)
# Extract sources and files from agent states BEFORE conversion (with caching)
source_set, file_set = await self._extract_unique_sources_and_files_from_agents(agent_states, actor, files_agents_cache)
# Convert to schemas with ID remapping (reusing cached file-agent data)
agent_schemas = [
await self._convert_agent_state_to_schema(
agent_state,
actor=actor,
files_agents_cache=files_agents_cache,
scrub_messages=scrub_messages,
)
for agent_state in agent_states
]
tool_schemas = [self._convert_tool_to_schema(tool) for tool in tool_set]
block_schemas = [self._convert_block_to_schema(block) for block in block_set]
source_schemas = [self._convert_source_to_schema(source) for source in source_set]
file_schemas = [self._convert_file_to_schema(file_metadata) for file_metadata in file_set]
mcp_server_schemas = [self._convert_mcp_server_to_schema(mcp_server) for mcp_server in mcp_server_set]
group_schemas = [self._convert_group_to_schema(group) for group in groups]
logger.info(f"Exporting {len(agent_ids)} agents to agent file format")
# Return AgentFileSchema with converted entities
return AgentFileSchema(
agents=agent_schemas,
groups=group_schemas,
blocks=block_schemas,
files=file_schemas,
sources=source_schemas,
tools=tool_schemas,
mcp_servers=mcp_server_schemas,
skills=skills or [],
metadata={"revision_id": await get_latest_alembic_revision()},
created_at=datetime.now(timezone.utc),
)
except Exception as e:
logger.error(f"Failed to export agent file: {e}")
raise AgentExportProcessingError(str(e), e) from e
async def import_file(
    self,
    schema: AgentFileSchema,
    actor: User,
    append_copy_suffix: bool = False,
    override_name: Optional[str] = None,
    override_existing_tools: bool = True,
    dry_run: bool = False,
    env_vars: Optional[Dict[str, Any]] = None,
    override_embedding_config: Optional[EmbeddingConfig] = None,
    override_llm_config: Optional[LLMConfig] = None,
    project_id: Optional[str] = None,
) -> ImportResult:
    """
    Import AgentFileSchema into the database.

    Entities are created in dependency order (MCP servers -> tools -> blocks ->
    sources -> files -> agents -> messages -> file-agent links -> groups). A
    mapping from file-local IDs to database IDs (`file_to_db_ids`) is built as
    entities are created, so later steps can remap cross-references.

    Args:
        schema: The agent file schema to import
        actor: The user performing the import
        append_copy_suffix: If True, append "_copy" to imported agent names
        override_name: If set, rename imported agents (wins over append_copy_suffix)
        override_existing_tools: Whether tool upserts may overwrite existing tools
        dry_run: If True, validate but don't commit changes
        env_vars: Replacement values for agent secrets / tool exec env vars
        override_embedding_config: Replaces embedding config on sources and agents
        override_llm_config: Replaces LLM config on agents (keeps other defaults)
        project_id: If set, assign imported agents to this project

    Returns:
        ImportResult with success status and details

    Raises:
        AgentFileImportError: If import fails
    """
    try:
        self._reset_state()
        if dry_run:
            logger.info("Starting dry run import validation")
        else:
            logger.info("Starting agent file import")
        # Validate schema first
        self._validate_schema(schema)
        if dry_run:
            return ImportResult(
                success=True,
                message="Dry run validation passed",
                imported_count=0,
            )
        # Import in dependency order
        imported_count = 0
        file_to_db_ids = {}  # Maps file IDs to new database IDs
        # in-memory cache for file metadata to avoid repeated db calls
        file_metadata_cache = {}  # Maps database file ID to FileMetadata
        # 1. Create MCP servers first (tools depend on them)
        if schema.mcp_servers:
            for mcp_server_schema in schema.mcp_servers:
                server_data = mcp_server_schema.model_dump(exclude={"id"})
                filtered_server_data = self._filter_dict_for_model(server_data, MCPServer)
                create_schema = MCPServer(**filtered_server_data)
                # Note: We don't have auth info from export, so the user will need to re-configure auth.
                # TODO: @jnjpng store metadata about obfuscated metadata to surface to the user
                created_mcp_server = await self.mcp_manager.create_or_update_mcp_server(create_schema, actor)
                file_to_db_ids[mcp_server_schema.id] = created_mcp_server.id
                imported_count += 1
        # 2. Create tools (may depend on MCP servers) - using bulk upsert for efficiency
        if schema.tools:
            # convert tool schemas to pydantic tools
            pydantic_tools = []
            for tool_schema in schema.tools:
                pydantic_tools.append(Tool(**tool_schema.model_dump(exclude={"id"})))
            # bulk upsert all tools at once
            created_tools = await self.tool_manager.bulk_upsert_tools_async(
                pydantic_tools, actor, override_existing_tools=override_existing_tools
            )
            # map file ids to database ids
            # note: tools are matched by name during upsert, so we need to match by name here too
            created_tools_by_name = {tool.name: tool for tool in created_tools}
            for tool_schema in schema.tools:
                created_tool = created_tools_by_name.get(tool_schema.name)
                if created_tool:
                    file_to_db_ids[tool_schema.id] = created_tool.id
                    imported_count += 1
                else:
                    logger.warning(f"Tool {tool_schema.name} was not created during bulk upsert")
        # 3. Create blocks (no dependencies) - using batch create for efficiency
        if schema.blocks:
            # convert block schemas to pydantic blocks (excluding IDs to create new blocks)
            pydantic_blocks = []
            for block_schema in schema.blocks:
                pydantic_blocks.append(Block(**block_schema.model_dump(exclude={"id"})))
            # batch create all blocks at once
            created_blocks = await self.block_manager.batch_create_blocks_async(pydantic_blocks, actor)
            # map file ids to database ids (batch create preserves input order)
            for block_schema, created_block in zip(schema.blocks, created_blocks):
                file_to_db_ids[block_schema.id] = created_block.id
                imported_count += 1
        # 4. Create sources (no dependencies) - using bulk upsert for efficiency
        if schema.sources:
            # convert source schemas to pydantic sources
            pydantic_sources = []
            # First, do a fast batch check for existing source names to avoid conflicts
            source_names_to_check = [s.name for s in schema.sources]
            existing_source_names = await self.source_manager.get_existing_source_names(source_names_to_check, actor)
            # override embedding_config
            if override_embedding_config:
                for source_schema in schema.sources:
                    source_schema.embedding_config = override_embedding_config
                    source_schema.embedding = override_embedding_config.handle
            for source_schema in schema.sources:
                source_data = source_schema.model_dump(exclude={"id", "embedding", "embedding_chunk_size"})
                # Check if source name already exists, if so add unique suffix
                original_name = source_data["name"]
                if original_name in existing_source_names:
                    unique_suffix = uuid.uuid4().hex[:8]
                    source_data["name"] = f"{original_name}_{unique_suffix}"
                pydantic_sources.append(Source(**source_data))
            # bulk upsert all sources at once
            created_sources = await self.source_manager.bulk_upsert_sources_async(pydantic_sources, actor)
            # map file ids to database ids
            # note: sources are matched by name during upsert, so we need to match by name here too
            created_sources_by_name = {source.name: source for source in created_sources}
            for i, source_schema in enumerate(schema.sources):
                # Use the pydantic source name (which may have been modified for uniqueness)
                source_name = pydantic_sources[i].name
                created_source = created_sources_by_name.get(source_name)
                if created_source:
                    file_to_db_ids[source_schema.id] = created_source.id
                    imported_count += 1
                else:
                    logger.warning(f"Source {source_name} was not created during bulk upsert")
        # 5. Create files (depends on sources)
        for file_schema in schema.files:
            # Convert FileSchema back to FileMetadata
            file_data = file_schema.model_dump(exclude={"id", "content"})
            # Remap source_id from file ID to database ID
            file_data["source_id"] = file_to_db_ids[file_schema.source_id]
            # Set processing status to PARSING since we have parsed content but need to re-embed
            file_data["processing_status"] = FileProcessingStatus.PARSING
            file_data["error_message"] = None
            file_data["total_chunks"] = None
            file_data["chunks_embedded"] = None
            file_metadata = FileMetadata(**file_data)
            created_file = await self.file_manager.create_file(file_metadata, actor, text=file_schema.content)
            file_to_db_ids[file_schema.id] = created_file.id
            imported_count += 1
        # 6. Process files for chunking/embedding (depends on files and sources)
        # Start background tasks for file processing
        background_tasks = []
        if schema.files and any(f.content for f in schema.files):
            # Use override embedding config if provided, otherwise use agent's config
            # NOTE(review): assumes at least one agent exists whenever files with content do - confirm
            embedder_config = override_embedding_config if override_embedding_config else schema.agents[0].embedding_config
            # determine which embedder to use - turbopuffer takes precedence
            if should_use_tpuf():
                from letta.services.file_processor.embedder.turbopuffer_embedder import TurbopufferEmbedder

                embedder = TurbopufferEmbedder(embedding_config=embedder_config)
            elif should_use_pinecone():
                embedder = PineconeEmbedder(embedding_config=embedder_config)
            else:
                embedder = OpenAIEmbedder(embedding_config=embedder_config)
            file_processor = FileProcessor(
                file_parser=self.file_parser,
                embedder=embedder,
                actor=actor,
            )
            for file_schema in schema.files:
                if file_schema.content:  # Only process files with content
                    file_db_id = file_to_db_ids[file_schema.id]
                    source_db_id = file_to_db_ids[file_schema.source_id]
                    # Get the created file metadata (with caching)
                    if file_db_id not in file_metadata_cache:
                        file_metadata_cache[file_db_id] = await self.file_manager.get_file_by_id(file_db_id, actor)
                    file_metadata = file_metadata_cache[file_db_id]
                    # Save the db call of fetching content again
                    file_metadata.content = file_schema.content
                    # Create background task for file processing
                    # TODO: This can be moved to celery or RQ or something
                    task = safe_create_task(
                        self._process_file_async(
                            file_metadata=file_metadata, source_id=source_db_id, file_processor=file_processor, actor=actor
                        ),
                        label=f"process_file_{file_metadata.file_name}",
                    )
                    background_tasks.append(task)
                    logger.info(f"Started background processing for file {file_metadata.file_name} (ID: {file_db_id})")
        # 7. Create agents with empty message history
        created_agents_by_file_id = {}  # Maps agent file ID -> created agent, used again in step 8
        for agent_schema in schema.agents:
            # Override embedding_config if provided
            if override_embedding_config:
                agent_schema.embedding_config = override_embedding_config
                agent_schema.embedding = override_embedding_config.handle
            # Override llm_config if provided (keeps other defaults like context size)
            if override_llm_config:
                agent_schema.llm_config = override_llm_config
                agent_schema.model = override_llm_config.handle
            # Convert AgentSchema back to CreateAgent, remapping tool/block IDs
            agent_data = agent_schema.model_dump(exclude={"id", "in_context_message_ids", "messages"})
            # Handle agent name override: override_name takes precedence over append_copy_suffix
            if override_name:
                agent_data["name"] = override_name
            elif append_copy_suffix:
                agent_data["name"] = agent_data.get("name") + "_copy"
            # Remap tool_ids from file IDs to database IDs
            if agent_data.get("tool_ids"):
                agent_data["tool_ids"] = [file_to_db_ids[file_id] for file_id in agent_data["tool_ids"]]
            # Remap block_ids from file IDs to database IDs
            if agent_data.get("block_ids"):
                agent_data["block_ids"] = [file_to_db_ids[file_id] for file_id in agent_data["block_ids"]]
            # Remap source_ids from file IDs to database IDs
            if agent_data.get("source_ids"):
                agent_data["source_ids"] = [file_to_db_ids[file_id] for file_id in agent_data["source_ids"]]
            if env_vars and agent_data.get("secrets"):
                # update environment variable values from the provided env_vars dict,
                # mirroring into the legacy field only when it is actually present
                for key in agent_data["secrets"]:
                    agent_data["secrets"][key] = env_vars.get(key, "")
                    if agent_data.get("tool_exec_environment_variables") is not None:
                        agent_data["tool_exec_environment_variables"][key] = env_vars.get(key, "")
            elif env_vars and agent_data.get("tool_exec_environment_variables"):
                # also handle tool_exec_environment_variables for backwards compatibility
                for key in agent_data["tool_exec_environment_variables"]:
                    agent_data["tool_exec_environment_variables"][key] = env_vars.get(key, "")
                    if agent_data.get("secrets") is not None:
                        agent_data["secrets"][key] = env_vars.get(key, "")
            # Override project_id if provided
            if project_id:
                agent_data["project_id"] = project_id
            agent_create = CreateAgent(**agent_data)
            created_agent = await self.agent_manager.create_agent_async(agent_create, actor, _init_with_no_messages=True)
            created_agents_by_file_id[agent_schema.id] = created_agent
            file_to_db_ids[agent_schema.id] = created_agent.id
            imported_count += 1
        # 8. Create messages and update agent message_ids
        for agent_schema in schema.agents:
            agent_db_id = file_to_db_ids[agent_schema.id]
            message_file_to_db_ids = {}
            # Save placeholder message IDs so we can clean them up after successful import
            agent_state = await self.agent_manager.get_agent_by_id_async(agent_db_id, actor)
            placeholder_message_ids = list(agent_state.message_ids) if agent_state.message_ids else []
            # Create messages for this agent
            messages = []
            for message_schema in agent_schema.messages:
                # Convert MessageSchema back to Message, setting agent_id to new DB ID
                message_data = message_schema.model_dump(exclude={"id", "type"})
                message_data["agent_id"] = agent_db_id  # Remap agent_id to new database ID
                message_obj = Message(**message_data)
                messages.append(message_obj)
                # Map file ID to the generated database ID immediately
                message_file_to_db_ids[message_schema.id] = message_obj.id
            # Look up THIS agent's created record. Previously this read the stale
            # `created_agent` loop variable left over from step 7, so every agent's
            # messages inherited the last created agent's project/template IDs.
            created_agent = created_agents_by_file_id[agent_schema.id]
            created_messages = await self.message_manager.create_many_messages_async(
                pydantic_msgs=messages,
                actor=actor,
                project_id=created_agent.project_id,
                template_id=created_agent.template_id,
            )
            imported_count += len(created_messages)
            # Remap in_context_message_ids from file IDs to database IDs
            in_context_db_ids = [message_file_to_db_ids[message_schema_id] for message_schema_id in agent_schema.in_context_message_ids]
            # Update agent with the correct message_ids
            await self.agent_manager.update_message_ids_async(agent_id=agent_db_id, message_ids=in_context_db_ids, actor=actor)
            # Clean up placeholder messages now that import succeeded
            for placeholder_id in placeholder_message_ids:
                await self.message_manager.delete_message_by_id_async(message_id=placeholder_id, actor=actor)
        # 9. Create file-agent relationships (depends on agents and files)
        for agent_schema in schema.agents:
            if agent_schema.files_agents:
                agent_db_id = file_to_db_ids[agent_schema.id]
                # Prepare files for bulk attachment
                files_for_agent = []
                visible_content_map = {}
                for file_agent_schema in agent_schema.files_agents:
                    file_db_id = file_to_db_ids[file_agent_schema.file_id]
                    # Use cached file metadata if available (with content)
                    if file_db_id not in file_metadata_cache:
                        file_metadata_cache[file_db_id] = await self.file_manager.get_file_by_id(
                            file_db_id, actor, include_content=True
                        )
                    file_metadata = file_metadata_cache[file_db_id]
                    files_for_agent.append(file_metadata)
                    if file_agent_schema.visible_content:
                        visible_content_map[file_metadata.file_name] = file_agent_schema.visible_content
                # Bulk attach files to agent
                await self.file_agent_manager.attach_files_bulk(
                    agent_id=agent_db_id,
                    files_metadata=files_for_agent,
                    visible_content_map=visible_content_map,
                    actor=actor,
                    max_files_open=agent_schema.max_files_open,
                )
                imported_count += len(files_for_agent)
        # Extract the imported agent IDs (database IDs)
        imported_agent_ids = []
        for agent_schema in schema.agents:
            if agent_schema.id in file_to_db_ids:
                imported_agent_ids.append(file_to_db_ids[agent_schema.id])
        # 10. Create groups (depends on agents, for agent_ids/manager remapping)
        for group in schema.groups:
            group_data = group.model_dump(exclude={"id"})
            group_data["agent_ids"] = [file_to_db_ids[agent_id] for agent_id in group_data["agent_ids"]]
            if "manager_agent_id" in group_data["manager_config"]:
                group_data["manager_config"]["manager_agent_id"] = file_to_db_ids[group_data["manager_config"]["manager_agent_id"]]
            created_group = await self.group_manager.create_group_async(GroupCreate(**group_data), actor)
            file_to_db_ids[group.id] = created_group.id
            imported_count += 1
        # prepare result message
        num_background_tasks = len(background_tasks)
        if num_background_tasks > 0:
            message = (
                f"Import completed successfully. Imported {imported_count} entities. "
                f"{num_background_tasks} file(s) are being processed in the background for embeddings."
            )
        else:
            message = f"Import completed successfully. Imported {imported_count} entities."
        return ImportResult(
            success=True,
            message=message,
            imported_count=imported_count,
            imported_agent_ids=imported_agent_ids,
            id_mappings=file_to_db_ids,
        )
    except Exception as e:
        logger.exception(f"Failed to import agent file: {e}")
        raise AgentFileImportError(f"Import failed: {e}") from e
def _validate_id_format(self, schema: AgentFileSchema) -> List[str]:
    """Validate that all IDs follow the expected '<prefix>-<integer>' format.

    Returns:
        A list of human-readable error strings; empty when all IDs are valid.
    """
    errors = []

    def _check_id(entity_id: str, expected_prefix: str, label: str) -> None:
        # Shared checker for top-level entities and agent messages;
        # previously this logic was duplicated verbatim in two loops.
        if not entity_id.startswith(f"{expected_prefix}-"):
            errors.append(f"Invalid {label} format: {entity_id} should start with '{expected_prefix}-'")
            return
        # The suffix after '<prefix>-' must parse as an integer.
        try:
            int(entity_id[len(expected_prefix) + 1 :])
        except ValueError:
            errors.append(f"Invalid {label} format: {entity_id} should have integer suffix")

    # Define entity types and their expected prefixes
    entity_checks = [
        (schema.agents, AgentSchema.__id_prefix__),
        (schema.groups, GroupSchema.__id_prefix__),
        (schema.blocks, BlockSchema.__id_prefix__),
        (schema.files, FileSchema.__id_prefix__),
        (schema.sources, SourceSchema.__id_prefix__),
        (schema.tools, ToolSchema.__id_prefix__),
        (schema.mcp_servers, MCPServerSchema.__id_prefix__),
    ]
    for entities, expected_prefix in entity_checks:
        for entity in entities:
            _check_id(entity.id, expected_prefix, "ID")
    # Also check message IDs within agents
    for agent in schema.agents:
        for message in agent.messages:
            _check_id(message.id, MessageSchema.__id_prefix__, "message ID")
    return errors
def _validate_duplicate_ids(self, schema: AgentFileSchema) -> List[str]:
"""Validate that there are no duplicate IDs within or across entity types"""
errors = []
all_ids = set()
# Check each entity type for internal duplicates and collect all IDs
entity_collections = [
("agents", schema.agents),
("groups", schema.groups),
("blocks", schema.blocks),
("files", schema.files),
("sources", schema.sources),
("tools", schema.tools),
("mcp_servers", schema.mcp_servers),
]
for entity_type, entities in entity_collections:
entity_ids = [entity.id for entity in entities]
# Check for duplicates within this entity type
seen = set()
duplicates = set()
for entity_id in entity_ids:
if entity_id in seen:
duplicates.add(entity_id)
else:
seen.add(entity_id)
if duplicates:
errors.append(f"Duplicate {entity_type} IDs found: {duplicates}")
# Check for duplicates across all entity types
for entity_id in entity_ids:
if entity_id in all_ids:
errors.append(f"Duplicate ID across entity types: {entity_id}")
all_ids.add(entity_id)
# Also check message IDs within agents
for agent in schema.agents:
message_ids = [msg.id for msg in agent.messages]
# Check for duplicates within agent messages
seen = set()
duplicates = set()
for message_id in message_ids:
if message_id in seen:
duplicates.add(message_id)
else:
seen.add(message_id)
if duplicates:
errors.append(f"Duplicate message IDs in agent {agent.id}: {duplicates}")
# Check for duplicates across all entity types
for message_id in message_ids:
if message_id in all_ids:
errors.append(f"Duplicate ID across entity types: {message_id}")
all_ids.add(message_id)
return errors
def _validate_file_source_references(self, schema: AgentFileSchema) -> List[str]:
"""Validate that all file source_id references exist"""
errors = []
source_ids = {source.id for source in schema.sources}
for file in schema.files:
if file.source_id not in source_ids:
errors.append(f"File {file.id} references non-existent source {file.source_id}")
return errors
def _validate_file_agent_references(self, schema: AgentFileSchema) -> List[str]:
"""Validate that all file-agent relationships reference existing entities"""
errors = []
file_ids = {file.id for file in schema.files}
source_ids = {source.id for source in schema.sources}
{agent.id for agent in schema.agents}
for agent in schema.agents:
for file_agent in agent.files_agents:
if file_agent.file_id not in file_ids:
errors.append(f"File-agent relationship references non-existent file {file_agent.file_id}")
if file_agent.source_id not in source_ids:
errors.append(f"File-agent relationship references non-existent source {file_agent.source_id}")
if file_agent.agent_id != agent.id:
errors.append(f"File-agent relationship has mismatched agent_id {file_agent.agent_id} vs {agent.id}")
return errors
def _validate_schema(self, schema: AgentFileSchema):
    """
    Validate the agent file schema for consistency and referential integrity.

    Runs every validator in order (ID format, duplicate IDs, file->source
    references, file-agent references) and aggregates their errors.

    Args:
        schema: The schema to validate

    Raises:
        AgentFileImportError: If any validator reports an error
    """
    validators = (
        self._validate_id_format,
        self._validate_duplicate_ids,
        self._validate_file_source_references,
        self._validate_file_agent_references,
    )
    errors = []
    for validate in validators:
        errors.extend(validate(schema))
    if errors:
        raise AgentFileImportError(f"Schema validation failed: {'; '.join(errors)}")
    logger.info("Schema validation passed")
def _filter_dict_for_model(self, data: dict, model_cls):
"""Filter a dictionary to only include keys that are in the model fields"""
try:
allowed = model_cls.model_fields.keys() # Pydantic v2
except AttributeError:
allowed = model_cls.__fields__.keys() # Pydantic v1
return {k: v for k, v in data.items() if k in allowed}
async def _process_file_async(self, file_metadata: FileMetadata, source_id: str, file_processor: FileProcessor, actor: User):
    """
    Chunk and embed one imported file as a background task.

    Runs detached from the main import flow: on failure the file row is
    marked ERROR (best-effort) instead of raising, so the import as a
    whole is never aborted by a single file.

    Args:
        file_metadata: The file metadata with content
        source_id: The database ID of the source
        file_processor: The file processor instance to use
        actor: The user performing the action
    """
    file_id, file_name = file_metadata.id, file_metadata.file_name
    try:
        logger.info(f"Starting background processing for file {file_name} (ID: {file_id})")
        # Chunk + embed; process_imported_file also flips the file status to COMPLETED on success.
        passages = await file_processor.process_imported_file(file_metadata=file_metadata, source_id=source_id)
    except Exception as e:
        logger.error(f"Failed to process file {file_name} (ID: {file_id}) in background: {e}")
        # Fall back to a generic message when the exception stringifies to "".
        failure_reason = str(e) if str(e) else f"Agent serialization failed: {type(e).__name__}"
        try:
            await self.file_manager.update_file_status(
                file_id=file_id,
                actor=actor,
                processing_status=FileProcessingStatus.ERROR,
                error_message=failure_reason,
            )
        except Exception as update_error:
            logger.error(f"Failed to update file status to ERROR for {file_id}: {update_error}")
        # Deliberately no re-raise: this is a background task, the file is
        # marked ERROR and the import continues.
    else:
        logger.info(f"Successfully processed file {file_name} with {len(passages)} passages")
        return passages
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/agent_serialization_manager.py",
"license": "Apache License 2.0",
"lines": 905,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/services/archive_manager.py | from datetime import datetime
from typing import Dict, List, Optional
from sqlalchemy import delete, or_, select
from letta.helpers.tpuf_client import should_use_tpuf
from letta.log import get_logger
from letta.orm import ArchivalPassage, Archive as ArchiveModel, ArchivesAgents
from letta.otel.tracing import trace_method
from letta.schemas.agent import AgentState as PydanticAgentState
from letta.schemas.archive import Archive as PydanticArchive
from letta.schemas.embedding_config import EmbeddingConfig
from letta.schemas.enums import PrimitiveType, VectorDBProvider
from letta.schemas.passage import Passage as PydanticPassage
from letta.schemas.user import User as PydanticUser
from letta.server.db import db_registry
from letta.services.helpers.agent_manager_helper import validate_agent_exists_async
from letta.settings import DatabaseChoice, settings
from letta.utils import bounded_gather, decrypt_agent_secrets, enforce_types
from letta.validators import raise_on_invalid_id
logger = get_logger(__name__)
class ArchiveManager:
"""Manager class to handle business logic related to Archives."""
@enforce_types
@trace_method
async def create_archive_async(
    self,
    name: str,
    embedding_config: Optional[EmbeddingConfig] = None,
    description: Optional[str] = None,
    actor: PydanticUser = None,
) -> PydanticArchive:
    """Create and persist a new archive, returning its pydantic form."""
    try:
        async with db_registry.async_session() as session:
            # Route vector storage to Turbopuffer when enabled, native otherwise.
            if should_use_tpuf():
                provider = VectorDBProvider.TPUF
            else:
                provider = VectorDBProvider.NATIVE
            new_archive = ArchiveModel(
                name=name,
                description=description,
                organization_id=actor.organization_id,
                vector_db_provider=provider,
                embedding_config=embedding_config,
            )
            await new_archive.create_async(session, actor=actor)
            return new_archive.to_pydantic()
    except Exception as e:
        logger.exception(f"Failed to create archive {name}. error={e}")
        raise
@enforce_types
@raise_on_invalid_id(param_name="archive_id", expected_prefix=PrimitiveType.ARCHIVE)
@trace_method
async def get_archive_by_id_async(
    self,
    archive_id: str,
    actor: PydanticUser,
) -> PydanticArchive:
    """Fetch a single archive by ID and return it as a pydantic model."""
    async with db_registry.async_session() as session:
        record = await ArchiveModel.read_async(db_session=session, identifier=archive_id, actor=actor)
        return record.to_pydantic()
@enforce_types
@raise_on_invalid_id(param_name="archive_id", expected_prefix=PrimitiveType.ARCHIVE)
@trace_method
async def update_archive_async(
    self,
    archive_id: str,
    name: Optional[str] = None,
    description: Optional[str] = None,
    actor: PydanticUser = None,
) -> PydanticArchive:
    """Update an archive's name and/or description; None fields are left untouched."""
    async with db_registry.async_session() as session:
        record = await ArchiveModel.read_async(
            db_session=session,
            identifier=archive_id,
            actor=actor,
            check_is_deleted=True,
        )
        # Only overwrite the fields the caller explicitly supplied.
        for attr, value in (("name", name), ("description", description)):
            if value is not None:
                setattr(record, attr, value)
        await record.update_async(session, actor=actor)
        return record.to_pydantic()
@enforce_types
@raise_on_invalid_id(param_name="agent_id", expected_prefix=PrimitiveType.AGENT)
@trace_method
async def list_archives_async(
    self,
    *,
    actor: PydanticUser,
    before: Optional[str] = None,
    after: Optional[str] = None,
    limit: Optional[int] = 50,
    ascending: bool = False,
    name: Optional[str] = None,
    agent_id: Optional[str] = None,
) -> List[PydanticArchive]:
    """List archives with pagination and optional filters.

    Filters:
    - name: exact match on name
    - agent_id: only archives attached to given agent
    """
    filters = {"name": name} if name is not None else {}
    # When filtering by agent, join through the archives<->agents link table.
    if agent_id is not None:
        join_model = ArchivesAgents
        join_conditions = [
            ArchivesAgents.archive_id == ArchiveModel.id,
            ArchivesAgents.agent_id == agent_id,
        ]
    else:
        join_model = None
        join_conditions = None
    async with db_registry.async_session() as session:
        if agent_id:
            await validate_agent_exists_async(session, agent_id, actor)
        rows = await ArchiveModel.list_async(
            db_session=session,
            before=before,
            after=after,
            limit=limit,
            ascending=ascending,
            actor=actor,
            check_is_deleted=True,
            join_model=join_model,
            join_conditions=join_conditions,
            **filters,
        )
        return [row.to_pydantic() for row in rows]
@enforce_types
@raise_on_invalid_id(param_name="agent_id", expected_prefix=PrimitiveType.AGENT)
@raise_on_invalid_id(param_name="archive_id", expected_prefix=PrimitiveType.ARCHIVE)
@trace_method
async def attach_agent_to_archive_async(
    self,
    agent_id: str,
    archive_id: str,
    is_owner: bool = False,
    actor: PydanticUser = None,
) -> None:
    """Attach an agent to an archive, reconciling ownership if already attached."""
    async with db_registry.async_session() as session:
        # Both sides of the relationship must exist and be visible to the actor.
        await validate_agent_exists_async(session, agent_id, actor)
        await ArchiveModel.read_async(db_session=session, identifier=archive_id, actor=actor)
        # Look for an existing link row first.
        lookup = await session.execute(
            select(ArchivesAgents).where(
                ArchivesAgents.agent_id == agent_id,
                ArchivesAgents.archive_id == archive_id,
            )
        )
        link = lookup.scalar_one_or_none()
        if link:
            # Already attached - just flip the ownership flag if it changed.
            if link.is_owner != is_owner:
                link.is_owner = is_owner
                await session.commit()
            return
        session.add(
            ArchivesAgents(
                agent_id=agent_id,
                archive_id=archive_id,
                is_owner=is_owner,
            )
        )
        # Commit for the new row is handled by the session context manager.
@enforce_types
@raise_on_invalid_id(param_name="agent_id", expected_prefix=PrimitiveType.AGENT)
@raise_on_invalid_id(param_name="archive_id", expected_prefix=PrimitiveType.ARCHIVE)
@trace_method
async def detach_agent_from_archive_async(
    self,
    agent_id: str,
    archive_id: str,
    actor: PydanticUser = None,
) -> None:
    """Remove the agent<->archive link; logs a warning when none existed."""
    async with db_registry.async_session() as session:
        # Validate both entities up front so failures are precise.
        await validate_agent_exists_async(session, agent_id, actor)
        await ArchiveModel.read_async(db_session=session, identifier=archive_id, actor=actor)
        # Delete the link row directly.
        deletion = await session.execute(
            delete(ArchivesAgents).where(
                ArchivesAgents.agent_id == agent_id,
                ArchivesAgents.archive_id == archive_id,
            )
        )
        if deletion.rowcount == 0:
            logger.warning(f"Attempted to detach unattached agent {agent_id} from archive {archive_id}")
        else:
            logger.info(f"Detached agent {agent_id} from archive {archive_id}")
        # Commit is handled by the session context manager.
@enforce_types
@raise_on_invalid_id(param_name="agent_id", expected_prefix=PrimitiveType.AGENT)
@trace_method
async def get_default_archive_for_agent_async(
    self,
    agent_id: str,
    actor: PydanticUser = None,
) -> Optional[PydanticArchive]:
    """Return the agent's single attached archive, or None when it has none."""
    from letta.services.agent_manager import AgentManager

    archive_ids = await AgentManager().get_agent_archive_ids_async(
        agent_id=agent_id,
        actor=actor,
    )
    # No archive attached - nothing to return.
    if not archive_ids:
        return None
    # TODO: Remove this check once we support multiple archives per agent
    if len(archive_ids) > 1:
        raise ValueError(f"Agent {agent_id} has multiple archives, which is not yet supported")
    return await self.get_archive_by_id_async(
        archive_id=archive_ids[0],
        actor=actor,
    )
@enforce_types
@raise_on_invalid_id(param_name="archive_id", expected_prefix=PrimitiveType.ARCHIVE)
@trace_method
async def delete_archive_async(
    self,
    archive_id: str,
    actor: PydanticUser = None,
) -> None:
    """Hard-delete an archive permanently."""
    async with db_registry.async_session() as session:
        record = await ArchiveModel.read_async(
            db_session=session,
            identifier=archive_id,
            actor=actor,
        )
        await record.hard_delete_async(session, actor=actor)
        logger.info(f"Deleted archive {archive_id}")
@enforce_types
@raise_on_invalid_id(param_name="archive_id", expected_prefix=PrimitiveType.ARCHIVE)
@trace_method
async def create_passage_in_archive_async(
    self,
    archive_id: str,
    text: str,
    metadata: Optional[Dict] = None,
    tags: Optional[List[str]] = None,
    created_at: Optional[str] = None,
    actor: PydanticUser = None,
) -> PydanticPassage:
    """Create a passage in an archive.

    Embeds the text (when the archive has an embedding config), writes the
    passage to SQL, and — for Turbopuffer-backed archives — dual-writes it
    to Turbopuffer on a best-effort basis (a Turbopuffer failure is logged
    but does not fail the call, since the SQL write already succeeded).

    Args:
        archive_id: ID of the archive to add the passage to
        text: The text content of the passage
        metadata: Optional metadata for the passage
        tags: Optional tags for categorizing the passage
        created_at: Optional creation datetime in ISO 8601 format
        actor: User performing the operation
    Returns:
        The created passage
    Raises:
        NoResultFound: If archive not found
    """
    # Local imports to avoid circular dependencies at module load time.
    from letta.llm_api.llm_client import LLMClient
    from letta.services.passage_manager import PassageManager

    # Verify the archive exists and user has access
    archive = await self.get_archive_by_id_async(archive_id=archive_id, actor=actor)
    # Generate embeddings for the text if embedding config is available;
    # otherwise the passage is stored without a vector.
    embedding = None
    if archive.embedding_config is not None:
        embedding_client = LLMClient.create(
            provider_type=archive.embedding_config.embedding_endpoint_type,
            actor=actor,
        )
        embeddings = await embedding_client.request_embeddings([text], archive.embedding_config)
        embedding = embeddings[0] if embeddings else None
    # Parse created_at from ISO string if provided (fromisoformat raises
    # ValueError on malformed input).
    parsed_created_at = None
    if created_at:
        parsed_created_at = datetime.fromisoformat(created_at)
    # Create the passage object with embedding
    passage = PydanticPassage(
        text=text,
        archive_id=archive_id,
        organization_id=actor.organization_id,
        metadata=metadata or {},
        tags=tags,
        embedding_config=archive.embedding_config,
        embedding=embedding,
        created_at=parsed_created_at,
    )
    # Use PassageManager to create the passage
    # NOTE(review): despite the name, create_agent_passage_async is used for
    # archive passages here - confirm this is the intended manager entry point.
    passage_manager = PassageManager()
    created_passage = await passage_manager.create_agent_passage_async(
        pydantic_passage=passage,
        actor=actor,
    )
    # If archive uses Turbopuffer, also write to Turbopuffer (dual-write)
    if archive.vector_db_provider == VectorDBProvider.TPUF:
        try:
            from letta.helpers.tpuf_client import TurbopufferClient

            tpuf_client = TurbopufferClient()
            # Insert to Turbopuffer with the same ID as SQL, reusing existing embedding
            await tpuf_client.insert_archival_memories(
                archive_id=archive.id,
                text_chunks=[created_passage.text],
                passage_ids=[created_passage.id],
                organization_id=actor.organization_id,
                actor=actor,
                embeddings=[created_passage.embedding],
            )
            logger.info(f"Uploaded passage {created_passage.id} to Turbopuffer for archive {archive_id}")
        except Exception as e:
            logger.error(f"Failed to upload passage to Turbopuffer: {e}")
            # Don't fail the entire operation if Turbopuffer upload fails
            # The passage is already saved to SQL
    logger.info(f"Created passage {created_passage.id} in archive {archive_id}")
    return created_passage
@enforce_types
@raise_on_invalid_id(param_name="archive_id", expected_prefix=PrimitiveType.ARCHIVE)
@trace_method
async def create_passages_in_archive_async(
self,
archive_id: str,
passages: List[Dict],
actor: PydanticUser = None,
) -> List[PydanticPassage]:
"""Create multiple passages in an archive.
Args:
archive_id: ID of the archive to add the passages to
passages: Passage create payloads
actor: User performing the operation
Returns:
The created passages
Raises:
NoResultFound: If archive not found
"""
if not passages:
return []
from letta.llm_api.llm_client import LLMClient
from letta.services.passage_manager import PassageManager
archive = await self.get_archive_by_id_async(archive_id=archive_id, actor=actor)
texts = [passage["text"] for passage in passages]
embedding_client = LLMClient.create(
provider_type=archive.embedding_config.embedding_endpoint_type,
actor=actor,
)
embeddings = await embedding_client.request_embeddings(texts, archive.embedding_config)
if len(embeddings) != len(passages):
raise ValueError("Embedding response count does not match passages count")
# Build PydanticPassage objects for batch creation
pydantic_passages: List[PydanticPassage] = []
for passage_payload, embedding in zip(passages, embeddings):
# Parse created_at from ISO string if provided
created_at = passage_payload.get("created_at")
if created_at and isinstance(created_at, str):
created_at = datetime.fromisoformat(created_at)
passage = PydanticPassage(
text=passage_payload["text"],
archive_id=archive_id,
organization_id=actor.organization_id,
metadata=passage_payload.get("metadata") or {},
tags=passage_payload.get("tags"),
embedding_config=archive.embedding_config,
embedding=embedding,
created_at=created_at,
)
pydantic_passages.append(passage)
# Use batch create for efficient single-transaction insert
passage_manager = PassageManager()
created_passages = await passage_manager.create_agent_passages_async(
pydantic_passages=pydantic_passages,
actor=actor,
)
if archive.vector_db_provider == VectorDBProvider.TPUF:
try:
from letta.helpers.tpuf_client import TurbopufferClient
tpuf_client = TurbopufferClient()
await tpuf_client.insert_archival_memories(
archive_id=archive.id,
text_chunks=[passage.text for passage in created_passages],
passage_ids=[passage.id for passage in created_passages],
organization_id=actor.organization_id,
actor=actor,
)
logger.info(f"Uploaded {len(created_passages)} passages to Turbopuffer for archive {archive_id}")
except Exception as e:
logger.error(f"Failed to upload passages to Turbopuffer: {e}")
logger.info(f"Created {len(created_passages)} passages in archive {archive_id}")
return created_passages
@enforce_types
@raise_on_invalid_id(param_name="archive_id", expected_prefix=PrimitiveType.ARCHIVE)
@raise_on_invalid_id(param_name="passage_id", expected_prefix=PrimitiveType.PASSAGE)
@trace_method
async def delete_passage_from_archive_async(
self,
archive_id: str,
passage_id: str,
actor: PydanticUser = None,
strict_mode: bool = False,
) -> None:
"""Delete a passage from an archive.
Args:
archive_id: ID of the archive containing the passage
passage_id: ID of the passage to delete
actor: User performing the operation
strict_mode: If True, raise errors on Turbopuffer failures
Raises:
NoResultFound: If archive or passage not found
ValueError: If passage does not belong to the specified archive
"""
from letta.services.passage_manager import PassageManager
await self.get_archive_by_id_async(archive_id=archive_id, actor=actor)
passage_manager = PassageManager()
passage = await passage_manager.get_agent_passage_by_id_async(passage_id=passage_id, actor=actor)
if passage.archive_id != archive_id:
raise ValueError(f"Passage {passage_id} does not belong to archive {archive_id}")
await passage_manager.delete_agent_passage_by_id_async(
passage_id=passage_id,
actor=actor,
strict_mode=strict_mode,
)
logger.info(f"Deleted passage {passage_id} from archive {archive_id}")
    @enforce_types
    @trace_method
    async def get_or_create_default_archive_for_agent_async(
        self,
        agent_state: PydanticAgentState,
        actor: PydanticUser = None,
    ) -> PydanticArchive:
        """Get the agent's default archive, creating one if it doesn't exist.

        If the agent has exactly one attached archive it is returned as-is.
        Otherwise a new archive named "<agent name>'s Archive" is created and
        attached with the agent as owner; a concurrent-creation race is detected
        via IntegrityError and resolved by adopting the competitor's archive.

        Raises:
            ValueError: If the agent already has multiple archives attached
                (not yet supported).
        """
        # First check if agent has any archives
        from sqlalchemy.exc import IntegrityError

        from letta.services.agent_manager import AgentManager

        agent_manager = AgentManager()
        archive_ids = await agent_manager.get_agent_archive_ids_async(
            agent_id=agent_state.id,
            actor=actor,
        )
        if archive_ids:
            # TODO: Remove this check once we support multiple archives per agent
            if len(archive_ids) > 1:
                raise ValueError(f"Agent {agent_state.id} has multiple archives, which is not yet supported")
            # Get the archive
            archive = await self.get_archive_by_id_async(
                archive_id=archive_ids[0],
                actor=actor,
            )
            return archive
        # Create a default archive for this agent (embedding_config is optional)
        archive_name = f"{agent_state.name}'s Archive"
        archive = await self.create_archive_async(
            name=archive_name,
            embedding_config=agent_state.embedding_config,
            description="Default archive created automatically",
            actor=actor,
        )
        try:
            # Attach the agent to the archive as owner
            await self.attach_agent_to_archive_async(
                agent_id=agent_state.id,
                archive_id=archive.id,
                is_owner=True,
                actor=actor,
            )
            return archive
        except IntegrityError:
            # race condition: another concurrent request already created and attached an archive
            # clean up the orphaned archive we just created
            logger.info(f"Race condition detected for agent {agent_state.id}, cleaning up orphaned archive {archive.id}")
            await self.delete_archive_async(archive_id=archive.id, actor=actor)
            # fetch the existing archive that was created by the concurrent request
            archive_ids = await agent_manager.get_agent_archive_ids_async(
                agent_id=agent_state.id,
                actor=actor,
            )
            if archive_ids:
                archive = await self.get_archive_by_id_async(
                    archive_id=archive_ids[0],
                    actor=actor,
                )
                return archive
            else:
                # this shouldn't happen, but if it does, re-raise
                raise
@enforce_types
@raise_on_invalid_id(param_name="archive_id", expected_prefix=PrimitiveType.ARCHIVE)
@trace_method
async def get_agents_for_archive_async(
self,
archive_id: str,
actor: PydanticUser,
before: Optional[str] = None,
after: Optional[str] = None,
limit: Optional[int] = 50,
ascending: bool = False,
include: List[str] = [],
) -> List[PydanticAgentState]:
"""Get agents that have access to an archive with pagination support.
Uses a subquery approach to avoid expensive JOINs.
"""
from letta.orm import Agent as AgentModel
async with db_registry.async_session() as session:
# Start with a basic query using subquery instead of JOIN
query = (
select(AgentModel)
.where(AgentModel.id.in_(select(ArchivesAgents.agent_id).where(ArchivesAgents.archive_id == archive_id)))
.where(AgentModel.organization_id == actor.organization_id)
)
# Apply pagination using cursor-based approach
if after:
result = (await session.execute(select(AgentModel.created_at, AgentModel.id).where(AgentModel.id == after))).first()
if result:
after_sort_value, after_id = result
# SQLite does not support as granular timestamping, so we need to round the timestamp
if settings.database_engine is DatabaseChoice.SQLITE and isinstance(after_sort_value, datetime):
after_sort_value = after_sort_value.strftime("%Y-%m-%d %H:%M:%S")
if ascending:
query = query.where(
AgentModel.created_at > after_sort_value,
or_(AgentModel.created_at == after_sort_value, AgentModel.id > after_id),
)
else:
query = query.where(
AgentModel.created_at < after_sort_value,
or_(AgentModel.created_at == after_sort_value, AgentModel.id < after_id),
)
if before:
result = (await session.execute(select(AgentModel.created_at, AgentModel.id).where(AgentModel.id == before))).first()
if result:
before_sort_value, before_id = result
# SQLite does not support as granular timestamping, so we need to round the timestamp
if settings.database_engine is DatabaseChoice.SQLITE and isinstance(before_sort_value, datetime):
before_sort_value = before_sort_value.strftime("%Y-%m-%d %H:%M:%S")
if ascending:
query = query.where(
AgentModel.created_at < before_sort_value,
or_(AgentModel.created_at == before_sort_value, AgentModel.id < before_id),
)
else:
query = query.where(
AgentModel.created_at > before_sort_value,
or_(AgentModel.created_at == before_sort_value, AgentModel.id > before_id),
)
# Apply sorting
if ascending:
query = query.order_by(AgentModel.created_at.asc(), AgentModel.id.asc())
else:
query = query.order_by(AgentModel.created_at.desc(), AgentModel.id.desc())
# Apply limit
if limit:
query = query.limit(limit)
# Execute the query
result = await session.execute(query)
agents_orm = result.scalars().all()
# Convert without decrypting to release DB connection before PBKDF2
agents_encrypted = await bounded_gather(
[agent.to_pydantic_async(include_relationships=[], include=include, decrypt=False) for agent in agents_orm]
)
# Decrypt secrets outside session
return await decrypt_agent_secrets(agents_encrypted)
@enforce_types
@trace_method
async def get_agent_from_passage_async(
self,
passage_id: str,
actor: PydanticUser,
) -> Optional[str]:
"""Get the agent ID that owns a passage (through its archive).
Returns the first agent found (for backwards compatibility).
Returns None if no agent found.
"""
async with db_registry.async_session() as session:
# First get the passage to find its archive_id
passage = await ArchivalPassage.read_async(
db_session=session,
identifier=passage_id,
actor=actor,
)
# Then find agents connected to that archive
result = await session.execute(select(ArchivesAgents.agent_id).where(ArchivesAgents.archive_id == passage.archive_id))
agent_ids = [row[0] for row in result.fetchall()]
if not agent_ids:
return None
# For now, return the first agent (backwards compatibility)
return agent_ids[0]
@enforce_types
@raise_on_invalid_id(param_name="archive_id", expected_prefix=PrimitiveType.ARCHIVE)
@trace_method
async def get_or_set_vector_db_namespace_async(
self,
archive_id: str,
) -> str:
"""Get the vector database namespace for an archive, creating it if it doesn't exist."""
from sqlalchemy import update
async with db_registry.async_session() as session:
# check if namespace already exists
result = await session.execute(select(ArchiveModel._vector_db_namespace).where(ArchiveModel.id == archive_id))
row = result.fetchone()
if row and row[0]:
return row[0]
# generate namespace name using same logic as tpuf_client
environment = settings.environment
if environment:
namespace_name = f"archive_{archive_id}_{environment.lower()}"
else:
namespace_name = f"archive_{archive_id}"
# update the archive with the namespace
await session.execute(update(ArchiveModel).where(ArchiveModel.id == archive_id).values(_vector_db_namespace=namespace_name))
# context manager now handles commits
# await session.commit()
return namespace_name
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/archive_manager.py",
"license": "Apache License 2.0",
"lines": 621,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/services/context_window_calculator/context_window_calculator.py | import asyncio
from typing import Any, Dict, List, Optional, Tuple
from openai.types.beta.function_tool import FunctionTool as OpenAITool
from letta.log import get_logger
from letta.schemas.agent import AgentState
from letta.schemas.enums import MessageRole
from letta.schemas.letta_message_content import TextContent
from letta.schemas.memory import ContextWindowOverview
from letta.schemas.message import Message
from letta.schemas.user import User as PydanticUser
from letta.services.context_window_calculator.token_counter import TokenCounter
from letta.services.message_manager import MessageManager
logger = get_logger(__name__)
class ContextWindowCalculator:
    """Handles context window calculations with different token counting strategies"""

    @staticmethod
    def _extract_tag_content(text: str, tag_name: str) -> Optional[str]:
        """
        Extract content between XML-style opening and closing tags.
        Args:
            text: The text to search in
            tag_name: The name of the tag (without < >)
        Returns:
            The content between tags (inclusive of tags), or None if not found
        Note:
            If duplicate tags exist, only the first occurrence is extracted.
        """
        start_tag = f"<{tag_name}>"
        end_tag = f"</{tag_name}>"
        start_idx = text.find(start_tag)
        if start_idx == -1:
            return None
        # Only look for the closing tag after the opening tag's position.
        end_idx = text.find(end_tag, start_idx)
        if end_idx == -1:
            return None
        return text[start_idx : end_idx + len(end_tag)]

    @staticmethod
    def _extract_system_prompt(system_message: str) -> Optional[str]:
        """
        Extract the system prompt / base instructions from a system message.
        First tries to find an explicit <base_instructions> tag. If not present
        (e.g. custom system prompts from Letta Code agents), falls back to
        extracting everything before the first known section tag.
        Returns:
            The system prompt text, or None if the message is empty.
        Note:
            The returned value is semantically different depending on agent type:
            - Standard agents: includes the <base_instructions>...</base_instructions> tags
            - Custom prompt agents (e.g. Letta Code): raw preamble text without any tags
        """
        _extract = ContextWindowCalculator._extract_tag_content
        # Preferred: explicit <base_instructions> wrapper
        tagged = _extract(system_message, "base_instructions")
        if tagged is not None:
            return tagged
        # Fallback: everything before the first known section tag
        section_tags = ["<memory_blocks>", "<memory_filesystem>", "<tool_usage_rules>", "<directories>", "<memory_metadata>"]
        first_section_pos = len(system_message)
        for tag in section_tags:
            pos = system_message.find(tag)
            if pos != -1 and pos < first_section_pos:
                first_section_pos = pos
        prompt = system_message[:first_section_pos].strip()
        return prompt if prompt else None

    @staticmethod
    def _extract_top_level_tag(system_message: str, tag_name: str, container_tag: str = "memory_blocks") -> Optional[str]:
        """
        Extract a tag only if it appears outside a container tag.
        This prevents extracting tags that are nested inside <memory_blocks> as
        memory block labels (e.g. a block named "memory_filesystem" rendered as
        <memory_filesystem> inside <memory_blocks>) from being confused with
        top-level sections.
        Handles the case where a tag appears both nested (inside the container)
        and at top-level — scans all occurrences to find one outside the container.
        Args:
            system_message: The full system message text
            tag_name: The tag to extract
            container_tag: The container tag to check nesting against
        Returns:
            The tag content if found at top level, None otherwise.
        """
        _extract = ContextWindowCalculator._extract_tag_content
        start_tag = f"<{tag_name}>"
        end_tag = f"</{tag_name}>"
        # Find the container boundaries
        container_start = system_message.find(f"<{container_tag}>")
        container_end = system_message.find(f"</{container_tag}>")
        has_container = container_start != -1 and container_end != -1
        # Scan all occurrences of the tag to find one outside the container
        search_start = 0
        while True:
            tag_start = system_message.find(start_tag, search_start)
            if tag_start == -1:
                return None
            # Check if this occurrence is nested inside the container
            if has_container and container_start < tag_start < container_end:
                # Skip past this nested occurrence
                search_start = tag_start + len(start_tag)
                continue
            # Found a top-level occurrence — extract it
            tag_end = system_message.find(end_tag, tag_start)
            if tag_end == -1:
                return None
            return system_message[tag_start : tag_end + len(end_tag)]

    @staticmethod
    def _extract_git_core_memory(system_message: str) -> Optional[str]:
        """
        Extract bare file blocks for git-enabled agents.
        Git-enabled agents render individual memory blocks as bare tags like
        <system/human.md>...</system/human.md> WITHOUT any container tag.
        These appear after </memory_filesystem> and before the next known
        section tag (<tool_usage_rules>, <directories>, or <memory_metadata>).
        Returns:
            The text containing all bare file blocks, or None if not found.
        """
        end_marker = "</memory_filesystem>"
        end_pos = system_message.find(end_marker)
        if end_pos == -1:
            return None
        start = end_pos + len(end_marker)
        # Find the next known section tag
        next_section_tags = ["<tool_usage_rules>", "<directories>", "<memory_metadata>"]
        next_section_pos = len(system_message)
        for tag in next_section_tags:
            pos = system_message.find(tag, start)
            if pos != -1 and pos < next_section_pos:
                next_section_pos = pos
        content = system_message[start:next_section_pos].strip()
        return content if content else None

    @staticmethod
    def extract_system_components(system_message: str) -> Dict[str, Optional[str]]:
        """
        Extract structured components from a formatted system message.
        Parses the system message to extract sections marked by XML-style tags using
        proper end-tag matching. Handles all agent types including:
        - Standard agents with <base_instructions> wrapper
        - Custom system prompts without <base_instructions> (e.g. Letta Code agents)
        - Git-enabled agents with top-level <memory_filesystem> and bare file blocks
        - React/workflow agents that don't render <memory_blocks>
        Args:
            system_message: A formatted system message containing XML-style section markers
        Returns:
            A dictionary with the following keys (value is None if section not found):
            - system_prompt: The base instructions section (or text before first section tag)
            - core_memory: The memory blocks section. For standard agents this is the
              <memory_blocks>...</memory_blocks> content. For git-enabled agents (no
              <memory_blocks> but top-level <memory_filesystem>), this captures the bare
              file blocks (e.g. <system/human.md>) that follow </memory_filesystem>.
            - memory_filesystem: Top-level memory filesystem (git-enabled agents only, NOT
              the memory_filesystem block nested inside <memory_blocks>)
            - tool_usage_rules: The tool usage rules section
            - directories: The directories section (when sources are attached)
            - external_memory_summary: The memory metadata section
        """
        _extract = ContextWindowCalculator._extract_tag_content
        _extract_top = ContextWindowCalculator._extract_top_level_tag
        core_memory = _extract(system_message, "memory_blocks")
        memory_filesystem = _extract_top(system_message, "memory_filesystem")
        # Git-enabled agents: no <memory_blocks>, but bare file blocks after </memory_filesystem>
        if core_memory is None and memory_filesystem is not None:
            core_memory = ContextWindowCalculator._extract_git_core_memory(system_message)
        return {
            "system_prompt": ContextWindowCalculator._extract_system_prompt(system_message),
            "core_memory": core_memory,
            "memory_filesystem": memory_filesystem,
            "tool_usage_rules": _extract_top(system_message, "tool_usage_rules"),
            "directories": _extract_top(system_message, "directories"),
            "external_memory_summary": _extract(system_message, "memory_metadata"),
        }

    @staticmethod
    def extract_summary_memory(messages: List[Any]) -> Tuple[Optional[str], int]:
        """
        Extract summary memory from the message list if present.
        Summary memory is a special message injected at position 1 (after system message)
        that contains a condensed summary of previous conversation history. This is used
        when the full conversation history doesn't fit in the context window.
        Args:
            messages: List of message objects to search for summary memory
        Returns:
            A tuple of (summary_text, start_index) where:
            - summary_text: The extracted summary content, or None if not found
            - start_index: Index where actual conversation messages begin (1 or 2)
        Detection Logic:
            Looks for a user message at index 1 containing the phrase
            "The following is a summary of the previous" which indicates
            it's a summarized conversation history rather than a real user message.
        """
        if (
            len(messages) > 1
            and messages[1].role == MessageRole.user
            and messages[1].content
            and len(messages[1].content) == 1
            and isinstance(messages[1].content[0], TextContent)
            and "The following is a summary of the previous " in messages[1].content[0].text
        ):
            summary_memory = messages[1].content[0].text
            start_index = 2
            return summary_memory, start_index
        return None, 1

    async def calculate_context_window(
        self,
        agent_state: AgentState,
        actor: PydanticUser,
        token_counter: TokenCounter,
        message_manager: MessageManager,
        system_message_compiled: Message,
        num_archival_memories: int,
        num_messages: int,
        message_ids: Optional[List[str]] = None,
    ) -> ContextWindowOverview:
        """Calculate context window information using the provided token counter
        Args:
            message_ids: Optional list of message IDs to use instead of agent_state.message_ids.
                         If provided, should NOT include the system message ID (index 0).
        """
        # Use provided message_ids or fall back to agent_state.message_ids[1:]
        effective_message_ids = message_ids if message_ids is not None else agent_state.message_ids[1:]
        messages = await message_manager.get_messages_by_ids_async(message_ids=effective_message_ids, actor=actor)
        in_context_messages = [system_message_compiled, *messages]
        # Filter out None messages (can occur when system message is missing)
        original_count = len(in_context_messages)
        in_context_messages = [m for m in in_context_messages if m is not None]
        if len(in_context_messages) < original_count:
            logger.warning(
                f"Filtered out {original_count - len(in_context_messages)} None messages for agent {agent_state.id}. "
                f"This typically indicates missing system message or corrupted message data."
            )
        # Convert messages to appropriate format
        converted_messages = token_counter.convert_messages(in_context_messages)
        # Extract system components
        components: Dict[str, Optional[str]] = {
            "system_prompt": None,
            "core_memory": None,
            "memory_filesystem": None,
            "tool_usage_rules": None,
            "directories": None,
            "external_memory_summary": None,
        }
        # Only parse components when message 0 is a single-text-part system message.
        if (
            in_context_messages
            and in_context_messages[0].role == MessageRole.system
            and in_context_messages[0].content
            and len(in_context_messages[0].content) == 1
            and isinstance(in_context_messages[0].content[0], TextContent)
        ):
            system_message = in_context_messages[0].content[0].text
            components = self.extract_system_components(system_message)
        # Extract each component with fallbacks
        system_prompt = components.get("system_prompt") or agent_state.system or ""
        core_memory = components.get("core_memory") or ""
        memory_filesystem = components.get("memory_filesystem") or ""
        tool_usage_rules = components.get("tool_usage_rules") or ""
        directories = components.get("directories") or ""
        external_memory_summary = components.get("external_memory_summary") or ""
        # Extract summary memory
        summary_memory, message_start_index = self.extract_summary_memory(in_context_messages)
        # Prepare tool definitions
        available_functions_definitions = []
        if agent_state.tools:
            available_functions_definitions = [OpenAITool(type="function", function=f.json_schema) for f in agent_state.tools]
        # Count tokens concurrently for all sections, skipping empty ones.
        # asyncio.sleep(0, result=0) is a cheap placeholder coroutine yielding 0.
        # NOTE: the tuple unpacking below must match this argument order exactly.
        token_counts = await asyncio.gather(
            token_counter.count_text_tokens(system_prompt),
            token_counter.count_text_tokens(core_memory) if core_memory else asyncio.sleep(0, result=0),
            token_counter.count_text_tokens(memory_filesystem) if memory_filesystem else asyncio.sleep(0, result=0),
            token_counter.count_text_tokens(tool_usage_rules) if tool_usage_rules else asyncio.sleep(0, result=0),
            token_counter.count_text_tokens(directories) if directories else asyncio.sleep(0, result=0),
            token_counter.count_text_tokens(external_memory_summary) if external_memory_summary else asyncio.sleep(0, result=0),
            token_counter.count_text_tokens(summary_memory) if summary_memory else asyncio.sleep(0, result=0),
            (
                token_counter.count_message_tokens(converted_messages[message_start_index:])
                if len(converted_messages) > message_start_index
                else asyncio.sleep(0, result=0)
            ),
            (
                token_counter.count_tool_tokens(available_functions_definitions)
                if available_functions_definitions
                else asyncio.sleep(0, result=0)
            ),
        )
        (
            num_tokens_system,
            num_tokens_core_memory,
            num_tokens_memory_filesystem,
            num_tokens_tool_usage_rules,
            num_tokens_directories,
            num_tokens_external_memory_summary,
            num_tokens_summary_memory,
            num_tokens_messages,
            num_tokens_available_functions_definitions,
        ) = token_counts
        num_tokens_used_total = sum(token_counts)
        return ContextWindowOverview(
            # context window breakdown (in messages)
            num_messages=len(in_context_messages),
            num_archival_memory=num_archival_memories,
            num_recall_memory=num_messages,
            num_tokens_external_memory_summary=num_tokens_external_memory_summary,
            external_memory_summary=external_memory_summary,
            # top-level information
            context_window_size_max=agent_state.llm_config.context_window,
            context_window_size_current=num_tokens_used_total,
            # context window breakdown (in tokens)
            num_tokens_system=num_tokens_system,
            system_prompt=system_prompt,
            num_tokens_core_memory=num_tokens_core_memory,
            core_memory=core_memory,
            # New sections
            num_tokens_memory_filesystem=num_tokens_memory_filesystem,
            memory_filesystem=memory_filesystem if memory_filesystem else None,
            num_tokens_tool_usage_rules=num_tokens_tool_usage_rules,
            tool_usage_rules=tool_usage_rules if tool_usage_rules else None,
            num_tokens_directories=num_tokens_directories,
            directories=directories if directories else None,
            # Summary and messages
            num_tokens_summary_memory=num_tokens_summary_memory,
            summary_memory=summary_memory,
            num_tokens_messages=num_tokens_messages,
            messages=in_context_messages,
            # related to functions
            num_tokens_functions_definitions=num_tokens_available_functions_definitions,
            functions_definitions=available_functions_definitions,
        )
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/context_window_calculator/context_window_calculator.py",
"license": "Apache License 2.0",
"lines": 323,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/services/context_window_calculator/token_counter.py | import hashlib
import json
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Any, Dict, List, Optional
from letta.helpers.decorators import async_redis_cache
from letta.llm_api.anthropic_client import AnthropicClient
from letta.llm_api.google_vertex_client import GoogleVertexClient
from letta.log import get_logger
from letta.otel.tracing import trace_method
from letta.schemas.enums import ProviderType
from letta.schemas.message import Message
from letta.schemas.openai.chat_completion_request import Tool as OpenAITool
if TYPE_CHECKING:
from letta.schemas.user import User
logger = get_logger(__name__)
class TokenCounter(ABC):
    """Abstract base class for token counting strategies.

    Concrete subclasses implement provider-specific counting (Anthropic API,
    Gemini API, tiktoken, or a byte-length approximation) plus the message
    conversion needed to feed that provider's counter.
    """

    @abstractmethod
    async def count_text_tokens(self, text: str) -> int:
        """Count tokens in a text string"""

    @abstractmethod
    async def count_message_tokens(self, messages: List[Dict[str, Any]]) -> int:
        """Count tokens in a list of messages"""

    @abstractmethod
    async def count_tool_tokens(self, tools: List[Any]) -> int:
        """Count tokens in tool definitions"""

    @abstractmethod
    def convert_messages(self, messages: List[Any]) -> List[Dict[str, Any]]:
        """Convert messages to the appropriate format for this counter"""
class AnthropicTokenCounter(TokenCounter):
    """Token counter using Anthropic's API.

    All counting calls are cached in Redis for one hour, keyed by model plus a
    SHA-256 digest of the input, to avoid repeated round-trips to the API.
    """

    def __init__(self, anthropic_client: AnthropicClient, model: str):
        # Client used for Anthropic count_tokens requests.
        self.client = anthropic_client
        # Model name forwarded on every counting request (also part of cache keys).
        self.model = model

    @trace_method
    @async_redis_cache(
        key_func=lambda self, text: f"anthropic_text_tokens:{self.model}:{hashlib.sha256(text.encode()).hexdigest()[:16]}",
        prefix="token_counter",
        ttl_s=3600,  # cache for 1 hour
    )
    async def count_text_tokens(self, text: str) -> int:
        """Count tokens in a raw string; empty input short-circuits to 0."""
        if not text:
            return 0
        # Anthropic counts tokens per-message, so wrap the text in a user message.
        return await self.client.count_tokens(model=self.model, messages=[{"role": "user", "content": text}])

    @trace_method
    @async_redis_cache(
        key_func=lambda self,
        messages: f"anthropic_message_tokens:{self.model}:{hashlib.sha256(json.dumps(messages, sort_keys=True).encode()).hexdigest()[:16]}",
        prefix="token_counter",
        ttl_s=3600,  # cache for 1 hour
    )
    async def count_message_tokens(self, messages: List[Dict[str, Any]]) -> int:
        """Count tokens for a list of Anthropic-format message dicts."""
        if not messages:
            return 0
        return await self.client.count_tokens(model=self.model, messages=messages)

    @trace_method
    @async_redis_cache(
        key_func=lambda self,
        tools: f"anthropic_tool_tokens:{self.model}:{hashlib.sha256(json.dumps([t.model_dump() for t in tools], sort_keys=True).encode()).hexdigest()[:16]}",
        prefix="token_counter",
        ttl_s=3600,  # cache for 1 hour
    )
    async def count_tool_tokens(self, tools: List[OpenAITool]) -> int:
        """Count tokens consumed by tool definitions."""
        if not tools:
            return 0
        return await self.client.count_tokens(model=self.model, tools=tools)

    def convert_messages(self, messages: List[Any]) -> List[Dict[str, Any]]:
        # Render Letta messages into Anthropic's message-dict format.
        return Message.to_anthropic_dicts_from_list(messages, current_model=self.model)
class ApproxTokenCounter(TokenCounter):
    """Fast approximate token counter using byte-based heuristic (bytes / 4).
    This is the same approach codex-cli uses - a simple approximation that assumes
    ~4 bytes per token on average for English text. Much faster than tiktoken
    and doesn't require loading tokenizer models into memory.
    Just serializes the input to JSON and divides byte length by 4.
    """

    APPROX_BYTES_PER_TOKEN = 4

    def __init__(self, model: str | None = None):
        # No tokenizer is loaded, so the model name is informational only.
        self.model = model

    def _approx_token_count(self, text: str) -> int:
        """Approximate token count: ceil(byte_len / 4)"""
        if not text:
            return 0
        size = len(text.encode("utf-8"))
        # Ceiling division via divmod keeps the arithmetic explicit.
        quotient, remainder = divmod(size, self.APPROX_BYTES_PER_TOKEN)
        return quotient + (1 if remainder else 0)

    async def count_text_tokens(self, text: str) -> int:
        return self._approx_token_count(text) if text else 0

    async def count_message_tokens(self, messages: List[Dict[str, Any]]) -> int:
        if not messages:
            return 0
        return self._approx_token_count(json.dumps(messages))

    async def count_tool_tokens(self, tools: List[OpenAITool]) -> int:
        if not tools:
            return 0
        serialized = json.dumps([tool.model_dump() for tool in tools])
        return self._approx_token_count(serialized)

    def convert_messages(self, messages: List[Any]) -> List[Dict[str, Any]]:
        return Message.to_openai_dicts_from_list(messages)
class GeminiTokenCounter(TokenCounter):
    """Token counter using Google's Gemini token counting API.

    All counting calls are cached in Redis for one hour, keyed by model plus a
    SHA-256 digest of the input, to avoid repeated round-trips to the API.
    """

    def __init__(self, gemini_client: GoogleVertexClient, model: str):
        # Client used for Gemini count_tokens requests.
        self.client = gemini_client
        # Model name forwarded on every counting request (also part of cache keys).
        self.model = model

    @trace_method
    @async_redis_cache(
        key_func=lambda self, text: f"gemini_text_tokens:{self.model}:{hashlib.sha256(text.encode()).hexdigest()[:16]}",
        prefix="token_counter",
        ttl_s=3600,  # cache for 1 hour
    )
    async def count_text_tokens(self, text: str) -> int:
        """Count tokens in a raw string; empty input short-circuits to 0."""
        if not text:
            return 0
        # For text counting, wrap in a simple user message format for Google
        return await self.client.count_tokens(model=self.model, messages=[{"role": "user", "parts": [{"text": text}]}])

    @trace_method
    @async_redis_cache(
        key_func=lambda self,
        messages: f"gemini_message_tokens:{self.model}:{hashlib.sha256(json.dumps(messages, sort_keys=True).encode()).hexdigest()[:16]}",
        prefix="token_counter",
        ttl_s=3600,  # cache for 1 hour
    )
    async def count_message_tokens(self, messages: List[Dict[str, Any]]) -> int:
        """Count tokens for a list of Google-format message dicts."""
        if not messages:
            return 0
        return await self.client.count_tokens(model=self.model, messages=messages)

    @trace_method
    @async_redis_cache(
        key_func=lambda self,
        tools: f"gemini_tool_tokens:{self.model}:{hashlib.sha256(json.dumps([t.model_dump() for t in tools], sort_keys=True).encode()).hexdigest()[:16]}",
        prefix="token_counter",
        ttl_s=3600,  # cache for 1 hour
    )
    async def count_tool_tokens(self, tools: List[OpenAITool]) -> int:
        """Count tokens consumed by tool definitions."""
        if not tools:
            return 0
        return await self.client.count_tokens(model=self.model, tools=tools)

    def convert_messages(self, messages: List[Any]) -> List[Dict[str, Any]]:
        # Render Letta messages into Google's content/parts dict format.
        google_messages = Message.to_google_dicts_from_list(messages, current_model=self.model)
        return google_messages
class TiktokenCounter(TokenCounter):
"""Token counter using tiktoken"""
    def __init__(self, model: str):
        # Model name used to pick the tiktoken encoding (falls back to cl100k_base
        # when tiktoken does not recognize the model).
        self.model = model
@trace_method
@async_redis_cache(
key_func=lambda self, text: f"tiktoken_text_tokens:{self.model}:{hashlib.sha256(text.encode()).hexdigest()[:16]}",
prefix="token_counter",
ttl_s=3600, # cache for 1 hour
)
async def count_text_tokens(self, text: str) -> int:
from letta.log import get_logger
logger = get_logger(__name__)
if not text:
return 0
text_length = len(text)
text_preview = text[:100] + "..." if len(text) > 100 else text
logger.debug(f"TiktokenCounter.count_text_tokens: model={self.model}, text_length={text_length}, preview={repr(text_preview)}")
try:
import tiktoken
try:
encoding = tiktoken.encoding_for_model(self.model)
except KeyError:
logger.debug(f"Model {self.model} not found in tiktoken. Using cl100k_base encoding.")
encoding = tiktoken.get_encoding("cl100k_base")
result = len(encoding.encode(text))
logger.debug(f"TiktokenCounter.count_text_tokens: completed successfully, tokens={result}")
return result
except Exception as e:
logger.error(f"TiktokenCounter.count_text_tokens: FAILED with {type(e).__name__}: {e}, text_length={text_length}")
raise
@trace_method
@async_redis_cache(
key_func=lambda self,
messages: f"tiktoken_message_tokens:{self.model}:{hashlib.sha256(json.dumps(messages, sort_keys=True).encode()).hexdigest()[:16]}",
prefix="token_counter",
ttl_s=3600, # cache for 1 hour
)
async def count_message_tokens(self, messages: List[Dict[str, Any]]) -> int:
from letta.log import get_logger
logger = get_logger(__name__)
if not messages:
return 0
num_messages = len(messages)
total_content_length = sum(len(str(m.get("content", ""))) for m in messages)
logger.debug(
f"TiktokenCounter.count_message_tokens: model={self.model}, num_messages={num_messages}, total_content_length={total_content_length}"
)
try:
from letta.local_llm.utils import num_tokens_from_messages
result = num_tokens_from_messages(messages=messages, model=self.model)
logger.debug(f"TiktokenCounter.count_message_tokens: completed successfully, tokens={result}")
return result
except Exception as e:
logger.error(f"TiktokenCounter.count_message_tokens: FAILED with {type(e).__name__}: {e}, num_messages={num_messages}")
raise
@trace_method
@async_redis_cache(
key_func=lambda self,
tools: f"tiktoken_tool_tokens:{self.model}:{hashlib.sha256(json.dumps([t.model_dump() for t in tools], sort_keys=True).encode()).hexdigest()[:16]}",
prefix="token_counter",
ttl_s=3600, # cache for 1 hour
)
async def count_tool_tokens(self, tools: List[OpenAITool]) -> int:
if not tools:
return 0
from letta.local_llm.utils import num_tokens_from_functions
# Extract function definitions from OpenAITool objects
functions = [t.function.model_dump() for t in tools]
return num_tokens_from_functions(functions=functions, model=self.model)
def convert_messages(self, messages: List[Any]) -> List[Dict[str, Any]]:
return Message.to_openai_dicts_from_list(messages)
def create_token_counter(
    model_endpoint_type: ProviderType,
    model: Optional[str] = None,
    actor: Optional["User"] = None,  # fix: annotated Optional to match the None default
    agent_id: Optional[str] = None,
) -> "TokenCounter":
    """
    Factory function to create the appropriate token counter based on model configuration.

    Args:
        model_endpoint_type: Provider type of the model endpoint.
        model: Model name to count tokens for (may be None).
        actor: User on whose behalf provider clients are created.
        agent_id: Optional agent id, used only for log context.

    Returns:
        The appropriate TokenCounter instance
    """
    from letta.llm_api.llm_client import LLMClient
    from letta.settings import settings

    # Use Gemini token counter for Google Vertex and Google AI
    use_gemini = model_endpoint_type in ("google_vertex", "google_ai")

    # Use Anthropic token counter only for Anthropic endpoints
    # (the previous comment described a PRODUCTION/api-key condition that the
    # code does not implement).
    use_anthropic = model_endpoint_type == "anthropic"

    if use_gemini:
        client = LLMClient.create(provider_type=model_endpoint_type, actor=actor)
        token_counter = GeminiTokenCounter(client, model)
        logger.debug(
            f"Using GeminiTokenCounter for agent_id={agent_id}, model={model}, "
            f"model_endpoint_type={model_endpoint_type}, "
            f"environment={settings.environment}"
        )
    elif use_anthropic:
        anthropic_client = LLMClient.create(provider_type=ProviderType.anthropic, actor=actor)
        counter_model = model if model_endpoint_type == "anthropic" else None
        token_counter = AnthropicTokenCounter(anthropic_client, counter_model)
        logger.debug(
            f"Using AnthropicTokenCounter for agent_id={agent_id}, model={counter_model}, "
            f"model_endpoint_type={model_endpoint_type}, "
            f"environment={settings.environment}"
        )
    else:
        # Fallback: heuristic counter that needs no provider client
        token_counter = ApproxTokenCounter()
        logger.debug(
            f"Using ApproxTokenCounter for agent_id={agent_id}, model={model}, "
            f"model_endpoint_type={model_endpoint_type}, "
            f"environment={settings.environment}"
        )

    return token_counter
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/context_window_calculator/token_counter.py",
"license": "Apache License 2.0",
"lines": 257,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/services/file_manager.py | import os
from datetime import datetime, timedelta, timezone
from typing import List, Optional
from sqlalchemy import func, select, update
from sqlalchemy.dialects.postgresql import insert as pg_insert
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import selectinload
from letta.constants import MAX_FILENAME_LENGTH
from letta.helpers.pinecone_utils import list_pinecone_index_for_files, should_use_pinecone
from letta.log import get_logger
from letta.orm.errors import NoResultFound
from letta.orm.file import FileContent as FileContentModel, FileMetadata as FileMetadataModel
from letta.orm.sqlalchemy_base import AccessType
from letta.otel.tracing import trace_method
from letta.schemas.enums import FileProcessingStatus, PrimitiveType
from letta.schemas.file import FileMetadata as PydanticFileMetadata
from letta.schemas.source import Source as PydanticSource
from letta.schemas.source_metadata import FileStats, OrganizationSourcesStats, SourceStats
from letta.schemas.user import User as PydanticUser
from letta.server.db import db_registry
from letta.settings import settings
from letta.utils import bounded_gather, enforce_types
from letta.validators import raise_on_invalid_id
logger = get_logger(__name__)
class DuplicateFileError(Exception):
    """Raised when a duplicate file is encountered and error handling is specified"""

    def __init__(self, filename: str, source_name: str):
        self.filename = filename
        self.source_name = source_name
        # Bug fix: the message previously contained the literal text "(unknown)"
        # instead of interpolating the offending filename.
        super().__init__(f"File '{filename}' already exists in source '{source_name}'")
class FileManager:
    """Manager class to handle business logic related to files."""

    async def _invalidate_file_caches(
        self, file_id: str, actor: PydanticUser, original_filename: str | None = None, source_id: str | None = None
    ):
        """Invalidate all caches related to a file.

        Currently a no-op: the redis caching on the read paths is disabled, so
        the invalidation calls are kept below, commented out, as reference for
        when caching is re-enabled.
        """
        # TEMPORARILY DISABLED - caching is disabled
        # # invalidate file content cache (all variants)
        # await self.get_file_by_id.cache_invalidate(self, file_id, actor, include_content=True)
        # await self.get_file_by_id.cache_invalidate(self, file_id, actor, include_content=False)
        # # invalidate filename-based cache if we have the info
        # if original_filename and source_id:
        #     await self.get_file_by_original_name_and_source.cache_invalidate(self, original_filename, source_id, actor)
    @enforce_types
    @trace_method
    async def create_file(
        self,
        file_metadata: PydanticFileMetadata,
        actor: PydanticUser,
        *,
        text: Optional[str] = None,
    ) -> PydanticFileMetadata:
        """Create a file row (and optionally its content row), idempotently.

        If a file with the same id already exists it is returned unchanged.
        Metadata and content are inserted in one transaction; a lost create
        race (IntegrityError) is resolved by re-reading the winner's row.
        """
        # short-circuit if it already exists
        try:
            existing = await self.get_file_by_id(file_metadata.id, actor=actor)
        except NoResultFound:
            existing = None

        if existing:
            return existing

        async with db_registry.async_session() as session:
            try:
                file_metadata.organization_id = actor.organization_id
                file_orm = FileMetadataModel(**file_metadata.model_dump(to_orm=True, exclude_none=True))
                await file_orm.create_async(session, actor=actor, no_commit=True)

                if text is not None:
                    content_orm = FileContentModel(file_id=file_orm.id, text=text)
                    await content_orm.create_async(session, actor=actor, no_commit=True)

                # single commit covers both the metadata and the content row
                await session.commit()
                await session.refresh(file_orm)

                # invalidate cache for this new file
                await self._invalidate_file_caches(file_orm.id, actor, file_orm.original_file_name, file_orm.source_id)
                return file_orm.to_pydantic()
            except IntegrityError:
                # another writer inserted the same id concurrently — return theirs
                await session.rollback()
                return await self.get_file_by_id(file_metadata.id, actor=actor)
    @enforce_types
    @raise_on_invalid_id(param_name="file_id", expected_prefix=PrimitiveType.FILE)
    @trace_method
    # @async_redis_cache(
    #     key_func=lambda self, file_id, actor, include_content=False, strip_directory_prefix=False: f"{file_id}:{actor.organization_id}:{include_content}:{strip_directory_prefix}",
    #     prefix="file_content",
    #     ttl_s=3600,
    #     model_class=PydanticFileMetadata,
    # )
    async def get_file_by_id(
        self, file_id: str, actor: PydanticUser, *, include_content: bool = False, strip_directory_prefix: bool = False
    ) -> Optional[PydanticFileMetadata]:
        """Retrieve a file by its ID.

        If `include_content=True`, the FileContent relationship is eagerly
        loaded so `to_pydantic(include_content=True)` never triggers a
        lazy SELECT (avoids MissingGreenlet).

        Returns None when the file does not exist or is not visible to the actor.
        """
        async with db_registry.async_session() as session:
            if include_content:
                # explicit eager load of the content relationship
                query = select(FileMetadataModel).where(FileMetadataModel.id == file_id).options(selectinload(FileMetadataModel.content))

                # apply org-scoping if actor provided
                if actor:
                    query = FileMetadataModel.apply_access_predicate(
                        query,
                        actor,
                        access=["read"],
                        access_type=AccessType.ORGANIZATION,
                    )

                result = await session.execute(query)
                file_orm = result.scalar_one_or_none()
            else:
                # fast path (metadata only)
                try:
                    file_orm = await FileMetadataModel.read_async(
                        db_session=session,
                        identifier=file_id,
                        actor=actor,
                    )
                except NoResultFound:
                    return None

            if file_orm is None:
                return None

            return await file_orm.to_pydantic_async(include_content=include_content, strip_directory_prefix=strip_directory_prefix)
    @enforce_types
    @raise_on_invalid_id(param_name="file_id", expected_prefix=PrimitiveType.FILE)
    @trace_method
    async def update_file_status(
        self,
        *,
        file_id: str,
        actor: PydanticUser,
        processing_status: Optional[FileProcessingStatus] = None,
        error_message: Optional[str] = None,
        total_chunks: Optional[int] = None,
        chunks_embedded: Optional[int] = None,
        enforce_state_transitions: bool = True,
    ) -> Optional[PydanticFileMetadata]:
        """
        Update processing_status, error_message, total_chunks, and/or chunks_embedded on a FileMetadata row.

        Enforces state transition rules (when enforce_state_transitions=True):
        - PENDING -> PARSING -> EMBEDDING -> COMPLETED (normal flow)
        - Any non-terminal state -> ERROR
        - Same-state transitions are allowed (e.g., EMBEDDING -> EMBEDDING)
        - ERROR and COMPLETED are terminal (no status transitions allowed, metadata updates blocked)

        Args:
            file_id: ID of the file to update
            actor: User performing the update
            processing_status: New processing status to set
            error_message: Error message to set (if any)
            total_chunks: Total number of chunks in the file
            chunks_embedded: Number of chunks already embedded
            enforce_state_transitions: Whether to enforce state transition rules (default: True).
                Set to False to bypass validation for testing or special cases.

        Returns:
            Updated file metadata, or None if the update was blocked

        * 1st round-trip -> UPDATE with optional state validation
        * 2nd round-trip -> SELECT fresh row (same as read_async) if update succeeded
        """
        if processing_status is None and error_message is None and total_chunks is None and chunks_embedded is None:
            raise ValueError("Nothing to update")

        # validate that ERROR status must have an error message
        if processing_status == FileProcessingStatus.ERROR and not error_message:
            raise ValueError("Error message is required when setting processing status to ERROR")

        # NOTE(review): datetime.utcnow() is naive and deprecated in 3.12 — kept
        # as-is since the column's tz handling elsewhere depends on it; confirm
        # before migrating to datetime.now(timezone.utc).
        values: dict[str, object] = {"updated_at": datetime.utcnow()}
        if processing_status is not None:
            values["processing_status"] = processing_status
        if error_message is not None:
            values["error_message"] = error_message
        if total_chunks is not None:
            values["total_chunks"] = total_chunks
        if chunks_embedded is not None:
            values["chunks_embedded"] = chunks_embedded

        # validate state transitions before making any database calls
        if enforce_state_transitions and processing_status == FileProcessingStatus.PENDING:
            # PENDING cannot be set after initial creation
            raise ValueError(f"Cannot transition to PENDING state for file {file_id} - PENDING is only valid as initial state")

        async with db_registry.async_session() as session:
            # build where conditions; the transition rules below are encoded as
            # extra WHERE predicates so the UPDATE is atomic (no read-then-write race)
            where_conditions = [
                FileMetadataModel.id == file_id,
                FileMetadataModel.organization_id == actor.organization_id,
            ]

            # only add state transition validation if enforce_state_transitions is True
            if enforce_state_transitions and processing_status is not None:
                # enforce specific transitions based on target status
                if processing_status == FileProcessingStatus.PARSING:
                    where_conditions.append(
                        FileMetadataModel.processing_status.in_([FileProcessingStatus.PENDING, FileProcessingStatus.PARSING])
                    )
                elif processing_status == FileProcessingStatus.EMBEDDING:
                    where_conditions.append(
                        FileMetadataModel.processing_status.in_([FileProcessingStatus.PARSING, FileProcessingStatus.EMBEDDING])
                    )
                elif processing_status == FileProcessingStatus.COMPLETED:
                    where_conditions.append(
                        FileMetadataModel.processing_status.in_([FileProcessingStatus.EMBEDDING, FileProcessingStatus.COMPLETED])
                    )
                elif processing_status == FileProcessingStatus.ERROR:
                    # ERROR can be set from any non-terminal state
                    where_conditions.append(
                        FileMetadataModel.processing_status.notin_([FileProcessingStatus.ERROR, FileProcessingStatus.COMPLETED])
                    )
            elif enforce_state_transitions and processing_status is None:
                # If only updating metadata fields (not status), prevent updates to terminal states
                where_conditions.append(
                    FileMetadataModel.processing_status.notin_([FileProcessingStatus.ERROR, FileProcessingStatus.COMPLETED])
                )

            # fast in-place update with state validation
            stmt = (
                update(FileMetadataModel)
                .where(*where_conditions)
                .values(**values)
                .returning(FileMetadataModel.id)  # return id if update succeeded
            )
            result = await session.execute(stmt)
            updated_id = result.scalar()

            if not updated_id:
                # update was blocked
                await session.commit()
                if enforce_state_transitions:
                    # update was blocked by state transition rules - raise error
                    # fetch current state to provide informative error
                    current_file = await FileMetadataModel.read_async(
                        db_session=session,
                        identifier=file_id,
                        actor=actor,
                    )
                    current_status = current_file.processing_status

                    # build informative error message
                    if processing_status is not None:
                        if current_status in [FileProcessingStatus.ERROR, FileProcessingStatus.COMPLETED]:
                            raise ValueError(
                                f"Cannot update file {file_id} status from terminal state {current_status} to {processing_status}"
                            )
                        else:
                            raise ValueError(f"Invalid state transition for file {file_id}: {current_status} -> {processing_status}")
                    else:
                        raise ValueError(f"Cannot update file {file_id} in terminal state {current_status}")
                else:
                    # validation was bypassed but update still failed (e.g., file doesn't exist)
                    return None

            await session.commit()

            # invalidate cache for this file
            await self._invalidate_file_caches(file_id, actor)

            # reload via normal accessor so we return a fully-attached object
            file_orm = await FileMetadataModel.read_async(
                db_session=session,
                identifier=file_id,
                actor=actor,
            )
            return file_orm.to_pydantic()
    @enforce_types
    @trace_method
    async def check_and_update_file_status(
        self,
        file_metadata: PydanticFileMetadata,
        actor: PydanticUser,
    ) -> PydanticFileMetadata:
        """
        Check and update file status for timeout and embedding completion.

        This method consolidates logic for:
        1. Checking if a file has timed out during processing
        2. Checking Pinecone embedding status and updating counts

        Args:
            file_metadata: The file metadata to check
            actor: User performing the check

        Returns:
            Updated file metadata with current status
        """
        # check for timeout if status is not terminal
        if not file_metadata.processing_status.is_terminal_state():
            if file_metadata.created_at:
                # handle timezone differences between PostgreSQL (timezone-aware) and SQLite (timezone-naive)
                # NOTE(review): both branches assign file_created_at identically;
                # only the threshold's tz-awareness differs between backends.
                if settings.letta_pg_uri_no_default:
                    # postgresql: both datetimes are timezone-aware
                    timeout_threshold = datetime.now(timezone.utc) - timedelta(minutes=settings.file_processing_timeout_minutes)
                    file_created_at = file_metadata.created_at
                else:
                    # sqlite: both datetimes should be timezone-naive
                    timeout_threshold = datetime.utcnow() - timedelta(minutes=settings.file_processing_timeout_minutes)
                    file_created_at = file_metadata.created_at

                if file_created_at < timeout_threshold:
                    # move file to error status with timeout message
                    timeout_message = settings.file_processing_timeout_error_message.format(settings.file_processing_timeout_minutes)
                    try:
                        file_metadata = await self.update_file_status(
                            file_id=file_metadata.id,
                            actor=actor,
                            processing_status=FileProcessingStatus.ERROR,
                            error_message=timeout_message,
                        )
                    except ValueError as e:
                        # state transition was blocked - log it but don't fail
                        logger.warning(f"Could not update file to timeout error state: {str(e)}")
                        # continue with existing file_metadata

        # check pinecone embedding status
        if should_use_pinecone() and file_metadata.processing_status == FileProcessingStatus.EMBEDDING:
            ids = await list_pinecone_index_for_files(file_id=file_metadata.id, actor=actor)
            logger.info(
                f"Embedded chunks {len(ids)}/{file_metadata.total_chunks} for {file_metadata.id} ({file_metadata.file_name}) in organization {actor.organization_id}"
            )
            # only touch the row when the embedded count changed or everything is done
            if len(ids) != file_metadata.chunks_embedded or len(ids) == file_metadata.total_chunks:
                if len(ids) != file_metadata.total_chunks:
                    # still embedding: keep the current status, just refresh the count
                    file_status = file_metadata.processing_status
                else:
                    file_status = FileProcessingStatus.COMPLETED
                try:
                    file_metadata = await self.update_file_status(
                        file_id=file_metadata.id, actor=actor, chunks_embedded=len(ids), processing_status=file_status
                    )
                except ValueError as e:
                    # state transition was blocked - this is a race condition
                    # log it but don't fail since we're just checking status
                    logger.warning(f"Race condition detected in check_and_update_file_status: {str(e)}")
                    # return the current file state without updating

        return file_metadata
    @enforce_types
    @raise_on_invalid_id(param_name="file_id", expected_prefix=PrimitiveType.FILE)
    @trace_method
    async def upsert_file_content(
        self,
        *,
        file_id: str,
        text: str,
        actor: PydanticUser,
    ) -> PydanticFileMetadata:
        """Insert or replace the full text content of an existing file.

        Uses a native ON CONFLICT upsert on PostgreSQL and a select-then-
        insert/update emulation on other dialects (e.g. SQLite). Returns the
        file metadata with content included.
        """
        async with db_registry.async_session() as session:
            # raises if the file doesn't exist / isn't visible to the actor
            await FileMetadataModel.read_async(session, file_id, actor)

            dialect_name = session.bind.dialect.name

            if dialect_name == "postgresql":
                # native upsert keyed on file_id
                stmt = (
                    pg_insert(FileContentModel)
                    .values(file_id=file_id, text=text)
                    .on_conflict_do_update(
                        index_elements=[FileContentModel.file_id],
                        set_={"text": text},
                    )
                )
                await session.execute(stmt)
            else:
                # Emulate upsert for SQLite and others
                stmt = select(FileContentModel).where(FileContentModel.file_id == file_id)
                result = await session.execute(stmt)
                existing = result.scalar_one_or_none()

                if existing:
                    await session.execute(update(FileContentModel).where(FileContentModel.file_id == file_id).values(text=text))
                else:
                    session.add(FileContentModel(file_id=file_id, text=text))

            await session.commit()

            # invalidate cache for this file since content changed
            await self._invalidate_file_caches(file_id, actor)

            # Reload with content eagerly loaded so serialization won't lazy-load
            query = select(FileMetadataModel).options(selectinload(FileMetadataModel.content)).where(FileMetadataModel.id == file_id)
            result = await session.execute(query)
            return await result.scalar_one().to_pydantic_async(include_content=True)
    @enforce_types
    @raise_on_invalid_id(param_name="source_id", expected_prefix=PrimitiveType.SOURCE)
    @trace_method
    async def list_files(
        self,
        source_id: str,
        actor: PydanticUser,
        before: Optional[str] = None,
        after: Optional[str] = None,
        limit: Optional[int] = 1000,
        ascending: Optional[bool] = True,
        include_content: bool = False,
        strip_directory_prefix: bool = False,
        check_status_updates: bool = False,
    ) -> List[PydanticFileMetadata]:
        """List all files with optional pagination and status checking.

        Args:
            source_id: Source to list files from
            actor: User performing the request
            before: Before filter
            after: Pagination cursor
            limit: Maximum number of files to return
            ascending: Sort by ascending or descending order
            include_content: Whether to include file content
            strip_directory_prefix: Whether to strip directory prefix from filenames
            check_status_updates: Whether to check and update status for timeout and embedding completion

        Returns:
            List of file metadata
        """
        async with db_registry.async_session() as session:
            # eager-load content only when it will actually be serialized
            options = [selectinload(FileMetadataModel.content)] if include_content else None

            files = await FileMetadataModel.list_async(
                db_session=session,
                before=before,
                after=after,
                limit=limit,
                ascending=ascending,
                organization_id=actor.organization_id,
                source_id=source_id,
                query_options=options,
            )

            # convert all files to pydantic models
            if include_content:
                file_metadatas = await bounded_gather(
                    [
                        file.to_pydantic_async(include_content=include_content, strip_directory_prefix=strip_directory_prefix)
                        for file in files
                    ]
                )
            else:
                file_metadatas = [file.to_pydantic(strip_directory_prefix=strip_directory_prefix) for file in files]

        # if status checking is enabled, check all files sequentially to avoid db pool exhaustion
        # Each status check may update the file in the database, so concurrent checks with many
        # files can create too many simultaneous database connections
        if check_status_updates:
            updated_file_metadatas = []
            for file_metadata in file_metadatas:
                updated_metadata = await self.check_and_update_file_status(file_metadata, actor)
                updated_file_metadatas.append(updated_metadata)
            file_metadatas = updated_file_metadatas

        return file_metadatas
@enforce_types
@raise_on_invalid_id(param_name="file_id", expected_prefix=PrimitiveType.FILE)
@trace_method
async def delete_file(self, file_id: str, actor: PydanticUser) -> PydanticFileMetadata:
"""Delete a file by its ID."""
async with db_registry.async_session() as session:
file = await FileMetadataModel.read_async(db_session=session, identifier=file_id, actor=actor)
# invalidate cache for this file before deletion
await self._invalidate_file_caches(file_id, actor, file.original_file_name, file.source_id)
await file.hard_delete_async(db_session=session, actor=actor)
return file.to_pydantic()
@enforce_types
@trace_method
async def generate_unique_filename(self, original_filename: str, source: PydanticSource, organization_id: str) -> str:
"""
Generate a unique filename by adding a numeric suffix if duplicates exist.
Always returns a unique filename - does not handle duplicate policies.
Parameters:
original_filename (str): The original filename as uploaded.
source (PydanticSource): Source to check for duplicates within.
organization_id (str): Organization ID to check for duplicates within.
Returns:
str: A unique filename with source.name prefix and numeric suffix if needed.
"""
base, ext = os.path.splitext(original_filename)
# Reserve space for potential suffix: " (999)" = 6 characters
max_base_length = MAX_FILENAME_LENGTH - len(ext) - 6
if len(base) > max_base_length:
base = base[:max_base_length]
original_filename = f"{base}{ext}"
async with db_registry.async_session() as session:
# Count existing files with the same original_file_name in this source
query = select(func.count(FileMetadataModel.id)).where(
FileMetadataModel.original_file_name == original_filename,
FileMetadataModel.source_id == source.id,
FileMetadataModel.organization_id == organization_id,
FileMetadataModel.is_deleted == False,
)
result = await session.execute(query)
count = result.scalar() or 0
if count == 0:
# No duplicates, return original filename with source.name
return f"{source.name}/{original_filename}"
else:
# Add numeric suffix to make unique
return f"{source.name}/{base}_({count}){ext}"
@enforce_types
@raise_on_invalid_id(param_name="source_id", expected_prefix=PrimitiveType.SOURCE)
@trace_method
# @async_redis_cache(
# key_func=lambda self, original_filename, source_id, actor: f"{original_filename}:{source_id}:{actor.organization_id}",
# prefix="file_by_name",
# ttl_s=3600,
# model_class=PydanticFileMetadata,
# )
async def get_file_by_original_name_and_source(
self, original_filename: str, source_id: str, actor: PydanticUser
) -> Optional[PydanticFileMetadata]:
"""
Get a file by its original filename and source ID.
Parameters:
original_filename (str): The original filename to search for.
source_id (str): The source ID to search within.
actor (PydanticUser): The actor performing the request.
Returns:
Optional[PydanticFileMetadata]: The file metadata if found, None otherwise.
"""
async with db_registry.async_session() as session:
query = (
select(FileMetadataModel)
.where(
FileMetadataModel.original_file_name == original_filename,
FileMetadataModel.source_id == source_id,
FileMetadataModel.organization_id == actor.organization_id,
FileMetadataModel.is_deleted == False,
)
.limit(1)
)
result = await session.execute(query)
file_orm = result.scalar_one_or_none()
if file_orm:
return file_orm.to_pydantic()
return None
    @enforce_types
    @trace_method
    async def get_organization_sources_metadata(
        self, actor: PydanticUser, include_detailed_per_source_metadata: bool = False
    ) -> OrganizationSourcesStats:
        """
        Get aggregated metadata for all sources in an organization with optimized queries.

        Returns structured metadata including:
        - Total number of sources
        - Total number of files across all sources
        - Total size of all files
        - Per-source breakdown with file details (if include_detailed_per_source_metadata is True)
        """
        async with db_registry.async_session() as session:
            # Import here to avoid circular imports
            from letta.orm.source import Source as SourceModel

            # Single optimized query to get all sources with their file aggregations;
            # the outer join keeps sources that have no files (count 0, size 0)
            query = (
                select(
                    SourceModel.id,
                    SourceModel.name,
                    func.count(FileMetadataModel.id).label("file_count"),
                    func.coalesce(func.sum(FileMetadataModel.file_size), 0).label("total_size"),
                )
                .outerjoin(FileMetadataModel, (FileMetadataModel.source_id == SourceModel.id) & (FileMetadataModel.is_deleted == False))
                .where(SourceModel.organization_id == actor.organization_id)
                .where(SourceModel.is_deleted == False)
                .group_by(SourceModel.id, SourceModel.name)
                .order_by(SourceModel.name)
            )

            result = await session.execute(query)
            source_aggregations = result.fetchall()

            # Build response
            metadata = OrganizationSourcesStats()

            for row in source_aggregations:
                source_id, source_name, file_count, total_size = row

                if include_detailed_per_source_metadata:
                    # Get individual file details for this source (one extra query per source)
                    files_query = (
                        select(FileMetadataModel.id, FileMetadataModel.file_name, FileMetadataModel.file_size)
                        .where(
                            FileMetadataModel.source_id == source_id,
                            FileMetadataModel.organization_id == actor.organization_id,
                            FileMetadataModel.is_deleted == False,
                        )
                        .order_by(FileMetadataModel.file_name)
                    )

                    files_result = await session.execute(files_query)
                    files_rows = files_result.fetchall()

                    # Build file stats
                    files = [FileStats(file_id=file_row[0], file_name=file_row[1], file_size=file_row[2]) for file_row in files_rows]

                    # Build source metadata
                    source_metadata = SourceStats(
                        source_id=source_id, source_name=source_name, file_count=file_count, total_size=total_size, files=files
                    )

                    metadata.sources.append(source_metadata)

                # org-wide totals accumulate whether or not details are included
                metadata.total_files += file_count
                metadata.total_size += total_size

            metadata.total_sources = len(source_aggregations)
            return metadata
@enforce_types
@trace_method
async def get_files_by_ids_async(
self, file_ids: List[str], actor: PydanticUser, *, include_content: bool = False
) -> List[PydanticFileMetadata]:
"""
Get multiple files by their IDs in a single query.
Args:
file_ids: List of file IDs to retrieve
actor: User performing the action
include_content: Whether to include file content in the response
Returns:
List[PydanticFileMetadata]: List of files (may be fewer than requested if some don't exist)
"""
if not file_ids:
return []
async with db_registry.async_session() as session:
query = select(FileMetadataModel).where(
FileMetadataModel.id.in_(file_ids),
FileMetadataModel.organization_id == actor.organization_id,
FileMetadataModel.is_deleted == False,
)
# Eagerly load content if requested
if include_content:
query = query.options(selectinload(FileMetadataModel.content))
result = await session.execute(query)
files_orm = result.scalars().all()
if include_content:
return await bounded_gather([file.to_pydantic_async(include_content=include_content) for file in files_orm])
else:
return [file.to_pydantic() for file in files_orm]
@enforce_types
@trace_method
async def get_files_for_agents_async(
self, agent_ids: List[str], actor: PydanticUser, *, include_content: bool = False
) -> List[PydanticFileMetadata]:
"""
Get all files associated with the given agents via file-agent relationships.
Args:
agent_ids: List of agent IDs to find files for
actor: User performing the action
include_content: Whether to include file content in the response
Returns:
List[PydanticFileMetadata]: List of unique files associated with these agents
"""
if not agent_ids:
return []
async with db_registry.async_session() as session:
# We need to import FileAgent here to avoid circular imports
from letta.orm.files_agents import FileAgent as FileAgentModel
# Join through file-agent relationships
query = (
select(FileMetadataModel)
.join(FileAgentModel, FileMetadataModel.id == FileAgentModel.file_id)
.where(
FileAgentModel.agent_id.in_(agent_ids),
FileMetadataModel.organization_id == actor.organization_id,
FileMetadataModel.is_deleted == False,
FileAgentModel.is_deleted == False,
)
.distinct() # Ensure we don't get duplicate files
)
# Eagerly load content if requested
if include_content:
query = query.options(selectinload(FileMetadataModel.content))
result = await session.execute(query)
files_orm = result.scalars().all()
if include_content:
return await bounded_gather([file.to_pydantic_async(include_content=include_content) for file in files_orm])
else:
return [file.to_pydantic() for file in files_orm]
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/file_manager.py",
"license": "Apache License 2.0",
"lines": 626,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/services/file_processor/chunker/line_chunker.py | import re
from typing import List, Optional
from letta.log import get_logger
from letta.schemas.file import FileMetadata
from letta.services.file_processor.file_types import ChunkingStrategy, file_type_registry
logger = get_logger(__name__)
class LineChunker:
    """Content-aware line chunker that adapts chunking strategy based on file type"""

    def __init__(self):
        # Registry mapping MIME types / file extensions to chunking strategies.
        self.file_type_registry = file_type_registry
def _determine_chunking_strategy(self, file_metadata: FileMetadata) -> ChunkingStrategy:
"""Determine the best chunking strategy based on file metadata"""
# Try to get strategy from MIME type first
if file_metadata.file_type:
try:
return self.file_type_registry.get_chunking_strategy_by_mime_type(file_metadata.file_type)
except Exception:
pass
# Fallback to filename extension
if file_metadata.file_name:
try:
# Extract extension from filename
import os
_, ext = os.path.splitext(file_metadata.file_name)
if ext:
return self.file_type_registry.get_chunking_strategy_by_extension(ext)
except Exception:
pass
# Default fallback
return ChunkingStrategy.LINE_BASED
def _chunk_by_lines(self, text: str, preserve_indentation: bool = False) -> List[str]:
"""Traditional line-based chunking for code and structured data"""
# early stop, can happen if the there's nothing on a specific file
if not text:
return []
lines = []
for line in text.splitlines():
if preserve_indentation:
# For code: preserve leading whitespace (indentation), remove trailing whitespace
line = line.rstrip()
# Only skip completely empty lines
if line:
lines.append(line)
else:
# For structured data: strip all whitespace
line = line.strip()
if line:
lines.append(line)
return lines
def _chunk_by_sentences(self, text: str) -> List[str]:
"""Sentence-based chunking for documentation and markup"""
# early stop, can happen if the there's nothing on a specific file
if not text:
return []
# Simple sentence splitting on periods, exclamation marks, and question marks
# followed by whitespace or end of string
sentence_pattern = r"(?<=[.!?])\s+(?=[A-Z])"
# Split text into sentences
sentences = re.split(sentence_pattern, text.strip())
# Clean up sentences - remove extra whitespace and empty sentences
cleaned_sentences = []
for sentence in sentences:
sentence = re.sub(r"\s+", " ", sentence.strip()) # Normalize whitespace
if sentence:
cleaned_sentences.append(sentence)
return cleaned_sentences
def _chunk_by_characters(self, text: str, target_line_length: int = 100) -> List[str]:
"""Character-based wrapping for prose text"""
# early stop, can happen if the there's nothing on a specific file
if not text:
return []
words = text.split()
lines = []
current_line = []
current_length = 0
for word in words:
# Check if adding this word would exceed the target length
word_length = len(word)
if current_length + word_length + len(current_line) > target_line_length and current_line:
# Start a new line
lines.append(" ".join(current_line))
current_line = [word]
current_length = word_length
else:
current_line.append(word)
current_length += word_length
# Add the last line if there's content
if current_line:
lines.append(" ".join(current_line))
return [line for line in lines if line.strip()]
def chunk_text(
    self,
    file_metadata: FileMetadata,
    start: Optional[int] = None,
    end: Optional[int] = None,
    add_metadata: bool = True,
    validate_range: bool = False,
) -> List[str]:
    """Content-aware text chunking based on file type.

    Args:
        file_metadata: File whose ``content`` is chunked; its type selects the strategy.
        start: Optional 0-indexed first chunk to return (inclusive).
        end: Optional 0-indexed end of the range (exclusive).
        add_metadata: When True, prepend a "[Viewing ...]" header describing the range.
        validate_range: When True, raise on start >= end instead of returning an empty slice.

    Returns:
        Chunk strings prefixed with 1-indexed positions (plus the optional header line).

    Raises:
        ValueError: If start is past the end of the file (always), or if
            start >= end and validate_range is True.
    """
    strategy = self._determine_chunking_strategy(file_metadata)
    text = file_metadata.content
    # early stop, can happen if the there's nothing on a specific file
    if not text:
        logger.warning(f"File ({file_metadata}) has no content")
        return []
    # Apply the appropriate chunking strategy
    if strategy == ChunkingStrategy.DOCUMENTATION:
        content_lines = self._chunk_by_sentences(text)
    elif strategy == ChunkingStrategy.CODE:
        content_lines = self._chunk_by_lines(text, preserve_indentation=True)
    else:  # STRUCTURED_DATA or LINE_BASED
        content_lines = self._chunk_by_lines(text, preserve_indentation=False)
    total_chunks = len(content_lines)
    # chunk_type only affects user-facing wording in headers and errors
    chunk_type = "sentences" if strategy == ChunkingStrategy.DOCUMENTATION else "lines"
    # Handle range validation and clamping
    if start is not None or end is not None:
        # Always validate that start < end if both are specified
        if start is not None and end is not None and start >= end:
            if validate_range:
                raise ValueError(f"Invalid range: start ({start}) must be less than end ({end})")
            # If validation is off, we still need to handle this case sensibly
            # but we'll allow it to proceed with an empty result
        # Always check that start is within bounds - this should error regardless of validation flag
        if start is not None and start >= total_chunks:
            raise ValueError(
                f"File {file_metadata.file_name} has only {total_chunks} {chunk_type}, but requested offset {start + 1} is out of range"
            )
        # Apply bounds checking
        if start is not None:
            start = max(0, start)  # Ensure non-negative
        # Only clamp end if it exceeds the file length
        if end is not None:
            end = min(end, total_chunks)
        # Apply slicing
        content_lines = content_lines[start:end]
        # Offset keeps the displayed numbers aligned with positions in the full file.
        line_offset = start if start is not None else 0
    else:
        line_offset = 0
    # Add line numbers for all strategies (1-indexed for user display)
    content_lines = [f"{i + line_offset + 1}: {line}" for i, line in enumerate(content_lines)]
    # Add metadata about total chunks
    if add_metadata:
        if start is not None and end is not None:
            # Display 1-indexed ranges for users
            start_display = start + 1
            end_display = end
            content_lines.insert(0, f"[Viewing {chunk_type} {start_display} to {end_display} (out of {total_chunks} {chunk_type})]")
        elif start is not None:
            # Only start specified - viewing from start to end
            start_display = start + 1
            content_lines.insert(0, f"[Viewing {chunk_type} {start_display} to end (out of {total_chunks} {chunk_type})]")
        else:
            content_lines.insert(0, f"[Viewing file start (out of {total_chunks} {chunk_type})]")
    return content_lines
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/file_processor/chunker/line_chunker.py",
"license": "Apache License 2.0",
"lines": 154,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/services/file_processor/chunker/llama_index_chunker.py | from typing import List, Optional, Union
from mistralai import OCRPageObject
from letta.log import get_logger
from letta.otel.tracing import trace_method
from letta.services.file_processor.file_types import ChunkingStrategy, file_type_registry
logger = get_logger(__name__)
class LlamaIndexChunker:
    """LlamaIndex-based text chunking with automatic splitter selection.

    Selects a node parser from the file's MIME type (code, markdown, HTML,
    JSON, or generic sentences) and falls back to a SentenceSplitter whenever
    a specialized parser cannot be constructed or fails at runtime.
    """

    # Conservative default chunk sizes for fallback scenarios
    DEFAULT_CONSERVATIVE_CHUNK_SIZE = 384
    DEFAULT_CONSERVATIVE_CHUNK_OVERLAP = 25

    def __init__(self, chunk_size: int = 512, chunk_overlap: int = 50, file_type: Optional[str] = None):
        """Build the parser appropriate for *file_type* (MIME string, may be None)."""
        self.chunk_size = chunk_size
        self.chunk_overlap = chunk_overlap
        self.file_type = file_type

        # Create appropriate parser based on file type
        self.parser = self._create_parser_for_file_type(file_type, chunk_size, chunk_overlap)

        # Log which parser was selected
        parser_name = type(self.parser).__name__
        logger.info(f"LlamaIndexChunker initialized with {parser_name} for file type: {file_type}")

    def _create_parser_for_file_type(self, file_type: Optional[str], chunk_size: int, chunk_overlap: int):
        """Create appropriate parser based on file type.

        Any failure here (unknown type, parser constructor error) degrades to a
        plain SentenceSplitter rather than raising.
        """
        if not file_type:
            # Default fallback
            from llama_index.core.node_parser import SentenceSplitter

            return SentenceSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)

        try:
            # Get chunking strategy from file type registry
            chunking_strategy = file_type_registry.get_chunking_strategy_by_mime_type(file_type)
            logger.debug(f"Chunking strategy for {file_type}: {chunking_strategy}")

            if chunking_strategy == ChunkingStrategy.CODE:
                from llama_index.core.node_parser import CodeSplitter

                # NOTE(review): CodeSplitter's signature varies across llama-index
                # versions (some take `language`/`max_chars` instead); if these
                # kwargs raise, the except below falls back to SentenceSplitter.
                return CodeSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)

            elif chunking_strategy == ChunkingStrategy.DOCUMENTATION:
                if file_type in ["text/markdown", "text/x-markdown"]:
                    from llama_index.core.node_parser import MarkdownNodeParser

                    return MarkdownNodeParser()
                elif file_type in ["text/html"]:
                    from llama_index.core.node_parser import HTMLNodeParser

                    return HTMLNodeParser()
                else:
                    # Fall back to sentence splitter for other documentation
                    from llama_index.core.node_parser import SentenceSplitter

                    return SentenceSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)

            elif chunking_strategy == ChunkingStrategy.STRUCTURED_DATA:
                if file_type in ["application/json", "application/jsonl"]:
                    from llama_index.core.node_parser import JSONNodeParser

                    return JSONNodeParser()
                else:
                    # Fall back to sentence splitter for other structured data
                    from llama_index.core.node_parser import SentenceSplitter

                    return SentenceSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)

            else:
                # Default to sentence splitter for PROSE and LINE_BASED
                from llama_index.core.node_parser import SentenceSplitter

                return SentenceSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)

        except Exception as e:
            logger.warning(f"Failed to create specialized parser for {file_type}: {str(e)}. Using default SentenceSplitter.")
            from llama_index.core.node_parser import SentenceSplitter

            return SentenceSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)

    @trace_method
    def chunk_text(self, content: Union[OCRPageObject, str]) -> List[str]:
        """Chunk text using LlamaIndex splitter.

        Accepts either a raw string or a mistral OCR page (its ``markdown`` is
        used). If the selected parser fails, retries once with a
        SentenceSplitter; if that also fails, re-raises the original error.
        """
        try:
            # Handle different input types
            if isinstance(content, OCRPageObject):
                # Extract markdown from OCR page object
                text_content = content.markdown
            else:
                # Assume it's a string
                text_content = content

            # Use the selected parser
            if hasattr(self.parser, "split_text"):
                # Most parsers have split_text method
                return self.parser.split_text(text_content)
            elif hasattr(self.parser, "get_nodes_from_documents"):
                # Some parsers need Document objects
                from llama_index.core import Document
                from llama_index.core.node_parser import SentenceSplitter

                document = Document(text=text_content)
                nodes = self.parser.get_nodes_from_documents([document])

                # Further split nodes that exceed chunk_size using SentenceSplitter
                final_chunks = []
                sentence_splitter = SentenceSplitter(chunk_size=self.chunk_size, chunk_overlap=self.chunk_overlap)

                for node in nodes:
                    if len(node.text) > self.chunk_size:
                        # Split oversized nodes with sentence splitter
                        sub_chunks = sentence_splitter.split_text(node.text)
                        final_chunks.extend(sub_chunks)
                    else:
                        final_chunks.append(node.text)

                return final_chunks
            else:
                # Fallback - try to call the parser directly
                return self.parser(text_content)

        except Exception as e:
            logger.error(f"Chunking failed with {type(self.parser).__name__}: {str(e)}")
            # Try fallback with SentenceSplitter
            try:
                logger.info("Attempting fallback to SentenceSplitter")
                from llama_index.core.node_parser import SentenceSplitter

                fallback_parser = SentenceSplitter(chunk_size=self.chunk_size, chunk_overlap=self.chunk_overlap)

                # Extract text content if needed
                if isinstance(content, OCRPageObject):
                    text_content = content.markdown
                else:
                    text_content = content

                return fallback_parser.split_text(text_content)
            except Exception as fallback_error:
                logger.error(f"Fallback chunking also failed: {str(fallback_error)}")
                raise e  # Raise the original error

    @trace_method
    def default_chunk_text(
        self, content: Union[OCRPageObject, str], chunk_size: int | None = None, chunk_overlap: int | None = None
    ) -> List[str]:
        """Chunk text using default SentenceSplitter regardless of file type with conservative defaults.

        Used as the last-resort path when the file-type-specific parser fails;
        omitted sizes fall back to the class's conservative defaults.
        """
        try:
            from llama_index.core.node_parser import SentenceSplitter

            # Use provided defaults or fallback to conservative values
            chunk_size = chunk_size if chunk_size is not None else self.DEFAULT_CONSERVATIVE_CHUNK_SIZE
            chunk_overlap = chunk_overlap if chunk_overlap is not None else self.DEFAULT_CONSERVATIVE_CHUNK_OVERLAP

            default_parser = SentenceSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)

            # Handle different input types
            if isinstance(content, OCRPageObject):
                text_content = content.markdown
            else:
                text_content = content

            return default_parser.split_text(text_content)

        except Exception as e:
            logger.error(f"Default chunking failed: {str(e)}")
            raise
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/file_processor/chunker/llama_index_chunker.py",
"license": "Apache License 2.0",
"lines": 131,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/services/file_processor/embedder/base_embedder.py | from abc import ABC, abstractmethod
from typing import List
from letta.log import get_logger
from letta.schemas.enums import VectorDBProvider
from letta.schemas.passage import Passage
from letta.schemas.user import User
logger = get_logger(__name__)
class BaseEmbedder(ABC):
    """Common interface for passage-embedding backends."""

    def __init__(self):
        # Subclasses targeting an external vector store override this default.
        self.vector_db_type = VectorDBProvider.NATIVE

    @abstractmethod
    async def generate_embedded_passages(self, file_id: str, source_id: str, chunks: List[str], actor: User) -> List[Passage]:
        """Embed *chunks* (batched, possibly concurrent) and return the resulting passages."""
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/file_processor/embedder/base_embedder.py",
"license": "Apache License 2.0",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:letta/services/file_processor/embedder/openai_embedder.py | import asyncio
import time
from typing import List, Optional, Tuple, cast
from letta.llm_api.llm_client import LLMClient
from letta.llm_api.openai_client import OpenAIClient
from letta.log import get_logger
from letta.otel.tracing import log_event, trace_method
from letta.schemas.embedding_config import EmbeddingConfig
from letta.schemas.enums import ProviderType
from letta.schemas.passage import Passage
from letta.schemas.user import User
from letta.services.file_processor.embedder.base_embedder import BaseEmbedder
from letta.settings import model_settings
logger = get_logger(__name__)
# Global semaphore shared across ALL embedding operations to prevent overwhelming OpenAI API
# This ensures that even when processing multiple files concurrently, we don't exceed rate limits
# NOTE(review): the cap of 3 concurrent requests is a conservative hard-coded value --
# confirm against the account's actual rate limits before tuning.
_GLOBAL_EMBEDDING_SEMAPHORE = asyncio.Semaphore(3)
class OpenAIEmbedder(BaseEmbedder):
    """OpenAI-based embedding generation.

    Embeds text chunks via the OpenAI embeddings endpoint and returns Passage
    objects destined for the native vector db (PostgreSQL). Requests are
    batched, run concurrently under a global semaphore, and batches that trip
    the per-request token limit are recursively split and retried.
    """

    def __init__(self, embedding_config: Optional[EmbeddingConfig] = None):
        """Initialize with *embedding_config*; defaults depend on whether an OpenAI key is configured."""
        super().__init__()
        # OpenAI embedder uses the native vector db (PostgreSQL)
        # self.vector_db_type already set to VectorDBProvider.NATIVE by parent
        self.default_embedding_config = (
            EmbeddingConfig.default_config(model_name="text-embedding-3-small", provider="openai")
            if model_settings.openai_api_key
            else EmbeddingConfig.default_config(model_name="letta")
        )
        self.embedding_config = embedding_config or self.default_embedding_config

        # TODO: Unify to global OpenAI client
        self.client: OpenAIClient = cast(
            OpenAIClient,
            LLMClient.create(
                provider_type=ProviderType.openai,
                put_inner_thoughts_first=False,
                actor=None,  # Not necessary
            ),
        )

    @trace_method
    async def _embed_batch(self, batch: List[str], batch_indices: List[int]) -> List[Tuple[int, List[float]]]:
        """Embed a single batch and return embeddings paired with their original chunk indices.

        On a token-limit error (and batch size > 1) the batch is halved and both
        halves are retried recursively; any other error re-raises.
        """
        log_event(
            "embedder.batch_started",
            {
                "batch_size": len(batch),
                "model": self.embedding_config.embedding_model,
                "embedding_endpoint_type": self.embedding_config.embedding_endpoint_type,
            },
        )
        try:
            embeddings = await self.client.request_embeddings(inputs=batch, embedding_config=self.embedding_config)
            log_event("embedder.batch_completed", {"batch_size": len(batch), "embeddings_generated": len(embeddings)})
            # Pair each embedding with the original index of the chunk it came from.
            return list(zip(batch_indices, embeddings))
        except Exception as e:
            # if it's a token limit error and we can split, do it
            if self._is_token_limit_error(e) and len(batch) > 1:
                logger.warning(f"Token limit exceeded for batch of size {len(batch)}, splitting in half and retrying")
                log_event(
                    "embedder.batch_split_retry",
                    {
                        "original_batch_size": len(batch),
                        "error": str(e),
                        "split_size": len(batch) // 2,
                    },
                )
                # split batch in half
                mid = len(batch) // 2
                batch1 = batch[:mid]
                batch1_indices = batch_indices[:mid]
                batch2 = batch[mid:]
                batch2_indices = batch_indices[mid:]

                # retry with smaller batches
                result1 = await self._embed_batch(batch1, batch1_indices)
                result2 = await self._embed_batch(batch2, batch2_indices)
                return result1 + result2
            else:
                # re-raise for other errors or if batch size is already 1
                raise

    def _is_token_limit_error(self, error: Exception) -> bool:
        """Check if the error is due to token limit exceeded."""
        # convert to string and check for token limit patterns
        error_str = str(error).lower()
        # TODO: This is quite brittle, works for now
        # check for the specific patterns we see in token limit errors
        is_token_limit = (
            "max_tokens_per_request" in error_str
            or ("requested" in error_str and "tokens" in error_str and "max" in error_str and "per request" in error_str)
            or "token limit" in error_str
            or ("bad request to openai" in error_str and "tokens" in error_str and "max" in error_str)
        )
        return is_token_limit

    @trace_method
    async def generate_embedded_passages(self, file_id: str, source_id: str, chunks: List[str], actor: User) -> List[Passage]:
        """Generate embeddings for chunks with batching and concurrent processing.

        Args:
            file_id: ID of the file the chunks came from.
            source_id: ID of the source the passages belong to.
            chunks: Raw text chunks; empty/whitespace-only entries are dropped.
            actor: User whose organization owns the resulting passages.

        Returns:
            Passage objects (text + embedding) in the same order as the valid
            input chunks.
        """
        if not chunks:
            return []

        # Filter out empty or whitespace-only chunks that would fail embedding
        valid_chunks = [(i, chunk) for i, chunk in enumerate(chunks) if chunk and chunk.strip()]

        if not valid_chunks:
            logger.warning(f"No valid text chunks found for file {file_id}. PDF may contain only images without text layer.")
            log_event(
                "embedder.no_valid_chunks",
                {"file_id": file_id, "source_id": source_id, "total_chunks": len(chunks), "reason": "All chunks empty or whitespace-only"},
            )
            return []

        if len(valid_chunks) < len(chunks):
            logger.info(f"Filtered out {len(chunks) - len(valid_chunks)} empty chunks from {len(chunks)} total")
            log_event(
                "embedder.chunks_filtered",
                {
                    "file_id": file_id,
                    "original_chunks": len(chunks),
                    "valid_chunks": len(valid_chunks),
                    "filtered_chunks": len(chunks) - len(valid_chunks),
                },
            )

        # Keep only the chunk text; positions are re-derived via batch index lists below.
        # (Fix: removed a no-op `[i for i, _ in valid_chunks]` whose result was discarded.)
        chunks_to_embed = [chunk for _, chunk in valid_chunks]

        embedding_start = time.time()
        logger.info(f"Generating embeddings for {len(chunks_to_embed)} chunks using {self.embedding_config.embedding_model}")
        log_event(
            "embedder.generation_started",
            {
                "total_chunks": len(chunks_to_embed),
                "model": self.embedding_config.embedding_model,
                "embedding_endpoint_type": self.embedding_config.embedding_endpoint_type,
                "batch_size": self.embedding_config.batch_size,
                "file_id": file_id,
                "source_id": source_id,
            },
        )

        # Create batches with their original indices
        batches = []
        batch_indices = []
        for i in range(0, len(chunks_to_embed), self.embedding_config.batch_size):
            batch = chunks_to_embed[i : i + self.embedding_config.batch_size]
            indices = list(range(i, min(i + self.embedding_config.batch_size, len(chunks_to_embed))))
            batches.append(batch)
            batch_indices.append(indices)

        logger.info(f"Processing {len(batches)} batches")
        log_event(
            "embedder.batching_completed",
            {"total_batches": len(batches), "batch_size": self.embedding_config.batch_size, "total_chunks": len(chunks_to_embed)},
        )

        # Use global semaphore to limit concurrent embedding requests across ALL file processing
        # This prevents rate limiting even when processing multiple files simultaneously
        async def process(batch: List[str], indices: List[int]):
            async with _GLOBAL_EMBEDDING_SEMAPHORE:
                try:
                    return await self._embed_batch(batch, indices)
                except Exception as e:
                    logger.error("Failed to embed batch of size %s: %s", len(batch), e)
                    log_event("embedder.batch_failed", {"batch_size": len(batch), "error": str(e), "error_type": type(e).__name__})
                    raise

        # Execute all batches with global semaphore control to limit concurrency
        tasks = [process(batch, indices) for batch, indices in zip(batches, batch_indices)]
        log_event(
            "embedder.concurrent_processing_started",
            {"concurrent_tasks": len(tasks), "max_concurrent_global": 3},
        )

        results = await asyncio.gather(*tasks)
        log_event("embedder.concurrent_processing_completed", {"batches_processed": len(results)})

        # Flatten results and sort by original index
        indexed_embeddings = []
        for batch_result in results:
            indexed_embeddings.extend(batch_result)

        # Sort by index to maintain original order; after sorting, position i holds
        # the embedding for chunks_to_embed[i], so the zip below pairs correctly.
        indexed_embeddings.sort(key=lambda x: x[0])

        # Create Passage objects in original order
        passages = []
        for (_, embedding), text in zip(indexed_embeddings, chunks_to_embed):
            passage = Passage(
                text=text,
                file_id=file_id,
                source_id=source_id,
                embedding=embedding,
                embedding_config=self.embedding_config,
                organization_id=actor.organization_id,
            )
            passages.append(passage)

        embedding_duration = time.time() - embedding_start
        logger.info(f"Successfully generated {len(passages)} embeddings (took {embedding_duration:.2f}s)")
        log_event(
            "embedder.generation_completed",
            {
                "passages_created": len(passages),
                "total_chunks_processed": len(chunks_to_embed),
                "file_id": file_id,
                "source_id": source_id,
                "duration_seconds": embedding_duration,
            },
        )

        return passages
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/file_processor/embedder/openai_embedder.py",
"license": "Apache License 2.0",
"lines": 194,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/services/file_processor/embedder/pinecone_embedder.py | from typing import List, Optional
from letta.helpers.pinecone_utils import upsert_file_records_to_pinecone_index
from letta.log import get_logger
from letta.otel.tracing import log_event, trace_method
from letta.schemas.embedding_config import EmbeddingConfig
from letta.schemas.enums import VectorDBProvider
from letta.schemas.passage import Passage
from letta.schemas.user import User
from letta.services.file_processor.embedder.base_embedder import BaseEmbedder
# Availability probe: the try body previously contained no import, so ImportError
# could never fire and PINECONE_AVAILABLE was unconditionally True. Restore the
# import so the flag actually reflects whether the SDK is installed.
try:
    import pinecone  # noqa: F401  -- imported only to detect availability

    PINECONE_AVAILABLE = True
except ImportError:
    PINECONE_AVAILABLE = False
logger = get_logger(__name__)
class PineconeEmbedder(BaseEmbedder):
    """Pinecone-based embedding generation.

    Delegates embedding to Pinecone: chunks are upserted into a Pinecone index
    (namespaced by source), so the returned Passage objects carry no local
    embedding vectors.
    """

    def __init__(self, embedding_config: Optional[EmbeddingConfig] = None):
        """Initialize; raises ImportError when the pinecone package is missing."""
        super().__init__()
        # set the vector db type for pinecone
        self.vector_db_type = VectorDBProvider.PINECONE

        if not PINECONE_AVAILABLE:
            raise ImportError("Pinecone package is not installed. Install it with: pip install pinecone")

        # set default embedding config if not provided
        if embedding_config is None:
            embedding_config = EmbeddingConfig.default_config(provider="pinecone")
        self.embedding_config = embedding_config

    @trace_method
    async def generate_embedded_passages(self, file_id: str, source_id: str, chunks: List[str], actor: User) -> List[Passage]:
        """Generate embeddings and upsert to Pinecone, then return Passage objects.

        Args:
            file_id: ID of the file the chunks came from.
            source_id: Source ID; also used as the Pinecone namespace.
            chunks: Raw text chunks; empty/whitespace-only entries are dropped.
            actor: User whose organization owns the resulting passages.

        Returns:
            Passage objects without embeddings (Pinecone stores the vectors).
        """
        if not chunks:
            return []

        # Filter out empty or whitespace-only chunks
        valid_chunks = [chunk for chunk in chunks if chunk and chunk.strip()]

        if not valid_chunks:
            logger.warning(f"No valid text chunks found for file {file_id}. PDF may contain only images without text layer.")
            log_event(
                "pinecone_embedder.no_valid_chunks",
                {"file_id": file_id, "source_id": source_id, "total_chunks": len(chunks), "reason": "All chunks empty or whitespace-only"},
            )
            return []

        if len(valid_chunks) < len(chunks):
            logger.info(f"Filtered out {len(chunks) - len(valid_chunks)} empty chunks from {len(chunks)} total")
            log_event(
                "pinecone_embedder.chunks_filtered",
                {
                    "file_id": file_id,
                    "original_chunks": len(chunks),
                    "valid_chunks": len(valid_chunks),
                    "filtered_chunks": len(chunks) - len(valid_chunks),
                },
            )

        logger.info(f"Upserting {len(valid_chunks)} chunks to Pinecone using namespace {source_id}")
        log_event(
            "embedder.generation_started",
            {
                "total_chunks": len(valid_chunks),
                "file_id": file_id,
                "source_id": source_id,
            },
        )

        # Upsert records to Pinecone using source_id as namespace
        try:
            await upsert_file_records_to_pinecone_index(file_id=file_id, source_id=source_id, chunks=valid_chunks, actor=actor)
            logger.info(f"Successfully kicked off upserting {len(valid_chunks)} records to Pinecone")
            log_event(
                "embedder.upsert_started",
                {"records_upserted": len(valid_chunks), "namespace": source_id, "file_id": file_id},
            )
        except Exception as e:
            logger.error(f"Failed to upsert records to Pinecone: {str(e)}")
            log_event("embedder.upsert_failed", {"error": str(e), "error_type": type(e).__name__})
            raise

        # Create Passage objects (without embeddings since Pinecone handles them)
        passages = []
        for text in valid_chunks:  # fix: dropped `enumerate` whose index was never used
            passage = Passage(
                text=text,
                file_id=file_id,
                source_id=source_id,
                embedding=None,  # Pinecone handles embeddings internally
                embedding_config=None,  # no local embedding config applies
                organization_id=actor.organization_id,
            )
            passages.append(passage)

        logger.info(f"Successfully created {len(passages)} passages")
        log_event(
            "embedder.generation_completed",
            {"passages_created": len(passages), "total_chunks_processed": len(valid_chunks), "file_id": file_id, "source_id": source_id},
        )

        return passages
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/file_processor/embedder/pinecone_embedder.py",
"license": "Apache License 2.0",
"lines": 90,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/services/file_processor/embedder/turbopuffer_embedder.py | import time
from typing import List, Optional
from letta.helpers.tpuf_client import TurbopufferClient
from letta.log import get_logger
from letta.otel.tracing import log_event, trace_method
from letta.schemas.embedding_config import EmbeddingConfig
from letta.schemas.enums import VectorDBProvider
from letta.schemas.passage import Passage
from letta.schemas.user import User
from letta.services.file_processor.embedder.base_embedder import BaseEmbedder
logger = get_logger(__name__)
class TurbopufferEmbedder(BaseEmbedder):
    """Turbopuffer-based embedding generation and storage."""

    def __init__(self, embedding_config: Optional[EmbeddingConfig] = None):
        """Set up the Turbopuffer client; falls back to the client's default embedding config."""
        super().__init__()
        # Turbopuffer is the external vector store for this embedder.
        self.vector_db_type = VectorDBProvider.TPUF
        self.embedding_config = embedding_config if embedding_config is not None else TurbopufferClient.default_embedding_config
        self.tpuf_client = TurbopufferClient()

    @trace_method
    async def generate_embedded_passages(self, file_id: str, source_id: str, chunks: List[str], actor: User) -> List[Passage]:
        """Generate embeddings and store in Turbopuffer, then return Passage objects."""
        if not chunks:
            return []

        # Drop chunks that are empty or whitespace-only; they cannot be embedded.
        usable = [c for c in chunks if c and c.strip()]

        if not usable:
            logger.warning(f"No valid text chunks found for file {file_id}. PDF may contain only images without text layer.")
            log_event(
                "turbopuffer_embedder.no_valid_chunks",
                {"file_id": file_id, "source_id": source_id, "total_chunks": len(chunks), "reason": "All chunks empty or whitespace-only"},
            )
            return []

        dropped = len(chunks) - len(usable)
        if dropped:
            logger.info(f"Filtered out {dropped} empty chunks from {len(chunks)} total")
            log_event(
                "turbopuffer_embedder.chunks_filtered",
                {
                    "file_id": file_id,
                    "original_chunks": len(chunks),
                    "valid_chunks": len(usable),
                    "filtered_chunks": dropped,
                },
            )

        logger.info(f"Generating embeddings for {len(usable)} chunks using Turbopuffer")
        log_event(
            "turbopuffer_embedder.generation_started",
            {
                "total_chunks": len(usable),
                "file_id": file_id,
                "source_id": source_id,
                "embedding_model": self.embedding_config.embedding_model,
            },
        )

        try:
            # Turbopuffer generates the embeddings server-side during the insert.
            started = time.time()
            passages = await self.tpuf_client.insert_file_passages(
                source_id=source_id,
                file_id=file_id,
                text_chunks=usable,
                organization_id=actor.organization_id,
                actor=actor,
            )
            elapsed = time.time() - started

            logger.info(f"Successfully generated and stored {len(passages)} passages in Turbopuffer (took {elapsed:.2f}s)")
            log_event(
                "turbopuffer_embedder.generation_completed",
                {
                    "passages_created": len(passages),
                    "total_chunks_processed": len(usable),
                    "file_id": file_id,
                    "source_id": source_id,
                    "duration_seconds": elapsed,
                },
            )
            return passages
        except Exception as e:
            logger.error(f"Failed to generate embeddings with Turbopuffer: {str(e)}")
            log_event(
                "turbopuffer_embedder.generation_failed",
                {"error": str(e), "error_type": type(e).__name__, "file_id": file_id, "source_id": source_id},
            )
            raise
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/file_processor/embedder/turbopuffer_embedder.py",
"license": "Apache License 2.0",
"lines": 85,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/services/file_processor/file_processor.py | import asyncio
import time
from typing import List
from mistralai import OCRPageObject, OCRResponse, OCRUsageInfo
from letta.log import get_logger
from letta.otel.context import get_ctx_attributes
from letta.otel.tracing import log_event, trace_method
from letta.schemas.agent import AgentState
from letta.schemas.enums import FileProcessingStatus, VectorDBProvider
from letta.schemas.file import FileMetadata
from letta.schemas.passage import Passage
from letta.schemas.user import User
from letta.services.agent_manager import AgentManager
from letta.services.file_manager import FileManager
from letta.services.file_processor.chunker.llama_index_chunker import LlamaIndexChunker
from letta.services.file_processor.embedder.base_embedder import BaseEmbedder
from letta.services.file_processor.parser.base_parser import FileParser
from letta.services.job_manager import JobManager
from letta.services.passage_manager import PassageManager
from letta.services.source_manager import SourceManager
logger = get_logger(__name__)
class FileProcessor:
"""Main PDF processing orchestrator"""
def __init__(
    self,
    file_parser: FileParser,
    embedder: BaseEmbedder,
    actor: User,
    max_file_size: int = 50 * 1024 * 1024,  # 50MB default
):
    """Wire up the parser/embedder pair and the service managers this orchestrator uses."""
    self.file_parser = file_parser
    self.embedder = embedder
    self.actor = actor
    self.max_file_size = max_file_size
    # Service-layer managers used throughout processing.
    self.file_manager = FileManager()
    self.source_manager = SourceManager()
    self.passage_manager = PassageManager()
    self.job_manager = JobManager()
    self.agent_manager = AgentManager()
    # The embedder dictates which vector store passages land in.
    self.vector_db_type = embedder.vector_db_type
async def _chunk_and_embed_with_fallback(self, file_metadata: FileMetadata, ocr_response, source_id: str) -> List:
    """Chunk text and generate embeddings, falling back to the default chunker on failure.

    Fixes: log messages previously emitted a literal "(unknown)" even though
    `filename` is in scope (and already used in the structured log_event
    payloads); per-page index lookups now use enumerate instead of a linear
    `list.index` per page.

    Args:
        file_metadata: File being processed (provides name, type, and id).
        ocr_response: Parser output with a `pages` list of markdown pages.
        source_id: Source the resulting passages belong to.

    Returns:
        The embedded passages for all pages.

    Raises:
        Exception: Re-raises the fallback chunker's error if both attempts fail.
    """
    filename = file_metadata.file_name

    # Create file-type-specific chunker in thread pool to avoid blocking event loop
    text_chunker = await asyncio.to_thread(
        LlamaIndexChunker, file_type=file_metadata.file_type, chunk_size=self.embedder.embedding_config.embedding_chunk_size
    )

    # First attempt with file-specific chunker
    try:
        all_chunks = []
        for page_index, page in enumerate(ocr_response.pages):
            # Run CPU-intensive chunking in thread pool to avoid blocking event loop
            chunking_start = time.time()
            chunks = await asyncio.to_thread(text_chunker.chunk_text, page)
            chunking_duration = time.time() - chunking_start
            if chunking_duration > 0.5:
                logger.warning(f"Slow chunking operation for {filename}: {chunking_duration:.2f}s")

            if not chunks:
                log_event(
                    "file_processor.chunking_failed",
                    {
                        "filename": filename,
                        "page_index": page_index,
                    },
                )
                raise ValueError("No chunks created from text")
            all_chunks.extend(chunks)

        # Record the chunk count and flip status to EMBEDDING before the expensive pass.
        file_metadata = await self.file_manager.update_file_status(
            file_id=file_metadata.id,
            actor=self.actor,
            processing_status=FileProcessingStatus.EMBEDDING,
            total_chunks=len(all_chunks),
            chunks_embedded=0,
        )

        all_passages = await self.embedder.generate_embedded_passages(
            file_id=file_metadata.id,
            source_id=source_id,
            chunks=all_chunks,
            actor=self.actor,
        )
        return all_passages

    except Exception as e:
        logger.warning(f"Failed to chunk/embed with file-specific chunker for {filename}: {str(e)}. Retrying with default chunker.")
        log_event(
            "file_processor.embedding_failed_retrying",
            {"filename": filename, "error": str(e), "error_type": type(e).__name__},
        )

        # Retry with default chunker
        try:
            logger.info(f"Retrying chunking with default SentenceSplitter for {filename}")
            all_chunks = []
            for page_index, page in enumerate(ocr_response.pages):
                # Run CPU-intensive default chunking in thread pool to avoid blocking event loop
                chunking_start = time.time()
                chunks = await asyncio.to_thread(text_chunker.default_chunk_text, page)
                chunking_duration = time.time() - chunking_start
                if chunking_duration > 0.5:
                    logger.warning(f"Slow default chunking operation for {filename}: {chunking_duration:.2f}s")

                if not chunks:
                    log_event(
                        "file_processor.default_chunking_failed",
                        {
                            "filename": filename,
                            "page_index": page_index,
                        },
                    )
                    raise ValueError("No chunks created from text with default chunker")
                all_chunks.extend(chunks)

            all_passages = await self.embedder.generate_embedded_passages(
                file_id=file_metadata.id,
                source_id=source_id,
                chunks=all_chunks,
                actor=self.actor,
            )

            logger.info(f"Successfully generated passages with default chunker for {filename}")
            log_event(
                "file_processor.default_chunking_success",
                {"filename": filename, "total_chunks": len(all_chunks)},
            )
            return all_passages

        except Exception as fallback_error:
            logger.error("Default chunking also failed for %s: %s", filename, fallback_error)
            log_event(
                "file_processor.default_chunking_also_failed",
                {
                    "filename": filename,
                    "fallback_error": str(fallback_error),
                    "fallback_error_type": type(fallback_error).__name__,
                },
            )
            raise fallback_error
# TODO: Factor this function out of SyncServer
@trace_method
async def process(
    self,
    agent_states: list[AgentState],
    source_id: str,
    content: bytes,
    file_metadata: FileMetadata,
) -> list[Passage]:
    """Run the full ingestion pipeline for an uploaded file.

    Creates the file record early (status PARSING), extracts text, stores the
    raw markdown, inserts the file into the given agents' context windows,
    then chunks/embeds into passages and advances the file's status.

    Args:
        agent_states: Agents whose context windows should reference the file.
        source_id: ID of the source the file belongs to.
        content: Raw file bytes (a str is tolerated and encoded as UTF-8).
        file_metadata: Metadata for the file; persisted by this method.

    Returns:
        The created passages. On failure an empty list is returned and the
        failure is recorded on the file's status instead of being raised.
    """
    filename = file_metadata.file_name

    # Create file as early as possible with no content so the record (with
    # PARSING status) is visible even if a later step fails.
    file_metadata.processing_status = FileProcessingStatus.PARSING
    file_metadata = await self.file_manager.create_file(file_metadata, self.actor)
    log_event(
        "file_processor.file_created",
        {
            "file_id": str(file_metadata.id),
            "filename": filename,
            "file_type": file_metadata.file_type,
            "status": FileProcessingStatus.PARSING.value,
        },
    )

    try:
        # Ensure we're working with bytes
        if isinstance(content, str):
            content = content.encode("utf-8")

        from letta.otel.metric_registry import MetricRegistry

        MetricRegistry().file_process_bytes_histogram.record(len(content), attributes=get_ctx_attributes())

        if len(content) > self.max_file_size:
            log_event(
                "file_processor.size_limit_exceeded",
                {"filename": filename, "file_size": len(content), "max_file_size": self.max_file_size},
            )
            # NOTE(review): message says "PDF" but this limit applies to any file type — confirm before rewording
            raise ValueError(f"PDF size exceeds maximum allowed size of {self.max_file_size} bytes")

        # Fixed: log lines below previously interpolated nothing ("(unknown)");
        # restore the intended filename interpolation.
        logger.info(f"Starting OCR extraction for {filename}")
        log_event("file_processor.ocr_started", {"filename": filename, "file_size": len(content), "mime_type": file_metadata.file_type})
        ocr_response = await self.file_parser.extract_text(content, mime_type=file_metadata.file_type)

        # update file with raw text
        raw_markdown_text = "".join([page.markdown for page in ocr_response.pages])
        log_event(
            "file_processor.ocr_completed",
            {"filename": filename, "pages_extracted": len(ocr_response.pages), "text_length": len(raw_markdown_text)},
        )
        file_metadata = await self.file_manager.upsert_file_content(file_id=file_metadata.id, text=raw_markdown_text, actor=self.actor)

        await self.agent_manager.insert_file_into_context_windows(
            source_id=source_id,
            file_metadata_with_content=file_metadata,
            actor=self.actor,
            agent_states=agent_states,
        )

        if not ocr_response or len(ocr_response.pages) == 0:
            log_event(
                "file_processor.ocr_no_text",
                {
                    "filename": filename,
                    "ocr_response_empty": not ocr_response,
                    "pages_count": len(ocr_response.pages) if ocr_response else 0,
                },
            )
            raise ValueError("No text extracted from PDF")

        logger.info("Chunking extracted text")
        log_event(
            "file_processor.chunking_started",
            {"filename": filename, "pages_to_process": len(ocr_response.pages)},
        )

        # Chunk and embed with fallback logic
        all_passages = await self._chunk_and_embed_with_fallback(
            file_metadata=file_metadata,
            ocr_response=ocr_response,
            source_id=source_id,
        )

        # Only the native vector DB persists passages here; other backends
        # (e.g. Pinecone) store embeddings externally.
        if self.vector_db_type == VectorDBProvider.NATIVE:
            all_passages = await self.passage_manager.create_many_source_passages_async(
                passages=all_passages,
                file_metadata=file_metadata,
                actor=self.actor,
            )
        log_event(
            "file_processor.passages_created",
            {"filename": filename, "total_passages": len(all_passages)},
        )

        # Handle case where no passages were created (e.g., image-only PDF)
        if len(all_passages) == 0:
            logger.warning(f"No passages created for {filename}. File may contain only images without extractable text.")
            log_event(
                "file_processor.no_passages_created",
                {"filename": filename, "file_id": str(file_metadata.id), "reason": "No extractable text content"},
            )

        logger.info(f"Successfully processed {filename}: {len(all_passages)} passages")
        log_event(
            "file_processor.processing_completed",
            {
                "filename": filename,
                "file_id": str(file_metadata.id),
                "total_passages": len(all_passages),
                "status": FileProcessingStatus.COMPLETED.value,
            },
        )

        # update job status
        # pinecone completes slowly, so gets updated later
        if self.vector_db_type != VectorDBProvider.PINECONE:
            await self.file_manager.update_file_status(
                file_id=file_metadata.id,
                actor=self.actor,
                processing_status=FileProcessingStatus.COMPLETED,
                chunks_embedded=len(all_passages),
            )

        return all_passages

    except Exception as e:
        logger.exception("File processing failed for %s: %s", filename, e)
        log_event(
            "file_processor.processing_failed",
            {
                "filename": filename,
                "file_id": str(file_metadata.id),
                "error": str(e),
                "error_type": type(e).__name__,
                "status": FileProcessingStatus.ERROR.value,
            },
        )
        # Record the failure on the file instead of propagating to the caller.
        await self.file_manager.update_file_status(
            file_id=file_metadata.id,
            actor=self.actor,
            processing_status=FileProcessingStatus.ERROR,
            error_message=str(e) if str(e) else f"File processing failed: {type(e).__name__}",
        )

        return []
def _create_ocr_response_from_content(self, content: str):
    """Create minimal OCR response from existing content"""
    # Wrap pre-extracted text as one synthetic page so the downstream
    # chunking/embedding path can treat imports like real OCR output.
    synthetic_page = OCRPageObject(
        index=0,
        markdown=content,
        images=[],
        dimensions=None,
    )
    return OCRResponse(
        model="import-skip-ocr",
        pages=[synthetic_page],
        usage_info=OCRUsageInfo(pages_processed=1),
        document_annotation=None,
    )
# TODO: The file state machine here is kind of out of date, we need to match with the correct one above
@trace_method
async def process_imported_file(self, file_metadata: FileMetadata, source_id: str) -> List[Passage]:
    """Process an imported file that already has content - skip OCR, do chunking/embedding.

    Args:
        file_metadata: Metadata for the imported file; must already carry its text content.
        source_id: ID of the source the file belongs to.

    Returns:
        The created passages, or an empty list when the file has no content or
        processing fails (failures are recorded on the file's status, not raised).
    """
    filename = file_metadata.file_name

    if not file_metadata.content:
        # Fixed: this log previously interpolated nothing ("(unknown)").
        logger.warning(f"No content found for imported file {filename}")
        return []

    content = file_metadata.content
    processing_start = time.time()

    try:
        # Create OCR response from existing content
        ocr_response = self._create_ocr_response_from_content(content)

        # Update file status to embedding (valid transition from PARSING)
        file_metadata = await self.file_manager.update_file_status(
            file_id=file_metadata.id, actor=self.actor, processing_status=FileProcessingStatus.EMBEDDING
        )

        logger.info(f"Chunking imported file content for {filename}")
        log_event("file_processor.import_chunking_started", {"filename": filename, "content_length": len(content)})

        # Chunk and embed using existing logic
        all_passages = await self._chunk_and_embed_with_fallback(
            file_metadata=file_metadata, ocr_response=ocr_response, source_id=source_id
        )

        # Create passages in database (unless using Pinecone)
        if self.vector_db_type == VectorDBProvider.NATIVE:
            all_passages = await self.passage_manager.create_many_source_passages_async(
                passages=all_passages, file_metadata=file_metadata, actor=self.actor
            )
        log_event("file_processor.import_passages_created", {"filename": filename, "total_passages": len(all_passages)})

        # Update file status to completed (valid transition from EMBEDDING)
        # pinecone completes slowly, so gets updated later
        if self.vector_db_type != VectorDBProvider.PINECONE:
            await self.file_manager.update_file_status(
                file_id=file_metadata.id, actor=self.actor, processing_status=FileProcessingStatus.COMPLETED
            )
        else:
            # For Pinecone, update chunk counts but keep status at EMBEDDING
            # The status will be updated to COMPLETED later when chunks are confirmed embedded
            await self.file_manager.update_file_status(
                file_id=file_metadata.id, actor=self.actor, total_chunks=len(all_passages), chunks_embedded=0
            )

        processing_duration = time.time() - processing_start
        logger.info(
            f"Successfully processed imported file {filename}: {len(all_passages)} passages (total time: {processing_duration:.2f}s)"
        )
        log_event(
            "file_processor.import_processing_completed",
            {
                "filename": filename,
                "file_id": str(file_metadata.id),
                "total_passages": len(all_passages),
                "status": FileProcessingStatus.COMPLETED.value,
                "total_duration_seconds": processing_duration,
            },
        )

        return all_passages

    except Exception as e:
        logger.exception("Import file processing failed for %s: %s", filename, e)
        log_event(
            "file_processor.import_processing_failed",
            {
                "filename": filename,
                "file_id": str(file_metadata.id),
                "error": str(e),
                "error_type": type(e).__name__,
                "status": FileProcessingStatus.ERROR.value,
            },
        )
        # Record the failure on the file instead of propagating to the caller.
        await self.file_manager.update_file_status(
            file_id=file_metadata.id,
            actor=self.actor,
            processing_status=FileProcessingStatus.ERROR,
            error_message=str(e) if str(e) else f"Import file processing failed: {type(e).__name__}",
        )

        return []
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/file_processor/file_processor.py",
"license": "Apache License 2.0",
"lines": 350,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/services/file_processor/file_types.py | """
Centralized file type configuration for supported file formats.
This module provides a single source of truth for file type definitions,
mime types, and file processing capabilities across the Letta codebase.
"""
import mimetypes
from dataclasses import dataclass
from enum import Enum
from typing import Dict, Set
class ChunkingStrategy(str, Enum):
    """How a file's text should be split into chunks for embedding.

    Subclasses ``str`` so members compare equal to, and serialize as, their
    literal string values.
    """

    CODE = "code"  # Line-based chunking for code files
    STRUCTURED_DATA = "structured_data"  # Line-based chunking for JSON, XML, etc.
    DOCUMENTATION = "documentation"  # Paragraph-aware chunking for Markdown, HTML
    LINE_BASED = "line_based"  # Default line-based chunking
@dataclass
class FileTypeInfo:
    """Information about a supported file type."""

    # File extension including the leading dot (e.g. ".py").
    extension: str
    # Canonical MIME type registered for this extension.
    mime_type: str
    # True when the file can be decoded directly as plain text (no OCR needed).
    is_simple_text: bool
    # Human-readable description of the file type.
    description: str
    # How files of this type are split into chunks for embedding.
    chunking_strategy: ChunkingStrategy = ChunkingStrategy.LINE_BASED
class FileTypeRegistry:
    """Central registry for supported file types."""

    # Built-in types as (extension, mime_type, is_simple_text, description, strategy),
    # registered in order by _register_default_types().
    _DEFAULT_TYPES = (
        # Document formats
        (".pdf", "application/pdf", False, "PDF document", ChunkingStrategy.LINE_BASED),
        (".txt", "text/plain", True, "Plain text file", ChunkingStrategy.LINE_BASED),
        (".md", "text/markdown", True, "Markdown document", ChunkingStrategy.DOCUMENTATION),
        (".markdown", "text/markdown", True, "Markdown document", ChunkingStrategy.DOCUMENTATION),
        (".json", "application/json", True, "JSON data file", ChunkingStrategy.STRUCTURED_DATA),
        (".jsonl", "application/jsonl", True, "JSON Lines file", ChunkingStrategy.STRUCTURED_DATA),
        (".csv", "text/csv", True, "CSV data file", ChunkingStrategy.STRUCTURED_DATA),
        # Programming languages
        (".py", "text/x-python", True, "Python source code", ChunkingStrategy.CODE),
        (".js", "text/javascript", True, "JavaScript source code", ChunkingStrategy.CODE),
        (".ts", "text/x-typescript", True, "TypeScript source code", ChunkingStrategy.CODE),
        (".java", "text/x-java-source", True, "Java source code", ChunkingStrategy.CODE),
        (".cpp", "text/x-c++", True, "C++ source code", ChunkingStrategy.CODE),
        (".cxx", "text/x-c++", True, "C++ source code", ChunkingStrategy.CODE),
        (".c", "text/x-c", True, "C source code", ChunkingStrategy.CODE),
        (".h", "text/x-c", True, "C/C++ header file", ChunkingStrategy.CODE),
        (".cs", "text/x-csharp", True, "C# source code", ChunkingStrategy.CODE),
        (".php", "text/x-php", True, "PHP source code", ChunkingStrategy.CODE),
        (".rb", "text/x-ruby", True, "Ruby source code", ChunkingStrategy.CODE),
        (".go", "text/x-go", True, "Go source code", ChunkingStrategy.CODE),
        (".rs", "text/x-rust", True, "Rust source code", ChunkingStrategy.CODE),
        (".swift", "text/x-swift", True, "Swift source code", ChunkingStrategy.CODE),
        (".kt", "text/x-kotlin", True, "Kotlin source code", ChunkingStrategy.CODE),
        (".scala", "text/x-scala", True, "Scala source code", ChunkingStrategy.CODE),
        (".r", "text/x-r", True, "R source code", ChunkingStrategy.CODE),
        (".m", "text/x-objective-c", True, "Objective-C source code", ChunkingStrategy.CODE),
        # Web technologies
        (".html", "text/html", True, "HTML document", ChunkingStrategy.CODE),
        (".htm", "text/html", True, "HTML document", ChunkingStrategy.CODE),
        (".css", "text/css", True, "CSS stylesheet", ChunkingStrategy.STRUCTURED_DATA),
        (".scss", "text/x-scss", True, "SCSS stylesheet", ChunkingStrategy.STRUCTURED_DATA),
        (".sass", "text/x-sass", True, "Sass stylesheet", ChunkingStrategy.STRUCTURED_DATA),
        (".less", "text/x-less", True, "Less stylesheet", ChunkingStrategy.STRUCTURED_DATA),
        (".vue", "text/x-vue", True, "Vue.js component", ChunkingStrategy.CODE),
        (".jsx", "text/x-jsx", True, "JSX source code", ChunkingStrategy.CODE),
        (".tsx", "text/x-tsx", True, "TSX source code", ChunkingStrategy.CODE),
        # Configuration and data formats
        (".xml", "application/xml", True, "XML document", ChunkingStrategy.STRUCTURED_DATA),
        (".yaml", "text/x-yaml", True, "YAML configuration", ChunkingStrategy.STRUCTURED_DATA),
        (".yml", "text/x-yaml", True, "YAML configuration", ChunkingStrategy.STRUCTURED_DATA),
        (".toml", "application/toml", True, "TOML configuration", ChunkingStrategy.STRUCTURED_DATA),
        (".ini", "text/x-ini", True, "INI configuration", ChunkingStrategy.STRUCTURED_DATA),
        (".cfg", "text/x-conf", True, "Configuration file", ChunkingStrategy.STRUCTURED_DATA),
        (".conf", "text/x-conf", True, "Configuration file", ChunkingStrategy.STRUCTURED_DATA),
        # Scripts and SQL
        (".sh", "text/x-shellscript", True, "Shell script", ChunkingStrategy.CODE),
        (".bash", "text/x-shellscript", True, "Bash script", ChunkingStrategy.CODE),
        (".ps1", "text/x-powershell", True, "PowerShell script", ChunkingStrategy.CODE),
        (".bat", "text/x-batch", True, "Batch script", ChunkingStrategy.CODE),
        (".cmd", "text/x-batch", True, "Command script", ChunkingStrategy.CODE),
        (".dockerfile", "text/x-dockerfile", True, "Dockerfile", ChunkingStrategy.CODE),
        (".sql", "text/x-sql", True, "SQL script", ChunkingStrategy.CODE),
    )

    # MIME type aliases occasionally seen in the wild for already-supported types.
    _MIME_ALIASES = (
        ("text/x-markdown", ".md"),
        ("application/x-jsonlines", ".jsonl"),
        ("text/xml", ".xml"),
        ("text/csv", ".csv"),
    )

    def __init__(self):
        """Initialize the registry with default supported file types."""
        self._file_types: Dict[str, FileTypeInfo] = {}
        self._register_default_types()

    def _register_default_types(self) -> None:
        """Register all default supported file types from the class-level table."""
        for extension, mime_type, is_simple_text, description, strategy in self._DEFAULT_TYPES:
            self.register(extension, mime_type, is_simple_text, description, strategy)

    def register(
        self,
        extension: str,
        mime_type: str,
        is_simple_text: bool,
        description: str,
        chunking_strategy: ChunkingStrategy = ChunkingStrategy.LINE_BASED,
    ) -> None:
        """
        Register a new file type.

        Args:
            extension: File extension (with leading dot, e.g., '.py'); a missing dot is added.
            mime_type: MIME type for the file
            is_simple_text: Whether this is a simple text file that can be read directly
            description: Human-readable description of the file type
            chunking_strategy: Strategy for chunking this file type
        """
        normalized = extension if extension.startswith(".") else f".{extension}"
        self._file_types[normalized] = FileTypeInfo(
            extension=normalized,
            mime_type=mime_type,
            is_simple_text=is_simple_text,
            description=description,
            chunking_strategy=chunking_strategy,
        )

    def register_mime_types(self) -> None:
        """Register all file types (plus known aliases) with Python's mimetypes module."""
        for info in self._file_types.values():
            mimetypes.add_type(info.mime_type, info.extension)
        for alias, ext in self._MIME_ALIASES:
            mimetypes.add_type(alias, ext)

    def get_allowed_media_types(self) -> Set[str]:
        """
        Get set of all allowed MIME types.

        Returns:
            Set of MIME type strings that are supported for upload
        """
        # Alternative spellings accepted on upload (alias table minus text/csv,
        # which is already a canonical type above).
        upload_aliases = {"text/x-markdown", "application/x-jsonlines", "text/xml"}
        return {info.mime_type for info in self._file_types.values()} | upload_aliases

    def get_extension_to_mime_type_map(self) -> Dict[str, str]:
        """
        Get mapping from file extensions to MIME types.

        Returns:
            Dictionary mapping extensions (with leading dot) to MIME types
        """
        return {ext: info.mime_type for ext, info in self._file_types.items()}

    def get_simple_text_mime_types(self) -> Set[str]:
        """
        Get set of MIME types that represent simple text files.

        Returns:
            Set of MIME type strings for files that can be read as plain text
        """
        return {info.mime_type for info in self._file_types.values() if info.is_simple_text}

    def is_simple_text_mime_type(self, mime_type: str) -> bool:
        """
        Check if a MIME type represents simple text that can be read directly.

        Args:
            mime_type: MIME type to check

        Returns:
            True if the MIME type represents simple text
        """
        # Registered simple-text types, any text/* type, or a known text alias.
        return (
            mime_type in self.get_simple_text_mime_types()
            or mime_type.startswith("text/")
            or mime_type in {"application/x-jsonlines", "text/xml"}
        )

    def get_supported_extensions(self) -> Set[str]:
        """
        Get set of all supported file extensions.

        Returns:
            Set of file extensions (with leading dots)
        """
        return set(self._file_types)

    def is_supported_extension(self, extension: str) -> bool:
        """
        Check if a file extension is supported.

        Args:
            extension: File extension (with or without leading dot)

        Returns:
            True if the extension is supported
        """
        key = extension if extension.startswith(".") else f".{extension}"
        return key in self._file_types

    def get_file_type_info(self, extension: str) -> FileTypeInfo:
        """
        Get information about a file type by extension.

        Args:
            extension: File extension (with or without leading dot)

        Returns:
            FileTypeInfo object with details about the file type

        Raises:
            KeyError: If the extension is not supported
        """
        key = extension if extension.startswith(".") else f".{extension}"
        return self._file_types[key]

    def get_chunking_strategy_by_extension(self, extension: str) -> ChunkingStrategy:
        """
        Get the chunking strategy for a file based on its extension.

        Args:
            extension: File extension (with or without leading dot)

        Returns:
            ChunkingStrategy enum value for the file type

        Raises:
            KeyError: If the extension is not supported
        """
        return self.get_file_type_info(extension).chunking_strategy

    def get_chunking_strategy_by_mime_type(self, mime_type: str) -> ChunkingStrategy:
        """
        Get the chunking strategy for a file based on its MIME type.

        Args:
            mime_type: MIME type of the file

        Returns:
            ChunkingStrategy enum value for the file type, or LINE_BASED if not found
        """
        return next(
            (info.chunking_strategy for info in self._file_types.values() if info.mime_type == mime_type),
            ChunkingStrategy.LINE_BASED,
        )
# Global registry instance
# Module-level singleton; registering new types on it affects every importer.
file_type_registry = FileTypeRegistry()
# Convenience functions for backward compatibility and ease of use
def register_mime_types() -> None:
    """Register all supported file types with Python's mimetypes module.

    Thin wrapper delegating to the module-level ``file_type_registry`` singleton.
    """
    file_type_registry.register_mime_types()
def get_allowed_media_types() -> Set[str]:
    """Get set of all allowed MIME types for file uploads.

    Thin wrapper delegating to the module-level ``file_type_registry`` singleton.
    """
    return file_type_registry.get_allowed_media_types()
def get_extension_to_mime_type_map() -> Dict[str, str]:
    """Get mapping from file extensions (with leading dot) to MIME types.

    Thin wrapper delegating to the module-level ``file_type_registry`` singleton.
    """
    return file_type_registry.get_extension_to_mime_type_map()
def get_simple_text_mime_types() -> Set[str]:
    """Get set of MIME types that represent simple text files.

    Thin wrapper delegating to the module-level ``file_type_registry`` singleton.
    """
    return file_type_registry.get_simple_text_mime_types()
def is_simple_text_mime_type(mime_type: str) -> bool:
    """Check if a MIME type represents simple text readable without OCR.

    Thin wrapper delegating to the module-level ``file_type_registry`` singleton.
    """
    return file_type_registry.is_simple_text_mime_type(mime_type)
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/file_processor/file_types.py",
"license": "Apache License 2.0",
"lines": 242,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/services/file_processor/parser/base_parser.py | from abc import ABC, abstractmethod
class FileParser(ABC):
    """Abstract base class for file parser"""

    @abstractmethod
    async def extract_text(self, content: bytes, mime_type: str):
        """Extract text from raw file bytes.

        Args:
            content: The file's raw bytes.
            mime_type: MIME type used to decide how the bytes are parsed.

        Returns:
            An OCR-style response with the extracted text (concrete
            implementations in this package return a mistralai ``OCRResponse``).
        """
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/file_processor/parser/base_parser.py",
"license": "Apache License 2.0",
"lines": 6,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:letta/services/file_processor/parser/markitdown_parser.py | import logging
import os
import tempfile
from markitdown import MarkItDown
from mistralai import OCRPageObject, OCRResponse, OCRUsageInfo
from letta.log import get_logger
from letta.otel.tracing import trace_method
from letta.services.file_processor.file_types import is_simple_text_mime_type
from letta.services.file_processor.parser.base_parser import FileParser
logger = get_logger(__name__)
# Suppress pdfminer warnings that occur during PDF processing
logging.getLogger("pdfminer.pdffont").setLevel(logging.ERROR)
logging.getLogger("pdfminer.pdfinterp").setLevel(logging.ERROR)
logging.getLogger("pdfminer.pdfpage").setLevel(logging.ERROR)
logging.getLogger("pdfminer.converter").setLevel(logging.ERROR)
class MarkitdownFileParser(FileParser):
    """Markitdown-based file parsing for documents"""

    def __init__(self, model: str = "markitdown"):
        self.model = model

    def _single_page_response(self, text: str) -> OCRResponse:
        """Wrap already-extracted text as a one-page OCRResponse."""
        page = OCRPageObject(index=0, markdown=text, images=[], dimensions=None)
        return OCRResponse(
            model=self.model,
            pages=[page],
            usage_info=OCRUsageInfo(pages_processed=1),
            document_annotation=None,
        )

    @trace_method
    async def extract_text(self, content: bytes, mime_type: str) -> OCRResponse:
        """Extract text using markitdown."""
        import asyncio

        try:
            # Simple text files need no conversion — decode and wrap directly.
            if is_simple_text_mime_type(mime_type):
                logger.info(f"Extracting text directly (no processing needed): {self.model}")
                return self._single_page_response(content.decode("utf-8", errors="replace"))

            logger.info(f"Extracting text using markitdown: {self.model}")
            suffix = self._get_file_extension(mime_type)

            def _convert_with_markitdown() -> str:
                # markitdown wants a path on disk, so stage the bytes in a
                # temp file; deleted in the finally regardless of outcome.
                with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as staged:
                    staged.write(content)
                    staged_path = staged.name
                try:
                    converter = MarkItDown(enable_plugins=False)
                    return converter.convert(staged_path).text_content
                finally:
                    os.unlink(staged_path)

            # Conversion is CPU/IO heavy; run it off the event loop.
            markdown_text = await asyncio.to_thread(_convert_with_markitdown)
            return self._single_page_response(markdown_text)

        except Exception as e:
            logger.error(f"Markitdown text extraction failed: {str(e)}")
            raise

    def _get_file_extension(self, mime_type: str) -> str:
        """Get file extension based on MIME type for markitdown processing."""
        mime_to_ext = {
            "application/pdf": ".pdf",
            "application/vnd.openxmlformats-officedocument.wordprocessingml.document": ".docx",
            "application/vnd.openxmlformats-officedocument.presentationml.presentation": ".pptx",
            "application/vnd.ms-excel": ".xls",
            "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": ".xlsx",
            "text/csv": ".csv",
            "application/json": ".json",
            "text/xml": ".xml",
            "application/xml": ".xml",
        }
        return mime_to_ext.get(mime_type, ".txt")
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/file_processor/parser/markitdown_parser.py",
"license": "Apache License 2.0",
"lines": 87,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:letta/services/file_processor/parser/mistral_parser.py | import base64
from mistralai import Mistral, OCRPageObject, OCRResponse, OCRUsageInfo
from letta.log import get_logger
from letta.otel.tracing import trace_method
from letta.services.file_processor.file_types import is_simple_text_mime_type
from letta.services.file_processor.parser.base_parser import FileParser
from letta.settings import settings
logger = get_logger(__name__)
class MistralFileParser(FileParser):
    """Mistral-based OCR extraction"""

    def __init__(self, model: str = "mistral-ocr-latest"):
        # Model name used for OCR requests and echoed in shortcut responses.
        self.model = model

    # TODO: Make this return something general if we add more file parsers
    @trace_method
    async def extract_text(self, content: bytes, mime_type: str) -> OCRResponse:
        """Extract text using Mistral OCR or shortcut for plain text.

        Simple text MIME types are decoded directly and wrapped in a
        single-page OCRResponse; anything else is sent to the Mistral OCR
        API as a base64 data URL.
        """
        try:
            # TODO: Kind of hacky...we try to exit early here?
            # TODO: Create our internal file parser representation we return instead of OCRResponse
            if is_simple_text_mime_type(mime_type):
                logger.info(f"Extracting text directly (no Mistral): {self.model}")
                text = content.decode("utf-8", errors="replace")
                return OCRResponse(
                    model=self.model,
                    pages=[
                        OCRPageObject(
                            index=0,
                            markdown=text,
                            images=[],
                            dimensions=None,
                        )
                    ],
                    usage_info=OCRUsageInfo(pages_processed=1),  # You might need to construct this properly
                    document_annotation=None,
                )

            base64_encoded_content = base64.b64encode(content).decode("utf-8")
            document_url = f"data:{mime_type};base64,{base64_encoded_content}"

            logger.info(f"Extracting text using Mistral OCR model: {self.model}")
            async with Mistral(api_key=settings.mistral_api_key) as mistral:
                # Fixed: previously hard-coded "mistral-ocr-latest" here,
                # silently ignoring the model passed to __init__.
                ocr_response = await mistral.ocr.process_async(
                    model=self.model,
                    document={"type": "document_url", "document_url": document_url},
                    include_image_base64=False,
                )

            return ocr_response

        except Exception as e:
            logger.error(f"OCR extraction failed: {str(e)}")
            raise
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/file_processor/parser/mistral_parser.py",
"license": "Apache License 2.0",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:letta/services/files_agents_manager.py | from datetime import datetime, timezone
from typing import Dict, List, Optional, Union
from sqlalchemy import and_, delete, func, or_, select, update
from letta.log import get_logger
from letta.orm.errors import NoResultFound
from letta.orm.file import FileMetadata as FileMetadataModel
from letta.orm.files_agents import FileAgent as FileAgentModel
from letta.otel.tracing import trace_method
from letta.schemas.block import Block as PydanticBlock, FileBlock as PydanticFileBlock
from letta.schemas.file import FileAgent as PydanticFileAgent, FileMetadata
from letta.schemas.user import User as PydanticUser
from letta.server.db import db_registry
from letta.utils import enforce_types
logger = get_logger(__name__)
class FileAgentManager:
"""High-level helpers for CRUD / listing on the `files_agents` join table."""
@enforce_types
@trace_method
async def attach_file(
    self,
    *,
    agent_id: str,
    file_id: str,
    file_name: str,
    source_id: str,
    actor: PydanticUser,
    max_files_open: int,
    is_open: bool = True,
    visible_content: Optional[str] = None,
    start_line: Optional[int] = None,
    end_line: Optional[int] = None,
) -> tuple[PydanticFileAgent, List[str]]:
    """
    Idempotently attach *file_id* to *agent_id* with LRU enforcement.

    • If the row already exists → update `is_open`, `visible_content`
      and always refresh `last_accessed_at`.
    • Otherwise create a brand-new association.
    • If is_open=True, enforces max_files_open using LRU eviction.

    Args:
        agent_id: Agent to attach the file to.
        file_id: File being attached.
        file_name: Display name of the file (part of the lookup key).
        source_id: Source the file belongs to.
        actor: User performing the action; scopes queries to their org.
        max_files_open: Cap on simultaneously open files (LRU-enforced).
        is_open: Whether the file should be opened on attach.
        visible_content: Content snippet shown in the agent's context window.
        start_line: First visible line, if a line range is being viewed.
        end_line: Last visible line, if a line range is being viewed.

    Returns:
        Tuple of (file_agent, closed_file_names)
    """
    if is_open:
        # Use the efficient LRU + open method; it may close other files to
        # stay within max_files_open, and reports which ones it closed.
        closed_files, _was_already_open, _ = await self.enforce_max_open_files_and_open(
            agent_id=agent_id,
            file_id=file_id,
            file_name=file_name,
            source_id=source_id,
            actor=actor,
            visible_content=visible_content or "",
            max_files_open=max_files_open,
            start_line=start_line,
            end_line=end_line,
        )

        # Get the updated file agent to return
        file_agent = await self.get_file_agent_by_id(agent_id=agent_id, file_id=file_id, actor=actor)
        return file_agent, closed_files
    else:
        # Original logic for is_open=False
        async with db_registry.async_session() as session:
            query = select(FileAgentModel).where(
                and_(
                    FileAgentModel.agent_id == agent_id,
                    FileAgentModel.file_id == file_id,
                    FileAgentModel.file_name == file_name,
                    FileAgentModel.organization_id == actor.organization_id,
                )
            )
            existing = await session.scalar(query)
            now_ts = datetime.now(timezone.utc)

            if existing:
                # update only the fields that actually changed
                if existing.is_open != is_open:
                    existing.is_open = is_open

                if visible_content is not None and existing.visible_content != visible_content:
                    existing.visible_content = visible_content

                # access timestamp and line range are always refreshed
                existing.last_accessed_at = now_ts
                existing.start_line = start_line
                existing.end_line = end_line

                await existing.update_async(session, actor=actor)
                # no files were closed on this path
                return existing.to_pydantic(), []

            assoc = FileAgentModel(
                agent_id=agent_id,
                file_id=file_id,
                file_name=file_name,
                source_id=source_id,
                organization_id=actor.organization_id,
                is_open=is_open,
                visible_content=visible_content,
                last_accessed_at=now_ts,
                start_line=start_line,
                end_line=end_line,
            )
            await assoc.create_async(session, actor=actor)
            return assoc.to_pydantic(), []
@enforce_types
@trace_method
async def update_file_agent_by_id(
    self,
    *,
    agent_id: str,
    file_id: str,
    actor: PydanticUser,
    is_open: Optional[bool] = None,
    visible_content: Optional[str] = None,
    start_line: Optional[int] = None,
    end_line: Optional[int] = None,
) -> PydanticFileAgent:
    """Patch an existing association row.

    Only the keyword fields that are not None are written; the
    last-accessed timestamp is always refreshed.
    """
    async with db_registry.async_session() as session:
        association = await self._get_association_by_file_id(session, agent_id, file_id, actor)

        requested_changes = {
            "is_open": is_open,
            "visible_content": visible_content,
            "start_line": start_line,
            "end_line": end_line,
        }
        for field, new_value in requested_changes.items():
            if new_value is not None:
                setattr(association, field, new_value)

        # touch timestamp
        association.last_accessed_at = datetime.now(timezone.utc)

        await association.update_async(session, actor=actor)
        return association.to_pydantic()
@enforce_types
@trace_method
async def update_file_agent_by_name(
    self,
    *,
    agent_id: str,
    file_name: str,
    actor: PydanticUser,
    is_open: Optional[bool] = None,
    visible_content: Optional[str] = None,
) -> PydanticFileAgent:
    """Patch an existing association row, looked up by file name.

    Only non-None fields are written; the last-accessed timestamp is
    always refreshed.
    """
    async with db_registry.async_session() as session:
        association = await self._get_association_by_file_name(session, agent_id, file_name, actor)

        for field, new_value in (("is_open", is_open), ("visible_content", visible_content)):
            if new_value is not None:
                setattr(association, field, new_value)

        # touch timestamp
        association.last_accessed_at = datetime.now(timezone.utc)

        await association.update_async(session, actor=actor)
        return association.to_pydantic()
@enforce_types
@trace_method
async def detach_file(self, *, agent_id: str, file_id: str, actor: PydanticUser) -> None:
    """Permanently remove the agent-file association row (hard delete)."""
    async with db_registry.async_session() as session:
        association = await self._get_association_by_file_id(session, agent_id, file_id, actor)
        await association.hard_delete_async(session, actor=actor)
@enforce_types
@trace_method
async def detach_file_bulk(self, *, agent_file_pairs: List, actor: PydanticUser) -> int:  # List of (agent_id, file_id) tuples
    """
    Bulk delete multiple agent-file associations in a single query.

    Args:
        agent_file_pairs: List of (agent_id, file_id) tuples to delete
        actor: User performing the action

    Returns:
        Number of rows deleted
    """
    if not agent_file_pairs:
        return 0

    async with db_registry.async_session() as session:
        # One OR-of-ANDs predicate covering every requested pair, scoped
        # to the actor's organization, so a single DELETE does the work.
        pair_filters = [
            and_(FileAgentModel.agent_id == pair_agent_id, FileAgentModel.file_id == pair_file_id)
            for pair_agent_id, pair_file_id in agent_file_pairs
        ]
        stmt = delete(FileAgentModel).where(
            and_(or_(*pair_filters), FileAgentModel.organization_id == actor.organization_id)
        )
        result = await session.execute(stmt)
        # commit is handled by the session context manager
        return result.rowcount
@enforce_types
@trace_method
async def get_file_agent_by_id(self, *, agent_id: str, file_id: str, actor: PydanticUser) -> Optional[PydanticFileAgent]:
    """Fetch one agent-file association, or None when it does not exist."""
    async with db_registry.async_session() as session:
        try:
            row = await self._get_association_by_file_id(session, agent_id, file_id, actor)
        except NoResultFound:
            return None
        return row.to_pydantic()
@enforce_types
@trace_method
async def get_all_file_blocks_by_name(
self,
*,
file_names: List[str],
agent_id: str,
per_file_view_window_char_limit: int,
actor: PydanticUser,
) -> List[PydanticBlock]:
"""
Retrieve multiple FileAgent associations by their file names for a specific agent.
Args:
file_names: List of file names to retrieve
agent_id: ID of the agent to retrieve file blocks for
per_file_view_window_char_limit: The per-file view window char limit
actor: The user making the request
Returns:
List of PydanticBlock objects found (may be fewer than requested if some file names don't exist)
"""
if not file_names:
return []
async with db_registry.async_session() as session:
# Use IN clause for efficient bulk retrieval
query = select(FileAgentModel).where(
and_(
FileAgentModel.file_name.in_(file_names),
FileAgentModel.agent_id == agent_id,
FileAgentModel.organization_id == actor.organization_id,
)
)
# Execute query and get all results
rows = (await session.execute(query)).scalars().all()
# Convert to Pydantic models
return [row.to_pydantic_block(per_file_view_window_char_limit=per_file_view_window_char_limit) for row in rows]
@enforce_types
@trace_method
async def get_file_agent_by_file_name(self, *, agent_id: str, file_name: str, actor: PydanticUser) -> Optional[PydanticFileAgent]:
async with db_registry.async_session() as session:
try:
assoc = await self._get_association_by_file_name(session, agent_id, file_name, actor)
return assoc.to_pydantic()
except NoResultFound:
return None
@enforce_types
@trace_method
async def list_files_for_agent(
self,
agent_id: str,
per_file_view_window_char_limit: int,
actor: PydanticUser,
is_open_only: bool = False,
return_as_blocks: bool = False,
) -> Union[List[PydanticFileAgent], List[PydanticFileBlock]]:
"""Return associations for *agent_id* (filtering by `is_open` if asked)."""
async with db_registry.async_session() as session:
conditions = [
FileAgentModel.agent_id == agent_id,
FileAgentModel.organization_id == actor.organization_id,
]
if is_open_only:
conditions.append(FileAgentModel.is_open.is_(True))
rows = (await session.execute(select(FileAgentModel).where(and_(*conditions)))).scalars().all()
if return_as_blocks:
return [r.to_pydantic_block(per_file_view_window_char_limit=per_file_view_window_char_limit) for r in rows]
else:
return [r.to_pydantic() for r in rows]
@enforce_types
@trace_method
async def get_file_ids_for_agent_by_source(
self,
agent_id: str,
source_id: str,
actor: PydanticUser,
) -> List[str]:
"""
Get all file IDs attached to an agent from a specific source.
This queries the files_agents junction table directly, ensuring we get
exactly the files that were attached, regardless of any changes to the
source's file list.
"""
async with db_registry.async_session() as session:
stmt = select(FileAgentModel.file_id).where(
and_(
FileAgentModel.agent_id == agent_id,
FileAgentModel.source_id == source_id,
FileAgentModel.organization_id == actor.organization_id,
)
)
result = await session.execute(stmt)
return list(result.scalars().all())
    @enforce_types
    @trace_method
    async def list_files_for_agent_paginated(
        self,
        agent_id: str,
        actor: PydanticUser,
        cursor: Optional[str] = None,
        limit: int = 20,
        is_open: Optional[bool] = None,
        before: Optional[str] = None,
        after: Optional[str] = None,
        ascending: bool = False,
    ) -> tuple[List[PydanticFileAgent], Optional[str], bool]:
        """
        Return paginated file associations for an agent.

        Args:
            agent_id: The agent ID to get files for
            actor: User performing the action
            cursor: Pagination cursor (file-agent ID to start after) - deprecated, use before/after
            limit: Maximum number of results to return
            is_open: Optional filter for open/closed status (None = all, True = open only, False = closed only)
            before: File-agent ID cursor for pagination. Returns files that come before this ID in the specified sort order
            after: File-agent ID cursor for pagination. Returns files that come after this ID in the specified sort order
            ascending: Sort order (True = ascending by created_at/id, False = descending)

        Returns:
            Tuple of (file_agents, next_cursor, has_more)

        NOTE(review): the before/after filters compare on `id` alone while the
        sort for that path is (created_at, id), and the `<`/`>` direction does
        not flip with `ascending` — this is only consistent if ids are
        monotonic in creation order. Confirm before treating cursors here as
        strictly ordered.
        """
        async with db_registry.async_session() as session:
            conditions = [
                FileAgentModel.agent_id == agent_id,
                FileAgentModel.organization_id == actor.organization_id,
                FileAgentModel.is_deleted == False,  # soft-deleted rows are excluded
            ]

            # apply is_open filter if specified
            if is_open is not None:
                conditions.append(FileAgentModel.is_open == is_open)

            # handle pagination cursors (support both old and new style);
            # precedence: before > after > deprecated cursor
            if before:
                conditions.append(FileAgentModel.id < before)
            elif after:
                conditions.append(FileAgentModel.id > after)
            elif cursor:
                # fallback to old cursor behavior for backwards compatibility
                conditions.append(FileAgentModel.id > cursor)

            query = select(FileAgentModel).where(and_(*conditions))

            # apply sorting based on pagination method
            if before or after:
                # For new cursor-based pagination, use created_at + id ordering
                if ascending:
                    query = query.order_by(FileAgentModel.created_at.asc(), FileAgentModel.id.asc())
                else:
                    query = query.order_by(FileAgentModel.created_at.desc(), FileAgentModel.id.desc())
            else:
                # For old cursor compatibility, maintain original behavior (ascending by ID)
                query = query.order_by(FileAgentModel.id)

            # fetch limit + 1 to check if there are more results
            query = query.limit(limit + 1)

            result = await session.execute(query)
            rows = result.scalars().all()

            # check if we got more records than requested (meaning there are more pages)
            has_more = len(rows) > limit
            if has_more:
                # trim back to the requested limit
                rows = rows[:limit]

            # get cursor for next page (ID of last item in current page)
            next_cursor = rows[-1].id if rows else None

            return [r.to_pydantic() for r in rows], next_cursor, has_more
@enforce_types
@trace_method
async def list_agents_for_file(
self,
file_id: str,
actor: PydanticUser,
is_open_only: bool = False,
) -> List[PydanticFileAgent]:
"""Return associations for *file_id* (filtering by `is_open` if asked)."""
async with db_registry.async_session() as session:
conditions = [
FileAgentModel.file_id == file_id,
FileAgentModel.organization_id == actor.organization_id,
]
if is_open_only:
conditions.append(FileAgentModel.is_open.is_(True))
rows = (await session.execute(select(FileAgentModel).where(and_(*conditions)))).scalars().all()
return [r.to_pydantic() for r in rows]
@enforce_types
@trace_method
async def mark_access(self, *, agent_id: str, file_id: str, actor: PydanticUser) -> None:
"""Update only `last_accessed_at = now()` without loading the row."""
async with db_registry.async_session() as session:
stmt = (
update(FileAgentModel)
.where(
FileAgentModel.agent_id == agent_id,
FileAgentModel.file_id == file_id,
FileAgentModel.organization_id == actor.organization_id,
)
.values(last_accessed_at=func.now())
)
await session.execute(stmt)
# context manager now handles commits
# await session.commit()
@enforce_types
@trace_method
async def mark_access_bulk(self, *, agent_id: str, file_names: List[str], actor: PydanticUser) -> None:
"""Update `last_accessed_at = now()` for multiple files by name without loading rows."""
if not file_names:
return
async with db_registry.async_session() as session:
stmt = (
update(FileAgentModel)
.where(
FileAgentModel.agent_id == agent_id,
FileAgentModel.file_name.in_(file_names),
FileAgentModel.organization_id == actor.organization_id,
)
.values(last_accessed_at=func.now())
)
await session.execute(stmt)
# context manager now handles commits
# await session.commit()
    @enforce_types
    @trace_method
    async def close_all_other_files(self, *, agent_id: str, keep_file_names: List[str], actor: PydanticUser) -> List[str]:
        """Close every open file for this agent except those in keep_file_names.

        Args:
            agent_id: ID of the agent
            keep_file_names: List of file names to keep open
            actor: User performing the action

        Returns:
            List of file names that were closed
        """
        async with db_registry.async_session() as session:
            stmt = (
                update(FileAgentModel)
                .where(
                    and_(
                        FileAgentModel.agent_id == agent_id,
                        FileAgentModel.organization_id == actor.organization_id,
                        FileAgentModel.is_open.is_(True),
                        # Only add the NOT IN filter when there are names to keep
                        # (a bare True collapses to a no-op predicate inside and_())
                        ~FileAgentModel.file_name.in_(keep_file_names) if keep_file_names else True,
                    )
                )
                # Closing a file also drops its cached window content
                .values(is_open=False, visible_content=None)
                .returning(FileAgentModel.file_name)  # Gets the names we closed
                .execution_options(synchronize_session=False)  # No need to sync ORM state
            )
            closed_file_names = [row.file_name for row in (await session.execute(stmt))]
            # context manager now handles commits
            # await session.commit()
            return closed_file_names
    @enforce_types
    @trace_method
    async def enforce_max_open_files_and_open(
        self,
        *,
        agent_id: str,
        file_id: str,
        file_name: str,
        source_id: str,
        actor: PydanticUser,
        visible_content: str,
        max_files_open: int,
        start_line: Optional[int] = None,
        end_line: Optional[int] = None,
    ) -> tuple[List[str], bool, Dict[str, tuple[Optional[int], Optional[int]]]]:
        """
        Efficiently handle LRU eviction and file opening in a single transaction.

        Args:
            agent_id: ID of the agent
            file_id: ID of the file to open
            file_name: Name of the file to open
            source_id: ID of the source
            actor: User performing the action
            visible_content: Content to set for the opened file
            max_files_open: Cap on simultaneously open files; the least
                recently accessed other open files are evicted to honor it
            start_line: Optional first line of the requested view range
            end_line: Optional last line of the requested view range

        Returns:
            Tuple of (closed_file_names, file_was_already_open, previous_ranges)
            where previous_ranges maps file names to their old (start_line, end_line) ranges
        """
        async with db_registry.async_session() as session:
            # Single query to get ALL open files for this agent, ordered by last_accessed_at (oldest first)
            open_files_query = (
                select(FileAgentModel)
                .where(
                    and_(
                        FileAgentModel.agent_id == agent_id,
                        FileAgentModel.organization_id == actor.organization_id,
                        FileAgentModel.is_open.is_(True),
                    )
                )
                .order_by(FileAgentModel.last_accessed_at.asc())  # Oldest first for LRU
            )
            all_open_files = (await session.execute(open_files_query)).scalars().all()

            # Check if the target file exists (open or closed)
            target_file_query = select(FileAgentModel).where(
                and_(
                    FileAgentModel.agent_id == agent_id,
                    FileAgentModel.organization_id == actor.organization_id,
                    FileAgentModel.file_name == file_name,
                )
            )
            file_to_open = await session.scalar(target_file_query)

            # Separate the file we're opening from others (only if it's currently open)
            other_open_files = []
            for file_agent in all_open_files:
                if file_agent.file_name != file_name:
                    other_open_files.append(file_agent)

            file_was_already_open = file_to_open is not None and file_to_open.is_open

            # Capture previous line range if file was already open and we're changing the range
            previous_ranges = {}
            if file_was_already_open and file_to_open:
                old_start = file_to_open.start_line
                old_end = file_to_open.end_line
                # Only record if there was a previous range or if we're setting a new range
                if old_start is not None or old_end is not None or start_line is not None or end_line is not None:
                    # Only record if the range is actually changing
                    if old_start != start_line or old_end != end_line:
                        previous_ranges[file_name] = (old_start, old_end)

            # Calculate how many files need to be closed
            current_other_count = len(other_open_files)
            target_other_count = max_files_open - 1  # Reserve 1 slot for file we're opening
            closed_file_names = []
            if current_other_count > target_other_count:
                files_to_close_count = current_other_count - target_other_count
                files_to_close = other_open_files[:files_to_close_count]  # Take oldest
                # Bulk close files using a single UPDATE query
                file_ids_to_close = [f.file_id for f in files_to_close]
                closed_file_names = [f.file_name for f in files_to_close]
                if file_ids_to_close:
                    close_stmt = (
                        update(FileAgentModel)
                        .where(
                            and_(
                                FileAgentModel.agent_id == agent_id,
                                FileAgentModel.file_id.in_(file_ids_to_close),
                                FileAgentModel.organization_id == actor.organization_id,
                            )
                        )
                        .values(is_open=False, visible_content=None)
                    )
                    await session.execute(close_stmt)

            # Open the target file (update or create)
            now_ts = datetime.now(timezone.utc)
            if file_to_open:
                # Update existing file
                file_to_open.is_open = True
                file_to_open.visible_content = visible_content
                file_to_open.last_accessed_at = now_ts
                file_to_open.start_line = start_line
                file_to_open.end_line = end_line
                await file_to_open.update_async(session, actor=actor)
            else:
                # Create new file association
                new_file_agent = FileAgentModel(
                    agent_id=agent_id,
                    file_id=file_id,
                    file_name=file_name,
                    source_id=source_id,
                    organization_id=actor.organization_id,
                    is_open=True,
                    visible_content=visible_content,
                    last_accessed_at=now_ts,
                    start_line=start_line,
                    end_line=end_line,
                )
                await new_file_agent.create_async(session, actor=actor)

            return closed_file_names, file_was_already_open, previous_ranges
    @enforce_types
    @trace_method
    async def attach_files_bulk(
        self,
        *,
        agent_id: str,
        files_metadata: list[FileMetadata],
        max_files_open: int,
        visible_content_map: Optional[dict[str, str]] = None,
        actor: PydanticUser,
    ) -> list[str]:
        """Atomically attach many files, applying an LRU cap with one commit.

        Args:
            agent_id: ID of the agent to attach to
            files_metadata: Files to attach, in caller priority order
            max_files_open: Cap on simultaneously open files after this call
            visible_content_map: Optional file_name -> visible content for files
                that end up open (others get empty content)
            actor: User performing the action

        Returns:
            Names of files left closed: previously-open files evicted by the
            LRU cap plus new files beyond the cap.
        """
        if not files_metadata:
            return []
        # TODO: This is not strictly necessary, as the file_metadata should never be duped
        # TODO: But we have this as a protection, check logs for details
        # dedupe while preserving caller order
        seen: set[str] = set()
        ordered_unique: list[FileMetadata] = []
        for m in files_metadata:
            if m.file_name not in seen:
                ordered_unique.append(m)
                seen.add(m.file_name)
        if (dup_cnt := len(files_metadata) - len(ordered_unique)) > 0:
            logger.warning(
                "attach_files_bulk: removed %d duplicate file(s) for agent %s",
                dup_cnt,
                agent_id,
            )
        now = datetime.now(timezone.utc)
        vc_for = visible_content_map or {}
        async with db_registry.async_session() as session:
            # fetch existing assoc rows for requested names
            existing_q = select(FileAgentModel).where(
                FileAgentModel.agent_id == agent_id,
                FileAgentModel.organization_id == actor.organization_id,
                FileAgentModel.file_name.in_(seen),
            )
            existing_rows = (await session.execute(existing_q)).scalars().all()
            existing_by_name = {r.file_name: r for r in existing_rows}
            # snapshot current OPEN rows (oldest first)
            open_q = (
                select(FileAgentModel)
                .where(
                    FileAgentModel.agent_id == agent_id,
                    FileAgentModel.organization_id == actor.organization_id,
                    FileAgentModel.is_open.is_(True),
                )
                .order_by(FileAgentModel.last_accessed_at.asc())
            )
            currently_open = (await session.execute(open_q)).scalars().all()
            new_names = [m.file_name for m in ordered_unique]
            new_names_set = set(new_names)
            still_open_names = [r.file_name for r in currently_open if r.file_name not in new_names_set]
            # decide final open set: new files take priority; any remaining
            # slots go to the MOST recently accessed previously-open files
            if len(new_names) >= max_files_open:
                final_open = new_names[:max_files_open]
            else:
                room_for_old = max_files_open - len(new_names)
                final_open = new_names + still_open_names[-room_for_old:]
            final_open_set = set(final_open)
            closed_file_names = [r.file_name for r in currently_open if r.file_name not in final_open_set]
            # Add new files that won't be opened due to max_files_open limit
            if len(new_names) >= max_files_open:
                closed_file_names.extend(new_names[max_files_open:])
            # evicted = previously-open rows that must flip to closed in the DB
            evicted_ids = [r.file_id for r in currently_open if r.file_name in closed_file_names]
            # validate file IDs exist to prevent FK violations (files may have been deleted)
            requested_file_ids = {meta.id for meta in ordered_unique}
            existing_file_ids_q = select(FileMetadataModel.id).where(FileMetadataModel.id.in_(requested_file_ids))
            existing_file_ids = set((await session.execute(existing_file_ids_q)).scalars().all())
            missing_file_ids = requested_file_ids - existing_file_ids
            if missing_file_ids:
                logger.warning(
                    "attach_files_bulk: skipping %d file(s) with missing records for agent %s: %s",
                    len(missing_file_ids),
                    agent_id,
                    missing_file_ids,
                )
                ordered_unique = [m for m in ordered_unique if m.id in existing_file_ids]
            # upsert requested files
            for meta in ordered_unique:
                is_now_open = meta.file_name in final_open_set
                # closed files carry no cached window content
                vc = vc_for.get(meta.file_name, "") if is_now_open else None
                if row := existing_by_name.get(meta.file_name):
                    row.is_open = is_now_open
                    row.visible_content = vc
                    row.last_accessed_at = now
                    session.add(row)  # already present, but safe
                else:
                    session.add(
                        FileAgentModel(
                            agent_id=agent_id,
                            file_id=meta.id,
                            file_name=meta.file_name,
                            source_id=meta.source_id,
                            organization_id=actor.organization_id,
                            is_open=is_now_open,
                            visible_content=vc,
                            last_accessed_at=now,
                        )
                    )
            # bulk-close evicted rows
            if evicted_ids:
                await session.execute(
                    update(FileAgentModel)
                    .where(
                        FileAgentModel.agent_id == agent_id,
                        FileAgentModel.organization_id == actor.organization_id,
                        FileAgentModel.file_id.in_(evicted_ids),
                    )
                    .values(is_open=False, visible_content=None)
                )
            # context manager now handles commits
            # await session.commit()
            return closed_file_names
async def _get_association_by_file_id(self, session, agent_id: str, file_id: str, actor: PydanticUser) -> FileAgentModel:
q = select(FileAgentModel).where(
and_(
FileAgentModel.agent_id == agent_id,
FileAgentModel.file_id == file_id,
FileAgentModel.organization_id == actor.organization_id,
)
)
assoc = await session.scalar(q)
if not assoc:
raise NoResultFound(f"FileAgent(agent_id={agent_id}, file_id={file_id}) not found in org {actor.organization_id}")
return assoc
async def _get_association_by_file_name(self, session, agent_id: str, file_name: str, actor: PydanticUser) -> FileAgentModel:
q = select(FileAgentModel).where(
and_(
FileAgentModel.agent_id == agent_id,
FileAgentModel.file_name == file_name,
FileAgentModel.organization_id == actor.organization_id,
)
)
assoc = await session.scalar(q)
if not assoc:
raise NoResultFound(f"FileAgent(agent_id={agent_id}, file_name={file_name}) not found in org {actor.organization_id}")
return assoc
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/files_agents_manager.py",
"license": "Apache License 2.0",
"lines": 683,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/services/helpers/tool_parser_helper.py | import ast
import base64
import pickle
from typing import Any, Union
from letta.constants import REQUEST_HEARTBEAT_DESCRIPTION, REQUEST_HEARTBEAT_PARAM, SEND_MESSAGE_TOOL_NAME
from letta.schemas.agent import AgentState
from letta.schemas.response_format import ResponseFormatType, ResponseFormatUnion
from letta.types import JsonDict, JsonValue
def parse_stdout_best_effort(text: Union[str, bytes]) -> tuple[Any, AgentState | None]:
    """
    Decode and unpickle the result from the function execution if possible.
    Returns (function_return_value, agent_state).
    """
    # Empty/None payload: nothing was captured from the execution.
    if not text:
        return None, None
    # A str payload is assumed to be base64-encoded pickle bytes.
    if isinstance(text, str):
        text = base64.b64decode(text)
    # SECURITY NOTE: pickle.loads executes arbitrary callables embedded in the
    # payload. Only safe if this data is produced internally (presumably the
    # sandboxed tool-execution wrapper) — verify callers never feed it
    # untrusted input.
    result = pickle.loads(text)
    # KeyError here means the payload was not produced by the expected wrapper.
    agent_state = result["agent_state"]
    return result["results"], agent_state
def parse_function_arguments(source_code: str, tool_name: str):
    """Return the parameter names of the function named *tool_name* in *source_code*.

    Handles both sync and async defs. Collects positional-only, regular, and
    keyword-only parameters in declaration order; *args/**kwargs are excluded.

    Args:
        source_code: Python source containing the function definition.
        tool_name: Name of the function whose parameters to extract.

    Returns:
        List of parameter names; empty if no matching function is found.

    Raises:
        SyntaxError: If source_code is not valid Python.
    """
    tree = ast.parse(source_code)
    args: list[str] = []
    for node in ast.walk(tree):
        # Handle both sync and async functions
        if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef)) and node.name == tool_name:
            # Fix: previously only node.args.args was read, which silently
            # dropped positional-only and keyword-only parameters.
            for arg in node.args.posonlyargs + node.args.args + node.args.kwonlyargs:
                args.append(arg.arg)
    return args
def convert_param_to_str_value(param_type: str, raw_value: JsonValue) -> str:
    """
    Render *raw_value* as Python source text according to its JSON-schema type.

    TODO (cliandy): increase sanitization checks here to fail at the right place
    """
    if param_type not in {"string", "integer", "boolean", "number", "array", "object"}:
        raise TypeError(f"Unsupported type: {param_type}, raw_value={raw_value}")

    if param_type == "string":
        # repr() yields a safely quoted/escaped Python string literal
        return repr(raw_value)

    if param_type == "integer":
        return str(int(raw_value))

    if param_type == "boolean":
        if isinstance(raw_value, bool):
            return str(raw_value)
        if isinstance(raw_value, int) and raw_value in (0, 1):
            return str(bool(raw_value))
        if isinstance(raw_value, str) and raw_value.strip().lower() in ("true", "false"):
            return raw_value.strip().lower().capitalize()
        raise ValueError(f"Invalid boolean value: {raw_value}")

    # "number", "array", and "object" all fall through to str();
    # array sanitization is still pending (needs more testing).
    return str(raw_value)
def runtime_override_tool_json_schema(
    tool_list: list[JsonDict],
    response_format: ResponseFormatUnion | None,
    request_heartbeat: bool = True,
    terminal_tools: set[str] | None = None,
) -> list[JsonDict]:
    """Override the tool JSON schemas at runtime if certain conditions are met.

    Cases:
    1. `send_message`'s `message` parameter is rewritten when a non-text
       `response_format` is provided (json_schema or json_object).
    2. A `request_heartbeat` boolean parameter is added to every non-terminal
       tool and marked as required.

    Args:
        tool_list: Tool JSON schemas; mutated in place and returned.
        response_format: Optional response format driving the send_message override.
        request_heartbeat: Whether to inject the request_heartbeat parameter.
        terminal_tools: Tool names that must NOT receive request_heartbeat.

    Returns:
        The same tool_list, mutated in place.
    """
    if terminal_tools is None:
        terminal_tools = set()
    for tool_json in tool_list:
        if tool_json["name"] == SEND_MESSAGE_TOOL_NAME and response_format and response_format.type != ResponseFormatType.text:
            if response_format.type == ResponseFormatType.json_schema:
                tool_json["parameters"]["properties"]["message"] = response_format.json_schema["schema"]
            if response_format.type == ResponseFormatType.json_object:
                tool_json["parameters"]["properties"]["message"] = {
                    "type": "object",
                    "description": "Message contents. All unicode (including emojis) are supported.",
                    "additionalProperties": True,
                    "properties": {},
                }
        # Only add request_heartbeat to non-terminal tools
        if request_heartbeat and tool_json["name"] not in terminal_tools:
            tool_json["parameters"]["properties"][REQUEST_HEARTBEAT_PARAM] = {
                "type": "boolean",
                "description": REQUEST_HEARTBEAT_DESCRIPTION,
            }
            # Fix: setdefault guards against schemas that omit "required"
            # entirely (previously a KeyError).
            required = tool_json["parameters"].setdefault("required", [])
            if REQUEST_HEARTBEAT_PARAM not in required:
                required.append(REQUEST_HEARTBEAT_PARAM)
    return tool_list
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/helpers/tool_parser_helper.py",
"license": "Apache License 2.0",
"lines": 92,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/services/mcp/oauth_utils.py | """OAuth utilities for MCP server authentication."""
import asyncio
import json
import secrets
import time
from datetime import datetime, timedelta
from typing import TYPE_CHECKING, Callable, Optional, Tuple
from mcp.client.auth import OAuthClientProvider, TokenStorage
from mcp.shared.auth import OAuthClientInformationFull, OAuthClientMetadata, OAuthToken
from sqlalchemy import select
from letta.log import get_logger
from letta.orm.mcp_oauth import MCPOAuth, OAuthSessionStatus
from letta.schemas.mcp import MCPOAuthSessionUpdate
from letta.schemas.user import User as PydanticUser
from letta.server.db import db_registry
from letta.services.mcp.types import OauthStreamEvent
if TYPE_CHECKING:
from letta.services.mcp_manager import MCPManager
logger = get_logger(__name__)
class DatabaseTokenStorage(TokenStorage):
    """Database-backed token storage using MCPOAuth table via mcp_manager."""

    def __init__(self, session_id: str, mcp_manager: "MCPManager", actor: PydanticUser):
        # All reads/writes are scoped to one OAuth session row on behalf of one actor.
        self.session_id = session_id
        self.mcp_manager = mcp_manager
        self.actor = actor

    async def get_tokens(self) -> Optional[OAuthToken]:
        """Retrieve tokens from database.

        Returns None when the session does not exist or holds no access token.
        """
        oauth_session = await self.mcp_manager.get_oauth_session_by_id(self.session_id, self.actor)
        if not oauth_session:
            return None

        # Read tokens directly from _enc columns
        access_token = await oauth_session.access_token_enc.get_plaintext_async() if oauth_session.access_token_enc else None
        if not access_token:
            return None
        refresh_token = await oauth_session.refresh_token_enc.get_plaintext_async() if oauth_session.refresh_token_enc else None

        # NOTE(review): assumes expires_at is always set when an access token
        # exists — confirm writers (set_tokens) guarantee this.
        return OAuthToken(
            access_token=access_token,
            refresh_token=refresh_token,
            token_type=oauth_session.token_type,
            # Convert the stored absolute expiry back into a relative lifetime
            # (may be negative if already expired)
            expires_in=int(oauth_session.expires_at.timestamp() - time.time()),
            scope=oauth_session.scope,
        )

    async def set_tokens(self, tokens: OAuthToken) -> None:
        """Store tokens in database and mark the session AUTHORIZED."""
        session_update = MCPOAuthSessionUpdate(
            access_token=tokens.access_token,
            refresh_token=tokens.refresh_token,
            token_type=tokens.token_type,
            # Persist an absolute expiry computed from the relative expires_in
            expires_at=datetime.fromtimestamp(tokens.expires_in + time.time()),
            scope=tokens.scope,
            status=OAuthSessionStatus.AUTHORIZED,
        )
        await self.mcp_manager.update_oauth_session(self.session_id, session_update, self.actor)

    async def get_client_info(self) -> Optional[OAuthClientInformationFull]:
        """Retrieve client information from database (None if not registered yet)."""
        oauth_session = await self.mcp_manager.get_oauth_session_by_id(self.session_id, self.actor)
        if not oauth_session or not oauth_session.client_id:
            return None
        # Read client secret directly from _enc column
        client_secret = await oauth_session.client_secret_enc.get_plaintext_async() if oauth_session.client_secret_enc else None
        return OAuthClientInformationFull(
            client_id=oauth_session.client_id,
            client_secret=client_secret,
            redirect_uris=[oauth_session.redirect_uri] if oauth_session.redirect_uri else [],
        )

    async def set_client_info(self, client_info: OAuthClientInformationFull) -> None:
        """Store client information in database."""
        session_update = MCPOAuthSessionUpdate(
            client_id=client_info.client_id,
            client_secret=client_info.client_secret,
            # Only the first redirect URI is persisted
            redirect_uri=str(client_info.redirect_uris[0]) if client_info.redirect_uris else None,
        )
        await self.mcp_manager.update_oauth_session(self.session_id, session_update, self.actor)
class MCPOAuthSession:
    """Legacy OAuth session class - deprecated, use mcp_manager directly.

    All DB accessors here are best-effort: read/update failures are logged and
    mapped to a benign return value (ERROR status / None / no-op) instead of
    raising. Previously these failures were swallowed silently, which made
    OAuth problems impossible to diagnose from logs.
    """

    def __init__(
        self,
        session_id: str,
        server_url: Optional[str] = None,
        server_name: Optional[str] = None,
        user_id: Optional[str] = None,
        organization_id: Optional[str] = None,
    ):
        self.session_id = session_id
        self.server_url = server_url
        self.server_name = server_name
        self.user_id = user_id
        self.organization_id = organization_id
        # A CSRF state token is only generated when initiating a new flow.
        self.state = secrets.token_urlsafe(32) if server_url else None

    # TODO: consolidate / deprecate this in favor of mcp_manager access
    async def create_session(self) -> str:
        """Create a new OAuth session in the database; returns the session id."""
        async with db_registry.async_session() as session:
            oauth_record = MCPOAuth(
                id=self.session_id,
                state=self.state,
                server_url=self.server_url,
                server_name=self.server_name,
                user_id=self.user_id,
                organization_id=self.organization_id,
                status=OAuthSessionStatus.PENDING,
                # NOTE(review): naive local timestamps — confirm these columns
                # are stored naive elsewhere too
                created_at=datetime.now(),
                updated_at=datetime.now(),
            )
            oauth_record = await oauth_record.create_async(session, actor=None)
            return self.session_id

    async def get_session_status(self) -> OAuthSessionStatus:
        """Get the current status of the OAuth session (ERROR when unreadable)."""
        async with db_registry.async_session() as session:
            try:
                oauth_record = await MCPOAuth.read_async(db_session=session, identifier=self.session_id, actor=None)
                return oauth_record.status
            except Exception:
                # Keep the best-effort contract but leave a trace in the logs.
                logger.warning("get_session_status failed for OAuth session %s", self.session_id, exc_info=True)
                return OAuthSessionStatus.ERROR

    async def update_session_status(self, status: OAuthSessionStatus) -> None:
        """Update the session status (best-effort; failures are logged, not raised)."""
        async with db_registry.async_session() as session:
            try:
                oauth_record = await MCPOAuth.read_async(db_session=session, identifier=self.session_id, actor=None)
                oauth_record.status = status
                oauth_record.updated_at = datetime.now()
                await oauth_record.update_async(db_session=session, actor=None)
            except Exception:
                logger.warning("update_session_status failed for OAuth session %s", self.session_id, exc_info=True)

    async def store_authorization_code(self, code: str, state: str) -> Optional[MCPOAuth]:
        """Store the authorization code from OAuth callback.

        Returns the updated record, or None if the session could not be updated.
        """
        from letta.schemas.secret import Secret

        async with db_registry.async_session() as session:
            try:
                oauth_record = await MCPOAuth.read_async(db_session=session, identifier=self.session_id, actor=None)
                # Encrypt the authorization_code and store only in _enc column (async to avoid blocking event loop)
                if code is not None:
                    code_secret = await Secret.from_plaintext_async(code)
                    oauth_record.authorization_code_enc = code_secret.get_encrypted()
                oauth_record.status = OAuthSessionStatus.AUTHORIZED
                oauth_record.state = state
                return await oauth_record.update_async(db_session=session, actor=None)
            except Exception:
                logger.warning("store_authorization_code failed for OAuth session %s", self.session_id, exc_info=True)
                return None

    async def get_authorization_url(self) -> Optional[str]:
        """Get the authorization URL for this session (None when unreadable)."""
        async with db_registry.async_session() as session:
            try:
                oauth_record = await MCPOAuth.read_async(db_session=session, identifier=self.session_id, actor=None)
                return oauth_record.authorization_url
            except Exception:
                logger.warning("get_authorization_url failed for OAuth session %s", self.session_id, exc_info=True)
                return None

    async def set_authorization_url(self, url: str) -> None:
        """Set the authorization URL for this session (best-effort)."""
        async with db_registry.async_session() as session:
            try:
                oauth_record = await MCPOAuth.read_async(db_session=session, identifier=self.session_id, actor=None)
                oauth_record.authorization_url = url
                oauth_record.updated_at = datetime.now()
                await oauth_record.update_async(db_session=session, actor=None)
            except Exception:
                logger.warning("set_authorization_url failed for OAuth session %s", self.session_id, exc_info=True)
async def create_oauth_provider(
    session_id: str,
    server_url: str,
    redirect_uri: str,
    mcp_manager: "MCPManager",
    actor: PydanticUser,
    logo_uri: Optional[str] = None,
    url_callback: Optional[Callable[[str], None]] = None,
) -> OAuthClientProvider:
    """Create an OAuth provider for MCP server authentication.

    DEPRECATED: Use ServerSideOAuth from letta.services.mcp.server_side_oauth instead.
    This function is kept for backwards compatibility but will be removed in a future version.

    Args:
        session_id: OAuth session row the handlers read/write.
        server_url: MCP server URL; trailing /sse or /mcp is stripped for OAuth.
        redirect_uri: Callback URI registered with the authorization server.
        mcp_manager: Manager used for session persistence.
        actor: User on whose behalf the session is accessed.
        logo_uri: Optional client logo for the registration metadata.
        url_callback: Optional hook invoked with the authorization URL
            (e.g., to yield it to an SSE stream).
    """
    logger.warning("create_oauth_provider is deprecated. Use ServerSideOAuth from letta.services.mcp.server_side_oauth instead.")

    # Standard OAuth dynamic-client-registration metadata
    client_metadata_dict = {
        "client_name": "Letta",
        "redirect_uris": [redirect_uri],
        "grant_types": ["authorization_code", "refresh_token"],
        "response_types": ["code"],
        "token_endpoint_auth_method": "client_secret_post",
        "logo_uri": logo_uri,
    }

    # Use manager-based storage
    storage = DatabaseTokenStorage(session_id, mcp_manager, actor)

    # Extract base URL (remove /mcp endpoint if present)
    oauth_server_url = server_url.rstrip("/").removesuffix("/sse").removesuffix("/mcp")

    async def redirect_handler(authorization_url: str) -> None:
        """Handle OAuth redirect by storing the authorization URL."""
        logger.info(f"OAuth redirect handler called with URL: {authorization_url}")
        session_update = MCPOAuthSessionUpdate(authorization_url=authorization_url)
        await mcp_manager.update_oauth_session(session_id, session_update, actor)
        logger.info(f"OAuth authorization URL stored: {authorization_url}")

        # Call the callback if provided (e.g., to yield URL to SSE stream)
        if url_callback:
            url_callback(authorization_url)

    async def callback_handler() -> Tuple[str, Optional[str]]:
        """Handle OAuth callback by waiting for authorization code."""
        timeout = 300  # 5 minutes
        start_time = time.time()
        logger.info(f"Waiting for authorization code for session {session_id}")
        # Poll the DB once per second until the callback route stores the code,
        # the session is flagged ERROR, or the deadline passes.
        while time.time() - start_time < timeout:
            oauth_session = await mcp_manager.get_oauth_session_by_id(session_id, actor)
            if oauth_session and oauth_session.authorization_code_enc:
                # Read authorization code directly from _enc column
                auth_code = await oauth_session.authorization_code_enc.get_plaintext_async()
                return auth_code, oauth_session.state
            elif oauth_session and oauth_session.status == OAuthSessionStatus.ERROR:
                raise Exception("OAuth authorization failed")
            await asyncio.sleep(1)
        raise Exception(f"Timeout waiting for OAuth callback after {timeout} seconds")

    return OAuthClientProvider(
        server_url=oauth_server_url,
        client_metadata=OAuthClientMetadata.model_validate(client_metadata_dict),
        storage=storage,
        redirect_handler=redirect_handler,
        callback_handler=callback_handler,
    )
async def cleanup_expired_oauth_sessions(max_age_hours: int = 24) -> None:
    """Hard-delete OAuth session rows older than *max_age_hours* (default 24h)."""
    # NOTE(review): naive local time; assumes MCPOAuth.created_at is stored naive too — verify
    cutoff_time = datetime.now() - timedelta(hours=max_age_hours)

    async with db_registry.async_session() as session:
        stale_rows = (await session.execute(select(MCPOAuth).where(MCPOAuth.created_at < cutoff_time))).scalars().all()
        for stale in stale_rows:
            await stale.hard_delete_async(db_session=session, actor=None)
        if stale_rows:
            logger.info(f"Cleaned up {len(stale_rows)} expired OAuth sessions")
def oauth_stream_event(event: OauthStreamEvent, **kwargs) -> str:
data = {"event": event.value}
data.update(kwargs)
return f"data: {json.dumps(data)}\n\n"
def drill_down_exception(exception, depth=0, max_depth=5):
    """Recursively drill down into nested exceptions to find the root cause.

    Walks ExceptionGroup sub-exceptions (e.g. TaskGroup errors) as well as
    ``__cause__`` / ``__context__`` chains, producing an indented report.

    Bug fix vs. the previous version: recursive calls used to return an
    already-joined *string*, which the caller then passed to ``list.extend`` —
    splicing the nested report into the output character by character. The
    recursion now builds a list of lines and only the entry point joins them
    (with newlines, so the report is actually line-structured).

    Args:
        exception: The exception to inspect.
        depth: Current recursion depth; controls indentation of the report.
        max_depth: Maximum depth to recurse into nested/chained exceptions.

    Returns:
        A newline-joined, indented, human-readable report of the exception chain.
    """
    return "\n".join(_collect_exception_details(exception, depth, max_depth))


def _collect_exception_details(exception, depth, max_depth):
    """Build the report for one exception as a list of lines (helper for drill_down_exception)."""
    indent = "  " * depth
    lines = [
        f"{indent}Exception at depth {depth}:",
        f"{indent}  Type: {type(exception).__name__}",
        f"{indent}  Message: {str(exception)}",
        f"{indent}  Module: {getattr(type(exception), '__module__', 'unknown')}",
    ]

    # Check for exception groups (TaskGroup errors)
    if hasattr(exception, "exceptions") and exception.exceptions:
        lines.append(f"{indent}  ExceptionGroup with {len(exception.exceptions)} sub-exceptions:")
        for i, sub_exc in enumerate(exception.exceptions):
            lines.append(f"{indent}    Sub-exception {i}:")
            if depth < max_depth:
                lines.extend(_collect_exception_details(sub_exc, depth + 1, max_depth))

    # Check for chained exceptions (__cause__ and __context__)
    if hasattr(exception, "__cause__") and exception.__cause__ and depth < max_depth:
        lines.append(f"{indent}  Caused by:")
        lines.extend(_collect_exception_details(exception.__cause__, depth + 1, max_depth))
    if hasattr(exception, "__context__") and exception.__context__ and depth < max_depth:
        lines.append(f"{indent}  Context:")
        lines.extend(_collect_exception_details(exception.__context__, depth + 1, max_depth))

    # Add traceback info (innermost frames only, to keep the report short)
    import traceback

    if hasattr(exception, "__traceback__") and exception.__traceback__:
        tb_lines = traceback.format_tb(exception.__traceback__)
        lines.append(f"{indent}  Traceback:")
        for line in tb_lines[-3:]:  # Show last 3 traceback lines
            lines.append(f"{indent}    {line.strip()}")

    return lines
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/mcp/oauth_utils.py",
"license": "Apache License 2.0",
"lines": 259,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/services/mcp/streamable_http_client.py | from datetime import timedelta
from typing import Optional
from mcp import ClientSession
from mcp.client.auth import OAuthClientProvider
from mcp.client.streamable_http import streamablehttp_client
from letta.functions.mcp_client.types import BaseServerConfig, StreamableHTTPServerConfig
from letta.log import get_logger
from letta.services.mcp.base_client import AsyncBaseMCPClient
from letta.settings import tool_settings
logger = get_logger(__name__)
class AsyncStreamableHTTPMCPClient(AsyncBaseMCPClient):
    """MCP client speaking the streamable HTTP transport.

    Supplies the transport-specific connection logic on top of
    ``AsyncBaseMCPClient``: header assembly (custom headers, auth header,
    agent ID header), optional OAuth, and ``ClientSession`` setup, with
    connection errors translated into descriptive ``ConnectionError``s.
    """

    def __init__(
        self,
        server_config: StreamableHTTPServerConfig,
        oauth_provider: Optional[OAuthClientProvider] = None,
        agent_id: Optional[str] = None,
    ):
        # No transport-specific state; the base class stores the config,
        # OAuth provider, agent ID, and the AsyncExitStack used below.
        super().__init__(server_config, oauth_provider, agent_id)

    async def _initialize_connection(self, server_config: BaseServerConfig) -> None:
        """Open the streamable HTTP transport and create the MCP ClientSession.

        Args:
            server_config: Must be a StreamableHTTPServerConfig; provides the
                server URL, optional custom headers, and optional auth header/token.

        Raises:
            ValueError: If ``server_config`` is not a StreamableHTTPServerConfig.
            ConnectionError: On any connection failure, with a message tailored
                to the underlying error (404, connect failure, invalid JSON-RPC).
        """
        if not isinstance(server_config, StreamableHTTPServerConfig):
            raise ValueError("Expected StreamableHTTPServerConfig")

        try:
            # Prepare headers for authentication
            headers = {}
            if server_config.custom_headers:
                headers.update(server_config.custom_headers)

            # Add auth header if specified
            if server_config.auth_header and server_config.auth_token:
                headers[server_config.auth_header] = server_config.auth_token

            # Add agent ID header if provided
            if self.agent_id:
                headers[self.AGENT_ID_HEADER] = self.agent_id

            # Use OAuth provider if available, otherwise use regular headers
            # Pass timeout to prevent httpx.ReadTimeout errors on slow connections
            timeout = timedelta(seconds=tool_settings.mcp_connect_to_server_timeout)
            if self.oauth_provider:
                streamable_http_cm = streamablehttp_client(
                    server_config.server_url, headers=headers if headers else None, auth=self.oauth_provider, timeout=timeout
                )
            else:
                # Use streamablehttp_client context manager with headers if provided
                if headers:
                    streamable_http_cm = streamablehttp_client(server_config.server_url, headers=headers, timeout=timeout)
                else:
                    streamable_http_cm = streamablehttp_client(server_config.server_url, timeout=timeout)

            # Enter the transport via the exit stack so cleanup is handled by the base class.
            # streamablehttp_client yields (read_stream, write_stream, get_session_id); the third value is unused here.
            read_stream, write_stream, _ = await self.exit_stack.enter_async_context(streamable_http_cm)

            # Create and enter the ClientSession context manager
            session_cm = ClientSession(read_stream, write_stream)
            self.session = await self.exit_stack.enter_async_context(session_cm)
        except Exception as e:
            # Provide more helpful error messages for specific error types
            # NOTE: classification is string-based on the exception text — best effort.
            if "404" in str(e) or "Not Found" in str(e):
                raise ConnectionError(
                    f"MCP server not found at URL: {server_config.server_url}. "
                    "Please verify the URL is correct and the server supports the MCP protocol."
                ) from e
            elif "Connection" in str(e) or "connect" in str(e).lower():
                raise ConnectionError(
                    f"Failed to connect to MCP server at: {server_config.server_url}. "
                    "Please check that the server is running and accessible."
                ) from e
            elif "JSON" in str(e) and "validation" in str(e):
                raise ConnectionError(
                    f"MCP server at {server_config.server_url} is not returning valid JSON-RPC responses. "
                    "The server may not be a proper MCP server or may be returning empty/invalid JSON. "
                    "Please verify this is an MCP-compatible server endpoint."
                ) from e
            else:
                # Re-raise other exceptions with additional context
                raise ConnectionError(f"Failed to initialize streamable HTTP connection to {server_config.server_url}: {str(e)}") from e
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/mcp/streamable_http_client.py",
"license": "Apache License 2.0",
"lines": 70,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/services/mcp_manager.py | import asyncio
import json
import os
import secrets
import uuid
from datetime import datetime, timedelta
from typing import Any, Dict, List, Optional, Tuple, Union
from fastapi import HTTPException
from sqlalchemy import delete, desc, select
from starlette.requests import Request
import letta.constants as constants
from letta.functions.mcp_client.types import (
MCPServerType,
MCPTool,
MCPToolHealth,
SSEServerConfig,
StdioServerConfig,
StreamableHTTPServerConfig,
)
from letta.functions.schema_generator import normalize_mcp_schema
from letta.functions.schema_validator import validate_complete_json_schema
from letta.log import get_logger
from letta.orm.errors import NoResultFound
from letta.orm.mcp_oauth import MCPOAuth, OAuthSessionStatus
from letta.orm.mcp_server import MCPServer as MCPServerModel
from letta.orm.tool import Tool as ToolModel
from letta.schemas.enums import PrimitiveType
from letta.schemas.mcp import (
MCPOAuthSession,
MCPOAuthSessionCreate,
MCPOAuthSessionUpdate,
MCPServer,
MCPServerResyncResult,
UpdateMCPServer,
UpdateSSEMCPServer,
UpdateStdioMCPServer,
UpdateStreamableHTTPMCPServer,
)
from letta.schemas.secret import Secret
from letta.schemas.tool import Tool as PydanticTool, ToolCreate, ToolUpdate
from letta.schemas.user import User as PydanticUser
from letta.server.db import db_registry
from letta.services.mcp.base_client import AsyncBaseMCPClient
from letta.services.mcp.fastmcp_client import AsyncFastMCPSSEClient, AsyncFastMCPStreamableHTTPClient
from letta.services.mcp.server_side_oauth import ServerSideOAuth
from letta.services.mcp.sse_client import MCP_CONFIG_TOPLEVEL_KEY
from letta.services.mcp.stdio_client import AsyncStdioMCPClient
from letta.services.tool_manager import ToolManager
from letta.settings import tool_settings
from letta.utils import enforce_types, printd, safe_create_task_with_return
from letta.validators import raise_on_invalid_id
logger = get_logger(__name__)
class MCPManager:
"""Manager class to handle business logic related to MCP."""
    def __init__(self):
        """Initialize the manager with a tool manager and an empty connection cache."""
        # TODO: timeouts?
        self.tool_manager = ToolManager()
        self.cached_mcp_servers = {}  # maps id -> async connection
    @enforce_types
    @raise_on_invalid_id(param_name="agent_id", expected_prefix=PrimitiveType.AGENT)
    async def list_mcp_server_tools(self, mcp_server_name: str, actor: PydanticUser, agent_id: Optional[str] = None) -> List[MCPTool]:
        """Get a list of all tools for a specific MCP server.

        Connects to the named server, lists its tools, normalizes each tool's
        input schema, and attaches a health status to every normalized tool.

        Args:
            mcp_server_name: Name of the registered MCP server.
            actor: User performing the request (scopes DB lookups).
            agent_id: Optional agent ID forwarded to the MCP client as a header.

        Raises:
            Exception: Re-raises any connection/listing failure after logging a warning.
        """
        mcp_client = None
        try:
            mcp_server_id = await self.get_mcp_server_id_by_name(mcp_server_name, actor=actor)
            mcp_config = await self.get_mcp_server_by_id_async(mcp_server_id, actor=actor)
            server_config = await mcp_config.to_config_async()
            mcp_client = await self.get_mcp_client(server_config, actor, agent_id=agent_id)
            await mcp_client.connect_to_server()

            # list tools
            tools = await mcp_client.list_tools()

            # Add health information to each tool
            for tool in tools:
                # Try to normalize the schema and re-validate
                if tool.inputSchema:
                    tool.inputSchema = normalize_mcp_schema(tool.inputSchema)
                    health_status, reasons = validate_complete_json_schema(tool.inputSchema)
                    tool.health = MCPToolHealth(status=health_status.value, reasons=reasons)

            return tools
        except Exception as e:
            # MCP tool listing errors are often due to connection/configuration issues, not system errors
            # Log at info level to avoid triggering Sentry alerts for expected failures
            logger.warning(f"Error listing tools for MCP server {mcp_server_name}: {e}")
            raise e
        finally:
            # Always tear down the client connection, even on failure
            if mcp_client:
                await mcp_client.cleanup()
    @enforce_types
    async def execute_mcp_server_tool(
        self,
        mcp_server_name: str,
        tool_name: str,
        tool_args: Optional[Dict[str, Any]],
        environment_variables: Dict[str, str],
        actor: PydanticUser,
        agent_id: Optional[str] = None,
    ) -> Tuple[str, bool]:
        """Call a specific tool from a specific MCP server.

        Server config is loaded from the database unless
        ``tool_settings.mcp_read_from_config`` is set, in which case it is
        read from the MCP config file instead.

        Returns:
            Tuple of (tool result string, success flag).

        Raises:
            ValueError: If config-file mode is active and the server name is missing.
        """
        mcp_client = None
        try:
            if not tool_settings.mcp_read_from_config:
                # read from DB
                mcp_server_id = await self.get_mcp_server_id_by_name(mcp_server_name, actor=actor)
                mcp_config = await self.get_mcp_server_by_id_async(mcp_server_id, actor=actor)
                server_config = await mcp_config.to_config_async(environment_variables)
            else:
                # read from config file
                mcp_config = await self.read_mcp_config()
                if mcp_server_name not in mcp_config:
                    raise ValueError(f"MCP server {mcp_server_name} not found in config.")
                server_config = mcp_config[mcp_server_name]

            mcp_client = await self.get_mcp_client(server_config, actor, agent_id=agent_id)
            await mcp_client.connect_to_server()

            # call tool
            result, success = await mcp_client.execute_tool(tool_name, tool_args)
            logger.info(f"MCP Result: {result}, Success: {success}")
            # TODO: change to pydantic tool

            return result, success
        finally:
            # Always clean up the client connection, even when execution raises
            if mcp_client:
                await mcp_client.cleanup()
    @enforce_types
    async def add_tool_from_mcp_server(self, mcp_server_name: str, mcp_tool_name: str, actor: PydanticUser) -> PydanticTool:
        """Add a tool from an MCP server to the Letta tool registry.

        Looks up the named tool on the server; if its schema is INVALID,
        attempts normalization and re-validation before registering it.

        Returns:
            The created tool, or None when the tool name is not found on the
            server. NOTE(review): the annotation says PydanticTool but the
            fall-through path returns None — callers must handle that case.

        Raises:
            ValueError: If the MCP server name is unknown.
        """
        # get the MCP server ID, we should migrate to use the server_id instead of the name
        mcp_server_id = await self.get_mcp_server_id_by_name(mcp_server_name, actor=actor)
        if not mcp_server_id:
            raise ValueError(f"MCP server '{mcp_server_name}' not found")

        mcp_tools = await self.list_mcp_server_tools(mcp_server_name, actor=actor)

        for mcp_tool in mcp_tools:
            # TODO: @jnjpng move health check to tool class
            if mcp_tool.name == mcp_tool_name:
                # Check tool health - but try normalization first for INVALID schemas
                if mcp_tool.health and mcp_tool.health.status == "INVALID":
                    logger.info(f"Attempting to normalize INVALID schema for tool {mcp_tool_name}")
                    logger.info(f"Original health reasons: {mcp_tool.health.reasons}")

                    # Try to normalize the schema and re-validate
                    try:
                        # Normalize the schema to fix common issues
                        logger.debug(f"Normalizing schema for {mcp_tool_name}")
                        normalized_schema = normalize_mcp_schema(mcp_tool.inputSchema)

                        # Re-validate after normalization
                        logger.debug(f"Re-validating schema for {mcp_tool_name}")
                        health_status, health_reasons = validate_complete_json_schema(normalized_schema)
                        logger.info(f"After normalization: status={health_status.value}, reasons={health_reasons}")

                        # Update the tool's schema and health (use inputSchema, not input_schema)
                        mcp_tool.inputSchema = normalized_schema
                        mcp_tool.health.status = health_status.value
                        mcp_tool.health.reasons = health_reasons

                        # Log the normalization result
                        if health_status.value != "INVALID":
                            logger.info(f"✓ MCP tool {mcp_tool_name} schema normalized successfully: {health_status.value}")
                        else:
                            logger.warning(f"MCP tool {mcp_tool_name} still INVALID after normalization. Reasons: {health_reasons}")
                    except Exception as e:
                        logger.error(f"Failed to normalize schema for tool {mcp_tool_name}: {e}", exc_info=True)

                # After normalization attempt, check if still INVALID
                # (the tool is still registered; this is a warning, not a rejection)
                if mcp_tool.health and mcp_tool.health.status == "INVALID":
                    logger.warning(f"Tool {mcp_tool_name} has potentially invalid schema. Reasons: {', '.join(mcp_tool.health.reasons)}")

                tool_create = ToolCreate.from_mcp(mcp_server_name=mcp_server_name, mcp_tool=mcp_tool)
                return await self.tool_manager.create_mcp_tool_async(
                    tool_create=tool_create, mcp_server_name=mcp_server_name, mcp_server_id=mcp_server_id, actor=actor
                )

        # failed to add - handle error?
        return None
    @enforce_types
    async def resync_mcp_server_tools(
        self, mcp_server_name: str, actor: PydanticUser, agent_id: Optional[str] = None
    ) -> MCPServerResyncResult:
        """
        Resync tools for an MCP server by:
        1. Fetching current tools from the MCP server
        2. Deleting tools that no longer exist on the server
        3. Updating schemas for existing tools
        4. Adding new tools from the server

        Returns a result with:
        - deleted: List of deleted tool names
        - updated: List of updated tool names
        - added: List of added tool names

        Raises:
            ValueError: If the MCP server name is unknown.
            HTTPException: 404 when the server cannot be reached to list tools.
        """
        # Get the MCP server ID
        mcp_server_id = await self.get_mcp_server_id_by_name(mcp_server_name, actor=actor)
        if not mcp_server_id:
            raise ValueError(f"MCP server '{mcp_server_name}' not found")

        # Fetch current tools from MCP server
        try:
            current_mcp_tools = await self.list_mcp_server_tools(mcp_server_name, actor=actor, agent_id=agent_id)
        except Exception as e:
            logger.error(f"Failed to fetch tools from MCP server {mcp_server_name}: {e}")
            raise HTTPException(
                status_code=404,
                detail={
                    "code": "MCPServerUnavailable",
                    "message": f"Could not connect to MCP server {mcp_server_name} to resync tools",
                    "error": str(e),
                },
            )

        # Get all persisted tools for this MCP server
        async with db_registry.async_session() as session:
            # Query for tools with MCP metadata matching this server
            # Using JSON path query to filter by metadata
            persisted_tools = await ToolModel.list_async(
                db_session=session,
                organization_id=actor.organization_id,
            )

            # Filter tools that belong to this MCP server
            # (filtering happens in Python because server_id lives inside the JSON metadata column)
            mcp_tools = []
            for tool in persisted_tools:
                if tool.metadata_ and constants.MCP_TOOL_TAG_NAME_PREFIX in tool.metadata_:
                    if tool.metadata_[constants.MCP_TOOL_TAG_NAME_PREFIX].get("server_id") == mcp_server_id:
                        mcp_tools.append(tool)

            # Create maps for easier comparison
            current_tool_map = {tool.name: tool for tool in current_mcp_tools}
            persisted_tool_map = {tool.name: tool for tool in mcp_tools}

            deleted_tools = []
            updated_tools = []
            added_tools = []

            # 1. Delete tools that no longer exist on the server
            for tool_name, persisted_tool in persisted_tool_map.items():
                if tool_name not in current_tool_map:
                    # Delete the tool (cascade will handle agent detachment)
                    await persisted_tool.hard_delete_async(db_session=session, actor=actor)
                    deleted_tools.append(tool_name)
                    logger.info(f"Deleted MCP tool {tool_name} as it no longer exists on server {mcp_server_name}")

            # Commit deletions
            # context manager now handles commits
            # await session.commit()

            # 2. Update existing tools and add new tools
            for tool_name, current_tool in current_tool_map.items():
                if tool_name in persisted_tool_map:
                    # Update existing tool
                    persisted_tool = persisted_tool_map[tool_name]
                    tool_create = ToolCreate.from_mcp(mcp_server_name=mcp_server_name, mcp_tool=current_tool)

                    # Check if schema has changed
                    if persisted_tool.json_schema != tool_create.json_schema:
                        # Update the tool
                        update_data = ToolUpdate(
                            description=tool_create.description,
                            json_schema=tool_create.json_schema,
                            source_code=tool_create.source_code,
                        )
                        await self.tool_manager.update_tool_by_id_async(tool_id=persisted_tool.id, tool_update=update_data, actor=actor)
                        updated_tools.append(tool_name)
                        logger.info(f"Updated MCP tool {tool_name} with new schema from server {mcp_server_name}")
                else:
                    # Add new tool
                    # Skip INVALID tools
                    if current_tool.health and current_tool.health.status == "INVALID":
                        logger.warning(
                            f"Skipping invalid tool {tool_name} from MCP server {mcp_server_name}: {', '.join(current_tool.health.reasons)}"
                        )
                        continue

                    tool_create = ToolCreate.from_mcp(mcp_server_name=mcp_server_name, mcp_tool=current_tool)
                    await self.tool_manager.create_mcp_tool_async(
                        tool_create=tool_create, mcp_server_name=mcp_server_name, mcp_server_id=mcp_server_id, actor=actor
                    )
                    added_tools.append(tool_name)
                    logger.info(f"Added new MCP tool {tool_name} from server {mcp_server_name}")

            return MCPServerResyncResult(
                deleted=deleted_tools,
                updated=updated_tools,
                added=added_tools,
            )
@enforce_types
async def list_mcp_servers(self, actor: PydanticUser) -> List[MCPServer]:
"""List all MCP servers available"""
async with db_registry.async_session() as session:
mcp_servers = await MCPServerModel.list_async(
db_session=session,
organization_id=actor.organization_id,
# SqlalchemyBase.list_async defaults to limit=50; MCP servers should not be capped.
# Use a higher limit until we implement proper pagination in the API/SDK.
limit=200,
)
return [mcp_server.to_pydantic() for mcp_server in mcp_servers]
    @enforce_types
    async def create_or_update_mcp_server(self, pydantic_mcp_server: MCPServer, actor: PydanticUser) -> MCPServer:
        """Create an MCP server, or update the existing one with the same name.

        If a server with this name already exists, only its config fields are
        updated (the name itself is never changed); otherwise a new server is
        created.

        Raises:
            ValueError: If the existing server has an unsupported server type.
        """
        mcp_server_id = await self.get_mcp_server_id_by_name(mcp_server_name=pydantic_mcp_server.server_name, actor=actor)
        if mcp_server_id:
            # Put to dict and remove fields that should not be reset
            update_data = pydantic_mcp_server.model_dump(exclude_unset=True, exclude_none=True)

            # If there's anything to update (can only update the configs, not the name)
            # TODO: pass in custom headers for update as well?
            if update_data:
                if pydantic_mcp_server.server_type == MCPServerType.SSE:
                    update_request = UpdateSSEMCPServer(server_url=pydantic_mcp_server.server_url, token=pydantic_mcp_server.token)
                elif pydantic_mcp_server.server_type == MCPServerType.STDIO:
                    update_request = UpdateStdioMCPServer(stdio_config=pydantic_mcp_server.stdio_config)
                elif pydantic_mcp_server.server_type == MCPServerType.STREAMABLE_HTTP:
                    update_request = UpdateStreamableHTTPMCPServer(
                        server_url=pydantic_mcp_server.server_url, auth_token=pydantic_mcp_server.token
                    )
                else:
                    raise ValueError(f"Unsupported server type: {pydantic_mcp_server.server_type}")
                mcp_server = await self.update_mcp_server_by_id(mcp_server_id, update_request, actor)
            else:
                printd(
                    f"`create_or_update_mcp_server` was called with user_id={actor.id}, organization_id={actor.organization_id}, name={pydantic_mcp_server.server_name}, but found existing mcp server with nothing to update."
                )
                mcp_server = await self.get_mcp_server_by_id_async(mcp_server_id, actor=actor)
        else:
            mcp_server = await self.create_mcp_server(pydantic_mcp_server, actor=actor)

        return mcp_server
    @enforce_types
    async def create_mcp_server(self, pydantic_mcp_server: MCPServer, actor: PydanticUser) -> MCPServer:
        """Create a new MCP server record, encrypting secrets and linking orphaned OAuth sessions.

        Populates the encrypted token / custom-header columns, persists the
        server, then attaches any existing OAuth sessions for the same user and
        server URL that are not yet linked to a server.
        """
        async with db_registry.async_session() as session:
            try:
                # Set the organization id at the ORM layer
                pydantic_mcp_server.organization_id = actor.organization_id

                # Explicitly populate encrypted fields
                if pydantic_mcp_server.token is not None:
                    pydantic_mcp_server.token_enc = Secret.from_plaintext(pydantic_mcp_server.token)
                if pydantic_mcp_server.custom_headers is not None:
                    # custom_headers is a Dict[str, str], serialize to JSON then encrypt
                    import json

                    json_str = json.dumps(pydantic_mcp_server.custom_headers)
                    pydantic_mcp_server.custom_headers_enc = Secret.from_plaintext(json_str)

                mcp_server_data = pydantic_mcp_server.model_dump(to_orm=True)
                # Ensure custom_headers None is stored as SQL NULL, not JSON null
                if mcp_server_data.get("custom_headers") is None:
                    mcp_server_data.pop("custom_headers", None)

                mcp_server = MCPServerModel(**mcp_server_data)
                mcp_server = await mcp_server.create_async(session, actor=actor, no_commit=True)

                # Link existing OAuth sessions for the same user and server URL
                # This ensures OAuth sessions created during testing get linked to the server
                # Also updates the server_name to match the new MCP server's name
                server_url = getattr(mcp_server, "server_url", None)
                server_name = getattr(mcp_server, "server_name", None)
                if server_url:
                    result = await session.execute(
                        select(MCPOAuth).where(
                            MCPOAuth.server_url == server_url,
                            MCPOAuth.organization_id == actor.organization_id,
                            MCPOAuth.user_id == actor.id,  # Only link sessions for the same user
                            MCPOAuth.server_id.is_(None),  # Only update sessions not already linked
                        )
                    )
                    oauth_sessions = result.scalars().all()

                    # TODO: @jnjpng we should update sessions in bulk
                    for oauth_session in oauth_sessions:
                        oauth_session.server_id = mcp_server.id
                        # Update server_name to match the persisted MCP server's name
                        if server_name:
                            oauth_session.server_name = server_name
                        await oauth_session.update_async(db_session=session, actor=actor, no_commit=True)

                    if oauth_sessions:
                        logger.info(
                            f"Linked {len(oauth_sessions)} OAuth sessions to MCP server {mcp_server.id} "
                            f"(URL: {server_url}, name: {server_name}) for user {actor.id}"
                        )

                # context manager now handles commits
                # await session.commit()
                return mcp_server.to_pydantic()
            except Exception:
                # Roll back the whole unit (server create + session linking) on any failure
                await session.rollback()
                raise
@enforce_types
async def create_mcp_server_from_config(
self, server_config: Union[StdioServerConfig, SSEServerConfig, StreamableHTTPServerConfig], actor: PydanticUser
) -> MCPServer:
"""
Create an MCP server from a config object, handling encryption of sensitive fields.
This method converts the server config to an MCPServer model and encrypts
sensitive fields like tokens and custom headers.
"""
# Create base MCPServer object
if isinstance(server_config, StdioServerConfig):
# Check if stdio MCP servers are disabled (not suitable for multi-tenant deployments)
if tool_settings.mcp_disable_stdio:
raise ValueError("MCP stdio servers are disabled. Set MCP_DISABLE_STDIO=false to enable them.")
mcp_server = MCPServer(server_name=server_config.server_name, server_type=server_config.type, stdio_config=server_config)
elif isinstance(server_config, SSEServerConfig):
mcp_server = MCPServer(
server_name=server_config.server_name,
server_type=server_config.type,
server_url=server_config.server_url,
)
# Encrypt sensitive fields - write only to _enc columns
token = server_config.resolve_token()
if token:
mcp_server.token_enc = Secret.from_plaintext(token)
if server_config.custom_headers:
# Convert dict to JSON string, then encrypt as Secret
headers_json = json.dumps(server_config.custom_headers)
mcp_server.custom_headers_enc = Secret.from_plaintext(headers_json)
elif isinstance(server_config, StreamableHTTPServerConfig):
mcp_server = MCPServer(
server_name=server_config.server_name,
server_type=server_config.type,
server_url=server_config.server_url,
)
# Encrypt sensitive fields - write only to _enc columns
token = server_config.resolve_token()
if token:
mcp_server.token_enc = Secret.from_plaintext(token)
if server_config.custom_headers:
# Convert dict to JSON string, then encrypt as Secret
headers_json = json.dumps(server_config.custom_headers)
mcp_server.custom_headers_enc = Secret.from_plaintext(headers_json)
else:
raise ValueError(f"Unsupported server config type: {type(server_config)}")
return mcp_server
@enforce_types
async def create_mcp_server_from_config_with_tools(
self, server_config: Union[StdioServerConfig, SSEServerConfig, StreamableHTTPServerConfig], actor: PydanticUser
) -> MCPServer:
"""
Create an MCP server from a config object and optimistically sync its tools.
This method handles encryption of sensitive fields and then creates the server
with automatic tool synchronization.
"""
# Convert config to MCPServer with encryption
mcp_server = await self.create_mcp_server_from_config(server_config, actor)
# Create the server with tools
return await self.create_mcp_server_with_tools(mcp_server, actor)
    @enforce_types
    async def create_mcp_server_with_tools(self, pydantic_mcp_server: MCPServer, actor: PydanticUser) -> MCPServer:
        """
        Create a new MCP server and optimistically sync its tools.

        This method:
        1. Creates the MCP server record
        2. Attempts to connect and fetch tools
        3. Persists valid tools sequentially (best-effort)

        Tool-sync failures never fail server creation; they are logged and the
        created server is returned regardless.
        """
        # First, create the MCP server
        created_server = await self.create_mcp_server(pydantic_mcp_server, actor)

        # Optimistically try to sync tools
        try:
            logger.info(f"Attempting to auto-sync tools from MCP server: {created_server.server_name}")

            # List all tools from the MCP server
            mcp_tools = await self.list_mcp_server_tools(mcp_server_name=created_server.server_name, actor=actor)

            # Filter out invalid tools
            valid_tools = [tool for tool in mcp_tools if not (tool.health and tool.health.status == "INVALID")]

            # Register tools sequentially to avoid exhausting database connection pool
            # When an MCP server has many tools (e.g., 50+), concurrent tool creation can create
            # too many simultaneous database connections, causing pool exhaustion errors
            if valid_tools:
                results = []
                for mcp_tool in valid_tools:
                    tool_create = ToolCreate.from_mcp(mcp_server_name=created_server.server_name, mcp_tool=mcp_tool)
                    try:
                        result = await self.tool_manager.create_mcp_tool_async(
                            tool_create=tool_create,
                            mcp_server_name=created_server.server_name,
                            mcp_server_id=created_server.id,
                            actor=actor,
                        )
                        results.append(result)
                    except Exception as e:
                        # Collect the failure instead of aborting so remaining tools still sync
                        results.append(e)

                successful = sum(1 for r in results if not isinstance(r, Exception))
                failed = len(results) - successful
                logger.info(
                    f"Auto-sync completed for MCP server {created_server.server_name}: "
                    f"{successful} tools persisted, {failed} failed, "
                    f"{len(mcp_tools) - len(valid_tools)} invalid tools skipped"
                )
            else:
                logger.info(f"No valid tools found to sync from MCP server {created_server.server_name}")
        except Exception as e:
            # Log the error but don't fail the server creation
            logger.warning(
                f"Failed to auto-sync tools from MCP server {created_server.server_name}: {e}. "
                f"Server was created successfully but tools were not persisted."
            )

        return created_server
    @enforce_types
    async def update_mcp_server_by_id(self, mcp_server_id: str, mcp_server_update: UpdateMCPServer, actor: PydanticUser) -> MCPServer:
        """Update an MCP server by its ID with the given UpdateMCPServer object.

        Secrets (token, custom headers) are written only to their encrypted
        ``_enc`` columns, and are re-encrypted only when the plaintext value
        actually changed.
        """
        async with db_registry.async_session() as session:
            # Fetch the server by ID (scoped to the actor)
            mcp_server = await MCPServerModel.read_async(db_session=session, identifier=mcp_server_id, actor=actor)

            # Update attributes with only the fields that were explicitly set
            update_data = mcp_server_update.model_dump(to_orm=True, exclude_unset=True)

            # Handle encryption for token if provided - write only to _enc column
            if "token" in update_data and update_data["token"] is not None:
                # Check if value changed by reading from _enc column only
                existing_token = None
                if mcp_server.token_enc:
                    existing_secret = Secret.from_encrypted(mcp_server.token_enc)
                    existing_token = await existing_secret.get_plaintext_async()
                # Only re-encrypt if different
                if existing_token != update_data["token"]:
                    mcp_server.token_enc = Secret.from_plaintext(update_data["token"]).get_encrypted()
                # Remove from update_data since we set directly on mcp_server
                update_data.pop("token", None)
                update_data.pop("token_enc", None)

            # Handle encryption for custom_headers if provided - write only to _enc column
            if "custom_headers" in update_data:
                if update_data["custom_headers"] is not None:
                    # custom_headers is a Dict[str, str], serialize to JSON then encrypt
                    json_str = json.dumps(update_data["custom_headers"])
                    # Check if value changed by reading from _enc column only
                    existing_headers_json = None
                    if mcp_server.custom_headers_enc:
                        existing_secret = Secret.from_encrypted(mcp_server.custom_headers_enc)
                        existing_headers_json = await existing_secret.get_plaintext_async()
                    # Only re-encrypt if different
                    if existing_headers_json != json_str:
                        mcp_server.custom_headers_enc = Secret.from_plaintext(json_str).get_encrypted()
                    # Remove from update_data since we set directly on mcp_server
                    update_data.pop("custom_headers", None)
                    update_data.pop("custom_headers_enc", None)
                else:
                    # Explicit None clears the headers: store SQL NULL in the _enc column
                    update_data.pop("custom_headers", None)
                    setattr(mcp_server, "custom_headers_enc", None)

            # Apply the remaining (non-secret) field updates
            for key, value in update_data.items():
                setattr(mcp_server, key, value)

            # Persist the updated server to the database
            mcp_server = await mcp_server.update_async(db_session=session, actor=actor)
            return mcp_server.to_pydantic()
@enforce_types
async def update_mcp_server_by_name(self, mcp_server_name: str, mcp_server_update: UpdateMCPServer, actor: PydanticUser) -> MCPServer:
"""Update an MCP server by its name."""
mcp_server_id = await self.get_mcp_server_id_by_name(mcp_server_name, actor)
if not mcp_server_id:
raise HTTPException(
status_code=404,
detail={
"code": "MCPServerNotFoundError",
"message": f"MCP server {mcp_server_name} not found",
"mcp_server_name": mcp_server_name,
},
)
return await self.update_mcp_server_by_id(mcp_server_id, mcp_server_update, actor)
@enforce_types
async def get_mcp_server_id_by_name(self, mcp_server_name: str, actor: PydanticUser) -> Optional[str]:
"""Retrieve a MCP server by its name and a user"""
try:
async with db_registry.async_session() as session:
mcp_server = await MCPServerModel.read_async(db_session=session, server_name=mcp_server_name, actor=actor)
return mcp_server.id
except NoResultFound:
return None
    @enforce_types
    async def get_mcp_server_by_id_async(self, mcp_server_id: str, actor: PydanticUser) -> MCPServer:
        """Fetch an MCP server by its ID and return it as a Pydantic model."""
        async with db_registry.async_session() as session:
            # read_async scopes the lookup to the actor (raises if not found/visible)
            mcp_server = await MCPServerModel.read_async(db_session=session, identifier=mcp_server_id, actor=actor)
            # Convert the SQLAlchemy model to its Pydantic representation
            return mcp_server.to_pydantic()
@enforce_types
async def get_mcp_servers_by_ids(self, mcp_server_ids: List[str], actor: PydanticUser) -> List[MCPServer]:
"""Fetch multiple MCP servers by their IDs in a single query."""
if not mcp_server_ids:
return []
async with db_registry.async_session() as session:
mcp_servers = await MCPServerModel.list_async(
db_session=session,
organization_id=actor.organization_id,
id=mcp_server_ids, # This will use the IN operator
)
return [mcp_server.to_pydantic() for mcp_server in mcp_servers]
    @enforce_types
    async def get_mcp_server(self, mcp_server_name: str, actor: PydanticUser) -> MCPServer:
        """Get a MCP server by name.

        Raises:
            HTTPException: 404 when no server with this name exists.
        """
        async with db_registry.async_session() as session:
            mcp_server_id = await self.get_mcp_server_id_by_name(mcp_server_name, actor)
            # NOTE(review): when the name is unknown, mcp_server_id is None here —
            # confirm read_async tolerates identifier=None (returns falsy) rather
            # than raising before the 404 below is reached.
            mcp_server = await MCPServerModel.read_async(db_session=session, identifier=mcp_server_id, actor=actor)
            if not mcp_server:
                raise HTTPException(
                    status_code=404,  # Not Found
                    detail={
                        "code": "MCPServerNotFoundError",
                        "message": f"MCP server {mcp_server_name} not found",
                        "mcp_server_name": mcp_server_name,
                    },
                )
            return mcp_server.to_pydantic()
@enforce_types
async def delete_mcp_server_by_id(self, mcp_server_id: str, actor: PydanticUser) -> None:
"""Delete a MCP server by its ID and associated tools and OAuth sessions."""
async with db_registry.async_session() as session:
try:
mcp_server = await MCPServerModel.read_async(db_session=session, identifier=mcp_server_id, actor=actor)
if not mcp_server:
raise NoResultFound(f"MCP server with id {mcp_server_id} not found.")
server_url = getattr(mcp_server, "server_url", None)
# Get all tools with matching metadata
stmt = select(ToolModel).where(ToolModel.organization_id == actor.organization_id)
result = await session.execute(stmt)
all_tools = result.scalars().all()
# Filter and delete tools that belong to this MCP server
tools_deleted = 0
for tool in all_tools:
if tool.metadata_ and constants.MCP_TOOL_TAG_NAME_PREFIX in tool.metadata_:
if tool.metadata_[constants.MCP_TOOL_TAG_NAME_PREFIX].get("server_id") == mcp_server_id:
await tool.hard_delete_async(db_session=session, actor=actor)
tools_deleted = 1
logger.info(f"Deleted MCP tool {tool.name} associated with MCP server {mcp_server_id}")
if tools_deleted > 0:
logger.info(f"Deleted {tools_deleted} MCP tools associated with MCP server {mcp_server_id}")
# Delete OAuth sessions associated with this MCP server
# 1. Delete sessions directly linked to this server (server_id matches)
# 2. Delete orphaned pending sessions (server_id IS NULL) for same server_url + user
# 3. Keep authorized sessions linked to OTHER MCP servers (different server_id)
oauth_count = 0
# Delete sessions directly linked to this server
result = await session.execute(
delete(MCPOAuth).where(
MCPOAuth.server_id == mcp_server_id,
MCPOAuth.organization_id == actor.organization_id,
)
)
oauth_count += result.rowcount
# Delete orphaned sessions (no server_id) for same server_url + user
if server_url:
result = await session.execute(
delete(MCPOAuth).where(
MCPOAuth.server_url == server_url,
MCPOAuth.server_id.is_(None), # Only orphaned sessions (not linked to any server)
MCPOAuth.organization_id == actor.organization_id,
MCPOAuth.user_id == actor.id,
)
)
oauth_count += result.rowcount
if oauth_count > 0:
logger.info(
f"Deleted {oauth_count} OAuth sessions for MCP server {mcp_server_id} (URL: {server_url}) for user {actor.id}"
)
# Delete the MCP server, will cascade delete to linked OAuth sessions
await session.execute(
delete(MCPServerModel).where(
MCPServerModel.id == mcp_server_id,
MCPServerModel.organization_id == actor.organization_id,
)
)
# context manager now handles commits
# await session.commit()
except NoResultFound:
await session.rollback()
raise ValueError(f"MCP server with id {mcp_server_id} not found.")
except Exception as e:
await session.rollback()
logger.error(f"Failed to delete MCP server {mcp_server_id}: {e}")
raise
async def read_mcp_config(self) -> dict[str, Union[SSEServerConfig, StdioServerConfig, StreamableHTTPServerConfig]]:
mcp_server_list = {}
# Attempt to read from ~/.letta/mcp_config.json
mcp_config_path = os.path.join(constants.LETTA_DIR, constants.MCP_CONFIG_NAME)
if os.path.exists(mcp_config_path):
# Read file without blocking event loop
def _read_config():
with open(mcp_config_path, "r") as f:
return json.load(f)
try:
mcp_config = await asyncio.to_thread(_read_config)
except Exception as e:
# Config parsing errors are user configuration issues, not system errors
logger.warning(f"Failed to parse MCP config file ({mcp_config_path}) as json: {e}")
return mcp_server_list
# Proper formatting is "mcpServers" key at the top level,
# then a dict with the MCP server name as the key,
# with the value being the schema from StdioServerParameters
if MCP_CONFIG_TOPLEVEL_KEY in mcp_config:
for server_name, server_params_raw in mcp_config[MCP_CONFIG_TOPLEVEL_KEY].items():
# No support for duplicate server names
if server_name in mcp_server_list:
# Duplicate server names are configuration issues, not system errors
logger.warning(f"Duplicate MCP server name found (skipping): {server_name}")
continue
if "url" in server_params_raw:
# Attempt to parse the server params as an SSE server
try:
server_params = SSEServerConfig(
server_name=server_name,
server_url=server_params_raw["url"],
auth_header=server_params_raw.get("auth_header", None),
auth_token=server_params_raw.get("auth_token", None),
headers=server_params_raw.get("headers", None),
)
mcp_server_list[server_name] = server_params
except Exception as e:
# Config parsing errors are user configuration issues, not system errors
logger.warning(f"Failed to parse server params for MCP server {server_name} (skipping): {e}")
continue
else:
# Attempt to parse the server params as a StdioServerParameters
try:
server_params = StdioServerConfig(
server_name=server_name,
command=server_params_raw["command"],
args=server_params_raw.get("args", []),
env=server_params_raw.get("env", {}),
)
mcp_server_list[server_name] = server_params
except Exception as e:
# Config parsing errors are user configuration issues, not system errors
logger.warning(f"Failed to parse server params for MCP server {server_name} (skipping): {e}")
continue
return mcp_server_list
    async def get_mcp_client(
        self,
        server_config: Union[SSEServerConfig, StdioServerConfig, StreamableHTTPServerConfig],
        actor: PydanticUser,
        oauth: Optional[ServerSideOAuth] = None,
        agent_id: Optional[str] = None,
    ) -> Union[AsyncFastMCPSSEClient, AsyncStdioMCPClient, AsyncFastMCPStreamableHTTPClient]:
        """
        Helper function to create the appropriate MCP client based on server configuration.
        Args:
            server_config: The server configuration object
            actor: The user making the request
            oauth: Optional ServerSideOAuth instance for authentication
            agent_id: Optional agent ID for request headers
        Returns:
            The appropriate MCP client instance
        Raises:
            ValueError: If server config type is not supported, or if stdio servers are disabled
        """
        # If no OAuth is provided, check if we have stored OAuth credentials
        # (only URL-based configs can have stored sessions; stdio configs have no server_url)
        if oauth is None and hasattr(server_config, "server_url"):
            oauth_session = await self.get_oauth_session_by_server(server_config.server_url, actor, status=OAuthSessionStatus.AUTHORIZED)
            # Check if access token exists by attempting to decrypt it
            if oauth_session and oauth_session.access_token_enc and await oauth_session.access_token_enc.get_plaintext_async():
                # Create ServerSideOAuth from stored credentials
                oauth = ServerSideOAuth(
                    mcp_url=oauth_session.server_url,
                    session_id=oauth_session.id,
                    mcp_manager=self,
                    actor=actor,
                    redirect_uri=oauth_session.redirect_uri,
                )
        # Re-instantiate the config as its concrete type so the client receives
        # exactly the model it expects, regardless of what subtype was passed in.
        if server_config.type == MCPServerType.SSE:
            server_config = SSEServerConfig(**server_config.model_dump())
            return AsyncFastMCPSSEClient(server_config=server_config, oauth=oauth, agent_id=agent_id)
        elif server_config.type == MCPServerType.STDIO:
            # Check if stdio MCP servers are disabled (not suitable for multi-tenant deployments)
            if tool_settings.mcp_disable_stdio:
                raise ValueError("MCP stdio servers are disabled. Set MCP_DISABLE_STDIO=false to enable them.")
            server_config = StdioServerConfig(**server_config.model_dump())
            # NOTE(review): stdio clients are created without an OAuth provider even when
            # `oauth` was resolved above — presumably intentional since stdio has no HTTP layer.
            return AsyncStdioMCPClient(server_config=server_config, oauth_provider=None, agent_id=agent_id)
        elif server_config.type == MCPServerType.STREAMABLE_HTTP:
            server_config = StreamableHTTPServerConfig(**server_config.model_dump())
            return AsyncFastMCPStreamableHTTPClient(server_config=server_config, oauth=oauth, agent_id=agent_id)
        else:
            raise ValueError(f"Unsupported server config type: {type(server_config)}")
# OAuth-related methods
async def _oauth_orm_to_pydantic_async(self, oauth_session: MCPOAuth) -> MCPOAuthSession:
"""
Convert OAuth ORM model to Pydantic model, reading directly from encrypted columns.
"""
# Convert encrypted columns to Secret objects
authorization_code_enc = (
Secret.from_encrypted(oauth_session.authorization_code_enc) if oauth_session.authorization_code_enc else None
)
access_token_enc = Secret.from_encrypted(oauth_session.access_token_enc) if oauth_session.access_token_enc else None
refresh_token_enc = Secret.from_encrypted(oauth_session.refresh_token_enc) if oauth_session.refresh_token_enc else None
client_secret_enc = Secret.from_encrypted(oauth_session.client_secret_enc) if oauth_session.client_secret_enc else None
# Get plaintext values from encrypted columns (primary source of truth)
authorization_code = await authorization_code_enc.get_plaintext_async() if authorization_code_enc else None
access_token = await access_token_enc.get_plaintext_async() if access_token_enc else None
refresh_token = await refresh_token_enc.get_plaintext_async() if refresh_token_enc else None
client_secret = await client_secret_enc.get_plaintext_async() if client_secret_enc else None
# Create the Pydantic object with both encrypted and plaintext fields
pydantic_session = MCPOAuthSession(
id=oauth_session.id,
state=oauth_session.state,
server_id=oauth_session.server_id,
server_url=oauth_session.server_url,
server_name=oauth_session.server_name,
user_id=oauth_session.user_id,
organization_id=oauth_session.organization_id,
authorization_url=oauth_session.authorization_url,
token_type=oauth_session.token_type,
expires_at=oauth_session.expires_at,
scope=oauth_session.scope,
client_id=oauth_session.client_id,
redirect_uri=oauth_session.redirect_uri,
status=oauth_session.status,
created_at=oauth_session.created_at,
updated_at=oauth_session.updated_at,
# Plaintext fields populated from encrypted columns
authorization_code=authorization_code,
access_token=access_token,
refresh_token=refresh_token,
client_secret=client_secret,
# Encrypted fields as Secret objects
authorization_code_enc=authorization_code_enc,
access_token_enc=access_token_enc,
refresh_token_enc=refresh_token_enc,
client_secret_enc=client_secret_enc,
)
return pydantic_session
@enforce_types
async def create_oauth_session(self, session_create: MCPOAuthSessionCreate, actor: PydanticUser) -> MCPOAuthSession:
"""Create a new OAuth session for MCP server authentication."""
async with db_registry.async_session() as session:
# Create the OAuth session with a unique state
oauth_session = MCPOAuth(
id="mcp-oauth-" + str(uuid.uuid4())[:8],
state=secrets.token_urlsafe(32),
server_url=session_create.server_url,
server_name=session_create.server_name,
user_id=session_create.user_id,
organization_id=session_create.organization_id,
status=OAuthSessionStatus.PENDING,
created_at=datetime.now(),
updated_at=datetime.now(),
)
oauth_session = await oauth_session.create_async(session, actor=actor)
# Convert to Pydantic model - note: new sessions won't have tokens yet
return await self._oauth_orm_to_pydantic_async(oauth_session)
@enforce_types
async def get_oauth_session_by_id(self, session_id: str, actor: PydanticUser) -> Optional[MCPOAuthSession]:
"""Get an OAuth session by its ID."""
async with db_registry.async_session() as session:
try:
oauth_session = await MCPOAuth.read_async(db_session=session, identifier=session_id, actor=actor)
return await self._oauth_orm_to_pydantic_async(oauth_session)
except NoResultFound:
return None
@enforce_types
async def get_oauth_session_by_server(
self, server_url: str, actor: PydanticUser, status: Optional[OAuthSessionStatus] = None
) -> Optional[MCPOAuthSession]:
"""Get the latest OAuth session by server URL, organization, and user.
Args:
server_url: The MCP server URL
actor: The user making the request
status: Optional status filter. If None, returns the most recent session regardless of status.
If specified, only returns sessions with that status.
"""
async with db_registry.async_session() as session:
# Query for OAuth session matching organization, user, server URL
# Order by updated_at desc to get the most recent record
query = select(MCPOAuth).where(
MCPOAuth.organization_id == actor.organization_id,
MCPOAuth.user_id == actor.id,
MCPOAuth.server_url == server_url,
)
# Optionally filter by status
if status is not None:
query = query.where(MCPOAuth.status == status)
result = await session.execute(query.order_by(desc(MCPOAuth.updated_at)).limit(1))
oauth_session = result.scalar_one_or_none()
if not oauth_session:
return None
return await self._oauth_orm_to_pydantic_async(oauth_session)
@enforce_types
async def get_oauth_session_by_state(self, state: str) -> Optional[MCPOAuthSession]:
"""Get an OAuth session by its state parameter (used in static callback URI flow)."""
async with db_registry.async_session() as session:
result = await session.execute(select(MCPOAuth).where(MCPOAuth.state == state).limit(1))
oauth_session = result.scalar_one_or_none()
if not oauth_session:
return None
return await self._oauth_orm_to_pydantic_async(oauth_session)
@enforce_types
async def update_oauth_session(self, session_id: str, session_update: MCPOAuthSessionUpdate, actor: PydanticUser) -> MCPOAuthSession:
"""Update an existing OAuth session."""
async with db_registry.async_session() as session:
oauth_session = await MCPOAuth.read_async(db_session=session, identifier=session_id, actor=actor)
# Update fields that are provided
if session_update.state is not None:
oauth_session.state = session_update.state
if session_update.authorization_url is not None:
oauth_session.authorization_url = session_update.authorization_url
# Handle encryption for authorization_code
# Only re-encrypt if the value has actually changed
if session_update.authorization_code is not None:
# Check if value changed by reading from _enc column only
existing_code = None
if oauth_session.authorization_code_enc:
existing_secret = Secret.from_encrypted(oauth_session.authorization_code_enc)
existing_code = await existing_secret.get_plaintext_async()
# Only re-encrypt if different
if existing_code != session_update.authorization_code:
oauth_session.authorization_code_enc = Secret.from_plaintext(session_update.authorization_code).get_encrypted()
# Handle encryption for access_token - write only to _enc column
if session_update.access_token is not None:
# Check if value changed by reading from _enc column only
existing_token = None
if oauth_session.access_token_enc:
existing_secret = Secret.from_encrypted(oauth_session.access_token_enc)
existing_token = await existing_secret.get_plaintext_async()
# Only re-encrypt if different
if existing_token != session_update.access_token:
oauth_session.access_token_enc = Secret.from_plaintext(session_update.access_token).get_encrypted()
# Handle encryption for refresh_token - write only to _enc column
if session_update.refresh_token is not None:
# Check if value changed by reading from _enc column only
existing_refresh = None
if oauth_session.refresh_token_enc:
existing_secret = Secret.from_encrypted(oauth_session.refresh_token_enc)
existing_refresh = await existing_secret.get_plaintext_async()
# Only re-encrypt if different
if existing_refresh != session_update.refresh_token:
oauth_session.refresh_token_enc = Secret.from_plaintext(session_update.refresh_token).get_encrypted()
if session_update.token_type is not None:
oauth_session.token_type = session_update.token_type
if session_update.expires_at is not None:
oauth_session.expires_at = session_update.expires_at
if session_update.scope is not None:
oauth_session.scope = session_update.scope
if session_update.client_id is not None:
oauth_session.client_id = session_update.client_id
# Handle encryption for client_secret - write only to _enc column
if session_update.client_secret is not None:
# Check if value changed by reading from _enc column only
existing_secret_val = None
if oauth_session.client_secret_enc:
existing_secret = Secret.from_encrypted(oauth_session.client_secret_enc)
existing_secret_val = await existing_secret.get_plaintext_async()
# Only re-encrypt if different
if existing_secret_val != session_update.client_secret:
oauth_session.client_secret_enc = Secret.from_plaintext(session_update.client_secret).get_encrypted()
if session_update.redirect_uri is not None:
oauth_session.redirect_uri = session_update.redirect_uri
if session_update.status is not None:
oauth_session.status = session_update.status
# Always update the updated_at timestamp
oauth_session.updated_at = datetime.now()
oauth_session = await oauth_session.update_async(db_session=session, actor=actor)
return await self._oauth_orm_to_pydantic_async(oauth_session)
@enforce_types
async def delete_oauth_session(self, session_id: str, actor: PydanticUser) -> None:
"""Delete an OAuth session."""
async with db_registry.async_session() as session:
try:
oauth_session = await MCPOAuth.read_async(db_session=session, identifier=session_id, actor=actor)
await oauth_session.hard_delete_async(db_session=session, actor=actor)
except NoResultFound:
raise ValueError(f"OAuth session with id {session_id} not found.")
@enforce_types
async def cleanup_expired_oauth_sessions(self, max_age_hours: int = 24) -> int:
"""Clean up expired OAuth sessions and return the count of deleted sessions."""
cutoff_time = datetime.now() - timedelta(hours=max_age_hours)
async with db_registry.async_session() as session:
# Find expired sessions
result = await session.execute(select(MCPOAuth).where(MCPOAuth.created_at < cutoff_time))
expired_sessions = result.scalars().all()
# Delete expired sessions using async ORM method
for oauth_session in expired_sessions:
await oauth_session.hard_delete_async(db_session=session, actor=None)
if expired_sessions:
logger.info(f"Cleaned up {len(expired_sessions)} expired OAuth sessions")
return len(expired_sessions)
    @enforce_types
    async def handle_oauth_flow(
        self,
        request: Union[SSEServerConfig, StdioServerConfig, StreamableHTTPServerConfig],
        actor: PydanticUser,
        http_request: Optional[Request] = None,
    ):
        """
        Handle OAuth flow for MCP server connection and yield SSE events.
        Args:
            request: The server configuration
            actor: The user making the request
            http_request: The HTTP request object (used to pick the redirect URI)
        Yields:
            SSE events during OAuth flow: OAUTH_REQUIRED -> AUTHORIZATION_URL ->
            WAITING_FOR_AUTH -> SUCCESS (or ERROR on failure)
        Raises:
            HTTPException: 400 when no redirect URI can be determined from the
                environment / request headers.
        """
        import asyncio
        from letta.services.mcp.oauth_utils import oauth_stream_event
        from letta.services.mcp.types import OauthStreamEvent
        # OAuth required, yield state to client to prepare to handle authorization URL
        # Note: Existing AUTHORIZED sessions are already checked upstream in get_mcp_client
        yield oauth_stream_event(OauthStreamEvent.OAUTH_REQUIRED, message="OAuth authentication required")
        # Create new OAuth session for each test connection attempt
        # Note: Old pending sessions will be cleaned up when an MCP server is created/deleted
        session_create = MCPOAuthSessionCreate(
            server_url=request.server_url,
            server_name=request.server_name,
            user_id=actor.id,
            organization_id=actor.organization_id,
        )
        oauth_session = await self.create_oauth_session(session_create, actor)
        session_id = oauth_session.id
        # TODO: @jnjpng make this check more robust and remove direct os.getenv
        # Check if request is from web frontend to determine redirect URI
        is_web_request = (
            http_request
            and http_request.headers
            and http_request.headers.get("user-agent", "") == "Next.js Middleware"
            and http_request.headers.__contains__("x-organization-id")
        )
        # Check if request is from letta-code CLI (uses web callback for OAuth)
        is_letta_code_request = http_request and http_request.headers and http_request.headers.get("x-letta-source", "") == "letta-code"
        logo_uri = None
        NEXT_PUBLIC_CURRENT_HOST = os.getenv("NEXT_PUBLIC_CURRENT_HOST")
        LETTA_AGENTS_ENDPOINT = os.getenv("LETTA_AGENTS_ENDPOINT")
        if (is_web_request or is_letta_code_request) and NEXT_PUBLIC_CURRENT_HOST:
            # Use static callback URI - session is identified via state parameter
            redirect_uri = f"{NEXT_PUBLIC_CURRENT_HOST}/oauth/callback/mcp"
            logo_uri = f"{NEXT_PUBLIC_CURRENT_HOST}/seo/favicon.svg"
        elif LETTA_AGENTS_ENDPOINT:
            # API and SDK usage should call core server directly
            # Use static callback URI - session is identified via state parameter
            redirect_uri = f"{LETTA_AGENTS_ENDPOINT}/v1/tools/mcp/oauth/callback"
        else:
            logger.error(
                f"No redirect URI found for request and base urls: {http_request.headers if http_request else 'No headers'} {NEXT_PUBLIC_CURRENT_HOST} {LETTA_AGENTS_ENDPOINT}"
            )
            raise HTTPException(status_code=400, detail="No redirect URI found")
        # Create ServerSideOAuth for FastMCP client
        oauth = ServerSideOAuth(
            mcp_url=request.server_url,
            session_id=session_id,
            mcp_manager=self,
            actor=actor,
            redirect_uri=redirect_uri,
            url_callback=None,  # URL is stored by redirect_handler
            logo_uri=logo_uri,
        )
        # Get authorization URL by triggering OAuth flow
        temp_client = None
        connect_task = None
        async def connect_and_cleanup(client: AsyncBaseMCPClient, ready_queue: asyncio.Queue):
            """Wrap connection and cleanup in the same task to share cancel scope"""
            try:
                await client.connect_to_server()
                # Send client to main task without finishing the task
                await ready_queue.put(client)
                # Now wait for signal to cleanup
                await client._cleanup_event.wait()
            finally:
                await client.cleanup()
        try:
            ready_queue = asyncio.Queue()
            temp_client = await self.get_mcp_client(request, actor, oauth)
            temp_client._cleanup_event = asyncio.Event()
            # Run connect_to_server in background to avoid blocking
            # This will trigger the OAuth flow and the redirect_handler will save the authorization URL to database
            connect_task = safe_create_task_with_return(connect_and_cleanup(temp_client, ready_queue), label="mcp_oauth_connect")
            # Fetch the authorization URL from database and yield state to client to proceed with handling authorization URL
            auth_session = await self.get_oauth_session_by_id(session_id, actor)
            # Give the OAuth flow time to connect to the MCP server and store the authorization URL
            # NOTE(review): the `not auth_session` arm is not bounded by `timeout < 10`;
            # presumably the session always exists (created above), so the loop is
            # effectively capped at ~10 one-second polls - confirm.
            timeout = 0
            while not auth_session or (not auth_session.authorization_url and not connect_task.done() and timeout < 10):
                timeout += 1
                auth_session = await self.get_oauth_session_by_id(session_id, actor)
                await asyncio.sleep(1.0)
            if auth_session and auth_session.authorization_url:
                yield oauth_stream_event(OauthStreamEvent.AUTHORIZATION_URL, url=auth_session.authorization_url, session_id=session_id)
            # Wait for user authorization (with timeout), client should render loading state until user completes the flow and /mcp/oauth/callback/{session_id} is hit
            yield oauth_stream_event(OauthStreamEvent.WAITING_FOR_AUTH, message="Waiting for user authorization...")
            # Callback handler will poll for authorization code and state and update the OAuth session
            # Get the client from the queue (blocks until connect_and_cleanup has connected)
            temp_client = await ready_queue.get()
            tools = await temp_client.list_tools(serialize=True)
            yield oauth_stream_event(OauthStreamEvent.SUCCESS, tools=tools)
            # Signal the background task to cleanup in its own task
            temp_client._cleanup_event.set()
            await connect_task  # now it finishes safely
        except Exception as e:
            logger.error(f"Error triggering OAuth flow: {e}")
            yield oauth_stream_event(OauthStreamEvent.ERROR, message=f"Failed to trigger OAuth: {str(e)}")
            raise e
        finally:
            # Clean up resources
            if connect_task and not connect_task.done():
                connect_task.cancel()
                try:
                    await connect_task
                except asyncio.CancelledError:
                    pass
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/mcp_manager.py",
"license": "Apache License 2.0",
"lines": 1064,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/services/telemetry_manager.py | import asyncio
import os
from letta.helpers.singleton import singleton
from letta.log import get_logger
from letta.otel.tracing import trace_method
from letta.schemas.provider_trace import ProviderTrace
from letta.schemas.user import User as PydanticUser
from letta.services.provider_trace_backends import get_provider_trace_backend, get_provider_trace_backends
from letta.settings import telemetry_settings
from letta.utils import enforce_types
logger = get_logger(__name__)
class TelemetryManager:
    """
    Manages provider trace telemetry using configurable backends.
    Supports multiple backends for dual-write scenarios (e.g., migration).
    Configure via LETTA_TELEMETRY_PROVIDER_TRACE_BACKEND (comma-separated):
    - postgres: Store in PostgreSQL (default)
    - clickhouse: Store in ClickHouse (reads and writes from llm_traces table)
    - socket: Store via Unix socket to external sidecar
    Example: LETTA_TELEMETRY_PROVIDER_TRACE_BACKEND=postgres,clickhouse
    Multi-backend behavior:
    - Writes: Sent to ALL configured backends concurrently via asyncio.gather.
      Errors in one backend don't affect others (logged but not raised).
    - Reads: Only from PRIMARY backend (first in the comma-separated list).
      Secondary backends are write-only for this manager.
    """

    def __init__(self):
        # The primary backend (first configured) serves reads; all receive writes.
        self._backends = get_provider_trace_backends()
        self._primary_backend = self._backends[0] if self._backends else get_provider_trace_backend()

    @enforce_types
    @trace_method
    async def get_provider_trace_by_step_id_async(
        self,
        step_id: str,
        actor: PydanticUser,
    ) -> ProviderTrace | None:
        """Return the provider trace for a step from the primary backend, or None."""
        # Read from primary backend only
        return await self._primary_backend.get_by_step_id_async(step_id=step_id, actor=actor)

    @enforce_types
    @trace_method
    async def create_provider_trace_async(
        self,
        actor: PydanticUser,
        provider_trace: ProviderTrace,
    ) -> ProviderTrace | None:
        """Write the trace to all backends concurrently.

        Returns the first successful backend's result, or None when every
        backend write failed. (The annotation previously claimed a non-None
        return, which was wrong for the all-failures case.)
        """
        # Set source if not already set (use LETTA_TELEMETRY_SOURCE, fallback to DD_SERVICE)
        if provider_trace.source is None:
            source = telemetry_settings.source or os.environ.get("DD_SERVICE")
            if source:
                provider_trace = provider_trace.model_copy(update={"source": source})
        # Write to all backends concurrently; per-backend failures are logged, not raised.
        tasks = [self._safe_create_async(backend, actor, provider_trace) for backend in self._backends]
        results = await asyncio.gather(*tasks)
        # Return first non-None result (from primary backend)
        return next((r for r in results if r is not None), None)

    async def _safe_create_async(
        self,
        backend,
        actor: PydanticUser,
        provider_trace: ProviderTrace,
    ) -> ProviderTrace | None:
        """Create trace in a backend, catching and logging errors."""
        try:
            return await backend.create_async(actor=actor, provider_trace=provider_trace)
        except Exception as e:
            logger.warning(f"Failed to write to {backend.__class__.__name__}: {e}")
            return None

    def create_provider_trace(
        self,
        actor: PydanticUser,
        provider_trace: ProviderTrace,
    ) -> ProviderTrace | None:
        """Synchronous version - writes to all backends, returns first success or None."""
        # Set source if not already set (use LETTA_TELEMETRY_SOURCE, fallback to DD_SERVICE)
        if provider_trace.source is None:
            source = telemetry_settings.source or os.environ.get("DD_SERVICE")
            if source:
                provider_trace = provider_trace.model_copy(update={"source": source})
        result = None
        for backend in self._backends:
            try:
                r = backend.create_sync(actor=actor, provider_trace=provider_trace)
                if result is None:
                    result = r
            except Exception as e:
                logger.warning(f"Failed to write to {backend.__class__.__name__}: {e}")
        return result
@singleton
class NoopTelemetryManager(TelemetryManager):
    """Noop implementation of TelemetryManager: accepts calls, persists nothing."""

    def __init__(self):
        pass  # Don't initialize backends; this manager never touches storage.

    async def create_provider_trace_async(
        self,
        actor: PydanticUser,
        provider_trace: ProviderTrace,
    ) -> ProviderTrace | None:
        # No-op write; annotation fixed to reflect the always-None return.
        return None

    async def get_provider_trace_by_step_id_async(
        self,
        step_id: str,
        actor: PydanticUser,
    ) -> ProviderTrace | None:
        return None

    def create_provider_trace(
        self,
        actor: PydanticUser,
        provider_trace: ProviderTrace,
    ) -> ProviderTrace | None:
        # No-op write; annotation fixed to reflect the always-None return.
        return None
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/telemetry_manager.py",
"license": "Apache License 2.0",
"lines": 110,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/services/tool_executor/builtin_tool_executor.py | import asyncio
import json
from typing import Any, Dict, List, Literal, Optional
from letta.log import get_logger
from letta.otel.tracing import trace_method
from letta.schemas.agent import AgentState
from letta.schemas.sandbox_config import SandboxConfig
from letta.schemas.tool import Tool
from letta.schemas.tool_execution_result import ToolExecutionResult
from letta.schemas.user import User
from letta.services.tool_executor.tool_executor_base import ToolExecutor
from letta.settings import tool_settings
logger = get_logger(__name__)
class LettaBuiltinToolExecutor(ToolExecutor):
"""Executor for built in Letta tools."""
@trace_method
async def execute(
self,
function_name: str,
function_args: dict,
tool: Tool,
actor: User,
agent_state: Optional[AgentState] = None,
sandbox_config: Optional[SandboxConfig] = None,
sandbox_env_vars: Optional[Dict[str, Any]] = None,
) -> ToolExecutionResult:
function_map = {
"run_code": self.run_code,
"run_code_with_tools": self.run_code_with_tools,
"web_search": self.web_search,
"fetch_webpage": self.fetch_webpage,
}
if function_name not in function_map:
raise ValueError(f"Unknown function: {function_name}")
# Execute the appropriate function
function_args_copy = function_args.copy() # Make a copy to avoid modifying the original
function_response = await function_map[function_name](agent_state=agent_state, **function_args_copy)
return ToolExecutionResult(
status="success",
func_return=function_response,
agent_state=agent_state,
)
async def run_code_with_tools(self, agent_state: "AgentState", code: str) -> ToolExecutionResult:
from e2b_code_interpreter import AsyncSandbox
from letta.utils import get_friendly_error_msg
if tool_settings.e2b_api_key is None:
raise ValueError("E2B_API_KEY is not set")
env = {"LETTA_AGENT_ID": agent_state.id}
env.update(agent_state.get_agent_env_vars_as_dict())
# Create the sandbox, using template if configured (similar to tool_execution_sandbox.py)
if tool_settings.e2b_sandbox_template_id:
sbx = await AsyncSandbox.create(tool_settings.e2b_sandbox_template_id, api_key=tool_settings.e2b_api_key, envs=env)
else:
sbx = await AsyncSandbox.create(api_key=tool_settings.e2b_api_key, envs=env)
tool_source_code = ""
lines = []
# initialize the letta client
lines.extend(
[
"# Initialize Letta client for tool execution",
"import os",
"from letta_client import Letta",
"client = None",
"if os.getenv('LETTA_API_KEY'):",
" # Check letta_client version to use correct parameter name",
" from packaging import version as pkg_version",
" import letta_client as lc_module",
" lc_version = pkg_version.parse(lc_module.__version__)",
" if lc_version < pkg_version.parse('1.0.0'):",
" client = Letta(",
" token=os.getenv('LETTA_API_KEY')",
" )",
" else:",
" client = Letta(",
" api_key=os.getenv('LETTA_API_KEY')",
" )",
]
)
tool_source_code = "\n".join(lines) + "\n"
# Inject source code from agent's tools to enable programmatic tool calling
# This allows Claude to compose tools in a single code execution, e.g.:
# run_code("result = add(multiply(4, 5), 6)")
from letta.schemas.enums import ToolType
if agent_state and agent_state.tools:
for tool in agent_state.tools:
if tool.tool_type == ToolType.CUSTOM and tool.source_code:
# simply append the source code of the tool
# TODO: can get rid of this option
tool_source_code += tool.source_code + "\n\n"
else:
# invoke the tool through the client
# raises an error if LETTA_API_KEY or other envs not set
tool_lines = [
f"def {tool.name}(**kwargs):",
" if not os.getenv('LETTA_API_KEY'):",
" raise ValueError('LETTA_API_KEY is not set')",
" if not os.getenv('LETTA_AGENT_ID'):",
" raise ValueError('LETTA_AGENT_ID is not set')",
f" result = client.agents.tools.run(agent_id=os.getenv('LETTA_AGENT_ID'), tool_name='{tool.name}', args=kwargs)",
" if result.status == 'success':",
" return result.func_return",
" else:",
" raise ValueError(result.stderr)",
]
tool_source_code += "\n".join(tool_lines) + "\n\n"
params = {"code": tool_source_code + code}
execution = await sbx.run_code(**params)
# Parse results similar to e2b_sandbox.py
if execution.results:
func_return = execution.results[0].text if hasattr(execution.results[0], "text") else str(execution.results[0])
elif execution.error:
func_return = get_friendly_error_msg(
function_name="run_code_with_tools", exception_name=execution.error.name, exception_message=execution.error.value
)
execution.logs.stderr.append(execution.error.traceback)
else:
func_return = None
return json.dumps(
{
"status": "error" if execution.error else "success",
"func_return": func_return,
"stdout": execution.logs.stdout,
"stderr": execution.logs.stderr,
},
ensure_ascii=False,
)
async def run_code(self, agent_state: "AgentState", code: str, language: Literal["python", "js", "ts", "r", "java"]) -> str:
    """Execute `code` in a fresh E2B sandbox and return a JSON-encoded, LLM-friendly summary.

    For Python code, the source of the agent's tools is prepended so the snippet
    can call them directly (programmatic tool composition).
    """
    from e2b_code_interpreter import AsyncSandbox

    if tool_settings.e2b_api_key is None:
        raise ValueError("E2B_API_KEY is not set")

    # Spin up the sandbox, honoring a custom template when one is configured
    # (mirrors tool_execution_sandbox.py).
    template_id = tool_settings.e2b_sandbox_template_id
    if template_id:
        sandbox = await AsyncSandbox.create(template_id, api_key=tool_settings.e2b_api_key)
    else:
        sandbox = await AsyncSandbox.create(api_key=tool_settings.e2b_api_key)

    # Inject source code from the agent's tools so a single execution can
    # compose them, e.g. run_code_with_tools("result = add(multiply(4, 5), 6)")
    if language == "python" and agent_state and agent_state.tools:
        injected = "".join(t.source_code + "\n\n" for t in agent_state.tools if t.source_code)
        if injected:
            code = injected + code

    run_kwargs = {"code": code}
    if language != "python":
        # E2B defaults to python; only pass `language` for everything else.
        run_kwargs["language"] = language
    summary = self._llm_friendly_result(await sandbox.run_code(**run_kwargs))
    return json.dumps(summary, ensure_ascii=False)
def _llm_friendly_result(self, res):
out = {
"results": [r.text if hasattr(r, "text") else str(r) for r in res.results],
"logs": {
"stdout": getattr(res.logs, "stdout", []),
"stderr": getattr(res.logs, "stderr", []),
},
}
err = getattr(res, "error", None)
if err is not None:
out["error"] = err
return out
@trace_method
async def web_search(
    self,
    agent_state: "AgentState",
    query: str,
    num_results: int = 10,
    category: Optional[
        Literal["company", "research paper", "news", "pdf", "github", "tweet", "personal site", "linkedin profile", "financial report"]
    ] = None,
    include_text: bool = False,
    include_domains: Optional[List[str]] = None,
    exclude_domains: Optional[List[str]] = None,
    start_published_date: Optional[str] = None,
    end_published_date: Optional[str] = None,
    user_location: Optional[str] = None,
) -> str:
    """
    Search the web using Exa's AI-powered search engine and retrieve relevant content.

    Args:
        query: The search query to find relevant web content
        num_results: Number of results to return (1-100)
        category: Focus search on specific content types
        include_text: Whether to retrieve full page content (default: False, only returns summary and highlights)
        include_domains: List of domains to include in search results
        exclude_domains: List of domains to exclude from search results
        start_published_date: Only return content published after this date (ISO format)
        end_published_date: Only return content published before this date (ISO format)
        user_location: Two-letter country code for localized results

    Returns:
        JSON-encoded string containing search results

    Raises:
        ImportError: if the `exa_py` package is not available.
        ValueError: if no EXA API key is configured anywhere.
    """
    try:
        from exa_py import Exa
    except ImportError:
        raise ImportError("exa-py is not installed in the tool execution environment")

    # Empty/whitespace query is reported as a structured error payload, not an exception.
    if not query.strip():
        return json.dumps({"error": "Query cannot be empty", "query": query})

    # Get EXA API key from agent environment or tool settings (agent env vars take precedence)
    agent_state_tool_env_vars = agent_state.get_agent_env_vars_as_dict()
    exa_api_key = agent_state_tool_env_vars.get("EXA_API_KEY") or tool_settings.exa_api_key
    if not exa_api_key:
        raise ValueError("EXA_API_KEY is not set in environment or on agent_state tool execution environment variables.")

    logger.info(f"[DEBUG] Starting Exa web search for query: '{query}' with {num_results} results")

    # Build search parameters
    search_params = {
        "query": query,
        "num_results": min(max(num_results, 1), 100),  # Clamp between 1-100
        "type": "auto",  # Always use auto search type
    }

    # Add optional parameters if provided
    if category:
        search_params["category"] = category
    if include_domains:
        search_params["include_domains"] = include_domains
    if exclude_domains:
        search_params["exclude_domains"] = exclude_domains
    if start_published_date:
        search_params["start_published_date"] = start_published_date
    if end_published_date:
        search_params["end_published_date"] = end_published_date
    if user_location:
        search_params["user_location"] = user_location

    # Configure contents retrieval: highlights and a query-anchored summary are
    # always requested; full text only when `include_text` is set.
    contents_params = {
        "text": include_text,
        "highlights": {"num_sentences": 2, "highlights_per_url": 3, "query": query},
        "summary": {"query": f"Summarize the key information from this content related to: {query}"},
    }

    def _sync_exa_search():
        """Synchronous Exa API call to run in thread pool."""
        exa = Exa(api_key=exa_api_key)
        return exa.search_and_contents(**search_params, **contents_params)

    try:
        # Perform search with content retrieval in thread pool to avoid blocking event loop
        logger.info(f"[DEBUG] Making async Exa API call with params: {search_params}")
        result = await asyncio.to_thread(_sync_exa_search)

        # Format results: always include metadata; content fields only when present.
        formatted_results = []
        for res in result.results:
            formatted_result = {
                "title": res.title,
                "url": res.url,
                "published_date": res.published_date,
                "author": res.author,
            }
            # Add content if requested
            if include_text and hasattr(res, "text") and res.text:
                formatted_result["text"] = res.text
            # Add highlights if available
            if hasattr(res, "highlights") and res.highlights:
                formatted_result["highlights"] = res.highlights
            # Add summary if available
            if hasattr(res, "summary") and res.summary:
                formatted_result["summary"] = res.summary
            formatted_results.append(formatted_result)

        response = {"query": query, "results": formatted_results}
        logger.info(f"[DEBUG] Exa search completed successfully with {len(formatted_results)} results")
        return json.dumps(response, indent=2, ensure_ascii=False)
    except Exception as e:
        # NOTE(review): search failures are logged at info level and returned as
        # a structured error payload rather than raised — confirm this is intended.
        logger.info(f"Exa search failed for query '{query}': {str(e)}")
        return json.dumps({"query": query, "error": f"Search failed: {str(e)}"})
async def fetch_webpage(self, agent_state: "AgentState", url: str) -> str:
    """
    Fetch a webpage and convert it to markdown/text format using Exa API (if available) or trafilatura/readability.

    Args:
        url: The URL of the webpage to fetch and convert

    Returns:
        String containing the webpage content in markdown/text format
    """
    import asyncio
    from urllib.parse import urlparse

    import html2text
    import requests
    from readability import Document
    from trafilatura import extract, fetch_url

    # Reject anything that is not plain HTTP(S); file:// and other schemes are unsupported.
    parsed = urlparse(url)
    if parsed.scheme.lower() not in ("http", "https"):
        raise ValueError(
            f"Invalid URL scheme '{parsed.scheme}'. Only 'http' and 'https' URLs are supported. "
            f"Local file paths (file://) and other protocols cannot be fetched."
        )

    # First choice: Exa's hosted content extraction, when a key is available.
    try:
        from exa_py import Exa

        env_vars = agent_state.get_agent_env_vars_as_dict()
        exa_api_key = env_vars.get("EXA_API_KEY") or tool_settings.exa_api_key
        if not exa_api_key:
            logger.info("[DEBUG] No Exa key available, falling back to local fetch.")
        else:
            logger.info(f"[DEBUG] Starting Exa fetch content for url: '{url}'")
            exa_client = Exa(api_key=exa_api_key)
            exa_results = await asyncio.to_thread(
                lambda: exa_client.get_contents(
                    [url],
                    text=True,
                ).results
            )
            if len(exa_results) > 0:
                top = exa_results[0]
                return json.dumps(
                    {
                        "title": top.title,
                        "published_date": top.published_date,
                        "author": top.author,
                        "text": top.text,
                    }
                )
            logger.info(f"[DEBUG] Exa did not return content for '{url}', falling back to local fetch.")
    except ImportError:
        logger.info("[DEBUG] Exa pip package unavailable, falling back to local fetch.")

    try:
        # Second choice: trafilatura, run as one blocking call in a worker thread.
        def _trafilatura_pipeline():
            downloaded = fetch_url(url)  # fetch_url doesn't accept timeout parameter
            if downloaded:
                return extract(downloaded, output_format="markdown")

        markdown = await asyncio.to_thread(_trafilatura_pipeline)
        if markdown:
            return markdown

        # Last resort: requests + readability + html2text, also off the event loop.
        def _readability_pipeline():
            response = requests.get(url, timeout=30, headers={"User-Agent": "Mozilla/5.0 (compatible; LettaBot/1.0)"})
            response.raise_for_status()
            doc = Document(response.text)
            clean_html = doc.summary(html_partial=True)
            return html2text.html2text(clean_html)

        return await asyncio.to_thread(_readability_pipeline)
    except requests.exceptions.RequestException as e:
        raise Exception(f"Error fetching webpage: {str(e)}")
    except Exception as e:
        raise Exception(f"Unexpected error: {str(e)}")
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/tool_executor/builtin_tool_executor.py",
"license": "Apache License 2.0",
"lines": 338,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/services/tool_executor/composio_tool_executor.py | from typing import Any, Dict, Optional
from letta.constants import COMPOSIO_ENTITY_ENV_VAR_KEY
from letta.functions.composio_helpers import execute_composio_action_async, generate_composio_action_from_func_name
from letta.helpers.composio_helpers import get_composio_api_key_async
from letta.otel.tracing import trace_method
from letta.schemas.agent import AgentState
from letta.schemas.sandbox_config import SandboxConfig
from letta.schemas.tool import Tool
from letta.schemas.tool_execution_result import ToolExecutionResult
from letta.schemas.user import User
from letta.services.tool_executor.tool_executor_base import ToolExecutor
class ExternalComposioToolExecutor(ToolExecutor):
    """Executor for external Composio tools."""

    @trace_method
    async def execute(
        self,
        function_name: str,
        function_args: dict,
        tool: Tool,
        actor: User,
        agent_state: Optional[AgentState] = None,
        sandbox_config: Optional[SandboxConfig] = None,
        sandbox_env_vars: Optional[Dict[str, Any]] = None,
    ) -> ToolExecutionResult:
        """Run the Composio action backing `tool` and wrap the outcome in a ToolExecutionResult."""
        # Without an agent we cannot resolve the Composio entity; bail out early.
        if agent_state is None:
            return ToolExecutionResult(
                status="error",
                func_return="Agent state is required for external Composio tools. Please contact Letta support if you see this error.",
            )

        # Resolve the Composio action name, entity, and API key for this call.
        action_name = generate_composio_action_from_func_name(tool.name)
        entity_id = self._get_entity_id(agent_state)
        composio_api_key = await get_composio_api_key_async(actor=actor)

        # TODO (matt): Roll in execute_composio_action into this class
        function_response = await execute_composio_action_async(
            action_name=action_name, args=function_args, api_key=composio_api_key, entity_id=entity_id
        )

        return ToolExecutionResult(
            status="success",
            func_return=function_response,
        )

    def _get_entity_id(self, agent_state: AgentState) -> Optional[str]:
        """Extract the entity ID from environment variables."""
        return next(
            (env_var.value for env_var in agent_state.secrets if env_var.key == COMPOSIO_ENTITY_ENV_VAR_KEY),
            None,
        )
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/tool_executor/composio_tool_executor.py",
"license": "Apache License 2.0",
"lines": 48,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:letta/services/tool_executor/core_tool_executor.py | from datetime import datetime
from typing import Any, Dict, List, Literal, Optional
from zoneinfo import ZoneInfo
from letta.constants import (
CORE_MEMORY_LINE_NUMBER_WARNING,
MEMORY_TOOLS_LINE_NUMBER_PREFIX_REGEX,
READ_ONLY_BLOCK_EDIT_ERROR,
RETRIEVAL_QUERY_DEFAULT_PAGE_SIZE,
)
from letta.log import get_logger
from letta.orm.errors import NoResultFound
from letta.schemas.agent import AgentState
from letta.schemas.block import BlockUpdate
from letta.schemas.enums import MessageRole
from letta.schemas.sandbox_config import SandboxConfig
from letta.schemas.tool import Tool
from letta.schemas.tool_execution_result import ToolExecutionResult
from letta.schemas.user import User
from letta.services.tool_executor.tool_executor_base import ToolExecutor
from letta.utils import get_friendly_error_msg
logger = get_logger(__name__)
class LettaCoreToolExecutor(ToolExecutor):
"""Executor for LETTA core tools with direct implementation of functions."""
async def execute(
    self,
    function_name: str,
    function_args: dict,
    tool: Tool,
    actor: User,
    agent_state: Optional[AgentState] = None,
    sandbox_config: Optional[SandboxConfig] = None,
    sandbox_env_vars: Optional[Dict[str, Any]] = None,
) -> ToolExecutionResult:
    """Dispatch a core-tool call to its direct implementation and wrap the result.

    Unknown function names raise ValueError; exceptions raised by the handler
    are converted into an error-status ToolExecutionResult with a friendly
    message in stderr.
    """
    assert agent_state is not None, "Agent state is required for core tools"

    # Name -> coroutine dispatch table for the supported core tools.
    dispatch = {
        "send_message": self.send_message,
        "conversation_search": self.conversation_search,
        "archival_memory_search": self.archival_memory_search,
        "archival_memory_insert": self.archival_memory_insert,
        "core_memory_append": self.core_memory_append,
        "core_memory_replace": self.core_memory_replace,
        "memory_replace": self.memory_replace,
        "memory_insert": self.memory_insert,
        "memory_apply_patch": self.memory_apply_patch,
        "memory_str_replace": self.memory_str_replace,
        "memory_str_insert": self.memory_str_insert,
        "memory_rethink": self.memory_rethink,
        "memory_finish_edits": self.memory_finish_edits,
        "memory": self.memory,
    }

    handler = dispatch.get(function_name)
    if handler is None:
        raise ValueError(f"Unknown function: {function_name}")

    # Copy so the caller's argument dict is never mutated by the handler.
    call_kwargs = dict(function_args)
    try:
        response = await handler(agent_state, actor, **call_kwargs)
        return ToolExecutionResult(
            status="success",
            func_return=response,
            agent_state=agent_state,
        )
    except Exception as e:
        return ToolExecutionResult(
            status="error",
            func_return=e,
            agent_state=agent_state,
            stderr=[get_friendly_error_msg(function_name=function_name, exception_name=type(e).__name__, exception_message=str(e))],
        )
async def send_message(self, agent_state: AgentState, actor: User, message: str) -> Optional[str]:
    """No-op acknowledgement; actual message delivery is handled by the agent loop."""
    return "Sent message successfully."
async def conversation_search(
    self,
    agent_state: AgentState,
    actor: User,
    query: Optional[str] = None,
    roles: Optional[List[Literal["assistant", "user", "tool"]]] = None,
    limit: Optional[int] = None,
    start_date: Optional[str] = None,
    end_date: Optional[str] = None,
) -> Optional[dict]:
    """Search the agent's message history and return formatted, filtered results.

    Args:
        agent_state: Agent whose messages are searched; its timezone (when set)
            is used to localize naive date bounds and displayed timestamps.
        actor: User performing the search.
        query: Optional free-text query passed to the message manager.
        roles: Optional role filter; strings are converted to MessageRole.
        limit: Max results; defaults to RETRIEVAL_QUERY_DEFAULT_PAGE_SIZE.
        start_date: Inclusive lower bound, ISO date or datetime string.
        end_date: Inclusive upper bound, ISO date or datetime string.

    Returns:
        Dict with a "message" summary and a "results" list of per-message dicts.

    Raises:
        ValueError: if a date string matches neither ISO datetime nor YYYY-MM-DD.
    """
    try:
        # Parse datetime parameters if provided
        start_datetime = None
        end_datetime = None

        if start_date:
            try:
                # Try parsing as full datetime first (with time)
                start_datetime = datetime.fromisoformat(start_date)
            except ValueError:
                try:
                    # Fall back to date-only format
                    start_datetime = datetime.strptime(start_date, "%Y-%m-%d")
                    # Set to beginning of day
                    start_datetime = start_datetime.replace(hour=0, minute=0, second=0, microsecond=0)
                except ValueError:
                    raise ValueError(f"Invalid start_date format: {start_date}. Use ISO 8601 format (YYYY-MM-DD or YYYY-MM-DDTHH:MM)")

            # Apply agent's timezone if datetime is naive
            if start_datetime.tzinfo is None and agent_state.timezone:
                tz = ZoneInfo(agent_state.timezone)
                start_datetime = start_datetime.replace(tzinfo=tz)

        if end_date:
            try:
                # Try parsing as full datetime first (with time)
                end_datetime = datetime.fromisoformat(end_date)
            except ValueError:
                try:
                    # Fall back to date-only format
                    end_datetime = datetime.strptime(end_date, "%Y-%m-%d")
                    # Set to end of day for end dates
                    end_datetime = end_datetime.replace(hour=23, minute=59, second=59, microsecond=999999)
                except ValueError:
                    raise ValueError(f"Invalid end_date format: {end_date}. Use ISO 8601 format (YYYY-MM-DD or YYYY-MM-DDTHH:MM)")

            # Apply agent's timezone if datetime is naive
            if end_datetime.tzinfo is None and agent_state.timezone:
                tz = ZoneInfo(agent_state.timezone)
                end_datetime = end_datetime.replace(tzinfo=tz)

        # Convert string roles to MessageRole enum if provided
        message_roles = None
        if roles:
            message_roles = [MessageRole(role) for role in roles]

        # Use provided limit or default
        search_limit = limit if limit is not None else RETRIEVAL_QUERY_DEFAULT_PAGE_SIZE

        # Search using the message manager's search_messages_async method
        message_results = await self.message_manager.search_messages_async(
            agent_id=agent_state.id,
            actor=actor,
            query_text=query,
            roles=message_roles,
            limit=search_limit,
            start_date=start_datetime,
            end_date=end_datetime,
        )

        # Filter out tool messages to prevent recursive results and exponential escaping
        from letta.constants import CONVERSATION_SEARCH_TOOL_NAME

        filtered_results = []
        for message, metadata in message_results:
            # Skip ALL tool messages - they contain tool execution results
            # which can cause recursive nesting and exponential escaping
            if message.role == MessageRole.tool:
                continue
            # Also skip assistant messages that call conversation_search
            # These can contain the search query which may lead to confusing results
            if message.role == MessageRole.assistant and message.tool_calls:
                if CONVERSATION_SEARCH_TOOL_NAME in [tool_call.function.name for tool_call in message.tool_calls]:
                    continue
            filtered_results.append((message, metadata))

        if len(filtered_results) == 0:
            return {"message": "No results found.", "results": []}
        else:
            results_formatted = []
            # get current time in UTC, then convert to agent timezone for consistent comparison
            from datetime import timezone

            now_utc = datetime.now(timezone.utc)
            if agent_state.timezone:
                try:
                    tz = ZoneInfo(agent_state.timezone)
                    now = now_utc.astimezone(tz)
                except Exception:
                    # Invalid timezone string — fall back to UTC for delta math
                    now = now_utc
            else:
                now = now_utc

            for message, metadata in filtered_results:
                # Format timestamp in agent's timezone if available
                timestamp = message.created_at
                time_delta_str = ""
                if timestamp and agent_state.timezone:
                    try:
                        # Convert to agent's timezone
                        tz = ZoneInfo(agent_state.timezone)
                        local_time = timestamp.astimezone(tz)
                        # Format as ISO string with timezone
                        formatted_timestamp = local_time.isoformat()
                        # Calculate time delta (human-readable "Xs/Xm/Xh/Xd ago")
                        delta = now - local_time
                        total_seconds = int(delta.total_seconds())
                        if total_seconds < 60:
                            time_delta_str = f"{total_seconds}s ago"
                        elif total_seconds < 3600:
                            minutes = total_seconds // 60
                            time_delta_str = f"{minutes}m ago"
                        elif total_seconds < 86400:
                            hours = total_seconds // 3600
                            time_delta_str = f"{hours}h ago"
                        else:
                            days = total_seconds // 86400
                            time_delta_str = f"{days}d ago"
                    except Exception:
                        # Fallback to ISO format if timezone conversion fails
                        formatted_timestamp = str(timestamp)
                else:
                    # Use ISO format if no timezone is set
                    formatted_timestamp = str(timestamp) if timestamp else "Unknown"
                content = self.message_manager._extract_message_text(message)

                # Create the base result dict
                result_dict = {
                    "timestamp": formatted_timestamp,
                    "time_ago": time_delta_str,
                    "role": message.role,
                }

                # Add search relevance metadata if available
                if metadata:
                    # Only include non-None values
                    relevance_info = {
                        k: v
                        for k, v in {
                            "rrf_score": metadata.get("combined_score"),
                            "vector_rank": metadata.get("vector_rank"),
                            "fts_rank": metadata.get("fts_rank"),
                            "search_mode": metadata.get("search_mode"),
                        }.items()
                        if v is not None
                    }
                    if relevance_info:  # Only add if we have metadata
                        result_dict["relevance"] = relevance_info

                # _extract_message_text returns already JSON-encoded strings
                # We need to parse them to get the actual content structure
                if content:
                    try:
                        import json

                        parsed_content = json.loads(content)
                        # Add the parsed content directly to avoid double JSON encoding
                        if isinstance(parsed_content, dict):
                            # Merge the parsed content into result_dict
                            result_dict.update(parsed_content)
                        else:
                            # If it's not a dict, add as content
                            result_dict["content"] = parsed_content
                    except (json.JSONDecodeError, ValueError):
                        # if not valid JSON, add as plain content
                        result_dict["content"] = content
                results_formatted.append(result_dict)

            # Return structured dict instead of JSON string to avoid double-encoding
            # NOTE(review): the count below uses len(message_results) (pre-filter),
            # not len(results_formatted) — likely should match the shown list; confirm.
            return {
                "message": f"Showing {len(message_results)} results:",
                "results": results_formatted,
            }
    except Exception as e:
        raise e
async def archival_memory_search(
    self,
    agent_state: AgentState,
    actor: User,
    query: str,
    tags: Optional[list[str]] = None,
    tag_match_mode: Literal["any", "all"] = "any",
    top_k: Optional[int] = None,
    start_datetime: Optional[str] = None,
    end_datetime: Optional[str] = None,
) -> Optional[str]:
    """Search the agent's archival memory via the shared agent-manager service."""
    # Delegate entirely to the shared service-layer implementation; any
    # exception it raises propagates unchanged to the dispatcher.
    return await self.agent_manager.search_agent_archival_memory_async(
        agent_id=agent_state.id,
        actor=actor,
        query=query,
        tags=tags,
        tag_match_mode=tag_match_mode,
        top_k=top_k,
        start_datetime=start_datetime,
        end_datetime=end_datetime,
    )
async def archival_memory_insert(
    self, agent_state: AgentState, actor: User, content: str, tags: Optional[list[str]] = None
) -> Optional[str]:
    """Persist `content` as a new archival passage, then recompile the system prompt."""
    # Store the passage first...
    await self.passage_manager.insert_passage(
        agent_state=agent_state,
        text=content,
        actor=actor,
        tags=tags,
    )
    # ...then force a system-prompt rebuild so the change is reflected in context.
    await self.agent_manager.rebuild_system_prompt_async(agent_id=agent_state.id, actor=actor, force=True)
    return None
async def core_memory_append(self, agent_state: AgentState, actor: User, label: str, content: str) -> str:
    """Append `content` on a new line at the end of the memory block `label`."""
    block = agent_state.memory.get_block(label)
    if block.read_only:
        raise ValueError(f"{READ_ONLY_BLOCK_EDIT_ERROR}")
    # Append with a newline separator and persist the updated block.
    updated_value = str(block.value) + "\n" + str(content)
    agent_state.memory.update_block_value(label=label, value=updated_value)
    await self.agent_manager.update_memory_if_changed_async(agent_id=agent_state.id, new_memory=agent_state.memory, actor=actor)
    return updated_value
async def core_memory_replace(
    self,
    agent_state: AgentState,
    actor: User,
    label: str,
    old_content: str,
    new_content: str,
) -> str:
    """Replace every occurrence of `old_content` in memory block `label` with `new_content`."""
    block = agent_state.memory.get_block(label)
    if block.read_only:
        raise ValueError(f"{READ_ONLY_BLOCK_EDIT_ERROR}")
    current_value = str(block.value)
    # Require the old text to be present before doing anything.
    if old_content not in current_value:
        raise ValueError(f"Old content '{old_content}' not found in memory block '{label}'")
    updated_value = current_value.replace(str(old_content), str(new_content))
    agent_state.memory.update_block_value(label=label, value=updated_value)
    await self.agent_manager.update_memory_if_changed_async(agent_id=agent_state.id, new_memory=agent_state.memory, actor=actor)
    return updated_value
async def memory_replace(
    self,
    agent_state: AgentState,
    actor: User,
    label: str,
    old_string: str,
    new_string: str,
) -> str:
    """Replace a single, unique occurrence of `old_string` in block `label` with `new_string`.

    Rejects inputs carrying display-only line-number artifacts, and refuses to
    act when the target text is missing or ambiguous (multiple occurrences).
    """
    if agent_state.memory.get_block(label).read_only:
        raise ValueError(f"{READ_ONLY_BLOCK_EDIT_ERROR}")

    # Guard: the model must not echo back display-only line numbers.
    if MEMORY_TOOLS_LINE_NUMBER_PREFIX_REGEX.search(old_string):
        raise ValueError(
            "old_string contains a line number prefix, which is not allowed. "
            "Do not include line numbers when calling memory tools (line "
            "numbers are for display purposes only)."
        )
    if CORE_MEMORY_LINE_NUMBER_WARNING in old_string:
        raise ValueError(
            "old_string contains a line number warning, which is not allowed. "
            "Do not include line number information when calling memory tools "
            "(line numbers are for display purposes only)."
        )
    if MEMORY_TOOLS_LINE_NUMBER_PREFIX_REGEX.search(new_string):
        raise ValueError(
            "new_string contains a line number prefix, which is not allowed. "
            "Do not include line numbers when calling memory tools (line "
            "numbers are for display purposes only)."
        )

    # Normalize tabs on both sides before matching.
    old_string = str(old_string).expandtabs()
    new_string = str(new_string).expandtabs()
    current_value = str(agent_state.memory.get_block(label).value).expandtabs()

    # The target must appear exactly once for an unambiguous edit.
    hit_count = current_value.count(old_string)
    if hit_count == 0:
        raise ValueError(
            f"No replacement was performed, old_string `{old_string}` did not appear verbatim in memory block with label `{label}`."
        )
    if hit_count > 1:
        hit_lines = [idx + 1 for idx, line in enumerate(current_value.split("\n")) if old_string in line]
        raise ValueError(
            f"No replacement was performed. Multiple occurrences of old_string `{old_string}` in lines {hit_lines}. Please ensure it is unique."
        )

    # Apply the edit and persist the block.
    new_value = current_value.replace(str(old_string), str(new_string))
    agent_state.memory.update_block_value(label=label, value=new_value)
    await self.agent_manager.update_memory_if_changed_async(agent_id=agent_state.id, new_memory=agent_state.memory, actor=actor)
    return new_value
async def memory_apply_patch(self, agent_state: AgentState, actor: User, label: str, patch: str) -> str:
    """Apply a simplified unified-diff style patch to one or more memory blocks.

    Backwards compatible behavior:
    - If `patch` contains no "***" headers, this behaves like the legacy implementation and
      applies the patch to the single memory block identified by `label`.

    Extended, codex-style behavior (multi-block):
    - `*** Add Block: <label>` (+ lines become initial content; optional `Description:` header)
    - `*** Delete Block: <label>`
    - `*** Update Block: <label>` (apply unified-diff hunks to that block)
    - `*** Move to: <new_label>` (rename the most recent block in the patch)
    """
    # Guardrails: forbid visual line numbers and warning banners
    if MEMORY_TOOLS_LINE_NUMBER_PREFIX_REGEX.search(patch or ""):
        raise ValueError(
            "Patch contains a line number prefix, which is not allowed. Do not include line numbers (they are for display only)."
        )
    if CORE_MEMORY_LINE_NUMBER_WARNING in (patch or ""):
        raise ValueError("Patch contains the line number warning banner, which is not allowed. Provide only the text to edit.")

    # Normalize tabs so hunk matching is consistent with stored block values.
    patch = str(patch).expandtabs()

    def normalize_label_to_path(lbl: str) -> str:
        # Keep consistent with other memory tool path parsing
        return f"/memories/{lbl.strip()}"

    def apply_unified_patch_to_value(current_value: str, patch_text: str) -> str:
        # Apply unified-diff style hunks to a single block value.
        current_value = str(current_value).expandtabs()
        patch_text = str(patch_text).expandtabs()
        current_lines = current_value.split("\n")

        # Ignore common diff headers
        raw_lines = patch_text.splitlines()
        patch_lines = [ln for ln in raw_lines if not ln.startswith("*** ") and not ln.startswith("---") and not ln.startswith("+++")]

        # Split into hunks using '@@' as delimiter
        hunks: list[list[str]] = []
        h: list[str] = []
        for ln in patch_lines:
            if ln.startswith("@@"):
                if h:
                    hunks.append(h)
                h = []
                continue
            if ln.startswith(" ") or ln.startswith("-") or ln.startswith("+"):
                h.append(ln)
            elif ln.strip() == "":
                # Treat blank line as context for empty string line
                h.append(" ")
            else:
                # Skip unknown metadata lines
                continue
        if h:
            hunks.append(h)

        if not hunks:
            raise ValueError("No applicable hunks found in patch. Ensure lines start with ' ', '-', or '+'.")

        def find_all_subseq(hay: list[str], needle: list[str]) -> list[int]:
            # Return every start index where `needle` appears as a contiguous run in `hay`.
            out: list[int] = []
            n = len(needle)
            if n == 0:
                return out
            for i in range(0, len(hay) - n + 1):
                if hay[i : i + n] == needle:
                    out.append(i)
            return out

        # Apply each hunk sequentially against the rolling buffer
        for hunk in hunks:
            # `expected` is the pre-image (context + removed lines);
            # `replacement` is the post-image (context + added lines).
            expected: list[str] = []
            replacement: list[str] = []
            for ln in hunk:
                if ln.startswith(" "):
                    line = ln[1:]
                    expected.append(line)
                    replacement.append(line)
                elif ln.startswith("-"):
                    line = ln[1:]
                    expected.append(line)
                elif ln.startswith("+"):
                    line = ln[1:]
                    replacement.append(line)
            if not expected and replacement:
                # Pure insertion with no context: append at end
                current_lines = current_lines + replacement
                continue
            matches = find_all_subseq(current_lines, expected)
            if len(matches) == 0:
                sample = "\n".join(expected[:4])
                raise ValueError(
                    "Failed to apply patch: expected hunk context not found in the memory block. "
                    f"Verify the target lines exist and try providing more context. Expected start:\n{sample}"
                )
            if len(matches) > 1:
                raise ValueError(
                    "Failed to apply patch: hunk context matched multiple places in the memory block. "
                    "Please add more unique surrounding context to disambiguate."
                )
            idx = matches[0]
            end = idx + len(expected)
            # Splice the replacement over the matched pre-image span.
            current_lines = current_lines[:idx] + replacement + current_lines[end:]
        return "\n".join(current_lines)

    def is_extended_patch(patch_text: str) -> bool:
        # A patch is "extended" when it contains any codex-style block header.
        return any(
            ln.startswith("*** Add Block:")
            or ln.startswith("*** Delete Block:")
            or ln.startswith("*** Update Block:")
            or ln.startswith("*** Move to:")
            for ln in patch_text.splitlines()
        )

    # Legacy mode: patch targets the provided `label`
    if not is_extended_patch(patch):
        try:
            memory_block = agent_state.memory.get_block(label)
        except KeyError:
            raise ValueError(f"Error: Memory block '{label}' does not exist")
        if memory_block.read_only:
            raise ValueError(f"{READ_ONLY_BLOCK_EDIT_ERROR}")
        new_value = apply_unified_patch_to_value(str(memory_block.value), patch)
        agent_state.memory.update_block_value(label=label, value=new_value)
        await self.agent_manager.update_memory_if_changed_async(agent_id=agent_state.id, new_memory=agent_state.memory, actor=actor)
        return new_value

    # Extended mode: parse codex-like patch operations for memory blocks
    lines = patch.splitlines()
    i = 0
    actions: list[dict] = []
    current_action: Optional[dict] = None
    last_action_label: Optional[str] = None

    def flush_action():
        # Move the in-progress add/update action (if any) into `actions`.
        nonlocal current_action, actions
        if current_action is not None:
            actions.append(current_action)
            current_action = None

    while i < len(lines):
        ln = lines[i]
        if ln.startswith("*** Add Block:"):
            flush_action()
            target_label = ln.split(":", 1)[1].strip()
            if not target_label:
                raise ValueError("*** Add Block: must specify a non-empty label")
            current_action = {"kind": "add", "label": target_label, "description": "", "content_lines": []}
            last_action_label = target_label
            i += 1
            # Optional description header: Description: ... (single-line)
            if i < len(lines) and lines[i].startswith("Description:"):
                current_action["description"] = lines[i].split(":", 1)[1].strip()
                i += 1
            continue
        if ln.startswith("*** Delete Block:"):
            flush_action()
            target_label = ln.split(":", 1)[1].strip()
            if not target_label:
                raise ValueError("*** Delete Block: must specify a non-empty label")
            actions.append({"kind": "delete", "label": target_label})
            last_action_label = target_label
            i += 1
            continue
        if ln.startswith("*** Update Block:"):
            flush_action()
            target_label = ln.split(":", 1)[1].strip()
            if not target_label:
                raise ValueError("*** Update Block: must specify a non-empty label")
            current_action = {"kind": "update", "label": target_label, "patch_lines": []}
            last_action_label = target_label
            i += 1
            continue
        if ln.startswith("*** Move to:"):
            # NOTE(review): the rename is appended without flushing a pending
            # Add/Update `current_action`, so it lands in `actions` BEFORE the
            # action it modifies and will execute first — confirm intended.
            new_label = ln.split(":", 1)[1].strip()
            if not new_label:
                raise ValueError("*** Move to: must specify a non-empty new label")
            if last_action_label is None:
                raise ValueError("*** Move to: must follow an Add/Update/Delete header")
            actions.append({"kind": "rename", "old_label": last_action_label, "new_label": new_label})
            last_action_label = new_label
            i += 1
            continue
        # Collect body lines for current action
        if current_action is not None:
            if current_action["kind"] == "add":
                if ln.startswith("+"):
                    current_action["content_lines"].append(ln[1:])
                elif ln.strip() == "":
                    current_action["content_lines"].append("")
                else:
                    # ignore unknown metadata lines
                    pass
            elif current_action["kind"] == "update":
                current_action["patch_lines"].append(ln)
            i += 1
            continue
        # Otherwise ignore unrelated lines (e.g. leading @@ markers)
        i += 1

    flush_action()

    if not actions:
        raise ValueError("No operations found. Provide at least one of: *** Add Block, *** Delete Block, *** Update Block.")

    # Execute the parsed operations in order, collecting a summary per action.
    results: list[str] = []
    for action in actions:
        kind = action["kind"]
        if kind == "add":
            try:
                agent_state.memory.get_block(action["label"])
                # If we get here, the block exists
                raise ValueError(f"Error: Memory block '{action['label']}' already exists")
            except KeyError:
                # Block doesn't exist, which is what we want for adding
                pass
            content = "\n".join(action["content_lines"]).rstrip("\n")
            await self.memory_create(
                agent_state,
                actor,
                path=normalize_label_to_path(action["label"]),
                description=action.get("description", ""),
                file_text=content,
            )
            results.append(f"Created memory block '{action['label']}'")
        elif kind == "delete":
            await self.memory_delete(agent_state, actor, path=normalize_label_to_path(action["label"]))
            results.append(f"Deleted memory block '{action['label']}'")
        elif kind == "rename":
            await self.memory_rename(
                agent_state,
                actor,
                old_path=normalize_label_to_path(action["old_label"]),
                new_path=normalize_label_to_path(action["new_label"]),
            )
            results.append(f"Renamed memory block '{action['old_label']}' to '{action['new_label']}'")
        elif kind == "update":
            try:
                memory_block = agent_state.memory.get_block(action["label"])
            except KeyError:
                raise ValueError(f"Error: Memory block '{action['label']}' does not exist")
            if memory_block.read_only:
                raise ValueError(f"{READ_ONLY_BLOCK_EDIT_ERROR}")
            patch_text = "\n".join(action["patch_lines"])
            new_value = apply_unified_patch_to_value(str(memory_block.value), patch_text)
            agent_state.memory.update_block_value(label=action["label"], value=new_value)
            await self.agent_manager.update_memory_if_changed_async(agent_id=agent_state.id, new_memory=agent_state.memory, actor=actor)
            results.append(f"Updated memory block '{action['label']}'")
        else:
            raise ValueError(f"Unknown operation kind: {kind}")

    return (
        "Successfully applied memory patch operations. "
        "Your system prompt has been recompiled with the updated memory contents and is now active in your context.\n\n"
        "Operations completed:\n- " + "\n- ".join(results)
    )
async def memory_insert(
self,
agent_state: AgentState,
actor: User,
label: str,
new_string: str,
insert_line: int = -1,
) -> str:
if agent_state.memory.get_block(label).read_only:
raise ValueError(f"{READ_ONLY_BLOCK_EDIT_ERROR}")
if bool(MEMORY_TOOLS_LINE_NUMBER_PREFIX_REGEX.search(new_string)):
raise ValueError(
"new_string contains a line number prefix, which is not allowed. Do not "
"include line numbers when calling memory tools (line numbers are for "
"display purposes only)."
)
if CORE_MEMORY_LINE_NUMBER_WARNING in new_string:
raise ValueError(
"new_string contains a line number warning, which is not allowed. Do not "
"include line number information when calling memory tools (line numbers "
"are for display purposes only)."
)
current_value = str(agent_state.memory.get_block(label).value).expandtabs()
new_string = str(new_string).expandtabs()
current_value_lines = current_value.split("\n")
n_lines = len(current_value_lines)
# Check if we're in range, from 0 (pre-line), to 1 (first line), to n_lines (last line)
if insert_line == -1:
insert_line = n_lines
elif insert_line < 0 or insert_line > n_lines:
raise ValueError(
f"Invalid `insert_line` parameter: {insert_line}. It should be within "
f"the range of lines of the memory block: {[0, n_lines]}, or -1 to "
f"append to the end of the memory block."
)
# Insert the new string as a line
SNIPPET_LINES = 3
new_string_lines = new_string.split("\n")
new_value_lines = current_value_lines[:insert_line] + new_string_lines + current_value_lines[insert_line:]
snippet_lines = (
current_value_lines[max(0, insert_line - SNIPPET_LINES) : insert_line]
+ new_string_lines
+ current_value_lines[insert_line : insert_line + SNIPPET_LINES]
)
# Collate into the new value to update
new_value = "\n".join(new_value_lines)
"\n".join(snippet_lines)
# Write into the block
agent_state.memory.update_block_value(label=label, value=new_value)
await self.agent_manager.update_memory_if_changed_async(agent_id=agent_state.id, new_memory=agent_state.memory, actor=actor)
return new_value
async def memory_rethink(self, agent_state: AgentState, actor: User, label: str, new_memory: str) -> str:
if agent_state.memory.get_block(label).read_only:
raise ValueError(f"{READ_ONLY_BLOCK_EDIT_ERROR}")
if bool(MEMORY_TOOLS_LINE_NUMBER_PREFIX_REGEX.search(new_memory)):
raise ValueError(
"new_memory contains a line number prefix, which is not allowed. Do not "
"include line numbers when calling memory tools (line numbers are for "
"display purposes only)."
)
if CORE_MEMORY_LINE_NUMBER_WARNING in new_memory:
raise ValueError(
"new_memory contains a line number warning, which is not allowed. Do not "
"include line number information when calling memory tools (line numbers "
"are for display purposes only)."
)
try:
agent_state.memory.get_block(label)
except KeyError:
# Block doesn't exist, create it
from letta.schemas.block import Block
new_block = Block(label=label, value=new_memory)
agent_state.memory.set_block(new_block)
agent_state.memory.update_block_value(label=label, value=new_memory)
await self.agent_manager.update_memory_if_changed_async(agent_id=agent_state.id, new_memory=agent_state.memory, actor=actor)
return new_memory
async def memory_finish_edits(self, agent_state: AgentState, actor: User) -> None:
return None
async def memory_delete(self, agent_state: AgentState, actor: User, path: str) -> str:
"""Delete a memory block by detaching it from the agent."""
# Extract memory block label from path
label = path.removeprefix("/memories/").removeprefix("/").replace("/", "_")
try:
# Check if memory block exists
memory_block = agent_state.memory.get_block(label)
if memory_block is None:
raise ValueError(f"Error: Memory block '{label}' does not exist")
# Detach the block from the agent
updated_agent_state = await self.agent_manager.detach_block_async(
agent_id=agent_state.id, block_id=memory_block.id, actor=actor
)
# Update the agent state with the updated memory from the database
agent_state.memory = updated_agent_state.memory
return (
f"Successfully deleted memory block '{label}'. "
f"Your system prompt has been recompiled without this memory block and is now active in your context."
)
except NoResultFound:
# Catch the specific error and re-raise with human-readable names
raise ValueError(f"Memory block '{label}' is not attached to agent '{agent_state.name}'")
except Exception as e:
return f"Error performing delete: {str(e)}"
async def memory_update_description(self, agent_state: AgentState, actor: User, path: str, description: str) -> str:
"""Update the description of a memory block."""
label = path.removeprefix("/memories/").removeprefix("/").replace("/", "_")
try:
# Check if old memory block exists
memory_block = agent_state.memory.get_block(label)
if memory_block is None:
raise ValueError(f"Error: Memory block '{label}' does not exist")
await self.block_manager.update_block_async(
block_id=memory_block.id, block_update=BlockUpdate(description=description), actor=actor
)
await self.agent_manager.rebuild_system_prompt_async(agent_id=agent_state.id, actor=actor, force=True)
return (
f"Successfully updated description of memory block '{label}'. "
f"Your system prompt has been recompiled with the updated description and is now active in your context."
)
except NoResultFound:
# Catch the specific error and re-raise with human-readable names
raise ValueError(f"Memory block '{label}' not found for agent '{agent_state.name}'")
except Exception as e:
raise Exception(f"Error performing update_description: {str(e)}")
async def memory_rename(self, agent_state: AgentState, actor: User, old_path: str, new_path: str) -> str:
"""Rename a memory block by copying content to new label and detaching old one."""
# Extract memory block labels from paths
old_label = old_path.removeprefix("/memories/").removeprefix("/").replace("/", "_")
new_label = new_path.removeprefix("/memories/").removeprefix("/").replace("/", "_")
try:
# Check if old memory block exists
memory_block = agent_state.memory.get_block(old_label)
if memory_block is None:
raise ValueError(f"Error: Memory block '{old_label}' does not exist")
await self.block_manager.update_block_async(block_id=memory_block.id, block_update=BlockUpdate(label=new_label), actor=actor)
await self.agent_manager.rebuild_system_prompt_async(agent_id=agent_state.id, actor=actor, force=True)
return (
f"Successfully renamed memory block '{old_label}' to '{new_label}'. "
f"Your system prompt has been recompiled with the renamed memory block and is now active in your context."
)
except NoResultFound:
# Catch the specific error and re-raise with human-readable names
raise ValueError(f"Memory block '{old_label}' not found for agent '{agent_state.name}'")
except Exception as e:
raise Exception(f"Error performing rename: {str(e)}")
async def memory_create(
self, agent_state: AgentState, actor: User, path: str, description: str, file_text: Optional[str] = None
) -> str:
"""Create a memory block by setting its value to an empty string."""
from letta.schemas.block import Block
label = path.removeprefix("/memories/").removeprefix("/")
# Create a new block and persist it to the database
new_block = Block(label=label, value=file_text if file_text else "", description=description)
persisted_block = await self.block_manager.create_or_update_block_async(new_block, actor)
# Attach the block to the agent
await self.agent_manager.attach_block_async(agent_id=agent_state.id, block_id=persisted_block.id, actor=actor)
# Add the persisted block to memory
agent_state.memory.set_block(persisted_block)
await self.agent_manager.update_memory_if_changed_async(agent_id=agent_state.id, new_memory=agent_state.memory, actor=actor)
return (
f"Successfully created memory block '{label}'. "
f"Your system prompt has been recompiled with the new memory block and is now active in your context."
)
async def memory_str_replace(
self,
agent_state: AgentState,
actor: User,
path: str,
old_string: str,
new_string: str,
) -> str:
"""Replace text in a memory block."""
label = path.removeprefix("/memories/").removeprefix("/")
memory_block = agent_state.memory.get_block(label)
if memory_block is None:
raise ValueError(f"Error: Memory block '{label}' does not exist")
if memory_block.read_only:
raise ValueError(f"{READ_ONLY_BLOCK_EDIT_ERROR}")
if bool(MEMORY_TOOLS_LINE_NUMBER_PREFIX_REGEX.search(old_string)):
raise ValueError(
"old_string contains a line number prefix, which is not allowed. "
"Do not include line numbers when calling memory tools (line "
"numbers are for display purposes only)."
)
if CORE_MEMORY_LINE_NUMBER_WARNING in old_string:
raise ValueError(
"old_string contains a line number warning, which is not allowed. "
"Do not include line number information when calling memory tools "
"(line numbers are for display purposes only)."
)
if bool(MEMORY_TOOLS_LINE_NUMBER_PREFIX_REGEX.search(new_string)):
raise ValueError(
"new_string contains a line number prefix, which is not allowed. "
"Do not include line numbers when calling memory tools (line "
"numbers are for display purposes only)."
)
old_string = str(old_string).expandtabs()
new_string = str(new_string).expandtabs()
current_value = str(memory_block.value).expandtabs()
# Check if old_string is unique in the block
occurences = current_value.count(old_string)
if occurences == 0:
raise ValueError(
f"No replacement was performed, old_string `{old_string}` did not appear verbatim in memory block with label `{label}`."
)
elif occurences > 1:
content_value_lines = current_value.split("\n")
lines = [idx + 1 for idx, line in enumerate(content_value_lines) if old_string in line]
raise ValueError(
f"No replacement was performed. Multiple occurrences of old_string `{old_string}` in lines {lines}. Please ensure it is unique."
)
# Replace old_string with new_string
new_value = current_value.replace(str(old_string), str(new_string))
# Write the new content to the block
await self.block_manager.update_block_async(block_id=memory_block.id, block_update=BlockUpdate(value=new_value), actor=actor)
# Keep in-memory AgentState consistent with DB
agent_state.memory.update_block_value(label=label, value=new_value)
await self.agent_manager.rebuild_system_prompt_async(agent_id=agent_state.id, actor=actor, force=True)
return new_value
async def memory_str_insert(self, agent_state: AgentState, actor: User, path: str, insert_text: str, insert_line: int = -1) -> str:
"""Insert text into a memory block at a specific line."""
label = path.removeprefix("/memories/").removeprefix("/").replace("/", "_")
memory_block = agent_state.memory.get_block(label)
if memory_block is None:
raise ValueError(f"Error: Memory block '{label}' does not exist")
if memory_block.read_only:
raise ValueError(f"{READ_ONLY_BLOCK_EDIT_ERROR}")
if bool(MEMORY_TOOLS_LINE_NUMBER_PREFIX_REGEX.search(insert_text)):
raise ValueError(
"insert_text contains a line number prefix, which is not allowed. "
"Do not include line numbers when calling memory tools (line "
"numbers are for display purposes only)."
)
if CORE_MEMORY_LINE_NUMBER_WARNING in insert_text:
raise ValueError(
"insert_text contains a line number warning, which is not allowed. "
"Do not include line number information when calling memory tools "
"(line numbers are for display purposes only)."
)
current_value = str(memory_block.value).expandtabs()
insert_text = str(insert_text).expandtabs()
current_value_lines = current_value.split("\n")
n_lines = len(current_value_lines)
# Check if we're in range, from 0 (pre-line), to 1 (first line), to n_lines (last line)
if insert_line == -1:
insert_line = n_lines
elif insert_line < 0 or insert_line > n_lines:
raise ValueError(
f"Invalid `insert_line` parameter: {insert_line}. It should be within "
f"the range of lines of the memory block: {[0, n_lines]}, or -1 to "
f"append to the end of the memory block."
)
# Insert the new text as a line
SNIPPET_LINES = 3
insert_text_lines = insert_text.split("\n")
new_value_lines = current_value_lines[:insert_line] + insert_text_lines + current_value_lines[insert_line:]
snippet_lines = (
current_value_lines[max(0, insert_line - SNIPPET_LINES) : insert_line]
+ insert_text_lines
+ current_value_lines[insert_line : insert_line + SNIPPET_LINES]
)
# Collate into the new value to update
new_value = "\n".join(new_value_lines)
"\n".join(snippet_lines)
# Write into the block
await self.block_manager.update_block_async(block_id=memory_block.id, block_update=BlockUpdate(value=new_value), actor=actor)
# Keep in-memory AgentState consistent with DB
agent_state.memory.update_block_value(label=label, value=new_value)
await self.agent_manager.rebuild_system_prompt_async(agent_id=agent_state.id, actor=actor, force=True)
return new_value
async def memory(
self,
agent_state: AgentState,
actor: User,
command: str,
file_text: Optional[str] = None,
description: Optional[str] = None,
path: Optional[str] = None,
old_string: Optional[str] = None,
new_string: Optional[str] = None,
insert_line: Optional[int] = None,
insert_text: Optional[str] = None,
old_path: Optional[str] = None,
new_path: Optional[str] = None,
) -> Optional[str]:
if command == "create":
if path is None:
raise ValueError("Error: path is required for create command")
if description is None:
raise ValueError("Error: description is required for create command")
return await self.memory_create(agent_state, actor, path, description, file_text)
elif command == "str_replace":
if path is None:
raise ValueError("Error: path is required for str_replace command")
if old_string is None:
raise ValueError("Error: old_string is required for str_replace command")
if new_string is None:
raise ValueError("Error: new_string is required for str_replace command")
return await self.memory_str_replace(agent_state, actor, path, old_string, new_string)
elif command == "insert":
if path is None:
raise ValueError("Error: path is required for insert command")
if insert_text is None:
raise ValueError("Error: insert_text is required for insert command")
return await self.memory_str_insert(agent_state, actor, path, insert_text, insert_line)
elif command == "delete":
if path is None:
raise ValueError("Error: path is required for delete command")
return await self.memory_delete(agent_state, actor, path)
elif command == "rename":
if path and description:
return await self.memory_update_description(agent_state, actor, path, description)
elif old_path and new_path:
return await self.memory_rename(agent_state, actor, old_path, new_path)
else:
raise ValueError(
"Error: path and description are required for update_description command, or old_path and new_path are required for rename command"
)
else:
raise ValueError(f"Error: Unknown command '{command}'. Supported commands: create, str_replace, insert, delete, rename")
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/tool_executor/core_tool_executor.py",
"license": "Apache License 2.0",
"lines": 906,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/services/tool_executor/files_tool_executor.py | import asyncio
import re
from typing import Any, Dict, List, Optional
from sqlalchemy.exc import NoResultFound
from letta.constants import PINECONE_TEXT_FIELD_NAME
from letta.functions.types import FileOpenRequest
from letta.helpers.pinecone_utils import search_pinecone_index, should_use_pinecone
from letta.helpers.tpuf_client import should_use_tpuf
from letta.log import get_logger
from letta.otel.tracing import trace_method
from letta.schemas.agent import AgentState
from letta.schemas.enums import VectorDBProvider
from letta.schemas.sandbox_config import SandboxConfig
from letta.schemas.source import Source
from letta.schemas.tool import Tool
from letta.schemas.tool_execution_result import ToolExecutionResult
from letta.schemas.user import User
from letta.services.agent_manager import AgentManager
from letta.services.block_manager import BlockManager
from letta.services.file_manager import FileManager
from letta.services.file_processor.chunker.line_chunker import LineChunker
from letta.services.files_agents_manager import FileAgentManager
from letta.services.message_manager import MessageManager
from letta.services.passage_manager import PassageManager
from letta.services.run_manager import RunManager
from letta.services.source_manager import SourceManager
from letta.services.tool_executor.tool_executor_base import ToolExecutor
from letta.utils import get_friendly_error_msg
class LettaFileToolExecutor(ToolExecutor):
    """Executor for Letta file tools with direct implementation of functions."""

    # Production safety constants — guard rails against oversized inputs and
    # runaway searches in the file tools below.
    MAX_FILE_SIZE_BYTES = 50 * 1024 * 1024  # 50MB limit per file
    MAX_TOTAL_CONTENT_SIZE = 200 * 1024 * 1024  # 200MB total across all files
    MAX_REGEX_COMPLEXITY = 1000  # Max pattern length; prevents catastrophic backtracking
    MAX_MATCHES_PER_FILE = 20  # Limit matches per file (legacy, not used with new pagination)
    MAX_TOTAL_MATCHES = 50  # Keep original value for semantic search
    GREP_PAGE_SIZE = 20  # Number of grep matches to show per page
    GREP_TIMEOUT_SECONDS = 30  # Max time for grep_files operation
    MAX_CONTEXT_LINES = 1  # Lines of context around matches
    MAX_TOTAL_COLLECTED = 1000  # Reasonable upper limit to prevent memory issues
    def __init__(
        self,
        message_manager: MessageManager,
        agent_manager: AgentManager,
        block_manager: BlockManager,
        run_manager: RunManager,
        passage_manager: PassageManager,
        actor: User,
    ):
        """Wire the shared managers through the base executor and construct file-specific managers.

        Args:
            message_manager: Manager for agent messages (passed through to the base class).
            agent_manager: Manager for agent state.
            block_manager: Manager for memory blocks.
            run_manager: Manager for runs.
            passage_manager: Manager for passages.
            actor: User on whose behalf tool calls execute.
        """
        super().__init__(
            message_manager=message_manager,
            agent_manager=agent_manager,
            block_manager=block_manager,
            run_manager=run_manager,
            passage_manager=passage_manager,
            actor=actor,
        )

        # TODO: These file-specific managers should be injected (passed in) to ease testing.
        self.files_agents_manager = FileAgentManager()
        self.file_manager = FileManager()
        self.source_manager = SourceManager()
        self.logger = get_logger(__name__)
async def execute(
self,
function_name: str,
function_args: dict,
tool: Tool,
actor: User,
agent_state: Optional[AgentState] = None,
sandbox_config: Optional[SandboxConfig] = None,
sandbox_env_vars: Optional[Dict[str, Any]] = None,
) -> ToolExecutionResult:
if agent_state is None:
raise ValueError("Agent state is required for file tools")
function_map = {
"open_files": self.open_files,
"grep_files": self.grep_files,
"semantic_search_files": self.semantic_search_files,
}
if function_name not in function_map:
raise ValueError(f"Unknown function: {function_name}")
function_args_copy = function_args.copy()
try:
func_return = await function_map[function_name](agent_state, **function_args_copy)
return ToolExecutionResult(
status="success",
func_return=func_return,
agent_state=agent_state,
)
except Exception as e:
return ToolExecutionResult(
status="error",
func_return=e,
agent_state=agent_state,
stderr=[get_friendly_error_msg(function_name=function_name, exception_name=type(e).__name__, exception_message=str(e))],
)
    @trace_method
    async def open_files(self, agent_state: AgentState, file_requests: List[FileOpenRequest], close_all_others: bool = False) -> str:
        """Open one or more files and load their contents into memory blocks.

        Args:
            agent_state: Current agent state; its file limits and memory blocks are consulted.
            file_requests: Files to open; each may carry a 0-indexed `offset` and a `length`
                selecting the line range to load.
            close_all_others: When True, close every attached file not named in `file_requests`.

        Returns:
            A human-readable summary of what was opened (1-indexed line ranges for display)
            and which files were closed as a side effect (LRU eviction or close_all_others).

        Raises:
            ValueError: on malformed requests, too many files, an empty request list, or a
                requested file that is not attached to the agent.
        """
        # Parse raw dictionaries into FileOpenRequest objects if needed
        parsed_requests = []
        for req in file_requests:
            if isinstance(req, dict):
                # LLM returned a dictionary, parse it into FileOpenRequest
                parsed_requests.append(FileOpenRequest(**req))
            elif isinstance(req, FileOpenRequest):
                # Already a FileOpenRequest object
                parsed_requests.append(req)
            else:
                raise ValueError(f"Invalid file request type: {type(req)}. Expected dict or FileOpenRequest.")

        file_requests = parsed_requests

        # Validate file count first
        if len(file_requests) > agent_state.max_files_open:
            raise ValueError(
                f"Cannot open {len(file_requests)} files: exceeds configured maximum limit of {agent_state.max_files_open} files"
            )

        if not file_requests:
            raise ValueError("No file requests provided")

        # Extract file names for various operations
        file_names = [req.file_name for req in file_requests]

        # Get all currently attached files for error reporting
        file_blocks = agent_state.memory.file_blocks
        attached_file_names = [fb.label for fb in file_blocks]

        # Close all other files if requested
        closed_by_close_all_others = []
        if close_all_others:
            closed_by_close_all_others = await self.files_agents_manager.close_all_other_files(
                agent_id=agent_state.id, keep_file_names=file_names, actor=self.actor
            )

        # Process each file
        opened_files = []
        all_closed_files = []
        all_previous_ranges = {}  # Collect all previous ranges from all files

        for file_request in file_requests:
            file_name = file_request.file_name
            offset = file_request.offset
            length = file_request.length

            # Use 0-indexed offset/length directly for LineChunker
            start, end = None, None
            if offset is not None or length is not None:
                if offset is not None and offset < 0:
                    raise ValueError(f"Offset for file {file_name} must be >= 0 (0-indexed), got {offset}")
                if length is not None and length < 1:
                    raise ValueError(f"Length for file {file_name} must be >= 1, got {length}")
                # Use offset directly as it's already 0-indexed
                start = offset if offset is not None else None
                if start is not None and length is not None:
                    end = start + length
                else:
                    end = None

            # Validate file exists and is attached to agent
            file_agent = await self.files_agents_manager.get_file_agent_by_file_name(
                agent_id=agent_state.id, file_name=file_name, actor=self.actor
            )
            if not file_agent:
                raise ValueError(
                    f"{file_name} not attached - did you get the filename correct? Currently you have the following files attached: {attached_file_names}"
                )
            file_id = file_agent.file_id

            file = await self.file_manager.get_file_by_id(file_id=file_id, actor=self.actor, include_content=True)

            # Process file content: chunk to the requested line range for the memory block
            content_lines = LineChunker().chunk_text(file_metadata=file, start=start, end=end, validate_range=True)
            visible_content = "\n".join(content_lines)

            # Handle LRU eviction and file opening
            closed_files, _was_already_open, previous_ranges = await self.files_agents_manager.enforce_max_open_files_and_open(
                agent_id=agent_state.id,
                file_id=file_id,
                file_name=file_name,
                source_id=file.source_id,
                actor=self.actor,
                visible_content=visible_content,
                max_files_open=agent_state.max_files_open,
                start_line=start + 1 if start is not None else None,  # convert to 1-indexed for user display
                end_line=end if end is not None else None,  # end is already exclusive, shows as 1-indexed inclusive
            )

            opened_files.append(file_name)
            all_closed_files.extend(closed_files)
            all_previous_ranges.update(previous_ranges)  # Merge previous ranges from this file

        # Update access timestamps for all opened files efficiently
        await self.files_agents_manager.mark_access_bulk(agent_id=agent_state.id, file_names=file_names, actor=self.actor)

        # Helper function to format previous range info (e.g. re-opening an already-open file)
        def format_previous_range(file_name: str) -> str:
            if file_name in all_previous_ranges:
                old_start, old_end = all_previous_ranges[file_name]
                if old_start is not None and old_end is not None:
                    return f" (previously lines {old_start}-{old_end})"
                elif old_start is not None:
                    return f" (previously lines {old_start}-end)"
                else:
                    return " (previously full file)"
            return ""

        # Build unified success message - treat single and multiple files consistently
        file_summaries = []
        for req in file_requests:
            previous_info = format_previous_range(req.file_name)
            if req.offset is not None and req.length is not None:
                # Display as 1-indexed for user readability: (offset+1) to (offset+length)
                start_line = req.offset + 1
                end_line = req.offset + req.length
                file_summaries.append(f"{req.file_name} (lines {start_line}-{end_line}){previous_info}")
            elif req.offset is not None:
                # Display as 1-indexed
                start_line = req.offset + 1
                file_summaries.append(f"{req.file_name} (lines {start_line}-end){previous_info}")
            else:
                file_summaries.append(f"{req.file_name}{previous_info}")

        if len(file_requests) == 1:
            success_msg = f"* Opened {file_summaries[0]}"
        else:
            success_msg = f"* Opened {len(file_requests)} files: {', '.join(file_summaries)}"

        # Add information about closed files
        if closed_by_close_all_others:
            success_msg += f"\nNote: Closed {len(closed_by_close_all_others)} file(s) due to close_all_others=True: {', '.join(closed_by_close_all_others)}"
        if all_closed_files:
            success_msg += (
                f"\nNote: Closed {len(all_closed_files)} least recently used file(s) due to open file limit: {', '.join(all_closed_files)}"
            )

        return success_msg
def _validate_regex_pattern(self, pattern: str) -> None:
"""Validate regex pattern to prevent catastrophic backtracking."""
if len(pattern) > self.MAX_REGEX_COMPLEXITY:
raise ValueError(f"Pattern too complex: {len(pattern)} chars > {self.MAX_REGEX_COMPLEXITY} limit")
# Test compile the pattern to catch syntax errors early
try:
re.compile(pattern, re.IGNORECASE | re.MULTILINE)
except re.error as e:
raise ValueError(f"Invalid regex pattern: {e}")
def _get_context_lines(
self,
formatted_lines: List[str],
match_line_num: int,
context_lines: int,
) -> List[str]:
"""Get context lines around a match from already-chunked lines.
Args:
formatted_lines: Already chunked lines from LineChunker (format: "line_num: content")
match_line_num: The 1-based line number of the match
context_lines: Number of context lines before and after
"""
if not formatted_lines or context_lines < 0:
return []
# Find the index of the matching line in the formatted_lines list
match_formatted_idx = None
for i, line in enumerate(formatted_lines):
if line and ":" in line:
try:
line_num = int(line.split(":", 1)[0].strip())
if line_num == match_line_num:
match_formatted_idx = i
break
except ValueError:
continue
if match_formatted_idx is None:
return []
# Calculate context range with bounds checking
start_idx = max(0, match_formatted_idx - context_lines)
end_idx = min(len(formatted_lines), match_formatted_idx + context_lines + 1)
# Extract context lines and add match indicator
context_lines_with_indicator = []
for i in range(start_idx, end_idx):
line = formatted_lines[i]
prefix = ">" if i == match_formatted_idx else " "
context_lines_with_indicator.append(f"{prefix} {line}")
return context_lines_with_indicator
    @trace_method
    async def grep_files(
        self,
        agent_state: AgentState,
        pattern: str,
        include: Optional[str] = None,
        context_lines: Optional[int] = 1,
        offset: Optional[int] = None,
    ) -> str:
        """
        Search for pattern in all attached files and return matches with context.

        Collects up to MAX_TOTAL_COLLECTED matches across all attached files,
        then paginates GREP_PAGE_SIZE at a time using `offset`. The whole scan
        is bounded by GREP_TIMEOUT_SECONDS.

        Args:
            agent_state: Current agent state
            pattern: Regular expression pattern to search for (case-insensitive)
            include: Optional pattern to filter filenames to include in the search
                (simple globs like "*.py" are converted to regex)
            context_lines (Optional[int]): Number of lines of context to show before and after each match.
                Equivalent to `-C` in grep_files. Defaults to 1.
            offset (Optional[int]): Number of matches to skip before showing results. Used for pagination.
                Defaults to 0 (show from first match).

        Returns:
            Formatted string with search results, file names, line numbers, and context

        Raises:
            ValueError: on an empty or invalid pattern/include filter.
            asyncio.TimeoutError: if the scan exceeds GREP_TIMEOUT_SECONDS.
        """
        if not pattern or not pattern.strip():
            raise ValueError("Empty search pattern provided")

        pattern = pattern.strip()
        self._validate_regex_pattern(pattern)

        # Validate include pattern if provided
        include_regex = None
        if include and include.strip():
            include = include.strip()
            # Convert glob pattern to regex if it looks like a glob pattern
            if "*" in include and not any(c in include for c in ["^", "$", "(", ")", "[", "]", "{", "}", "\\", "+"]):
                # Simple glob to regex conversion
                include_pattern = include.replace(".", r"\.").replace("*", ".*").replace("?", ".")
                if not include_pattern.endswith("$"):
                    include_pattern += "$"
            else:
                include_pattern = include

            self._validate_regex_pattern(include_pattern)
            include_regex = re.compile(include_pattern, re.IGNORECASE)

        # Get all attached files for this agent
        file_agents = await self.files_agents_manager.list_files_for_agent(
            agent_id=agent_state.id, per_file_view_window_char_limit=agent_state.per_file_view_window_char_limit, actor=self.actor
        )

        if not file_agents:
            return "No files are currently attached to search"

        # Filter files by filename pattern if include is specified
        if include_regex:
            original_count = len(file_agents)
            file_agents = [fa for fa in file_agents if include_regex.search(fa.file_name)]
            if not file_agents:
                return f"No files match the filename pattern '{include}' (filtered {original_count} files)"

        # Validate offset parameter
        if offset is not None and offset < 0:
            offset = 0  # Treat negative offsets as 0

        # Compile regex pattern with appropriate flags
        regex_flags = re.MULTILINE
        regex_flags |= re.IGNORECASE

        pattern_regex = re.compile(pattern, regex_flags)

        # Collect all matches first (up to a reasonable limit)
        all_matches = []  # List of tuples: (file_name, line_num, context_lines)
        total_content_size = 0
        files_processed = 0
        files_skipped = 0
        files_with_matches = set()  # Track files that had matches for LRU policy

        # Use asyncio timeout to prevent hanging
        async def _search_files():
            # Mutates the accumulators above; split out so the whole scan can be
            # wrapped in a single asyncio.wait_for timeout.
            nonlocal all_matches, total_content_size, files_processed, files_skipped, files_with_matches

            for file_agent in file_agents:
                # Load file content
                try:
                    file = await self.file_manager.get_file_by_id(file_id=file_agent.file_id, actor=self.actor, include_content=True)
                except NoResultFound:
                    files_skipped += 1
                    self.logger.warning(f"Grep: Skipping file {file_agent.file_name} - no content available")
                    continue

                # Check individual file size
                content_size = len(file.content.encode("utf-8"))
                if content_size > self.MAX_FILE_SIZE_BYTES:
                    files_skipped += 1
                    self.logger.warning(
                        f"Grep: Skipping file {file.file_name} - too large ({content_size:,} bytes > {self.MAX_FILE_SIZE_BYTES:,} limit)"
                    )
                    continue

                # Check total content size across all files
                total_content_size += content_size
                if total_content_size > self.MAX_TOTAL_CONTENT_SIZE:
                    # NOTE(review): only the tipping file is counted in files_skipped,
                    # though the break also skips every remaining file — confirm intent.
                    files_skipped += 1
                    self.logger.warning(
                        f"Grep: Skipping file {file.file_name} - total content size limit exceeded ({total_content_size:,} bytes > {self.MAX_TOTAL_CONTENT_SIZE:,} limit)"
                    )
                    break

                files_processed += 1

                # Use LineChunker to get all lines with proper formatting
                chunker = LineChunker()
                formatted_lines = chunker.chunk_text(file_metadata=file)

                # Remove metadata header
                if formatted_lines and formatted_lines[0].startswith("[Viewing"):
                    formatted_lines = formatted_lines[1:]

                # Search for matches in formatted lines
                for formatted_line in formatted_lines:
                    if len(all_matches) >= self.MAX_TOTAL_COLLECTED:
                        # Stop collecting if we hit the upper limit
                        break

                    # Extract line number and content from formatted line
                    if ":" in formatted_line:
                        try:
                            line_parts = formatted_line.split(":", 1)
                            line_num = int(line_parts[0].strip())
                            line_content = line_parts[1].strip() if len(line_parts) > 1 else ""
                        except (ValueError, IndexError):
                            continue

                        if pattern_regex.search(line_content):
                            # Mark this file as having matches for LRU tracking
                            files_with_matches.add(file.file_name)

                            context = self._get_context_lines(formatted_lines, match_line_num=line_num, context_lines=context_lines or 0)

                            # Store match data for later pagination
                            all_matches.append((file.file_name, line_num, context))

                # Break if we've collected enough matches
                if len(all_matches) >= self.MAX_TOTAL_COLLECTED:
                    break

        # Execute with timeout
        await asyncio.wait_for(_search_files(), timeout=self.GREP_TIMEOUT_SECONDS)

        # Mark access for files that had matches
        if files_with_matches:
            await self.files_agents_manager.mark_access_bulk(agent_id=agent_state.id, file_names=list(files_with_matches), actor=self.actor)

        # Handle no matches case
        total_matches = len(all_matches)
        if total_matches == 0:
            summary = f"No matches found for pattern: '{pattern}'"
            if include:
                summary += f" in files matching '{include}'"
            if files_skipped > 0:
                summary += f" (searched {files_processed} files, skipped {files_skipped})"
            return summary

        # Apply pagination
        start_idx = offset if offset else 0
        end_idx = start_idx + self.GREP_PAGE_SIZE
        paginated_matches = all_matches[start_idx:end_idx]

        # Check if we hit the collection limit
        hit_collection_limit = len(all_matches) >= self.MAX_TOTAL_COLLECTED

        # Format the paginated results
        results = []

        # Build summary showing the range of matches displayed
        if hit_collection_limit:
            # We collected MAX_TOTAL_COLLECTED but there might be more
            summary = f"Found {self.MAX_TOTAL_COLLECTED}+ total matches across {len(files_with_matches)} files (showing matches {start_idx + 1}-{min(end_idx, total_matches)} of {self.MAX_TOTAL_COLLECTED}+)"
        else:
            # We found all matches
            summary = f"Found {total_matches} total matches across {len(files_with_matches)} files (showing matches {start_idx + 1}-{min(end_idx, total_matches)} of {total_matches})"

        if files_skipped > 0:
            summary += f"\nNote: Skipped {files_skipped} files due to size limits"

        results.append(summary)
        results.append("=" * 80)

        # Add file summary - count matches per file
        file_match_counts = {}
        for file_name, _, _ in all_matches:
            file_match_counts[file_name] = file_match_counts.get(file_name, 0) + 1

        # Sort files by match count (descending) for better overview
        sorted_files = sorted(file_match_counts.items(), key=lambda x: x[1], reverse=True)
        results.append("\nFiles with matches:")
        for file_name, count in sorted_files:
            if hit_collection_limit and count >= self.MAX_TOTAL_COLLECTED:
                results.append(f"  - {file_name}: {count}+ matches")
            else:
                results.append(f"  - {file_name}: {count} matches")

        results.append("")  # blank line before matches

        # Format each match in the current page
        # NOTE(review): the loop target shadows the `context_lines` parameter;
        # harmless today because the parameter is not read after this point.
        for file_name, line_num, context_lines in paginated_matches:
            match_header = f"\n=== {file_name}:{line_num} ==="
            match_content = "\n".join(context_lines)
            results.append(f"{match_header}\n{match_content}")

        # Add navigation hint
        results.append("")  # blank line
        if end_idx < total_matches:
            if hit_collection_limit:
                results.append(f'To see more matches, call: grep_files(pattern="{pattern}", offset={end_idx})')
                results.append(
                    f"Note: Only the first {self.MAX_TOTAL_COLLECTED} matches were collected. There may be more matches beyond this limit."
                )
            else:
                results.append(f'To see more matches, call: grep_files(pattern="{pattern}", offset={end_idx})')
        else:
            if hit_collection_limit:
                results.append("Showing last page of collected matches. There may be more matches beyond the collection limit.")
            else:
                results.append("No more matches to show.")

        return "\n".join(results)
@trace_method
async def semantic_search_files(self, agent_state: AgentState, query: str, limit: int = 5) -> str:
"""
Search for text within attached files using semantic search and return passages with their source filenames.
Uses Pinecone if configured, otherwise falls back to traditional search.
Args:
agent_state: Current agent state
query: Search query for semantic matching
limit: Maximum number of results to return (default: 5)
Returns:
Formatted string with search results in IDE/terminal style
"""
if not query or not query.strip():
raise ValueError("Empty search query provided")
query = query.strip()
# Apply reasonable limit
limit = min(limit, self.MAX_TOTAL_MATCHES)
self.logger.info(f"Semantic search started for agent {agent_state.id} with query '{query}' (limit: {limit})")
# Check which vector DB to use - Turbopuffer takes precedence
attached_sources = await self.agent_manager.list_attached_sources_async(agent_id=agent_state.id, actor=self.actor)
attached_tpuf_sources = [source for source in attached_sources if source.vector_db_provider == VectorDBProvider.TPUF]
attached_pinecone_sources = [source for source in attached_sources if source.vector_db_provider == VectorDBProvider.PINECONE]
if not attached_tpuf_sources and not attached_pinecone_sources:
return await self._search_files_native(agent_state, query, limit)
results = []
# If both have items, we half the limit roughly
# TODO: This is very hacky bc it skips the re-ranking - but this is a temporary stopgap while we think about migrating data
if attached_tpuf_sources and attached_pinecone_sources:
limit = max(limit // 2, 1)
if should_use_tpuf() and attached_tpuf_sources:
tpuf_result = await self._search_files_turbopuffer(agent_state, attached_tpuf_sources, query, limit)
results.append(tpuf_result)
if should_use_pinecone() and attached_pinecone_sources:
pinecone_result = await self._search_files_pinecone(agent_state, attached_pinecone_sources, query, limit)
results.append(pinecone_result)
# combine results from both sources
if results:
return "\n\n".join(results)
# fallback if no results from either source
return "No results found"
async def _search_files_turbopuffer(self, agent_state: AgentState, attached_sources: List[Source], query: str, limit: int) -> str:
"""Search files using Turbopuffer vector database."""
# Get attached sources
source_ids = [source.id for source in attached_sources]
if not source_ids:
return "No valid source IDs found for attached files"
# Get all attached files for this agent
file_agents = await self.files_agents_manager.list_files_for_agent(
agent_id=agent_state.id, per_file_view_window_char_limit=agent_state.per_file_view_window_char_limit, actor=self.actor
)
if not file_agents:
return "No files are currently attached to search"
# Create a map of file_id to file_name for quick lookup
file_map = {fa.file_id: fa.file_name for fa in file_agents}
results = []
total_hits = 0
files_with_matches = {}
try:
from letta.helpers.tpuf_client import TurbopufferClient
tpuf_client = TurbopufferClient()
# Query Turbopuffer for all sources at once
search_results = await tpuf_client.query_file_passages(
source_ids=source_ids, # pass all source_ids as a list
organization_id=self.actor.organization_id,
actor=self.actor,
query_text=query,
search_mode="hybrid", # use hybrid search for best results
top_k=limit,
)
# Process search results
for passage, score, metadata in search_results:
if total_hits >= limit:
break
total_hits += 1
# get file name from our map
file_name = file_map.get(passage.file_id, "Unknown File")
# group by file name
if file_name not in files_with_matches:
files_with_matches[file_name] = []
files_with_matches[file_name].append({"text": passage.text, "score": score, "passage_id": passage.id})
except Exception as e:
self.logger.error(f"Turbopuffer search failed: {str(e)}")
raise e
if not files_with_matches:
return f"No semantic matches found for query: '{query}'"
# Format results
passage_num = 0
for file_name, matches in files_with_matches.items():
for match in matches:
passage_num += 1
# format each passage with terminal-style header
score_display = f"(score: {match['score']:.3f})"
passage_header = f"\n=== {file_name} (passage #{passage_num}) {score_display} ==="
# format the passage text
passage_text = match["text"].strip()
lines = passage_text.splitlines()
formatted_lines = []
for line in lines[:20]: # limit to first 20 lines per passage
formatted_lines.append(f" {line}")
if len(lines) > 20:
formatted_lines.append(f" ... [truncated {len(lines) - 20} more lines]")
passage_content = "\n".join(formatted_lines)
results.append(f"{passage_header}\n{passage_content}")
# mark access for files that had matches
if files_with_matches:
matched_file_names = [name for name in files_with_matches.keys() if name != "Unknown File"]
if matched_file_names:
await self.files_agents_manager.mark_access_bulk(agent_id=agent_state.id, file_names=matched_file_names, actor=self.actor)
# create summary header
file_count = len(files_with_matches)
summary = f"Found {total_hits} matches in {file_count} file{'s' if file_count != 1 else ''} for query: '{query}'"
# combine all results
formatted_results = [summary, "=" * len(summary), *results]
self.logger.info(f"Turbopuffer search completed: {total_hits} matches across {file_count} files")
return "\n".join(formatted_results)
async def _search_files_pinecone(self, agent_state: AgentState, attached_sources: List[Source], query: str, limit: int) -> str:
"""Search files using Pinecone vector database."""
# Extract unique source_ids
# TODO: Inefficient
source_ids = [source.id for source in attached_sources]
if not source_ids:
return "No valid source IDs found for attached files"
# Get all attached files for this agent
file_agents = await self.files_agents_manager.list_files_for_agent(
agent_id=agent_state.id, per_file_view_window_char_limit=agent_state.per_file_view_window_char_limit, actor=self.actor
)
if not file_agents:
return "No files are currently attached to search"
results = []
total_hits = 0
files_with_matches = {}
try:
filter = {"source_id": {"$in": source_ids}}
search_results = await search_pinecone_index(query, limit, filter, self.actor)
# Process search results
if "result" in search_results and "hits" in search_results["result"]:
for hit in search_results["result"]["hits"]:
if total_hits >= limit:
break
total_hits += 1
# Extract hit information
hit_id = hit.get("_id", "unknown")
score = hit.get("_score", 0.0)
fields = hit.get("fields", {})
text = fields.get(PINECONE_TEXT_FIELD_NAME, "")
file_id = fields.get("file_id", "")
# Find corresponding file name
file_name = "Unknown File"
for fa in file_agents:
if fa.file_id == file_id:
file_name = fa.file_name
break
# Group by file name
if file_name not in files_with_matches:
files_with_matches[file_name] = []
files_with_matches[file_name].append({"text": text, "score": score, "hit_id": hit_id})
except Exception as e:
self.logger.error(f"Pinecone search failed: {str(e)}")
raise e
if not files_with_matches:
return f"No semantic matches found in Pinecone for query: '{query}'"
# Format results
passage_num = 0
for file_name, matches in files_with_matches.items():
for match in matches:
passage_num += 1
# Format each passage with terminal-style header
score_display = f"(score: {match['score']:.3f})"
passage_header = f"\n=== {file_name} (passage #{passage_num}) {score_display} ==="
# Format the passage text
passage_text = match["text"].strip()
lines = passage_text.splitlines()
formatted_lines = []
for line in lines[:20]: # Limit to first 20 lines per passage
formatted_lines.append(f" {line}")
if len(lines) > 20:
formatted_lines.append(f" ... [truncated {len(lines) - 20} more lines]")
passage_content = "\n".join(formatted_lines)
results.append(f"{passage_header}\n{passage_content}")
# Mark access for files that had matches
if files_with_matches:
matched_file_names = [name for name in files_with_matches.keys() if name != "Unknown File"]
if matched_file_names:
await self.files_agents_manager.mark_access_bulk(agent_id=agent_state.id, file_names=matched_file_names, actor=self.actor)
# Create summary header
file_count = len(files_with_matches)
summary = f"Found {total_hits} Pinecone matches in {file_count} file{'s' if file_count != 1 else ''} for query: '{query}'"
# Combine all results
formatted_results = [summary, "=" * len(summary), *results]
self.logger.info(f"Pinecone search completed: {total_hits} matches across {file_count} files")
return "\n".join(formatted_results)
async def _search_files_native(self, agent_state: AgentState, query: str, limit: int) -> str:
"""Traditional search using existing passage manager."""
# Get semantic search results
passages = await self.agent_manager.query_source_passages_async(
actor=self.actor,
agent_id=agent_state.id,
query_text=query,
embed_query=True,
embedding_config=agent_state.embedding_config,
)
if not passages:
return f"No semantic matches found for query: '{query}'"
# Limit results
passages = passages[:limit]
# Group passages by file for better organization
files_with_passages = {}
for p in passages:
file_name = p.file_name if p.file_name else "Unknown File"
if file_name not in files_with_passages:
files_with_passages[file_name] = []
files_with_passages[file_name].append(p)
results = []
total_passages = 0
for file_name, file_passages in files_with_passages.items():
for passage in file_passages:
total_passages += 1
# Format each passage with terminal-style header
passage_header = f"\n=== {file_name} (passage #{total_passages}) ==="
# Format the passage text with some basic formatting
passage_text = passage.text.strip()
# Format the passage text without line numbers
lines = passage_text.splitlines()
formatted_lines = []
for line in lines[:20]: # Limit to first 20 lines per passage
formatted_lines.append(f" {line}")
if len(lines) > 20:
formatted_lines.append(f" ... [truncated {len(lines) - 20} more lines]")
passage_content = "\n".join(formatted_lines)
results.append(f"{passage_header}\n{passage_content}")
# Mark access for files that had matches
if files_with_passages:
matched_file_names = [name for name in files_with_passages.keys() if name != "Unknown File"]
if matched_file_names:
await self.files_agents_manager.mark_access_bulk(agent_id=agent_state.id, file_names=matched_file_names, actor=self.actor)
# Create summary header
file_count = len(files_with_passages)
summary = f"Found {total_passages} semantic matches in {file_count} file{'s' if file_count != 1 else ''} for query: '{query}'"
# Combine all results
formatted_results = [summary, "=" * len(summary), *results]
self.logger.info(f"Semantic search completed: {total_passages} matches across {file_count} files")
return "\n".join(formatted_results)
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/tool_executor/files_tool_executor.py",
"license": "Apache License 2.0",
"lines": 694,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/services/tool_executor/mcp_tool_executor.py | from typing import Any, Dict, Optional
from letta.constants import MCP_TOOL_TAG_NAME_PREFIX
from letta.log import get_logger
from letta.otel.tracing import trace_method
from letta.schemas.agent import AgentState
from letta.schemas.sandbox_config import SandboxConfig
from letta.schemas.tool import Tool
from letta.schemas.tool_execution_result import ToolExecutionResult
from letta.schemas.user import User
from letta.services.mcp_manager import MCPManager
from letta.services.tool_executor.tool_executor_base import ToolExecutor
from letta.utils import get_friendly_error_msg
logger = get_logger(__name__)
# MCP error class names that represent expected user-facing errors
# These are checked by class name to avoid import dependencies on fastmcp/mcp packages
MCP_EXPECTED_ERROR_CLASSES = {"McpError", "ToolError"}
class ExternalMCPToolExecutor(ToolExecutor):
"""Executor for external MCP tools."""
@trace_method
async def execute(
self,
function_name: str,
function_args: dict,
tool: Tool,
actor: User,
agent_state: Optional[AgentState] = None,
sandbox_config: Optional[SandboxConfig] = None,
sandbox_env_vars: Optional[Dict[str, Any]] = None,
) -> ToolExecutionResult:
mcp_server_tag = [tag for tag in tool.tags if tag.startswith(f"{MCP_TOOL_TAG_NAME_PREFIX}:")]
if not mcp_server_tag:
raise ValueError(f"Tool {tool.name} does not have a valid MCP server tag")
mcp_server_name = mcp_server_tag[0].split(":")[1]
mcp_manager = MCPManager()
# TODO: may need to have better client connection management
environment_variables = {}
agent_id = None
if agent_state:
environment_variables = agent_state.get_agent_env_vars_as_dict()
agent_id = agent_state.id
try:
function_response, success = await mcp_manager.execute_mcp_server_tool(
mcp_server_name=mcp_server_name,
tool_name=function_name,
tool_args=function_args,
environment_variables=environment_variables,
actor=actor,
agent_id=agent_id,
)
return ToolExecutionResult(
status="success" if success else "error",
func_return=function_response,
)
except Exception as e:
# Check if this is an expected MCP error (ToolError, McpError)
# These are user-facing errors from the external MCP server (e.g., "No connected account found")
# We handle them gracefully instead of letting them propagate as exceptions
# Handle ExceptionGroup wrapping (Python 3.11+ async TaskGroup can wrap exceptions)
exception_to_check = e
if hasattr(e, "exceptions") and e.exceptions:
# If it's an ExceptionGroup with a single wrapped exception, unwrap it
if len(e.exceptions) == 1:
exception_to_check = e.exceptions[0]
if exception_to_check.__class__.__name__ in MCP_EXPECTED_ERROR_CLASSES:
logger.info(f"MCP tool '{function_name}' returned expected error: {str(exception_to_check)}")
error_message = get_friendly_error_msg(
function_name=function_name,
exception_name=exception_to_check.__class__.__name__,
exception_message=str(exception_to_check),
)
return ToolExecutionResult(
status="error",
func_return=error_message,
)
# Re-raise unexpected errors
raise
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/tool_executor/mcp_tool_executor.py",
"license": "Apache License 2.0",
"lines": 76,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/services/tool_executor/multi_agent_tool_executor.py | from typing import Any, Dict, List, Optional
from letta.log import get_logger
from letta.schemas.agent import AgentState
from letta.schemas.enums import MessageRole
from letta.schemas.letta_message import AssistantMessage
from letta.schemas.letta_message_content import TextContent
from letta.schemas.message import MessageCreate
from letta.schemas.run import Run
from letta.schemas.sandbox_config import SandboxConfig
from letta.schemas.tool import Tool
from letta.schemas.tool_execution_result import ToolExecutionResult
from letta.schemas.user import User
from letta.services.run_manager import RunManager
from letta.services.tool_executor.tool_executor_base import ToolExecutor
from letta.settings import settings
from letta.utils import safe_create_task
logger = get_logger(__name__)
class LettaMultiAgentToolExecutor(ToolExecutor):
"""Executor for LETTA multi-agent core tools."""
async def execute(
self,
function_name: str,
function_args: dict,
tool: Tool,
actor: User,
agent_state: Optional[AgentState] = None,
sandbox_config: Optional[SandboxConfig] = None,
sandbox_env_vars: Optional[Dict[str, Any]] = None,
) -> ToolExecutionResult:
assert agent_state is not None, "Agent state is required for multi-agent tools"
function_map = {
"send_message_to_agent_and_wait_for_reply": self.send_message_to_agent_and_wait_for_reply,
"send_message_to_agent_async": self.send_message_to_agent_async,
"send_message_to_agents_matching_tags": self.send_message_to_agents_matching_tags_async,
}
if function_name not in function_map:
raise ValueError(f"Unknown function: {function_name}")
# Execute the appropriate function
function_args_copy = function_args.copy() # Make a copy to avoid modifying the original
function_response = await function_map[function_name](agent_state, actor, **function_args_copy)
return ToolExecutionResult(
status="success",
func_return=function_response,
)
async def send_message_to_agent_and_wait_for_reply(
self, agent_state: AgentState, actor: User, message: str, other_agent_id: str
) -> str:
augmented_message = (
f"[Incoming message from agent with ID '{agent_state.id}' - to reply to this message, "
f"make sure to use the 'send_message' at the end, and the system will notify the sender of your response] "
f"{message}"
)
other_agent_state = await self.agent_manager.get_agent_by_id_async(agent_id=other_agent_id, actor=self.actor)
return str(await self._process_agent(agent_state=other_agent_state, message=augmented_message, actor=actor))
async def send_message_to_agents_matching_tags_async(
self, agent_state: AgentState, actor: User, message: str, match_all: List[str], match_some: List[str]
) -> str:
# Find matching agents
matching_agents = await self.agent_manager.list_agents_matching_tags_async(
actor=self.actor, match_all=match_all, match_some=match_some
)
if not matching_agents:
return str([])
augmented_message = (
"[Incoming message from external Letta agent - to reply to this message, "
"make sure to use the 'send_message' at the end, and the system will notify "
"the sender of your response] "
f"{message}"
)
# Process agents sequentially to avoid exhausting the database connection pool.
# When many agents match the tags, concurrent execution can create too many simultaneous
# database connections, causing pool exhaustion errors.
results = []
for agent_state in matching_agents:
result = await self._process_agent(agent_state=agent_state, message=augmented_message, actor=actor)
results.append(result)
return str(results)
async def _process_agent(self, agent_state: AgentState, message: str, actor: User) -> Dict[str, Any]:
from letta.agents.letta_agent_v3 import LettaAgentV3
try:
runs_manager = RunManager()
run = await runs_manager.create_run(
pydantic_run=Run(
agent_id=agent_state.id,
background=False,
metadata={
"run_type": "agent_send_message_to_agent", # TODO: Make this a constant
},
),
actor=actor,
)
letta_agent = LettaAgentV3(
agent_state=agent_state,
actor=self.actor,
)
letta_response = await letta_agent.step(
[MessageCreate(role=MessageRole.system, content=[TextContent(text=message)])], run_id=run.id
)
messages = letta_response.messages
send_message_content = [message.content for message in messages if isinstance(message, AssistantMessage)]
return {
"agent_id": agent_state.id,
"response": send_message_content if send_message_content else ["<no response>"],
}
except Exception as e:
return {
"agent_id": agent_state.id,
"error": str(e),
"type": type(e).__name__,
}
async def send_message_to_agent_async(self, agent_state: AgentState, actor: User, message: str, other_agent_id: str) -> str:
if settings.environment == "prod":
raise RuntimeError("This tool is not allowed to be run on Letta Cloud.")
# 1) Build the prefixed system‐message
prefixed = (
f"[Incoming message from agent with ID '{agent_state.id}' - "
f"to reply to this message, make sure to use the "
f"'send_message_to_agent_async' tool, or the agent will not receive your message] "
f"{message}"
)
other_agent_state = await self.agent_manager.get_agent_by_id_async(agent_id=other_agent_id, actor=self.actor)
task = safe_create_task(
self._process_agent(agent_state=other_agent_state, message=prefixed, actor=actor), label=f"send_message_to_{other_agent_id}"
)
task.add_done_callback(lambda t: (logger.error(f"Async send_message task failed: {t.exception()}") if t.exception() else None))
return "Successfully sent message"
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/tool_executor/multi_agent_tool_executor.py",
"license": "Apache License 2.0",
"lines": 126,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/services/tool_executor/sandbox_tool_executor.py | import traceback
from typing import Any, Dict, Optional
from letta.functions.ast_parsers import coerce_dict_args_by_annotations, get_function_annotations_from_source
from letta.log import get_logger
from letta.otel.tracing import trace_method
from letta.schemas.agent import AgentState
from letta.schemas.enums import SandboxType
from letta.schemas.sandbox_config import SandboxConfig
from letta.schemas.tool import Tool
from letta.schemas.tool_execution_result import ToolExecutionResult
from letta.schemas.user import User
from letta.services.agent_manager import AgentManager
from letta.services.sandbox_credentials_service import SandboxCredentialsService
from letta.services.tool_executor.tool_executor_base import ToolExecutor
from letta.services.tool_sandbox.local_sandbox import AsyncToolSandboxLocal
from letta.settings import tool_settings
from letta.types import JsonDict
from letta.utils import get_friendly_error_msg
logger = get_logger(__name__)
class SandboxToolExecutor(ToolExecutor):
"""Executor for sandboxed tools."""
@trace_method
async def execute(
self,
function_name: str,
function_args: JsonDict,
tool: Tool,
actor: User,
agent_state: Optional[AgentState] = None,
sandbox_config: Optional[SandboxConfig] = None,
sandbox_env_vars: Optional[Dict[str, Any]] = None,
) -> ToolExecutionResult:
# Store original memory state
if agent_state:
orig_memory_str = agent_state.memory.compile(llm_config=agent_state.llm_config)
else:
orig_memory_str = None
# Fetch credentials from webhook
credentials_service = SandboxCredentialsService()
fetched_credentials = await credentials_service.fetch_credentials(
actor=actor,
tool_name=tool.name,
agent_id=agent_state.id if agent_state else None,
)
# Merge fetched credentials with provided sandbox_env_vars
if sandbox_env_vars is None:
sandbox_env_vars = {}
# inject some extra env such as PROJECT_ID from agent_state
if agent_state and agent_state.project_id:
fetched_credentials["PROJECT_ID"] = agent_state.project_id
sandbox_env_vars = {**fetched_credentials, **sandbox_env_vars}
try:
# Prepare function arguments
function_args = self._prepare_function_args(function_args, tool, function_name)
agent_state_copy = self._create_agent_state_copy(agent_state) if agent_state else None
# Execute in sandbox with Modal first (if configured and requested), then fallback to E2B/LOCAL
# Try Modal if: (1) Modal credentials configured AND (2) tool requests Modal via metadata
tool_requests_modal = tool.metadata_ and tool.metadata_.get("sandbox") == "modal"
modal_configured = tool_settings.modal_sandbox_enabled
tool_execution_result = None
# Try Modal first if both conditions met
if tool_requests_modal and modal_configured:
try:
from letta.services.tool_sandbox.modal_sandbox import AsyncToolSandboxModal
logger.info(f"Attempting Modal execution for tool {tool.name}")
sandbox = AsyncToolSandboxModal(
function_name,
function_args,
actor,
tool_id=tool.id,
agent_id=agent_state.id if agent_state else None,
project_id=agent_state.project_id if agent_state else None,
tool_object=tool,
sandbox_config=sandbox_config,
sandbox_env_vars=sandbox_env_vars,
organization_id=actor.organization_id,
)
# TODO: pass through letta api key
tool_execution_result = await sandbox.run(agent_state=agent_state_copy, additional_env_vars=sandbox_env_vars)
except Exception as e:
# Modal execution failed, log and fall back to E2B/LOCAL
logger.warning(f"Modal execution failed for tool {tool.name}: {e}. Falling back to {tool_settings.sandbox_type.value}")
tool_execution_result = None
# Fallback to E2B or LOCAL if Modal wasn't tried or failed
if tool_execution_result is None:
if tool_settings.sandbox_type == SandboxType.E2B:
from letta.services.tool_sandbox.e2b_sandbox import AsyncToolSandboxE2B
sandbox = AsyncToolSandboxE2B(
function_name,
function_args,
actor,
tool_id=tool.id,
agent_id=agent_state.id if agent_state else None,
project_id=agent_state.project_id if agent_state else None,
tool_object=tool,
sandbox_config=sandbox_config,
sandbox_env_vars=sandbox_env_vars,
)
else:
sandbox = AsyncToolSandboxLocal(
function_name,
function_args,
actor,
tool_id=tool.id,
agent_id=agent_state.id if agent_state else None,
project_id=agent_state.project_id if agent_state else None,
tool_object=tool,
sandbox_config=sandbox_config,
sandbox_env_vars=sandbox_env_vars,
)
tool_execution_result = await sandbox.run(agent_state=agent_state_copy)
log_lines = (tool_execution_result.stdout or []) + (tool_execution_result.stderr or [])
logger.debug("Tool execution log: %s", "\n".join(log_lines))
# Verify memory integrity
if agent_state:
new_memory_str = agent_state.memory.compile(llm_config=agent_state.llm_config)
assert orig_memory_str == new_memory_str, "Memory should not be modified in a sandbox tool"
# Update agent memory if needed
if tool_execution_result.agent_state is not None:
await AgentManager().update_memory_if_changed_async(agent_state.id, tool_execution_result.agent_state.memory, actor)
return tool_execution_result
except Exception as e:
return self._handle_execution_error(e, function_name, traceback.format_exc())
@staticmethod
def _prepare_function_args(function_args: JsonDict, tool: Tool, function_name: str) -> dict:
"""Prepare function arguments with proper type coercion."""
# Skip Python AST parsing for TypeScript tools - they use json_schema for type info
if tool.source_type == "typescript":
return function_args
try:
# Parse the source code to extract function annotations (Python only)
annotations = get_function_annotations_from_source(tool.source_code, function_name)
# Coerce the function arguments to the correct types based on the annotations
return coerce_dict_args_by_annotations(function_args, annotations)
except ValueError:
# Just log the error and continue with original args
# This is defensive programming - we try to coerce but fall back if it fails
return function_args
@staticmethod
def _create_agent_state_copy(agent_state: AgentState):
"""Create a copy of agent state for sandbox execution."""
agent_state_copy = agent_state.__deepcopy__()
# Remove tools from copy to prevent nested tool execution
agent_state_copy.tools = []
agent_state_copy.tool_rules = []
return agent_state_copy
@staticmethod
def _handle_execution_error(
exception: Exception,
function_name: str,
stderr: str,
) -> ToolExecutionResult:
"""Handle tool execution errors."""
error_message = get_friendly_error_msg(
function_name=function_name, exception_name=type(exception).__name__, exception_message=str(exception)
)
return ToolExecutionResult(
status="error",
func_return=error_message,
stderr=[stderr],
)
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/tool_executor/sandbox_tool_executor.py",
"license": "Apache License 2.0",
"lines": 161,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/services/tool_executor/tool_executor_base.py | from abc import ABC, abstractmethod
from typing import Any, Dict, Optional
from letta.schemas.agent import AgentState
from letta.schemas.sandbox_config import SandboxConfig
from letta.schemas.tool import Tool
from letta.schemas.tool_execution_result import ToolExecutionResult
from letta.schemas.user import User
from letta.services.agent_manager import AgentManager
from letta.services.block_manager import BlockManager
from letta.services.message_manager import MessageManager
from letta.services.passage_manager import PassageManager
from letta.services.run_manager import RunManager
class ToolExecutor(ABC):
"""Abstract base class for tool executors."""
def __init__(
self,
message_manager: MessageManager,
agent_manager: AgentManager,
block_manager: BlockManager,
run_manager: RunManager,
passage_manager: PassageManager,
actor: User,
):
self.message_manager = message_manager
self.agent_manager = agent_manager
self.block_manager = block_manager
self.run_manager = run_manager
self.passage_manager = passage_manager
self.actor = actor
@abstractmethod
async def execute(
self,
function_name: str,
function_args: dict,
tool: Tool,
actor: User,
agent_state: Optional[AgentState] = None,
sandbox_config: Optional[SandboxConfig] = None,
sandbox_env_vars: Optional[Dict[str, Any]] = None,
) -> ToolExecutionResult:
"""Execute the tool and return the result."""
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/tool_executor/tool_executor_base.py",
"license": "Apache License 2.0",
"lines": 41,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:letta/services/tool_sandbox/modal_constants.py | """Shared constants for Modal sandbox implementations."""
# Deployment and versioning
DEFAULT_CONFIG_KEY = "default"
MODAL_DEPLOYMENTS_KEY = "modal_deployments"
VERSION_HASH_LENGTH = 12
# Cache settings
CACHE_TTL_SECONDS = 60
# Modal execution settings
DEFAULT_MODAL_TIMEOUT = 60
DEFAULT_MAX_CONCURRENT_INPUTS = 1
DEFAULT_PYTHON_VERSION = "3.12"
# Security settings
SAFE_IMPORT_MODULES = {"typing", "pydantic", "datetime", "enum", "uuid", "decimal"}
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/tool_sandbox/modal_constants.py",
"license": "Apache License 2.0",
"lines": 13,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:letta/services/tool_sandbox/modal_deployment_manager.py | """
Modal Deployment Manager - Handles deployment orchestration with optional locking.
This module separates deployment logic from the main sandbox execution,
making it easier to understand and optionally disable locking/version tracking.
"""
import hashlib
from typing import Tuple
import modal
from letta.log import get_logger
from letta.schemas.sandbox_config import SandboxConfig
from letta.schemas.tool import Tool
from letta.services.tool_sandbox.modal_constants import VERSION_HASH_LENGTH
from letta.services.tool_sandbox.modal_version_manager import ModalVersionManager, get_version_manager
logger = get_logger(__name__)
class ModalDeploymentManager:
    """Manages Modal app deployments with optional locking and version tracking."""

    def __init__(
        self,
        tool: Tool,
        version_manager: ModalVersionManager | None = None,
        use_locking: bool = True,
        use_version_tracking: bool = True,
    ):
        """
        Initialize deployment manager.

        Args:
            tool: The tool to deploy
            version_manager: Version manager for tracking deployments (optional)
            use_locking: Whether to use locking for coordinated deployments
            use_version_tracking: Whether to track and reuse existing deployments
        """
        self.tool = tool
        # Only materialize a version manager when some coordination feature needs one.
        # NOTE: this parses as `(version_manager or get_version_manager()) if (...) else None`.
        self.version_manager = version_manager or get_version_manager() if (use_locking or use_version_tracking) else None
        self.use_locking = use_locking
        self.use_version_tracking = use_version_tracking
        self._app_name = self._generate_app_name()

    def _generate_app_name(self) -> str:
        """Generate app name based on tool ID."""
        # Truncated so the name plus "-<version_hash>" stays within Modal's name limit.
        return self.tool.id[:40]

    def calculate_version_hash(self, sbx_config: SandboxConfig) -> str:
        """Calculate version hash for the current configuration.

        Any change to the tool source, its pip/npm requirements, or the sandbox
        config fingerprint yields a different hash, which forces a redeploy.
        """
        components = (
            self.tool.source_code,
            str(self.tool.pip_requirements) if self.tool.pip_requirements else "",
            str(self.tool.npm_requirements) if self.tool.npm_requirements else "",
            sbx_config.fingerprint(),
        )
        combined = "|".join(components)
        return hashlib.sha256(combined.encode()).hexdigest()[:VERSION_HASH_LENGTH]

    def get_full_app_name(self, version_hash: str) -> str:
        """Get the full app name including version."""
        app_full_name = f"{self._app_name}-{version_hash}"
        # Ensure total length is under 64 characters
        if len(app_full_name) > 63:
            # Trim the tool-id prefix, never the hash — the hash is what disambiguates versions.
            max_id_len = 63 - len(version_hash) - 1
            app_full_name = f"{self._app_name[:max_id_len]}-{version_hash}"
        return app_full_name

    async def get_or_deploy_app(
        self,
        sbx_config: SandboxConfig,
        user,
        create_app_func,
    ) -> Tuple[modal.App, str]:
        """
        Get existing app or deploy new one.

        Args:
            sbx_config: Sandbox configuration
            user: User/actor for permissions
            create_app_func: Function to create and deploy the app

        Returns:
            Tuple of (Modal app, version hash)
        """
        version_hash = self.calculate_version_hash(sbx_config)

        # Simple path: no version tracking or locking
        if not self.use_version_tracking:
            logger.info(f"Deploying Modal app {self._app_name} (version tracking disabled)")
            app = await create_app_func(sbx_config, version_hash)
            return app, version_hash

        # Try to use existing deployment
        if self.use_version_tracking:
            existing_app = await self._try_get_existing_app(sbx_config, version_hash, user)
            if existing_app:
                return existing_app, version_hash

        # Need to deploy - with or without locking
        if self.use_locking:
            return await self._deploy_with_locking(sbx_config, version_hash, user, create_app_func)
        else:
            return await self._deploy_without_locking(sbx_config, version_hash, user, create_app_func)

    async def _try_get_existing_app(
        self,
        sbx_config: SandboxConfig,
        version_hash: str,
        user,
    ) -> modal.App | None:
        """Try to get an existing deployed app.

        Returns the app only when the recorded deployment matches ``version_hash``
        AND the app can still be looked up in Modal; otherwise None (caller redeploys).
        """
        if not self.version_manager:
            return None
        deployment = await self.version_manager.get_deployment(
            tool_id=self.tool.id, sandbox_config_id=sbx_config.id if sbx_config else None, actor=user
        )
        if deployment and deployment.version_hash == version_hash:
            app_full_name = self.get_full_app_name(version_hash)
            logger.info(f"Checking for existing Modal app {app_full_name}")
            try:
                app = await modal.App.lookup.aio(app_full_name)
                logger.info(f"Found existing Modal app {app_full_name}")
                return app
            except Exception:
                # Metadata says deployed but Modal disagrees (e.g. app was deleted) — redeploy.
                logger.info(f"Modal app {app_full_name} not found in Modal, will redeploy")
                return None
        return None

    async def _deploy_without_locking(
        self,
        sbx_config: SandboxConfig,
        version_hash: str,
        user,
        create_app_func,
    ) -> Tuple[modal.App, str]:
        """Deploy without locking - simpler but may have race conditions."""
        app_full_name = self.get_full_app_name(version_hash)
        logger.info(f"Deploying Modal app {app_full_name} (no locking)")

        # Deploy the app
        app = await create_app_func(sbx_config, version_hash)

        # Register deployment if tracking is enabled
        if self.use_version_tracking and self.version_manager:
            await self._register_deployment(sbx_config, version_hash, app, user)

        return app, version_hash

    async def _deploy_with_locking(
        self,
        sbx_config: SandboxConfig,
        version_hash: str,
        user,
        create_app_func,
    ) -> Tuple[modal.App, str]:
        """Deploy with locking to prevent concurrent deployments."""
        cache_key = f"{self.tool.id}:{sbx_config.id if sbx_config else 'default'}"
        deployment_lock = self.version_manager.get_deployment_lock(cache_key)

        async with deployment_lock:
            # Double-check after acquiring lock
            existing_app = await self._try_get_existing_app(sbx_config, version_hash, user)
            if existing_app:
                return existing_app, version_hash
            # Check if another process is deploying
            if self.version_manager.is_deployment_in_progress(cache_key, version_hash):
                logger.info(f"Another process is deploying {self._app_name} v{version_hash}, waiting...")
                # Release lock and wait
                # (setting the local to None is the flag checked below, after the `with` exits)
                deployment_lock = None

        # Wait for other deployment if needed
        if deployment_lock is None:
            success = await self.version_manager.wait_for_deployment(cache_key, version_hash, timeout=120)
            if success:
                existing_app = await self._try_get_existing_app(sbx_config, version_hash, user)
                if existing_app:
                    return existing_app, version_hash
                raise RuntimeError("Deployment completed but app not found")
            else:
                raise RuntimeError("Timeout waiting for deployment")

        # We're deploying - mark as in progress
        deployment_key = None
        async with deployment_lock:
            deployment_key = self.version_manager.mark_deployment_in_progress(cache_key, version_hash)

        try:
            app_full_name = self.get_full_app_name(version_hash)
            logger.info(f"Deploying Modal app {app_full_name} with locking")

            # Deploy the app
            app = await create_app_func(sbx_config, version_hash)

            # Mark deployment complete
            if deployment_key:
                self.version_manager.complete_deployment(deployment_key)

            # Register deployment
            if self.use_version_tracking:
                await self._register_deployment(sbx_config, version_hash, app, user)

            return app, version_hash
        except Exception:
            # Also signal completion on failure so waiters don't block until timeout.
            if deployment_key:
                self.version_manager.complete_deployment(deployment_key)
            raise

    async def _register_deployment(
        self,
        sbx_config: SandboxConfig,
        version_hash: str,
        app: modal.App,
        user,
    ):
        """Record the deployment (with its combined pip dependency set) via the version manager."""
        if not self.version_manager:
            return
        dependencies = set()
        if self.tool.pip_requirements:
            dependencies.update(str(req) for req in self.tool.pip_requirements)
        modal_config = sbx_config.get_modal_config()
        if modal_config.pip_requirements:
            dependencies.update(str(req) for req in modal_config.pip_requirements)
        await self.version_manager.register_deployment(
            tool_id=self.tool.id,
            app_name=self._app_name,
            version_hash=version_hash,
            app=app,
            dependencies=dependencies,
            sandbox_config_id=sbx_config.id if sbx_config else None,
            actor=user,
        )
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/tool_sandbox/modal_deployment_manager.py",
"license": "Apache License 2.0",
"lines": 201,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/services/tool_sandbox/modal_sandbox.py | """
Modal sandbox implementation, which configures one Modal App per tool.
"""
from typing import TYPE_CHECKING, Any, Dict, Optional
from letta.constants import MODAL_DEFAULT_TOOL_NAME
from letta.log import get_logger
from letta.otel.tracing import log_event, trace_method
from letta.schemas.agent import AgentState
from letta.schemas.enums import SandboxType
from letta.schemas.sandbox_config import SandboxConfig
from letta.schemas.tool import Tool
from letta.schemas.tool_execution_result import ToolExecutionResult
from letta.services.tool_sandbox.base import AsyncToolSandboxBase
from letta.types import JsonDict
logger = get_logger(__name__)
if TYPE_CHECKING:
pass
class AsyncToolSandboxModal(AsyncToolSandboxBase):
    # Metadata key under which sandbox configuration state is stored.
    METADATA_CONFIG_STATE_KEY = "config_state"

    def __init__(
        self,
        tool_name: str,
        args: JsonDict,
        user,
        tool_id: str,
        agent_id: Optional[str] = None,
        project_id: Optional[str] = None,
        force_recreate: bool = True,
        tool_object: Optional[Tool] = None,
        sandbox_config: Optional[SandboxConfig] = None,
        sandbox_env_vars: Optional[Dict[str, Any]] = None,
        organization_id: Optional[str] = None,
    ):
        """Initialize a Modal-backed tool sandbox.

        Args:
            tool_name: Name of the tool to execute.
            args: Arguments to pass to the tool.
            user: User/actor for permissions; also supplies organization_id when not given.
            tool_id: ID of the tool being executed.
            agent_id: Agent ID (optional).
            project_id: Project ID (optional).
            force_recreate: Kept on the instance; consumed by callers/subclasses.
            tool_object: Pre-fetched Tool object (optional).
            sandbox_config: Explicit sandbox configuration (optional).
            sandbox_env_vars: Sandbox-scoped environment variables (optional).
            organization_id: Organization override; defaults to ``user.organization_id``.
        """
        super().__init__(
            tool_name,
            args,
            user,
            tool_id=tool_id,
            agent_id=agent_id,
            project_id=project_id,
            tool_object=tool_object,
            sandbox_config=sandbox_config,
            sandbox_env_vars=sandbox_env_vars,
        )
        self.force_recreate = force_recreate
        # Get organization_id from user if not explicitly provided
        self.organization_id = organization_id if organization_id is not None else user.organization_id

        # TODO: check to make sure modal app `App(tool.id)` exists

    async def _wait_for_modal_function_deployment(self, timeout: int = 60):
        """Wait for Modal app deployment to complete by retrying function lookup.

        Polls ``modal.Function.from_name`` every ``retry_delay`` seconds until it
        succeeds or ``timeout`` seconds elapse.

        Returns:
            The resolved Modal function handle.

        Raises:
            TimeoutError: If the function is still not found after ``timeout`` seconds.
        """
        import asyncio
        import time

        import modal

        from letta.helpers.tool_helpers import generate_modal_function_name

        # Use the same naming logic as deployment
        function_name = generate_modal_function_name(self.tool.name, self.organization_id, self.project_id)
        start_time = time.time()
        retry_delay = 2  # seconds

        while time.time() - start_time < timeout:
            try:
                f = modal.Function.from_name(function_name, MODAL_DEFAULT_TOOL_NAME)
                logger.info(f"Modal function found successfully for app {function_name}, function {f}")
                return f
            except Exception as e:
                elapsed = time.time() - start_time
                if elapsed >= timeout:
                    raise TimeoutError(
                        f"Modal app {function_name} deployment timed out after {timeout} seconds. "
                        f"Expected app name: {function_name}, function: {MODAL_DEFAULT_TOOL_NAME}"
                    ) from e
                logger.info(f"Modal app {function_name} not ready yet (elapsed: {elapsed:.1f}s), waiting {retry_delay}s...")
                await asyncio.sleep(retry_delay)

        raise TimeoutError(f"Modal app {function_name} deployment timed out after {timeout} seconds")

    @trace_method
    async def run(
        self,
        agent_state: Optional[AgentState] = None,
        additional_env_vars: Optional[Dict] = None,
    ) -> ToolExecutionResult:
        """Execute the tool remotely on Modal and wrap the outcome in a ToolExecutionResult.

        On any exception the error is logged and returned as an error-status
        result rather than raised to the caller.
        """
        await self._init_async()

        try:
            log_event("modal_execution_started", {"tool": self.tool_name, "modal_app_id": self.tool.id})
            logger.info(f"Waiting for Modal function deployment for app {self.tool.id}")
            func = await self._wait_for_modal_function_deployment()
            logger.info(f"Modal function found successfully for app {self.tool.id}, function {str(func)}")
            logger.info(f"Calling with arguments {self.args}")

            # TODO: use another mechanism to pass through the key
            if additional_env_vars is None:
                letta_api_key = None
            else:
                letta_api_key = additional_env_vars.get("LETTA_SECRET_API_KEY", None)

            # Construct dynamic env vars with proper layering:
            # 1. Global sandbox env vars from DB (always included)
            # 2. Provided sandbox env vars (agent-scoped, override global on key collision)
            # 3. Agent-specific env vars from secrets
            # 4. Additional runtime env vars (highest priority)
            env_vars = {}

            # Always load global sandbox-level environment variables from the database
            try:
                sandbox_config = await self.sandbox_config_manager.get_or_create_default_sandbox_config_async(
                    sandbox_type=SandboxType.MODAL, actor=self.user
                )
                if sandbox_config:
                    global_env_vars = await self.sandbox_config_manager.get_sandbox_env_vars_as_dict_async(
                        sandbox_config_id=sandbox_config.id, actor=self.user, limit=None
                    )
                    env_vars.update(global_env_vars)
            except Exception as e:
                # Best-effort: missing global env vars should not block execution.
                logger.warning(f"Could not load global sandbox env vars for tool {self.tool_name}: {e}")

            # Override with provided sandbox env vars (agent-scoped)
            if self.provided_sandbox_env_vars:
                env_vars.update(self.provided_sandbox_env_vars)

            # Override with agent-specific environment variables from secrets
            if agent_state:
                env_vars.update(agent_state.get_agent_env_vars_as_dict())

            # Override with additional env vars passed at runtime (highest priority)
            if additional_env_vars:
                env_vars.update(additional_env_vars)

            # Call the modal function (already retrieved above)
            # Convert agent_state to dict to avoid cloudpickle serialization issues
            agent_state_dict = agent_state.model_dump() if agent_state else None

            logger.info(f"Calling function {func} with arguments {self.args}")
            result = await func.remote.aio(
                tool_name=self.tool_name,
                agent_state=agent_state_dict,
                agent_id=self.agent_id,
                env_vars=env_vars,
                letta_api_key=letta_api_key,
                **self.args,
            )
            logger.info(f"Modal function result: {result}")

            # Reconstruct agent_state if it was returned (use original as fallback)
            result_agent_state = agent_state
            if result.get("agent_state"):
                if isinstance(result["agent_state"], dict):
                    try:
                        from letta.schemas.agent import AgentState

                        result_agent_state = AgentState.model_validate(result["agent_state"])
                    except Exception as e:
                        logger.warning(f"Failed to reconstruct AgentState: {e}, using original")
                else:
                    result_agent_state = result["agent_state"]

            return ToolExecutionResult(
                func_return=result["result"],
                agent_state=result_agent_state,
                stdout=[result["stdout"]],
                stderr=[result["stderr"]],
                status="error" if result["error"] else "success",
            )

        except Exception as e:
            log_event(
                "modal_execution_failed",
                {
                    "tool": self.tool_name,
                    "modal_app_id": self.tool.id,
                    "error": str(e),
                },
            )
            logger.error(f"Modal execution failed for tool {self.tool_name} {self.tool.id}: {e}")
            return ToolExecutionResult(
                func_return=None,
                agent_state=agent_state,
                stdout=[""],
                stderr=[str(e)],
                status="error",
            )
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/tool_sandbox/modal_sandbox.py",
"license": "Apache License 2.0",
"lines": 166,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/services/tool_sandbox/modal_sandbox_v2.py | """
This runs tool calls within an isolated Modal sandbox by doing the following:
1. deploying modal functions that embed the original functions
2. dynamically executing tools with arguments passed in at runtime
3. tracking deployment versions to know when a deployment update is needed
"""
import asyncio
from typing import Any, Dict
import modal
from letta.log import get_logger
from letta.otel.tracing import log_event, trace_method
from letta.schemas.agent import AgentState
from letta.schemas.enums import SandboxType
from letta.schemas.sandbox_config import SandboxConfig
from letta.schemas.tool import Tool
from letta.schemas.tool_execution_result import ToolExecutionResult
from letta.services.tool_sandbox.base import AsyncToolSandboxBase
from letta.services.tool_sandbox.modal_constants import DEFAULT_MAX_CONCURRENT_INPUTS, DEFAULT_PYTHON_VERSION
from letta.services.tool_sandbox.modal_deployment_manager import ModalDeploymentManager
from letta.services.tool_sandbox.modal_version_manager import ModalVersionManager
from letta.services.tool_sandbox.safe_pickle import SafePickleError, safe_pickle_dumps, sanitize_for_pickle
from letta.settings import tool_settings
from letta.types import JsonDict
from letta.utils import get_friendly_error_msg
logger = get_logger(__name__)
class AsyncToolSandboxModalV2(AsyncToolSandboxBase):
    """Modal sandbox with dynamic argument passing and version tracking."""

    def __init__(
        self,
        tool_name: str,
        args: JsonDict,
        user,
        tool_id: str,
        agent_id: str | None = None,
        project_id: str | None = None,
        tool_object: Tool | None = None,
        sandbox_config: SandboxConfig | None = None,
        sandbox_env_vars: dict[str, Any] | None = None,
        version_manager: ModalVersionManager | None = None,
        use_locking: bool = True,
        use_version_tracking: bool = True,
    ):
        """
        Initialize the Modal sandbox.

        Args:
            tool_name: Name of the tool to execute
            args: Arguments to pass to the tool
            user: User/actor for permissions
            tool_id: Tool ID for the tool being executed
            agent_id: Agent ID (optional)
            project_id: Project ID for the tool execution (optional)
            tool_object: Tool object (optional)
            sandbox_config: Sandbox configuration (optional)
            sandbox_env_vars: Environment variables (optional)
            version_manager: Version manager, will create default if needed (optional)
            use_locking: Whether to use locking for deployment coordination (default: True)
            use_version_tracking: Whether to track and reuse deployments (default: True)

        Raises:
            ValueError: If Modal credentials are not configured in tool settings.
        """
        super().__init__(
            tool_name,
            args,
            user,
            tool_id=tool_id,
            agent_id=agent_id,
            project_id=project_id,
            tool_object=tool_object,
            sandbox_config=sandbox_config,
            sandbox_env_vars=sandbox_env_vars,
        )

        if not tool_settings.modal_token_id or not tool_settings.modal_token_secret:
            raise ValueError("MODAL_TOKEN_ID and MODAL_TOKEN_SECRET must be set.")

        # Initialize deployment manager with configurable options
        self._deployment_manager = ModalDeploymentManager(
            tool=self.tool,
            version_manager=version_manager,
            use_locking=use_locking,
            use_version_tracking=use_version_tracking,
        )
        # Set by _get_or_deploy_modal_app once the deployed version is known.
        self._version_hash = None

    async def _get_or_deploy_modal_app(self, sbx_config: SandboxConfig) -> modal.App:
        """Get existing Modal app or deploy a new version if needed."""
        app, version_hash = await self._deployment_manager.get_or_deploy_app(
            sbx_config=sbx_config,
            user=self.user,
            create_app_func=self._create_and_deploy_app,
        )
        self._version_hash = version_hash
        return app

    async def _create_and_deploy_app(self, sbx_config: SandboxConfig, version: str) -> modal.App:
        """Create and deploy a new Modal app with the executor function.

        The executor source (modal_executor.py) is shipped into the image as a
        single file and exec'd inside the container, avoiding sys.path tricks.
        """
        import importlib.util
        from pathlib import Path

        # App name = tool_id + version hash
        app_full_name = self._deployment_manager.get_full_app_name(version)
        app = modal.App(app_full_name)
        modal_config = sbx_config.get_modal_config()
        image = self._get_modal_image(sbx_config)

        # Find the sandbox module dynamically
        spec = importlib.util.find_spec("sandbox")
        if not spec or not spec.origin:
            raise ValueError("Could not find sandbox module")
        sandbox_dir = Path(spec.origin).parent

        # Read the modal_executor module content
        executor_path = sandbox_dir / "modal_executor.py"
        if not executor_path.exists():
            raise ValueError(f"modal_executor.py not found at {executor_path}")

        # Validate file is readable (wrapped to avoid blocking event loop)
        def _validate_file():
            with open(executor_path, "r") as f:
                f.read()

        await asyncio.to_thread(_validate_file)

        # Create a single file mount instead of directory mount
        # This avoids sys.path manipulation
        image = image.add_local_file(str(executor_path), remote_path="/modal_executor.py")

        # Register the executor function with Modal
        @app.function(
            image=image,
            timeout=modal_config.timeout,
            restrict_modal_access=True,
            max_inputs=DEFAULT_MAX_CONCURRENT_INPUTS,
            serialized=True,
        )
        def tool_executor(
            tool_source: str,
            tool_name: str,
            args_pickled: bytes,
            agent_state_pickled: bytes | None,
            inject_agent_state: bool,
            is_async: bool,
            args_schema_code: str | None,
            environment_vars: Dict[str, Any],
        ) -> Dict[str, Any]:
            """Execute tool in Modal container."""
            # Execute the modal_executor code in a clean namespace
            # Create a module-like namespace for executor
            executor_namespace = {
                "__name__": "modal_executor",
                "__file__": "/modal_executor.py",
            }

            # Read and execute the module file
            with open("/modal_executor.py", "r") as f:
                exec(compile(f.read(), "/modal_executor.py", "exec"), executor_namespace)

            # Call the wrapper function from the executed namespace
            return executor_namespace["execute_tool_wrapper"](
                tool_source=tool_source,
                tool_name=tool_name,
                args_pickled=args_pickled,
                agent_state_pickled=agent_state_pickled,
                inject_agent_state=inject_agent_state,
                is_async=is_async,
                args_schema_code=args_schema_code,
                environment_vars=environment_vars,
            )

        # Store the function reference
        app.tool_executor = tool_executor

        # Deploy the app
        logger.info(f"Deploying Modal app {app_full_name}")
        log_event("modal_v2_deploy_started", {"app_name": app_full_name, "version": version})
        try:
            # Try to look up the app first to see if it already exists
            try:
                await modal.App.lookup.aio(app_full_name)
                logger.info(f"Modal app {app_full_name} already exists, skipping deployment")
                log_event("modal_v2_deploy_already_exists", {"app_name": app_full_name, "version": version})
                # Return the created app with the function attached
                return app
            except Exception:
                # App doesn't exist, need to deploy
                pass

            with modal.enable_output():
                await app.deploy.aio()
            log_event("modal_v2_deploy_succeeded", {"app_name": app_full_name, "version": version})
        except Exception as e:
            log_event("modal_v2_deploy_failed", {"app_name": app_full_name, "version": version, "error": str(e)})
            raise

        return app

    @trace_method
    async def run(
        self,
        agent_state: AgentState | None = None,
        additional_env_vars: Dict | None = None,
    ) -> ToolExecutionResult:
        """Execute the tool in Modal sandbox with dynamic argument passing.

        Gathers env vars, pickles args/agent state (with sanitization fallbacks),
        ensures the app is deployed, then invokes the remote executor with
        timeout + retry/backoff. All failures are converted into an error-status
        ToolExecutionResult rather than raised.
        """
        if self.provided_sandbox_config:
            sbx_config = self.provided_sandbox_config
        else:
            sbx_config = await self.sandbox_config_manager.get_or_create_default_sandbox_config_async(
                sandbox_type=SandboxType.MODAL, actor=self.user
            )

        envs = await self._gather_env_vars(agent_state, additional_env_vars or {}, sbx_config.id, is_local=False)

        # Prepare schema code if needed
        args_schema_code = None
        if self.tool.args_json_schema:
            from letta.services.helpers.tool_execution_helper import add_imports_and_pydantic_schemas_for_args

            args_schema_code = add_imports_and_pydantic_schemas_for_args(self.tool.args_json_schema)

        # Serialize arguments and agent state with safety checks
        try:
            args_pickled = safe_pickle_dumps(self.args)
        except SafePickleError as e:
            logger.warning(f"Failed to pickle args, attempting sanitization: {e}")
            sanitized_args = sanitize_for_pickle(self.args)
            try:
                args_pickled = safe_pickle_dumps(sanitized_args)
            except SafePickleError:
                # Final fallback: convert to string representation
                args_pickled = safe_pickle_dumps(str(self.args))

        agent_state_pickled = None
        if self.inject_agent_state and agent_state:
            try:
                agent_state_pickled = safe_pickle_dumps(agent_state)
            except SafePickleError as e:
                logger.warning(f"Failed to pickle agent state: {e}")
                # For agent state, we prefer to skip injection rather than send corrupted data
                agent_state_pickled = None
                self.inject_agent_state = False

        try:
            log_event(
                "modal_execution_started",
                {
                    "tool": self.tool_name,
                    "app_name": self._deployment_manager._app_name,
                    "version": self._version_hash,
                    "env_vars": list(envs),
                    "args_size": len(args_pickled),
                    "agent_state_size": len(agent_state_pickled) if agent_state_pickled else 0,
                    "inject_agent_state": self.inject_agent_state,
                },
            )

            # Get or deploy the Modal app
            app = await self._get_or_deploy_modal_app(sbx_config)

            # Get modal config for timeout settings
            modal_config = sbx_config.get_modal_config()

            # Execute the tool remotely with retry logic
            max_retries = 3
            retry_delay = 1  # seconds
            last_error = None

            for attempt in range(max_retries):
                try:
                    # Add timeout to prevent hanging
                    import asyncio

                    result = await asyncio.wait_for(
                        app.tool_executor.remote.aio(
                            tool_source=self.tool.source_code,
                            tool_name=self.tool.name,
                            args_pickled=args_pickled,
                            agent_state_pickled=agent_state_pickled,
                            inject_agent_state=self.inject_agent_state,
                            is_async=self.is_async_function,
                            args_schema_code=args_schema_code,
                            environment_vars=envs,
                        ),
                        timeout=modal_config.timeout + 10,  # Add 10s buffer to Modal's own timeout
                    )
                    break  # Success, exit retry loop
                except asyncio.TimeoutError as e:
                    last_error = e
                    logger.warning(f"Modal execution timeout on attempt {attempt + 1}/{max_retries} for tool {self.tool_name}")
                    if attempt < max_retries - 1:
                        await asyncio.sleep(retry_delay)
                        retry_delay *= 2  # Exponential backoff
                except Exception as e:
                    last_error = e
                    # Check if it's a transient error worth retrying
                    error_str = str(e).lower()
                    if any(x in error_str for x in ["segmentation fault", "sigsegv", "connection", "timeout"]):
                        logger.warning(f"Transient error on attempt {attempt + 1}/{max_retries} for tool {self.tool_name}: {e}")
                        if attempt < max_retries - 1:
                            await asyncio.sleep(retry_delay)
                            retry_delay *= 2
                        continue
                    # Non-transient error, don't retry
                    raise
            else:
                # All retries exhausted
                raise last_error

            # Process the result
            if result["error"]:
                logger.debug(f"Tool {self.tool_name} raised a {result['error']['name']}: {result['error']['value']}")
                logger.debug(f"Traceback from Modal sandbox: \n{result['error']['traceback']}")

                # Check for segfault indicators
                is_segfault = False
                if "SIGSEGV" in str(result["error"]["value"]) or "Segmentation fault" in str(result["error"]["value"]):
                    is_segfault = True
                    logger.error(f"SEGFAULT detected in tool {self.tool_name}: {result['error']['value']}")

                func_return = get_friendly_error_msg(
                    function_name=self.tool_name,
                    exception_name=result["error"]["name"],
                    exception_message=result["error"]["value"],
                )
                log_event(
                    "modal_execution_failed",
                    {
                        "tool": self.tool_name,
                        "app_name": self._deployment_manager._app_name,
                        "version": self._version_hash,
                        "error_type": result["error"]["name"],
                        "error_message": result["error"]["value"],
                        "func_return": func_return,
                        "is_segfault": is_segfault,
                        "stdout": result.get("stdout", ""),
                        "stderr": result.get("stderr", ""),
                    },
                )
                status = "error"
            else:
                func_return = result["result"]
                agent_state = result["agent_state"]
                log_event(
                    "modal_v2_execution_succeeded",
                    {
                        "tool": self.tool_name,
                        "app_name": self._deployment_manager._app_name,
                        "version": self._version_hash,
                        "func_return": str(func_return)[:500],  # Limit logged result size
                        "stdout_size": len(result.get("stdout", "")),
                        "stderr_size": len(result.get("stderr", "")),
                    },
                )
                status = "success"

            return ToolExecutionResult(
                func_return=func_return,
                agent_state=agent_state if not result["error"] else None,
                stdout=[result["stdout"]] if result["stdout"] else [],
                stderr=[result["stderr"]] if result["stderr"] else [],
                status=status,
                sandbox_config_fingerprint=sbx_config.fingerprint(),
            )

        except Exception as e:
            import traceback

            error_context = {
                "tool": self.tool_name,
                "app_name": self._deployment_manager._app_name,
                "version": self._version_hash,
                "error_type": type(e).__name__,
                "error_message": str(e),
                "traceback": traceback.format_exc(),
            }
            logger.error(f"Modal V2 execution for tool {self.tool_name} encountered an error: {e}", extra=error_context)

            # Determine if this is a deployment error or execution error
            if "deploy" in str(e).lower() or "modal" in str(e).lower():
                error_category = "deployment_error"
            else:
                error_category = "execution_error"

            func_return = get_friendly_error_msg(
                function_name=self.tool_name,
                exception_name=type(e).__name__,
                exception_message=str(e),
            )
            log_event(f"modal_v2_{error_category}", error_context)

            return ToolExecutionResult(
                func_return=func_return,
                agent_state=None,
                stdout=[],
                stderr=[f"{type(e).__name__}: {str(e)}\n{traceback.format_exc()}"],
                status="error",
                sandbox_config_fingerprint=sbx_config.fingerprint(),
            )

    def _get_modal_image(self, sbx_config: SandboxConfig) -> modal.Image:
        """Get Modal image with required public python dependencies.

        Caching and rebuilding is handled in a cascading manner
        https://modal.com/docs/guide/images#image-caching-and-rebuilds
        """
        # Start with a more robust base image with development tools
        image = modal.Image.debian_slim(python_version=DEFAULT_PYTHON_VERSION)

        # Add system packages for better C extension support
        image = image.apt_install(
            "build-essential",  # Compilation tools
            "libsqlite3-dev",  # SQLite development headers
            "libffi-dev",  # Foreign Function Interface library
            "libssl-dev",  # OpenSSL development headers
            "python3-dev",  # Python development headers
        )

        # Include dependencies required by letta's ORM modules
        # These are needed when unpickling agent_state objects
        all_requirements = [
            "letta",
            "sqlite-vec>=0.1.7a2",  # Required for SQLite vector operations
            "numpy<2.0",  # Pin numpy to avoid compatibility issues
        ]

        # Add sandbox-specific pip requirements
        modal_configs = sbx_config.get_modal_config()
        if modal_configs.pip_requirements:
            all_requirements.extend([str(req) for req in modal_configs.pip_requirements])

        # Add tool-specific pip requirements
        if self.tool and self.tool.pip_requirements:
            all_requirements.extend([str(req) for req in self.tool.pip_requirements])

        if all_requirements:
            image = image.pip_install(*all_requirements)

        return image
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/tool_sandbox/modal_sandbox_v2.py",
"license": "Apache License 2.0",
"lines": 389,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/services/tool_sandbox/modal_version_manager.py | """
This module tracks and manages deployed app versions. We currently use the tools.metadata field
to store the information detailing modal deployments and when we need to redeploy due to changes.
Modal Version Manager - Tracks and manages deployed Modal app versions.
"""
import asyncio
import time
from datetime import datetime
from typing import Any
import modal
from pydantic import BaseModel, ConfigDict, Field
from letta.log import get_logger
from letta.schemas.tool import ToolUpdate
from letta.services.tool_manager import ToolManager
from letta.services.tool_sandbox.modal_constants import CACHE_TTL_SECONDS, DEFAULT_CONFIG_KEY, MODAL_DEPLOYMENTS_KEY
from letta.utils import safe_create_task
logger = get_logger(__name__)
class DeploymentInfo(BaseModel):
    """Information about a deployed Modal app."""

    # Fix: the docstring must be the FIRST statement in the class body; previously it
    # appeared after model_config and was therefore a no-op string expression,
    # leaving DeploymentInfo.__doc__ unset.
    model_config = ConfigDict(arbitrary_types_allowed=True)

    app_name: str = Field(..., description="The name of the modal app.")
    version_hash: str = Field(..., description="The version hash of the modal app.")
    deployed_at: datetime = Field(..., description="The time the modal app was deployed.")
    dependencies: set[str] = Field(default_factory=set, description="A set of dependencies.")
    # Typed as Any (not modal.App) so pydantic need not validate the Modal type;
    # excluded from serialization because the live app handle is process-local.
    app_reference: Any = Field(None, description="The reference to the modal app.", exclude=True)
class ModalVersionManager:
"""Manages versions and deployments of Modal apps using tools.metadata."""
    def __init__(self):
        """Initialize manager state; all coordination structures are per-process."""
        self.tool_manager = ToolManager()
        # Per-(tool, config) asyncio locks serializing deployments within this process.
        self._deployment_locks: dict[str, asyncio.Lock] = {}
        # cache_key -> (DeploymentInfo, insertion time); entries expire after CACHE_TTL_SECONDS.
        self._cache: dict[str, tuple[DeploymentInfo, float]] = {}
        # deployment_key -> Event that is set when that exact version finishes deploying.
        self._deployments_in_progress: dict[str, asyncio.Event] = {}
        self._deployments: dict[str, DeploymentInfo] = {}  # Track all deployments for stats
@staticmethod
def _make_cache_key(tool_id: str, sandbox_config_id: str | None = None) -> str:
"""Generate cache key for tool and config combination."""
return f"{tool_id}:{sandbox_config_id or DEFAULT_CONFIG_KEY}"
@staticmethod
def _get_config_key(sandbox_config_id: str | None = None) -> str:
"""Get standardized config key."""
return sandbox_config_id or DEFAULT_CONFIG_KEY
def _is_cache_valid(self, timestamp: float) -> bool:
"""Check if cache entry is still valid."""
return time.time() - timestamp < CACHE_TTL_SECONDS
def _get_deployment_metadata(self, tool) -> dict:
"""Get or initialize modal deployments metadata."""
if not tool.metadata_:
tool.metadata_ = {}
if MODAL_DEPLOYMENTS_KEY not in tool.metadata_:
tool.metadata_[MODAL_DEPLOYMENTS_KEY] = {}
return tool.metadata_[MODAL_DEPLOYMENTS_KEY]
def _create_deployment_data(self, app_name: str, version_hash: str, dependencies: set[str]) -> dict:
"""Create deployment data dictionary for metadata storage."""
return {
"app_name": app_name,
"version_hash": version_hash,
"deployed_at": datetime.now().isoformat(),
"dependencies": list(dependencies),
}
    async def get_deployment(self, tool_id: str, sandbox_config_id: str | None = None, actor=None) -> DeploymentInfo | None:
        """Get deployment info from tool metadata.

        Checks the in-process TTL cache first; on a miss, reads the tool's
        metadata_ and caches the reconstructed DeploymentInfo.
        Returns None when the tool or its deployment record is absent.
        """
        cache_key = self._make_cache_key(tool_id, sandbox_config_id)
        if cache_key in self._cache:
            info, timestamp = self._cache[cache_key]
            if self._is_cache_valid(timestamp):
                return info

        tool = self.tool_manager.get_tool_by_id(tool_id, actor=actor)
        if not tool or not tool.metadata_:
            return None

        modal_deployments = tool.metadata_.get(MODAL_DEPLOYMENTS_KEY, {})
        config_key = self._get_config_key(sandbox_config_id)
        if config_key not in modal_deployments:
            return None

        deployment_data = modal_deployments[config_key]
        info = DeploymentInfo(
            app_name=deployment_data["app_name"],
            version_hash=deployment_data["version_hash"],
            deployed_at=datetime.fromisoformat(deployment_data["deployed_at"]),
            dependencies=set(deployment_data.get("dependencies", [])),
            # The live app handle is process-local and never persisted.
            app_reference=None,
        )
        self._cache[cache_key] = (info, time.time())
        return info
    async def register_deployment(
        self,
        tool_id: str,
        app_name: str,
        version_hash: str,
        app: modal.App,
        dependencies: set[str] | None = None,
        sandbox_config_id: str | None = None,
        actor=None,
    ) -> DeploymentInfo:
        """Register a new deployment in tool metadata.

        Persists the deployment record onto the tool's metadata_ (keyed by
        sandbox config), refreshes the in-process cache, and returns the new
        DeploymentInfo.

        Raises:
            ValueError: If the tool does not exist.
        """
        cache_key = self._make_cache_key(tool_id, sandbox_config_id)
        config_key = self._get_config_key(sandbox_config_id)

        # Serialize concurrent registrations for the same tool+config.
        async with self.get_deployment_lock(cache_key):
            tool = self.tool_manager.get_tool_by_id(tool_id, actor=actor)
            if not tool:
                raise ValueError(f"Tool {tool_id} not found")

            modal_deployments = self._get_deployment_metadata(tool)
            info = DeploymentInfo(
                app_name=app_name,
                version_hash=version_hash,
                deployed_at=datetime.now(),
                dependencies=dependencies or set(),
                app_reference=app,
            )
            modal_deployments[config_key] = self._create_deployment_data(app_name, version_hash, info.dependencies)

            # Use ToolUpdate to update metadata
            tool_update = ToolUpdate(metadata_=tool.metadata_)
            await self.tool_manager.update_tool_by_id_async(
                tool_id=tool_id,
                tool_update=tool_update,
                actor=actor,
            )

            self._cache[cache_key] = (info, time.time())
            self._deployments[cache_key] = info  # Track for stats
            return info
async def needs_redeployment(self, tool_id: str, current_version: str, sandbox_config_id: str | None = None, actor=None) -> bool:
"""Check if an app needs to be redeployed."""
deployment = await self.get_deployment(tool_id, sandbox_config_id, actor=actor)
if not deployment:
return True
return deployment.version_hash != current_version
def get_deployment_lock(self, cache_key: str) -> asyncio.Lock:
"""Get or create a deployment lock for a tool+config combination."""
if cache_key not in self._deployment_locks:
self._deployment_locks[cache_key] = asyncio.Lock()
return self._deployment_locks[cache_key]
def mark_deployment_in_progress(self, cache_key: str, version_hash: str) -> str:
    """Mark that a deployment is in progress for a specific version.

    Returns a unique deployment ID that should be used to complete/fail the deployment.
    """
    key = f"{cache_key}:{version_hash}"
    # Idempotent: a second caller for the same version reuses the existing Event.
    self._deployments_in_progress.setdefault(key, asyncio.Event())
    return key
def is_deployment_in_progress(self, cache_key: str, version_hash: str) -> bool:
    """Report whether a deployment of this exact version is currently underway."""
    return f"{cache_key}:{version_hash}" in self._deployments_in_progress
async def wait_for_deployment(self, cache_key: str, version_hash: str, timeout: float = 120) -> bool:
    """Wait for an in-progress deployment to complete.

    Returns True if deployment completed within timeout, False otherwise.
    """
    key = f"{cache_key}:{version_hash}"
    pending = self._deployments_in_progress.get(key)
    if pending is None:
        # Nothing is deploying — treat as already complete.
        return True
    try:
        await asyncio.wait_for(pending.wait(), timeout=timeout)
    except asyncio.TimeoutError:
        return False
    return True
def complete_deployment(self, deployment_key: str):
    """Mark a deployment as complete and wake up any waiters.

    deployment_key is the "<cache_key>:<version_hash>" ID returned by
    mark_deployment_in_progress. Unknown keys are ignored.
    """
    if deployment_key in self._deployments_in_progress:
        self._deployments_in_progress[deployment_key].set()
        # Clean up after a short delay to allow waiters to wake up
        safe_create_task(self._cleanup_deployment_marker(deployment_key), label=f"cleanup_deployment_{deployment_key}")
async def _cleanup_deployment_marker(self, deployment_key: str):
    """Drop the in-progress marker once waiters have had time to wake up."""
    await asyncio.sleep(5)  # Give waiters time to wake up
    # pop() with a default tolerates the marker already being gone.
    self._deployments_in_progress.pop(deployment_key, None)
async def force_redeploy(self, tool_id: str, sandbox_config_id: str | None = None, actor=None):
    """Force a redeployment by removing deployment info from tool metadata.

    Deletes the persisted deployment record for this tool+config (if any) and
    invalidates the local cache entry so the next lookup triggers a redeploy.

    Args:
        tool_id: ID of the tool whose deployment record should be dropped.
        sandbox_config_id: Optional sandbox config scoping the record.
        actor: Caller identity forwarded to the tool manager.
    """
    cache_key = self._make_cache_key(tool_id, sandbox_config_id)
    config_key = self._get_config_key(sandbox_config_id)
    async with self.get_deployment_lock(cache_key):
        tool = self.tool_manager.get_tool_by_id(tool_id, actor=actor)
        if not tool or not tool.metadata_:
            # Nothing persisted for this tool; nothing to clear.
            return
        modal_deployments = tool.metadata_.get(MODAL_DEPLOYMENTS_KEY, {})
        if config_key in modal_deployments:
            del modal_deployments[config_key]
            # Use ToolUpdate to update metadata
            # NOTE(review): assumes the dict obtained above aliases
            # tool.metadata_[MODAL_DEPLOYMENTS_KEY], so the deletion is
            # persisted by this update — confirm.
            tool_update = ToolUpdate(metadata_=tool.metadata_)
            await self.tool_manager.update_tool_by_id_async(
                tool_id=tool_id,
                tool_update=tool_update,
                actor=actor,
            )
        # Invalidate the read cache regardless of whether a record was persisted.
        if cache_key in self._cache:
            del self._cache[cache_key]
def clear_deployments(self):
    """Reset all in-memory deployment tracking (for testing purposes)."""
    for registry in (self._deployments, self._cache, self._deployments_in_progress):
        registry.clear()
async def get_deployment_stats(self) -> dict:
    """Summarize tracked deployments as a statistics dict."""
    total = len(self._deployments)
    active = sum(1 for dep in self._deployments.values() if dep)
    entries = [
        {
            "app_name": dep.app_name,
            "version": dep.version_hash,
            "usage_count": 1,  # Track usage in future
            "deployed_at": dep.deployed_at.isoformat(),
        }
        for dep in self._deployments.values()
        if dep
    ]
    return {
        "total_deployments": total,
        "active_deployments": active,
        "stale_deployments": total - active,
        "deployments": entries,
    }
# Process-wide singleton, created lazily by get_version_manager().
_version_manager = None


def get_version_manager() -> ModalVersionManager:
    """Return the global Modal version manager, creating it on first call."""
    global _version_manager
    if _version_manager is not None:
        return _version_manager
    _version_manager = ModalVersionManager()
    return _version_manager
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/tool_sandbox/modal_version_manager.py",
"license": "Apache License 2.0",
"lines": 221,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/services/tool_sandbox/safe_pickle.py | """Safe pickle serialization wrapper for Modal sandbox.
This module provides defensive serialization utilities to prevent segmentation
faults and other crashes when passing complex objects to Modal containers.
"""
import pickle
import sys
from typing import Any, Optional, Tuple
from letta.log import get_logger
logger = get_logger(__name__)
# Serialization limits
MAX_PICKLE_SIZE = 10 * 1024 * 1024 # 10MB limit
MAX_RECURSION_DEPTH = 50 # Prevent deep object graphs
PICKLE_PROTOCOL = 4 # Use protocol 4 for better compatibility
# Single error type surfaced by this module so callers can catch one exception
# for size-limit, depth-limit, and (un)serialization failures alike.
class SafePickleError(Exception):
    """Raised when safe pickling fails."""
class RecursionLimiter:
    """Context manager that temporarily lowers the interpreter recursion limit.

    On entry the limit becomes min(max_depth, current limit); on exit the
    previous limit is restored.
    """

    def __init__(self, max_depth: int):
        self.max_depth = max_depth
        self.original_limit = None

    def __enter__(self):
        self.original_limit = sys.getrecursionlimit()
        sys.setrecursionlimit(min(self.max_depth, self.original_limit))
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        previous = self.original_limit
        if previous is not None:
            sys.setrecursionlimit(previous)
def safe_pickle_dumps(obj: Any, max_size: int = MAX_PICKLE_SIZE) -> bytes:
    """Safely pickle an object with size and recursion limits.

    Args:
        obj: The object to pickle
        max_size: Maximum allowed pickle size in bytes

    Returns:
        bytes: The pickled object

    Raises:
        SafePickleError: If pickling fails or exceeds limits
    """

    def check_depth(node, depth=0):
        # Reject overly deep graphs. Cycles are also caught here, since a cycle
        # keeps increasing the depth until the limit trips.
        if depth > MAX_RECURSION_DEPTH:
            raise SafePickleError(f"Object graph too deep (depth > {MAX_RECURSION_DEPTH})")
        if isinstance(node, (list, tuple)):
            for item in node:
                check_depth(item, depth + 1)
        elif isinstance(node, dict):
            # Only values are traversed (keys are typically shallow scalars).
            for value in node.values():
                check_depth(value, depth + 1)
        elif hasattr(node, "__dict__"):
            check_depth(node.__dict__, depth + 1)

    try:
        # FIX: validate depth BEFORE pickling. The previous ordering pickled
        # first and checked depth afterwards, so the guard could not protect
        # the pickling step it exists to guard against crashing.
        check_depth(obj)

        data = pickle.dumps(obj, protocol=PICKLE_PROTOCOL)
        if len(data) > max_size:
            raise SafePickleError(f"Pickle size {len(data)} exceeds limit {max_size}")

        logger.debug(f"Successfully pickled object of size {len(data)} bytes")
        return data
    except SafePickleError:
        raise
    except RecursionError as e:
        # pickle itself can still recurse deeply via custom __reduce__ chains.
        raise SafePickleError(f"Object graph too deep: {e}")
    except Exception as e:
        raise SafePickleError(f"Failed to pickle object: {e}")
def safe_pickle_loads(data: bytes) -> Any:
    """Safely unpickle data with error handling.

    Args:
        data: The pickled data

    Returns:
        Any: The unpickled object

    Raises:
        SafePickleError: If unpickling fails
    """
    # Reject degenerate inputs up front.
    if not data:
        raise SafePickleError("Cannot unpickle empty data")
    if len(data) > MAX_PICKLE_SIZE:
        raise SafePickleError(f"Pickle data size {len(data)} exceeds limit {MAX_PICKLE_SIZE}")

    try:
        loaded = pickle.loads(data)
        logger.debug(f"Successfully unpickled object from {len(data)} bytes")
        return loaded
    except Exception as e:
        raise SafePickleError(f"Failed to unpickle data: {e}")
def try_pickle_with_fallback(obj: Any, fallback_value: Any = None, max_size: int = MAX_PICKLE_SIZE) -> Tuple[Optional[bytes], bool]:
    """Try to pickle an object, substituting a fallback value on failure.

    Args:
        obj: The object to pickle
        fallback_value: Value to use if pickling fails
        max_size: Maximum allowed pickle size

    Returns:
        Tuple of (pickled_data or None, success_flag); the flag is True only
        when *obj* itself was pickled.
    """
    try:
        return safe_pickle_dumps(obj, max_size), True
    except SafePickleError as e:
        logger.warning(f"Failed to pickle object, using fallback: {e}")

    if fallback_value is not None:
        try:
            return safe_pickle_dumps(fallback_value, max_size), False
        except SafePickleError:
            pass
    return None, False
def validate_pickleable(obj: Any) -> bool:
    """Check if an object can be safely pickled.

    Args:
        obj: The object to validate

    Returns:
        bool: True if the object can be pickled safely
    """
    try:
        safe_pickle_dumps(obj, max_size=MAX_PICKLE_SIZE)
    except SafePickleError:
        return False
    return True
def sanitize_for_pickle(obj: Any) -> Any:
    """Sanitize an object for safe pickling.

    This function attempts to make an object pickleable by converting
    problematic types to safe alternatives.

    Args:
        obj: The object to sanitize

    Returns:
        Any: A sanitized version of the object
    """
    if not hasattr(obj, "__dict__"):
        # Plain values (numbers, strings, containers, ...) go through as-is
        # and pickle deals with them directly.
        return obj

    # For objects with __dict__, build a dict of sanitized public attributes.
    safe_attrs = {}
    for name, value in obj.__dict__.items():
        if name.startswith("_"):
            continue  # Skip private attributes
        if callable(value):
            safe_attrs[name] = f"<function {value.__name__}>"
        elif hasattr(value, "__module__"):
            safe_attrs[name] = f"<{value.__class__.__name__} object>"
        else:
            try:
                # Keep the value only if it pickles cleanly.
                pickle.dumps(value, protocol=PICKLE_PROTOCOL)
            except Exception:
                safe_attrs[name] = str(value)
            else:
                safe_attrs[name] = value
    return safe_attrs
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/tool_sandbox/safe_pickle.py",
"license": "Apache License 2.0",
"lines": 148,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:sandbox/modal_executor.py | """Modal function executor for tool sandbox v2.
This module contains the executor function that runs inside Modal containers
to execute tool functions with dynamically passed arguments.
"""
import faulthandler
import signal
from typing import Any, Dict
import modal
# List of safe modules that can be imported in schema code
# (allow-list enforced by ModalFunctionExecutor.execute_tool_dynamic before
# exec'ing args_schema_code; only the top-level package name is checked).
SAFE_IMPORT_MODULES = {
    "typing",
    "datetime",
    "uuid",
    "enum",
    "decimal",
    "collections",
    "abc",
    "dataclasses",
    "pydantic",
    "typing_extensions",
}
class ModalFunctionExecutor:
"""Executes tool functions in Modal with dynamic argument passing."""
@staticmethod
def execute_tool_dynamic(
tool_source: str,
tool_name: str,
args_pickled: bytes,
agent_state_pickled: bytes | None,
inject_agent_state: bool,
is_async: bool,
args_schema_code: str | None,
) -> dict[str, Any]:
"""Execute a tool function with dynamically passed arguments.
This function runs inside the Modal container and receives all parameters
at runtime rather than having them embedded in a script.
"""
import asyncio
import pickle
import sys
import traceback
from io import StringIO
# Enable fault handler for better debugging of segfaults
faulthandler.enable()
stdout_capture = StringIO()
stderr_capture = StringIO()
old_stdout = sys.stdout
old_stderr = sys.stderr
try:
sys.stdout = stdout_capture
sys.stderr = stderr_capture
# Safely unpickle arguments with size validation
if not args_pickled:
raise ValueError("No arguments provided")
if len(args_pickled) > 10 * 1024 * 1024: # 10MB limit
raise ValueError(f"Pickled args too large: {len(args_pickled)} bytes")
try:
args = pickle.loads(args_pickled)
except Exception as e:
raise ValueError(f"Failed to unpickle arguments: {e}")
agent_state = None
if agent_state_pickled:
if len(agent_state_pickled) > 10 * 1024 * 1024: # 10MB limit
raise ValueError(f"Pickled agent state too large: {len(agent_state_pickled)} bytes")
try:
agent_state = pickle.loads(agent_state_pickled)
except Exception as e:
# Log but don't fail - agent state is optional
print(f"Warning: Failed to unpickle agent state: {e}", file=sys.stderr)
agent_state = None
exec_globals = {
"__name__": "__main__",
"__builtins__": __builtins__,
}
if args_schema_code:
import ast
try:
tree = ast.parse(args_schema_code)
for node in ast.walk(tree):
if isinstance(node, ast.Import):
for alias in node.names:
module_name = alias.name.split(".")[0]
if module_name not in SAFE_IMPORT_MODULES:
raise ValueError(f"Import of '{module_name}' not allowed in schema code")
elif isinstance(node, ast.ImportFrom):
if node.module:
module_name = node.module.split(".")[0]
if module_name not in SAFE_IMPORT_MODULES:
raise ValueError(f"Import from '{module_name}' not allowed in schema code")
exec(compile(tree, "<schema>", "exec"), exec_globals)
except (SyntaxError, ValueError) as e:
raise ValueError(f"Invalid or unsafe schema code: {e}")
exec(tool_source, exec_globals)
if tool_name not in exec_globals:
raise ValueError(f"Function '{tool_name}' not found in tool source code")
func = exec_globals[tool_name]
kwargs = dict(args)
if inject_agent_state:
kwargs["agent_state"] = agent_state
try:
from letta.functions.ast_parsers import coerce_dict_args_by_annotations
annotations = getattr(func, "__annotations__", {})
kwargs = coerce_dict_args_by_annotations(
kwargs,
annotations,
allow_unsafe_eval=True,
extra_globals=func.__globals__,
)
except Exception:
pass
if is_async:
result = asyncio.run(func(**kwargs))
else:
result = func(**kwargs)
try:
from pydantic import BaseModel, ConfigDict
class _TempResultWrapper(BaseModel):
model_config = ConfigDict(arbitrary_types_allowed=True)
result: Any
wrapped = _TempResultWrapper(result=result)
serialized_result = wrapped.model_dump()["result"]
except (ImportError, Exception):
serialized_result = str(result)
return {
"result": serialized_result,
"agent_state": agent_state,
"stdout": stdout_capture.getvalue(),
"stderr": stderr_capture.getvalue(),
"error": None,
}
except Exception as e:
return {
"result": None,
"agent_state": None,
"stdout": stdout_capture.getvalue(),
"stderr": stderr_capture.getvalue(),
"error": {
"name": type(e).__name__,
"value": str(e),
"traceback": traceback.format_exc(),
},
}
finally:
sys.stdout = old_stdout
sys.stderr = old_stderr
def setup_signal_handlers():
    """Install SIGSEGV/SIGABRT handlers that dump a stack trace before exiting."""

    def _report_and_exit(label, exit_code):
        # Factory producing a handler that prints the signal, dumps the
        # current stack, and exits with the conventional code for that signal.
        def _handler(signum, frame):
            import sys
            import traceback

            print(f"{label} detected! Signal: {signum}", file=sys.stderr)
            print("Stack trace:", file=sys.stderr)
            traceback.print_stack(frame, file=sys.stderr)
            sys.exit(exit_code)

        return _handler

    # Register signal handlers
    signal.signal(signal.SIGSEGV, _report_and_exit("SEGFAULT", 139))  # Standard segfault exit code
    signal.signal(signal.SIGABRT, _report_and_exit("ABORT", 134))  # Standard abort exit code
@modal.method()
def execute_tool_wrapper(
    self,
    tool_source: str,
    tool_name: str,
    args_pickled: bytes,
    agent_state_pickled: bytes | None,
    inject_agent_state: bool,
    is_async: bool,
    args_schema_code: str | None,
    environment_vars: Dict[str, str],
) -> Dict[str, Any]:
    """Wrapper function that runs in Modal container with enhanced error handling.

    Hardens the container (signal handlers, faulthandler, resource limits),
    applies environment variables, then delegates to
    ModalFunctionExecutor.execute_tool_dynamic. Any exception escaping the
    executor is converted into the same result-dict shape with an ``error``
    payload, so callers always receive a dict.
    """
    import os
    import resource
    import sys

    # Setup signal handlers for better crash debugging
    setup_signal_handlers()

    # Enable fault handler with file output
    try:
        faulthandler.enable(file=sys.stderr, all_threads=True)
    except Exception:
        pass  # Faulthandler might not be available

    # Set resource limits to prevent runaway processes
    try:
        # Limit memory usage to 1GB
        resource.setrlimit(resource.RLIMIT_AS, (1024 * 1024 * 1024, 1024 * 1024 * 1024))
        # Limit stack size to 8MB (default is often unlimited)
        resource.setrlimit(resource.RLIMIT_STACK, (8 * 1024 * 1024, 8 * 1024 * 1024))
    except Exception:
        pass  # Resource limits might not be available

    # Set environment variables
    for key, value in environment_vars.items():
        os.environ[key] = str(value)

    # Add debugging environment variables
    os.environ["PYTHONFAULTHANDLER"] = "1"
    os.environ["PYTHONDEVMODE"] = "1"

    try:
        # Execute the tool
        return ModalFunctionExecutor.execute_tool_dynamic(
            tool_source=tool_source,
            tool_name=tool_name,
            args_pickled=args_pickled,
            agent_state_pickled=agent_state_pickled,
            inject_agent_state=inject_agent_state,
            is_async=is_async,
            args_schema_code=args_schema_code,
        )
    except Exception as e:
        import traceback

        # Enhanced error reporting
        return {
            "result": None,
            "agent_state": None,
            "stdout": "",
            "stderr": f"Container execution failed: {traceback.format_exc()}",
            "error": {
                "name": type(e).__name__,
                "value": str(e),
                "traceback": traceback.format_exc(),
            },
        }
| {
"repo_id": "letta-ai/letta",
"file_path": "sandbox/modal_executor.py",
"license": "Apache License 2.0",
"lines": 226,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:sandbox/node_server.py | import modal
class NodeShimServer:
    """Modal container class hosting a Node.js shim that executes TypeScript tools.

    NOTE(review): assumes the Modal image places the server sources under
    /root/sandbox/resources/server and that the Node server listens on the
    Unix socket /tmp/my_unix_socket.sock — confirm against the image definition.
    """

    # This runs once startup
    @modal.enter()
    def start_server(self):
        """Build the TypeScript server, launch it in the background, and wait briefly."""
        import subprocess
        import time

        server_root_dir = "/root/sandbox/resources/server"
        # /app/server
        # Comment this in to show the updated user-function.ts file
        # subprocess.run(["sh", "-c", "cat /app/server/user-function.ts"], check=True)
        subprocess.run(["sh", "-c", f"cd {server_root_dir} && npm run build"], check=True)
        # Popen (not run): the server keeps running for the container's lifetime.
        subprocess.Popen(
            [
                "sh",
                "-c",
                f"cd {server_root_dir} && npm run start",
            ],
        )
        # NOTE(review): fixed 1s sleep assumes the server binds its socket quickly;
        # a readiness poll would be more robust — confirm typical startup time.
        time.sleep(1)
        print("🔮 Node server started and listening on /tmp/my_unix_socket.sock")

    @modal.method()
    def remote_executor(self, json_args: str):  # Dynamic TypeScript function execution
        """Execute a TypeScript function with JSON-encoded arguments.

        Args:
            json_args: JSON string containing the function arguments

        Returns:
            The result from the TypeScript function execution
        """
        import http.client
        import json
        import socket

        class UnixSocketHTTPConnection(http.client.HTTPConnection):
            # HTTPConnection subclass that talks over a Unix domain socket
            # instead of TCP; "localhost" is only a placeholder host name.
            def __init__(self, path):
                super().__init__("localhost")
                self.unix_path = path

            def connect(self):
                self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
                self.sock.connect(self.unix_path)

        try:
            # Connect to the Node.js server via Unix socket
            conn = UnixSocketHTTPConnection("/tmp/my_unix_socket.sock")

            # Send the JSON arguments directly to the server
            # The server will parse them and call the TypeScript function
            conn.request("POST", "/", body=json_args)
            response = conn.getresponse()
            output = response.read().decode()

            # Parse the response from the server
            try:
                output_json = json.loads(output)
                # Check if there was an error
                if "error" in output_json:
                    return {"error": output_json["error"]}
                # Return the successful result
                return output_json.get("result")
            except json.JSONDecodeError:
                # If the response isn't valid JSON, it's likely an error message
                return {"error": f"Invalid JSON response from TypeScript server: {output}"}
        except Exception as e:
            # Handle connection or other errors
            return {"error": f"Error executing TypeScript function: {str(e)}"}
| {
"repo_id": "letta-ai/letta",
"file_path": "sandbox/node_server.py",
"license": "Apache License 2.0",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:tests/helpers/plugins_helper.py | from letta.data_sources.redis_client import get_redis_client
from letta.services.agent_manager import AgentManager
async def is_experimental_okay(feature_name: str, **kwargs) -> bool:
    """Test plugin hook deciding whether an experimental feature is enabled.

    Each recognized feature name exercises a different activation path
    (kwarg type check, fixed outcome, kwarg passthrough, redis group lookup).
    Unrecognized names are disabled for safety.
    """
    print(feature_name, kwargs)
    if feature_name == "test_pass_with_kwarg":
        return isinstance(kwargs["agent_manager"], AgentManager)
    if feature_name == "test_override_kwarg":
        return kwargs["bool_val"]
    if feature_name == "test_redis_flag":
        client = await get_redis_client()
        user_id = kwargs["user_id"]
        return await client.check_inclusion_and_exclusion(member=user_id, group="TEST_GROUP")
    fixed_outcomes = {"test_just_pass": True, "test_fail": False}
    # Err on safety here, disabling experimental if not handled here.
    return fixed_outcomes.get(feature_name, False)
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/helpers/plugins_helper.py",
"license": "Apache License 2.0",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/integration_test_builtin_tools.py | import json
import os
import threading
import time
import uuid
from unittest.mock import MagicMock, patch
import pytest
import requests
from dotenv import load_dotenv
from letta_client import Letta
from letta_client.types import AgentState, MessageCreateParam, ToolReturnMessage
from letta_client.types.agents import ToolCallMessage
from letta.services.tool_executor.builtin_tool_executor import LettaBuiltinToolExecutor
from letta.settings import tool_settings
# ------------------------------
# Fixtures
# ------------------------------
@pytest.fixture(scope="module")
def server_url() -> str:
    """
    Provides the URL for the Letta server.

    If LETTA_SERVER_URL is not set, starts the server in a background thread
    and polls until it’s accepting connections.
    """

    def _run_server() -> None:
        # Runs in a daemon thread; .env is loaded so the server sees API keys.
        load_dotenv()
        from letta.server.rest_api.app import start_server

        start_server(debug=True)

    url: str = os.getenv("LETTA_SERVER_URL", "http://localhost:8283")

    if not os.getenv("LETTA_SERVER_URL"):
        thread = threading.Thread(target=_run_server, daemon=True)
        thread.start()

        # Poll until the server is up (or timeout)
        timeout_seconds = 60
        deadline = time.time() + timeout_seconds
        while time.time() < deadline:
            try:
                resp = requests.get(url + "/v1/health")
                # Any non-5xx response means the server is accepting requests.
                if resp.status_code < 500:
                    break
            except requests.exceptions.RequestException:
                pass
            time.sleep(0.1)
        else:
            # while/else: the loop ran to exhaustion without breaking.
            raise RuntimeError(f"Could not reach {url} within {timeout_seconds}s")

    yield url
@pytest.fixture(scope="module")
def client(server_url: str) -> Letta:
    """
    Creates and returns a synchronous Letta REST client for testing.
    """
    # Module-scoped: one client instance is shared by all tests in this file.
    client_instance = Letta(base_url=server_url)
    yield client_instance
@pytest.fixture(scope="function")
def agent_state(client: Letta) -> AgentState:
    """
    Creates and returns an agent state for testing with a pre-configured agent.
    Uses system-level EXA_API_KEY setting.
    """
    # Attach only the tools under test (no base toolset) so tool-call
    # assertions aren't polluted by unrelated tools.
    send_message_tool = client.tools.list(name="send_message").items[0]
    run_code_tool = client.tools.list(name="run_code").items[0]
    web_search_tool = client.tools.list(name="web_search").items[0]

    agent_state_instance = client.agents.create(
        name="test_builtin_tools_agent",
        include_base_tools=False,
        tool_ids=[send_message_tool.id, run_code_tool.id, web_search_tool.id],
        model="openai/gpt-4o",
        embedding="openai/text-embedding-3-small",
        tags=["test_builtin_tools_agent"],
    )
    # NOTE(review): function-scoped but never deleted, so agents accumulate on
    # the server across tests; confirm whether teardown was intentionally
    # omitted (cf. agent_with_custom_tools, which does clean up).
    yield agent_state_instance
# ------------------------------
# Helper Functions and Constants
# ------------------------------
USER_MESSAGE_OTID = str(uuid.uuid4())
TEST_LANGUAGES = ["Python", "Javascript", "Typescript"]
EXPECTED_INTEGER_PARTITION_OUTPUT = "190569292"
# Reference implementation in Python, to embed in the user prompt
REFERENCE_CODE = """\
def reference_partition(n):
partitions = [1] + [0] * (n + 1)
for k in range(1, n + 1):
for i in range(k, n + 1):
partitions[i] += partitions[i - k]
return partitions[n]
"""
def reference_partition(n: int) -> int:
    """Compute p(n), the number of integer partitions of n (test oracle)."""
    # Same logic as the embedded REFERENCE_CODE; classic DP over part sizes.
    counts = [1] + [0] * (n + 1)
    for part in range(1, n + 1):
        for total in range(part, n + 1):
            counts[total] += counts[total - part]
    return counts[n]
# ------------------------------
# Test Cases
# ------------------------------
@pytest.mark.parametrize("language", TEST_LANGUAGES, ids=TEST_LANGUAGES)
def test_run_code(
    client: Letta,
    agent_state: AgentState,
    language: str,
) -> None:
    """
    Sends a reference Python implementation, asks the model to translate & run it
    in different languages, and verifies the exact partition(100) result.
    """
    # Compute the expected value locally with the same algorithm as the prompt.
    expected = str(reference_partition(100))

    user_message = MessageCreateParam(
        role="user",
        content=(
            "Here is a Python reference implementation:\n\n"
            f"{REFERENCE_CODE}\n"
            f"Please translate and execute this code in {language} to compute p(100), "
            "and return **only** the result with no extra formatting."
        ),
        otid=USER_MESSAGE_OTID,
    )

    response = client.agents.messages.create(
        agent_id=agent_state.id,
        messages=[user_message],
    )

    # run_code output surfaces as ToolReturnMessage entries in the response.
    tool_returns = [m for m in response.messages if isinstance(m, ToolReturnMessage)]
    assert tool_returns, f"No ToolReturnMessage found for language: {language}"

    # Accept the expected value anywhere in any tool return, since output
    # formatting varies by language runtime.
    returns = [m.tool_return for m in tool_returns]
    assert any(expected in ret for ret in returns), (
        f"For language={language!r}, expected to find '{expected}' in tool_return, but got {returns!r}"
    )
@pytest.mark.asyncio(scope="function")
async def test_web_search() -> None:
    """Test web search tool with mocked Exa API."""
    # create mock agent state with exa api key
    mock_agent_state = MagicMock()
    mock_agent_state.get_agent_env_vars_as_dict.return_value = {"EXA_API_KEY": "test-exa-key"}

    # Mock Exa search result with education information
    mock_exa_result = MagicMock()
    mock_exa_result.results = [
        MagicMock(
            title="Charles Packer - UC Berkeley PhD in Computer Science",
            url="https://example.com/charles-packer-profile",
            published_date="2023-01-01",
            author="UC Berkeley",
            text=None,
            highlights=["Charles Packer completed his PhD at UC Berkeley", "Research in artificial intelligence and machine learning"],
            summary="Charles Packer is the CEO of Letta who earned his PhD in Computer Science from UC Berkeley, specializing in AI research.",
        ),
        MagicMock(
            title="Letta Leadership Team",
            url="https://letta.com/team",
            published_date="2023-06-01",
            author="Letta",
            text=None,
            highlights=["CEO Charles Packer brings academic expertise"],
            summary="Leadership team page featuring CEO Charles Packer's educational background.",
        ),
    ]

    with patch("exa_py.Exa") as mock_exa_class:
        # Setup mock
        mock_exa_client = MagicMock()
        mock_exa_class.return_value = mock_exa_client
        mock_exa_client.search_and_contents.return_value = mock_exa_result

        # create executor with mock dependencies
        executor = LettaBuiltinToolExecutor(
            message_manager=MagicMock(),
            agent_manager=MagicMock(),
            block_manager=MagicMock(),
            run_manager=MagicMock(),
            passage_manager=MagicMock(),
            actor=MagicMock(),
        )

        # call web_search directly
        result = await executor.web_search(
            agent_state=mock_agent_state,
            query="where did Charles Packer, CEO of Letta, go to school",
            num_results=10,
            include_text=False,
        )

    # Parse the JSON response from web_search
    response_json = json.loads(result)

    # Basic structure assertions for new Exa format
    assert "query" in response_json, "Missing 'query' field in response"
    assert "results" in response_json, "Missing 'results' field in response"

    # Verify we got search results
    results = response_json["results"]
    assert len(results) == 2, "Should have found exactly 2 search results from mock"

    # Check each result has the expected structure
    found_education_info = False
    for result in results:
        assert "title" in result, "Result missing title"
        assert "url" in result, "Result missing URL"
        # text should not be present since include_text=False by default
        assert "text" not in result or result["text"] is None, "Text should not be included by default"

        # Check for education-related information in summary and highlights
        result_text = ""
        if result.get("summary"):
            result_text += " " + result["summary"].lower()
        if result.get("highlights"):
            for highlight in result["highlights"]:
                result_text += " " + highlight.lower()

        # Look for education keywords
        if any(keyword in result_text for keyword in ["berkeley", "university", "phd", "ph.d", "education", "student"]):
            found_education_info = True

    assert found_education_info, "Should have found education-related information about Charles Packer"

    # Verify Exa was called with correct parameters
    mock_exa_class.assert_called_once_with(api_key="test-exa-key")
    mock_exa_client.search_and_contents.assert_called_once()
    call_args = mock_exa_client.search_and_contents.call_args
    assert call_args[1]["type"] == "auto"
    assert call_args[1]["text"] is False  # Default is False now
@pytest.mark.asyncio(scope="function")
async def test_web_search_uses_exa():
    """Test that web search uses Exa API correctly."""
    # create mock agent state with exa api key
    mock_agent_state = MagicMock()
    mock_agent_state.get_agent_env_vars_as_dict.return_value = {"EXA_API_KEY": "test-exa-key"}

    # Mock exa search result
    mock_exa_result = MagicMock()
    mock_exa_result.results = [
        MagicMock(
            title="Test Result",
            url="https://example.com/test",
            published_date="2023-01-01",
            author="Test Author",
            text="This is test content from the search result.",
            highlights=["This is a highlight"],
            summary="This is a summary of the content.",
        )
    ]

    with patch("exa_py.Exa") as mock_exa_class:
        # Mock Exa
        mock_exa_client = MagicMock()
        mock_exa_class.return_value = mock_exa_client
        mock_exa_client.search_and_contents.return_value = mock_exa_result

        # create executor with mock dependencies
        executor = LettaBuiltinToolExecutor(
            message_manager=MagicMock(),
            agent_manager=MagicMock(),
            block_manager=MagicMock(),
            run_manager=MagicMock(),
            passage_manager=MagicMock(),
            actor=MagicMock(),
        )

        # include_text=True here, unlike test_web_search above.
        result = await executor.web_search(agent_state=mock_agent_state, query="test query", num_results=3, include_text=True)

    # Verify Exa was called correctly
    mock_exa_class.assert_called_once_with(api_key="test-exa-key")
    mock_exa_client.search_and_contents.assert_called_once()

    # Check the call arguments
    call_args = mock_exa_client.search_and_contents.call_args
    assert call_args[1]["query"] == "test query"
    assert call_args[1]["num_results"] == 3
    assert call_args[1]["type"] == "auto"
    assert call_args[1]["text"] == True

    # Verify the response format
    response_json = json.loads(result)
    assert "query" in response_json
    assert "results" in response_json
    assert response_json["query"] == "test query"
    assert len(response_json["results"]) == 1
# ------------------------------
# Programmatic Tool Calling Tests
# ------------------------------
ADD_TOOL_SOURCE = """
def add(a: int, b: int) -> int:
\"\"\"Add two numbers together.
Args:
a (int): The first number.
b (int): The second number.
Returns:
int: The sum of a and b.
\"\"\"
return a + b
"""
MULTIPLY_TOOL_SOURCE = """
def multiply(a: int, b: int) -> int:
\"\"\"Multiply two numbers together.
Args:
a (int): The first number.
b (int): The second number.
Returns:
int: The product of a and b.
\"\"\"
return a * b
"""
@pytest.fixture(scope="function")
def agent_with_custom_tools(client: Letta) -> AgentState:
    """
    Creates an agent with custom add/multiply tools and run_code tool
    to test programmatic tool calling.
    """
    # Create custom tools
    add_tool = client.tools.create(source_code=ADD_TOOL_SOURCE)
    multiply_tool = client.tools.create(source_code=MULTIPLY_TOOL_SOURCE)

    # Get the run_code tool
    run_code_tool = client.tools.list(name="run_code").items[0]
    send_message_tool = client.tools.list(name="send_message").items[0]

    agent_state_instance = client.agents.create(
        name="test_programmatic_tool_calling_agent",
        include_base_tools=False,
        tool_ids=[send_message_tool.id, run_code_tool.id, add_tool.id, multiply_tool.id],
        model="openai/gpt-4o",
        embedding="openai/text-embedding-3-small",
        tags=["test_programmatic_tool_calling"],
    )
    yield agent_state_instance

    # Cleanup: delete the agent first, then the tools it referenced.
    client.agents.delete(agent_state_instance.id)
    client.tools.delete(add_tool.id)
    client.tools.delete(multiply_tool.id)
def test_programmatic_tool_calling_compose_tools(
    client: Letta,
    agent_with_custom_tools: AgentState,
) -> None:
    """
    Tests that run_code can compose agent tools programmatically in a SINGLE call.

    This validates that:
    1. Tool source code is injected into the sandbox
    2. Claude composes tools in one run_code call, not multiple separate tool calls
    3. The result is computed correctly: add(multiply(4, 5), 6) = 26
    """
    # Expected result: multiply(4, 5) = 20, add(20, 6) = 26
    expected = "26"

    user_message = MessageCreateParam(
        role="user",
        content=(
            "Use the run_code tool to execute Python code that composes the add and multiply tools. "
            "Calculate add(multiply(4, 5), 6) and return the result. "
            "The add and multiply functions are already available in the code execution environment. "
            "Do this in a SINGLE run_code call - do NOT call add or multiply as separate tools."
        ),
        otid=str(uuid.uuid4()),
    )

    response = client.agents.messages.create(
        agent_id=agent_with_custom_tools.id,
        messages=[user_message],
    )

    # Extract all tool calls
    tool_calls = [m for m in response.messages if isinstance(m, ToolCallMessage)]
    assert tool_calls, "No ToolCallMessage found for programmatic tool calling test"

    # Verify the agent used run_code to compose tools, not direct add/multiply calls
    tool_names = [m.tool_call.name for m in tool_calls]
    run_code_calls = [name for name in tool_names if name == "run_code"]
    direct_add_calls = [name for name in tool_names if name == "add"]
    direct_multiply_calls = [name for name in tool_names if name == "multiply"]

    # The key assertion: tools should be composed via run_code, not called directly
    assert len(run_code_calls) >= 1, f"Expected at least one run_code call, but got tool calls: {tool_names}"
    assert len(direct_add_calls) == 0, (
        f"Expected no direct 'add' tool calls (should be called via run_code), but found {len(direct_add_calls)}"
    )
    assert len(direct_multiply_calls) == 0, (
        f"Expected no direct 'multiply' tool calls (should be called via run_code), but found {len(direct_multiply_calls)}"
    )

    # Verify the result is correct
    tool_returns = [m for m in response.messages if isinstance(m, ToolReturnMessage)]
    returns = [m.tool_return for m in tool_returns]
    assert any(expected in ret for ret in returns), f"Expected to find '{expected}' in tool_return, but got {returns!r}"
@pytest.mark.asyncio(scope="function")
async def test_run_code_injects_tool_source_code() -> None:
    """
    Unit test that verifies run_code injects agent tool source code into the sandbox.
    This test directly calls run_code with a mocked agent_state containing tools.
    """
    from letta.schemas.tool import Tool
    # Create mock agent state with tools that have source code
    mock_agent_state = MagicMock()
    mock_agent_state.tools = [
        Tool(
            id="tool-00000001",
            name="add",
            source_code=ADD_TOOL_SOURCE.strip(),
        ),
        Tool(
            id="tool-00000002",
            name="multiply",
            source_code=MULTIPLY_TOOL_SOURCE.strip(),
        ),
    ]
    # Skip if E2B_API_KEY is not set
    if not tool_settings.e2b_api_key:
        pytest.skip("E2B_API_KEY not set, skipping run_code test")
    # Create executor with mock dependencies
    # NOTE(review): the manager mocks presumably only satisfy the constructor and
    # are not exercised by run_code itself — confirm if the executor changes.
    executor = LettaBuiltinToolExecutor(
        message_manager=MagicMock(),
        agent_manager=MagicMock(),
        block_manager=MagicMock(),
        run_manager=MagicMock(),
        passage_manager=MagicMock(),
        actor=MagicMock(),
    )
    # Execute code that composes the tools
    # Note: We don't define add/multiply in the code - they should be injected from tool source
    result = await executor.run_code(
        agent_state=mock_agent_state,
        code="print(add(multiply(4, 5), 6))",
        language="python",
    )
    response_json = json.loads(result)
    # Verify execution succeeded and returned correct result
    assert "error" not in response_json or response_json.get("error") is None, f"Code execution failed: {response_json}"
    assert "26" in str(response_json["results"]) or "26" in str(response_json["logs"]["stdout"]), (
        f"Expected '26' in results, got: {response_json}"
    )
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/integration_test_builtin_tools.py",
"license": "Apache License 2.0",
"lines": 400,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/integration_test_human_in_the_loop.py | import logging
import uuid
from typing import Any, List
from unittest.mock import patch
import pytest
from letta_client import APIError, Letta
from letta_client.types import AgentState, MessageCreateParam, Tool
from letta.adapters.simple_llm_stream_adapter import SimpleLLMStreamAdapter
logger = logging.getLogger(__name__)
# ------------------------------
# Helper Functions and Constants
# ------------------------------
# OTID/content for the initial user message that triggers a tool-approval request.
USER_MESSAGE_OTID = str(uuid.uuid4())
USER_MESSAGE_CONTENT = "This is an automated test message. Call the get_secret_code_tool to get the code for text 'hello world'."
USER_MESSAGE_TEST_APPROVAL: List[MessageCreateParam] = [
    MessageCreateParam(
        role="user",
        content=USER_MESSAGE_CONTENT,
        otid=USER_MESSAGE_OTID,
    )
]
# Random id used to exercise the "unknown tool_call_id" error paths.
FAKE_REQUEST_ID = str(uuid.uuid4())
# Arbitrary fixed code injected via deny reasons / client-side tool returns.
SECRET_CODE = str(740845635798344975)
# Follow-up user message sent after an approval/denial to verify the agent still responds.
USER_MESSAGE_FOLLOW_UP_OTID = str(uuid.uuid4())
USER_MESSAGE_FOLLOW_UP_CONTENT = "Thank you for the secret code."
USER_MESSAGE_FOLLOW_UP: List[MessageCreateParam] = [
    MessageCreateParam(
        role="user",
        content=USER_MESSAGE_FOLLOW_UP_CONTENT,
        otid=USER_MESSAGE_FOLLOW_UP_OTID,
    )
]
# Prompt intended to yield 3 parallel get_secret_code_tool calls plus one roll_dice_tool call.
USER_MESSAGE_PARALLEL_TOOL_CALL_CONTENT = "This is an automated test message. Call the get_secret_code_tool 3 times in parallel for the following inputs: 'hello world', 'hello letta', 'hello test', and also call the roll_dice_tool once with a 16-sided dice."
USER_MESSAGE_PARALLEL_TOOL_CALL: List[MessageCreateParam] = [
    MessageCreateParam(
        role="user",
        content=USER_MESSAGE_PARALLEL_TOOL_CALL_CONTENT,
        otid=USER_MESSAGE_OTID,
    )
]
def get_secret_code_tool(input_text: str) -> str:
    """
    A tool that returns the secret code based on the input. This tool requires approval before execution.
    Args:
        input_text (str): The input text to process.
    Returns:
        str: The secret code based on the input text.
    """
    # Derive a deterministic (per-process) non-negative code from the text.
    code = abs(hash(input_text))
    return str(code)
def roll_dice_tool(num_sides: int) -> str:
    """
    A tool that returns a random number between 1 and num_sides.
    Args:
        num_sides (int): The number of sides on the die.
    Returns:
        str: The random number between 1 and num_sides.
    """
    import random
    roll = random.randint(1, num_sides)
    return str(roll)
def accumulate_chunks(stream):
    """Collapse a token stream into whole messages.

    Consecutive chunks that share a message_type are merged by concatenating
    their content onto the first chunk of the run; chunks without a
    message_type attribute (e.g. pings) are dropped.
    """
    collected = []
    in_progress = None
    last_type = None
    for chunk in stream:
        # Keep-alive style chunks carry no message_type at all — skip them
        if not hasattr(chunk, "message_type"):
            continue
        chunk_type = getattr(chunk, "message_type", None)
        if last_type == chunk_type:
            # Same run: fold streamed content into the message being built
            if in_progress is not None and hasattr(in_progress, "content") and hasattr(chunk, "content"):
                in_progress.content += chunk.content
        else:
            # Type changed: flush the finished message and start a new run
            if in_progress is not None:
                collected.append(in_progress)
            in_progress = chunk
        last_type = chunk_type
    # Flush the trailing message, if any
    if in_progress is not None:
        collected.append(in_progress)
    return [m for m in collected if m is not None]
def approve_tool_call(client: Letta, agent_id: str, tool_call_id: str):
    """Send an approval-response message granting the pending tool call."""
    approval_payload = {
        "type": "approval",
        "approvals": [
            {
                "type": "approval",
                "approve": True,
                "tool_call_id": tool_call_id,
            },
        ],
    }
    client.agents.messages.create(agent_id=agent_id, messages=[approval_payload])
# ------------------------------
# Fixtures
# ------------------------------
# Note: server_url and client fixtures are inherited from tests/conftest.py
@pytest.fixture(scope="function")
def approval_tool_fixture(client: Letta):
    """
    Creates and returns a tool that requires approval for testing.
    """
    # default_requires_approval=True makes agents emit an approval_request_message
    # instead of executing the tool directly.
    approval_tool = client.tools.upsert_from_function(
        func=get_secret_code_tool,
        default_requires_approval=True,
    )
    yield approval_tool
    # Teardown: delete the tool so repeated runs don't accumulate server state
    client.tools.delete(tool_id=approval_tool.id)
@pytest.fixture(scope="function")
def dice_tool_fixture(client: Letta):
    """Creates a plain (no approval required) dice-rolling tool; deleted on teardown."""
    dice_tool = client.tools.upsert_from_function(
        func=roll_dice_tool,
    )
    yield dice_tool
    client.tools.delete(tool_id=dice_tool.id)
@pytest.fixture(scope="function")
def agent(client: Letta, approval_tool_fixture, dice_tool_fixture) -> AgentState:
    """
    Creates and returns an agent state for testing with a pre-configured agent.
    The agent is configured with the requires_approval_tool.
    """
    # Base tools and base tool rules are excluded so only the two fixture tools
    # can appear in tool calls, keeping the assertions deterministic.
    agent_state = client.agents.create(
        name="approval_test_agent",
        agent_type="letta_v1_agent",
        include_base_tools=False,
        tool_ids=[approval_tool_fixture.id, dice_tool_fixture.id],
        include_base_tool_rules=False,
        tool_rules=[],
        model="anthropic/claude-sonnet-4-5-20250929",
        embedding="openai/text-embedding-3-small",
        tags=["approval_test"],
    )
    # Enable parallel tool calls for testing
    agent_state = client.agents.update(agent_id=agent_state.id, parallel_tool_calls=True)
    yield agent_state
    client.agents.delete(agent_id=agent_state.id)
# ------------------------------
# Error Test Cases
# ------------------------------
def test_send_approval_without_pending_request(client, agent):
    """Approving when no tool call is pending must fail with a clear API error."""
    with pytest.raises(APIError, match="No tool call is currently awaiting approval"):
        client.agents.messages.create(
            agent_id=agent.id,
            messages=[
                {
                    "type": "approval",
                    "approvals": [
                        {
                            "type": "approval",
                            "approve": True,
                            "tool_call_id": FAKE_REQUEST_ID,
                        },
                    ],
                },
            ],
        )
def test_send_user_message_with_pending_request(client, agent):
    """A plain user message must be rejected while a tool call awaits approval."""
    response = client.agents.messages.create(
        agent_id=agent.id,
        messages=USER_MESSAGE_TEST_APPROVAL,
    )
    # Use the module logger (lazy %-args) instead of bare print so output
    # respects pytest's log capture and verbosity settings.
    logger.debug("RESPONSE %s", response)
    for message in response.messages:
        logger.debug("MESSAGE %s", message)
    with pytest.raises(APIError, match="Please approve or deny the pending request before continuing"):
        client.agents.messages.create(
            agent_id=agent.id,
            messages=[{"role": "user", "content": "hi"}],
        )
    # Clean up: resolve the pending approval so the agent is usable afterwards
    approve_tool_call(client, agent.id, response.messages[-1].tool_call.tool_call_id)
def test_send_approval_message_with_incorrect_request_id(client, agent):
    """An approval referencing a wrong tool_call_id is rejected while one is pending."""
    response = client.agents.messages.create(
        agent_id=agent.id,
        messages=USER_MESSAGE_TEST_APPROVAL,
    )
    with pytest.raises(APIError, match="Invalid tool call IDs"):
        client.agents.messages.create(
            agent_id=agent.id,
            messages=[
                {
                    "type": "approval",
                    "approvals": [
                        {
                            "type": "approval",
                            "approve": True,
                            "tool_call_id": FAKE_REQUEST_ID,
                        },
                    ],
                },
            ],
        )
    # Resolve the still-pending request so the agent isn't left blocked
    approve_tool_call(client, agent.id, response.messages[-1].tool_call.tool_call_id)
# ------------------------------
# Request Test Cases
# ------------------------------
def test_invoke_approval_request(
    client: Letta,
    agent: AgentState,
) -> None:
    """An approval-gated tool call surfaces as an approval_request_message and
    is exposed via the agent.pending_approval relationship until resolved."""
    response = client.agents.messages.create(
        agent_id=agent.id,
        messages=USER_MESSAGE_TEST_APPROVAL,
    )
    messages = response.messages
    assert messages is not None
    assert messages[-1].message_type == "approval_request_message"
    assert messages[-1].tool_call is not None
    assert messages[-1].tool_call.name == "get_secret_code_tool"
    assert messages[-1].tool_calls is not None
    assert len(messages[-1].tool_calls) == 1
    assert messages[-1].tool_calls[0].name == "get_secret_code_tool"
    # v3/v1 path: approval request tool args must not include request_heartbeat
    import json as _json
    _args = _json.loads(messages[-1].tool_call.arguments)
    assert "request_heartbeat" not in _args
    # Context endpoint must still work while the approval is pending
    client.get(f"/v1/agents/{agent.id}/context", cast_to=dict[str, Any])
    # Test pending_approval relationship field
    agent_with_pending = client.agents.retrieve(agent_id=agent.id, include=["agent.pending_approval"])
    assert agent_with_pending.pending_approval is not None
    # Client SDK returns it as a dict, so use dict access
    assert agent_with_pending.pending_approval["tool_call"]["name"] == "get_secret_code_tool"
    assert len(agent_with_pending.pending_approval["tool_calls"]) > 0
    assert agent_with_pending.pending_approval["tool_calls"][0]["name"] == "get_secret_code_tool"
    assert agent_with_pending.pending_approval["tool_calls"][0]["tool_call_id"] == response.messages[-1].tool_call.tool_call_id
    approve_tool_call(client, agent.id, response.messages[-1].tool_call.tool_call_id)
    # After approval, pending_approval should be None (latest message is no longer approval request)
    agent_after_approval = client.agents.retrieve(agent_id=agent.id, include=["agent.pending_approval"])
    assert agent_after_approval.pending_approval is None
def test_invoke_approval_request_stream(
    client: Letta,
    agent: AgentState,
) -> None:
    """Streaming run ends with approval_request, then stop_reason and usage chunks."""
    response = client.agents.messages.stream(
        agent_id=agent.id,
        messages=USER_MESSAGE_TEST_APPROVAL,
        stream_tokens=True,
    )
    messages = accumulate_chunks(response)
    assert messages is not None
    # Last three accumulated messages: approval request, stop reason, usage statistics
    assert messages[-3].message_type == "approval_request_message"
    assert messages[-3].tool_call is not None
    assert messages[-3].tool_call.name == "get_secret_code_tool"
    assert messages[-2].message_type == "stop_reason"
    assert messages[-1].message_type == "usage_statistics"
    # Context endpoint must still work while the approval is pending
    client.get(f"/v1/agents/{agent.id}/context", cast_to=dict[str, Any])
    approve_tool_call(client, agent.id, messages[-3].tool_call.tool_call_id)
def test_invoke_tool_after_turning_off_requires_approval(
    client: Letta,
    agent: AgentState,
    approval_tool_fixture: Tool,
) -> None:
    """After disabling requires_approval, the same tool runs without an approval round-trip."""
    response = client.agents.messages.create(
        agent_id=agent.id,
        messages=USER_MESSAGE_TEST_APPROVAL,
    )
    tool_call_id = response.messages[-1].tool_call.tool_call_id
    # Approve the first (still approval-gated) call via the streaming endpoint
    response = client.agents.messages.stream(
        agent_id=agent.id,
        messages=[
            {
                "type": "approval",
                "approvals": [
                    {
                        "type": "approval",
                        "approve": True,
                        "tool_call_id": tool_call_id,
                    },
                ],
            },
        ],
        stream_tokens=True,
    )
    messages = accumulate_chunks(response)
    # Turn off the approval requirement for this agent/tool pair
    client.agents.tools.update_approval(
        agent_id=agent.id,
        tool_name=approval_tool_fixture.name,
        body_requires_approval=False,
    )
    response = client.agents.messages.stream(agent_id=agent.id, messages=USER_MESSAGE_TEST_APPROVAL, stream_tokens=True)
    messages = accumulate_chunks(response)
    assert messages is not None
    # Message count varies with whether the model interleaves assistant messages
    assert 6 <= len(messages) <= 9
    idx = 0
    assert messages[idx].message_type == "reasoning_message"
    idx += 1
    # Optional assistant message before the tool call (model-dependent)
    try:
        assert messages[idx].message_type == "assistant_message"
        idx += 1
    except Exception:
        pass
    assert messages[idx].message_type == "tool_call_message"
    idx += 1
    assert messages[idx].message_type == "tool_return_message"
    idx += 1
    assert messages[idx].message_type == "reasoning_message"
    idx += 1
    # Second step may answer directly or call the tool again
    try:
        assert messages[idx].message_type == "assistant_message"
        idx += 1
    except Exception:
        assert messages[idx].message_type == "tool_call_message"
        idx += 1
        assert messages[idx].message_type == "tool_return_message"
        idx += 1
# ------------------------------
# Approve Test Cases
# ------------------------------
def test_approve_tool_call_request(
    client: Letta,
    agent: AgentState,
) -> None:
    """Approving a pending call streams the tool return first, then stop/usage."""
    response = client.agents.messages.create(
        agent_id=agent.id,
        messages=USER_MESSAGE_TEST_APPROVAL,
    )
    tool_call_id = response.messages[-1].tool_call.tool_call_id
    response = client.agents.messages.stream(
        agent_id=agent.id,
        messages=[
            {
                "type": "approval",
                "approvals": [
                    {
                        "type": "approval",
                        "approve": True,
                        "tool_call_id": tool_call_id,
                    },
                ],
            },
        ],
        stream_tokens=True,
    )
    messages = accumulate_chunks(response)
    assert messages is not None
    # The approved tool executes immediately: first streamed message is its return
    assert messages[0].message_type == "tool_return_message"
    assert messages[0].tool_call_id == tool_call_id
    assert messages[0].status == "success"
    assert messages[-2].message_type == "stop_reason"
    assert messages[-1].message_type == "usage_statistics"
def test_approve_cursor_fetch(
    client: Letta,
    agent: AgentState,
) -> None:
    """Approval flow persists the expected message sequence, retrievable via cursor pagination."""
    last_message_cursor = client.agents.messages.list(agent_id=agent.id, limit=1).items[0].id
    response = client.agents.messages.create(
        agent_id=agent.id,
        messages=USER_MESSAGE_TEST_APPROVAL,
    )
    last_message_id = response.messages[0].id
    tool_call_id = response.messages[-1].tool_call.tool_call_id
    # Everything persisted after the pre-test cursor: user msg ... approval request
    messages = client.agents.messages.list(agent_id=agent.id, after=last_message_cursor).items
    assert messages[0].message_type == "user_message"
    assert messages[-1].message_type == "approval_request_message"
    # Ensure no request_heartbeat on approval request
    import json as _json
    _args = _json.loads(messages[-1].tool_call.arguments)
    assert "request_heartbeat" not in _args
    client.agents.messages.create(
        agent_id=agent.id,
        messages=[
            {
                "type": "approval",
                "approvals": [
                    {
                        "type": "approval",
                        "approve": True,
                        "tool_call_id": tool_call_id,
                    },
                ],
            },
        ],
    )
    # Fetch only the messages created after the approval request
    messages = client.agents.messages.list(agent_id=agent.id, after=last_message_id).items
    assert messages[0].message_type == "approval_response_message"
    assert messages[0].approval_request_id == tool_call_id
    assert messages[0].approve is True
    assert messages[0].approvals[0].approve is True
    assert messages[0].approvals[0].tool_call_id == tool_call_id
    assert messages[1].message_type == "tool_return_message"
    assert messages[1].status == "success"
def test_approve_with_context_check(
    client: Letta,
    agent: AgentState,
) -> None:
    """Context compilation must not break after an approval round-trip."""
    response = client.agents.messages.create(
        agent_id=agent.id,
        messages=USER_MESSAGE_TEST_APPROVAL,
    )
    tool_call_id = response.messages[-1].tool_call.tool_call_id
    response = client.agents.messages.stream(
        agent_id=agent.id,
        messages=[
            {
                "type": "approval",
                "approvals": [
                    {
                        "type": "approval",
                        "approve": True,
                        "tool_call_id": tool_call_id,
                    },
                ],
            },
        ],
        stream_tokens=True,
    )
    messages = accumulate_chunks(response)
    # A context-fetch failure here indicates the message history was left malformed
    try:
        client.get(f"/v1/agents/{agent.id}/context", cast_to=dict[str, Any])
    except Exception as e:
        if len(messages) > 4:
            raise ValueError("Model did not respond with only reasoning content, please rerun test to repro edge case.")
        raise e
def test_approve_and_follow_up(
    client: Letta,
    agent: AgentState,
) -> None:
    """After an approval completes, a follow-up user message streams normally."""
    response = client.agents.messages.create(
        agent_id=agent.id,
        messages=USER_MESSAGE_TEST_APPROVAL,
    )
    tool_call_id = response.messages[-1].tool_call.tool_call_id
    client.agents.messages.create(
        agent_id=agent.id,
        messages=[
            {
                "type": "approval",
                "approvals": [
                    {
                        "type": "approval",
                        "approve": True,
                        "tool_call_id": tool_call_id,
                    },
                ],
            },
        ],
    )
    response = client.agents.messages.stream(
        agent_id=agent.id,
        messages=USER_MESSAGE_FOLLOW_UP,
        stream_tokens=True,
    )
    messages = accumulate_chunks(response)
    assert messages is not None
    # First message kind is model-dependent; stream must still terminate cleanly
    assert messages[0].message_type in ["reasoning_message", "assistant_message", "tool_call_message"]
    assert messages[-2].message_type == "stop_reason"
    assert messages[-1].message_type == "usage_statistics"
def test_approve_and_follow_up_with_error(
    client: Letta,
    agent: AgentState,
) -> None:
    """An LLM failure during the approval turn must not brick the agent."""
    response = client.agents.messages.create(
        agent_id=agent.id,
        messages=USER_MESSAGE_TEST_APPROVAL,
    )
    tool_call_id = response.messages[-1].tool_call.tool_call_id
    # Mock the streaming adapter to return llm invocation failure on the follow up turn
    with patch.object(SimpleLLMStreamAdapter, "invoke_llm", side_effect=ValueError("TEST: Mocked error")):
        response = client.agents.messages.stream(
            agent_id=agent.id,
            messages=[
                {
                    "type": "approval",
                    "approvals": [
                        {
                            "type": "approval",
                            "approve": True,
                            "tool_call_id": tool_call_id,
                        },
                    ],
                },
            ],
            stream_tokens=True,
        )
        # The mocked error surfaces when the stream is consumed, not when created
        with pytest.raises(APIError, match="TEST: Mocked error"):
            messages = accumulate_chunks(response)
    # Ensure that agent is not bricked
    response = client.agents.messages.stream(
        agent_id=agent.id,
        messages=USER_MESSAGE_FOLLOW_UP,
    )
    messages = accumulate_chunks(response)
    assert messages is not None
    assert len(messages) == 4 or len(messages) == 5
    assert messages[0].message_type == "reasoning_message"
    # 4 messages: direct answer; 5 messages: extra tool call round-trip
    if len(messages) == 4:
        assert messages[1].message_type == "assistant_message"
    else:
        assert messages[1].message_type == "tool_call_message"
        assert messages[2].message_type == "tool_return_message"
def test_approve_with_user_message(
    client: Letta,
    agent: AgentState,
) -> None:
    """An approval can be batched with a user message in the same create call."""
    response = client.agents.messages.create(
        agent_id=agent.id,
        messages=USER_MESSAGE_TEST_APPROVAL,
    )
    tool_call_id = response.messages[-1].tool_call.tool_call_id
    client.agents.messages.create(
        agent_id=agent.id,
        messages=[
            {
                "type": "approval",
                "approvals": [
                    {
                        "type": "approval",
                        "approve": True,
                        "tool_call_id": tool_call_id,
                    },
                ],
            },
            {
                "type": "message",
                "role": "user",
                "content": "The secret code should not contain any special characters.",
            },
        ],
    )
    response = client.agents.messages.stream(
        agent_id=agent.id,
        messages=USER_MESSAGE_FOLLOW_UP,
        stream_tokens=True,
    )
    messages = accumulate_chunks(response)
    assert messages is not None
    assert messages[0].message_type in ["reasoning_message", "assistant_message", "tool_call_message"]
    assert messages[-2].message_type == "stop_reason"
    assert messages[-1].message_type == "usage_statistics"
# ------------------------------
# Deny Test Cases
# ------------------------------
def test_deny_tool_call_request(
    client: Letta,
    agent: AgentState,
) -> None:
    """The denial reason is visible to the model; it should echo the provided code."""
    response = client.agents.messages.create(
        agent_id=agent.id,
        messages=USER_MESSAGE_TEST_APPROVAL,
    )
    tool_call_id = response.messages[-1].tool_call.tool_call_id
    response = client.agents.messages.stream(
        agent_id=agent.id,
        messages=[
            {
                "type": "approval",
                "approvals": [
                    {
                        "type": "approval",
                        "approve": False,
                        "tool_call_id": tool_call_id,
                        "reason": f"You don't need to call the tool, the secret code is {SECRET_CODE}",
                    },
                ],
            },
        ],
    )
    messages = accumulate_chunks(response)
    assert messages is not None
    # Assistant message may come first or after a reasoning message
    if messages[0].message_type == "assistant_message":
        assert SECRET_CODE in messages[0].content
    elif messages[1].message_type == "assistant_message":
        assert SECRET_CODE in messages[1].content
def test_deny_cursor_fetch(
    client: Letta,
    agent: AgentState,
) -> None:
    """Denial persists an approval_response (approve=False, with reason) followed by an errored tool return."""
    last_message_cursor = client.agents.messages.list(agent_id=agent.id, limit=1).items[0].id
    response = client.agents.messages.create(
        agent_id=agent.id,
        messages=USER_MESSAGE_TEST_APPROVAL,
    )
    last_message_id = response.messages[0].id
    tool_call_id = response.messages[-1].tool_call.tool_call_id
    # Everything persisted after the pre-test cursor: user msg ... approval request
    messages = client.agents.messages.list(agent_id=agent.id, after=last_message_cursor).items
    assert messages[0].message_type == "user_message"
    assert messages[-1].message_type == "approval_request_message"
    assert messages[-1].tool_call.tool_call_id == tool_call_id
    client.agents.messages.create(
        agent_id=agent.id,
        messages=[
            {
                "type": "approval",
                "approvals": [
                    {
                        "type": "approval",
                        "approve": False,
                        "tool_call_id": tool_call_id,
                        "reason": f"You don't need to call the tool, the secret code is {SECRET_CODE}",
                    },
                ],
            },
        ],
    )
    # Fetch only the messages created after the approval request
    messages = client.agents.messages.list(agent_id=agent.id, after=last_message_id).items
    assert messages[0].message_type == "approval_response_message"
    # Identity comparison for the boolean flag (idiomatic; was `== False`)
    assert messages[0].approvals[0].approve is False
    assert messages[0].approvals[0].tool_call_id == tool_call_id
    assert messages[0].approvals[0].reason == f"You don't need to call the tool, the secret code is {SECRET_CODE}"
    # A denied call surfaces as an errored tool return
    assert messages[1].message_type == "tool_return_message"
    assert messages[1].status == "error"
def test_deny_with_context_check(
    client: Letta,
    agent: AgentState,
) -> None:
    """Context compilation must not break after a denial round-trip."""
    response = client.agents.messages.create(
        agent_id=agent.id,
        messages=USER_MESSAGE_TEST_APPROVAL,
    )
    tool_call_id = response.messages[-1].tool_call.tool_call_id
    response = client.agents.messages.stream(
        agent_id=agent.id,
        messages=[
            {
                "type": "approval",
                "approvals": [
                    {
                        "type": "approval",
                        "approve": False,
                        "tool_call_id": tool_call_id,
                        "reason": "Cancelled by user. Instead of responding, wait for next user input before replying.",
                    },
                ],
            },
        ],
        stream_tokens=True,
    )
    messages = accumulate_chunks(response)
    # A context-fetch failure here indicates the message history was left malformed
    try:
        client.get(f"/v1/agents/{agent.id}/context", cast_to=dict[str, Any])
    except Exception as e:
        if len(messages) > 4:
            raise ValueError("Model did not respond with only reasoning content, please rerun test to repro edge case.")
        raise e
def test_deny_and_follow_up(
    client: Letta,
    agent: AgentState,
) -> None:
    """After a denial, a follow-up user message streams normally."""
    response = client.agents.messages.create(
        agent_id=agent.id,
        messages=USER_MESSAGE_TEST_APPROVAL,
    )
    tool_call_id = response.messages[-1].tool_call.tool_call_id
    client.agents.messages.create(
        agent_id=agent.id,
        messages=[
            {
                "type": "approval",
                "approvals": [
                    {
                        "type": "approval",
                        "approve": False,
                        "tool_call_id": tool_call_id,
                        "reason": f"You don't need to call the tool, the secret code is {SECRET_CODE}",
                    },
                ],
            },
        ],
    )
    response = client.agents.messages.stream(
        agent_id=agent.id,
        messages=USER_MESSAGE_FOLLOW_UP,
        stream_tokens=True,
    )
    messages = accumulate_chunks(response)
    assert messages is not None
    assert len(messages) > 2
    assert messages[-2].message_type == "stop_reason"
    assert messages[-1].message_type == "usage_statistics"
def test_deny_and_follow_up_with_error(
    client: Letta,
    agent: AgentState,
) -> None:
    """An LLM failure during the denial turn must not brick the agent."""
    response = client.agents.messages.create(
        agent_id=agent.id,
        messages=USER_MESSAGE_TEST_APPROVAL,
    )
    tool_call_id = response.messages[-1].tool_call.tool_call_id
    # Mock the streaming adapter to return llm invocation failure on the follow up turn
    with patch.object(SimpleLLMStreamAdapter, "invoke_llm", side_effect=ValueError("TEST: Mocked error")):
        response = client.agents.messages.stream(
            agent_id=agent.id,
            messages=[
                {
                    "type": "approval",
                    "approvals": [
                        {
                            "type": "approval",
                            "approve": False,
                            "tool_call_id": tool_call_id,
                            "reason": f"You don't need to call the tool, the secret code is {SECRET_CODE}",
                        },
                    ],
                },
            ],
            stream_tokens=True,
        )
        # The mocked error surfaces when the stream is consumed, not when created
        with pytest.raises(APIError, match="TEST: Mocked error"):
            messages = accumulate_chunks(response)
    # Ensure that agent is not bricked
    response = client.agents.messages.stream(
        agent_id=agent.id,
        messages=USER_MESSAGE_FOLLOW_UP,
    )
    messages = accumulate_chunks(response)
    assert messages is not None
    assert len(messages) > 2
    assert messages[-2].message_type == "stop_reason"
    assert messages[-1].message_type == "usage_statistics"
def test_deny_with_user_message(
    client: Letta,
    agent: AgentState,
) -> None:
    """A denial (no reason) can be batched with a user message in the same create call."""
    response = client.agents.messages.create(
        agent_id=agent.id,
        messages=USER_MESSAGE_TEST_APPROVAL,
    )
    tool_call_id = response.messages[-1].tool_call.tool_call_id
    client.agents.messages.create(
        agent_id=agent.id,
        messages=[
            {
                "type": "approval",
                "approvals": [
                    {
                        "type": "approval",
                        "approve": False,
                        "tool_call_id": tool_call_id,
                    },
                ],
            },
            {
                "type": "message",
                "role": "user",
                "content": f"Actually, you don't need to call the tool, the secret code is {SECRET_CODE}",
            },
        ],
    )
    response = client.agents.messages.stream(
        agent_id=agent.id,
        messages=USER_MESSAGE_FOLLOW_UP,
        stream_tokens=True,
    )
    messages = accumulate_chunks(response)
    assert messages is not None
    assert len(messages) > 2
    assert messages[-2].message_type == "stop_reason"
    assert messages[-1].message_type == "usage_statistics"
# --------------------------------
# Client-Side Execution Test Cases
# --------------------------------
def test_client_side_tool_call_request(
    client: Letta,
    agent: AgentState,
) -> None:
    """Supplying a client-side tool result lets the agent answer with that result."""
    response = client.agents.messages.create(
        agent_id=agent.id,
        messages=USER_MESSAGE_TEST_APPROVAL,
    )
    tool_call_id = response.messages[-1].tool_call.tool_call_id
    # Instead of approving, return the tool's value directly (client-side execution)
    response = client.agents.messages.stream(
        agent_id=agent.id,
        messages=[
            {
                "type": "approval",
                "approvals": [
                    {
                        "type": "tool",
                        "tool_call_id": tool_call_id,
                        "tool_return": SECRET_CODE,
                        "status": "success",
                    },
                ],
            },
        ],
    )
    messages = accumulate_chunks(response)
    assert messages is not None
    # Bug fix: assert on the same message whose type was just checked. Previously
    # this checked messages[1]/messages[2], one past the assistant message — the
    # elif branch would land on a stop_reason chunk that has no `.content`.
    # Mirrors the pattern in test_deny_tool_call_request.
    if messages[0].message_type == "assistant_message":
        assert SECRET_CODE in messages[0].content
    elif messages[1].message_type == "assistant_message":
        assert SECRET_CODE in messages[1].content
    assert messages[-2].message_type == "stop_reason"
    assert messages[-1].message_type == "usage_statistics"
def test_client_side_tool_call_cursor_fetch(
    client: Letta,
    agent: AgentState,
) -> None:
    """Client-side execution persists an approval_response (type=tool) and a matching tool return."""
    last_message_cursor = client.agents.messages.list(agent_id=agent.id, limit=1).items[0].id
    response = client.agents.messages.create(
        agent_id=agent.id,
        messages=USER_MESSAGE_TEST_APPROVAL,
    )
    last_message_id = response.messages[0].id
    tool_call_id = response.messages[-1].tool_call.tool_call_id
    # Everything persisted after the pre-test cursor: user msg ... approval request
    messages = client.agents.messages.list(agent_id=agent.id, after=last_message_cursor).items
    assert messages[0].message_type == "user_message"
    assert messages[-1].message_type == "approval_request_message"
    assert messages[-1].tool_call.tool_call_id == tool_call_id
    # Ensure no request_heartbeat on approval request
    # import json as _json
    # _args = _json.loads(messages[2].tool_call.arguments)
    # assert "request_heartbeat" not in _args
    client.agents.messages.create(
        agent_id=agent.id,
        messages=[
            {
                "type": "tool",
                "tool_call_id": tool_call_id,
                "tool_return": SECRET_CODE,
                "status": "success",
            },
        ],
    )
    # Fetch only the messages created after the approval request
    messages = client.agents.messages.list(agent_id=agent.id, after=last_message_id).items
    assert messages[0].message_type == "approval_response_message"
    assert messages[0].approvals[0].type == "tool"
    assert messages[0].approvals[0].tool_call_id == tool_call_id
    assert messages[0].approvals[0].tool_return == SECRET_CODE
    assert messages[0].approvals[0].status == "success"
    # The provided return is recorded verbatim as the tool's result
    assert messages[1].message_type == "tool_return_message"
    assert messages[1].status == "success"
    assert messages[1].tool_call_id == tool_call_id
    assert messages[1].tool_return == SECRET_CODE
def test_client_side_tool_call_with_context_check(
    client: Letta,
    agent: AgentState,
) -> None:
    """Context compilation must not break after a client-side tool-return round-trip."""
    response = client.agents.messages.create(
        agent_id=agent.id,
        messages=USER_MESSAGE_TEST_APPROVAL,
    )
    tool_call_id = response.messages[-1].tool_call.tool_call_id
    response = client.agents.messages.stream(
        agent_id=agent.id,
        messages=[
            {
                "type": "approval",
                "approvals": [
                    {
                        "type": "tool",
                        "tool_call_id": tool_call_id,
                        "tool_return": SECRET_CODE,
                        "status": "success",
                    },
                ],
            },
        ],
        stream_tokens=True,
    )
    messages = accumulate_chunks(response)
    # A context-fetch failure here indicates the message history was left malformed
    try:
        client.get(f"/v1/agents/{agent.id}/context", cast_to=dict[str, Any])
    except Exception as e:
        if len(messages) > 4:
            raise ValueError("Model did not respond with only reasoning content, please rerun test to repro edge case.")
        raise e
def test_client_side_tool_call_and_follow_up(
    client: Letta,
    agent: AgentState,
) -> None:
    """After a client-side tool return, a follow-up user message streams normally."""
    response = client.agents.messages.create(
        agent_id=agent.id,
        messages=USER_MESSAGE_TEST_APPROVAL,
    )
    tool_call_id = response.messages[-1].tool_call.tool_call_id
    client.agents.messages.create(
        agent_id=agent.id,
        messages=[
            {
                "type": "approval",
                "approvals": [
                    {
                        "type": "tool",
                        "tool_call_id": tool_call_id,
                        "tool_return": SECRET_CODE,
                        "status": "success",
                    },
                ],
            },
        ],
    )
    response = client.agents.messages.stream(
        agent_id=agent.id,
        messages=USER_MESSAGE_FOLLOW_UP,
        stream_tokens=True,
    )
    messages = accumulate_chunks(response)
    assert messages is not None
    assert len(messages) > 2
    assert messages[-2].message_type == "stop_reason"
    assert messages[-1].message_type == "usage_statistics"
def test_client_side_tool_call_and_follow_up_with_error(
    client: Letta,
    agent: AgentState,
) -> None:
    """An LLM failure during the client-side-return turn must not brick the agent."""
    response = client.agents.messages.create(
        agent_id=agent.id,
        messages=USER_MESSAGE_TEST_APPROVAL,
    )
    tool_call_id = response.messages[-1].tool_call.tool_call_id
    # Mock the streaming adapter to return llm invocation failure on the follow up turn
    with patch.object(SimpleLLMStreamAdapter, "invoke_llm", side_effect=ValueError("TEST: Mocked error")):
        response = client.agents.messages.stream(
            agent_id=agent.id,
            messages=[
                {
                    "type": "approval",
                    "approvals": [
                        {
                            "type": "tool",
                            "tool_call_id": tool_call_id,
                            "tool_return": SECRET_CODE,
                            "status": "success",
                        },
                    ],
                },
            ],
            stream_tokens=True,
        )
        # The mocked error surfaces when the stream is consumed, not when created
        with pytest.raises(APIError, match="TEST: Mocked error"):
            messages = accumulate_chunks(response)
    # Ensure that agent is not bricked
    response = client.agents.messages.stream(
        agent_id=agent.id,
        messages=USER_MESSAGE_FOLLOW_UP,
    )
    messages = accumulate_chunks(response)
    assert messages is not None
    assert len(messages) > 2
    assert messages[-2].message_type == "stop_reason"
    assert messages[-1].message_type == "usage_statistics"
def test_client_side_tool_call_with_user_message(
    client: Letta,
    agent: AgentState,
) -> None:
    """A client-side tool return can be batched with a user message in one create call."""
    response = client.agents.messages.create(
        agent_id=agent.id,
        messages=USER_MESSAGE_TEST_APPROVAL,
    )
    tool_call_id = response.messages[-1].tool_call.tool_call_id
    client.agents.messages.create(
        agent_id=agent.id,
        messages=[
            {
                "type": "approval",
                "approvals": [
                    {
                        "type": "tool",
                        "tool_call_id": tool_call_id,
                        "tool_return": SECRET_CODE,
                        "status": "success",
                    },
                ],
            },
            {
                "type": "message",
                "role": "user",
                "content": "The secret code should not contain any special characters.",
            },
        ],
    )
    response = client.agents.messages.stream(
        agent_id=agent.id,
        messages=USER_MESSAGE_FOLLOW_UP,
        stream_tokens=True,
    )
    messages = accumulate_chunks(response)
    assert messages is not None
    assert len(messages) > 2
    assert messages[-2].message_type == "stop_reason"
    assert messages[-1].message_type == "usage_statistics"
def test_parallel_tool_calling(
    client: Letta,
    agent: AgentState,
) -> None:
    """Mixed resolution of parallel tool calls: approve one, deny one, and return one
    client-side, while a non-gated dice call executes directly."""
    last_message_cursor = client.agents.messages.list(agent_id=agent.id, limit=1).items[0].id
    response = client.agents.messages.create(
        agent_id=agent.id,
        messages=USER_MESSAGE_PARALLEL_TOOL_CALL,
    )
    messages = response.messages
    assert messages is not None
    # The non-approval-gated dice tool is called directly...
    assert messages[-2].message_type == "tool_call_message"
    assert len(messages[-2].tool_calls) == 1
    assert messages[-2].tool_calls[0].name == "roll_dice_tool"
    assert "6" in messages[-2].tool_calls[0].arguments
    dice_tool_call_id = messages[-2].tool_calls[0].tool_call_id
    # ...while the three gated calls are bundled into one approval request
    assert messages[-1].message_type == "approval_request_message"
    assert messages[-1].tool_call is not None
    assert messages[-1].tool_call.name == "get_secret_code_tool"
    assert len(messages[-1].tool_calls) == 3
    assert messages[-1].tool_calls[0].name == "get_secret_code_tool"
    assert "hello world" in messages[-1].tool_calls[0].arguments
    approve_tool_call_id = messages[-1].tool_calls[0].tool_call_id
    assert messages[-1].tool_calls[1].name == "get_secret_code_tool"
    assert "hello letta" in messages[-1].tool_calls[1].arguments
    deny_tool_call_id = messages[-1].tool_calls[1].tool_call_id
    assert messages[-1].tool_calls[2].name == "get_secret_code_tool"
    assert "hello test" in messages[-1].tool_calls[2].arguments
    client_side_tool_call_id = messages[-1].tool_calls[2].tool_call_id
    # ensure context is not bricked
    client.get(f"/v1/agents/{agent.id}/context", cast_to=dict[str, Any])
    # Resolve each pending call differently in a single approval message
    response = client.agents.messages.create(
        agent_id=agent.id,
        messages=[
            {
                "type": "approval",
                "approvals": [
                    {
                        "type": "approval",
                        "approve": True,
                        "tool_call_id": approve_tool_call_id,
                    },
                    {
                        "type": "approval",
                        "approve": False,
                        "tool_call_id": deny_tool_call_id,
                    },
                    {
                        "type": "tool",
                        "tool_call_id": client_side_tool_call_id,
                        "tool_return": SECRET_CODE,
                        "status": "success",
                    },
                ],
            },
        ],
    )
    messages = response.messages
    assert messages is not None
    assert len(messages) == 1 or len(messages) == 3 or len(messages) == 4
    # All four returns (dice + three secret-code resolutions) land in one message
    assert messages[0].message_type == "tool_return_message"
    assert len(messages[0].tool_returns) == 4
    for tool_return in messages[0].tool_returns:
        if tool_return.tool_call_id == approve_tool_call_id:
            assert tool_return.status == "success"
        elif tool_return.tool_call_id == deny_tool_call_id:
            assert tool_return.status == "error"
        elif tool_return.tool_call_id == client_side_tool_call_id:
            assert tool_return.status == "success"
            assert tool_return.tool_return == SECRET_CODE
        else:
            assert tool_return.tool_call_id == dice_tool_call_id
            assert tool_return.status == "success"
    if len(messages) == 3:
        assert messages[1].message_type == "reasoning_message"
        assert messages[2].message_type == "assistant_message"
    elif len(messages) == 4:
        assert messages[1].message_type == "reasoning_message"
        assert messages[2].message_type == "tool_call_message"
        assert messages[3].message_type == "tool_return_message"
    # ensure context is not bricked
    client.get(f"/v1/agents/{agent.id}/context", cast_to=dict[str, Any])
    # Persisted history retains the full request/response/return ordering
    messages = client.agents.messages.list(agent_id=agent.id, after=last_message_cursor).items
    assert len(messages) > 6
    assert messages[0].message_type == "user_message"
    assert messages[1].message_type == "reasoning_message"
    assert messages[2].message_type == "assistant_message"
    assert messages[3].message_type == "tool_call_message"
    assert messages[4].message_type == "approval_request_message"
    assert messages[5].message_type == "approval_response_message"
    assert messages[6].message_type == "tool_return_message"
    response = client.agents.messages.stream(
        agent_id=agent.id,
        messages=USER_MESSAGE_FOLLOW_UP,
        stream_tokens=True,
    )
    messages = accumulate_chunks(response)
    assert messages is not None
    assert len(messages) == 4
    assert messages[0].message_type == "reasoning_message"
    assert messages[1].message_type == "assistant_message"
    assert messages[2].message_type == "stop_reason"
    assert messages[3].message_type == "usage_statistics"
def test_agent_records_last_stop_reason_after_approval_flow(
    client: Letta,
    agent: AgentState,
) -> None:
    """Verify that the agent's last_stop_reason tracks run completion through a
    human-in-the-loop flow: it reads `requires_approval` while paused, then moves
    to a terminal reason once the approved run finishes."""

    def _current_stop_reason():
        # Re-fetch the agent and read its most recently recorded stop reason.
        return client.agents.retrieve(agent_id=agent.id).last_stop_reason

    baseline_stop_reason = _current_stop_reason()

    # Kick off a turn that ends in an approval request.
    create_response = client.agents.messages.create(
        agent_id=agent.id,
        messages=USER_MESSAGE_TEST_APPROVAL,
    )
    returned_messages = create_response.messages
    assert returned_messages is not None
    assert returned_messages[-1].message_type == "approval_request_message"

    # While the run is paused awaiting approval, the stop reason reflects that.
    assert _current_stop_reason() == "requires_approval"

    # Approve the pending tool call so the run can complete.
    approve_tool_call(client, agent.id, create_response.messages[-1].tool_call.tool_call_id)

    # After completion the stop reason must be set and differ from the pre-run value
    # (could be end_turn or another terminal reason).
    post_approval_reason = _current_stop_reason()
    assert post_approval_reason is not None
    assert post_approval_reason != baseline_stop_reason

    # One more ordinary turn to confirm the field keeps being refreshed.
    client.agents.messages.create(
        agent_id=agent.id,
        messages=USER_MESSAGE_FOLLOW_UP,
    )
    assert _current_stop_reason() is not None
def test_approve_with_cancellation(
    client: Letta,
    agent: AgentState,
) -> None:
    """
    Test that when approval and cancellation happen simultaneously,
    the stream returns stop_reason: cancelled and stream_was_cancelled is set.
    """
    import threading
    import time

    # Remember the newest message id so later listings only cover this test's traffic.
    last_message_cursor = client.agents.messages.list(agent_id=agent.id, limit=1).items[0].id
    # Step 1: Send message that triggers approval request
    response = client.agents.messages.create(
        agent_id=agent.id,
        messages=USER_MESSAGE_TEST_APPROVAL,
    )
    tool_call_id = response.messages[-1].tool_call.tool_call_id

    # Step 2: Start cancellation in background thread
    # NOTE(review): the 0.3s delay assumes the approval stream below is already in
    # flight when cancel fires — timing-sensitive; confirm it is stable in CI.
    def cancel_after_delay():
        time.sleep(0.3)  # Wait for stream to start
        client.agents.messages.cancel(agent_id=agent.id)

    cancel_thread = threading.Thread(target=cancel_after_delay, daemon=True)
    cancel_thread.start()
    # Step 3: Start approval stream (will be cancelled during processing)
    # The approval payload supplies a client-side tool result (SECRET_CODE) directly.
    response = client.agents.messages.stream(
        agent_id=agent.id,
        messages=[
            {
                "type": "approval",
                "approvals": [
                    {
                        "type": "tool",
                        "tool_call_id": tool_call_id,
                        "tool_return": SECRET_CODE,
                        "status": "success",
                    },
                ],
            },
        ],
        # NOTE(review): both `streaming` and `stream_tokens` are passed — confirm
        # `streaming` is an accepted kwarg of messages.stream.
        streaming=True,
        stream_tokens=True,
    )
    # Step 4: Accumulate chunks
    messages = accumulate_chunks(response)
    # Step 5: Verify we got chunks AND a cancelled stop reason
    assert len(messages) > 1, "Should receive at least some chunks before cancellation"
    # Find stop_reason in messages
    stop_reasons = [msg for msg in messages if hasattr(msg, "message_type") and msg.message_type == "stop_reason"]
    assert len(stop_reasons) == 1, f"Expected exactly 1 stop_reason, got {len(stop_reasons)}"
    assert stop_reasons[0].stop_reason == "cancelled", f"Expected stop_reason 'cancelled', got '{stop_reasons[0].stop_reason}'"
    # Step 6: Verify run status is cancelled
    runs = client.runs.list(agent_ids=[agent.id])
    latest_run = runs.items[0]
    assert latest_run.status == "cancelled", f"Expected run status 'cancelled', got '{latest_run.status}'"
    # Wait for cancel thread to finish
    cancel_thread.join(timeout=1.0)
    logger.info(f"✅ Test passed: approval with cancellation handled correctly, received {len(messages)} chunks")
    # Step 7: Verify that approval response message is persisted
    messages = client.agents.messages.list(agent_id=agent.id, after=last_message_cursor).items
    assert len(messages) > 0, "Should have persisted at least some messages before cancellation"
    assert messages[-1].message_type == "tool_return_message", "Last message should be a tool return message"
    last_message_cursor = messages[-1].id
    # Step 8: Attempt retry with same response
    # Re-sending the identical approval payload exercises the server's idempotency path.
    response = client.agents.messages.stream(
        agent_id=agent.id,
        messages=[
            {
                "type": "approval",
                "approvals": [
                    {
                        "type": "tool",
                        "tool_call_id": tool_call_id,
                        "tool_return": SECRET_CODE,
                        "status": "success",
                    },
                ],
            },
        ],
        streaming=True,
        stream_tokens=True,
    )
    # Step 9: Accumulate chunks
    messages = accumulate_chunks(response)
    # Step 10: Verify we got chunks AND an end_turn stop reason
    assert len(messages) > 1, "Should receive at least some chunks before cancellation"
    # Find stop_reason in messages
    stop_reasons = [msg for msg in messages if hasattr(msg, "message_type") and msg.message_type == "stop_reason"]
    assert len(stop_reasons) == 1, f"Expected exactly 1 stop_reason, got {len(stop_reasons)}"
    assert stop_reasons[0].stop_reason == "end_turn", f"Expected stop_reason 'end_turn', got '{stop_reasons[0].stop_reason}'"
    # Step 11: Verify keep-alive message was sent
    # The retry should have injected a synthetic "keep-alive" user message.
    messages = client.agents.messages.list(agent_id=agent.id, after=last_message_cursor).items
    assert len(messages) > 0, "Should have persisted new messages"
    assert messages[0].message_type == "user_message", "First message should be a user message"
    assert "keep-alive" in messages[0].content, f"Expected keep-alive message, got '{messages[0].content}'"
def test_retry_with_summarization(
    client: Letta,
    agent: AgentState,
) -> None:
    """
    Test that approval retry works correctly after summarization evicts messages from context.

    Scenario:
    1. Send message that triggers approval request
    2. Send approval response, but cancel during LLM processing
    3. Call summarize with mode='all' to evict all messages from context
    4. Verify only system and summary messages remain in context
    5. Retry the original approval response - should succeed via idempotency check
    """
    import threading
    import time

    # Step 1: Send message that triggers approval request
    response = client.agents.messages.create(
        agent_id=agent.id,
        messages=USER_MESSAGE_TEST_APPROVAL,
    )
    tool_call_id = response.messages[-1].tool_call.tool_call_id

    # Step 2: Start cancellation in background thread
    # NOTE(review): 0.3s assumes the stream below is already running when cancel
    # fires — timing-sensitive; confirm stable in CI.
    def cancel_after_delay():
        time.sleep(0.3)  # Wait for stream to start
        client.agents.messages.cancel(agent_id=agent.id)

    cancel_thread = threading.Thread(target=cancel_after_delay, daemon=True)
    cancel_thread.start()
    # Step 3: Start approval stream (will be cancelled during processing)
    response = client.agents.messages.stream(
        agent_id=agent.id,
        messages=[
            {
                "type": "approval",
                "approvals": [
                    {
                        "type": "tool",
                        "tool_call_id": tool_call_id,
                        "tool_return": SECRET_CODE,
                        "status": "success",
                    },
                ],
            },
        ],
        streaming=True,
        stream_tokens=True,
    )
    # Step 4: Accumulate chunks (stream will be cancelled)
    messages = accumulate_chunks(response)
    # Step 5: Verify we got cancelled
    stop_reasons = [msg for msg in messages if hasattr(msg, "message_type") and msg.message_type == "stop_reason"]
    assert len(stop_reasons) == 1, f"Expected exactly 1 stop_reason, got {len(stop_reasons)}"
    assert stop_reasons[0].stop_reason == "cancelled", f"Expected stop_reason 'cancelled', got '{stop_reasons[0].stop_reason}'"
    cancel_thread.join(timeout=1.0)
    # Step 6: Verify tool return message is persisted
    all_messages = client.agents.messages.list(agent_id=agent.id, limit=100).items
    tool_return_messages = [m for m in all_messages if m.message_type == "tool_return_message"]
    assert len(tool_return_messages) > 0, "Tool return message should be persisted"
    # Step 7: Call compact with mode='all' to evict all messages from context
    compaction_response = client.agents.messages.compact(
        agent_id=agent.id,
        compaction_settings={"mode": "all"},
    )
    # Step 8: Verify only system and summary messages remain in context (should be 2)
    assert compaction_response.num_messages_after == 2, (
        f"Expected 2 messages (system + summary) after compaction, but got {compaction_response.num_messages_after}"
    )
    logger.info(f"✅ After compaction: {compaction_response.num_messages_before} -> {compaction_response.num_messages_after} messages")
    # Step 9: Retry the original approval response - should succeed via idempotency check
    # (the original approval is no longer in context, so the server must match it
    # against the persisted message history, not the in-context window)
    response = client.agents.messages.stream(
        agent_id=agent.id,
        messages=[
            {
                "type": "approval",
                "approvals": [
                    {
                        "type": "tool",
                        "tool_call_id": tool_call_id,
                        "tool_return": SECRET_CODE,
                        "status": "success",
                    },
                ],
            },
        ],
        streaming=True,
        stream_tokens=True,
    )
    # Step 10: Accumulate chunks
    messages = accumulate_chunks(response)
    # Step 11: Verify we got chunks AND an end_turn stop reason (not an error)
    assert len(messages) > 1, "Should receive at least some chunks"
    stop_reasons = [msg for msg in messages if hasattr(msg, "message_type") and msg.message_type == "stop_reason"]
    assert len(stop_reasons) == 1, f"Expected exactly 1 stop_reason, got {len(stop_reasons)}"
    assert stop_reasons[0].stop_reason == "end_turn", f"Expected stop_reason 'end_turn', got '{stop_reasons[0].stop_reason}'"
    logger.info("✅ Test passed: approval retry after summarization handled correctly via idempotency check")
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/integration_test_human_in_the_loop.py",
"license": "Apache License 2.0",
"lines": 1315,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/integration_test_modal_sandbox_v2.py | """
Integration tests for Modal Sandbox V2.
These tests cover:
- Basic tool execution with Modal
- Error handling and edge cases
- Async tool execution
- Version tracking and redeployment
- Persistence of deployment metadata
- Concurrent execution handling
- Multiple sandbox configurations
- Service restart scenarios
"""
import asyncio
import os
import uuid
from datetime import datetime
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from letta.schemas.enums import ToolSourceType
from letta.schemas.organization import Organization
from letta.schemas.pip_requirement import PipRequirement
from letta.schemas.sandbox_config import ModalSandboxConfig, SandboxConfig, SandboxConfigCreate, SandboxType
from letta.schemas.tool import Tool
from letta.schemas.user import User
from letta.services.organization_manager import OrganizationManager
from letta.services.sandbox_config_manager import SandboxConfigManager
from letta.services.tool_sandbox.modal_sandbox_v2 import AsyncToolSandboxModalV2
from letta.services.tool_sandbox.modal_version_manager import ModalVersionManager, get_version_manager
from letta.services.user_manager import UserManager
# ============================================================================
# SHARED FIXTURES
# ============================================================================
@pytest.fixture
def test_organization():
    """Provision a real organization row in the database and yield it."""
    created = OrganizationManager().create_organization(
        Organization(name=f"test-org-{uuid.uuid4().hex[:8]}")
    )
    yield created
    # Cleanup would go here if needed
@pytest.fixture
def test_user(test_organization):
    """Provision a real user row belonging to test_organization and yield it."""
    created = UserManager().create_user(
        User(name=f"test-user-{uuid.uuid4().hex[:8]}", organization_id=test_organization.id)
    )
    yield created
    # Cleanup would go here if needed
@pytest.fixture
def mock_user():
    """Build an in-memory stand-in user (no database row) for lightweight tests."""
    fake = MagicMock()
    fake.organization_id = f"test-org-{uuid.uuid4().hex[:8]}"
    fake.id = f"user-{uuid.uuid4().hex[:8]}"
    return fake
@pytest.fixture
def basic_tool(test_user):
    """Create and persist a simple synchronous calculator tool owned by test_user."""
    from letta.services.tool_manager import ToolManager

    tool = Tool(
        id=f"tool-{uuid.uuid4().hex[:8]}",
        name="calculate",
        source_type=ToolSourceType.python,
        source_code="""
def calculate(operation: str, a: float, b: float) -> float:
    '''Perform a calculation on two numbers.
    Args:
        operation: The operation to perform (add, subtract, multiply, divide)
        a: The first number
        b: The second number
    Returns:
        float: The result of the calculation
    '''
    if operation == "add":
        return a + b
    elif operation == "subtract":
        return a - b
    elif operation == "multiply":
        return a * b
    elif operation == "divide":
        if b == 0:
            raise ValueError("Cannot divide by zero")
        return a / b
    else:
        raise ValueError(f"Unknown operation: {operation}")
""",
        json_schema={
            "parameters": {
                "properties": {
                    "operation": {"type": "string", "description": "The operation to perform"},
                    "a": {"type": "number", "description": "The first number"},
                    "b": {"type": "number", "description": "The second number"},
                }
            }
        },
    )
    # Create the tool in the database
    tool_manager = ToolManager()
    created_tool = tool_manager.create_or_update_tool(tool, actor=test_user)
    yield created_tool
    # Cleanup would go here if needed
@pytest.fixture
def async_tool(test_user):
    """Create and persist an async (coroutine) tool owned by test_user."""
    from letta.services.tool_manager import ToolManager

    # NOTE(review): the embedded source annotates its return as `Dict` without
    # importing it — presumably the sandbox execution environment injects typing
    # names into scope; confirm before relying on it.
    tool = Tool(
        id=f"tool-{uuid.uuid4().hex[:8]}",
        name="fetch_data",
        source_type=ToolSourceType.python,
        source_code="""
import asyncio
async def fetch_data(url: str, delay: float = 0.1) -> Dict:
    '''Simulate fetching data from a URL.
    Args:
        url: The URL to fetch data from
        delay: The delay in seconds before returning
    Returns:
        Dict: A dictionary containing the fetched data
    '''
    await asyncio.sleep(delay)
    return {
        "url": url,
        "status": "success",
        "data": f"Data from {url}",
        "timestamp": "2024-01-01T00:00:00Z"
    }
""",
        json_schema={
            "parameters": {
                "properties": {
                    "url": {"type": "string", "description": "The URL to fetch data from"},
                    "delay": {"type": "number", "default": 0.1, "description": "The delay in seconds"},
                }
            }
        },
    )
    # Create the tool in the database
    tool_manager = ToolManager()
    created_tool = tool_manager.create_or_update_tool(tool, actor=test_user)
    yield created_tool
    # Cleanup would go here if needed
@pytest.fixture
def tool_with_dependencies(test_user):
    """Create and persist a tool that declares a pip requirement, to exercise the
    dependency-installation path of the sandbox."""
    from letta.services.tool_manager import ToolManager

    tool = Tool(
        id=f"tool-{uuid.uuid4().hex[:8]}",
        name="process_json",
        source_type=ToolSourceType.python,
        source_code="""
import json
import hashlib
def process_json(data: str) -> Dict:
    '''Process JSON data and return metadata.
    Args:
        data: The JSON string to process
    Returns:
        Dict: Metadata about the JSON data
    '''
    try:
        parsed = json.loads(data)
        data_hash = hashlib.md5(data.encode()).hexdigest()
        return {
            "valid": True,
            "keys": list(parsed.keys()) if isinstance(parsed, dict) else None,
            "type": type(parsed).__name__,
            "hash": data_hash,
            "size": len(data),
        }
    except json.JSONDecodeError as e:
        return {
            "valid": False,
            "error": str(e),
            "size": len(data),
        }
""",
        json_schema={
            "parameters": {
                "properties": {
                    "data": {"type": "string", "description": "The JSON string to process"},
                }
            }
        },
        # hashlib is stdlib (not pip-installable under this name); kept on purpose
        # just to exercise the pip_requirements plumbing.
        pip_requirements=[PipRequirement(name="hashlib")],  # Actually built-in, but for testing
    )
    # Create the tool in the database
    tool_manager = ToolManager()
    created_tool = tool_manager.create_or_update_tool(tool, actor=test_user)
    yield created_tool
    # Cleanup would go here if needed
@pytest.fixture
def sandbox_config(test_user):
    """Persist a Modal sandbox configuration row for test_user and yield it."""
    config_manager = SandboxConfigManager()
    create_request = SandboxConfigCreate(
        config=ModalSandboxConfig(
            timeout=60,
            pip_requirements=["pandas==2.0.0"],
        ).model_dump()
    )
    yield config_manager.create_or_update_sandbox_config(
        sandbox_config_create=create_request, actor=test_user
    )
    # Cleanup would go here if needed
@pytest.fixture
def mock_sandbox_config():
    """Build an in-memory Modal SandboxConfig (no database row) for lightweight tests."""
    return SandboxConfig(
        id=f"sandbox-{uuid.uuid4().hex[:8]}",
        type=SandboxType.MODAL,
        config=ModalSandboxConfig(
            timeout=60,
            pip_requirements=["pandas==2.0.0"],
        ).model_dump(),
    )
# ============================================================================
# BASIC EXECUTION TESTS (Requires Modal credentials)
# ============================================================================
# NOTE(review): the leading `True or ...` makes this skip unconditional, so the
# whole class is disabled even when Modal credentials are present — looks like a
# deliberate kill-switch, but the reason string only mentions credentials; confirm.
@pytest.mark.skipif(
    True or not os.getenv("MODAL_TOKEN_ID") or not os.getenv("MODAL_TOKEN_SECRET"), reason="Modal credentials not configured"
)
class TestModalV2BasicExecution:
    """Basic execution tests with Modal (require live Modal credentials)."""

    @pytest.mark.asyncio
    async def test_basic_execution(self, basic_tool, test_user):
        """Test basic tool execution with different operations."""
        sandbox = AsyncToolSandboxModalV2(
            tool_name="calculate",
            args={"operation": "add", "a": 5, "b": 3},
            user=test_user,
            tool_id=basic_tool.id,
            tool_object=basic_tool,
        )
        result = await sandbox.run()
        assert result.status == "success"
        assert result.func_return == 8.0
        # Test division
        sandbox2 = AsyncToolSandboxModalV2(
            tool_name="calculate",
            args={"operation": "divide", "a": 10, "b": 2},
            user=test_user,
            tool_id=basic_tool.id,
            tool_object=basic_tool,
        )
        result2 = await sandbox2.run()
        assert result2.status == "success"
        assert result2.func_return == 5.0

    @pytest.mark.asyncio
    async def test_error_handling(self, basic_tool, test_user):
        """Test error handling in tool execution."""
        # Test division by zero: the tool raises ValueError inside the sandbox
        sandbox = AsyncToolSandboxModalV2(
            tool_name="calculate",
            args={"operation": "divide", "a": 10, "b": 0},
            user=test_user,
            tool_id=basic_tool.id,
            tool_object=basic_tool,
        )
        result = await sandbox.run()
        assert result.status == "error"
        assert "Cannot divide by zero" in str(result.func_return)
        # Test unknown operation
        sandbox2 = AsyncToolSandboxModalV2(
            tool_name="calculate",
            args={"operation": "unknown", "a": 1, "b": 2},
            user=test_user,
            tool_id=basic_tool.id,
            tool_object=basic_tool,
        )
        result2 = await sandbox2.run()
        assert result2.status == "error"
        assert "Unknown operation" in str(result2.func_return)

    @pytest.mark.asyncio
    async def test_async_tool_execution(self, async_tool, test_user):
        """Test execution of async tools."""
        sandbox = AsyncToolSandboxModalV2(
            tool_name="fetch_data",
            args={"url": "https://example.com", "delay": 0.01},
            user=test_user,
            tool_id=async_tool.id,
            tool_object=async_tool,
        )
        result = await sandbox.run()
        assert result.status == "success"
        # Parse the result (it should be a dict)
        data = result.func_return
        assert isinstance(data, dict)
        assert data["url"] == "https://example.com"
        assert data["status"] == "success"
        assert "Data from https://example.com" in data["data"]

    @pytest.mark.asyncio
    async def test_concurrent_executions(self, basic_tool, test_user):
        """Test that concurrent executions work correctly."""
        # Create multiple sandboxes with different arguments
        sandboxes = [
            AsyncToolSandboxModalV2(
                tool_name="calculate",
                args={"operation": "add", "a": i, "b": i + 1},
                user=test_user,
                tool_id=basic_tool.id,
                tool_object=basic_tool,
            )
            for i in range(5)
        ]
        # Execute all concurrently
        results = await asyncio.gather(*[s.run() for s in sandboxes])
        # Verify all succeeded with correct results
        for i, result in enumerate(results):
            assert result.status == "success"
            expected = i + (i + 1)  # a + b
            assert result.func_return == expected
# ============================================================================
# PERSISTENCE AND VERSION TRACKING TESTS
# ============================================================================
@pytest.mark.asyncio
class TestModalV2Persistence:
    """Tests for deployment persistence and version tracking.

    These patch ToolManager inside modal_version_manager so no database or
    Modal access is required.
    """

    async def test_deployment_persists_in_tool_metadata(self, mock_user, sandbox_config):
        """Test that deployment info is correctly stored in tool metadata."""
        tool = Tool(
            id=f"tool-{uuid.uuid4().hex[:8]}",
            name="calculate",
            source_code="def calculate(x: float) -> float:\n '''Double a number.\n \n Args:\n x: The number to double\n \n Returns:\n The doubled value\n '''\n return x * 2",
            json_schema={"parameters": {"properties": {"x": {"type": "number"}}}},
            metadata_={},
        )
        with patch("letta.services.tool_sandbox.modal_version_manager.ToolManager") as MockToolManager:
            mock_tool_manager = MockToolManager.return_value
            mock_tool_manager.get_tool_by_id.return_value = tool
            mock_tool_manager.update_tool_by_id_async = AsyncMock(return_value=tool)
            version_manager = ModalVersionManager()
            # Register a deployment
            app_name = f"{mock_user.organization_id}-{tool.name}-v2"
            version_hash = "abc123def456"
            mock_app = MagicMock()
            await version_manager.register_deployment(
                tool_id=tool.id,
                app_name=app_name,
                version_hash=version_hash,
                app=mock_app,
                dependencies={"pandas", "numpy"},
                sandbox_config_id=sandbox_config.id,
                actor=mock_user,
            )
            # Verify update was called with correct metadata: register_deployment
            # writes a per-sandbox-config entry under "modal_deployments" via a
            # `tool_update` keyword argument.
            mock_tool_manager.update_tool_by_id_async.assert_called_once()
            call_args = mock_tool_manager.update_tool_by_id_async.call_args
            metadata = call_args[1]["tool_update"].metadata_
            assert "modal_deployments" in metadata
            assert sandbox_config.id in metadata["modal_deployments"]
            deployment_data = metadata["modal_deployments"][sandbox_config.id]
            assert deployment_data["app_name"] == app_name
            assert deployment_data["version_hash"] == version_hash
            assert set(deployment_data["dependencies"]) == {"pandas", "numpy"}

    async def test_version_tracking_and_redeployment(self, mock_user, basic_tool, sandbox_config):
        """Test version tracking and redeployment on code changes."""
        with patch("letta.services.tool_sandbox.modal_version_manager.ToolManager") as MockToolManager:
            mock_tool_manager = MockToolManager.return_value
            mock_tool_manager.get_tool_by_id.return_value = basic_tool
            # Track metadata updates
            metadata_store = {}

            # NOTE(review): the test above shows register_deployment passes a
            # `tool_update` kwarg, so `kwargs.get("metadata_", {})` here may always
            # be empty and the store never populated — the assertions below likely
            # pass via the version manager's in-memory state; confirm intent.
            async def update_tool(*args, **kwargs):
                metadata_store.update(kwargs.get("metadata_", {}))
                basic_tool.metadata_ = metadata_store
                return basic_tool

            mock_tool_manager.update_tool_by_id_async = AsyncMock(side_effect=update_tool)
            version_manager = ModalVersionManager()
            app_name = f"{mock_user.organization_id}-{basic_tool.name}-v2"
            # First deployment
            version1 = "version1hash"
            await version_manager.register_deployment(
                tool_id=basic_tool.id,
                app_name=app_name,
                version_hash=version1,
                app=MagicMock(),
                sandbox_config_id=sandbox_config.id,
                actor=mock_user,
            )
            # Should not need redeployment with same version
            assert not await version_manager.needs_redeployment(basic_tool.id, version1, sandbox_config.id, actor=mock_user)
            # Should need redeployment with different version
            version2 = "version2hash"
            assert await version_manager.needs_redeployment(basic_tool.id, version2, sandbox_config.id, actor=mock_user)

    async def test_deployment_survives_service_restart(self, mock_user, sandbox_config):
        """Test that deployment info survives a service restart."""
        tool_id = f"tool-{uuid.uuid4().hex[:8]}"
        app_name = f"{mock_user.organization_id}-calculate-v2"
        version_hash = "restart-test-v1"
        # Simulate existing deployment in metadata (as if written by a previous
        # process before the restart)
        existing_metadata = {
            "modal_deployments": {
                sandbox_config.id: {
                    "app_name": app_name,
                    "version_hash": version_hash,
                    "deployed_at": datetime.now().isoformat(),
                    "dependencies": ["pandas"],
                }
            }
        }
        tool = Tool(
            id=tool_id,
            name="calculate",
            source_code="def calculate(x: float) -> float:\n '''Identity function.\n \n Args:\n x: The input value\n \n Returns:\n The same value\n '''\n return x",
            json_schema={"parameters": {"properties": {}}},
            metadata_=existing_metadata,
        )
        with patch("letta.services.tool_sandbox.modal_version_manager.ToolManager") as MockToolManager:
            mock_tool_manager = MockToolManager.return_value
            mock_tool_manager.get_tool_by_id.return_value = tool
            # Create new version manager (simulating service restart)
            version_manager = ModalVersionManager()
            # Should be able to retrieve existing deployment from the persisted
            # tool metadata rather than in-memory state
            deployment = await version_manager.get_deployment(tool_id, sandbox_config.id, actor=mock_user)
            assert deployment is not None
            assert deployment.app_name == app_name
            assert deployment.version_hash == version_hash
            assert deployment.dependencies == {"pandas"}
            # Should not need redeployment with same version
            assert not await version_manager.needs_redeployment(tool_id, version_hash, sandbox_config.id, actor=mock_user)

    async def test_different_sandbox_configs_same_tool(self, mock_user):
        """Test that different sandbox configs can have different deployments for the same tool."""
        tool = Tool(
            id=f"tool-{uuid.uuid4().hex[:8]}",
            name="multi_config",
            source_code="def test(x: int) -> int:\n '''Test function.\n \n Args:\n x: The input value\n \n Returns:\n The same value\n '''\n return x",
            json_schema={"parameters": {"properties": {}}},
            metadata_={},
        )
        # Create two different sandbox configs
        config1 = SandboxConfig(
            id=f"sandbox-{uuid.uuid4().hex[:8]}",
            type=SandboxType.MODAL,
            config=ModalSandboxConfig(timeout=30, pip_requirements=["pandas"]).model_dump(),
        )
        config2 = SandboxConfig(
            id=f"sandbox-{uuid.uuid4().hex[:8]}",
            type=SandboxType.MODAL,
            config=ModalSandboxConfig(timeout=60, pip_requirements=["numpy"]).model_dump(),
        )
        with patch("letta.services.tool_sandbox.modal_version_manager.ToolManager") as MockToolManager:
            mock_tool_manager = MockToolManager.return_value
            mock_tool_manager.get_tool_by_id.return_value = tool
            # Track all metadata updates, merging per-config entries so both
            # deployments remain visible on the tool
            all_metadata = {"modal_deployments": {}}

            async def update_tool(*args, **kwargs):
                new_meta = kwargs.get("metadata_", {})
                if "modal_deployments" in new_meta:
                    all_metadata["modal_deployments"].update(new_meta["modal_deployments"])
                tool.metadata_ = all_metadata
                return tool

            mock_tool_manager.update_tool_by_id_async = AsyncMock(side_effect=update_tool)
            version_manager = ModalVersionManager()
            app_name = f"{mock_user.organization_id}-{tool.name}-v2"
            # Deploy with config1
            await version_manager.register_deployment(
                tool_id=tool.id,
                app_name=app_name,
                version_hash="config1-hash",
                app=MagicMock(),
                sandbox_config_id=config1.id,
                actor=mock_user,
            )
            # Deploy with config2
            await version_manager.register_deployment(
                tool_id=tool.id,
                app_name=app_name,
                version_hash="config2-hash",
                app=MagicMock(),
                sandbox_config_id=config2.id,
                actor=mock_user,
            )
            # Both deployments should exist
            deployment1 = await version_manager.get_deployment(tool.id, config1.id, actor=mock_user)
            deployment2 = await version_manager.get_deployment(tool.id, config2.id, actor=mock_user)
            assert deployment1 is not None
            assert deployment2 is not None
            assert deployment1.version_hash == "config1-hash"
            assert deployment2.version_hash == "config2-hash"

    async def test_sandbox_config_changes_trigger_redeployment(self, basic_tool, mock_user):
        """Test that sandbox config changes trigger redeployment."""
        # Skip the actual Modal deployment part in this test
        # Just test the version hash calculation changes
        config1 = SandboxConfig(
            id=f"sandbox-{uuid.uuid4().hex[:8]}",
            type=SandboxType.MODAL,
            config=ModalSandboxConfig(timeout=30).model_dump(),
        )
        config2 = SandboxConfig(
            id=f"sandbox-{uuid.uuid4().hex[:8]}",
            type=SandboxType.MODAL,
            config=ModalSandboxConfig(
                timeout=60,
                pip_requirements=["requests"],
            ).model_dump(),
        )
        # Mock the Modal credentials to allow sandbox instantiation
        with patch("letta.services.tool_sandbox.modal_sandbox_v2.tool_settings") as mock_settings:
            mock_settings.modal_token_id = "test-token-id"
            mock_settings.modal_token_secret = "test-token-secret"
            sandbox1 = AsyncToolSandboxModalV2(
                tool_name="calculate",
                args={"operation": "add", "a": 1, "b": 1},
                user=mock_user,
                tool_id=basic_tool.id,
                tool_object=basic_tool,
                sandbox_config=config1,
            )
            sandbox2 = AsyncToolSandboxModalV2(
                tool_name="calculate",
                args={"operation": "add", "a": 2, "b": 2},
                user=mock_user,
                tool_id=basic_tool.id,
                tool_object=basic_tool,
                sandbox_config=config2,
            )
            # Version hashes should be different due to config changes
            version1 = sandbox1._deployment_manager.calculate_version_hash(config1)
            version2 = sandbox2._deployment_manager.calculate_version_hash(config2)
            assert version1 != version2
# ============================================================================
# MOCKED INTEGRATION TESTS (No Modal credentials required)
# ============================================================================
class TestModalV2MockedIntegration:
    """Integration tests with mocked Modal components (no Modal credentials required)."""

    @pytest.mark.asyncio
    async def test_full_integration_with_persistence(self, mock_user, sandbox_config):
        """Test the full Modal sandbox V2 integration with persistence.

        Patches the modal module itself so deploy/execute paths run entirely
        against mocks, with the remote executor returning a canned result of 8.
        """
        tool = Tool(
            id=f"tool-{uuid.uuid4().hex[:8]}",
            name="integration_test",
            source_code="""
def calculate(operation: str, a: float, b: float) -> float:
    '''Perform a simple calculation'''
    if operation == "add":
        return a + b
    return 0
""",
            json_schema={
                "parameters": {
                    "properties": {
                        "operation": {"type": "string"},
                        "a": {"type": "number"},
                        "b": {"type": "number"},
                    }
                }
            },
            metadata_={},
        )
        with patch("letta.services.tool_sandbox.modal_version_manager.ToolManager") as MockToolManager:
            with patch("letta.services.tool_sandbox.modal_sandbox_v2.modal") as mock_modal:
                mock_tool_manager = MockToolManager.return_value
                mock_tool_manager.get_tool_by_id.return_value = tool

                # Track metadata updates
                async def update_tool(*args, **kwargs):
                    tool.metadata_ = kwargs.get("metadata_", {})
                    return tool

                mock_tool_manager.update_tool_by_id_async = update_tool
                # Mock Modal app
                mock_app = MagicMock()
                mock_app.run = MagicMock()

                # Mock the function decorator: whatever function the sandbox
                # registers, replace it with a mock whose .remote.aio returns
                # the canned execution payload below.
                def mock_function_decorator(*args, **kwargs):
                    def decorator(func):
                        mock_func = MagicMock()
                        mock_func.remote = MagicMock()
                        mock_func.remote.aio = AsyncMock(
                            return_value={
                                "result": 8,
                                "agent_state": None,
                                "stdout": "",
                                "stderr": "",
                                "error": None,
                            }
                        )
                        mock_app.tool_executor = mock_func
                        return mock_func

                    return decorator

                mock_app.function = mock_function_decorator
                mock_app.deploy = MagicMock()
                mock_app.deploy.aio = AsyncMock()
                mock_modal.App.return_value = mock_app
                # Mock the sandbox config manager
                with patch("letta.services.tool_sandbox.base.SandboxConfigManager") as MockSCM:
                    mock_scm = MockSCM.return_value
                    mock_scm.get_sandbox_env_vars_as_dict_async = AsyncMock(return_value={})
                    # Create sandbox
                    sandbox = AsyncToolSandboxModalV2(
                        tool_name="integration_test",
                        args={"operation": "add", "a": 5, "b": 3},
                        user=mock_user,
                        tool_id=tool.id,
                        tool_object=tool,
                        sandbox_config=sandbox_config,
                    )
                    # Mock version manager methods through deployment manager
                    version_manager = sandbox._deployment_manager.version_manager
                    if version_manager:
                        # get_deployment -> None forces the deploy path on first run
                        with patch.object(version_manager, "get_deployment", return_value=None):
                            with patch.object(version_manager, "register_deployment", return_value=None):
                                # First execution - should deploy
                                result1 = await sandbox.run()
                                assert result1.status == "success"
                                assert result1.func_return == 8
                    else:
                        # If no version manager, just run
                        result1 = await sandbox.run()
                        assert result1.status == "success"
                        assert result1.func_return == 8

    @pytest.mark.asyncio
    async def test_concurrent_deployment_handling(self, mock_user, sandbox_config):
        """Test that concurrent deployment requests are handled correctly."""
        tool = Tool(
            id=f"tool-{uuid.uuid4().hex[:8]}",
            name="concurrent_test",
            source_code="def test(x: int) -> int:\n '''Test function.\n \n Args:\n x: The input value\n \n Returns:\n The same value\n '''\n return x",
            json_schema={"parameters": {"properties": {}}},
            metadata_={},
        )
        with patch("letta.services.tool_sandbox.modal_version_manager.ToolManager") as MockToolManager:
            mock_tool_manager = MockToolManager.return_value
            mock_tool_manager.get_tool_by_id.return_value = tool
            # Track update calls
            update_calls = []

            async def track_update(*args, **kwargs):
                update_calls.append((args, kwargs))
                await asyncio.sleep(0.01)  # Simulate slight delay
                return tool

            mock_tool_manager.update_tool_by_id_async = AsyncMock(side_effect=track_update)
            version_manager = ModalVersionManager()
            app_name = f"{mock_user.organization_id}-{tool.name}-v2"
            version_hash = "concurrent123"
            # Launch multiple concurrent deployments of the SAME version
            tasks = []
            for i in range(5):
                task = version_manager.register_deployment(
                    tool_id=tool.id,
                    app_name=app_name,
                    version_hash=version_hash,
                    app=MagicMock(),
                    sandbox_config_id=sandbox_config.id,
                    actor=mock_user,
                )
                tasks.append(task)
            # Wait for all to complete
            await asyncio.gather(*tasks)
            # All calls should complete (current implementation doesn't dedupe)
            assert len(update_calls) == 5
# ============================================================================
# DEPLOYMENT STATISTICS TESTS
# ============================================================================
@pytest.mark.skipif(not os.getenv("MODAL_TOKEN_ID") or not os.getenv("MODAL_TOKEN_SECRET"), reason="Modal credentials not configured")
class TestModalV2DeploymentStats:
    """Tests for deployment statistics tracking."""

    @pytest.mark.asyncio
    async def test_deployment_stats(self, basic_tool, async_tool, test_user):
        """Test deployment statistics tracking.

        Deploys two tools through the Modal v2 sandbox, then checks the
        version manager's aggregate counters and the per-deployment fields
        exposed by get_deployment_stats().
        """
        version_manager = get_version_manager()
        # Clear any existing deployments (for test isolation)
        version_manager.clear_deployments()
        # Ensure clean state
        await asyncio.sleep(0.1)
        # Deploy multiple tools
        tools = [basic_tool, async_tool]
        for tool in tools:
            sandbox = AsyncToolSandboxModalV2(
                tool_name=tool.name,
                args={},
                user=test_user,
                tool_id=tool.id,
                tool_object=tool,
            )
            # Running the sandbox is what triggers the deployment being counted.
            await sandbox.run()
        # Get stats
        stats = await version_manager.get_deployment_stats()
        # >= rather than == because other deployments may exist despite the clear above.
        assert stats["total_deployments"] >= 2
        assert stats["active_deployments"] >= 2
        assert stats["stale_deployments"] == 0
        # Check individual deployment info
        for deployment in stats["deployments"]:
            assert "app_name" in deployment
            assert "version" in deployment
            assert "usage_count" in deployment
            assert deployment["usage_count"] >= 1
# Allow running this module directly: verbose output, stdout capture disabled.
if __name__ == "__main__":
    pytest.main([__file__, "-v", "-s"])
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/integration_test_modal_sandbox_v2.py",
"license": "Apache License 2.0",
"lines": 682,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/integration_test_turbopuffer.py | import asyncio
import uuid
from datetime import datetime, timezone
import pytest
from letta.config import LettaConfig
from letta.helpers.tpuf_client import TurbopufferClient, should_use_tpuf, should_use_tpuf_for_messages
from letta.schemas.embedding_config import EmbeddingConfig
from letta.schemas.enums import MessageRole, TagMatchMode, VectorDBProvider
from letta.schemas.letta_message_content import ReasoningContent, TextContent, ToolCallContent, ToolReturnContent
from letta.schemas.message import Message as PydanticMessage
from letta.schemas.passage import Passage
from letta.server.server import SyncServer
from letta.settings import settings
@pytest.fixture(scope="module")
def server():
    """Module-scoped SyncServer instance shared by all tests in this file."""
    cfg = LettaConfig.load()
    cfg.save()
    return SyncServer(init_with_default_org_and_user=False)
@pytest.fixture
async def sarah_agent(server, default_user):
    """Create a test agent named Sarah.

    Yields the created agent and best-effort deletes it on teardown so the
    test database does not accumulate agents across runs.
    """
    # Imports kept local to the fixture body.
    from letta.schemas.agent import CreateAgent
    from letta.schemas.embedding_config import EmbeddingConfig
    from letta.schemas.llm_config import LLMConfig

    agent = await server.agent_manager.create_agent_async(
        agent_create=CreateAgent(
            name="Sarah",
            memory_blocks=[],
            llm_config=LLMConfig.default_config("gpt-4o-mini"),
            embedding_config=EmbeddingConfig.default_config(provider="openai"),
            include_base_tools=False,
        ),
        actor=default_user,
    )
    yield agent
    # Cleanup
    try:
        await server.agent_manager.delete_agent_async(agent.id, default_user)
    except Exception:
        # Best-effort teardown: a failed delete must not fail the test itself.
        pass
@pytest.fixture
def enable_turbopuffer():
    """Temporarily enable Turbopuffer for a test.

    Turns the use_tpuf flag on and pins the environment to DEV, then restores
    use_tpuf, tpuf_api_key, and environment to their original values on
    teardown. The configured tpuf_api_key is left untouched while the fixture
    is active (the previous version contained a branch that re-assigned the
    just-saved key back onto itself when it was unset -- a no-op, removed).
    """
    original_use_tpuf = settings.use_tpuf
    original_api_key = settings.tpuf_api_key
    original_environment = settings.environment
    # Enable Turbopuffer with whatever API key is already configured
    settings.use_tpuf = True
    # Set environment to DEV for testing
    settings.environment = "DEV"
    yield
    # Restore original values
    settings.use_tpuf = original_use_tpuf
    settings.tpuf_api_key = original_api_key
    settings.environment = original_environment
@pytest.fixture
def enable_message_embedding():
    """Enable Turbopuffer plus per-message embedding, restoring settings afterwards."""
    saved = (settings.use_tpuf, settings.tpuf_api_key, settings.embed_all_messages, settings.environment)

    settings.use_tpuf = True
    settings.tpuf_api_key = settings.tpuf_api_key or "test-key"
    settings.embed_all_messages = True
    settings.environment = "DEV"

    yield

    settings.use_tpuf, settings.tpuf_api_key, settings.embed_all_messages, settings.environment = saved
@pytest.fixture
def disable_turbopuffer():
    """Force Turbopuffer and message embedding off for the duration of a test."""
    saved_use_tpuf, saved_embed = settings.use_tpuf, settings.embed_all_messages
    settings.use_tpuf = False
    settings.embed_all_messages = False
    yield
    settings.use_tpuf, settings.embed_all_messages = saved_use_tpuf, saved_embed
@pytest.fixture
def sample_embedding_config():
    """Default 'letta' embedding configuration for tests that need one."""
    config = EmbeddingConfig.default_config(model_name="letta")
    return config
async def wait_for_embedding(
    agent_id: str, message_id: str, organization_id: str, actor, max_wait: float = 10.0, poll_interval: float = 0.5
) -> bool:
    """Poll Turbopuffer directly to check if a message has been embedded.

    Args:
        agent_id: Agent ID for the message
        message_id: ID of the message to find
        organization_id: Organization ID
        actor: User performing the query (passed through to the client)
        max_wait: Maximum time to wait in seconds
        poll_interval: Time between polls in seconds

    Returns:
        True if message was found in Turbopuffer within timeout, False otherwise
    """
    # asyncio and TurbopufferClient are already imported at module level;
    # the previous function-local re-imports were redundant and removed.
    client = TurbopufferClient()
    # Use the running loop's monotonic clock; get_running_loop() is the modern
    # replacement for get_event_loop() inside a coroutine.
    loop = asyncio.get_running_loop()
    deadline = loop.time() + max_wait
    while loop.time() < deadline:
        try:
            # Query Turbopuffer directly using timestamp mode to get all messages
            results = await client.query_messages_by_agent_id(
                agent_id=agent_id,
                organization_id=organization_id,
                actor=actor,
                search_mode="timestamp",
                top_k=100,  # Get more messages to ensure we find it
            )
            # Check if our message ID is in the results
            if any(msg["id"] == message_id for msg, _, _ in results):
                return True
        except Exception:
            # Swallow transient errors - Turbopuffer might still be processing
            pass
        await asyncio.sleep(poll_interval)
    return False
def test_should_use_tpuf_with_settings():
    """should_use_tpuf is True only when the flag is on AND an API key is set."""
    saved_flag, saved_key = settings.use_tpuf, settings.tpuf_api_key
    try:
        # Both flag and key present -> enabled
        settings.use_tpuf, settings.tpuf_api_key = True, "test-key"
        assert should_use_tpuf() is True

        # Flag off -> disabled regardless of key
        settings.use_tpuf = False
        assert should_use_tpuf() is False

        # Flag on but key missing -> disabled
        settings.use_tpuf, settings.tpuf_api_key = True, None
        assert should_use_tpuf() is False
    finally:
        settings.use_tpuf, settings.tpuf_api_key = saved_flag, saved_key
@pytest.mark.asyncio
async def test_archive_creation_with_tpuf_enabled(server, default_user, enable_turbopuffer):
    """Test that archives are created with correct vector_db_provider when TPUF is enabled"""
    archive = await server.archive_manager.create_archive_async(
        name="Test Archive with TPUF", embedding_config=EmbeddingConfig.default_config(provider="openai"), actor=default_user
    )
    # With the enable_turbopuffer fixture active, new archives should be TPUF-backed.
    assert archive.vector_db_provider == VectorDBProvider.TPUF
    # TODO: Add cleanup when delete_archive method is available
@pytest.mark.asyncio
async def test_archive_creation_with_tpuf_disabled(server, default_user, disable_turbopuffer):
    """Test that archives default to NATIVE when TPUF is disabled"""
    archive = await server.archive_manager.create_archive_async(
        name="Test Archive without TPUF", embedding_config=EmbeddingConfig.default_config(provider="openai"), actor=default_user
    )
    # With turbopuffer disabled, archives fall back to the native (pgvector) provider.
    assert archive.vector_db_provider == VectorDBProvider.NATIVE
    # TODO: Add cleanup when delete_archive method is available
@pytest.mark.asyncio
@pytest.mark.skipif(not settings.tpuf_api_key, reason="Turbopuffer API key not configured for testing")
async def test_dual_write_and_query_with_real_tpuf(server, default_user, sarah_agent, enable_turbopuffer):
    """Test that passages are written to both SQL and Turbopuffer with real connection and can be queried.

    End-to-end flow: create a TPUF archive, attach the agent, insert passages
    (dual write), verify SQL listing and vector search, then delete one
    passage and confirm it disappears from both stores.
    """
    # Create a TPUF-enabled archive
    archive = await server.archive_manager.create_archive_async(
        name="Test TPUF Archive for Real Dual Write", embedding_config=EmbeddingConfig.default_config(provider="openai"), actor=default_user
    )
    assert archive.vector_db_provider == VectorDBProvider.TPUF
    # Attach the agent to the archive
    await server.archive_manager.attach_agent_to_archive_async(
        agent_id=sarah_agent.id, archive_id=archive.id, is_owner=True, actor=default_user
    )
    try:
        # Insert passages - this should trigger dual write
        test_passages = [
            "Turbopuffer is a vector database optimized for performance.",
            "This integration test verifies dual-write functionality.",
            "Metadata attributes should be properly stored in Turbopuffer.",
        ]
        for text in test_passages:
            # NOTE(review): strict_mode=True appears to surface Turbopuffer write
            # failures rather than ignoring them - confirm against passage_manager.
            passages = await server.passage_manager.insert_passage(agent_state=sarah_agent, text=text, actor=default_user, strict_mode=True)
            assert passages is not None
            assert len(passages) > 0
        # Verify passages are in SQL - use agent_manager to list passages
        # (results are (passage, score, metadata) tuples)
        sql_passages = await server.agent_manager.query_agent_passages_async(actor=default_user, agent_id=sarah_agent.id, limit=10)
        assert len(sql_passages) >= len(test_passages)
        for text in test_passages:
            assert any(p.text == text for p, _, _ in sql_passages)
        # Test vector search which should use Turbopuffer
        embedding_config = sarah_agent.embedding_config or EmbeddingConfig.default_config(provider="openai")
        # Perform vector search
        vector_results = await server.agent_manager.query_agent_passages_async(
            actor=default_user,
            agent_id=sarah_agent.id,
            query_text="turbopuffer vector database",
            embedding_config=embedding_config,
            embed_query=True,
            limit=5,
        )
        # Should find relevant passages via Turbopuffer vector search
        assert len(vector_results) > 0
        # The most relevant result should be about Turbopuffer
        assert any("Turbopuffer" in p.text or "vector" in p.text for p, _, _ in vector_results)
        # Test deletion - should delete from both
        passage_to_delete = sql_passages[0][0]  # Extract passage from tuple
        await server.passage_manager.delete_agent_passages_async([passage_to_delete], default_user, strict_mode=True)
        # Verify deleted from SQL
        remaining = await server.agent_manager.query_agent_passages_async(actor=default_user, agent_id=sarah_agent.id, limit=10)
        assert not any(p.id == passage_to_delete.id for p, _, _ in remaining)
        # Verify vector search no longer returns deleted passage
        vector_results_after_delete = await server.agent_manager.query_agent_passages_async(
            actor=default_user,
            agent_id=sarah_agent.id,
            query_text=passage_to_delete.text,
            embedding_config=embedding_config,
            embed_query=True,
            limit=10,
        )
        assert not any(p.id == passage_to_delete.id for p, _, _ in vector_results_after_delete)
    finally:
        # TODO: Clean up archive when delete_archive method is available
        pass
@pytest.mark.asyncio
async def test_turbopuffer_metadata_attributes(default_user, enable_turbopuffer):
    """Test that Turbopuffer properly stores and retrieves metadata attributes"""
    # Only run if we have a real API key
    if not settings.tpuf_api_key:
        pytest.skip("No Turbopuffer API key available")
    client = TurbopufferClient()
    archive_id = f"archive-{uuid.uuid4()}"
    try:
        # Insert passages with various metadata
        # NOTE(review): the per-item "vector" and "organization_id" values below are
        # never passed to insert_archival_memories (only texts and ids are) - they
        # look like leftover fixture data; confirm before relying on them.
        test_data = [
            {
                "id": f"passage-{uuid.uuid4()}",
                "text": "First test passage",
                "vector": [0.1] * 1536,
                "organization_id": "org-123",
                "created_at": datetime.now(timezone.utc),
            },
            {
                "id": f"passage-{uuid.uuid4()}",
                "text": "Second test passage",
                "vector": [0.2] * 1536,
                "organization_id": "org-123",
                "created_at": datetime.now(timezone.utc),
            },
            {
                "id": f"passage-{uuid.uuid4()}",
                "text": "Third test passage from different org",
                "vector": [0.3] * 1536,
                "organization_id": "org-456",
                "created_at": datetime.now(timezone.utc),
            },
        ]
        # Insert all passages
        result = await client.insert_archival_memories(
            archive_id=archive_id,
            text_chunks=[d["text"] for d in test_data],
            passage_ids=[d["id"] for d in test_data],
            organization_id="org-123",  # Default org
            actor=default_user,
            created_at=datetime.now(timezone.utc),
        )
        assert len(result) == 3
        # Query all passages (no tag filtering)
        results = await client.query_passages(archive_id=archive_id, actor=default_user, top_k=10)
        # Should get all passages
        assert len(results) == 3  # All three passages
        for passage, score, metadata in results:
            assert passage.organization_id is not None
        # Clean up
        await client.delete_passages(archive_id=archive_id, passage_ids=[d["id"] for d in test_data])
    except Exception as e:
        # Clean up on error
        try:
            await client.delete_all_passages(archive_id)
        except Exception:
            pass
        raise e
@pytest.mark.asyncio
async def test_native_only_operations(server, default_user, sarah_agent, disable_turbopuffer):
    """Test that operations work correctly when using only native PostgreSQL"""
    # Create archive (should be NATIVE since turbopuffer is disabled)
    archive = await server.archive_manager.get_or_create_default_archive_for_agent_async(agent_state=sarah_agent, actor=default_user)
    assert archive.vector_db_provider == VectorDBProvider.NATIVE
    # Insert passages - should only write to SQL
    text_content = "This is a test passage for native PostgreSQL only."
    passages = await server.passage_manager.insert_passage(agent_state=sarah_agent, text=text_content, actor=default_user, strict_mode=True)
    assert passages is not None
    assert len(passages) > 0
    # List passages - should work from SQL (results are (passage, score, metadata) tuples)
    sql_passages = await server.agent_manager.query_agent_passages_async(actor=default_user, agent_id=sarah_agent.id, limit=10)
    assert any(p.text == text_content for p, _, _ in sql_passages)
    # Vector search should use PostgreSQL pgvector
    embedding_config = sarah_agent.embedding_config or EmbeddingConfig.default_config(provider="openai")
    vector_results = await server.agent_manager.query_agent_passages_async(
        actor=default_user,
        agent_id=sarah_agent.id,
        query_text="native postgresql",
        embedding_config=embedding_config,
        embed_query=True,
    )
    # Should still work with native PostgreSQL (no specific hits guaranteed)
    assert isinstance(vector_results, list)
@pytest.mark.asyncio
@pytest.mark.skipif(not settings.tpuf_api_key, reason="Turbopuffer API key not configured for testing")
async def test_hybrid_search_with_real_tpuf(default_user, enable_turbopuffer):
    """Test hybrid search functionality combining vector and full-text search.

    Inserts five passages, then exercises vector, FTS, hybrid (at several
    weightings), and timestamp search modes, checking result counts, score
    types, and descending score order. Removed from the original: a dead
    expression statement that built (and discarded) a fake-embeddings list,
    plus function-local re-imports of uuid and TurbopufferClient that shadow
    the module-level imports.
    """
    client = TurbopufferClient()
    archive_id = f"archive-{uuid.uuid4()}"
    org_id = str(uuid.uuid4())
    try:
        # Insert test passages with different characteristics
        texts = [
            "Turbopuffer is a vector database optimized for high-performance similarity search",
            "The quick brown fox jumps over the lazy dog",
            "Machine learning models require vector embeddings for semantic search",
            "Database optimization techniques improve query performance",
            "Turbopuffer supports both vector and full-text search capabilities",
        ]
        passage_ids = [f"passage-{str(uuid.uuid4())}" for _ in texts]
        # Insert passages
        await client.insert_archival_memories(
            archive_id=archive_id, text_chunks=texts, passage_ids=passage_ids, organization_id=org_id, actor=default_user
        )
        # Test vector-only search
        vector_results = await client.query_passages(
            archive_id=archive_id,
            actor=default_user,
            query_text="python programming tutorial",
            search_mode="vector",
            top_k=3,
        )
        assert 0 < len(vector_results) <= 3
        # all results should have scores
        assert all(isinstance(score, float) for _, score, _ in vector_results)
        # Test FTS-only search
        fts_results = await client.query_passages(
            archive_id=archive_id, actor=default_user, query_text="Turbopuffer vector database", search_mode="fts", top_k=3
        )
        assert 0 < len(fts_results) <= 3
        # should find passages mentioning Turbopuffer
        assert any("Turbopuffer" in passage.text for passage, _, _ in fts_results)
        # all results should have scores
        assert all(isinstance(score, float) for _, score, _ in fts_results)
        # Test hybrid search
        hybrid_results = await client.query_passages(
            archive_id=archive_id,
            actor=default_user,
            query_text="vector search Turbopuffer",
            search_mode="hybrid",
            top_k=3,
            vector_weight=0.5,
            fts_weight=0.5,
        )
        assert 0 < len(hybrid_results) <= 3
        # hybrid should combine both vector and text relevance
        assert any("Turbopuffer" in passage.text or "vector" in passage.text for passage, _, _ in hybrid_results)
        # all results should have scores
        assert all(isinstance(score, float) for _, score, _ in hybrid_results)
        # results should be sorted by score (highest first)
        scores = [score for _, score, _ in hybrid_results]
        assert scores == sorted(scores, reverse=True)
        # Test with different weights
        vector_heavy_results = await client.query_passages(
            archive_id=archive_id,
            actor=default_user,
            query_text="quick brown fox",  # matches second passage
            search_mode="hybrid",
            top_k=3,
            vector_weight=0.8,  # emphasize vector search
            fts_weight=0.2,
        )
        assert 0 < len(vector_heavy_results) <= 3
        # all results should have scores
        assert all(isinstance(score, float) for _, score, _ in vector_heavy_results)
        # Smoke-test each search mode once more (results intentionally unused)
        await client.query_passages(archive_id=archive_id, actor=default_user, query_text="test", search_mode="vector", top_k=3)
        await client.query_passages(archive_id=archive_id, actor=default_user, query_text="test", search_mode="fts", top_k=3)
        await client.query_passages(archive_id=archive_id, actor=default_user, query_text="test", search_mode="hybrid", top_k=3)
        # Test explicit timestamp mode
        timestamp_results = await client.query_passages(archive_id=archive_id, actor=default_user, search_mode="timestamp", top_k=3)
        assert len(timestamp_results) <= 3
        # Should return passages ordered by timestamp (most recent first)
        assert all(isinstance(passage, Passage) for passage, _, _ in timestamp_results)
    finally:
        # Clean up
        try:
            await client.delete_all_passages(archive_id)
        except Exception:
            pass
@pytest.mark.asyncio
@pytest.mark.skipif(not settings.tpuf_api_key, reason="Turbopuffer API key not configured for testing")
async def test_tag_filtering_with_real_tpuf(default_user, enable_turbopuffer):
    """Test tag filtering functionality with AND and OR logic.

    Inserts five tagged passages and verifies TagMatchMode.ANY / ALL filtering
    across vector, FTS, and hybrid search modes. Removed from the original: a
    dead expression statement that built (and discarded) a fake-embeddings
    list, function-local re-imports of uuid and TurbopufferClient shadowing
    the module-level imports, and an unused enumerate() index.
    """
    client = TurbopufferClient()
    archive_id = f"archive-{uuid.uuid4()}"
    org_id = str(uuid.uuid4())
    try:
        # Insert passages with different tag combinations
        texts = [
            "Python programming tutorial",
            "Machine learning with Python",
            "JavaScript web development",
            "Python data science tutorial",
            "React JavaScript framework",
        ]
        tag_sets = [
            ["python", "tutorial"],
            ["python", "ml"],
            ["javascript", "web"],
            ["python", "tutorial", "data"],
            ["javascript", "react"],
        ]
        passage_ids = [f"passage-{str(uuid.uuid4())}" for _ in texts]
        # Insert passages one at a time so each gets its own tag set
        for text, tags, passage_id in zip(texts, tag_sets, passage_ids):
            await client.insert_archival_memories(
                archive_id=archive_id,
                text_chunks=[text],
                passage_ids=[passage_id],
                organization_id=org_id,
                actor=default_user,
                tags=tags,
                created_at=datetime.now(timezone.utc),
            )
        # Test tag filtering with "any" mode (should find passages with any of the specified tags)
        python_any_results = await client.query_passages(
            archive_id=archive_id,
            actor=default_user,
            query_text="python programming",
            search_mode="vector",
            top_k=10,
            tags=["python"],
            tag_match_mode=TagMatchMode.ANY,
        )
        # Should find 3 passages with python tag
        python_passages = [passage for passage, _, _ in python_any_results]
        python_texts = [p.text for p in python_passages]
        assert len(python_passages) == 3
        assert "Python programming tutorial" in python_texts
        assert "Machine learning with Python" in python_texts
        assert "Python data science tutorial" in python_texts
        # Test tag filtering with "all" mode
        python_tutorial_all_results = await client.query_passages(
            archive_id=archive_id,
            actor=default_user,
            query_text="python tutorial",
            search_mode="vector",
            top_k=10,
            tags=["python", "tutorial"],
            tag_match_mode=TagMatchMode.ALL,
        )
        # Should find 2 passages that have both python AND tutorial tags
        tutorial_passages = [passage for passage, _, _ in python_tutorial_all_results]
        tutorial_texts = [p.text for p in tutorial_passages]
        assert len(tutorial_passages) == 2
        assert "Python programming tutorial" in tutorial_texts
        assert "Python data science tutorial" in tutorial_texts
        # Test tag filtering with FTS mode
        js_fts_results = await client.query_passages(
            archive_id=archive_id,
            actor=default_user,
            query_text="javascript",
            search_mode="fts",
            top_k=10,
            tags=["javascript"],
            tag_match_mode=TagMatchMode.ANY,
        )
        # Should find 2 passages with javascript tag
        js_passages = [passage for passage, _, _ in js_fts_results]
        js_texts = [p.text for p in js_passages]
        assert len(js_passages) == 2
        assert "JavaScript web development" in js_texts
        assert "React JavaScript framework" in js_texts
        # Test hybrid search with tags
        python_hybrid_results = await client.query_passages(
            archive_id=archive_id,
            actor=default_user,
            query_text="python programming",
            search_mode="hybrid",
            top_k=10,
            tags=["python"],
            tag_match_mode=TagMatchMode.ANY,
            vector_weight=0.6,
            fts_weight=0.4,
        )
        # Should find python-tagged passages
        hybrid_passages = [passage for passage, _, _ in python_hybrid_results]
        hybrid_texts = [p.text for p in hybrid_passages]
        assert len(hybrid_passages) == 3
        assert all("Python" in text for text in hybrid_texts)
    finally:
        # Clean up
        try:
            await client.delete_all_passages(archive_id)
        except Exception:
            pass
@pytest.mark.asyncio
async def test_temporal_filtering_with_real_tpuf(default_user, enable_turbopuffer):
    """Test temporal filtering with date ranges.

    Inserts passages dated today / yesterday / last week / last month, then
    queries with start_date/end_date windows across vector, FTS, and hybrid
    modes, asserting only passages inside each window come back.
    """
    # timedelta is not imported at module level, hence the local import here.
    from datetime import datetime, timedelta, timezone

    # Skip if Turbopuffer is not properly configured
    if not should_use_tpuf():
        pytest.skip("Turbopuffer not configured - skipping TPUF temporal filtering test")
    # Create client
    client = TurbopufferClient()
    # Create a unique archive ID for this test
    archive_id = f"archive-{uuid.uuid4()}"
    try:
        # Create passages with different timestamps
        now = datetime.now(timezone.utc)
        yesterday = now - timedelta(days=1)
        last_week = now - timedelta(days=7)
        last_month = now - timedelta(days=30)
        # Insert passages with specific timestamps
        test_passages = [
            ("Today's meeting notes about project Alpha", now),
            ("Yesterday's standup summary", yesterday),
            ("Last week's sprint review", last_week),
            ("Last month's quarterly planning", last_month),
        ]
        # We need to generate embeddings for the passages
        # For testing, we'll use simple dummy embeddings
        for text, timestamp in test_passages:
            passage_id = f"passage-{uuid.uuid4()}"
            await client.insert_archival_memories(
                archive_id=archive_id,
                text_chunks=[text],
                passage_ids=[passage_id],
                organization_id="test-org",
                actor=default_user,
                created_at=timestamp,
            )
        # Test 1: Query with date range (last 3 days)
        three_days_ago = now - timedelta(days=3)
        results = await client.query_passages(
            archive_id=archive_id,
            actor=default_user,
            query_text="meeting notes",
            search_mode="vector",
            top_k=10,
            start_date=three_days_ago,
            end_date=now,
        )
        # Should only get today's and yesterday's passages
        passages = [p for p, _, _ in results]
        texts = [p.text for p in passages]
        assert len(passages) == 2
        # Order is not guaranteed, so check both positions
        assert "Today's meeting notes" in texts[0] or "Today's meeting notes" in texts[1]
        assert "Yesterday's standup" in texts[0] or "Yesterday's standup" in texts[1]
        assert "Last week's sprint" not in str(texts)
        assert "Last month's quarterly" not in str(texts)
        # Test 2: Query with only start_date (everything after 2 weeks ago)
        two_weeks_ago = now - timedelta(days=14)
        results = await client.query_passages(
            archive_id=archive_id,
            actor=default_user,
            query_text="meeting notes",
            search_mode="vector",
            top_k=10,
            start_date=two_weeks_ago,
        )
        # Should get all except last month's passage
        passages = [p for p, _, _ in results]
        assert len(passages) == 3
        texts = [p.text for p in passages]
        assert "Last month's quarterly" not in str(texts)
        # Test 3: Query with only end_date (everything before yesterday)
        results = await client.query_passages(
            archive_id=archive_id,
            actor=default_user,
            query_text="meeting notes",
            search_mode="vector",
            top_k=10,
            end_date=yesterday + timedelta(hours=12),  # Middle of yesterday
        )
        # Should get yesterday and older passages
        passages = [p for p, _, _ in results]
        assert len(passages) >= 3  # yesterday, last week, last month
        texts = [p.text for p in passages]
        assert "Today's meeting notes" not in str(texts)
        # Test 4: Test with FTS mode and date filtering
        results = await client.query_passages(
            archive_id=archive_id,
            actor=default_user,
            query_text="meeting notes project",
            search_mode="fts",
            top_k=10,
            start_date=yesterday,
        )
        # Should only find today's meeting notes
        passages = [p for p, _, _ in results]
        if len(passages) > 0:  # FTS might not match if text search doesn't find keywords
            texts = [p.text for p in passages]
            assert "Today's meeting notes" in texts[0]
        # Test 5: Test with hybrid mode and date filtering
        results = await client.query_passages(
            archive_id=archive_id,
            actor=default_user,
            query_text="sprint review",
            search_mode="hybrid",
            top_k=10,
            start_date=last_week - timedelta(days=1),
            end_date=last_week + timedelta(days=1),
        )
        # Should find last week's sprint review
        passages = [p for p, _, _ in results]
        if len(passages) > 0:
            texts = [p.text for p in passages]
            assert "Last week's sprint review" in texts[0]
    finally:
        # Clean up
        try:
            await client.delete_all_passages(archive_id)
        except Exception:
            pass
def test_should_use_tpuf_for_messages_settings():
    """should_use_tpuf_for_messages needs use_tpuf, an API key, AND embed_all_messages."""
    saved = (settings.use_tpuf, settings.tpuf_api_key, settings.embed_all_messages)
    try:
        # (use_tpuf, tpuf_api_key, embed_all_messages) -> expected result,
        # in the same order as the original assertions.
        scenarios = [
            (True, "test-key", True, True),   # everything on
            (False, "test-key", True, False),  # main flag off
            (True, "test-key", False, False),  # message embedding off
            (False, "test-key", False, False),  # both flags off
            (True, None, True, False),  # API key missing
        ]
        for use_tpuf, api_key, embed_messages, expected in scenarios:
            settings.use_tpuf = use_tpuf
            settings.tpuf_api_key = api_key
            settings.embed_all_messages = embed_messages
            assert should_use_tpuf_for_messages() is expected
    finally:
        settings.use_tpuf, settings.tpuf_api_key, settings.embed_all_messages = saved
def test_message_text_extraction(server, default_user):
    """Test extraction of text from various message content structures including ReasoningContent.

    Exercises MessageManager._extract_message_text over text-only, empty,
    mixed, and reasoning-bearing content lists (the exact JSON-wrapped output
    strings are pinned), then checks _combine_assistant_tool_messages keeps
    reasoning, tool-call, and tool-result text in the combined message.
    """
    manager = server.message_manager
    # Test 1: List with single string-like TextContent
    msg1 = PydanticMessage(
        role=MessageRole.user,
        content=[TextContent(text="Simple text content")],
        agent_id="test-agent",
    )
    text1 = manager._extract_message_text(msg1)
    # Extraction wraps the joined text in a JSON object under "content".
    assert text1 == '{"content": "Simple text content"}'
    # Test 2: List with single TextContent
    msg2 = PydanticMessage(
        role=MessageRole.user,
        content=[TextContent(text="Single text content")],
        agent_id="test-agent",
    )
    text2 = manager._extract_message_text(msg2)
    assert text2 == '{"content": "Single text content"}'
    # Test 3: List with multiple TextContent items
    msg3 = PydanticMessage(
        role=MessageRole.user,
        content=[
            TextContent(text="First part"),
            TextContent(text="Second part"),
            TextContent(text="Third part"),
        ],
        agent_id="test-agent",
    )
    text3 = manager._extract_message_text(msg3)
    # Multiple parts are joined with single spaces.
    assert text3 == '{"content": "First part Second part Third part"}'
    # Test 4: Empty content
    msg4 = PydanticMessage(
        role=MessageRole.system,
        content=None,
        agent_id="test-agent",
    )
    text4 = manager._extract_message_text(msg4)
    assert text4 == ""
    # Test 5: Empty list
    msg5 = PydanticMessage(
        role=MessageRole.assistant,
        content=[],
        agent_id="test-agent",
    )
    text5 = manager._extract_message_text(msg5)
    assert text5 == ""
    # Test 6: Mixed content types with to_text() methods
    msg6 = PydanticMessage(
        role=MessageRole.assistant,
        content=[
            TextContent(text="User said:"),
            ToolCallContent(id="call-123", name="search", input={"query": "test"}),
            ToolReturnContent(tool_call_id="call-123", content="Found 5 results", is_error=False),
            ReasoningContent(is_native=True, reasoning="I should help the user", signature="step-1"),
        ],
        agent_id="test-agent",
    )
    text6 = manager._extract_message_text(msg6)
    assert (
        text6
        == '{"content": "User said: Tool call: search({\\n \\"query\\": \\"test\\"\\n}) Tool result: Found 5 results I should help the user"}'
    )
    # Test 7: ReasoningContent only (edge case)
    msg7 = PydanticMessage(
        role=MessageRole.assistant,
        content=[ReasoningContent(is_native=True, reasoning="This is my internal reasoning process", signature="reasoning-abc123")],
        agent_id="test-agent",
    )
    text7 = manager._extract_message_text(msg7)
    assert "This is my internal reasoning process" in text7
    # Test 8: ReasoningContent with empty reasoning (should handle gracefully)
    msg8 = PydanticMessage(
        role=MessageRole.assistant,
        content=[
            ReasoningContent(
                is_native=True,
                reasoning="",  # Empty reasoning
                signature="empty-reasoning",
            ),
            TextContent(text="But I have text content"),
        ],
        agent_id="test-agent",
    )
    text8 = manager._extract_message_text(msg8)
    assert "But I have text content" in text8
    # Test 9: Multiple ReasoningContent items
    msg9 = PydanticMessage(
        role=MessageRole.assistant,
        content=[
            ReasoningContent(is_native=True, reasoning="First thought", signature="step-1"),
            ReasoningContent(is_native=True, reasoning="Second thought", signature="step-2"),
            TextContent(text="Final answer"),
        ],
        agent_id="test-agent",
    )
    text9 = manager._extract_message_text(msg9)
    assert "First thought" in text9
    assert "Second thought" in text9
    assert "Final answer" in text9
    # Test 10: ReasoningContent in _combine_assistant_tool_messages
    assistant_with_reasoning = PydanticMessage(
        id="message-c19dbdc7-ba2f-4bf2-a469-64b5aed2c01d",
        role=MessageRole.assistant,
        content=[ReasoningContent(is_native=True, reasoning="I need to search for information", signature="reasoning-xyz")],
        agent_id="test-agent",
        tool_calls=[
            {"id": "call-456", "type": "function", "function": {"name": "web_search", "arguments": '{"query": "Python tutorials"}'}}
        ],
    )
    tool_response = PydanticMessage(
        id="message-16134e76-40fa-48dd-92a8-3e0d9256d79a",
        role=MessageRole.tool,
        name="web_search",
        tool_call_id="call-456",
        content=[TextContent(text="Found 10 Python tutorials")],
        agent_id="test-agent",
    )
    # Test that combination preserves reasoning content
    combined_msgs = manager._combine_assistant_tool_messages([assistant_with_reasoning, tool_response])
    assert len(combined_msgs) == 1
    combined_text = combined_msgs[0].content[0].text
    # Should contain the reasoning text
    assert "search for information" in combined_text or "I need to" in combined_text
    assert "web_search" in combined_text
    assert "Found 10 Python tutorials" in combined_text
@pytest.mark.asyncio
@pytest.mark.skipif(not settings.tpuf_api_key, reason="Turbopuffer API key not configured")
async def test_message_embedding_without_config(server, default_user, sarah_agent, enable_message_embedding):
    """Test that messages are NOT embedded without embedding_config even when tpuf is enabled.

    Creates two messages with no embedding_config, verifies they land in SQL
    as usual, and cleans them up afterwards.
    """
    # Create messages WITHOUT embedding_config
    messages = [
        PydanticMessage(
            role=MessageRole.user,
            content=[TextContent(text="Test message without embedding config")],
            agent_id=sarah_agent.id,
        ),
        PydanticMessage(
            role=MessageRole.assistant,
            content=[TextContent(text="Response without embedding config")],
            agent_id=sarah_agent.id,
        ),
    ]
    created = await server.message_manager.create_many_messages_async(
        pydantic_msgs=messages,
        actor=default_user,
    )
    assert len(created) == 2
    assert all(msg.agent_id == sarah_agent.id for msg in created)
    # Messages should be in SQL
    sql_messages = await server.message_manager.list_messages(
        agent_id=sarah_agent.id,
        actor=default_user,
        limit=10,
    )
    assert len(sql_messages) >= 2
    # Clean up
    message_ids = [msg.id for msg in created]
    await server.message_manager.delete_messages_by_ids_async(message_ids, default_user)
@pytest.mark.asyncio
async def test_generic_reciprocal_rank_fusion():
    """Test the generic RRF function with different object types"""
    from letta.helpers.tpuf_client import TurbopufferClient

    client = TurbopufferClient()

    # --- Passage objects (backward compatibility path) ---
    p1_id = "passage-78d49031-8502-49c1-a970-45663e9f6e07"
    p2_id = "passage-90df8386-4caf-49cc-acbc-d71526de6f77"

    def _make_passage(pid, body):
        # Minimal Passage fixture; only the id matters for rank fusion.
        return Passage(
            id=pid,
            text=body,
            organization_id="org1",
            archive_id="archive1",
            created_at=datetime.now(timezone.utc),
            metadata_={},
            tags=[],
            embedding=[],
            embedding_config=None,
        )

    passage1 = _make_passage(p1_id, "First passage")
    passage2 = _make_passage(p2_id, "Second passage")

    # Vector ranking favors passage1; FTS ranking favors passage2.
    combined = client._reciprocal_rank_fusion(
        vector_results=[passage1, passage2],
        fts_results=[passage2, passage1],
        get_id_func=lambda p: p.id,
        vector_weight=0.5,
        fts_weight=0.5,
        top_k=2,
    )
    assert len(combined) == 2
    # Both passages should appear; entries are (passage, score, metadata) triples.
    fused_passage_ids = [entry[0].id for entry in combined]
    assert p1_id in fused_passage_ids
    assert p2_id in fused_passage_ids

    # --- Plain dicts via the generic function ---
    msg1 = {"id": "m1", "text": "First message"}
    msg2 = {"id": "m2", "text": "Second message"}
    msg3 = {"id": "m3", "text": "Third message"}
    combined_msgs = client._reciprocal_rank_fusion(
        vector_results=[msg1, msg2, msg3],
        fts_results=[msg2, msg3, msg1],
        get_id_func=lambda m: m["id"],
        vector_weight=0.6,
        fts_weight=0.4,
        top_k=3,
    )
    assert len(combined_msgs) == 3
    fused_msg_ids = [entry[0]["id"] for entry in combined_msgs]
    for expected_id in ("m1", "m2", "m3"):
        assert expected_id in fused_msg_ids

    # --- Edge case: both input lists empty yields no results ---
    empty_combined = client._reciprocal_rank_fusion(
        vector_results=[],
        fts_results=[],
        get_id_func=lambda x: x["id"],
        vector_weight=0.5,
        fts_weight=0.5,
        top_k=10,
    )
    assert len(empty_combined) == 0

    # --- Edge case: only one side populated passes through ---
    single_combined = client._reciprocal_rank_fusion(
        vector_results=[msg1],
        fts_results=[],
        get_id_func=lambda m: m["id"],
        vector_weight=0.5,
        fts_weight=0.5,
        top_k=10,
    )
    assert len(single_combined) == 1
    assert single_combined[0][0]["id"] == "m1"
@pytest.mark.asyncio
@pytest.mark.skipif(not settings.tpuf_api_key, reason="Turbopuffer API key not configured")
async def test_message_dual_write_with_real_tpuf(enable_message_embedding, default_user):
    """Test actual message embedding and storage in Turbopuffer"""
    import uuid
    from datetime import datetime, timezone

    from letta.helpers.tpuf_client import TurbopufferClient
    from letta.schemas.enums import MessageRole

    client = TurbopufferClient()
    # Unique ids keep this test's namespace isolated from other tests.
    agent_id = f"test-agent-{uuid.uuid4()}"
    org_id = str(uuid.uuid4())
    try:
        # Prepare test messages
        message_texts = [
            "Hello, how can I help you today?",
            "I need help with Python programming.",
            "Sure, what specific Python topic?",
        ]
        message_ids = [str(uuid.uuid4()) for _ in message_texts]
        roles = [MessageRole.assistant, MessageRole.user, MessageRole.assistant]
        created_ats = [datetime.now(timezone.utc) for _ in message_texts]
        # Insert messages into Turbopuffer; embeddings are generated by the
        # client itself (a leftover dummy-embedding expression was removed here).
        success = await client.insert_messages(
            agent_id=agent_id,
            message_texts=message_texts,
            message_ids=message_ids,
            organization_id=org_id,
            actor=default_user,
            roles=roles,
            created_ats=created_ats,
        )
        assert success
        # Verify we can query the messages back in timestamp mode.
        results = await client.query_messages_by_agent_id(
            agent_id=agent_id, organization_id=org_id, search_mode="timestamp", top_k=10, actor=default_user
        )
        assert len(results) == 3
        # Results should be ordered by timestamp (most recent first)
        for msg_dict, score, metadata in results:
            assert msg_dict["agent_id"] == agent_id
            assert msg_dict["organization_id"] == org_id
            assert msg_dict["text"] in message_texts
            assert msg_dict["role"] in ["assistant", "user"]
    finally:
        # Clean up namespace (best-effort; it may not exist if insert failed).
        try:
            await client.delete_all_messages(agent_id)
        except Exception:
            pass
@pytest.mark.asyncio
@pytest.mark.skipif(not settings.tpuf_api_key, reason="Turbopuffer API key not configured")
async def test_message_vector_search_with_real_tpuf(enable_message_embedding, default_user):
    """Test vector search on messages in Turbopuffer"""
    import uuid
    from datetime import datetime, timezone

    from letta.helpers.tpuf_client import TurbopufferClient
    from letta.schemas.enums import MessageRole

    client = TurbopufferClient()
    # Unique ids keep this test's namespace isolated from other tests.
    agent_id = f"test-agent-{uuid.uuid4()}"
    org_id = str(uuid.uuid4())
    try:
        # Insert messages whose topical overlap the vector search should pick up.
        message_texts = [
            "Python is a great programming language",
            "JavaScript is used for web development",
            "Machine learning with Python is powerful",
        ]
        message_ids = [str(uuid.uuid4()) for _ in message_texts]
        roles = [MessageRole.assistant] * len(message_texts)
        created_ats = [datetime.now(timezone.utc) for _ in message_texts]
        # Insert messages (embeddings are generated by the client on insert).
        await client.insert_messages(
            agent_id=agent_id,
            message_texts=message_texts,
            message_ids=message_ids,
            organization_id=org_id,
            actor=default_user,
            roles=roles,
            created_ats=created_ats,
        )
        # Search for Python-related messages using vector search.
        results = await client.query_messages_by_agent_id(
            agent_id=agent_id,
            organization_id=org_id,
            actor=default_user,
            query_text="Python programming",
            search_mode="vector",
            top_k=2,
        )
        assert len(results) == 2
        # The two Python-related messages should outrank the JavaScript one.
        result_texts = [msg["text"] for msg, _, _ in results]
        assert "Python is a great programming language" in result_texts
        assert "Machine learning with Python is powerful" in result_texts
    finally:
        # Clean up namespace (best-effort; it may not exist if insert failed).
        try:
            await client.delete_all_messages(agent_id)
        except Exception:
            pass
@pytest.mark.asyncio
@pytest.mark.skipif(not settings.tpuf_api_key, reason="Turbopuffer API key not configured")
async def test_message_hybrid_search_with_real_tpuf(enable_message_embedding, default_user):
    """Test hybrid search combining vector and FTS for messages"""
    import uuid
    from datetime import datetime, timezone

    from letta.helpers.tpuf_client import TurbopufferClient
    from letta.schemas.enums import MessageRole

    client = TurbopufferClient()
    # Unique ids keep this test's namespace isolated from other tests.
    agent_id = f"test-agent-{uuid.uuid4()}"
    org_id = str(uuid.uuid4())
    try:
        # Insert diverse messages: two mention "quick" literally, two are
        # semantically related ML texts — exercising both FTS and vector sides.
        message_texts = [
            "The quick brown fox jumps over the lazy dog",
            "Machine learning algorithms are fascinating",
            "Quick tutorial on Python programming",
            "Deep learning with neural networks",
        ]
        message_ids = [str(uuid.uuid4()) for _ in message_texts]
        roles = [MessageRole.assistant] * len(message_texts)
        created_ats = [datetime.now(timezone.utc) for _ in message_texts]
        # Insert messages
        await client.insert_messages(
            agent_id=agent_id,
            message_texts=message_texts,
            message_ids=message_ids,
            organization_id=org_id,
            actor=default_user,
            roles=roles,
            created_ats=created_ats,
        )
        # Hybrid search with equal vector/FTS weighting.
        results = await client.query_messages_by_agent_id(
            agent_id=agent_id,
            organization_id=org_id,
            actor=default_user,
            query_text="quick",  # Text search for "quick"
            search_mode="hybrid",
            top_k=3,
            vector_weight=0.5,
            fts_weight=0.5,
        )
        assert len(results) > 0
        # Should get a mix of results based on both vector and text similarity.
        result_texts = [msg["text"] for msg, _, _ in results]
        # At least one result should contain "quick" due to FTS.
        assert any("quick" in text.lower() for text in result_texts)
    finally:
        # Clean up namespace (best-effort; it may not exist if insert failed).
        try:
            await client.delete_all_messages(agent_id)
        except Exception:
            pass
@pytest.mark.asyncio
@pytest.mark.skipif(not settings.tpuf_api_key, reason="Turbopuffer API key not configured")
async def test_message_role_filtering_with_real_tpuf(enable_message_embedding, default_user):
    """Test filtering messages by role"""
    import uuid
    from datetime import datetime, timezone

    from letta.helpers.tpuf_client import TurbopufferClient
    from letta.schemas.enums import MessageRole

    client = TurbopufferClient()
    # Unique ids keep this test's namespace isolated from other tests.
    agent_id = f"test-agent-{uuid.uuid4()}"
    org_id = str(uuid.uuid4())
    try:
        # Insert messages covering assistant, user, and system roles.
        message_data = [
            ("Hello! How can I help?", MessageRole.assistant),
            ("I need help with Python", MessageRole.user),
            ("Here's a Python example", MessageRole.assistant),
            ("Can you explain this?", MessageRole.user),
            ("System message here", MessageRole.system),
        ]
        message_texts = [text for text, _ in message_data]
        roles = [role for _, role in message_data]
        message_ids = [str(uuid.uuid4()) for _ in message_texts]
        created_ats = [datetime.now(timezone.utc) for _ in message_texts]
        # Insert messages
        await client.insert_messages(
            agent_id=agent_id,
            message_texts=message_texts,
            message_ids=message_ids,
            organization_id=org_id,
            actor=default_user,
            roles=roles,
            created_ats=created_ats,
        )
        # Query with a single-role filter: only the two user messages.
        user_results = await client.query_messages_by_agent_id(
            agent_id=agent_id, organization_id=org_id, search_mode="timestamp", top_k=10, roles=[MessageRole.user], actor=default_user
        )
        assert len(user_results) == 2
        for msg, _, _ in user_results:
            assert msg["role"] == "user"
            assert msg["text"] in ["I need help with Python", "Can you explain this?"]
        # Query with a multi-role filter: assistant + system (3 messages total).
        non_user_results = await client.query_messages_by_agent_id(
            agent_id=agent_id,
            organization_id=org_id,
            search_mode="timestamp",
            top_k=10,
            roles=[MessageRole.assistant, MessageRole.system],
            actor=default_user,
        )
        assert len(non_user_results) == 3
        for msg, _, _ in non_user_results:
            assert msg["role"] in ["assistant", "system"]
    finally:
        # Clean up namespace (best-effort; it may not exist if insert failed).
        try:
            await client.delete_all_messages(agent_id)
        except Exception:
            pass
@pytest.mark.asyncio
async def test_message_search_fallback_to_sql(server, default_user, sarah_agent):
    """Test that message search falls back to SQL when Turbopuffer is disabled"""
    # Save original settings so the finally block can restore them.
    original_use_tpuf = settings.use_tpuf
    original_embed_messages = settings.embed_all_messages
    try:
        # Disable Turbopuffer for messages
        settings.use_tpuf = False
        settings.embed_all_messages = False
        # Create a message that should only exist in SQL.
        await server.message_manager.create_many_messages_async(
            pydantic_msgs=[
                PydanticMessage(
                    role=MessageRole.user,
                    content=[TextContent(text="Test message for SQL fallback")],
                    agent_id=sarah_agent.id,
                )
            ],
            actor=default_user,
        )
        # Search should use SQL backend (not Turbopuffer)
        results = await server.message_manager.search_messages_async(
            actor=default_user,
            agent_id=sarah_agent.id,
            query_text="fallback",
            limit=10,
        )
        # Should return results from SQL search
        assert len(results) > 0
        # At least one returned message must actually contain the query term.
        for msg, metadata in results:
            text = server.message_manager._extract_message_text(msg)
            if "fallback" in text.lower():
                break
        else:
            # pytest.fail (unlike a bare `assert False`) is not stripped under
            # `python -O` and matches the failure style used elsewhere in this file.
            pytest.fail("No messages containing 'fallback' found")
    finally:
        # Restore settings
        settings.use_tpuf = original_use_tpuf
        settings.embed_all_messages = original_embed_messages
@pytest.mark.asyncio
@pytest.mark.skipif(not settings.tpuf_api_key, reason="Turbopuffer API key not configured")
async def test_message_update_reindexes_in_turbopuffer(server, default_user, sarah_agent, enable_message_embedding):
    """Test that updating a message properly deletes and re-inserts with new embedding in Turbopuffer"""
    from letta.schemas.message import MessageUpdate

    # (Removed a dead `sarah_agent.embedding_config or EmbeddingConfig.default_config(...)`
    # expression whose result was discarded.)
    # Create initial message; strict_mode=True blocks until it is embedded so
    # the searches below are deterministic.
    messages = await server.message_manager.create_many_messages_async(
        pydantic_msgs=[
            PydanticMessage(
                role=MessageRole.user,
                content=[TextContent(text="Original content about Python programming")],
                agent_id=sarah_agent.id,
            )
        ],
        actor=default_user,
        strict_mode=True,
    )
    assert len(messages) == 1
    message_id = messages[0].id
    # Search for "Python" - should find the freshly indexed message.
    python_results = await server.message_manager.search_messages_async(
        agent_id=sarah_agent.id,
        actor=default_user,
        query_text="Python",
        search_mode="fts",
        limit=10,
    )
    assert len(python_results) > 0
    assert any(msg.id == message_id for msg, metadata in python_results)
    # Update the message content (synchronously re-indexes under strict_mode).
    updated_message = await server.message_manager.update_message_by_id_async(
        message_id=message_id,
        message_update=MessageUpdate(content="Updated content about JavaScript development"),
        actor=default_user,
        strict_mode=True,
    )
    assert updated_message.id == message_id  # ID should remain the same
    # Search for "Python" - should NOT find it anymore.
    python_results_after = await server.message_manager.search_messages_async(
        agent_id=sarah_agent.id,
        actor=default_user,
        query_text="Python",
        search_mode="fts",
        limit=10,
    )
    # Should either find no results or results that don't include our message.
    assert not any(msg.id == message_id for msg, metadata in python_results_after)
    # Search for "JavaScript" - should find the updated message.
    js_results = await server.message_manager.search_messages_async(
        agent_id=sarah_agent.id,
        actor=default_user,
        query_text="JavaScript",
        search_mode="fts",
        limit=10,
    )
    assert len(js_results) > 0
    assert any(msg.id == message_id for msg, metadata in js_results)
    # Clean up
    await server.message_manager.delete_messages_by_ids_async([message_id], default_user, strict_mode=True)
@pytest.mark.asyncio
@pytest.mark.skipif(not settings.tpuf_api_key, reason="Turbopuffer API key not configured")
async def test_message_deletion_syncs_with_turbopuffer(server, default_user, enable_message_embedding):
    """Test that all deletion methods properly sync with Turbopuffer"""
    from letta.schemas.agent import CreateAgent
    from letta.schemas.llm_config import LLMConfig

    # Create two test agents so per-agent and cross-agent deletion can be verified.
    agent_a = await server.agent_manager.create_agent_async(
        agent_create=CreateAgent(
            name="Agent A",
            memory_blocks=[],
            llm_config=LLMConfig.default_config("gpt-4o-mini"),
            embedding_config=EmbeddingConfig.default_config(provider="openai"),
            include_base_tools=False,
        ),
        actor=default_user,
    )
    agent_b = await server.agent_manager.create_agent_async(
        agent_create=CreateAgent(
            name="Agent B",
            memory_blocks=[],
            llm_config=LLMConfig.default_config("gpt-4o-mini"),
            embedding_config=EmbeddingConfig.default_config(provider="openai"),
            include_base_tools=False,
        ),
        actor=default_user,
    )
    try:
        # Create 5 messages for agent A (strict_mode=True embeds synchronously).
        agent_a_messages = []
        for i in range(5):
            msgs = await server.message_manager.create_many_messages_async(
                pydantic_msgs=[
                    PydanticMessage(
                        role=MessageRole.user,
                        content=[TextContent(text=f"Agent A message {i + 1}")],
                        agent_id=agent_a.id,
                    )
                ],
                actor=default_user,
                strict_mode=True,
            )
            agent_a_messages.extend(msgs)
        # Create 3 messages for agent B.
        agent_b_messages = []
        for i in range(3):
            msgs = await server.message_manager.create_many_messages_async(
                pydantic_msgs=[
                    PydanticMessage(
                        role=MessageRole.user,
                        content=[TextContent(text=f"Agent B message {i + 1}")],
                        agent_id=agent_b.id,
                    )
                ],
                actor=default_user,
                strict_mode=True,
            )
            agent_b_messages.extend(msgs)
        # Verify initial state - all messages are searchable.
        agent_a_search = await server.message_manager.search_messages_async(
            agent_id=agent_a.id,
            actor=default_user,
            query_text="Agent A",
            search_mode="fts",
            limit=10,
        )
        assert len(agent_a_search) == 5
        agent_b_search = await server.message_manager.search_messages_async(
            agent_id=agent_b.id,
            actor=default_user,
            query_text="Agent B",
            search_mode="fts",
            limit=10,
        )
        assert len(agent_b_search) == 3
        # Test 1: Delete single message from agent A.
        await server.message_manager.delete_message_by_id_async(agent_a_messages[0].id, default_user, strict_mode=True)
        # Test 2: Batch delete 2 messages from agent A.
        await server.message_manager.delete_messages_by_ids_async(
            [agent_a_messages[1].id, agent_a_messages[2].id], default_user, strict_mode=True
        )
        # Test 3: Delete all messages for agent B.
        await server.message_manager.delete_all_messages_for_agent_async(agent_b.id, default_user, strict_mode=True)
        # Verify final state.
        # Agent A should have 2 messages left (5 - 1 - 2 = 2).
        agent_a_final = await server.message_manager.search_messages_async(
            agent_id=agent_a.id,
            actor=default_user,
            query_text="Agent A",
            search_mode="fts",
            limit=10,
        )
        assert len(agent_a_final) == 2
        # Verify the remaining messages are the correct (undeleted) ones.
        remaining_ids = {msg.id for msg, metadata in agent_a_final}
        assert agent_a_messages[3].id in remaining_ids
        assert agent_a_messages[4].id in remaining_ids
        # Agent B should have 0 messages after delete-all.
        agent_b_final = await server.message_manager.search_messages_async(
            agent_id=agent_b.id,
            actor=default_user,
            query_text="Agent B",
            search_mode="fts",
            limit=10,
        )
        assert len(agent_b_final) == 0
    finally:
        # Clean up agents (cascades to their remaining messages).
        await server.agent_manager.delete_agent_async(agent_a.id, default_user)
        await server.agent_manager.delete_agent_async(agent_b.id, default_user)
@pytest.mark.asyncio
@pytest.mark.skipif(not settings.tpuf_api_key, reason="Turbopuffer API key not configured")
async def test_turbopuffer_failure_does_not_break_postgres(server, default_user, sarah_agent, enable_message_embedding):
    """Test that postgres operations succeed even if turbopuffer fails"""
    from unittest.mock import AsyncMock, patch

    from letta.schemas.message import MessageUpdate

    # (Removed a dead `sarah_agent.embedding_config or EmbeddingConfig.default_config(...)`
    # expression whose result was discarded.)
    # Create initial message while turbopuffer is still healthy.
    messages = await server.message_manager.create_many_messages_async(
        pydantic_msgs=[
            PydanticMessage(
                role=MessageRole.user,
                content=[TextContent(text="Test message for error handling")],
                agent_id=sarah_agent.id,
            )
        ],
        actor=default_user,
    )
    assert len(messages) == 1
    message_id = messages[0].id
    # Mock turbopuffer client so every delete/insert raises.
    with patch(
        "letta.helpers.tpuf_client.TurbopufferClient.delete_messages",
        new=AsyncMock(side_effect=Exception("Turbopuffer connection failed")),
    ):
        with patch(
            "letta.helpers.tpuf_client.TurbopufferClient.insert_messages",
            new=AsyncMock(side_effect=Exception("Turbopuffer insert failed")),
        ):
            # Test 1: Update should succeed in postgres despite turbopuffer failure
            # NOTE: strict_mode=False here because we're testing error resilience
            updated_message = await server.message_manager.update_message_by_id_async(
                message_id=message_id,
                message_update=MessageUpdate(content="Updated despite turbopuffer failure"),
                actor=default_user,
                strict_mode=False,  # Don't fail on turbopuffer errors - that's what we're testing!
            )
            # Verify postgres was updated successfully
            assert updated_message.id == message_id
            updated_text = server.message_manager._extract_message_text(updated_message)
            assert "Updated despite turbopuffer failure" in updated_text
            # Test 2: Delete should succeed in postgres despite turbopuffer failure
            # First create another message to delete
            messages2 = await server.message_manager.create_many_messages_async(
                pydantic_msgs=[
                    PydanticMessage(
                        role=MessageRole.user,
                        content=[TextContent(text="Message to delete")],
                        agent_id=sarah_agent.id,
                    )
                ],
                actor=default_user,
            )
            message_to_delete_id = messages2[0].id
            # Delete with mocked turbopuffer failure
            # NOTE: strict_mode=False here because we're testing error resilience
            deletion_result = await server.message_manager.delete_message_by_id_async(
                message_to_delete_id, default_user, strict_mode=False
            )
            assert deletion_result
            # Verify message is deleted from postgres
            deleted_msg = await server.message_manager.get_message_by_id_async(message_to_delete_id, default_user)
            assert deleted_msg is None
    # Clean up remaining message (use strict_mode=False since turbopuffer might be mocked)
    await server.message_manager.delete_messages_by_ids_async([message_id], default_user, strict_mode=False)
@pytest.mark.asyncio
@pytest.mark.skipif(not settings.tpuf_api_key, reason="Turbopuffer API key not configured")
async def test_message_creation_background_mode(server, default_user, sarah_agent, enable_message_embedding):
    """Test that messages are embedded in background when strict_mode=False"""
    # (Removed a dead `sarah_agent.embedding_config or EmbeddingConfig.default_config(...)`
    # expression whose result was discarded.)
    # Create message in background mode: the call returns before embedding completes.
    messages = await server.message_manager.create_many_messages_async(
        pydantic_msgs=[
            PydanticMessage(
                role=MessageRole.user,
                content=[TextContent(text="Background test message about Python programming")],
                agent_id=sarah_agent.id,
            )
        ],
        actor=default_user,
        strict_mode=False,  # Background mode
    )
    assert len(messages) == 1
    message_id = messages[0].id
    # Message should be in PostgreSQL immediately.
    sql_message = await server.message_manager.get_message_by_id_async(message_id, default_user)
    assert sql_message is not None
    assert sql_message.id == message_id
    # Poll for embedding completion by querying Turbopuffer directly.
    embedded = await wait_for_embedding(
        agent_id=sarah_agent.id,
        message_id=message_id,
        organization_id=default_user.organization_id,
        actor=default_user,
        max_wait=10.0,
        poll_interval=0.5,
    )
    assert embedded, "Message was not embedded in Turbopuffer within timeout"
    # Now verify it's also searchable through the search API.
    search_results = await server.message_manager.search_messages_async(
        agent_id=sarah_agent.id,
        actor=default_user,
        query_text="Python programming",
        search_mode="fts",
        limit=10,
    )
    assert len(search_results) > 0
    assert any(msg.id == message_id for msg, _ in search_results)
    # Clean up
    await server.message_manager.delete_messages_by_ids_async([message_id], default_user, strict_mode=True)
@pytest.mark.asyncio
@pytest.mark.skipif(not settings.tpuf_api_key, reason="Turbopuffer API key not configured")
async def test_message_update_background_mode(server, default_user, sarah_agent, enable_message_embedding):
    """Test that message updates work in background mode"""
    from letta.schemas.message import MessageUpdate

    # (Removed a dead `sarah_agent.embedding_config or EmbeddingConfig.default_config(...)`
    # expression whose result was discarded.)
    # Create initial message with strict_mode=True to ensure it's embedded.
    messages = await server.message_manager.create_many_messages_async(
        pydantic_msgs=[
            PydanticMessage(
                role=MessageRole.user,
                content=[TextContent(text="Original content about databases")],
                agent_id=sarah_agent.id,
            )
        ],
        actor=default_user,
        strict_mode=True,  # Ensure initial embedding
    )
    assert len(messages) == 1
    message_id = messages[0].id
    # Verify initial content is searchable.
    initial_results = await server.message_manager.search_messages_async(
        agent_id=sarah_agent.id,
        actor=default_user,
        query_text="databases",
        search_mode="fts",
        limit=10,
    )
    assert any(msg.id == message_id for msg, _ in initial_results)
    # Update message in background mode (re-index happens asynchronously).
    updated_message = await server.message_manager.update_message_by_id_async(
        message_id=message_id,
        message_update=MessageUpdate(content="Updated content about machine learning"),
        actor=default_user,
        strict_mode=False,  # Background mode
    )
    assert updated_message.id == message_id
    # PostgreSQL should be updated immediately.
    sql_message = await server.message_manager.get_message_by_id_async(message_id, default_user)
    assert "machine learning" in server.message_manager._extract_message_text(sql_message)
    # Wait a bit for the background update to process.
    await asyncio.sleep(1.0)
    # Poll for the update to be reflected in Turbopuffer.
    embedded = await wait_for_embedding(
        agent_id=sarah_agent.id,
        message_id=message_id,
        organization_id=default_user.organization_id,
        actor=default_user,
        max_wait=10.0,
        poll_interval=0.5,
    )
    assert embedded, "Updated message was not re-embedded within timeout"
    # Now verify the new content is searchable.
    new_results = await server.message_manager.search_messages_async(
        agent_id=sarah_agent.id,
        actor=default_user,
        query_text="machine learning",
        search_mode="fts",
        limit=10,
    )
    assert any(msg.id == message_id for msg, _ in new_results)
    # Old content should eventually no longer be searchable
    # (may take a moment for the delete to process).
    await asyncio.sleep(2.0)
    old_results = await server.message_manager.search_messages_async(
        agent_id=sarah_agent.id,
        actor=default_user,
        query_text="databases",
        search_mode="fts",
        limit=10,
    )
    # The message shouldn't match the old search term anymore.
    if len(old_results) > 0:
        # If we find results, verify our message doesn't contain the old content.
        for msg, _ in old_results:
            if msg.id == message_id:
                text = server.message_manager._extract_message_text(msg)
                assert "databases" not in text.lower()
    # Clean up
    await server.message_manager.delete_messages_by_ids_async([message_id], default_user, strict_mode=True)
@pytest.mark.asyncio
@pytest.mark.skipif(not settings.tpuf_api_key, reason="Turbopuffer API key not configured")
async def test_message_date_filtering_with_real_tpuf(enable_message_embedding, default_user):
    """Test filtering messages by date range"""
    import uuid
    from datetime import datetime, timedelta, timezone

    from letta.helpers.tpuf_client import TurbopufferClient
    from letta.schemas.enums import MessageRole

    client = TurbopufferClient()
    # Unique ids keep this test's namespace isolated from other tests.
    agent_id = f"test-agent-{uuid.uuid4()}"
    org_id = str(uuid.uuid4())
    try:
        # Create messages with timestamps spread from today back to a month ago.
        now = datetime.now(timezone.utc)
        yesterday = now - timedelta(days=1)
        last_week = now - timedelta(days=7)
        last_month = now - timedelta(days=30)
        message_data = [
            ("Today's message", now),
            ("Yesterday's message", yesterday),
            ("Last week's message", last_week),
            ("Last month's message", last_month),
        ]
        for text, timestamp in message_data:
            await client.insert_messages(
                agent_id=agent_id,
                message_texts=[text],
                message_ids=[str(uuid.uuid4())],
                organization_id=org_id,
                actor=default_user,
                roles=[MessageRole.assistant],
                created_ats=[timestamp],
            )
        # Query messages from the last 3 days (start_date only).
        three_days_ago = now - timedelta(days=3)
        recent_results = await client.query_messages_by_agent_id(
            agent_id=agent_id, organization_id=org_id, search_mode="timestamp", top_k=10, start_date=three_days_ago, actor=default_user
        )
        # Should get today's and yesterday's messages.
        assert len(recent_results) == 2
        result_texts = [msg["text"] for msg, _, _ in recent_results]
        assert "Today's message" in result_texts
        assert "Yesterday's message" in result_texts
        # Query messages between 2 weeks ago and 1 week ago (start + end date).
        two_weeks_ago = now - timedelta(days=14)
        week_results = await client.query_messages_by_agent_id(
            agent_id=agent_id,
            organization_id=org_id,
            search_mode="timestamp",
            top_k=10,
            start_date=two_weeks_ago,
            end_date=last_week + timedelta(days=1),  # Include last week's message
            actor=default_user,
        )
        # Should get only last week's message.
        assert len(week_results) == 1
        assert week_results[0][0]["text"] == "Last week's message"
        # Date filters must also compose with vector search.
        filtered_vector_results = await client.query_messages_by_agent_id(
            agent_id=agent_id,
            organization_id=org_id,
            actor=default_user,
            query_text="message",
            search_mode="vector",
            top_k=10,
            start_date=three_days_ago,
        )
        # Should get only recent messages.
        assert len(filtered_vector_results) == 2
        for msg, _, _ in filtered_vector_results:
            assert msg["text"] in ["Today's message", "Yesterday's message"]
    finally:
        # Clean up namespace (best-effort; it may not exist if insert failed).
        try:
            await client.delete_all_messages(agent_id)
        except Exception:
            pass
@pytest.mark.asyncio
async def test_archive_namespace_tracking(server, default_user, enable_turbopuffer):
    """Test that archive namespaces are properly tracked in database"""
    # Create an archive; its namespace should be generated on first access.
    archive = await server.archive_manager.create_archive_async(
        name="Test Archive for Namespace", embedding_config=EmbeddingConfig.default_config(provider="openai"), actor=default_user
    )
    namespace = await server.archive_manager.get_or_set_vector_db_namespace_async(archive.id)
    # The namespace carries an "archive_" prefix, embeds the archive id, and
    # (when configured) the lowercased environment name.
    assert namespace.startswith("archive_")
    assert archive.id in namespace
    if settings.environment:
        assert settings.environment.lower() in namespace
    # A second lookup must be served from the database and return the same value.
    assert await server.archive_manager.get_or_set_vector_db_namespace_async(archive.id) == namespace
@pytest.mark.asyncio
async def test_namespace_consistency_with_tpuf_client(server, default_user, enable_turbopuffer):
    """Test that the namespace from managers matches what tpuf_client would generate"""
    # Create an archive through the manager layer.
    archive = await server.archive_manager.create_archive_async(
        name="Test Consistency Archive", embedding_config=EmbeddingConfig.default_config(provider="openai"), actor=default_user
    )
    manager_namespace = await server.archive_manager.get_or_set_vector_db_namespace_async(archive.id)
    # The TurbopufferClient must independently derive the identical namespace.
    client_namespace = await TurbopufferClient()._get_archive_namespace_name(archive.id)
    assert manager_namespace == client_namespace
@pytest.mark.asyncio
async def test_environment_namespace_variation(server, default_user):
    """Test namespace generation with different environment settings"""
    saved_environment = settings.environment
    try:
        # Without an environment, the namespace is just the prefixed archive id.
        settings.environment = None
        archive = await server.archive_manager.create_archive_async(
            name="No Env Archive", embedding_config=EmbeddingConfig.default_config(provider="openai"), actor=default_user
        )
        assert await server.archive_manager.get_or_set_vector_db_namespace_async(archive.id) == f"archive_{archive.id}"
        # With an environment set, its lowercased name is appended as a suffix.
        settings.environment = "TESTING"
        archive2 = await server.archive_manager.create_archive_async(
            name="With Env Archive", embedding_config=EmbeddingConfig.default_config(provider="openai"), actor=default_user
        )
        assert await server.archive_manager.get_or_set_vector_db_namespace_async(archive2.id) == f"archive_{archive2.id}_testing"
    finally:
        # Always restore the global environment setting for other tests.
        settings.environment = saved_environment
@pytest.mark.asyncio
@pytest.mark.skipif(not settings.tpuf_api_key, reason="Turbopuffer API key not configured")
async def test_message_project_id_filtering(server, sarah_agent, default_user, enable_turbopuffer, enable_message_embedding):
    """Test that project_id filtering works correctly in query_messages_by_agent_id"""
    from letta.schemas.letta_message_content import TextContent

    # Two distinct project IDs so filtering can be verified in both directions.
    project_a_id = str(uuid.uuid4())
    project_b_id = str(uuid.uuid4())
    # Create messages with different project IDs.
    message_a = PydanticMessage(
        agent_id=sarah_agent.id,
        role=MessageRole.user,
        content=[TextContent(text="Message for project A about Python")],
    )
    message_b = PydanticMessage(
        agent_id=sarah_agent.id,
        role=MessageRole.user,
        content=[TextContent(text="Message for project B about JavaScript")],
    )
    # Insert messages with their respective project IDs.
    tpuf_client = TurbopufferClient()
    # Embeddings will be generated automatically by the client.
    # Insert message A with project_a_id.
    await tpuf_client.insert_messages(
        agent_id=sarah_agent.id,
        message_texts=[message_a.content[0].text],
        message_ids=[message_a.id],
        organization_id=default_user.organization_id,
        actor=default_user,
        roles=[message_a.role],
        created_ats=[message_a.created_at],
        project_id=project_a_id,
    )
    # Insert message B with project_b_id.
    await tpuf_client.insert_messages(
        agent_id=sarah_agent.id,
        message_texts=[message_b.content[0].text],
        message_ids=[message_b.id],
        organization_id=default_user.organization_id,
        actor=default_user,
        roles=[message_b.role],
        created_ats=[message_b.created_at],
        project_id=project_b_id,
    )
    # Poll for message A with project_a_id filter (indexing is eventually consistent).
    max_retries = 10
    for i in range(max_retries):
        results_a = await tpuf_client.query_messages_by_agent_id(
            agent_id=sarah_agent.id,
            organization_id=default_user.organization_id,
            search_mode="timestamp",  # Simple timestamp retrieval
            top_k=10,
            project_id=project_a_id,
            actor=default_user,
        )
        if len(results_a) == 1 and results_a[0][0]["id"] == message_a.id:
            break
        await asyncio.sleep(0.5)
    else:
        pytest.fail(f"Message A not found after {max_retries} retries")
    assert "Python" in results_a[0][0]["text"]
    # Poll for message B with project_b_id filter.
    for i in range(max_retries):
        results_b = await tpuf_client.query_messages_by_agent_id(
            agent_id=sarah_agent.id,
            organization_id=default_user.organization_id,
            search_mode="timestamp",
            top_k=10,
            project_id=project_b_id,
            actor=default_user,
        )
        if len(results_b) == 1 and results_b[0][0]["id"] == message_b.id:
            break
        await asyncio.sleep(0.5)
    else:
        pytest.fail(f"Message B not found after {max_retries} retries")
    assert "JavaScript" in results_b[0][0]["text"]
    # Query without project filter - should find both.
    results_all = await tpuf_client.query_messages_by_agent_id(
        agent_id=sarah_agent.id,
        organization_id=default_user.organization_id,
        search_mode="timestamp",
        top_k=10,
        project_id=None,  # No filter
        actor=default_user,
    )
    assert len(results_all) >= 2  # May have other messages from setup
    message_ids = [r[0]["id"] for r in results_all]
    assert message_a.id in message_ids
    assert message_b.id in message_ids
    # Clean up the two inserted messages.
    await tpuf_client.delete_messages(
        agent_id=sarah_agent.id, organization_id=default_user.organization_id, message_ids=[message_a.id, message_b.id]
    )
@pytest.mark.asyncio
@pytest.mark.skipif(not settings.tpuf_api_key, reason="Turbopuffer API key not configured")
async def test_message_template_id_filtering(server, sarah_agent, default_user, enable_turbopuffer, enable_message_embedding):
    """Test that template_id filtering works correctly in message queries.

    Inserts one message tagged with template A and one with template B, then
    verifies that filtering by each template_id returns only the matching
    message. Uses retry polling (rather than a single fixed sleep) because
    Turbopuffer indexing is eventually consistent — this matches the polling
    pattern used by the project_id filtering test above and removes flakiness.
    """
    from letta.schemas.letta_message_content import TextContent
    # Create two template IDs
    template_a_id = str(uuid.uuid4())
    template_b_id = str(uuid.uuid4())
    # Create messages with different template IDs
    message_a = PydanticMessage(
        agent_id=sarah_agent.id,
        role=MessageRole.user,
        content=[TextContent(text="Message for template A")],
    )
    message_b = PydanticMessage(
        agent_id=sarah_agent.id,
        role=MessageRole.user,
        content=[TextContent(text="Message for template B")],
    )
    # Insert messages with their respective template IDs
    tpuf_client = TurbopufferClient()
    await tpuf_client.insert_messages(
        agent_id=sarah_agent.id,
        message_texts=[message_a.content[0].text],
        message_ids=[message_a.id],
        organization_id=default_user.organization_id,
        actor=default_user,
        roles=[message_a.role],
        created_ats=[message_a.created_at],
        template_id=template_a_id,
    )
    await tpuf_client.insert_messages(
        agent_id=sarah_agent.id,
        message_texts=[message_b.content[0].text],
        message_ids=[message_b.id],
        organization_id=default_user.organization_id,
        actor=default_user,
        roles=[message_b.role],
        created_ats=[message_b.created_at],
        template_id=template_b_id,
    )
    # Poll for message A with template_a_id filter (indexing is async)
    max_retries = 10
    for _ in range(max_retries):
        results_a = await tpuf_client.query_messages_by_agent_id(
            agent_id=sarah_agent.id,
            organization_id=default_user.organization_id,
            search_mode="timestamp",
            top_k=10,
            template_id=template_a_id,
            actor=default_user,
        )
        if len(results_a) == 1 and results_a[0][0]["id"] == message_a.id:
            break
        await asyncio.sleep(0.5)
    else:
        pytest.fail(f"Message A not found after {max_retries} retries")
    assert "template A" in results_a[0][0]["text"]
    # Poll for message B with template_b_id filter
    for _ in range(max_retries):
        results_b = await tpuf_client.query_messages_by_agent_id(
            agent_id=sarah_agent.id,
            organization_id=default_user.organization_id,
            search_mode="timestamp",
            top_k=10,
            template_id=template_b_id,
            actor=default_user,
        )
        if len(results_b) == 1 and results_b[0][0]["id"] == message_b.id:
            break
        await asyncio.sleep(0.5)
    else:
        pytest.fail(f"Message B not found after {max_retries} retries")
    assert "template B" in results_b[0][0]["text"]
    # Clean up
    await tpuf_client.delete_messages(
        agent_id=sarah_agent.id, organization_id=default_user.organization_id, message_ids=[message_a.id, message_b.id]
    )
@pytest.mark.asyncio
@pytest.mark.skipif(not settings.tpuf_api_key, reason="Turbopuffer API key not configured")
async def test_message_conversation_id_filtering(server, sarah_agent, default_user, enable_turbopuffer, enable_message_embedding):
    """Test that conversation_id filtering works correctly in message queries, including 'default' sentinel.

    Covers three query shapes:
      1. a concrete conversation_id -> only that conversation's message
      2. the "default" sentinel -> only messages stored with NULL conversation_id
      3. no filter (None) -> messages from both
    """
    from letta.schemas.conversation import CreateConversation
    from letta.schemas.letta_message_content import TextContent
    from letta.services.conversation_manager import ConversationManager
    conversation_manager = ConversationManager()
    # Create a conversation
    conversation = await conversation_manager.create_conversation(
        agent_id=sarah_agent.id,
        conversation_create=CreateConversation(summary="Test conversation"),
        actor=default_user,
    )
    # Create messages with different conversation_ids
    message_with_conv = PydanticMessage(
        agent_id=sarah_agent.id,
        role=MessageRole.user,
        content=[TextContent(text="Message in specific conversation about Python")],
    )
    message_default_conv = PydanticMessage(
        agent_id=sarah_agent.id,
        role=MessageRole.user,
        content=[TextContent(text="Message in default conversation about JavaScript")],
    )
    # Insert messages with their respective conversation IDs
    tpuf_client = TurbopufferClient()
    # Message with specific conversation_id
    await tpuf_client.insert_messages(
        agent_id=sarah_agent.id,
        message_texts=[message_with_conv.content[0].text],
        message_ids=[message_with_conv.id],
        organization_id=default_user.organization_id,
        actor=default_user,
        roles=[message_with_conv.role],
        created_ats=[message_with_conv.created_at],
        conversation_ids=[conversation.id],  # Specific conversation
    )
    # Message with no conversation_id (default)
    await tpuf_client.insert_messages(
        agent_id=sarah_agent.id,
        message_texts=[message_default_conv.content[0].text],
        message_ids=[message_default_conv.id],
        organization_id=default_user.organization_id,
        actor=default_user,
        roles=[message_default_conv.role],
        created_ats=[message_default_conv.created_at],
        conversation_ids=[None],  # Default conversation (NULL)
    )
    # Wait for indexing
    await asyncio.sleep(1)
    # Test 1: Query for specific conversation - should find only message with that conversation_id
    results_conv = await tpuf_client.query_messages_by_agent_id(
        agent_id=sarah_agent.id,
        organization_id=default_user.organization_id,
        search_mode="timestamp",
        top_k=10,
        conversation_id=conversation.id,
        actor=default_user,
    )
    assert len(results_conv) == 1
    assert results_conv[0][0]["id"] == message_with_conv.id
    assert "Python" in results_conv[0][0]["text"]
    # Test 2: Query for "default" conversation - should find only messages with NULL conversation_id
    results_default = await tpuf_client.query_messages_by_agent_id(
        agent_id=sarah_agent.id,
        organization_id=default_user.organization_id,
        search_mode="timestamp",
        top_k=10,
        conversation_id="default",  # Sentinel for NULL
        actor=default_user,
    )
    assert len(results_default) >= 1  # May have other default messages from setup
    # Check our message is in there
    default_ids = [r[0]["id"] for r in results_default]
    assert message_default_conv.id in default_ids
    # Verify the message content
    # Each result row is (message_dict, score, metadata); only the dict is inspected here.
    for msg_dict, _, _ in results_default:
        if msg_dict["id"] == message_default_conv.id:
            assert "JavaScript" in msg_dict["text"]
            break
    # Test 3: Query without conversation filter - should find both
    results_all = await tpuf_client.query_messages_by_agent_id(
        agent_id=sarah_agent.id,
        organization_id=default_user.organization_id,
        search_mode="timestamp",
        top_k=10,
        conversation_id=None,  # No filter
        actor=default_user,
    )
    assert len(results_all) >= 2  # May have other messages from setup
    message_ids = [r[0]["id"] for r in results_all]
    assert message_with_conv.id in message_ids
    assert message_default_conv.id in message_ids
    # Clean up
    await tpuf_client.delete_messages(
        agent_id=sarah_agent.id, organization_id=default_user.organization_id, message_ids=[message_with_conv.id, message_default_conv.id]
    )
@pytest.mark.asyncio
@pytest.mark.skipif(not settings.tpuf_api_key, reason="Turbopuffer API key not configured")
async def test_query_messages_with_mixed_conversation_id_presence(enable_message_embedding, default_user):
    """Test that querying works when the namespace schema doesn't have conversation_id.

    This test validates the fix for the error:
        'attribute "conversation_id" not found in schema, cannot be part of include_attributes'

    The fix changed from explicitly listing attributes (which fails when the namespace
    schema doesn't have conversation_id) to using include_attributes=True which gracefully
    returns all available attributes.

    IMPORTANT: This test uses the raw Turbopuffer API to insert messages WITHOUT
    the conversation_id schema, then queries BEFORE any new messages are added.
    This reproduces the exact production scenario where old namespaces don't have
    conversation_id in their schema.
    """
    from turbopuffer import AsyncTurbopuffer
    from letta.helpers.tpuf_client import TurbopufferClient
    client = TurbopufferClient()
    # Synthetic agent/org ids so nothing else in the test environment collides.
    agent_id = f"test-agent-{uuid.uuid4()}"
    org_id = str(uuid.uuid4())
    # NOTE(review): namespace naming appears to follow "messages_<org>_<env>" with a
    # hard-coded "dev" suffix here — confirm it matches TurbopufferClient's convention.
    namespace_name = f"messages_{org_id}_dev"
    try:
        # Insert messages using raw Turbopuffer API WITHOUT conversation_id in schema
        # This simulates a namespace that was created before conversation_id feature existed
        message_ids = [str(uuid.uuid4()), str(uuid.uuid4())]
        message_texts = [
            "Message without conversation_id about Python",
            "Another message about machine learning",
        ]
        # Generate embeddings
        embeddings = await client._generate_embeddings(message_texts, default_user)
        # Use raw Turbopuffer API to insert WITHOUT conversation_id in schema
        async with AsyncTurbopuffer(api_key=client.api_key, region=client.region) as tpuf:
            namespace = tpuf.namespace(namespace_name)
            await namespace.write(
                upsert_columns={
                    "id": message_ids,
                    "vector": embeddings,
                    "text": message_texts,
                    "organization_id": [org_id, org_id],
                    "agent_id": [agent_id, agent_id],
                    "role": ["user", "assistant"],
                    "created_at": [datetime.now(timezone.utc), datetime.now(timezone.utc)],
                    # NOTE: No conversation_id column - schema won't have this attribute!
                },
                distance_metric="cosine_distance",
                schema={
                    "text": {"type": "string", "full_text_search": True},
                    # NOTE: No conversation_id in schema - this is the key!
                },
            )
        # Wait for indexing
        await asyncio.sleep(1)
        # CRITICAL: Query BEFORE inserting any new messages with conversation_id
        # This is when the bug manifests - the schema doesn't have conversation_id yet
        # Test 1: Timestamp mode query
        timestamp_results = await client.query_messages_by_agent_id(
            agent_id=agent_id,
            organization_id=org_id,
            search_mode="timestamp",
            top_k=10,
            actor=default_user,
        )
        assert len(timestamp_results) == 2, f"Expected 2 messages, got {len(timestamp_results)}"
        result_ids = [msg["id"] for msg, _, _ in timestamp_results]
        for msg_id in message_ids:
            assert msg_id in result_ids
        # Test 2: Vector search
        vector_results = await client.query_messages_by_agent_id(
            agent_id=agent_id,
            organization_id=org_id,
            actor=default_user,
            query_text="Python programming",
            search_mode="vector",
            top_k=10,
        )
        assert len(vector_results) > 0
        # Test 3: Hybrid search
        hybrid_results = await client.query_messages_by_agent_id(
            agent_id=agent_id,
            organization_id=org_id,
            actor=default_user,
            query_text="message",
            search_mode="hybrid",
            top_k=10,
            vector_weight=0.5,
            fts_weight=0.5,
        )
        assert len(hybrid_results) > 0
        # Test 4: FTS search
        fts_results = await client.query_messages_by_agent_id(
            agent_id=agent_id,
            organization_id=org_id,
            actor=default_user,
            query_text="Python",
            search_mode="fts",
            top_k=10,
        )
        assert len(fts_results) > 0
        assert any("Python" in msg["text"] for msg, _, _ in fts_results)
    finally:
        # Clean up - delete the entire namespace
        try:
            async with AsyncTurbopuffer(api_key=client.api_key, region=client.region) as tpuf:
                namespace = tpuf.namespace(namespace_name)
                await namespace.delete_all()
        except Exception:
            # Best-effort cleanup: the namespace may never have been created.
            pass
@pytest.mark.asyncio
@pytest.mark.skipif(not settings.tpuf_api_key, reason="Turbopuffer API key not configured")
async def test_query_messages_by_org_id_with_missing_conversation_id_schema(enable_message_embedding, default_user):
    """Test that query_messages_by_org_id works when the namespace doesn't have conversation_id in schema.

    This is the companion test to test_query_messages_with_mixed_conversation_id_presence,
    validating the same fix for the query_messages_by_org_id method.

    IMPORTANT: This test queries BEFORE any messages with conversation_id are inserted,
    to reproduce the exact production scenario.
    """
    from turbopuffer import AsyncTurbopuffer
    from letta.helpers.tpuf_client import TurbopufferClient
    client = TurbopufferClient()
    # Synthetic ids keep this test isolated from other fixtures' data.
    agent_id = f"test-agent-{uuid.uuid4()}"
    org_id = str(uuid.uuid4())
    namespace_name = f"messages_{org_id}_dev"
    try:
        # Insert messages using raw Turbopuffer API WITHOUT conversation_id in schema
        message_ids = [str(uuid.uuid4()), str(uuid.uuid4())]
        message_texts = ["Org message about Python", "Org message about JavaScript"]
        # Generate embeddings
        embeddings = await client._generate_embeddings(message_texts, default_user)
        # Use raw Turbopuffer API to insert WITHOUT conversation_id in schema
        async with AsyncTurbopuffer(api_key=client.api_key, region=client.region) as tpuf:
            namespace = tpuf.namespace(namespace_name)
            await namespace.write(
                upsert_columns={
                    "id": message_ids,
                    "vector": embeddings,
                    "text": message_texts,
                    "organization_id": [org_id, org_id],
                    "agent_id": [agent_id, agent_id],
                    "role": ["user", "assistant"],
                    "created_at": [datetime.now(timezone.utc), datetime.now(timezone.utc)],
                    # NOTE: No conversation_id column
                },
                distance_metric="cosine_distance",
                schema={
                    "text": {"type": "string", "full_text_search": True},
                    # NOTE: No conversation_id in schema
                },
            )
        # Wait for indexing
        await asyncio.sleep(1)
        # CRITICAL: Query BEFORE inserting any new messages with conversation_id
        # This is when the bug manifests - schema doesn't have conversation_id
        # Query at org level - should work even without conversation_id in schema
        org_results = await client.query_messages_by_org_id(
            organization_id=org_id,
            actor=default_user,
            query_text="message",
            search_mode="hybrid",
            top_k=10,
            vector_weight=0.5,
            fts_weight=0.5,
        )
        # Should find both messages
        assert len(org_results) == 2, f"Expected 2 messages, got {len(org_results)}"
        result_ids = [msg["id"] for msg, _, _ in org_results]
        assert message_ids[0] in result_ids
        assert message_ids[1] in result_ids
    finally:
        # Clean up - delete the entire namespace
        try:
            async with AsyncTurbopuffer(api_key=client.api_key, region=client.region) as tpuf:
                namespace = tpuf.namespace(namespace_name)
                await namespace.delete_all()
        except Exception:
            # Best-effort cleanup: ignore failures if the namespace never existed.
            pass
@pytest.mark.asyncio
async def test_system_messages_not_embedded_during_agent_creation(server, default_user, enable_message_embedding):
    """Test that system messages are filtered out before being passed to the embedding pipeline during agent creation.

    Wraps the message manager's background-embedding hook so every message
    handed to it is recorded, creates an agent (which generates an initial
    system message), and asserts that no system-role message reached the
    embedding pipeline.
    """
    from unittest.mock import patch
    from letta.schemas.agent import CreateAgent
    from letta.schemas.llm_config import LLMConfig
    # Mock the _embed_messages_background method to track what messages are passed to it
    messages_passed_to_embed = []
    original_embed = server.message_manager._embed_messages_background
    async def mock_embed(messages, actor, agent_id, project_id=None, template_id=None):
        # Capture what messages are being passed to embedding, then delegate
        # to the real implementation so behavior is otherwise unchanged.
        messages_passed_to_embed.extend(messages)
        return await original_embed(messages, actor, agent_id, project_id, template_id)
    with patch.object(server.message_manager, "_embed_messages_background", mock_embed):
        # Create agent with initial messages (which includes a system message)
        agent = await server.agent_manager.create_agent_async(
            agent_create=CreateAgent(
                name="TestSystemMessageAgent",
                memory_blocks=[],
                llm_config=LLMConfig.default_config("gpt-4o-mini"),
                embedding_config=EmbeddingConfig.default_config(provider="openai"),
                include_base_tools=False,
            ),
            actor=default_user,
        )
        # Get all messages created for the agent
        all_messages = await server.message_manager.get_messages_by_ids_async(message_ids=agent.message_ids, actor=default_user)
        # Sanity check: agent creation must have produced at least one system message,
        # otherwise the assertion below would pass vacuously.
        system_messages = [msg for msg in all_messages if msg.role == MessageRole.system]
        assert len(system_messages) > 0, "No system messages were created during agent creation"
        # (leftover debug print() statements removed here)
        # Verify that NO system messages were passed to the embedding pipeline
        system_messages_in_embed = [msg for msg in messages_passed_to_embed if msg.role == MessageRole.system]
        assert len(system_messages_in_embed) == 0, (
            f"System messages should not be embedded, but {len(system_messages_in_embed)} were passed to embedding pipeline"
        )
    # Clean up (best-effort: a failed delete must not mask an earlier assertion failure)
    try:
        await server.agent_manager.delete_agent_async(agent.id, default_user)
    except Exception:
        pass
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/integration_test_turbopuffer.py",
"license": "Apache License 2.0",
"lines": 2138,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/locust_test.py | import random
import string
from locust import HttpUser, between, task
from letta.constants import BASE_TOOLS, DEFAULT_HUMAN, DEFAULT_PERSONA
from letta.schemas.agent import AgentState, CreateAgent
from letta.schemas.letta_request import LettaRequest
from letta.schemas.letta_response import LettaResponse
from letta.schemas.memory import ChatMemory
from letta.schemas.message import MessageCreate, MessageRole
from letta.utils import get_human_text, get_persona_text
class LettaUser(HttpUser):
    """Locust virtual user: provisions a Letta user + API key on start,
    creates an agent, then repeatedly sends a "hello" message to it."""

    wait_time = between(1, 5)
    token = None
    agent_id = None

    def on_start(self):
        # Create a user and get the token
        self.client.headers = {"Authorization": "Bearer password"}
        user_data = {"name": f"User-{''.join(random.choices(string.ascii_lowercase + string.digits, k=8))}"}
        response = self.client.post("/v1/admin/users", json=user_data)
        response_json = response.json()
        print(response_json)
        self.user_id = response_json["id"]
        # create a token
        response = self.client.post("/v1/admin/users/keys", json={"user_id": self.user_id})
        self.token = response.json()["key"]
        # reset to use user token as headers
        self.client.headers = {"Authorization": f"Bearer {self.token}"}
        # Create an agent once up-front so send_message has a valid target;
        # without this, self.agent_id stays None and every message request fails.
        self.create_agent()

    # BUG FIX: previously the `@task(1)` decorator and the `def create_agent`
    # line were commented out while the method body was left behind at class
    # scope, referencing `self` at class-definition time and breaking the
    # module at import. Restored as a regular helper (still not a scheduled
    # @task) that on_start calls exactly once.
    def create_agent(self):
        # generate random name
        name = "".join(random.choices(string.ascii_lowercase + string.digits, k=8))
        request = CreateAgent(
            name=f"Agent-{name}",
            tools=BASE_TOOLS,
            memory=ChatMemory(human=get_human_text(DEFAULT_HUMAN), persona=get_persona_text(DEFAULT_PERSONA)),
        )
        # create an agent
        with self.client.post("/v1/agents", json=request.model_dump(), headers=self.client.headers, catch_response=True) as response:
            if response.status_code != 200:
                response.failure(f"Failed to create agent: {response.text}")
            response_json = response.json()
            agent_state = AgentState(**response_json)
            self.agent_id = agent_state.id
            print("Created agent", self.agent_id, agent_state.name)

    @task(1)
    def send_message(self):
        messages = [MessageCreate(role=MessageRole("user"), content="hello")]
        request = LettaRequest(messages=messages)
        with self.client.post(
            f"/v1/agents/{self.agent_id}/messages", json=request.model_dump(), headers=self.client.headers, catch_response=True
        ) as response:
            if response.status_code != 200:
                response.failure(f"Failed to send message {response.status_code}: {response.text}")
            response = LettaResponse(**response.json())
            print("Response", response.usage)

    # @task(1)
    # def send_message_stream(self):
    #    messages = [MessageCreate(role=MessageRole("user"), content="hello")]
    #    request = LettaRequest(messages=messages, stream_steps=True, stream_tokens=True, return_message_object=True)
    #    if stream_tokens or stream_steps:
    #        from letta.client.streaming import _sse_post
    #        request.return_message_object = False
    #        return _sse_post(f"{self.base_url}/api/agents/{agent_id}/messages", request.model_dump(), self.headers)
    #    else:
    #        response = requests.post(f"{self.base_url}/api/agents/{agent_id}/messages", json=request.model_dump(), headers=self.headers)
    #        if response.status_code != 200:
    #            raise ValueError(f"Failed to send message: {response.text}")
    #        return LettaResponse(**response.json())
    #    try:
    #        response = self.letta_client.send_message(message="Hello, world!", agent_id=self.agent_id, role="user")
    #    except Exception as e:
    #        with self.client.get("/", catch_response=True) as response:
    #            response.failure(str(e))

    # @task(2)
    # def get_agent_state(self):
    #    try:
    #        agent_state = self.letta_client.get_agent(agent_id=self.agent_id)
    #    except Exception as e:
    #        with self.client.get("/", catch_response=True) as response:
    #            response.failure(str(e))

    # @task(3)
    # def get_agent_memory(self):
    #    try:
    #        memory = self.letta_client.get_in_context_memory(agent_id=self.agent_id)
    #    except Exception as e:
    #        with self.client.get("/", catch_response=True) as response:
    #            response.failure(str(e))
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/locust_test.py",
"license": "Apache License 2.0",
"lines": 87,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/mcp_test.py | #!/usr/bin/env python3
"""
Simple MCP client example with OAuth authentication support.
This client connects to an MCP server using streamable HTTP transport with OAuth.
"""
import asyncio
import os
import threading
import time
import webbrowser
from datetime import timedelta
from http.server import BaseHTTPRequestHandler, HTTPServer
from typing import Any
from urllib.parse import parse_qs, urlparse
from mcp.client.auth import OAuthClientProvider, TokenStorage
from mcp.client.session import ClientSession
from mcp.client.sse import sse_client
from mcp.client.streamable_http import streamablehttp_client
from mcp.shared.auth import OAuthClientInformationFull, OAuthClientMetadata, OAuthToken
class InMemoryTokenStorage(TokenStorage):
    """Token storage that keeps OAuth credentials in process memory only.

    Nothing is persisted: tokens and client registration info live for the
    lifetime of this object and are lost when the process exits.
    """

    def __init__(self):
        self._stored_tokens: OAuthToken | None = None
        self._stored_client_info: OAuthClientInformationFull | None = None

    async def get_tokens(self) -> OAuthToken | None:
        """Return the cached token set, or None if nothing has been stored."""
        return self._stored_tokens

    async def set_tokens(self, tokens: OAuthToken) -> None:
        """Cache a freshly issued token set."""
        self._stored_tokens = tokens

    async def get_client_info(self) -> OAuthClientInformationFull | None:
        """Return the cached dynamic client registration, or None."""
        return self._stored_client_info

    async def set_client_info(self, client_info: OAuthClientInformationFull) -> None:
        """Cache the dynamic client registration info."""
        self._stored_client_info = client_info
class CallbackHandler(BaseHTTPRequestHandler):
    """Simple HTTP handler to capture OAuth callback."""

    def __init__(self, request, client_address, server, callback_data):
        """Initialize with callback data storage.

        Args:
            callback_data: shared dict the owning CallbackServer polls; keys are
                "authorization_code", "state", and "error".
        """
        # NOTE: must be set before super().__init__, which dispatches do_GET immediately.
        self.callback_data = callback_data
        super().__init__(request, client_address, server)

    def do_GET(self):
        """Handle GET request from OAuth redirect."""
        parsed = urlparse(self.path)
        query_params = parse_qs(parsed.query)
        if "code" in query_params:
            # Success: stash the authorization code (and optional state) for the poller.
            self.callback_data["authorization_code"] = query_params["code"][0]
            self.callback_data["state"] = query_params.get("state", [None])[0]
            self.send_response(200)
            self.send_header("Content-type", "text/html")
            self.end_headers()
            self.wfile.write(
                b"""
            <html>
            <body>
                <h1>Authorization Successful!</h1>
                <p>You can close this window and return to the terminal.</p>
                <script>setTimeout(() => window.close(), 2000);</script>
            </body>
            </html>
            """
            )
        elif "error" in query_params:
            # Provider returned an OAuth error; surface it to the poller and the browser.
            self.callback_data["error"] = query_params["error"][0]
            self.send_response(400)
            self.send_header("Content-type", "text/html")
            self.end_headers()
            self.wfile.write(
                f"""
            <html>
            <body>
                <h1>Authorization Failed</h1>
                <p>Error: {query_params["error"][0]}</p>
                <p>You can close this window and return to the terminal.</p>
            </body>
            </html>
            """.encode()
            )
        else:
            # Any other path/query (e.g. favicon requests) gets a 404.
            self.send_response(404)
            self.end_headers()

    def log_message(self, format, *args):
        """Suppress default logging."""
class CallbackServer:
    """Minimal local HTTP server that captures a single OAuth redirect.

    Runs an HTTPServer in a daemon thread; the handler writes the received
    authorization code / state / error into ``callback_data``, which
    ``wait_for_callback`` polls.
    """

    def __init__(self, port=3000):
        self.port = port
        self.server = None
        self.thread = None
        self.callback_data = dict.fromkeys(("authorization_code", "state", "error"))

    def _create_handler_with_data(self):
        """Create a handler class with access to callback data."""
        shared = self.callback_data

        class DataCallbackHandler(CallbackHandler):
            def __init__(self, request, client_address, server):
                super().__init__(request, client_address, server, shared)

        return DataCallbackHandler

    def start(self):
        """Start the callback server in a background thread."""
        self.server = HTTPServer(("localhost", self.port), self._create_handler_with_data())
        self.thread = threading.Thread(target=self.server.serve_forever, daemon=True)
        self.thread.start()
        print(f"🖥️ Started callback server on http://localhost:{self.port}")

    def stop(self):
        """Stop the callback server (safe to call when never started)."""
        if self.server is not None:
            self.server.shutdown()
            self.server.server_close()
        if self.thread is not None:
            self.thread.join(timeout=1)

    def wait_for_callback(self, timeout=300):
        """Block until an authorization code (or error) arrives, or raise on timeout."""
        deadline = time.time() + timeout
        while time.time() < deadline:
            code = self.callback_data["authorization_code"]
            if code:
                return code
            if self.callback_data["error"]:
                raise Exception(f"OAuth error: {self.callback_data['error']}")
            time.sleep(0.1)
        raise Exception("Timeout waiting for OAuth callback")

    def get_state(self):
        """Get the received state parameter."""
        return self.callback_data["state"]
class SimpleAuthClient:
    """Simple MCP client with auth support.

    Wraps an MCP ClientSession behind an OAuth-capable transport (SSE or
    streamable HTTP) and drives a small interactive REPL (list/call/quit).
    """

    def __init__(self, server_url: str, transport_type: str = "streamable_http"):
        self.server_url = server_url
        self.transport_type = transport_type
        # Populated only after connect() establishes a session.
        self.session: ClientSession | None = None

    async def connect(self):
        """Connect to the MCP server.

        Starts a local HTTP server on port 3030 to receive the OAuth redirect,
        builds an OAuthClientProvider around it, then opens the configured
        transport and runs the interactive session. Errors are printed (with a
        traceback), not raised — this is a demo client.
        """
        print(f"🔗 Attempting to connect to {self.server_url}...")
        try:
            # Local server that captures the OAuth redirect (authorization code).
            callback_server = CallbackServer(port=3030)
            callback_server.start()

            async def callback_handler() -> tuple[str, str | None]:
                """Wait for OAuth callback and return auth code and state."""
                print("⏳ Waiting for authorization callback...")
                try:
                    auth_code = callback_server.wait_for_callback(timeout=300)
                    return auth_code, callback_server.get_state()
                finally:
                    # Always tear down the local callback server, even on timeout.
                    callback_server.stop()

            client_metadata_dict = {
                "client_name": "Simple Auth Client",
                "redirect_uris": ["http://localhost:3030/callback"],
                "grant_types": ["authorization_code", "refresh_token"],
                "response_types": ["code"],
                "token_endpoint_auth_method": "client_secret_post",
            }

            async def _default_redirect_handler(authorization_url: str) -> None:
                """Default redirect handler that opens the URL in a browser."""
                print(f"Opening browser for authorization: {authorization_url}")
                webbrowser.open(authorization_url)

            # Create OAuth authentication handler using the new interface
            # NOTE(review): strips "/mcp" to derive the OAuth server URL — assumes
            # the auth endpoints live at the transport URL minus that suffix; confirm.
            oauth_auth = OAuthClientProvider(
                server_url=self.server_url.replace("/mcp", ""),
                client_metadata=OAuthClientMetadata.model_validate(client_metadata_dict),
                storage=InMemoryTokenStorage(),
                redirect_handler=_default_redirect_handler,
                callback_handler=callback_handler,
            )

            # Create transport with auth handler based on transport type
            if self.transport_type == "sse":
                print("📡 Opening SSE transport connection with auth...")
                async with sse_client(
                    url=self.server_url,
                    auth=oauth_auth,
                    timeout=60,
                ) as (read_stream, write_stream):
                    # SSE transport exposes no session-id accessor.
                    await self._run_session(read_stream, write_stream, None)
            else:
                print("📡 Opening StreamableHTTP transport connection with auth...")
                async with streamablehttp_client(
                    url=self.server_url,
                    auth=oauth_auth,
                    timeout=timedelta(seconds=60),
                ) as (read_stream, write_stream, get_session_id):
                    await self._run_session(read_stream, write_stream, get_session_id)
        except Exception as e:
            # Broad catch is intentional for this demo client: report and return.
            print(f"❌ Failed to connect: {e}")
            import traceback

            traceback.print_exc()

    async def _run_session(self, read_stream, write_stream, get_session_id):
        """Run the MCP session with the given streams.

        Args:
            read_stream/write_stream: transport streams from the client context manager.
            get_session_id: optional zero-arg callable returning the transport
                session id (None for SSE).
        """
        print("🤝 Initializing MCP session...")
        async with ClientSession(read_stream, write_stream) as session:
            self.session = session
            print("⚡ Starting session initialization...")
            await session.initialize()
            print("✨ Session initialization complete!")
            print(f"\n✅ Connected to MCP server at {self.server_url}")
            if get_session_id:
                session_id = get_session_id()
                if session_id:
                    print(f"Session ID: {session_id}")
            # Run interactive loop
            await self.interactive_loop()

    async def list_tools(self):
        """List available tools from the server."""
        if not self.session:
            print("❌ Not connected to server")
            return
        try:
            result = await self.session.list_tools()
            if hasattr(result, "tools") and result.tools:
                print("\n📋 Available tools:")
                for i, tool in enumerate(result.tools, 1):
                    print(f"{i}. {tool.name}")
                    if tool.description:
                        print(f"   Description: {tool.description}")
                    print()
            else:
                print("No tools available")
        except Exception as e:
            print(f"❌ Failed to list tools: {e}")

    async def call_tool(self, tool_name: str, arguments: dict[str, Any] | None = None):
        """Call a specific tool."""
        if not self.session:
            print("❌ Not connected to server")
            return
        try:
            result = await self.session.call_tool(tool_name, arguments or {})
            print(f"\n🔧 Tool '{tool_name}' result:")
            if hasattr(result, "content"):
                # Content items are typed blocks; only text blocks are unwrapped.
                for content in result.content:
                    if content.type == "text":
                        print(content.text)
                    else:
                        print(content)
            else:
                print(result)
        except Exception as e:
            print(f"❌ Failed to call tool '{tool_name}': {e}")

    async def interactive_loop(self):
        """Run interactive command loop."""
        print("\n🎯 Interactive MCP Client")
        print("Commands:")
        print("  list - List available tools")
        print("  call <tool_name> [args] - Call a tool")
        print("  quit - Exit the client")
        print()
        while True:
            try:
                command = input("mcp> ").strip()
                if not command:
                    continue
                if command == "quit":
                    break
                elif command == "list":
                    await self.list_tools()
                elif command.startswith("call "):
                    parts = command.split(maxsplit=2)
                    tool_name = parts[1] if len(parts) > 1 else ""
                    if not tool_name:
                        print("❌ Please specify a tool name")
                        continue
                    # Parse arguments (simple JSON-like format)
                    arguments = {}
                    if len(parts) > 2:
                        import json

                        try:
                            arguments = json.loads(parts[2])
                        except json.JSONDecodeError:
                            print("❌ Invalid arguments format (expected JSON)")
                            continue
                    await self.call_tool(tool_name, arguments)
                else:
                    print("❌ Unknown command. Try 'list', 'call <tool_name>', or 'quit'")
            except KeyboardInterrupt:
                print("\n\n👋 Goodbye!")
                break
            except EOFError:
                # stdin closed (e.g. piped input exhausted) — exit quietly.
                break
async def main():
    """Main entry point.

    Reads MCP_SERVER_PORT (default "8000") and MCP_TRANSPORT_TYPE (default
    "streamable_http") from the environment, builds the matching localhost
    URL, and runs the auth client until the session ends.
    """
    # Default server port - can be overridden with environment variable.
    # BUG FIX: this value was previously bound to a variable misleadingly named
    # `server_url` (it is a port, not a URL) and used an int default, which was
    # inconsistent with os.getenv's string returns. Behavior is unchanged.
    server_port = os.getenv("MCP_SERVER_PORT", "8000")
    transport_type = os.getenv("MCP_TRANSPORT_TYPE", "streamable_http")
    # Most MCP streamable HTTP servers use /mcp as the endpoint
    server_url = f"http://localhost:{server_port}/mcp" if transport_type == "streamable_http" else f"http://localhost:{server_port}/sse"
    print("🚀 Simple MCP Auth Client")
    print(f"Connecting to: {server_url}")
    print(f"Transport type: {transport_type}")
    # Start connection flow - OAuth will be handled automatically
    client = SimpleAuthClient(server_url, transport_type)
    await client.connect()
def cli():
    """CLI entry point for uv script: run the async main() to completion."""
    entry_coroutine = main()
    asyncio.run(entry_coroutine)


if __name__ == "__main__":
    cli()
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/mcp_test.py",
"license": "Apache License 2.0",
"lines": 290,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/mcp_tests/test_mcp.py | import json
import os
import shutil
import subprocess
import threading
import time
import uuid
import venv
from pathlib import Path
import pytest
from dotenv import load_dotenv
from letta_client import Letta
from letta_client.types import MessageCreateParam, Tool, ToolReturnMessage
from letta_client.types.agents import ToolCallMessage
from letta.functions.mcp_client.types import SSEServerConfig, StdioServerConfig, StreamableHTTPServerConfig
from letta.schemas.embedding_config import EmbeddingConfig
from letta.schemas.llm_config import LLMConfig
from tests.utils import wait_for_server
def create_virtualenv_and_install_requirements(requirements_path: Path, name="venv", force_recreate=True) -> Path:
    """Create (or recreate) a virtualenv next to *requirements_path* and install the requirements.

    Args:
        requirements_path: Path to a file literally named ``requirements.txt``.
        name: Directory name for the virtualenv (created as a sibling of the file).
        force_recreate: When True (default), delete any pre-existing venv first.
            Also doubles as the "one clean-rebuild retry still available" marker
            when ``pip install`` fails.

    Returns:
        Path to the virtualenv directory.

    Raises:
        FileNotFoundError: requirements file (or the created venv's pip) is missing.
        ValueError: the file is not named ``requirements.txt``.
        RuntimeError: ``pip install`` failed even after a clean rebuild.
    """
    requirements_path = requirements_path.resolve()
    if not requirements_path.exists():
        raise FileNotFoundError(f"Requirements file not found: {requirements_path}")
    if requirements_path.name != "requirements.txt":
        raise ValueError(f"Expected file named 'requirements.txt', got: {requirements_path.name}")
    venv_dir = requirements_path.parent / name
    # Always clean up an existing venv when force_recreate is True (the default)
    # to avoid reusing a partially-built / corrupted environment.
    if venv_dir.exists() and force_recreate:
        try:
            shutil.rmtree(venv_dir)
            print(f"Cleaned up existing venv at {venv_dir}")
        except Exception as e:
            # Continue anyway, might still work
            print(f"Warning: Failed to remove existing venv: {e}")
    # Create fresh venv
    if not venv_dir.exists():
        venv.EnvBuilder(with_pip=True, clear=True).create(venv_dir)
    pip_path = venv_dir / ("Scripts" if os.name == "nt" else "bin") / "pip"
    # Wait briefly for venv creation to finish; pip can appear slightly late on some platforms.
    for _ in range(10):
        if pip_path.exists():
            break
        time.sleep(0.1)
    if not pip_path.exists():
        raise FileNotFoundError(f"pip executable not found at: {pip_path}")
    try:
        # Upgrade pip first to avoid potential issues
        subprocess.check_call([str(pip_path), "install", "--upgrade", "pip"], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        # Install requirements
        subprocess.check_call([str(pip_path), "install", "-r", str(requirements_path)])
    except subprocess.CalledProcessError as exc:
        # BUG FIX: the retry guard was inverted (`if not force_recreate:`), so the
        # default call path never retried on failure, while a force_recreate=False
        # call would recurse infinitely (the retry also passed False, re-entering
        # the same branch). Now: retry exactly once — wipe the venv and rebuild
        # with force_recreate=False so a second failure raises.
        if force_recreate:
            print("Initial pip install failed, attempting clean recreation...")
            shutil.rmtree(venv_dir, ignore_errors=True)
            return create_virtualenv_and_install_requirements(requirements_path, name, force_recreate=False)
        raise RuntimeError(f"pip install failed with exit code {exc.returncode}") from exc
    return venv_dir
# --- Server Management --- #
def _run_server():
    """Starts the Letta server in a background thread.

    Loads .env first so provider keys are available, then blocks inside
    ``start_server``; callers run this as a daemon-thread target.
    """
    load_dotenv()
    # Import here so the (heavy) server stack is only loaded when actually needed.
    from letta.server.rest_api.app import start_server
    start_server(debug=True)
@pytest.fixture
def empty_mcp_config():
    """Write an empty JSON object to mcp_config.json beside this test file and return its path."""
    config_path = Path(__file__).with_name("mcp_config.json")
    config_path.write_text(json.dumps({}))
    return config_path
@pytest.fixture(autouse=True)
def cleanup_test_venvs():
    """Remove the weather test venv before each test; deliberately leave it afterwards."""
    stale_venv = Path(__file__).parent / "weather" / "venv"
    # Best-effort pre-test cleanup in case a previous run left a broken venv behind.
    try:
        if stale_venv.exists():
            shutil.rmtree(stale_venv)
    except Exception:
        pass  # ignore cleanup failures; the test may still succeed
    yield
    # No post-test cleanup on purpose: a leftover venv helps debugging, and
    # the next run removes it anyway.
@pytest.fixture()
def server_url(empty_mcp_config):
    """Ensures a server is running and returns its base URL.

    If LETTA_SERVER_URL is unset, spawns the server in a daemon thread and
    blocks until it responds (or the timeout elapses). Depends on
    empty_mcp_config so the server starts with a blank MCP config file.
    """
    url = os.getenv("LETTA_SERVER_URL", "http://localhost:8283")
    if not os.getenv("LETTA_SERVER_URL"):
        thread = threading.Thread(target=_run_server, daemon=True)
        thread.start()
        # Use 60s timeout to allow for provider model syncing during server startup
        wait_for_server(url, timeout=60)
    return url
@pytest.fixture()
def client(server_url):
    """Return a Letta REST client pointed at the running test server."""
    return Letta(base_url=server_url)
@pytest.fixture()
def agent_state(client):
    """Creates an agent and ensures cleanup after tests.

    The agent gets base tools plus human/persona memory blocks, uses
    gpt-4o-mini with OpenAI embeddings, and is deleted after the test.
    """
    agent_state = client.agents.create(
        name=f"test_compl_{str(uuid.uuid4())[5:]}",  # random suffix avoids name collisions across runs
        include_base_tools=True,
        memory_blocks=[
            {
                "label": "human",
                "value": "Name: Matt",
            },
            {
                "label": "persona",
                "value": "Friendly agent",
            },
        ],
        llm_config=LLMConfig.default_config(model_name="gpt-4o-mini"),
        embedding_config=EmbeddingConfig.default_config(provider="openai"),
    )
    yield agent_state
    client.agents.delete(agent_state.id)
@pytest.mark.skip(reason="The deepwiki SSE MCP server is deprecated")
@pytest.mark.asyncio
async def test_sse_mcp_server(client, agent_state):
    """End-to-end SSE MCP flow against the public deepwiki server.

    Registers the server, verifies its tools are auto-registered, attaches
    `ask_question` to the agent, sends a message instructing the agent to
    call it, and asserts a ToolCallMessage plus a successful
    ToolReturnMessage appear. Cleanup (server deletion) runs in `finally`.
    """
    mcp_server_name = "deepwiki"
    server_url = "https://mcp.deepwiki.com/sse"
    sse_mcp_config = SSEServerConfig(server_name=mcp_server_name, server_url=server_url)
    # Create MCP server using new API - convert 'type' to 'mcp_server_type' for 1.0 API
    config_dict = sse_mcp_config.model_dump()
    config_dict["mcp_server_type"] = config_dict.pop("type")
    config_dict.pop("server_name")  # server_name is passed separately
    server = client.mcp_servers.create(server_name=mcp_server_name, config=config_dict)
    try:
        # Check that it's in the server list
        servers = client.mcp_servers.list()
        server_names = [s.server_name for s in servers]
        assert mcp_server_name in server_names
        # Check tools - now tools are automatically registered
        tools = client.mcp_servers.tools.list(mcp_server_id=server.id)
        assert len(tools) > 0
        assert isinstance(tools[0], Tool)
        # Test with the ask_question tool which is one of the available deepwiki tools
        ask_question_tool = next((t for t in tools if t.name == "ask_question"), None)
        assert ask_question_tool is not None, f"ask_question tool not found. Available tools: {[t.name for t in tools]}"
        tool_args = {"repoName": "facebook/react", "question": "What is React?"}
        # Add to agent - tool is already registered, just attach it
        client.agents.tools.attach(agent_id=agent_state.id, tool_id=ask_question_tool.id)
        # Create message using MessageCreateParam
        response = client.agents.messages.create(
            agent_id=agent_state.id,
            messages=[
                MessageCreateParam(role="user", content=f"Use the `{ask_question_tool.name}` tool with these arguments: {tool_args}.")
            ],
        )
        seq = response.messages
        calls = [m for m in seq if isinstance(m, ToolCallMessage)]
        assert calls, "Expected a ToolCallMessage"
        assert calls[0].tool_call.name == "ask_question"
        returns = [m for m in seq if isinstance(m, ToolReturnMessage)]
        assert returns, "Expected a ToolReturnMessage"
        tr = returns[0]
        # status field
        assert tr.status == "success", f"Bad status: {tr.status}"
        # Check that we got some content back
        assert len(tr.tool_return.strip()) > 0, f"Expected non-empty tool return, got: {tr.tool_return}"
    finally:
        # Always deregister the server so later tests start from a clean list.
        client.mcp_servers.delete(mcp_server_id=server.id)
        servers = client.mcp_servers.list()
        server_names = [s.server_name for s in servers]
        assert mcp_server_name not in server_names
def test_stdio_mcp_server(client, agent_state, server_url):
    """End-to-end stdio MCP flow using the local weather server.

    Builds a dedicated virtualenv from the weather server's requirements,
    registers the server over stdio, attaches its `get_alerts` tool to the
    agent, and asserts the message run produces a matching tool call and a
    successful, non-trivial tool return. Server deletion runs in `finally`.
    """
    req_file = Path(__file__).parent / "weather" / "requirements.txt"
    create_virtualenv_and_install_requirements(req_file, name="venv")
    mcp_server_name = "weather"
    # Run the weather server with the venv's own interpreter so its deps resolve.
    command = str(Path(__file__).parent / "weather" / "venv" / "bin" / "python3")
    args = [str(Path(__file__).parent / "weather" / "weather.py")]
    stdio_config = StdioServerConfig(
        server_name=mcp_server_name,
        command=command,
        args=args,
    )
    # Create MCP server using new API - convert 'type' to 'mcp_server_type' for 1.0 API
    config_dict = stdio_config.model_dump()
    config_dict["mcp_server_type"] = config_dict.pop("type")
    config_dict.pop("server_name")  # server_name is passed separately
    server = client.mcp_servers.create(server_name=mcp_server_name, config=config_dict)
    try:
        servers = client.mcp_servers.list()
        server_names = [s.server_name for s in servers]
        assert mcp_server_name in server_names
        # Get tools - now automatically registered
        tools = client.mcp_servers.tools.list(mcp_server_id=server.id)
        assert tools, "Expected at least one tool from the weather MCP server"
        assert any(t.name == "get_alerts" for t in tools), f"Got: {[t.name for t in tools]}"
        get_alerts = next(t for t in tools if t.name == "get_alerts")
        # Tool is already registered, just attach it
        client.agents.tools.attach(agent_id=agent_state.id, tool_id=get_alerts.id)
        # Create message using MessageCreateParam
        response = client.agents.messages.create(
            agent_id=agent_state.id,
            messages=[
                MessageCreateParam(role="user", content=f"Use the `{get_alerts.name}` tool with these arguments: {{'state': 'CA'}}.")
            ],
        )
        # Pair the return to the call via tool_call_id so extra messages are tolerated.
        calls = [m for m in response.messages if isinstance(m, ToolCallMessage) and m.tool_call.name == "get_alerts"]
        assert calls, "Expected a get_alerts ToolCallMessage"
        returns = [m for m in response.messages if isinstance(m, ToolReturnMessage) and m.tool_call_id == calls[0].tool_call.tool_call_id]
        assert returns, "Expected a ToolReturnMessage for get_alerts"
        ret = returns[0]
        assert ret.status == "success", f"Unexpected status: {ret.status}"
        # make sure there's at least some payload
        assert len(ret.tool_return.strip()) >= 10, f"Expected at least 10 characters in tool_return, got {len(ret.tool_return.strip())}"
    finally:
        # Always deregister the server so later tests start from a clean list.
        client.mcp_servers.delete(mcp_server_id=server.id)
        servers = client.mcp_servers.list()
        server_names = [s.server_name for s in servers]
        assert mcp_server_name not in server_names
# Optional OpenAI validation test for MCP-normalized schema
# Skips unless OPENAI_API_KEY is set to avoid network flakiness in CI
# Deliberately OpenAI-strict-incompatible tool schema: the "message" property
# is a free-form object whose additionalProperties entry lacks a "type".
EXAMPLE_BAD_SCHEMA = {
    "type": "object",
    "properties": {
        "conversation_type": {
            "type": "string",
            "const": "Group",
            "description": "Specifies the type of conversation to be created. Must be 'Group' for this action.",
        },
        "message": {
            "type": "object",
            "additionalProperties": {},  # invalid for OpenAI: missing "type"
            "description": "Initial message payload",
        },
        "participant_ids": {
            "type": "array",
            "items": {"type": "string"},
            "description": "Participant IDs",
        },
    },
    "required": ["conversation_type", "message", "participant_ids"],
    "additionalProperties": False,
    "$schema": "http://json-schema.org/draft-07/schema#",
}
@pytest.mark.skipif(
    not os.getenv("OPENAI_API_KEY"),
    reason="Requires OPENAI_API_KEY to call OpenAI for schema validation",
)
def test_openai_rejects_untyped_additional_properties_and_accepts_normalized_schema():
    """Test written to check if our extra schema validation works.
    Some MCP servers will return faulty schemas that require correction, or they will brick the LLM client calls.

    Sends EXAMPLE_BAD_SCHEMA to OpenAI as a strict tool definition and expects
    a rejection; then repairs the "message" property (typed sub-properties,
    additionalProperties: False) and expects the request to succeed.
    """
    import copy
    try:
        from openai import OpenAI
    except Exception as e:  # pragma: no cover
        pytest.skip(f"openai package not available: {e}")
    client = OpenAI()
    def run_request_with_schema(schema: dict):
        # "strict": True makes OpenAI validate the parameters schema server-side.
        tools = [
            {
                "type": "function",
                "function": {
                    "name": "TWITTER_CREATE_A_NEW_DM_CONVERSATION",
                    "description": "Create a DM conversation",
                    "parameters": schema,
                    "strict": True,
                },
            }
        ]
        return client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[{"role": "user", "content": "hello"}],
            tools=tools,
        )
    # Bad schema should raise
    with pytest.raises(Exception):
        run_request_with_schema(EXAMPLE_BAD_SCHEMA)
    # Normalized should succeed
    normalized = copy.deepcopy(EXAMPLE_BAD_SCHEMA)
    normalized["properties"]["message"]["additionalProperties"] = False
    normalized["properties"]["message"]["properties"] = {"text": {"type": "string"}}
    normalized["properties"]["message"]["required"] = ["text"]
    resp = run_request_with_schema(normalized)
    assert getattr(resp, "id", None)
@pytest.mark.asyncio
async def test_streamable_http_mcp_server_update_schema_no_docstring_required(client, agent_state, server_url):
    """
    Repro for schema-derivation-on-update error with MCP tools.
    Without the fix, calling add_mcp_tool a second time for the same MCP tool
    triggers a docstring-based schema derivation on a generated wrapper that has
    no docstring, causing a 500. With the fix in place, updates should succeed.
    With 1.0 API, tools are automatically registered when server is created,
    so this test verifies that tools can be retrieved multiple times without issues.
    """
    # Unique name so repeated/parallel runs don't collide on server registration.
    mcp_server_name = f"deepwiki_http_{uuid.uuid4().hex[:6]}"
    mcp_url = "https://mcp.deepwiki.com/mcp"
    http_mcp_config = StreamableHTTPServerConfig(server_name=mcp_server_name, server_url=mcp_url)
    # Create MCP server using new API - convert 'type' to 'mcp_server_type' for 1.0 API
    config_dict = http_mcp_config.model_dump()
    config_dict["mcp_server_type"] = config_dict.pop("type")
    config_dict.pop("server_name")  # server_name is passed separately
    server = client.mcp_servers.create(server_name=mcp_server_name, config=config_dict)
    try:
        # Ensure server is registered
        servers = client.mcp_servers.list()
        server_names = [s.server_name for s in servers]
        assert mcp_server_name in server_names
        # Fetch available tools from server - tools are automatically registered
        tools = client.mcp_servers.tools.list(mcp_server_id=server.id)
        assert tools, "Expected at least one tool from deepwiki streamable-http MCP server"
        ask_question_tool = next((t for t in tools if t.name == "ask_question"), None)
        assert ask_question_tool is not None, f"ask_question tool not found. Available: {[t.name for t in tools]}"
        # Verify tool is accessible
        letta_tool_1 = client.mcp_servers.tools.retrieve(mcp_server_id=server.id, tool_id=ask_question_tool.id)
        assert letta_tool_1 is not None
        # Retrieve again - should work without issues (this is the regression under test)
        letta_tool_2 = client.mcp_servers.tools.retrieve(mcp_server_id=server.id, tool_id=ask_question_tool.id)
        assert letta_tool_2 is not None
    finally:
        # Always deregister the server so later tests start from a clean list.
        client.mcp_servers.delete(mcp_server_id=server.id)
        servers = client.mcp_servers.list()
        server_names = [s.server_name for s in servers]
        assert mcp_server_name not in server_names
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/mcp_tests/test_mcp.py",
"license": "Apache License 2.0",
"lines": 318,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/mcp_tests/test_mcp_schema_validation.py | """
Test MCP tool schema validation integration.
"""
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from letta.functions.mcp_client.types import MCPTool, MCPToolHealth
from letta.functions.schema_generator import generate_tool_schema_for_mcp
from letta.functions.schema_validator import SchemaHealth, validate_complete_json_schema
from letta.server.rest_api.dependencies import HeaderParams
@pytest.mark.asyncio
async def test_mcp_tools_get_health_status():
    """Test that MCP tools receive health status when listed.

    Builds three mock tools (strict-compliant, non-strict, invalid), wires a
    mocked MCP client into a bare SyncServer instance, and asserts that
    get_tools_from_mcp_server annotates each tool with the expected
    SchemaHealth status and reasons.
    """
    from letta.server.server import SyncServer
    # Create mock tools with different schema types
    mock_tools = [
        # Strict compliant tool
        MCPTool(
            name="strict_tool",
            inputSchema={"type": "object", "properties": {"text": {"type": "string"}}, "required": ["text"], "additionalProperties": False},
        ),
        # Non-strict tool (free-form object)
        MCPTool(
            name="non_strict_tool",
            inputSchema={
                "type": "object",
                "properties": {"message": {"type": "object", "additionalProperties": {}}},  # Free-form object
                "required": ["message"],
                "additionalProperties": False,
            },
        ),
        # Invalid tool (missing type)
        MCPTool(name="invalid_tool", inputSchema={"properties": {"data": {"type": "string"}}, "required": ["data"]}),
    ]
    # Mock the server and client
    mock_client = AsyncMock()
    mock_client.list_tools = AsyncMock(return_value=mock_tools)
    # Call the method directly; __new__ skips SyncServer.__init__ so no real
    # server state (DB, config) is needed for this unit test.
    actual_server = SyncServer.__new__(SyncServer)
    actual_server.mcp_clients = {"test_server": mock_client}
    tools = await actual_server.get_tools_from_mcp_server("test_server")
    # Verify health status was added
    assert len(tools) == 3
    # Check strict tool
    strict_tool = tools[0]
    assert strict_tool.name == "strict_tool"
    assert strict_tool.health is not None
    assert strict_tool.health.status == SchemaHealth.STRICT_COMPLIANT.value
    assert strict_tool.health.reasons == []
    # Check non-strict tool
    non_strict_tool = tools[1]
    assert non_strict_tool.name == "non_strict_tool"
    assert non_strict_tool.health is not None
    assert non_strict_tool.health.status == SchemaHealth.NON_STRICT_ONLY.value
    assert len(non_strict_tool.health.reasons) > 0
    assert any("additionalProperties" in reason for reason in non_strict_tool.health.reasons)
    # Check invalid tool
    invalid_tool = tools[2]
    assert invalid_tool.name == "invalid_tool"
    assert invalid_tool.health is not None
    assert invalid_tool.health.status == SchemaHealth.INVALID.value
    assert len(invalid_tool.health.reasons) > 0
    assert any("type" in reason for reason in invalid_tool.health.reasons)
def test_empty_object_in_required_marked_invalid():
    """A required property whose schema admits only the empty object must be INVALID."""
    empty_object_schema = {"type": "object", "properties": {}, "required": [], "additionalProperties": False}
    schema = {
        "type": "object",
        "properties": {"config": empty_object_schema},  # required, yet can only ever be {}
        "required": ["config"],
        "additionalProperties": False,
    }

    status, reasons = validate_complete_json_schema(schema)

    assert status == SchemaHealth.INVALID
    # Both the failure mode and the offending property should be named.
    joined_reasons = " | ".join(reasons)
    assert "empty object" in joined_reasons
    assert "config" in joined_reasons
@pytest.mark.asyncio
async def test_add_mcp_tool_accepts_non_strict_schemas():
    """Test that adding MCP tools with non-strict schemas is allowed.

    A tool whose health is NON_STRICT_ONLY should still be registered (only
    its strict-mode guarantees are reduced). Verifies the request reaches
    tool_manager.create_mcp_tool_async with the expected server name.
    """
    from letta.server.rest_api.routers.v1.tools import add_mcp_tool
    from letta.settings import tool_settings
    # Mock a non-strict tool
    non_strict_tool = MCPTool(
        name="test_tool",
        inputSchema={
            "type": "object",
            "properties": {"message": {"type": "object"}},  # Missing additionalProperties: false
            "required": ["message"],
            "additionalProperties": False,
        },
    )
    non_strict_tool.health = MCPToolHealth(status=SchemaHealth.NON_STRICT_ONLY.value, reasons=["Missing additionalProperties for message"])
    # Mock server response
    with patch("letta.server.rest_api.routers.v1.tools.get_letta_server") as mock_get_server:
        with patch.object(tool_settings, "mcp_read_from_config", True):  # Ensure we're using config path
            mock_server = AsyncMock()
            mock_server.get_tools_from_mcp_server = AsyncMock(return_value=[non_strict_tool])
            mock_server.user_manager.get_user_or_default = MagicMock()
            mock_server.tool_manager.create_mcp_tool_async = AsyncMock(return_value=non_strict_tool)
            mock_get_server.return_value = mock_server
            # Should accept non-strict schema without raising an exception
            headers = HeaderParams(actor_id="test_user")
            result = await add_mcp_tool(mcp_server_name="test_server", mcp_tool_name="test_tool", server=mock_server, headers=headers)
            # Verify the tool was added successfully
            assert result is not None
            # Verify create_mcp_tool_async was called with the right parameters
            mock_server.tool_manager.create_mcp_tool_async.assert_called_once()
            call_args = mock_server.tool_manager.create_mcp_tool_async.call_args
            assert call_args.kwargs["mcp_server_name"] == "test_server"
@pytest.mark.skip(reason="Allowing invalid schemas to be attached")
@pytest.mark.asyncio
async def test_add_mcp_tool_rejects_invalid_schemas():
    """Test that adding MCP tools with invalid schemas is rejected.

    Currently skipped: server policy changed to allow attaching invalid
    schemas. Kept as documentation of the previous rejection behavior
    (LettaInvalidMCPSchemaError with tool name and reasons in details).
    """
    from letta.server.rest_api.routers.v1.tools import add_mcp_tool
    from letta.settings import tool_settings
    # Mock an invalid tool
    invalid_tool = MCPTool(
        name="test_tool",
        inputSchema={
            "properties": {"data": {"type": "string"}},
            "required": ["data"],
            # Missing "type": "object"
        },
    )
    invalid_tool.health = MCPToolHealth(status=SchemaHealth.INVALID.value, reasons=["Missing 'type' at root level"])
    # Mock server response
    with patch("letta.server.rest_api.routers.v1.tools.get_letta_server") as mock_get_server:
        with patch.object(tool_settings, "mcp_read_from_config", True):  # Ensure we're using config path
            mock_server = AsyncMock()
            mock_server.get_tools_from_mcp_server = AsyncMock(return_value=[invalid_tool])
            mock_server.user_manager.get_user_or_default = MagicMock()
            mock_get_server.return_value = mock_server
            # Should raise HTTPException for invalid schema
            headers = HeaderParams(actor_id="test_user")
            from letta.errors import LettaInvalidMCPSchemaError
            with pytest.raises(LettaInvalidMCPSchemaError) as exc_info:
                await add_mcp_tool(mcp_server_name="test_server", mcp_tool_name="test_tool", server=mock_server, headers=headers)
            assert "invalid schema" in exc_info.value.message.lower()
            assert exc_info.value.details["mcp_tool_name"] == "test_tool"
            assert exc_info.value.details["reasons"] == ["Missing 'type' at root level"]
def test_mcp_schema_healing_for_optional_fields():
    """Optional fields are healed (made required + nullable) only when strict=True."""
    tool = MCPTool(
        name="test_tool",
        description="A test tool",
        inputSchema={
            "type": "object",
            "properties": {
                "a": {"type": "integer", "description": "Required field"},
                "b": {"type": "integer", "description": "Optional field"},
            },
            "required": ["a"],  # 'b' is deliberately optional
            "additionalProperties": False,
        },
    )

    # strict=False: no healing — 'b' stays optional and non-nullable.
    relaxed_params = generate_tool_schema_for_mcp(tool, append_heartbeat=False, strict=False)["parameters"]
    assert "a" in relaxed_params["required"]
    assert "b" not in relaxed_params["required"]
    assert relaxed_params["properties"]["b"]["type"] == "integer"
    health, _ = validate_complete_json_schema(relaxed_params)
    assert health == SchemaHealth.STRICT_COMPLIANT  # validator itself is relaxed

    # strict=True: 'b' is promoted to required and widened to accept null.
    strict_out = generate_tool_schema_for_mcp(tool, append_heartbeat=False, strict=True)
    assert strict_out["strict"] is True
    strict_params = strict_out["parameters"]
    assert "a" in strict_params["required"]
    assert "b" in strict_params["required"]
    assert set(strict_params["properties"]["b"]["type"]) == {"integer", "null"}
    health, _ = validate_complete_json_schema(strict_params)
    assert health == SchemaHealth.STRICT_COMPLIANT
def test_mcp_schema_healing_with_anyof():
    """Strict healing keeps anyOf unions intact while promoting optional fields to required."""
    tool = MCPTool(
        name="test_tool",
        description="A test tool",
        inputSchema={
            "type": "object",
            "properties": {
                "a": {"type": "string", "description": "Required field"},
                "b": {
                    "anyOf": [{"type": "integer"}, {"type": "null"}],
                    "description": "Optional field with anyOf",
                },
            },
            "required": ["a"],  # only 'a' starts out required
            "additionalProperties": False,
        },
    )

    healed = generate_tool_schema_for_mcp(tool, append_heartbeat=False, strict=True)
    params = healed["parameters"]

    assert healed["strict"] is True
    assert "a" in params["required"]
    assert "b" in params["required"]

    # The anyOf union survives healing: still exactly integer|null.
    b_schema = params["properties"]["b"]
    assert "anyOf" in b_schema
    assert len(b_schema["anyOf"]) == 2
    assert {option.get("type") for option in b_schema["anyOf"]} == {"integer", "null"}

    health, _ = validate_complete_json_schema(params)
    assert health == SchemaHealth.STRICT_COMPLIANT
def test_mcp_schema_type_deduplication():
    """Duplicate anyOf entries are collapsed during strict schema generation."""
    tool = MCPTool(
        name="test_tool",
        description="A test tool",
        inputSchema={
            "type": "object",
            "properties": {
                "field": {
                    "anyOf": [
                        {"type": "string"},
                        {"type": "string"},  # duplicate entry the generator must drop
                        {"type": "null"},
                    ],
                    "description": "Field with duplicate types",
                },
            },
            "required": [],
            "additionalProperties": False,
        },
    )

    generated = generate_tool_schema_for_mcp(tool, append_heartbeat=False, strict=True)

    field_schema = generated["parameters"]["properties"]["field"]
    assert "anyOf" in field_schema
    anyof_types = [entry.get("type") for entry in field_schema["anyOf"]]
    # One string + one null survive; the duplicate string is gone.
    assert len(anyof_types) == 2
    assert anyof_types.count("string") == 1
    assert anyof_types.count("null") == 1
def test_mcp_schema_healing_preserves_existing_null():
    """Healing must not append a second 'null' to a type list that already contains one."""
    tool = MCPTool(
        name="test_tool",
        description="A test tool",
        inputSchema={
            "type": "object",
            "properties": {
                "field": {
                    "type": ["string", "null"],  # null already present before healing
                    "description": "Field that already accepts null",
                },
            },
            "required": [],
            "additionalProperties": False,
        },
    )

    healed = generate_tool_schema_for_mcp(tool, append_heartbeat=False, strict=True)

    type_list = healed["parameters"]["properties"]["field"]["type"]
    assert type_list.count("null") == 1
def test_mcp_schema_healing_all_fields_already_required():
    """When every field is already required, strict healing leaves the schema untouched."""
    tool = MCPTool(
        name="test_tool",
        description="A test tool",
        inputSchema={
            "type": "object",
            "properties": {
                "a": {"type": "string", "description": "Field A"},
                "b": {"type": "integer", "description": "Field B"},
            },
            "required": ["a", "b"],  # nothing left to heal
            "additionalProperties": False,
        },
    )

    healed = generate_tool_schema_for_mcp(tool, append_heartbeat=False, strict=True)
    params = healed["parameters"]

    # Required set and per-field types are preserved as-is.
    assert set(params["required"]) == {"a", "b"}
    assert params["properties"]["a"]["type"] == "string"
    assert params["properties"]["b"]["type"] == "integer"

    health, _ = validate_complete_json_schema(params)
    assert health == SchemaHealth.STRICT_COMPLIANT
def test_mcp_schema_with_uuid_format():
    """Test handling of UUID format in anyOf schemas (deduplicates but keeps format).

    The optional field starts as string | string(uuid) | null; healing should
    collapse the two string variants into the format-bearing one, keep null,
    and mark the field required (strict mode heals optionals).
    """
    mcp_tool = MCPTool(
        name="test_tool",
        description="A test tool with UUID formatted field",
        inputSchema={
            "type": "object",
            "properties": {
                "session_id": {
                    "anyOf": [{"type": "string"}, {"format": "uuid", "type": "string"}, {"type": "null"}],
                    "description": "Session ID that can be a string, UUID, or null",
                },
            },
            "required": [],
            "additionalProperties": False,
        },
    )
    # Generate strict schema
    strict_schema = generate_tool_schema_for_mcp(mcp_tool, append_heartbeat=False, strict=True)
    # Check that anyOf is preserved with deduplication
    session_props = strict_schema["parameters"]["properties"]["session_id"]
    assert "anyOf" in session_props
    # Deduplication should keep the string with format (more specific)
    assert len(session_props["anyOf"]) == 2  # Deduplicated: string (with format) + null
    types_in_anyof = [opt.get("type") for opt in session_props["anyOf"]]
    assert types_in_anyof.count("string") == 1  # Only one string entry (the one with format)
    assert "null" in types_in_anyof
    # Verify the string entry has the uuid format
    string_entry = next(opt for opt in session_props["anyOf"] if opt.get("type") == "string")
    assert string_entry.get("format") == "uuid", "UUID format should be preserved"
    # Should be in required array (healed)
    assert "session_id" in strict_schema["parameters"]["required"]
    # Should be strict compliant
    status, _ = validate_complete_json_schema(strict_schema["parameters"])
    assert status == SchemaHealth.STRICT_COMPLIANT
def test_mcp_schema_healing_only_in_strict_mode():
    """Test that schema healing only happens in strict mode.

    strict=False must leave optional fields untouched (no required promotion,
    no null union, no "strict" key); strict=True must promote every optional
    field to required and widen its type to include null. Both outputs still
    validate as STRICT_COMPLIANT because the validator itself is relaxed.
    """
    mcp_tool = MCPTool(
        name="test_tool",
        description="Test that healing only happens in strict mode",
        inputSchema={
            "type": "object",
            "properties": {
                "required_field": {"type": "string", "description": "Already required"},
                "optional_field1": {"type": "integer", "description": "Optional 1"},
                "optional_field2": {"type": "boolean", "description": "Optional 2"},
            },
            "required": ["required_field"],
            "additionalProperties": False,
        },
    )
    # Test with strict=False - no healing
    non_strict = generate_tool_schema_for_mcp(mcp_tool, append_heartbeat=False, strict=False)
    assert "strict" not in non_strict  # strict flag not set
    assert non_strict["parameters"]["required"] == ["required_field"]  # Only originally required field
    assert non_strict["parameters"]["properties"]["required_field"]["type"] == "string"
    assert non_strict["parameters"]["properties"]["optional_field1"]["type"] == "integer"  # No null
    assert non_strict["parameters"]["properties"]["optional_field2"]["type"] == "boolean"  # No null
    # Test with strict=True - healing happens
    strict = generate_tool_schema_for_mcp(mcp_tool, append_heartbeat=False, strict=True)
    assert strict["strict"] is True  # strict flag is set
    assert set(strict["parameters"]["required"]) == {"required_field", "optional_field1", "optional_field2"}
    assert strict["parameters"]["properties"]["required_field"]["type"] == "string"
    assert set(strict["parameters"]["properties"]["optional_field1"]["type"]) == {"integer", "null"}
    assert set(strict["parameters"]["properties"]["optional_field2"]["type"]) == {"boolean", "null"}
    # Both should be strict compliant (validator is relaxed)
    status1, _ = validate_complete_json_schema(non_strict["parameters"])
    status2, _ = validate_complete_json_schema(strict["parameters"])
    assert status1 == SchemaHealth.STRICT_COMPLIANT
    assert status2 == SchemaHealth.STRICT_COMPLIANT
def test_mcp_schema_with_uuid_format_required_field():
    """Deduplication keeps the more-specific uuid-format string entry for a required field."""
    tool = MCPTool(
        name="test_tool",
        description="A test tool with required UUID formatted field",
        inputSchema={
            "type": "object",
            "properties": {
                "session_id": {
                    "anyOf": [{"type": "string"}, {"format": "uuid", "type": "string"}],
                    "description": "Session ID that must be a string with UUID format",
                },
            },
            "required": ["session_id"],  # already required, so no null union is added
            "additionalProperties": False,
        },
    )

    healed = generate_tool_schema_for_mcp(tool, append_heartbeat=False, strict=True)
    params = healed["parameters"]
    session_schema = params["properties"]["session_id"]

    # The two string variants collapse into the single, more specific one.
    assert "anyOf" in session_schema
    assert len(session_schema["anyOf"]) == 1
    anyof_types = [entry.get("type") for entry in session_schema["anyOf"]]
    assert anyof_types.count("string") == 1
    assert "null" not in anyof_types  # required fields are not made nullable

    survivor = session_schema["anyOf"][0]
    assert survivor.get("type") == "string"
    assert survivor.get("format") == "uuid", "UUID format should be preserved"

    assert "session_id" in params["required"]

    health, _ = validate_complete_json_schema(params)
    assert health == SchemaHealth.STRICT_COMPLIANT
def test_mcp_schema_complex_nested_with_defs():
"""Test generating exact schema with nested Pydantic-like models using $defs."""
from letta.functions.mcp_client.types import MCPToolHealth
mcp_tool = MCPTool(
name="get_vehicle_configuration",
description="Get vehicle configuration details for a given model type and optional dealer info and customization options.\n\nArgs:\n model_type (VehicleModel): The vehicle model type selection.\n dealer_location (str | None): Dealer location identifier from registration system, if available.\n customization_options (CustomizationData | None): Customization preferences for the vehicle from user selections, if available.\n\nReturns:\n str: The vehicle configuration details.",
inputSchema={
"type": "object",
"properties": {
"model_type": {
"$ref": "#/$defs/VehicleModel",
"description": "The vehicle model type selection.",
"title": "Model Type",
},
"dealer_location": {
"anyOf": [{"type": "string"}, {"type": "null"}],
"default": None,
"description": "Dealer location identifier from registration system, if available.",
"title": "Dealer Location",
},
"customization_options": {
"anyOf": [{"$ref": "#/$defs/CustomizationData"}, {"type": "null"}],
"default": None,
"description": "Customization preferences for the vehicle from user selections, if available.",
"title": "Customization Options",
},
},
"required": ["model_type"],
"additionalProperties": False,
"$defs": {
"VehicleModel": {
"type": "string",
"enum": [
"sedan",
"suv",
"truck",
"coupe",
"hatchback",
"minivan",
"wagon",
"convertible",
"sports",
"luxury",
"electric",
"hybrid",
"compact",
"crossover",
"other",
"unknown",
],
"title": "VehicleModel",
},
"Feature": {
"type": "object",
"properties": {
"feature_id": {
"anyOf": [{"type": "string"}, {"type": "null"}],
"default": None,
"title": "Feature ID",
},
"category_code": {
"anyOf": [{"type": "integer"}, {"type": "null"}],
"default": None,
"title": "Category Code",
},
"variant_code": {
"anyOf": [{"type": "integer"}, {"type": "null"}],
"default": None,
"title": "Variant Code",
},
"package_level": {
"anyOf": [{"type": "integer"}, {"type": "null"}],
"default": None,
"title": "Package Level",
},
},
"title": "Feature",
"additionalProperties": False,
},
"CustomizationData": {
"type": "object",
"properties": {
"has_premium_package": {
"anyOf": [{"type": "boolean"}, {"type": "null"}],
"default": None,
"title": "Has Premium Package",
},
"has_multiple_trims": {
"anyOf": [{"type": "boolean"}, {"type": "null"}],
"default": None,
"title": "Has Multiple Trims",
},
"selected_features": {
"anyOf": [
{"type": "array", "items": {"$ref": "#/$defs/Feature"}},
{"type": "null"},
],
"default": None,
"title": "Selected Features",
},
},
"title": "CustomizationData",
"additionalProperties": False,
},
},
},
)
# Initialize health status to simulate what happens in the server
mcp_tool.health = MCPToolHealth(status=SchemaHealth.STRICT_COMPLIANT.value, reasons=[])
# Generate schema with heartbeat
schema = generate_tool_schema_for_mcp(mcp_tool, append_heartbeat=True, strict=False)
# Add metadata fields (these are normally added by ToolCreate.from_mcp)
from letta.schemas.tool import MCP_TOOL_METADATA_SCHEMA_STATUS, MCP_TOOL_METADATA_SCHEMA_WARNINGS
schema[MCP_TOOL_METADATA_SCHEMA_STATUS] = mcp_tool.health.status
schema[MCP_TOOL_METADATA_SCHEMA_WARNINGS] = mcp_tool.health.reasons
# Expected schema
expected_schema = {
"name": "get_vehicle_configuration",
"description": "Get vehicle configuration details for a given model type and optional dealer info and customization options.\n\nArgs:\n model_type (VehicleModel): The vehicle model type selection.\n dealer_location (str | None): Dealer location identifier from registration system, if available.\n customization_options (CustomizationData | None): Customization preferences for the vehicle from user selections, if available.\n\nReturns:\n str: The vehicle configuration details.",
"parameters": {
"$defs": {
"Feature": {
"properties": {
"feature_id": {
"anyOf": [{"type": "string"}, {"type": "null"}],
"default": None,
"title": "Feature ID",
},
"category_code": {
"anyOf": [{"type": "integer"}, {"type": "null"}],
"default": None,
"title": "Category Code",
},
"variant_code": {
"anyOf": [{"type": "integer"}, {"type": "null"}],
"default": None,
"title": "Variant Code",
},
"package_level": {
"anyOf": [{"type": "integer"}, {"type": "null"}],
"default": None,
"title": "Package Level",
},
},
"title": "Feature",
"type": "object",
"additionalProperties": False,
},
"CustomizationData": {
"properties": {
"has_premium_package": {
"anyOf": [{"type": "boolean"}, {"type": "null"}],
"default": None,
"title": "Has Premium Package",
},
"has_multiple_trims": {
"anyOf": [{"type": "boolean"}, {"type": "null"}],
"default": None,
"title": "Has Multiple Trims",
},
"selected_features": {
"anyOf": [
{"items": {"$ref": "#/$defs/Feature"}, "type": "array"},
{"type": "null"},
],
"default": None,
"title": "Selected Features",
},
},
"title": "CustomizationData",
"type": "object",
"additionalProperties": False,
},
"VehicleModel": {
"enum": [
"sedan",
"suv",
"truck",
"coupe",
"hatchback",
"minivan",
"wagon",
"convertible",
"sports",
"luxury",
"electric",
"hybrid",
"compact",
"crossover",
"other",
"unknown",
],
"title": "VehicleModel",
"type": "string",
},
},
"properties": {
"model_type": {
"$ref": "#/$defs/VehicleModel",
"description": "The vehicle model type selection.",
"title": "Model Type",
"type": "string",
},
"dealer_location": {
"anyOf": [{"type": "string"}, {"type": "null"}],
"default": None,
"description": "Dealer location identifier from registration system, if available.",
"title": "Dealer Location",
},
"customization_options": {
"anyOf": [
{
"type": "object",
"title": "CustomizationData",
"additionalProperties": False,
"properties": {
"has_premium_package": {
"anyOf": [{"type": "boolean"}, {"type": "null"}],
"default": None,
"title": "Has Premium Package",
},
"has_multiple_trims": {
"anyOf": [{"type": "boolean"}, {"type": "null"}],
"default": None,
"title": "Has Multiple Trims",
},
"selected_features": {
"anyOf": [
{
"type": "array",
"items": {
"type": "object",
"title": "Feature",
"additionalProperties": False,
"properties": {
"feature_id": {
"anyOf": [{"type": "string"}, {"type": "null"}],
"default": None,
"title": "Feature ID",
},
"category_code": {
"anyOf": [{"type": "integer"}, {"type": "null"}],
"default": None,
"title": "Category Code",
},
"variant_code": {
"anyOf": [{"type": "integer"}, {"type": "null"}],
"default": None,
"title": "Variant Code",
},
"package_level": {
"anyOf": [{"type": "integer"}, {"type": "null"}],
"default": None,
"title": "Package Level",
},
},
},
},
{"type": "null"},
],
"default": None,
"title": "Selected Features",
},
},
},
{"type": "null"},
],
"default": None,
"description": "Customization preferences for the vehicle from user selections, if available.",
"title": "Customization Options",
},
"request_heartbeat": {
"type": "boolean",
"description": "Request an immediate heartbeat after function execution. You MUST set this value to `True` if you want to send a follow-up message or run a follow-up tool call (chain multiple tools together). If set to `False` (the default), then the chain of execution will end immediately after this function call.",
},
},
"required": ["model_type", "request_heartbeat"],
"type": "object",
"additionalProperties": False,
},
"mcp:SCHEMA_STATUS": "STRICT_COMPLIANT",
"mcp:SCHEMA_WARNINGS": [],
}
# Compare key components
assert schema["name"] == expected_schema["name"]
assert schema["description"] == expected_schema["description"]
assert schema["parameters"]["type"] == expected_schema["parameters"]["type"]
assert schema["parameters"]["additionalProperties"] == expected_schema["parameters"]["additionalProperties"]
assert set(schema["parameters"]["required"]) == set(expected_schema["parameters"]["required"])
# Check $defs
assert "$defs" in schema["parameters"]
assert set(schema["parameters"]["$defs"].keys()) == set(expected_schema["parameters"]["$defs"].keys())
# Check properties
assert "model_type" in schema["parameters"]["properties"]
assert "dealer_location" in schema["parameters"]["properties"]
assert "customization_options" in schema["parameters"]["properties"]
assert "request_heartbeat" in schema["parameters"]["properties"]
# Verify model_type property ($ref is inlined)
model_prop = schema["parameters"]["properties"]["model_type"]
assert model_prop["type"] == "string"
assert "enum" in model_prop, "$ref should be inlined with enum values"
assert model_prop["description"] == "The vehicle model type selection."
# Verify dealer_location property (anyOf preserved)
dl_prop = schema["parameters"]["properties"]["dealer_location"]
assert "anyOf" in dl_prop, "anyOf should be preserved for optional primitives"
assert len(dl_prop["anyOf"]) == 2
types_in_anyof = {opt.get("type") for opt in dl_prop["anyOf"]}
assert types_in_anyof == {"string", "null"}
assert dl_prop["description"] == "Dealer location identifier from registration system, if available."
# Verify customization_options property (anyOf with fully inlined $refs)
co_prop = schema["parameters"]["properties"]["customization_options"]
assert "anyOf" in co_prop, "Should use anyOf structure"
assert len(co_prop["anyOf"]) == 2, "Should have object and null options"
# Find the object option in anyOf
object_option = next((opt for opt in co_prop["anyOf"] if opt.get("type") == "object"), None)
assert object_option is not None, "Should have object type in anyOf"
assert object_option["additionalProperties"] is False, "Object must have additionalProperties: false"
assert "properties" in object_option, "$ref should be fully inlined with properties"
# Verify the inlined properties are present
assert "has_premium_package" in object_option["properties"]
assert "has_multiple_trims" in object_option["properties"]
assert "selected_features" in object_option["properties"]
# Verify nested selected_features array has inlined Feature objects
features_prop = object_option["properties"]["selected_features"]
assert "anyOf" in features_prop, "selected_features should have anyOf"
array_option = next((opt for opt in features_prop["anyOf"] if opt.get("type") == "array"), None)
assert array_option is not None
assert "items" in array_option
assert array_option["items"]["type"] == "object"
assert array_option["items"]["additionalProperties"] is False
assert "feature_id" in array_option["items"]["properties"]
assert "category_code" in array_option["items"]["properties"]
# Verify metadata fields
assert schema[MCP_TOOL_METADATA_SCHEMA_STATUS] == "STRICT_COMPLIANT"
assert schema[MCP_TOOL_METADATA_SCHEMA_WARNINGS] == []
# Should be strict compliant
status, _ = validate_complete_json_schema(schema["parameters"])
assert status == SchemaHealth.STRICT_COMPLIANT
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/mcp_tests/test_mcp_schema_validation.py",
"license": "Apache License 2.0",
"lines": 727,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/mcp_tests/test_schema_validator.py | """
Unit tests for the JSON Schema validator for OpenAI strict mode compliance.
"""
from letta.functions.schema_validator import SchemaHealth, validate_complete_json_schema
class TestSchemaValidator:
    """Test cases for the schema validator.

    Each test hand-builds a small JSON Schema, runs it through
    ``validate_complete_json_schema``, and asserts on the returned
    ``(SchemaHealth, reasons)`` pair. The three possible verdicts are
    STRICT_COMPLIANT, NON_STRICT_ONLY, and INVALID.
    """
    def test_valid_strict_compliant_schema(self):
        """Test a fully strict-compliant schema."""
        schema = {
            "type": "object",
            "properties": {
                "name": {"type": "string", "description": "The name of the user"},
                "age": {"type": "integer", "description": "The age of the user"},
                "address": {
                    "type": "object",
                    "properties": {"street": {"type": "string"}, "city": {"type": "string"}},
                    "required": ["street", "city"],
                    "additionalProperties": False,
                },
            },
            "required": ["name", "age", "address"],  # All properties must be required for strict mode
            "additionalProperties": False,
        }
        status, reasons = validate_complete_json_schema(schema)
        assert status == SchemaHealth.STRICT_COMPLIANT
        assert reasons == []
    def test_free_form_object_non_strict(self):
        """Test that free-form objects are marked as NON_STRICT_ONLY."""
        schema = {
            "type": "object",
            "properties": {
                "message": {
                    "type": "object",
                    "description": "A message object",
                    # Missing additionalProperties: false makes this free-form
                }
            },
            "required": ["message"],
            "additionalProperties": False,
        }
        status, reasons = validate_complete_json_schema(schema)
        assert status == SchemaHealth.NON_STRICT_ONLY
        # The validator should name the missing additionalProperties constraint.
        assert any("additionalProperties" in reason for reason in reasons)
    def test_empty_object_in_required_invalid(self):
        """Test that required properties allowing empty objects are marked INVALID."""
        schema = {
            "type": "object",
            "properties": {
                "config": {"type": "object", "properties": {}, "required": [], "additionalProperties": False}  # Empty object schema
            },
            "required": ["config"],  # Required but allows empty object
            "additionalProperties": False,
        }
        status, reasons = validate_complete_json_schema(schema)
        assert status == SchemaHealth.INVALID
        assert any("empty object" in reason for reason in reasons)
    def test_missing_type_invalid(self):
        """Test that schemas missing type are marked INVALID."""
        schema = {
            # Missing "type": "object"
            "properties": {"name": {"type": "string"}},
            "required": ["name"],
        }
        status, reasons = validate_complete_json_schema(schema)
        assert status == SchemaHealth.INVALID
        assert any("type" in reason.lower() for reason in reasons)
    def test_missing_items_in_array_invalid(self):
        """Test that arrays without items definition are marked INVALID."""
        schema = {
            "type": "object",
            "properties": {
                "tags": {
                    "type": "array"
                    # Missing "items" definition
                }
            },
            "required": ["tags"],
            "additionalProperties": False,
        }
        status, reasons = validate_complete_json_schema(schema)
        assert status == SchemaHealth.INVALID
        assert any("items" in reason for reason in reasons)
    def test_required_property_not_in_properties_invalid(self):
        """Test that required properties not defined in properties are marked INVALID."""
        schema = {
            "type": "object",
            "properties": {"name": {"type": "string"}},
            "required": ["name", "email"],  # "email" not in properties
            "additionalProperties": False,
        }
        status, reasons = validate_complete_json_schema(schema)
        assert status == SchemaHealth.INVALID
        assert any("email" in reason and "not found" in reason for reason in reasons)
    def test_nested_object_validation(self):
        """Test that nested objects are properly validated."""
        schema = {
            "type": "object",
            "properties": {
                "user": {
                    "type": "object",
                    "properties": {
                        "profile": {
                            "type": "object",
                            "properties": {"bio": {"type": "string"}},
                            # Missing additionalProperties and required
                        }
                    },
                    "required": ["profile"],
                    "additionalProperties": False,
                }
            },
            "required": ["user"],
            "additionalProperties": False,
        }
        status, reasons = validate_complete_json_schema(schema)
        assert status == SchemaHealth.NON_STRICT_ONLY
        # Should have warnings about nested profile object
        assert any("profile" in reason.lower() or "properties.profile" in reason for reason in reasons)
    def test_union_types_with_anyof(self):
        """Test schemas with anyOf union types."""
        schema = {
            "type": "object",
            "properties": {"value": {"anyOf": [{"type": "string"}, {"type": "number"}]}},
            "required": ["value"],
            "additionalProperties": False,
        }
        status, reasons = validate_complete_json_schema(schema)
        assert status == SchemaHealth.STRICT_COMPLIANT
        assert reasons == []
    def test_array_with_proper_items(self):
        """Test arrays with properly defined items."""
        schema = {
            "type": "object",
            "properties": {
                "items": {
                    "type": "array",
                    "items": {
                        "type": "object",
                        "properties": {"id": {"type": "string"}, "value": {"type": "number"}},
                        "required": ["id", "value"],
                        "additionalProperties": False,
                    },
                }
            },
            "required": ["items"],
            "additionalProperties": False,
        }
        status, reasons = validate_complete_json_schema(schema)
        assert status == SchemaHealth.STRICT_COMPLIANT
        assert reasons == []
    def test_empty_array_in_required_invalid(self):
        """Required array properties with a defined items schema are STRICT_COMPLIANT
        (empty arrays are acceptable), despite this test's legacy name."""
        schema = {
            "type": "object",
            "properties": {
                "tags": {
                    "type": "array",
                    "items": {"type": "string"},
                    # No minItems constraint, allows empty array
                }
            },
            "required": ["tags"],
            "additionalProperties": False,
        }
        # This should actually be STRICT_COMPLIANT since empty arrays with defined items are OK
        status, _reasons = validate_complete_json_schema(schema)
        assert status == SchemaHealth.STRICT_COMPLIANT
    def test_array_without_constraints_invalid(self):
        """Test that arrays without any constraints in required props are invalid."""
        schema = {
            "type": "object",
            "properties": {
                "data": {
                    "type": "array"
                    # No items defined at all - completely unconstrained
                }
            },
            "required": ["data"],
            "additionalProperties": False,
        }
        status, reasons = validate_complete_json_schema(schema)
        assert status == SchemaHealth.INVALID
        assert any("items" in reason for reason in reasons)
    def test_non_dict_schema(self):
        """Test that non-dict schemas are marked INVALID."""
        schema = "not a dict"
        status, reasons = validate_complete_json_schema(schema)
        assert status == SchemaHealth.INVALID
        assert any("dict" in reason for reason in reasons)
    def test_schema_with_defaults_non_strict(self):
        """Test that root-level schemas without required field are STRICT_COMPLIANT (validator is relaxed)."""
        schema = {
            "type": "object",
            "properties": {"name": {"type": "string"}, "optional": {"type": "string"}},
            # Missing "required" field at root level - validator now accepts this
            "additionalProperties": False,
        }
        status, reasons = validate_complete_json_schema(schema)
        # Validator is relaxed - schemas with optional fields are now STRICT_COMPLIANT
        assert status == SchemaHealth.STRICT_COMPLIANT
        assert reasons == []
    def test_root_level_without_required_non_strict(self):
        """Test that root-level objects without 'required' field are STRICT_COMPLIANT (validator is relaxed)."""
        schema = {
            "type": "object",
            "properties": {
                "name": {"type": "string"},
                "age": {"type": "integer"},
            },
            # No "required" field at root level - validator now accepts this
            "additionalProperties": False,
        }
        status, reasons = validate_complete_json_schema(schema)
        # Validator is relaxed - accepts schemas without required field
        assert status == SchemaHealth.STRICT_COMPLIANT
        assert reasons == []
    def test_nested_object_without_required_non_strict(self):
        """Test that nested objects without 'required' are STRICT_COMPLIANT (validator is relaxed)."""
        schema = {
            "type": "object",
            "properties": {
                "user": {
                    "type": "object",
                    "properties": {
                        "preferences": {
                            "type": "object",
                            "properties": {"theme": {"type": "string"}, "language": {"type": "string"}},
                            # Missing "required" field in nested object
                            "additionalProperties": False,
                        },
                        "name": {"type": "string"},
                    },
                    "required": ["name"],  # Don't require preferences so it's not marked INVALID
                    "additionalProperties": False,
                }
            },
            "required": ["user"],
            "additionalProperties": False,
        }
        status, reasons = validate_complete_json_schema(schema)
        assert status == SchemaHealth.STRICT_COMPLIANT
        assert reasons == []
    def test_user_example_schema_non_strict(self):
        """Test the user's example schema with optional properties - now STRICT_COMPLIANT (validator is relaxed)."""
        schema = {
            "type": "object",
            "properties": {
                "a": {"title": "A", "type": "integer"},
                "b": {"anyOf": [{"type": "integer"}, {"type": "null"}], "default": None, "title": "B"},
            },
            "required": ["a"],  # Only 'a' is required, 'b' is not
            "additionalProperties": False,
        }
        status, reasons = validate_complete_json_schema(schema)
        assert status == SchemaHealth.STRICT_COMPLIANT
        assert reasons == []
    def test_all_properties_required_strict_compliant(self):
        """Test that schemas with all properties required are STRICT_COMPLIANT."""
        schema = {
            "type": "object",
            "properties": {
                "a": {"title": "A", "type": "integer"},
                "b": {"anyOf": [{"type": "integer"}, {"type": "null"}], "default": None, "title": "B"},
            },
            "required": ["a", "b"],  # All properties are required
            "additionalProperties": False,
        }
        status, reasons = validate_complete_json_schema(schema)
        assert status == SchemaHealth.STRICT_COMPLIANT
        assert reasons == []
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/mcp_tests/test_schema_validator.py",
"license": "Apache License 2.0",
"lines": 270,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/mcp_tests/weather/weather.py | from typing import Any
import httpx
from mcp.server.fastmcp import FastMCP
# Initialize FastMCP server
mcp = FastMCP("weather")  # "weather" is the server name advertised to MCP clients
# Constants
NWS_API_BASE = "https://api.weather.gov"  # National Weather Service public API root
USER_AGENT = "weather-app/1.0"  # identifying User-Agent string sent with every NWS request
async def make_nws_request(url: str) -> dict[str, Any] | None:
    """Fetch *url* from the NWS API and return the parsed JSON body.

    Any failure — connection error, timeout, HTTP error status, or an
    unparsable body — is collapsed into a ``None`` return.
    """
    request_headers = {
        "User-Agent": USER_AGENT,
        "Accept": "application/geo+json",
    }
    async with httpx.AsyncClient() as http:
        try:
            resp = await http.get(url, headers=request_headers, timeout=30.0)
            resp.raise_for_status()
            payload = resp.json()
        except Exception:
            return None
    return payload
def format_alert(feature: dict) -> str:
    """Render one NWS alert feature as a human-readable block of text."""
    props = feature["properties"]
    # (display label, NWS property key, fallback when the key is absent)
    field_specs = (
        ("Event", "event", "Unknown"),
        ("Area", "areaDesc", "Unknown"),
        ("Severity", "severity", "Unknown"),
        ("Description", "description", "No description available"),
        ("Instructions", "instruction", "No specific instructions provided"),
    )
    body = "\n".join(f"{label}: {props.get(key, fallback)}" for label, key, fallback in field_specs)
    # Leading/trailing newlines reproduce the original triple-quoted template exactly.
    return "\n" + body + "\n"
@mcp.tool()
async def get_alerts(state: str) -> str:
    """Get weather alerts for a US state.
    Args:
        state: Two-letter US state code (e.g. CA, NY)
    """
    endpoint = f"{NWS_API_BASE}/alerts/active/area/{state}"
    payload = await make_nws_request(endpoint)
    # Distinguish a failed/malformed response from a valid-but-empty one.
    if not payload or "features" not in payload:
        return "Unable to fetch alerts or no alerts found."
    features = payload["features"]
    if not features:
        return "No active alerts for this state."
    return "\n---\n".join(format_alert(feature) for feature in features)
@mcp.tool()
async def get_forecast(latitude: float, longitude: float) -> str:
    """Get weather forecast for a location.
    Args:
        latitude: Latitude of the location
        longitude: Longitude of the location
    """
    # Step 1: resolve the lat/lon pair to its NWS forecast-grid endpoint.
    points_data = await make_nws_request(f"{NWS_API_BASE}/points/{latitude},{longitude}")
    if not points_data:
        return "Unable to fetch forecast data for this location."
    # Step 2: fetch the actual forecast from the URL embedded in the points response.
    forecast_data = await make_nws_request(points_data["properties"]["forecast"])
    if not forecast_data:
        return "Unable to fetch detailed forecast."
    # Step 3: render the next 5 periods in the original template's exact format.
    rendered = []
    for period in forecast_data["properties"]["periods"][:5]:
        rendered.append(
            "\n"
            f"{period['name']}:\n"
            f"Temperature: {period['temperature']}°{period['temperatureUnit']}\n"
            f"Wind: {period['windSpeed']} {period['windDirection']}\n"
            f"Forecast: {period['detailedForecast']}\n"
        )
    return "\n---\n".join(rendered)
if __name__ == "__main__":
    # Initialize and run the server
    # stdio transport: an MCP client spawns this script and talks over stdin/stdout.
    mcp.run(transport="stdio")
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/mcp_tests/weather/weather.py",
"license": "Apache License 2.0",
"lines": 74,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/performance_tests/test_agent_mass_creation.py | import logging
import os
import threading
import time
import uuid
from concurrent.futures import ThreadPoolExecutor, as_completed
import matplotlib.pyplot as plt
import pandas as pd
import pytest
from dotenv import load_dotenv
from letta_client import Letta
from tqdm import tqdm
from letta.schemas.block import Block
from letta.schemas.embedding_config import EmbeddingConfig
from letta.schemas.llm_config import LLMConfig
from letta.services.block_manager import BlockManager
# Silence noisy per-request logs from the HTTP client libraries during load tests.
logging.getLogger("httpx").setLevel(logging.WARNING)
logging.getLogger("httpcore").setLevel(logging.WARNING)
# --- Server Management --- #
def _run_server():
    """Starts the Letta server in a background thread."""
    load_dotenv()  # read .env before the server import so its config sees those values
    from letta.server.rest_api.app import start_server  # local import: only needed when we self-host
    start_server(debug=True)
@pytest.fixture(scope="session")
def server_url():
    """Ensure a Letta server is reachable and return its base URL.

    If LETTA_SERVER_URL is unset (or empty), an in-process server is started
    on a daemon thread; otherwise the configured URL is used as-is.
    """
    configured = os.getenv("LETTA_SERVER_URL")
    if not configured:
        server_thread = threading.Thread(target=_run_server, daemon=True)
        server_thread.start()
        time.sleep(2)  # Allow server startup time
    return configured if configured is not None else "http://localhost:8283"
# --- Client Setup --- #
@pytest.fixture(scope="session")
def client(server_url):
    """Creates a REST client for testing."""
    # Session scope: one client instance is shared by every test in the run.
    client = Letta(base_url=server_url)
    yield client
@pytest.fixture()
def roll_dice_tool(client):
    # NOTE(review): the inner function's docstring presumably feeds the tool
    # schema generated by upsert_from_function — keep it byte-identical.
    def roll_dice():
        """
        Rolls a 6 sided die.
        Returns:
            str: The roll result.
        """
        return "Rolled a 10!"
    tool = client.tools.upsert_from_function(func=roll_dice)
    # Yield the created tool
    yield tool
@pytest.fixture()
def rethink_tool(client):
    # NOTE(review): the inner function's docstring presumably becomes the tool
    # description shown to the agent — keep it byte-identical.
    def rethink_memory(agent_state: "AgentState", new_memory: str, target_block_label: str) -> str:  # type: ignore # noqa: F821
        """
        Re-evaluate the memory in block_name, integrating new and updated facts.
        Replace outdated information with the most likely truths, avoiding redundancy with original memories.
        Ensure consistency with other memory blocks.
        Args:
            new_memory (str): The new memory with information integrated from the memory block. If there is no new information, then this should be the same as the content in the source block.
            target_block_label (str): The name of the block to write to.
        Returns:
            str: None is always returned as this function does not produce a response.
        """
        agent_state.memory.update_block_value(label=target_block_label, value=new_memory)
        return None
    tool = client.tools.upsert_from_function(func=rethink_memory)
    yield tool
@pytest.fixture
def default_block(default_user):
    """Fixture to create and return a default block."""
    manager = BlockManager()
    template = Block(
        label="default_label",
        value="Default Block Content",
        description="A default test block",
        limit=1000,
        metadata={"type": "test"},
    )
    # Persist (or update) the block on behalf of the default test user.
    yield manager.create_or_update_block(template, actor=default_user)
@pytest.fixture(scope="function")
def agent_state(client, roll_dice_tool, weather_tool, rethink_tool):
    # NOTE(review): `weather_tool` is not defined in this module — presumably
    # supplied by a conftest.py; confirm before running this file standalone.
    agent_state = client.agents.create(
        name=f"test_compl_{str(uuid.uuid4())[5:]}",  # random suffix avoids name collisions across runs
        tool_ids=[roll_dice_tool.id, weather_tool.id, rethink_tool.id],
        include_base_tools=True,
        memory_blocks=[
            {
                "label": "human",
                "value": "Name: Matt",
            },
            {
                "label": "persona",
                "value": "Friendly agent",
            },
        ],
        llm_config=LLMConfig.default_config(model_name="gpt-4o-mini"),
        embedding_config=EmbeddingConfig.default_config(provider="openai"),
    )
    yield agent_state
    # Teardown: delete the agent so repeated runs don't accumulate state.
    client.agents.delete(agent_state.id)
# --- Load Test --- #
def create_agents_for_user(client, roll_dice_tool, rethink_tool, user_index: int) -> tuple:
    """Create agents and return E2E latencies in seconds along with user index.

    First pre-creates 10 blocks for this simulated user, then creates 100
    agents attached to those blocks, timing each creation call end-to-end.
    """
    # Pre-create the shared blocks every agent of this user will attach.
    num_blocks = 10
    created_blocks = [
        client.blocks.create(
            label=f"user{user_index}_block{i}",
            value="Default Block Content",
            description="A default test block",
            limit=1000,
            metadata={"index": str(i)},
        )
        for i in range(num_blocks)
    ]
    block_ids = [blk.id for blk in created_blocks]
    # Create the agents sequentially, recording one latency sample per call.
    agent_latencies = []
    num_agents_per_user = 100
    for agent_idx in range(num_agents_per_user):
        started = time.time()
        client.agents.create(
            name=f"user{user_index}_agent_{str(uuid.uuid4())[5:]}",
            tool_ids=[roll_dice_tool.id, rethink_tool.id],
            include_base_tools=True,
            memory_blocks=[
                {"label": "human", "value": "Name: Matt"},
                {"label": "persona", "value": "Friendly agent"},
            ],
            model="openai/gpt-4o",
            embedding_config=EmbeddingConfig.default_config(provider="openai"),
            block_ids=block_ids,
        )
        elapsed = time.time() - started
        agent_latencies.append({"user_index": user_index, "agent_index": agent_idx, "latency": elapsed})
    return user_index, agent_latencies
def plot_agent_creation_latencies(latency_data):
    """
    Plot the distribution of agent creation latencies.
    Args:
        latency_data: List of dictionaries with latency information
            (keys used: "user_index", "agent_index", "latency").
    Returns:
        dict: Summary statistics (mean/median/min/max/std/count) plus the
        path of the saved image under key "plot_file".
    """
    # Convert to DataFrame for easier analysis
    df = pd.DataFrame(latency_data)
    # Overall latency distribution
    plt.figure(figsize=(12, 10))
    # Plot 1: Overall latency histogram
    plt.subplot(2, 2, 1)
    plt.hist(df["latency"], bins=30, alpha=0.7, color="blue")
    plt.title(f"Agent Creation Latency Distribution (n={len(df)})")
    plt.xlabel("Latency (seconds)")
    plt.ylabel("Frequency")
    plt.grid(True, alpha=0.3)
    # Plot 2: Latency by user (boxplot)
    plt.subplot(2, 2, 2)
    user_groups = df.groupby("user_index")
    plt.boxplot([group["latency"] for _, group in user_groups])
    plt.title("Latency Distribution by User")
    plt.xlabel("User Index")
    plt.ylabel("Latency (seconds)")
    plt.xticks(range(1, len(user_groups) + 1), sorted(df["user_index"].unique()))
    plt.grid(True, alpha=0.3)
    # Plot 3: Time series of latencies
    plt.subplot(2, 1, 2)
    for user_idx in sorted(df["user_index"].unique()):
        user_data = df[df["user_index"] == user_idx]
        plt.plot(user_data["agent_index"], user_data["latency"], marker=".", linestyle="-", alpha=0.7, label=f"User {user_idx}")
    plt.title("Agent Creation Latency Over Time")
    plt.xlabel("Agent Creation Sequence")
    plt.ylabel("Latency (seconds)")
    plt.legend(loc="upper right")
    plt.grid(True, alpha=0.3)
    # Add statistics as text
    stats_text = (
        f"Mean: {df['latency'].mean():.2f}s\n"
        f"Median: {df['latency'].median():.2f}s\n"
        f"Min: {df['latency'].min():.2f}s\n"
        f"Max: {df['latency'].max():.2f}s\n"
        f"Std Dev: {df['latency'].std():.2f}s"
    )
    plt.figtext(0.02, 0.02, stats_text, fontsize=10, bbox=dict(facecolor="white", alpha=0.8))
    plt.tight_layout()
    # Save the plot (timestamped filename so repeated runs don't overwrite each other)
    plot_file = f"agent_creation_latency_plot_{time.strftime('%Y%m%d_%H%M%S')}.png"
    plt.savefig(plot_file)
    plt.close()
    print(f"Latency plot saved to {plot_file}")
    # Return statistics for reporting
    return {
        "mean": df["latency"].mean(),
        "median": df["latency"].median(),
        "min": df["latency"].min(),
        "max": df["latency"].max(),
        "std": df["latency"].std(),
        "count": len(df),
        "plot_file": plot_file,
    }
@pytest.mark.slow
def test_parallel_create_many_agents(client, roll_dice_tool, rethink_tool):
    """Load test: fan out agent creation for several simulated users in parallel
    and report/plot the per-call latency distribution."""
    num_users = 7
    max_workers = min(num_users, 20)
    # To collect all latency data across users
    all_latency_data = []
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        futures = {
            executor.submit(create_agents_for_user, client, roll_dice_tool, rethink_tool, user_idx): user_idx
            for user_idx in range(num_users)
        }
        with tqdm(total=num_users, desc="Creating agents") as pbar:
            for future in as_completed(futures):
                try:
                    user_idx, user_latencies = future.result()
                    all_latency_data.extend(user_latencies)
                    # Calculate and display per-user statistics
                    latencies = [data["latency"] for data in user_latencies]
                    avg_latency = sum(latencies) / len(latencies)
                    tqdm.write(f"[User {user_idx}] Completed {len(latencies)} agents")
                    tqdm.write(f"[User {user_idx}] Avg: {avg_latency:.2f}s, Min: {min(latencies):.2f}s, Max: {max(latencies):.2f}s")
                except Exception as e:
                    # A failed user batch is reported but does not abort the whole test.
                    user_idx = futures[future]
                    tqdm.write(f"[User {user_idx}] Error during agent creation: {str(e)}")
                pbar.update(1)
    if all_latency_data:
        # Plot all collected latency data
        stats = plot_agent_creation_latencies(all_latency_data)
        print("\n===== Agent Creation Latency Statistics =====")
        print(f"Total agents created: {stats['count']}")
        print(f"Mean latency: {stats['mean']:.2f} seconds")
        print(f"Median latency: {stats['median']:.2f} seconds")
        print(f"Min latency: {stats['min']:.2f} seconds")
        print(f"Max latency: {stats['max']:.2f} seconds")
        print(f"Standard deviation: {stats['std']:.2f} seconds")
        print(f"Latency plot saved to: {stats['plot_file']}")
        print("============================================")
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/performance_tests/test_agent_mass_creation.py",
"license": "Apache License 2.0",
"lines": 237,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/performance_tests/test_agent_mass_update.py | import logging
import os
import random
import threading
import time
import uuid
from concurrent.futures import ThreadPoolExecutor, as_completed
import matplotlib.pyplot as plt
import pandas as pd
import pytest
from dotenv import load_dotenv
from letta_client import Letta
from tqdm import tqdm
from letta.schemas.embedding_config import EmbeddingConfig
from letta.schemas.llm_config import LLMConfig
# Silence noisy per-request logs from the HTTP client libraries during load tests.
logging.getLogger("httpx").setLevel(logging.WARNING)
logging.getLogger("httpcore").setLevel(logging.WARNING)
# --- Server Management --- #
def _run_server():
    """Starts the Letta server in a background thread."""
    load_dotenv()  # read .env before the server import so its config sees those values
    from letta.server.rest_api.app import start_server  # local import: only needed when we self-host
    start_server(debug=True)
@pytest.fixture(scope="session")
def server_url():
    """Ensure a Letta server is reachable and return its base URL.

    If LETTA_SERVER_URL is unset (or empty), an in-process server is started
    on a daemon thread; otherwise the configured URL is used as-is.
    """
    configured = os.getenv("LETTA_SERVER_URL")
    if not configured:
        server_thread = threading.Thread(target=_run_server, daemon=True)
        server_thread.start()
        time.sleep(2)  # Allow server startup time
    return configured if configured is not None else "http://localhost:8283"
# --- Client Setup --- #
@pytest.fixture(scope="session")
def client(server_url):
    """Creates a REST client for testing."""
    # Session scope: one client instance is shared by every test in the run.
    client = Letta(base_url=server_url)
    yield client
@pytest.fixture()
def roll_dice_tool(client):
    # NOTE(review): the inner function's docstring presumably feeds the tool
    # schema generated by upsert_from_function — keep it byte-identical.
    def roll_dice():
        """
        Rolls a 6 sided die.
        Returns:
            str: The roll result.
        """
        return "Rolled a 10!"
    tool = client.tools.upsert_from_function(func=roll_dice)
    # Yield the created tool
    yield tool
@pytest.fixture()
def rethink_tool(client):
    # NOTE(review): the inner function's docstring presumably becomes the tool
    # description shown to the agent — keep it byte-identical.
    def rethink_memory(agent_state: "AgentState", new_memory: str, target_block_label: str) -> str:  # type: ignore # noqa: F821
        """
        Re-evaluate the memory in block_name, integrating new and updated facts.
        Replace outdated information with the most likely truths, avoiding redundancy with original memories.
        Ensure consistency with other memory blocks.
        Args:
            new_memory (str): The new memory with information integrated from the memory block. If there is no new information, then this should be the same as the content in the source block.
            target_block_label (str): The name of the block to write to.
        Returns:
            str: None is always returned as this function does not produce a response.
        """
        agent_state.memory.update_block_value(label=target_block_label, value=new_memory)
        return None
    tool = client.tools.upsert_from_function(func=rethink_memory)
    yield tool
@pytest.fixture(scope="function")
def weather_tool(client):
    # NOTE(review): the inner function's docstring presumably becomes the tool
    # schema via upsert_from_function — keep it byte-identical.
    def get_weather(location: str) -> str:
        """
        Fetches the current weather for a given location.
        Parameters:
            location (str): The location to get the weather for.
        Returns:
            str: A formatted string describing the weather in the given location.
        Raises:
            RuntimeError: If the request to fetch weather data fails.
        """
        import requests
        url = f"https://wttr.in/{location}?format=%C+%t"
        response = requests.get(url)
        if response.status_code == 200:
            weather_data = response.text
            return f"The weather in {location} is {weather_data}."
        else:
            raise RuntimeError(f"Failed to get weather data, status code: {response.status_code}")
    tool = client.tools.upsert_from_function(func=get_weather)
    # Yield the created tool
    yield tool
# --- Load Test --- #
@pytest.mark.slow
def test_parallel_mass_update_agents_complex(client, roll_dice_tool, weather_tool, rethink_tool):
    """Load test: hammer agent-update endpoints from many threads at once.

    Creates 5 agents, pre-creates 10 blocks for each, then fires 10 concurrent
    `agents.modify` calls per agent (randomly swapping tools or attaching a
    block), records per-call latency, plots the distribution, and asserts the
    median latency stays under 2 seconds.
    """
    # 1) Create 5 agents WITHOUT the rethink_tool initially
    agent_ids = []
    for i in range(5):
        agent = client.agents.create(
            name=f"complex_agent_{i}_{uuid.uuid4().hex[:6]}",
            tool_ids=[roll_dice_tool.id, weather_tool.id],
            include_base_tools=False,
            memory_blocks=[
                {"label": "human", "value": "Name: Matt"},
                {"label": "persona", "value": "Friendly agent"},
            ],
            llm_config=LLMConfig.default_config("gpt-4o-mini"),
            embedding_config=EmbeddingConfig.default_config(provider="openai"),
        )
        agent_ids.append(agent.id)
    # 2) Pre-create 10 new blocks *per* agent
    per_agent_blocks = {}
    for aid in agent_ids:
        block_ids = []
        for j in range(10):
            blk = client.blocks.create(
                label=f"{aid[:6]}_blk{j}",
                value="Precreated block content",
                description="Load-test block",
                limit=500,
                metadata={"idx": str(j)},
            )
            block_ids.append(blk.id)
        per_agent_blocks[aid] = block_ids
    # 3) Dispatch 10 updates per agent in parallel
    # Fix: total_updates previously assumed 100 updates per agent while only 10
    # futures per agent are submitted below, so the tqdm total was 10x too big.
    total_updates = len(agent_ids) * 10
    latencies = []
    def do_update(agent_id: str):
        # Randomly either swap the agent's tool set or attach one of its
        # pre-created blocks; return the wall-clock latency of the call.
        start = time.time()
        if random.random() < 0.5:
            client.agents.modify(agent_id=agent_id, tool_ids=[rethink_tool.id])
        else:
            bid = random.choice(per_agent_blocks[agent_id])
            client.agents.modify(agent_id=agent_id, block_ids=[bid])
        return time.time() - start
    with ThreadPoolExecutor(max_workers=50) as executor:
        futures = [executor.submit(do_update, aid) for aid in agent_ids for _ in range(10)]
        for future in tqdm(as_completed(futures), total=total_updates, desc="Complex updates"):
            latencies.append(future.result())
    # 4) Cleanup
    for aid in agent_ids:
        client.agents.delete(aid)
    # 5) Plot latency distribution
    df = pd.DataFrame({"latency": latencies})
    plt.figure(figsize=(12, 6))
    plt.subplot(1, 2, 1)
    plt.hist(df["latency"], bins=30, edgecolor="black")
    plt.title("Update Latency Distribution")
    plt.xlabel("Latency (seconds)")
    plt.ylabel("Frequency")
    plt.subplot(1, 2, 2)
    plt.boxplot(df["latency"], vert=False)
    plt.title("Update Latency Boxplot")
    plt.xlabel("Latency (seconds)")
    plt.tight_layout()
    plot_file = f"complex_update_latency_{int(time.time())}.png"
    plt.savefig(plot_file)
    plt.close()
    # 6) Report summary
    mean = df["latency"].mean()
    median = df["latency"].median()
    minimum = df["latency"].min()
    maximum = df["latency"].max()
    stdev = df["latency"].std()
    print("\n===== Complex Update Latency Statistics =====")
    print(f"Total updates: {len(latencies)}")
    print(f"Mean: {mean:.3f}s")
    print(f"Median: {median:.3f}s")
    print(f"Min: {minimum:.3f}s")
    print(f"Max: {maximum:.3f}s")
    print(f"Std: {stdev:.3f}s")
    print(f"Plot saved to: {plot_file}")
    # Sanity assertion
    assert median < 2.0, f"Median update latency too high: {median:.3f}s"
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/performance_tests/test_agent_mass_update.py",
"license": "Apache License 2.0",
"lines": 172,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/performance_tests/test_insert_archival_memory.py | import asyncio
import logging
import os
import threading
import time
import uuid
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import pytest
from dotenv import load_dotenv
from faker import Faker
from letta_client import AsyncLetta
from tqdm import tqdm
from letta.schemas.embedding_config import EmbeddingConfig
from letta.schemas.llm_config import LLMConfig
# Silence per-request HTTP client logging so load-test output stays readable.
logging.getLogger("httpx").setLevel(logging.WARNING)
logging.getLogger("httpcore").setLevel(logging.WARNING)
# --- Server Management --- #
def _run_server():
    """Blocking entry point that boots the Letta REST server; run in a daemon thread."""
    load_dotenv()  # load env vars (keys, connection strings) before the app starts
    # Deferred import: the server stack is only needed when we actually boot one.
    from letta.server.rest_api.app import start_server
    start_server(debug=True)
@pytest.fixture(scope="session")
def server_url():
    """Return the base URL of a running Letta server, booting one if none is configured."""
    configured = os.getenv("LETTA_SERVER_URL")
    if configured:
        return configured
    # No external server configured: start an in-process one in the background.
    threading.Thread(target=_run_server, daemon=True).start()
    time.sleep(2)  # give the server a moment to come up
    return "http://localhost:8283"
# --- Client Setup --- #
@pytest.fixture(scope="session")
def client(server_url):
    """Session-scoped async REST client pointed at the test server."""
    yield AsyncLetta(base_url=server_url)
# --- Load Test --- #
# Number of agents created (one archival insert each) by the concurrency test below.
NUM_AGENTS = 30
@pytest.mark.asyncio
async def test_insert_archival_memories_concurrent(client):
    """Insert one archival passage per agent, all concurrently, and plot latency diagnostics.

    Creates NUM_AGENTS agents, fires one `passages.create` per agent via
    asyncio.gather, then renders six diagnostic subplots (kick-off timeline,
    per-call latency, histogram, CDF, concurrency curve, summary stats) to
    latency_diagnostics.png.
    """
    fake = Faker()
    # 1) Create agents
    agent_ids = []
    for i in tqdm(range(NUM_AGENTS), desc="Creating agents"):
        agent = await client.agents.create(
            name=f"complex_agent_{i}_{uuid.uuid4().hex[:6]}",
            include_base_tools=True,
            memory_blocks=[
                {"label": "human", "value": "Name: Matt"},
                {"label": "persona", "value": "Friendly agent"},
            ],
            llm_config=LLMConfig.default_config("gpt-4o-mini"),
            embedding_config=EmbeddingConfig.default_config(provider="openai"),
        )
        agent_ids.append(agent.id)
    # 2) Measure start and duration of each call
    timeline = []
    async def measure(agent_index, aid):
        # Records (call index, start timestamp, duration) for one passage insert.
        t0 = time.perf_counter()
        await client.agents.passages.create(agent_id=aid, text=fake.paragraph())
        t1 = time.perf_counter()
        timeline.append((agent_index, t0, t1 - t0))
    await asyncio.gather(*(measure(idx, aid) for idx, aid in enumerate(agent_ids)))
    # 3) Convert to arrays
    timeline.sort(key=lambda x: x[0])
    indices = np.array([t[0] for t in timeline])
    starts = np.array([t[1] for t in timeline])
    durs = np.array([t[2] for t in timeline])
    start_offset = starts - starts.min()
    print(f"Latency stats (s): min={durs.min():.3f}, mean={durs.mean():.3f}, max={durs.max():.3f}, std={durs.std():.3f}")
    # 4) Generate improved plots
    # Helper: concurrency over time — a +1 event at each call start and a -1 at
    # each call end, sorted by timestamp; the running sum is the in-flight count.
    events = np.concatenate([np.column_stack([starts, np.ones_like(starts)]), np.column_stack([starts + durs, -np.ones_like(durs)])])
    events = events[events[:, 0].argsort()]
    concurrency_t = np.cumsum(events[:, 1])
    concurrency_x = events[:, 0] - starts.min()
    # Helper: latency CDF
    durs_sorted = np.sort(durs)
    cdf_y = np.arange(1, len(durs_sorted) + 1) / len(durs_sorted)
    # Plot all 6 subplots
    _fig, axes = plt.subplots(2, 3, figsize=(15, 8))
    axs = axes.ravel()
    # 1) Kickoff timeline
    axs[0].scatter(indices, start_offset, s=15)
    axs[0].set_title("Kick-off timeline")
    axs[0].set_xlabel("Call index")
    axs[0].set_ylabel("Start offset (s)")
    # 2) Per-call latency
    axs[1].plot(indices, durs, marker="o", linestyle="")
    axs[1].set_title("Per-call latency")
    axs[1].set_xlabel("Call index")
    axs[1].set_ylabel("Duration (s)")
    # 3) Latency distribution (histogram)
    axs[2].hist(durs, bins="auto")
    axs[2].set_title("Latency distribution")
    axs[2].set_xlabel("Duration (s)")
    axs[2].set_ylabel("Count")
    # 4) Empirical CDF
    axs[3].step(durs_sorted, cdf_y, where="post")
    axs[3].set_title("Latency CDF")
    axs[3].set_xlabel("Duration (s)")
    axs[3].set_ylabel("Fraction ≤ x")
    # 5) Concurrency over time
    axs[4].step(concurrency_x, concurrency_t, where="post")
    axs[4].set_title("Concurrency vs. time")
    axs[4].set_xlabel("Time since first start (s)")
    axs[4].set_ylabel("# in-flight")
    # 6) Summary stats
    axs[5].axis("off")
    summary_text = (
        f"n = {len(durs)}\n"
        f"min = {durs.min():.3f} s\n"
        f"p50 = {np.percentile(durs, 50):.3f} s\n"
        f"mean = {durs.mean():.3f} s\n"
        f"p95 = {np.percentile(durs, 95):.3f} s\n"
        f"max = {durs.max():.3f} s\n"
        f"stdev = {durs.std():.3f} s"
    )
    axs[5].text(0.02, 0.98, summary_text, va="top", ha="left", fontsize=11, family="monospace", transform=axs[5].transAxes)
    plt.tight_layout()
    plt.savefig("latency_diagnostics.png", dpi=150)
    print("Saved latency_diagnostics.png")
@pytest.mark.asyncio
async def test_insert_large_archival_memory(client):
    """Time a single archival insert of a large document (data/paper1.txt)."""
    # Create a single agent to receive the large passage
    # (the previous comment claimed 30 agents; only one is created here)
    agent = await client.agents.create(
        include_base_tools=True,
        memory_blocks=[
            {"label": "human", "value": "Name: Matt"},
            {"label": "persona", "value": "Friendly agent"},
        ],
        llm_config=LLMConfig.default_config("gpt-4o-mini"),
        embedding_config=EmbeddingConfig.default_config(provider="openai"),
    )
    file_path = Path(__file__).parent / "data" / "paper1.txt"
    text = file_path.read_text()
    t0 = time.perf_counter()
    await client.agents.passages.create(agent_id=agent.id, text=text)
    t1 = time.perf_counter()
    print(f"Total time: {t1 - t0}")
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/performance_tests/test_insert_archival_memory.py",
"license": "Apache License 2.0",
"lines": 143,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/test_agent_serialization_v2.py | from typing import List, Optional
import pytest
from letta.agents.agent_loop import AgentLoop
from letta.config import LettaConfig
from letta.errors import AgentFileExportError, AgentFileImportError
from letta.orm import Base
from letta.schemas.agent import AgentType, CreateAgent
from letta.schemas.agent_file import (
AgentFileSchema,
AgentSchema,
BlockSchema,
FileSchema,
GroupSchema,
MessageSchema,
SourceSchema,
ToolSchema,
)
from letta.schemas.block import Block, CreateBlock
from letta.schemas.embedding_config import EmbeddingConfig
from letta.schemas.enums import MessageRole
from letta.schemas.group import ManagerType
from letta.schemas.llm_config import LLMConfig
from letta.schemas.message import MessageCreate
from letta.schemas.organization import Organization
from letta.schemas.run import Run
from letta.schemas.source import Source
from letta.schemas.user import User
from letta.server.server import SyncServer
from letta.services.agent_serialization_manager import AgentSerializationManager
from tests.utils import create_tool_from_func
# ------------------------------
# Fixtures
# ------------------------------
#
async def _clear_tables():
    """Delete every row from every ORM table, children first."""
    from letta.server.db import db_registry
    async with db_registry.async_session() as session:
        # Walk the tables in reverse dependency order so FK constraints hold;
        # the session context manager commits on exit.
        for tbl in reversed(Base.metadata.sorted_tables):
            await session.execute(tbl.delete())
@pytest.fixture(autouse=True)
async def clear_tables():
    """Autouse fixture: wipe all tables before each test for isolation."""
    await _clear_tables()
@pytest.fixture
async def server():
    """Boot a SyncServer with default org/user and base tools upserted."""
    cfg = LettaConfig.load()
    cfg.save()
    srv = SyncServer(init_with_default_org_and_user=True)
    await srv.init_async()
    await srv.tool_manager.upsert_base_tools_async(actor=srv.default_user)
    yield srv
@pytest.fixture
async def default_organization(server: SyncServer):
    """Create and yield the default organization."""
    yield await server.organization_manager.create_default_organization_async()
@pytest.fixture
async def default_user(server: SyncServer, default_organization):
    """Create and yield the default user inside the default organization."""
    yield await server.user_manager.create_default_actor_async(org_id=default_organization.id)
@pytest.fixture
async def other_organization(server: SyncServer):
    """Create and yield a second organization for cross-org tests."""
    yield await server.organization_manager.create_organization_async(pydantic_org=Organization(name="test_org"))
@pytest.fixture
async def other_user(server: SyncServer, other_organization):
    """Create and yield a user belonging to the second organization."""
    yield await server.user_manager.create_actor_async(pydantic_user=User(organization_id=other_organization.id, name="test_user"))
@pytest.fixture
def weather_tool_func():
    """Return the raw weather function used to build a tool.

    NOTE: the inner function's source and docstring are parsed by
    `create_tool_from_func` into the tool schema, so they are left untouched.
    """
    def get_weather(location: str) -> str:
        """Get the current weather for a given location.
        Args:
            location: The city and state, e.g. San Francisco, CA
        Returns:
            Weather description
        """
        return f"The weather in {location} is sunny and 72 degrees."
    return get_weather
@pytest.fixture
def print_tool_func():
    """Return the `print_message` function used to build the print tool.

    Bug fix: this fixture previously returned `print_tool_func` (the fixture
    function itself) instead of the inner `print_message`, so downstream
    `create_tool_from_func` calls received the wrong callable.
    """
    def print_message(message: str) -> str:
        """Print a message to the console.
        Args:
            message: The message to print
        Returns:
            Confirmation message
        """
        print(message)
        return f"Printed: {message}"
    return print_message
@pytest.fixture
async def weather_tool(server, weather_tool_func, default_user):
    """Register the weather function as a server-side tool and yield it."""
    yield await server.tool_manager.create_or_update_tool_async(create_tool_from_func(func=weather_tool_func), actor=default_user)
@pytest.fixture
async def print_tool(server, print_tool_func, default_user):
    """Register the print function as a server-side tool and yield it."""
    yield await server.tool_manager.create_or_update_tool_async(create_tool_from_func(func=print_tool_func), actor=default_user)
@pytest.fixture
async def test_block(server: SyncServer, default_user):
    """Create and yield a reusable memory block for agent-file tests."""
    pending = Block(
        label="test_block",
        value="Test Block Content",
        description="A test block for agent file tests",
        limit=1000,
        metadata={"type": "test", "category": "demo"},
    )
    yield await server.block_manager.create_or_update_block_async(pending, actor=default_user)
@pytest.fixture
def agent_serialization_manager(server, default_user):
    """Yield an AgentSerializationManager wired with every manager the export/import path needs."""
    yield AgentSerializationManager(
        agent_manager=server.agent_manager,
        tool_manager=server.tool_manager,
        source_manager=server.source_manager,
        block_manager=server.block_manager,
        group_manager=server.group_manager,
        mcp_manager=server.mcp_manager,
        file_manager=server.file_manager,
        file_agent_manager=server.file_agent_manager,
        message_manager=server.message_manager,
    )
async def send_message_to_agent(server: SyncServer, agent_state, actor: User, messages: list[MessageCreate]):
    """Drive one agent step for *messages*, tracked under a freshly created run."""
    run = await server.run_manager.create_run(
        pydantic_run=Run(agent_id=agent_state.id),
        actor=actor,
    )
    loop = AgentLoop.load(agent_state=agent_state, actor=actor)
    return await loop.step(run_id=run.id, input_messages=messages)
@pytest.fixture
async def test_agent(server: SyncServer, default_user, default_organization, test_block, weather_tool):
    """Fixture to create and return a test agent with messages.

    Builds a letta_v1_agent with memory blocks, an attached shared block and
    weather tool, seeds an initial message sequence, runs one real agent step
    ("What's the weather like?"), then re-fetches and yields the agent state.
    """
    memory_blocks = [
        CreateBlock(label="human", value="User is a test user"),
        CreateBlock(label="persona", value="I am a helpful test assistant"),
    ]
    create_agent_request = CreateAgent(
        name="test_agent_v2",
        agent_type=AgentType.letta_v1_agent,
        system="You are a helpful assistant for testing agent file export/import.",
        memory_blocks=memory_blocks,
        llm_config=LLMConfig.default_config("gpt-4o-mini"),
        embedding_config=EmbeddingConfig.default_config(provider="openai"),
        block_ids=[test_block.id],
        tool_ids=[weather_tool.id],
        tags=["test", "v2", "export"],
        description="Test agent for agent file v2 testing",
        metadata={"test_key": "test_value", "version": "v2"},
        initial_message_sequence=[
            MessageCreate(role=MessageRole.system, content="You are a helpful assistant."),
            MessageCreate(role=MessageRole.user, content="Hello!"),
            MessageCreate(role=MessageRole.assistant, content="Hello! How can I help you today?"),
        ],
        tool_exec_environment_variables={"TEST_VAR": "test_value"},
        message_buffer_autoclear=False,
    )
    agent_state = await server.agent_manager.create_agent_async(
        agent_create=create_agent_request,
        actor=default_user,
    )
    # Run one real step so the agent has genuine message history to export.
    await send_message_to_agent(
        server, agent_state, default_user, [MessageCreate(role=MessageRole.user, content="What's the weather like?")]
    )
    # Re-fetch so the yielded state reflects the messages created by the step.
    agent_state = await server.agent_manager.get_agent_by_id_async(agent_id=agent_state.id, actor=default_user)
    yield agent_state
@pytest.fixture(scope="function")
def embedding_handle_override():
    """Non-default OpenAI embedding handle for override tests.

    NOTE: hosted Letta embeddings are deliberately avoided in tests.
    """
    default_handle = EmbeddingConfig.default_config(provider="openai").handle or "openai/text-embedding-3-small"
    override = "openai/text-embedding-ada-002"
    assert default_handle != override  # the override must actually differ
    return override
@pytest.fixture(scope="function")
def model_handle_override():
    """Different OpenAI model handle for override tests (default is usually gpt-4o-mini)."""
    default_handle = LLMConfig.default_config("gpt-4o-mini").handle or "openai/gpt-4o-mini"
    override = "openai/gpt-4o"
    assert default_handle != override  # the override must actually differ
    return override
@pytest.fixture(scope="function")
async def test_source(server: SyncServer, default_user):
    """Create and yield a test source for file-export tests."""
    yield await server.source_manager.create_source(
        Source(
            name="test_source",
            description="Test source for file export tests",
            embedding_config=EmbeddingConfig.default_config(provider="openai"),
        ),
        default_user,
    )
@pytest.fixture(scope="function")
async def test_file(server: SyncServer, default_user, test_source):
    """Fixture to create and return a test file attached to test_source."""
    from letta.schemas.file import FileMetadata
    file_data = FileMetadata(
        source_id=test_source.id,
        file_name="test.txt",
        original_file_name="test.txt",
        file_type="text/plain",
        # NOTE(review): 46 does not equal len() of the text below (39 chars) —
        # confirm whether file_size is meant to track the content length.
        file_size=46,
    )
    file_metadata = await server.file_manager.create_file(file_data, default_user, text="This is a test file for export testing.")
    yield file_metadata
@pytest.fixture(scope="function")
async def agent_with_files(server: SyncServer, default_user, test_block, weather_tool, test_source, test_file):
    """Fixture to create and return an agent with attached files.

    Returns a (agent_id, source_id, file_id) tuple rather than the agent state,
    since callers need all three IDs to verify export/import remapping.
    """
    memory_blocks = [
        CreateBlock(label="human", value="User is a test user"),
        CreateBlock(label="persona", value="I am a helpful test assistant"),
    ]
    create_agent_request = CreateAgent(
        name="test_agent_v2",
        system="You are a helpful assistant for testing agent file export/import.",
        memory_blocks=memory_blocks,
        llm_config=LLMConfig.default_config("gpt-4o-mini"),
        embedding_config=EmbeddingConfig.default_config(provider="openai"),
        block_ids=[test_block.id],
        tool_ids=[weather_tool.id],
        tags=["test", "v2", "export"],
        description="Test agent for agent file v2 testing",
        metadata={"test_key": "test_value", "version": "v2"},
        initial_message_sequence=[
            MessageCreate(role=MessageRole.system, content="You are a helpful assistant."),
            MessageCreate(role=MessageRole.user, content="Hello!"),
            MessageCreate(role=MessageRole.assistant, content="Hello! How can I help you today?"),
        ],
        tool_exec_environment_variables={"TEST_VAR": "test_value"},
        message_buffer_autoclear=False,
        source_ids=[test_source.id],
    )
    agent_state = await server.agent_manager.create_agent_async(
        agent_create=create_agent_request,
        actor=default_user,
    )
    # Attach the file's content to the agent's context window so exports
    # include a real agent-file relationship.
    await server.agent_manager.insert_files_into_context_window(
        agent_state=agent_state, file_metadata_with_content=[test_file], actor=default_user
    )
    return (agent_state.id, test_source.id, test_file.id)
@pytest.fixture(scope="function")
async def test_mcp_server(server: SyncServer, default_user):
    """Create and yield a test MCP server record.

    The token and custom headers are secrets and should be excluded on export.
    """
    from letta.schemas.mcp import MCPServer, MCPServerType
    pending = MCPServer(
        server_name="test_mcp_server",
        server_type=MCPServerType.SSE,
        server_url="http://test-mcp-server.com",
        token="test-token-12345",  # secret — must not survive export
        custom_headers={"X-API-Key": "secret-key"},  # secret — must not survive export
    )
    yield await server.mcp_manager.create_or_update_mcp_server(pending, default_user)
@pytest.fixture(scope="function")
async def mcp_tool(server: SyncServer, default_user, test_mcp_server):
    """Create and yield a tool backed by the test MCP server."""
    from letta.schemas.tool import MCPTool, ToolCreate
    # Build a mock MCP tool definition and wrap it in a ToolCreate request.
    definition = MCPTool(
        name="test_mcp_tool",
        description="Test MCP tool for serialization",
        inputSchema={"type": "object", "properties": {"input": {"type": "string"}}},
    )
    request = ToolCreate.from_mcp(test_mcp_server.server_name, definition)
    # Persist the tool with its MCP server metadata attached.
    yield await server.tool_manager.create_mcp_tool_async(request, test_mcp_server.server_name, test_mcp_server.id, default_user)
@pytest.fixture(scope="function")
async def agent_with_mcp_tools(server: SyncServer, default_user, test_block, mcp_tool, test_mcp_server):
    """Fixture to create and return an agent with MCP tools.

    Returns the created agent state directly (no teardown needed — tables are
    cleared between tests by the autouse fixture).
    """
    memory_blocks = [
        CreateBlock(label="human", value="User is a test user"),
        CreateBlock(label="persona", value="I am a helpful test assistant"),
    ]
    create_agent_request = CreateAgent(
        name="test_agent_mcp",
        system="You are a helpful assistant with MCP tools.",
        memory_blocks=memory_blocks,
        llm_config=LLMConfig.default_config("gpt-4o-mini"),
        embedding_config=EmbeddingConfig.default_config(provider="openai"),
        block_ids=[test_block.id],
        tool_ids=[mcp_tool.id],
        tags=["test", "mcp", "export"],
        description="Test agent with MCP tools for serialization testing",
    )
    agent_state = await server.agent_manager.create_agent_async(
        agent_create=create_agent_request,
        actor=default_user,
    )
    return agent_state
# ------------------------------
# Helper Functions
# ------------------------------
async def create_test_source(server: SyncServer, name: str, user: User):
    """Create and return a source named *name* via the server's source manager."""
    return await server.source_manager.create_source(
        Source(
            name=name,
            description=f"Test source {name}",
            embedding_config=EmbeddingConfig.default_config(provider="openai"),
        ),
        user,
    )
async def create_test_file(server: SyncServer, filename: str, source_id: str, user: User, content: Optional[str] = None):
    """Create and return a test file attached to *source_id*.

    Args:
        server: Server whose file manager performs the creation.
        filename: Name used for both file_name and original_file_name.
        source_id: Source the file belongs to.
        user: Actor creating the file.
        content: File text; defaults to a string derived from the filename.
    """
    from letta.schemas.file import FileMetadata
    # Fix: the default previously was the f-string "Content of (unknown)" with
    # no placeholder; interpolate the filename as clearly intended.
    content = content or f"Content of {filename}"
    file_data = FileMetadata(
        source_id=source_id,
        file_name=filename,
        original_file_name=filename,
        file_type="text/plain",
        file_size=len(content),
    )
    return await server.file_manager.create_file(file_data, user, text=content)
async def create_test_agent_with_files(server: SyncServer, name: str, user: User, file_relationships: List[tuple]):
    """Helper function to create agent with attached files using server.
    Args:
        server: SyncServer instance
        name: Agent name
        user: User creating the agent
        file_relationships: List of (source_id, file_id) tuples
    Returns:
        The created agent state with each file inserted into its context window.
    """
    memory_blocks = [
        CreateBlock(label="human", value="User is a test user"),
        CreateBlock(label="persona", value="I am a helpful test assistant"),
    ]
    create_agent_request = CreateAgent(
        name=name,
        system="You are a helpful assistant for testing file export.",
        memory_blocks=memory_blocks,
        llm_config=LLMConfig.default_config("gpt-4o-mini"),
        embedding_config=EmbeddingConfig.default_config(provider="openai"),
        tags=["test", "files"],
        description="Test agent with files",
    )
    agent_state = await server.agent_manager.create_agent_async(
        agent_create=create_agent_request,
        actor=user,
    )
    # Attach each requested file to the agent's context window one at a time.
    # NOTE: source_id is unused here — the file lookup is by file_id alone.
    for source_id, file_id in file_relationships:
        file_metadata = await server.file_manager.get_file_by_id(file_id, user)
        await server.agent_manager.insert_files_into_context_window(
            agent_state=agent_state, file_metadata_with_content=[file_metadata], actor=user
        )
    return agent_state
def compare_agent_files(original: AgentFileSchema, imported: AgentFileSchema) -> bool:
    """Compare two AgentFileSchema objects for logical equivalence.

    Prints every discrepancy found and returns False if any exist.
    """
    errors = []
    # Entity-count checks, in the same order as the element-wise passes below.
    counted = [
        ("Agent", original.agents, imported.agents),
        ("Tool", original.tools, imported.tools),
        ("Block", original.blocks, imported.blocks),
        ("Group", original.groups, imported.groups),
        ("File", original.files, imported.files),
        ("Source", original.sources, imported.sources),
    ]
    for label, orig_items, imp_items in counted:
        if len(orig_items) != len(imp_items):
            errors.append(f"{label} count mismatch: {len(orig_items)} vs {len(imp_items)}")
    # Element-wise comparisons. Tools and blocks are matched after sorting by
    # their natural key so ordering differences don't cause false mismatches.
    passes = [
        (_compare_agents, original.agents, imported.agents, None),
        (_compare_tools, original.tools, imported.tools, lambda x: x.name),
        (_compare_blocks, original.blocks, imported.blocks, lambda x: x.label),
        (_compare_groups, original.groups, imported.groups, None),
        (_compare_files, original.files, imported.files, None),
        (_compare_sources, original.sources, imported.sources, None),
    ]
    for compare, orig_items, imp_items, sort_key in passes:
        if sort_key is not None:
            orig_items = sorted(orig_items, key=sort_key)
            imp_items = sorted(imp_items, key=sort_key)
        for i, (orig_entity, imp_entity) in enumerate(zip(orig_items, imp_items)):
            errors.extend(compare(orig_entity, imp_entity, i))
    if errors:
        print("Agent file comparison errors:")
        for error in errors:
            print(f"  - {error}")
        return False
    return True
def _compare_agents(orig: AgentSchema, imp: AgentSchema, index: int) -> List[str]:
    """Compare two AgentSchema objects for logical equivalence."""
    errors = []
    prefix = f"Agent {index}"
    if orig.name != imp.name:
        errors.append(f"{prefix}: name mismatch: '{orig.name}' vs '{imp.name}'")
    if orig.system != imp.system:
        errors.append(f"{prefix}: system mismatch")
    if orig.description != imp.description:
        errors.append(f"{prefix}: description mismatch")
    if orig.agent_type != imp.agent_type:
        errors.append(f"{prefix}: agent_type mismatch: '{orig.agent_type}' vs '{imp.agent_type}'")
    # Tags are order-insensitive.
    if sorted(orig.tags or []) != sorted(imp.tags or []):
        errors.append(f"{prefix}: tags mismatch: {orig.tags} vs {imp.tags}")
    # Plain equality checks that share the "<field> mismatch" message shape.
    for attr in ("metadata", "llm_config", "embedding_config", "tool_rules"):
        if getattr(orig, attr) != getattr(imp, attr):
            errors.append(f"{prefix}: {attr} mismatch")
    # The field lives on `.secrets` but is reported under its public name.
    if orig.secrets != imp.secrets:
        errors.append(f"{prefix}: tool_exec_environment_variables mismatch")
    # Messages: compare element-wise only when the counts already line up.
    if len(orig.messages) != len(imp.messages):
        errors.append(f"{prefix}: message count mismatch: {len(orig.messages)} vs {len(imp.messages)}")
    else:
        for j, (orig_msg, imp_msg) in enumerate(zip(orig.messages, imp.messages)):
            errors.extend(_compare_messages(orig_msg, imp_msg, index, j))
    if len(orig.in_context_message_ids) != len(imp.in_context_message_ids):
        errors.append(
            f"{prefix}: in-context message count mismatch: {len(orig.in_context_message_ids)} vs {len(imp.in_context_message_ids)}"
        )
    # Relationship ID lists: only their lengths are compared.
    for attr in ("tool_ids", "block_ids", "source_ids"):
        orig_ids = getattr(orig, attr) or []
        imp_ids = getattr(imp, attr) or []
        if len(orig_ids) != len(imp_ids):
            errors.append(f"{prefix}: {attr} count mismatch: {len(orig_ids)} vs {len(imp_ids)}")
    return errors
def _compare_messages(orig: MessageSchema, imp: MessageSchema, agent_index: int, msg_index: int) -> List[str]:
    """Compare two MessageSchema objects for logical equivalence."""
    where = f"Agent {agent_index}, Message {msg_index}"
    errors = []
    # (attribute, whether the mismatch message quotes the two values)
    for attr, quoted in (("role", True), ("content", False), ("name", True), ("model", True)):
        orig_val = getattr(orig, attr)
        imp_val = getattr(imp, attr)
        if orig_val == imp_val:
            continue
        if quoted:
            errors.append(f"{where}: {attr} mismatch: '{orig_val}' vs '{imp_val}'")
        else:
            errors.append(f"{where}: {attr} mismatch")
    # agent_id is intentionally ignored — originals and imports get different agent IDs.
    return errors
def _compare_tools(orig: ToolSchema, imp: ToolSchema, index: int) -> List[str]:
    """Compare two ToolSchema objects for logical equivalence."""
    errors = []
    where = f"Tool {index}"
    if orig.name != imp.name:
        errors.append(f"{where}: name mismatch: '{orig.name}' vs '{imp.name}'")
    for attr in ("description", "source_code", "json_schema"):
        if getattr(orig, attr) != getattr(imp, attr):
            errors.append(f"{where}: {attr} mismatch")
    # Tags are order-insensitive.
    if sorted(orig.tags or []) != sorted(imp.tags or []):
        errors.append(f"{where}: tags mismatch: {orig.tags} vs {imp.tags}")
    # The field is `metadata_` but is reported under its logical name.
    if orig.metadata_ != imp.metadata_:
        errors.append(f"{where}: metadata mismatch")
    # organization_id is intentionally not compared — it differs across orgs.
    return errors
def _compare_blocks(orig: BlockSchema, imp: BlockSchema, index: int) -> List[str]:
    """Compare two BlockSchema objects for logical equivalence."""
    errors = []
    where = f"Block {index}"
    if orig.label != imp.label:
        errors.append(f"{where}: label mismatch: '{orig.label}' vs '{imp.label}'")
    if orig.value != imp.value:
        errors.append(f"{where}: value mismatch")
    if orig.limit != imp.limit:
        errors.append(f"{where}: limit mismatch: {orig.limit} vs {imp.limit}")
    for attr in ("description", "metadata"):
        if getattr(orig, attr) != getattr(imp, attr):
            errors.append(f"{where}: {attr} mismatch")
    if orig.template_name != imp.template_name:
        errors.append(f"{where}: template_name mismatch: '{orig.template_name}' vs '{imp.template_name}'")
    if orig.is_template != imp.is_template:
        errors.append(f"{where}: is_template mismatch: {orig.is_template} vs {imp.is_template}")
    return errors
def _compare_groups(orig: GroupSchema, imp: GroupSchema, index: int) -> List[str]:
"""Compare two GroupSchema objects for logical equivalence."""
errors = []
orig_agent_ids = sorted(orig.agent_ids)
imp_agent_ids = sorted(imp.agent_ids)
if orig_agent_ids != imp_agent_ids:
errors.append(f"Group {index}: agent_ids mismatch: '{orig_agent_ids}' vs '{imp_agent_ids}'")
if orig.description != imp.description:
errors.append(f"Group {index}: description mismatch")
if orig.manager_config != imp.manager_config:
errors.append(f"Group {index}: manager config mismatch")
orig_shared_block_ids = sorted(orig.shared_block_ids)
imp_shared_block_ids = sorted(imp.shared_block_ids)
if orig_shared_block_ids != imp_shared_block_ids:
errors.append(f"Group {index}: metadata mismatch")
return errors
def _compare_files(orig: FileSchema, imp: FileSchema, index: int) -> List[str]:
    """Compare two FileSchema objects for logical equivalence."""
    errors = []
    where = f"File {index}"
    for attr in ("file_name", "original_file_name"):
        orig_val = getattr(orig, attr)
        imp_val = getattr(imp, attr)
        if orig_val != imp_val:
            errors.append(f"{where}: {attr} mismatch: '{orig_val}' vs '{imp_val}'")
    if orig.file_size != imp.file_size:
        errors.append(f"{where}: file_size mismatch: {orig.file_size} vs {imp.file_size}")
    for attr in ("file_type", "processing_status"):
        orig_val = getattr(orig, attr)
        imp_val = getattr(imp, attr)
        if orig_val != imp_val:
            errors.append(f"{where}: {attr} mismatch: '{orig_val}' vs '{imp_val}'")
    if orig.metadata != imp.metadata:
        errors.append(f"{where}: metadata mismatch")
    # Imported files must reference a remapped source ID ("source-...").
    if not imp.source_id.startswith("source-"):
        errors.append(f"{where}: source_id not properly remapped: {imp.source_id}")
    return errors
def _compare_sources(orig: SourceSchema, imp: SourceSchema, index: int) -> List[str]:
    """Compare two SourceSchema objects for logical equivalence."""
    errors = []
    # name is the only field whose differing values are echoed in the message.
    if orig.name != imp.name:
        errors.append(f"Source {index}: name mismatch: '{orig.name}' vs '{imp.name}'")
    # Remaining fields only report that they differ, not what the values are.
    for attr in ("description", "instructions", "metadata", "embedding_config"):
        if getattr(orig, attr) != getattr(imp, attr):
            errors.append(f"Source {index}: {attr} mismatch")
    return errors
def _validate_entity_id(entity_id: str, expected_prefix: str) -> bool:
"""Helper function to validate that an ID follows the expected format (prefix-N)."""
if not entity_id.startswith(f"{expected_prefix}-"):
print(f"Invalid {expected_prefix} ID format: {entity_id} should start with '{expected_prefix}-'")
return False
try:
suffix = entity_id[len(expected_prefix) + 1 :]
int(suffix)
return True
except ValueError:
print(f"Invalid {expected_prefix} ID format: {entity_id} should have integer suffix")
return False
def validate_id_format(schema: AgentFileSchema) -> bool:
    """Validate that all IDs follow the expected format (entity-N)."""
    # Agents are checked first, together with their nested message IDs and
    # in-context message references (all of which use the "message-" prefix).
    for agent in schema.agents:
        if not _validate_entity_id(agent.id, "agent"):
            return False
        for message in agent.messages:
            if not _validate_entity_id(message.id, "message"):
                return False
        if not all(_validate_entity_id(msg_id, "message") for msg_id in agent.in_context_message_ids):
            return False
    # Remaining top-level collections, paired with the prefix their IDs must carry.
    checks = (
        (schema.tools, "tool"),
        (schema.blocks, "block"),
        (schema.files, "file"),
        (schema.sources, "source"),
    )
    for collection, prefix in checks:
        for entity in collection:
            if not _validate_entity_id(entity.id, prefix):
                return False
    return True
# ------------------------------
# Tests
# ------------------------------
class TestFileExport:
    """Test file export functionality with comprehensive validation"""
    @pytest.mark.asyncio
    async def test_basic_file_export(self, default_user, agent_serialization_manager, agent_with_files):
        """Test basic file export functionality"""
        agent_id, _source_id, _file_id = agent_with_files
        exported = await agent_serialization_manager.export([agent_id], actor=default_user)
        # The single-file fixture should yield exactly one of each entity type.
        assert len(exported.agents) == 1
        assert len(exported.sources) == 1
        assert len(exported.files) == 1
        agent = exported.agents[0]
        assert len(agent.files_agents) == 1
        # Exported IDs must be remapped to the portable "entity-N" format.
        assert _validate_entity_id(agent.id, "agent")
        assert _validate_entity_id(exported.sources[0].id, "source")
        assert _validate_entity_id(exported.files[0].id, "file")
        # The file-agent join row must reference the remapped IDs, not DB IDs.
        file_agent = agent.files_agents[0]
        assert file_agent.agent_id == agent.id
        assert file_agent.file_id == exported.files[0].id
        assert file_agent.source_id == exported.sources[0].id
    @pytest.mark.asyncio
    async def test_multiple_files_per_source(self, server, default_user, agent_serialization_manager):
        """Test export with multiple files from the same source"""
        source = await create_test_source(server, "multi-file-source", default_user)
        file1 = await create_test_file(server, "file1.txt", source.id, default_user)
        file2 = await create_test_file(server, "file2.txt", source.id, default_user)
        agent = await create_test_agent_with_files(server, "multi-file-agent", default_user, [(source.id, file1.id), (source.id, file2.id)])
        exported = await agent_serialization_manager.export([agent.id], actor=default_user)
        # Two files share one source, so the source is exported once.
        assert len(exported.agents) == 1
        assert len(exported.sources) == 1
        assert len(exported.files) == 2
        agent = exported.agents[0]
        assert len(agent.files_agents) == 2
        source_id = exported.sources[0].id
        # Every exported file must point back at the single shared source.
        for file_schema in exported.files:
            assert file_schema.source_id == source_id
        file_ids = {f.id for f in exported.files}
        for file_agent in agent.files_agents:
            assert file_agent.file_id in file_ids
            assert file_agent.source_id == source_id
    @pytest.mark.asyncio
    async def test_multiple_sources_export(self, server, default_user, agent_serialization_manager):
        """Test export with files from multiple sources"""
        source1 = await create_test_source(server, "source-1", default_user)
        source2 = await create_test_source(server, "source-2", default_user)
        file1 = await create_test_file(server, "file1.txt", source1.id, default_user)
        file2 = await create_test_file(server, "file2.txt", source2.id, default_user)
        agent = await create_test_agent_with_files(
            server, "multi-source-agent", default_user, [(source1.id, file1.id), (source2.id, file2.id)]
        )
        exported = await agent_serialization_manager.export([agent.id], actor=default_user)
        assert len(exported.agents) == 1
        assert len(exported.sources) == 2
        assert len(exported.files) == 2
        # Each file must reference one of the two exported sources.
        source_ids = {s.id for s in exported.sources}
        for file_schema in exported.files:
            assert file_schema.source_id in source_ids
    @pytest.mark.asyncio
    async def test_cross_agent_file_deduplication(self, server, default_user, agent_serialization_manager):
        """Test that files shared across agents are deduplicated in export"""
        source = await create_test_source(server, "shared-source", default_user)
        shared_file = await create_test_file(server, "shared.txt", source.id, default_user)
        agent1 = await create_test_agent_with_files(server, "agent-1", default_user, [(source.id, shared_file.id)])
        agent2 = await create_test_agent_with_files(server, "agent-2", default_user, [(source.id, shared_file.id)])
        exported = await agent_serialization_manager.export([agent1.id, agent2.id], actor=default_user)
        # Two agents, but the shared file/source appear exactly once each.
        assert len(exported.agents) == 2
        assert len(exported.sources) == 1
        assert len(exported.files) == 1
        file_id = exported.files[0].id
        source_id = exported.sources[0].id
        # Both agents still keep their own join rows to the deduplicated file.
        for agent in exported.agents:
            assert len(agent.files_agents) == 1
            file_agent = agent.files_agents[0]
            assert file_agent.file_id == file_id
            assert file_agent.source_id == source_id
    @pytest.mark.asyncio
    async def test_file_agent_relationship_preservation(self, server, default_user, agent_serialization_manager):
        """Test that file-agent relationship details are preserved"""
        source = await create_test_source(server, "test-source", default_user)
        file = await create_test_file(server, "test.txt", source.id, default_user)
        agent = await create_test_agent_with_files(server, "test-agent", default_user, [(source.id, file.id)])
        exported = await agent_serialization_manager.export([agent.id], actor=default_user)
        agent = exported.agents[0]
        file_agent = agent.files_agents[0]
        # Relationship metadata (name, open state, access time) survives export.
        assert file_agent.file_name == file.file_name
        assert file_agent.is_open is True
        assert hasattr(file_agent, "last_accessed_at")
    @pytest.mark.asyncio
    async def test_id_remapping_consistency(self, server, default_user, agent_serialization_manager):
        """Test that ID remapping is consistent across all references"""
        source = await create_test_source(server, "consistency-source", default_user)
        file = await create_test_file(server, "consistency.txt", source.id, default_user)
        agent = await create_test_agent_with_files(server, "consistency-agent", default_user, [(source.id, file.id)])
        exported = await agent_serialization_manager.export([agent.id], actor=default_user)
        agent_schema = exported.agents[0]
        source_schema = exported.sources[0]
        file_schema = exported.files[0]
        file_agent = agent_schema.files_agents[0]
        # Every cross-reference must use the same remapped ID for the same entity.
        assert file_schema.source_id == source_schema.id
        assert file_agent.agent_id == agent_schema.id
        assert file_agent.file_id == file_schema.id
        assert file_agent.source_id == source_schema.id
    @pytest.mark.asyncio
    async def test_empty_file_relationships(self, server, default_user, agent_serialization_manager):
        """Test export of agent with no file relationships"""
        agent_create = CreateAgent(
            name="no-files-agent",
            llm_config=LLMConfig.default_config("gpt-4o-mini"),
            embedding_config=EmbeddingConfig.default_config(provider="openai"),
        )
        agent = await server.agent_manager.create_agent_async(agent_create, actor=default_user)
        exported = await agent_serialization_manager.export([agent.id], actor=default_user)
        # No attached files → empty sources/files collections and no join rows.
        assert len(exported.agents) == 1
        assert len(exported.sources) == 0
        assert len(exported.files) == 0
        agent_schema = exported.agents[0]
        assert len(agent_schema.files_agents) == 0
    @pytest.mark.asyncio
    async def test_file_content_inclusion_in_export(self, default_user, agent_serialization_manager, agent_with_files):
        """Test that file content is included in export"""
        agent_id, _source_id, _file_id = agent_with_files
        exported = await agent_serialization_manager.export([agent_id], actor=default_user)
        file_schema = exported.files[0]
        # NOTE(review): `hasattr(..., "content")` is True for any declared schema
        # field, so the `or` makes this assertion pass even when content is None —
        # likely intended `and`; confirm before tightening.
        assert hasattr(file_schema, "content") or file_schema.content is not None
class TestAgentFileExport:
    """Tests for agent file export functionality."""
    async def test_basic_export(self, agent_serialization_manager, test_agent, default_user):
        """Test basic agent export functionality."""
        agent_file = await agent_serialization_manager.export([test_agent.id], default_user)
        assert isinstance(agent_file, AgentFileSchema)
        assert len(agent_file.agents) == 1
        assert len(agent_file.tools) > 0  # Should include base tools + weather tool
        assert len(agent_file.blocks) > 0  # Should include memory blocks + test block
        # Export metadata must carry a real alembic revision marker.
        assert agent_file.metadata.get("revision_id") is not None
        assert agent_file.metadata.get("revision_id") != "unknown"
        assert len(agent_file.metadata.get("revision_id")) > 0
        assert validate_id_format(agent_file)
        # Core agent attributes and message history survive the export.
        exported_agent = agent_file.agents[0]
        assert exported_agent.name == test_agent.name
        assert exported_agent.system == test_agent.system
        assert len(exported_agent.messages) > 0
        assert len(exported_agent.in_context_message_ids) > 0
    async def test_export_multiple_agents(self, server, agent_serialization_manager, test_agent, default_user, weather_tool):
        """Test exporting multiple agents."""
        create_agent_request = CreateAgent(
            name="second_test_agent",
            system="Second test agent",
            llm_config=LLMConfig.default_config("gpt-4o-mini"),
            embedding_config=EmbeddingConfig.default_config(provider="openai"),
            tool_ids=[weather_tool.id],
            initial_message_sequence=[
                MessageCreate(role=MessageRole.user, content="Second agent message"),
            ],
        )
        second_agent = await server.agent_manager.create_agent_async(
            agent_create=create_agent_request,
            actor=default_user,
        )
        agent_file = await agent_serialization_manager.export([test_agent.id, second_agent.id], default_user)
        assert len(agent_file.agents) == 2
        assert validate_id_format(agent_file)
        # Remapped agent IDs must remain distinct across the two agents.
        agent_ids = {agent.id for agent in agent_file.agents}
        assert len(agent_ids) == 2
    async def test_export_id_remapping(self, agent_serialization_manager, test_agent, default_user):
        """Test that IDs are properly remapped during export."""
        agent_file = await agent_serialization_manager.export([test_agent.id], default_user)
        exported_agent = agent_file.agents[0]
        # The first (only) agent always gets the deterministic "agent-0" ID.
        assert exported_agent.id == "agent-0"
        assert exported_agent.id != test_agent.id
        if exported_agent.tool_ids:
            for tool_id in exported_agent.tool_ids:
                assert tool_id.startswith("tool-")
        if exported_agent.block_ids:
            for block_id in exported_agent.block_ids:
                assert block_id.startswith("block-")
        # Every in-context reference must resolve to an exported message.
        message_ids = {msg.id for msg in exported_agent.messages}
        for in_context_id in exported_agent.in_context_message_ids:
            assert in_context_id in message_ids, f"In-context message ID {in_context_id} not found in messages"
    async def test_message_agent_id_remapping(self, agent_serialization_manager, test_agent, default_user):
        """Test that message.agent_id is properly remapped during export."""
        agent_file = await agent_serialization_manager.export([test_agent.id], default_user)
        exported_agent = agent_file.agents[0]
        # Messages must point at the remapped agent ID, not the DB one.
        for message in exported_agent.messages:
            assert message.agent_id == exported_agent.id, (
                f"Message {message.id} has agent_id {message.agent_id}, expected {exported_agent.id}"
            )
        assert exported_agent.id == "agent-0"
        assert exported_agent.id != test_agent.id
    async def test_export_empty_agent_list(self, agent_serialization_manager, default_user):
        """Test exporting empty agent list."""
        agent_file = await agent_serialization_manager.export([], default_user)
        assert len(agent_file.agents) == 0
        assert len(agent_file.tools) == 0
        assert len(agent_file.blocks) == 0
    async def test_export_nonexistent_agent(self, agent_serialization_manager, default_user):
        """Test exporting non-existent agent raises error."""
        with pytest.raises(AgentFileExportError):  # Should raise AgentFileExportError for non-existent agent
            await agent_serialization_manager.export(["non-existent-id"], default_user)
    @pytest.mark.asyncio
    async def test_revision_id_automatic_setting(self, agent_serialization_manager, test_agent, default_user):
        """Test that revision_id is automatically set to the latest alembic revision."""
        agent_file = await agent_serialization_manager.export([test_agent.id], default_user)
        from letta.utils import get_latest_alembic_revision
        expected_revision = await get_latest_alembic_revision()
        assert agent_file.metadata.get("revision_id") == expected_revision
        assert agent_file.metadata.get("revision_id") != "unknown"
        # Alembic revisions are 12-character hex strings.
        assert len(agent_file.metadata.get("revision_id")) == 12
        assert all(c in "0123456789abcdef" for c in agent_file.metadata.get("revision_id"))
    async def test_export_sleeptime_enabled_agent(self, server, agent_serialization_manager, default_user, weather_tool):
        """Test exporting sleeptime enabled agent."""
        create_agent_request = CreateAgent(
            name="sleeptime-enabled-test-agent",
            system="Sleeptime enabled test agent",
            llm_config=LLMConfig.default_config("gpt-4o-mini"),
            embedding_config=EmbeddingConfig.default_config(provider="openai"),
            tool_ids=[weather_tool.id],
            initial_message_sequence=[
                MessageCreate(role=MessageRole.user, content="Second agent message"),
            ],
            enable_sleeptime=True,
        )
        sleeptime_enabled_agent = await server.create_agent_async(
            request=create_agent_request,
            actor=default_user,
        )
        agent_file = await agent_serialization_manager.export([sleeptime_enabled_agent.id], default_user)
        # Fix: identity comparison with None (PEP 8) instead of `!= None`.
        assert sleeptime_enabled_agent.multi_agent_group is not None
        # Sleeptime agents export as a pair: the main agent plus its sleeptime agent.
        assert len(agent_file.agents) == 2
        assert validate_id_format(agent_file)
        agent_ids = {agent.id for agent in agent_file.agents}
        assert len(agent_ids) == 2
        assert len(agent_file.groups) == 1
        sleeptime_group = agent_file.groups[0]
        assert len(sleeptime_group.agent_ids) == 1
        assert sleeptime_group.agent_ids[0] in agent_ids
        assert sleeptime_group.manager_config.manager_type == ManagerType.sleeptime
        assert sleeptime_group.manager_config.manager_agent_id in agent_ids
        await server.agent_manager.delete_agent_async(agent_id=sleeptime_enabled_agent.id, actor=default_user)
    async def test_tool_exec_environment_variables_scrubbing(self, server, agent_serialization_manager, default_user, weather_tool):
        """Test that tool_exec_environment_variables values are scrubbed during export."""
        # create agent with environment variables containing secrets
        create_agent_request = CreateAgent(
            name="agent_with_env_vars",
            system="Agent with environment variables",
            llm_config=LLMConfig.default_config("gpt-4o-mini"),
            embedding_config=EmbeddingConfig.default_config(provider="openai"),
            tool_ids=[weather_tool.id],
            tool_exec_environment_variables={
                "API_KEY": "super-secret-api-key-12345",
                "DATABASE_PASSWORD": "ultra-secure-password",
                "TOKEN": "bearer-token-xyz789",
            },
            initial_message_sequence=[
                MessageCreate(role=MessageRole.user, content="Test environment variables"),
            ],
        )
        agent_with_env_vars = await server.agent_manager.create_agent_async(
            agent_create=create_agent_request,
            actor=default_user,
        )
        # export the agent
        agent_file = await agent_serialization_manager.export([agent_with_env_vars.id], default_user)
        # verify agent was exported
        assert len(agent_file.agents) == 1
        exported_agent = agent_file.agents[0]
        # verify environment variables exist but values are scrubbed (empty strings)
        assert exported_agent.secrets is not None
        assert len(exported_agent.secrets) == 3
        assert "API_KEY" in exported_agent.secrets
        assert "DATABASE_PASSWORD" in exported_agent.secrets
        assert "TOKEN" in exported_agent.secrets
        # most importantly: verify all secret values have been wiped
        assert exported_agent.secrets["API_KEY"] == ""
        assert exported_agent.secrets["DATABASE_PASSWORD"] == ""
        assert exported_agent.secrets["TOKEN"] == ""
        # verify no secret values appear anywhere in the exported data
        assert "super-secret-api-key-12345" not in str(agent_file)
        assert "ultra-secure-password" not in str(agent_file)
        assert "bearer-token-xyz789" not in str(agent_file)
        # clean up
        await server.agent_manager.delete_agent_async(agent_id=agent_with_env_vars.id, actor=default_user)
class TestAgentFileImport:
    """Tests for agent file import functionality."""
    async def test_basic_import(self, agent_serialization_manager, test_agent, default_user, other_user):
        """Test basic agent import functionality."""
        agent_file = await agent_serialization_manager.export([test_agent.id], default_user)
        result = await agent_serialization_manager.import_file(agent_file, other_user)
        assert result.success
        assert result.imported_count > 0
        assert len(result.id_mappings) > 0
        for file_id, db_id in result.id_mappings.items():
            if file_id.startswith("agent-"):
                assert db_id != test_agent.id  # New agent should have different ID
    async def test_basic_import_with_embedding_override(
        self, server, agent_serialization_manager, test_agent, default_user, other_user, embedding_handle_override
    ):
        """Test basic agent import functionality with embedding override."""
        agent_file = await agent_serialization_manager.export([test_agent.id], default_user)
        embedding_config_override = await server.get_embedding_config_from_handle_async(actor=other_user, handle=embedding_handle_override)
        result = await agent_serialization_manager.import_file(agent_file, other_user, override_embedding_config=embedding_config_override)
        assert result.success
        assert result.imported_count > 0
        assert len(result.id_mappings) > 0
        for file_id, db_id in result.id_mappings.items():
            if file_id.startswith("agent-"):
                assert db_id != test_agent.id  # New agent should have different ID
        # check embedding handle
        imported_agent_id = next(db_id for file_id, db_id in result.id_mappings.items() if file_id == "agent-0")
        imported_agent = await server.agent_manager.get_agent_by_id_async(imported_agent_id, other_user)
        assert imported_agent.embedding_config.handle == embedding_handle_override
    async def test_basic_import_with_model_override(
        self, server, agent_serialization_manager, test_agent, default_user, other_user, model_handle_override
    ):
        """Test basic agent import functionality with LLM model override."""
        # Verify original agent has gpt-4o-mini (handle may be None for legacy configs)
        assert "gpt-4o-mini" in (test_agent.llm_config.handle or "") or "gpt-4o-mini" in (test_agent.llm_config.model or "")
        agent_file = await agent_serialization_manager.export([test_agent.id], default_user)
        llm_config_override = await server.get_llm_config_from_handle_async(actor=other_user, handle=model_handle_override)
        result = await agent_serialization_manager.import_file(agent_file, other_user, override_llm_config=llm_config_override)
        assert result.success
        assert result.imported_count > 0
        assert len(result.id_mappings) > 0
        for file_id, db_id in result.id_mappings.items():
            if file_id.startswith("agent-"):
                assert db_id != test_agent.id  # New agent should have different ID
        # check model handle was overridden
        imported_agent_id = next(db_id for file_id, db_id in result.id_mappings.items() if file_id == "agent-0")
        imported_agent = await server.agent_manager.get_agent_by_id_async(imported_agent_id, other_user)
        assert imported_agent.llm_config.handle == model_handle_override
    async def test_basic_import_with_both_overrides(
        self, server, agent_serialization_manager, test_agent, default_user, other_user, embedding_handle_override, model_handle_override
    ):
        """Test agent import with both embedding and model overrides."""
        agent_file = await agent_serialization_manager.export([test_agent.id], default_user)
        embedding_config_override = await server.get_embedding_config_from_handle_async(actor=other_user, handle=embedding_handle_override)
        llm_config_override = await server.get_llm_config_from_handle_async(actor=other_user, handle=model_handle_override)
        result = await agent_serialization_manager.import_file(
            agent_file, other_user, override_embedding_config=embedding_config_override, override_llm_config=llm_config_override
        )
        assert result.success
        assert result.imported_count > 0
        # Verify both overrides were applied
        imported_agent_id = next(db_id for file_id, db_id in result.id_mappings.items() if file_id == "agent-0")
        imported_agent = await server.agent_manager.get_agent_by_id_async(imported_agent_id, other_user)
        assert imported_agent.embedding_config.handle == embedding_handle_override
        assert imported_agent.llm_config.handle == model_handle_override
    async def test_import_preserves_data(self, server, agent_serialization_manager, test_agent, default_user, other_user):
        """Test that import preserves all important data."""
        agent_file = await agent_serialization_manager.export([test_agent.id], default_user)
        result = await agent_serialization_manager.import_file(agent_file, other_user)
        imported_agent_id = next(db_id for file_id, db_id in result.id_mappings.items() if file_id == "agent-0")
        imported_agent = await server.agent_manager.get_agent_by_id_async(imported_agent_id, other_user)
        assert imported_agent.name == test_agent.name
        assert imported_agent.system == test_agent.system
        assert imported_agent.description == test_agent.description
        assert imported_agent.metadata == test_agent.metadata
        assert imported_agent.tags == test_agent.tags
        assert len(imported_agent.tools) == len(test_agent.tools)
        assert len(imported_agent.memory.blocks) == len(test_agent.memory.blocks)
        # Message history must round-trip with roles/content intact and
        # agent_id rewritten to the newly created agent.
        original_messages = await server.message_manager.list_messages(actor=default_user, agent_id=test_agent.id)
        imported_messages = await server.message_manager.list_messages(actor=other_user, agent_id=imported_agent_id)
        assert len(imported_messages) == len(original_messages)
        for orig_msg, imp_msg in zip(original_messages, imported_messages):
            assert orig_msg.role == imp_msg.role
            assert orig_msg.content == imp_msg.content
            assert imp_msg.agent_id == imported_agent_id  # Should be remapped to new agent
    async def test_import_message_context_preservation(self, server, agent_serialization_manager, test_agent, default_user, other_user):
        """Test that in-context message references are preserved during import."""
        agent_file = await agent_serialization_manager.export([test_agent.id], default_user)
        result = await agent_serialization_manager.import_file(agent_file, other_user)
        imported_agent_id = next(db_id for file_id, db_id in result.id_mappings.items() if file_id == "agent-0")
        imported_agent = await server.agent_manager.get_agent_by_id_async(imported_agent_id, other_user)
        assert len(imported_agent.message_ids) == len(test_agent.message_ids)
        imported_messages = await server.message_manager.list_messages(actor=other_user, agent_id=imported_agent_id)
        imported_message_ids = {msg.id for msg in imported_messages}
        for in_context_id in imported_agent.message_ids:
            assert in_context_id in imported_message_ids
    async def test_dry_run_import(self, agent_serialization_manager, test_agent, default_user, other_user):
        """Test dry run import validation."""
        agent_file = await agent_serialization_manager.export([test_agent.id], default_user)
        result = await agent_serialization_manager.import_file(agent_file, other_user, dry_run=True)
        assert result.success
        assert result.imported_count == 0  # No actual imports in dry run
        assert len(result.id_mappings) == 0
        assert "dry run" in result.message.lower()
    async def test_import_validation_errors(self, agent_serialization_manager, other_user):
        """Test import validation catches errors."""
        from letta.utils import get_latest_alembic_revision
        current_revision = await get_latest_alembic_revision()
        invalid_agent_file = AgentFileSchema(
            metadata={"revision_id": current_revision},
            agents=[
                AgentSchema(id="agent-0", name="agent1"),
                AgentSchema(id="agent-0", name="agent2"),  # Duplicate ID
            ],
            groups=[],
            blocks=[],
            files=[],
            sources=[],
            tools=[],
            mcp_servers=[],
        )
        with pytest.raises(AgentFileImportError):
            await agent_serialization_manager.import_file(invalid_agent_file, other_user)
    async def test_import_sleeptime_enabled_agent(self, server, agent_serialization_manager, default_user, other_user, weather_tool):
        """Test importing a sleeptime-enabled agent (main agent, sleeptime agent, and group)."""
        create_agent_request = CreateAgent(
            name="sleeptime-enabled-test-agent",
            system="Sleeptime enabled test agent",
            llm_config=LLMConfig.default_config("gpt-4o-mini"),
            embedding_config=EmbeddingConfig.default_config(provider="openai"),
            tool_ids=[weather_tool.id],
            initial_message_sequence=[
                MessageCreate(role=MessageRole.user, content="Second agent message"),
            ],
            enable_sleeptime=True,
        )
        sleeptime_enabled_agent = await server.create_agent_async(
            request=create_agent_request,
            actor=default_user,
        )
        # (Removed two no-op attribute-access statements that discarded their results.)
        agent_file = await agent_serialization_manager.export([sleeptime_enabled_agent.id], default_user)
        result = await agent_serialization_manager.import_file(agent_file, other_user)
        assert result.success
        assert result.imported_count > 0
        assert len(result.id_mappings) > 0
        # DB-side agent IDs also carry the "agent-" prefix; both agents of the pair import.
        exported_agent_ids = [db_id for db_id in result.id_mappings.values() if db_id.startswith("agent-")]
        assert len(exported_agent_ids) == 2
        exported_group_ids = [file_id for file_id in result.id_mappings if file_id.startswith("group-")]
        assert len(exported_group_ids) == 1
        await server.agent_manager.delete_agent_async(agent_id=sleeptime_enabled_agent.id, actor=default_user)
    async def test_import_with_environment_variables(self, server, agent_serialization_manager, default_user, other_user, weather_tool):
        """Test that environment variables can be provided during import."""
        # create agent with environment variables
        create_agent_request = CreateAgent(
            name="agent_with_env_vars_import_test",
            system="Agent with environment variables for import testing",
            llm_config=LLMConfig.default_config("gpt-4o-mini"),
            embedding_config=EmbeddingConfig.default_config(provider="openai"),
            tool_ids=[weather_tool.id],
            tool_exec_environment_variables={
                "API_KEY": "original-api-key",
                "DATABASE_URL": "original-database-url",
                "SECRET_TOKEN": "original-secret-token",
            },
            initial_message_sequence=[
                MessageCreate(role=MessageRole.user, content="Test environment variables import"),
            ],
        )
        original_agent = await server.agent_manager.create_agent_async(
            agent_create=create_agent_request,
            actor=default_user,
        )
        # export the agent (values should be scrubbed)
        agent_file = await agent_serialization_manager.export([original_agent.id], default_user)
        # verify values are scrubbed in export
        exported_agent = agent_file.agents[0]
        assert exported_agent.secrets["API_KEY"] == ""
        assert exported_agent.secrets["DATABASE_URL"] == ""
        assert exported_agent.secrets["SECRET_TOKEN"] == ""
        # import with new environment variable values
        new_env_vars = {
            "API_KEY": "new-api-key-for-import",
            "DATABASE_URL": "new-database-url-for-import",
            "SECRET_TOKEN": "new-secret-token-for-import",
        }
        result = await agent_serialization_manager.import_file(agent_file, other_user, env_vars=new_env_vars)
        assert result.success
        assert len(result.imported_agent_ids) == 1
        # get the imported agent and verify environment variables were set correctly
        imported_agent_id = result.imported_agent_ids[0]
        imported_agent = await server.agent_manager.get_agent_by_id_async(
            agent_id=imported_agent_id, actor=other_user, include_relationships=["tool_exec_environment_variables"]
        )
        # verify the imported agent has the new environment variable values
        env_vars_dict = imported_agent.get_agent_env_vars_as_dict()
        assert env_vars_dict["API_KEY"] == "new-api-key-for-import"
        assert env_vars_dict["DATABASE_URL"] == "new-database-url-for-import"
        assert env_vars_dict["SECRET_TOKEN"] == "new-secret-token-for-import"
        # clean up
        await server.agent_manager.delete_agent_async(agent_id=original_agent.id, actor=default_user)
        await server.agent_manager.delete_agent_async(agent_id=imported_agent_id, actor=other_user)
class TestAgentFileImportWithProcessing:
    """Tests for agent file import with file processing (chunking/embedding)."""
    async def test_import_with_file_processing(self, server, agent_serialization_manager, default_user, other_user):
        """Test that import processes files for chunking and embedding."""
        source = await create_test_source(server, "processing-source", default_user)
        file_content = "This is test content for processing. It should be chunked and embedded during import."
        file_metadata = await create_test_file(server, "process.txt", source.id, default_user, content=file_content)
        agent = await create_test_agent_with_files(server, "processing-agent", default_user, [(source.id, file_metadata.id)])
        exported = await agent_serialization_manager.export([agent.id], default_user)
        result = await agent_serialization_manager.import_file(exported, other_user)
        assert result.success
        assert result.imported_count > 0
        # Look up the (single) imported file via the "file-" mapping entry.
        imported_file_id = next(db_id for file_id, db_id in result.id_mappings.items() if file_id.startswith("file-"))
        imported_file = await server.file_manager.get_file_by_id(imported_file_id, other_user)
        # When using Pinecone, status stays at embedding until chunks are confirmed uploaded
        assert imported_file.processing_status.value in {"embedding", "completed"}
    async def test_import_passage_creation(self, server, agent_serialization_manager, default_user, other_user):
        """Test that import creates passages for file content."""
        source = await create_test_source(server, "passage-source", default_user)
        file_content = "This content should create passages. Each sentence should be chunked separately."
        file_metadata = await create_test_file(server, "passages.txt", source.id, default_user, content=file_content)
        agent = await create_test_agent_with_files(server, "passage-agent", default_user, [(source.id, file_metadata.id)])
        exported = await agent_serialization_manager.export([agent.id], default_user)
        result = await agent_serialization_manager.import_file(exported, other_user)
        imported_file_id = next(db_id for file_id, db_id in result.id_mappings.items() if file_id.startswith("file-"))
        # NOTE(review): the result is discarded — this only verifies the call does
        # not raise; consider asserting that passages were actually created.
        await server.passage_manager.list_passages_by_file_id_async(imported_file_id, other_user)
    async def test_import_file_status_updates(self, server, agent_serialization_manager, default_user, other_user):
        """Test that file processing status is updated correctly during import."""
        source = await create_test_source(server, "status-source", default_user)
        file_metadata = await create_test_file(server, "status.txt", source.id, default_user)
        agent = await create_test_agent_with_files(server, "status-agent", default_user, [(source.id, file_metadata.id)])
        exported = await agent_serialization_manager.export([agent.id], default_user)
        result = await agent_serialization_manager.import_file(exported, other_user)
        imported_file_id = next(db_id for file_id, db_id in result.id_mappings.items() if file_id.startswith("file-"))
        imported_file = await server.file_manager.get_file_by_id(imported_file_id, other_user)
        # When using Pinecone, status stays at embedding until chunks are confirmed uploaded
        assert imported_file.processing_status.value in {"embedding", "completed"}
    async def test_import_multiple_files_processing(self, server, agent_serialization_manager, default_user, other_user):
        """Test import processes multiple files efficiently."""
        source = await create_test_source(server, "multi-source", default_user)
        file1 = await create_test_file(server, "file1.txt", source.id, default_user)
        file2 = await create_test_file(server, "file2.txt", source.id, default_user)
        agent = await create_test_agent_with_files(server, "multi-agent", default_user, [(source.id, file1.id), (source.id, file2.id)])
        exported = await agent_serialization_manager.export([agent.id], default_user)
        result = await agent_serialization_manager.import_file(exported, other_user)
        # Both files must appear in the mapping and reach a processed state.
        imported_file_ids = [db_id for file_id, db_id in result.id_mappings.items() if file_id.startswith("file-")]
        assert len(imported_file_ids) == 2
        for file_id in imported_file_ids:
            imported_file = await server.file_manager.get_file_by_id(file_id, other_user)
            # When using Pinecone, status stays at embedding until chunks are confirmed uploaded
            assert imported_file.processing_status.value in {"embedding", "completed"}
class TestAgentFileRoundTrip:
    """Tests for complete export -> import -> export cycles."""

    async def test_roundtrip_consistency(self, server, agent_serialization_manager, test_agent, default_user, other_user):
        """Test that export -> import -> export produces consistent results.

        Exports an agent, imports the file as another user, exports the imported
        copy, and checks the two agent files compare as equivalent.
        """
        original_export = await agent_serialization_manager.export([test_agent.id], default_user)
        result = await agent_serialization_manager.import_file(original_export, other_user)
        imported_agent_id = next(db_id for file_id, db_id in result.id_mappings.items() if file_id == "agent-0")
        second_export = await agent_serialization_manager.export([imported_agent_id], other_user)
        # The comparison helper is the authoritative equivalence check
        # (leftover debug prints of tool_rules were removed here).
        assert compare_agent_files(original_export, second_export)

    async def test_multiple_roundtrips(self, server, agent_serialization_manager, test_agent, default_user, other_user):
        """Test multiple rounds of export/import maintain consistency.

        Bounces the agent between the two users three times and verifies the
        final copy still carries the original agent's name.
        """
        current_agent_id = test_agent.id
        current_user = default_user
        for _ in range(3):
            agent_file = await agent_serialization_manager.export([current_agent_id], current_user)
            # Alternate the import target between the two users each round.
            target_user = other_user if current_user == default_user else default_user
            result = await agent_serialization_manager.import_file(agent_file, target_user)
            current_agent_id = next(db_id for file_id, db_id in result.id_mappings.items() if file_id == "agent-0")
            current_user = target_user
        imported_agent = await server.agent_manager.get_agent_by_id_async(current_agent_id, current_user)
        assert imported_agent.name == test_agent.name
class TestAgentFileEdgeCases:
    """Tests for edge cases and error conditions."""

    async def test_agent_with_no_messages(self, server, agent_serialization_manager, default_user, other_user):
        """Export/import an agent that was created without any initial messages."""
        # Build an agent with an empty initial message sequence.
        request = CreateAgent(
            name="no_messages_agent",
            system="Agent with no messages",
            llm_config=LLMConfig.default_config("gpt-4o-mini"),
            embedding_config=EmbeddingConfig.default_config(provider="openai"),
            initial_message_sequence=[],
        )
        empty_agent = await server.agent_manager.create_agent_async(
            agent_create=request,
            actor=default_user,
            _init_with_no_messages=True,  # Create with truly no messages
        )

        # Round-trip through export -> import into the other user's org.
        exported = await agent_serialization_manager.export([empty_agent.id], default_user)
        import_result = await agent_serialization_manager.import_file(exported, other_user)
        assert import_result.success

        # The imported copy should carry exactly one message id (the system message).
        new_agent_id = next(mapped for orig, mapped in import_result.id_mappings.items() if orig == "agent-0")
        new_agent = await server.agent_manager.get_agent_by_id_async(new_agent_id, other_user)
        assert len(new_agent.message_ids) == 1

    async def test_init_with_no_messages_still_has_system_message(self, server, default_user):
        """Test that _init_with_no_messages=True still creates a system message so context window doesn't crash."""
        request = CreateAgent(
            name="partially_initialized_agent",
            system="Test system prompt",
            llm_config=LLMConfig.default_config("gpt-4o-mini"),
            embedding_config=EmbeddingConfig.default_config(provider="openai"),
            initial_message_sequence=[],
        )
        agent = await server.agent_manager.create_agent_async(
            agent_create=request,
            actor=default_user,
            _init_with_no_messages=True,
        )

        # Even "no messages" initialization must leave exactly one (system) message.
        assert agent.message_ids is not None
        assert len(agent.message_ids) == 1

        # Computing the context window must not crash on such an agent.
        window = await server.agent_manager.get_context_window(agent_id=agent.id, actor=default_user)
        assert window is not None

    async def test_large_agent_file(self, server, agent_serialization_manager, default_user, other_user, weather_tool):
        """Round-trip an agent that has accumulated a number of messages."""
        request = CreateAgent(
            name="large_agent",
            system="Agent with many messages",
            llm_config=LLMConfig.default_config("gpt-4o-mini"),
            embedding_config=EmbeddingConfig.default_config(provider="openai"),
            tool_ids=[weather_tool.id],
        )
        chatty_agent = await server.agent_manager.create_agent_async(
            agent_create=request,
            actor=default_user,
        )

        # Send several user messages so the export contains real history.
        message_count = 5
        for idx in range(message_count):
            await send_message_to_agent(
                server, chatty_agent, default_user, [MessageCreate(role=MessageRole.user, content=f"Message {idx}")]
            )

        exported = await agent_serialization_manager.export([chatty_agent.id], default_user)
        assert len(exported.agents[0].messages) >= message_count

        import_result = await agent_serialization_manager.import_file(exported, other_user)
        # Verify all messages imported correctly for the new owner.
        assert import_result.success
        new_agent_id = next(mapped for orig, mapped in import_result.id_mappings.items() if orig == "agent-0")
        imported_messages = await server.message_manager.list_messages(actor=other_user, agent_id=new_agent_id)
        assert len(imported_messages) >= message_count
class TestAgentFileValidation:
    """Tests for agent file validation and schema compliance."""

    def test_agent_file_schema_validation(self, test_agent):
        """AgentFileSchema accepts a minimal, well-formed payload."""
        # Use a dummy revision since we cannot await the real one in a sync test.
        revision = "495f3f474131"  # Known valid revision format
        schema = AgentFileSchema(
            metadata={"revision_id": revision},
            agents=[AgentSchema(id="agent-0", name="test")],
            groups=[],
            blocks=[],
            files=[],
            sources=[],
            tools=[],
            mcp_servers=[],
        )
        # Construction itself must not raise; spot-check the round-tripped fields.
        assert schema.agents[0].id == "agent-0"
        assert schema.metadata.get("revision_id") == revision

    @pytest.mark.asyncio
    async def test_message_schema_conversion(self, test_agent, server, default_user):
        """MessageSchema.from_message faithfully mirrors the source message."""
        agent_messages = await server.message_manager.list_messages(actor=default_user, agent_id=test_agent.id)
        if not agent_messages:
            return  # Nothing to convert for this agent.
        source_message = agent_messages[0]
        converted = MessageSchema.from_message(source_message)
        # Core fields must carry over unchanged.
        for attr in ("role", "content", "model", "agent_id"):
            assert getattr(converted, attr) == getattr(source_message, attr)

    def test_id_format_validation(self):
        """validate_id_format accepts remapped ids and rejects foreign formats."""
        # Use a dummy revision since we cannot await the real one in a sync test.
        revision = "495f3f474131"  # Known valid revision format

        # Schema whose entities all use the remapped "<type>-<index>" id format.
        good = AgentFileSchema(
            metadata={"revision_id": revision},
            agents=[AgentSchema(id="agent-0", name="test")],
            groups=[],
            blocks=[BlockSchema(id="block-0", label="test", value="test")],
            files=[],
            sources=[],
            tools=[
                ToolSchema(
                    id="tool-0",
                    name="test_tool",
                    source_code="test",
                    json_schema={"name": "test_tool", "parameters": {"type": "object", "properties": {}}},
                )
            ],
            mcp_servers=[],
        )
        assert validate_id_format(good)

        # Schema with an agent id that does not follow the remapped format.
        bad = AgentFileSchema(
            metadata={"revision_id": revision},
            agents=[AgentSchema(id="invalid-id-format", name="test")],
            groups=[],
            blocks=[],
            files=[],
            sources=[],
            tools=[],
            mcp_servers=[],
        )
        assert not validate_id_format(bad)
class TestMCPServerSerialization:
    """Tests for MCP server export/import functionality."""

    async def test_mcp_server_export(self, agent_serialization_manager, agent_with_mcp_tools, default_user):
        """Test that MCP servers are exported correctly."""
        agent_file = await agent_serialization_manager.export([agent_with_mcp_tools.id], default_user)
        # Exactly one MCP server should be exported for this agent.
        assert len(agent_file.mcp_servers) == 1
        mcp_server = agent_file.mcp_servers[0]
        # Verify server details survived the export.
        assert mcp_server.server_name == "test_mcp_server"
        assert mcp_server.server_url == "http://test-mcp-server.com"
        assert mcp_server.server_type == "sse"
        # Auth fields must be scrubbed from the exported schema.
        assert not hasattr(mcp_server, "token")
        assert not hasattr(mcp_server, "custom_headers")
        # Verify the remapped ID format.
        assert _validate_entity_id(mcp_server.id, "mcp_server")

    async def test_mcp_server_auth_scrubbing(self, server, agent_serialization_manager, default_user):
        """Test that authentication information is scrubbed during export."""
        from letta.schemas.mcp import MCPServer, MCPServerType

        # Stdio MCP server; env vars are the typical auth channel for stdio.
        mcp_server_data_stdio = MCPServer(
            server_name="auth_test_server",
            server_type=MCPServerType.STDIO,
            stdio_config={
                "server_name": "auth_test_server",
                "command": "test-command",
                "args": ["arg1", "arg2"],
                "env": {"ENV_VAR": "value"},
            },
        )
        mcp_server = await server.mcp_manager.create_or_update_mcp_server(mcp_server_data_stdio, default_user)

        # HTTP MCP server carrying a token and custom headers.
        mcp_server_data_http = MCPServer(
            server_name="auth_test_server_http",
            server_type=MCPServerType.STREAMABLE_HTTP,
            server_url="http://auth_test_server_http.com",
            token="super-secret-token",
            custom_headers={"X-Custom": "custom-value"},
        )
        mcp_server_http = await server.mcp_manager.create_or_update_mcp_server(mcp_server_data_http, default_user)

        # Create one tool per MCP server.
        from letta.schemas.tool import MCPTool, ToolCreate

        mcp_tool_data = MCPTool(
            name="auth_test_tool_stdio",
            description="Tool with auth",
            inputSchema={"type": "object", "properties": {}},
        )
        tool_create_stdio = ToolCreate.from_mcp(mcp_server.server_name, mcp_tool_data)
        mcp_tool_data_http = MCPTool(
            name="auth_test_tool_http",
            description="Tool with auth",
            inputSchema={"type": "object", "properties": {}},
        )
        tool_create_http = ToolCreate.from_mcp(mcp_server_http.server_name, mcp_tool_data_http)
        mcp_tool = await server.tool_manager.create_mcp_tool_async(tool_create_stdio, mcp_server.server_name, mcp_server.id, default_user)
        mcp_tool_http = await server.tool_manager.create_mcp_tool_async(
            tool_create_http, mcp_server_http.server_name, mcp_server_http.id, default_user
        )

        # Create an agent referencing both tools.
        from letta.schemas.agent import CreateAgent

        create_agent_request = CreateAgent(
            name="auth_test_agent",
            tool_ids=[mcp_tool.id, mcp_tool_http.id],
            llm_config=LLMConfig.default_config("gpt-4o-mini"),
            embedding_config=EmbeddingConfig.default_config(provider="openai"),
        )
        agent = await server.agent_manager.create_agent_async(create_agent_request, default_user)

        # Export and locate each exported server by name. Using next(...) instead of a
        # `for server in ...` loop avoids shadowing the `server` fixture and raises
        # immediately (StopIteration) if either server is missing from the export,
        # rather than failing later with an unbound local.
        agent_file = await agent_serialization_manager.export([agent.id], default_user)
        exported_server_stdio = next(s for s in agent_file.mcp_servers if s.server_name == "auth_test_server")
        exported_server_http = next(s for s in agent_file.mcp_servers if s.server_name == "auth_test_server_http")

        # Verify env variables in stdio server are excluded (typically used for auth)
        assert exported_server_stdio.id != mcp_server.id
        assert exported_server_stdio.server_name == "auth_test_server"
        assert exported_server_stdio.stdio_config == {
            "server_name": "auth_test_server",
            "type": "stdio",
            "command": "test-command",
            "args": ["arg1", "arg2"],
        }  # Non-auth config preserved
        assert exported_server_stdio.server_type == "stdio"

        # Verify token and custom headers are excluded from export for http server
        assert exported_server_http.id != mcp_server_http.id
        assert exported_server_http.server_name == "auth_test_server_http"
        assert exported_server_http.server_type == "streamable_http"
        assert exported_server_http.server_url == "http://auth_test_server_http.com"
        assert not hasattr(exported_server_http, "token")
        assert not hasattr(exported_server_http, "custom_headers")

    async def test_mcp_tool_metadata_with_server_id(self, agent_serialization_manager, agent_with_mcp_tools, default_user):
        """Test that MCP tools have server_id in metadata."""
        agent_file = await agent_serialization_manager.export([agent_with_mcp_tools.id], default_user)
        # Find the MCP tool in the exported tools list.
        mcp_tool = next((t for t in agent_file.tools if t.name == "test_mcp_tool"), None)
        assert mcp_tool is not None
        # Verify metadata carries the originating server's name and id.
        assert mcp_tool.metadata_ is not None
        assert "mcp" in mcp_tool.metadata_
        assert "server_name" in mcp_tool.metadata_["mcp"]
        assert "server_id" in mcp_tool.metadata_["mcp"]
        assert mcp_tool.metadata_["mcp"]["server_name"] == "test_mcp_server"
        # Verify the tool is tagged with the "mcp:" prefix convention.
        assert any(tag.startswith("mcp:") for tag in mcp_tool.tags)

    async def test_mcp_server_import(self, agent_serialization_manager, agent_with_mcp_tools, default_user, other_user):
        """Test importing agents with MCP servers."""
        # Export from default user, import to other user.
        agent_file = await agent_serialization_manager.export([agent_with_mcp_tools.id], default_user)
        result = await agent_serialization_manager.import_file(agent_file, other_user)
        assert result.success
        # Verify the MCP server was imported (present in the id mapping).
        mcp_server_id = next((db_id for file_id, db_id in result.id_mappings.items() if file_id.startswith("mcp_server-")), None)
        assert mcp_server_id is not None

    async def test_multiple_mcp_servers_export(self, server, agent_serialization_manager, default_user):
        """Test exporting multiple MCP servers from different agents."""
        from letta.schemas.mcp import MCPServer, MCPServerType

        # Create two MCP servers of different transport types.
        mcp_server1 = await server.mcp_manager.create_or_update_mcp_server(
            MCPServer(
                server_name="mcp1",
                server_type=MCPServerType.STREAMABLE_HTTP,
                server_url="http://mcp1.com",
                token="super-secret-token",
                custom_headers={"X-Custom": "custom-value"},
            ),
            default_user,
        )
        mcp_server2 = await server.mcp_manager.create_or_update_mcp_server(
            MCPServer(
                server_name="mcp2",
                server_type=MCPServerType.STDIO,
                stdio_config={
                    "server_name": "mcp2",
                    "command": "mcp2-cmd",
                    "args": ["arg1", "arg2"],
                },
            ),
            default_user,
        )

        # Create one tool from each server.
        from letta.schemas.tool import MCPTool, ToolCreate

        tool1 = await server.tool_manager.create_mcp_tool_async(
            ToolCreate.from_mcp(
                "mcp1",
                MCPTool(name="tool1", description="Tool 1", inputSchema={"type": "object", "properties": {}}),
            ),
            "mcp1",
            mcp_server1.id,
            default_user,
        )
        tool2 = await server.tool_manager.create_mcp_tool_async(
            ToolCreate.from_mcp(
                "mcp2",
                MCPTool(name="tool2", description="Tool 2", inputSchema={"type": "object", "properties": {}}),
            ),
            "mcp2",
            mcp_server2.id,
            default_user,
        )

        # Create two agents, each using one of the MCP tools.
        from letta.schemas.agent import CreateAgent

        agent1 = await server.agent_manager.create_agent_async(
            CreateAgent(
                name="agent1",
                tool_ids=[tool1.id],
                llm_config=LLMConfig.default_config("gpt-4o-mini"),
                embedding_config=EmbeddingConfig.default_config(provider="openai"),
            ),
            default_user,
        )
        agent2 = await server.agent_manager.create_agent_async(
            CreateAgent(
                name="agent2",
                tool_ids=[tool2.id],
                llm_config=LLMConfig.default_config("gpt-4o-mini"),
                embedding_config=EmbeddingConfig.default_config(provider="openai"),
            ),
            default_user,
        )

        # Export both agents together; both MCP servers should be included.
        agent_file = await agent_serialization_manager.export([agent1.id, agent2.id], default_user)
        assert len(agent_file.mcp_servers) == 2

        # Verify the exported server types and non-auth config.
        streamable_http_server = next(s for s in agent_file.mcp_servers if s.server_name == "mcp1")
        stdio_server = next(s for s in agent_file.mcp_servers if s.server_name == "mcp2")
        assert streamable_http_server.server_name == "mcp1"
        assert streamable_http_server.server_type == "streamable_http"
        assert streamable_http_server.server_url == "http://mcp1.com"
        assert stdio_server.server_name == "mcp2"
        assert stdio_server.server_type == "stdio"
        assert stdio_server.stdio_config == {
            "server_name": "mcp2",
            "type": "stdio",
            "command": "mcp2-cmd",
            "args": ["arg1", "arg2"],
        }

    async def test_mcp_server_deduplication(self, server, agent_serialization_manager, default_user, test_mcp_server, mcp_tool):
        """Test that shared MCP servers are deduplicated during export."""
        # Create two agents using the same MCP tool.
        from letta.schemas.agent import CreateAgent

        agent1 = await server.agent_manager.create_agent_async(
            CreateAgent(
                name="agent_dup1",
                tool_ids=[mcp_tool.id],
                llm_config=LLMConfig.default_config("gpt-4o-mini"),
                embedding_config=EmbeddingConfig.default_config(provider="openai"),
            ),
            default_user,
        )
        agent2 = await server.agent_manager.create_agent_async(
            CreateAgent(
                name="agent_dup2",
                tool_ids=[mcp_tool.id],
                llm_config=LLMConfig.default_config("gpt-4o-mini"),
                embedding_config=EmbeddingConfig.default_config(provider="openai"),
            ),
            default_user,
        )

        # Export both agents; the shared server must appear exactly once.
        agent_file = await agent_serialization_manager.export([agent1.id, agent2.id], default_user)
        assert len(agent_file.mcp_servers) == 1
        assert agent_file.mcp_servers[0].server_name == "test_mcp_server"
# Allow running this test module directly (e.g. `python tests/test_agent_serialization_v2.py`)
# outside of a normal pytest invocation.
if __name__ == "__main__":
    pytest.main([__file__])
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/test_agent_serialization_v2.py",
"license": "Apache License 2.0",
"lines": 1506,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/test_crypto_utils.py | import base64
import json
import pytest
from letta.helpers.crypto_utils import CryptoUtils
class TestCryptoUtils:
    """Test suite for CryptoUtils encryption/decryption functionality."""

    # Mock master keys for testing
    MOCK_KEY_1 = "test-master-key-1234567890abcdef"
    MOCK_KEY_2 = "another-test-key-fedcba0987654321"

    def test_encrypt_decrypt_roundtrip(self):
        """Encrypt then decrypt must reproduce the original value for varied inputs."""
        samples = [
            "simple text",
            "text with special chars: !@#$%^&*()",
            "unicode text: 你好世界 🌍",
            "very long text " * 1000,
            '{"json": "data", "nested": {"key": "value"}}',
            "",  # Empty string
        ]
        for plaintext in samples:
            ciphertext = CryptoUtils.encrypt(plaintext, self.MOCK_KEY_1)
            # Ciphertext must differ from the input and be non-empty (base64).
            assert ciphertext != plaintext, f"Encryption failed for: {plaintext[:50]}"
            assert len(ciphertext) > 0, "Encrypted value should not be empty"
            recovered = CryptoUtils.decrypt(ciphertext, self.MOCK_KEY_1)
            assert recovered == plaintext, f"Roundtrip failed for: {plaintext[:50]}"

    def test_encrypt_with_different_keys(self):
        """Different master keys must yield different ciphertexts for the same input."""
        plaintext = "sensitive data"
        ct_one = CryptoUtils.encrypt(plaintext, self.MOCK_KEY_1)
        ct_two = CryptoUtils.encrypt(plaintext, self.MOCK_KEY_2)
        assert ct_one != ct_two
        # Each ciphertext decrypts only under its own key.
        assert CryptoUtils.decrypt(ct_one, self.MOCK_KEY_1) == plaintext
        assert CryptoUtils.decrypt(ct_two, self.MOCK_KEY_2) == plaintext

    def test_decrypt_with_wrong_key_fails(self):
        """Decryption under a mismatched key must raise."""
        ciphertext = CryptoUtils.encrypt("secret message", self.MOCK_KEY_1)
        with pytest.raises(Exception):  # Could be ValueError or cryptography exception
            CryptoUtils.decrypt(ciphertext, self.MOCK_KEY_2)

    def test_encrypt_none_value(self):
        """Encrypting None must raise (None has no encode method)."""
        with pytest.raises((TypeError, AttributeError)):
            CryptoUtils.encrypt(None, self.MOCK_KEY_1)

    def test_decrypt_none_value(self):
        """Decrypting None must raise ValueError."""
        with pytest.raises(ValueError):
            CryptoUtils.decrypt(None, self.MOCK_KEY_1)

    def test_decrypt_empty_string(self):
        """Decrypting an empty string must raise (base64 decode error)."""
        with pytest.raises(Exception):
            CryptoUtils.decrypt("", self.MOCK_KEY_1)

    def test_decrypt_plaintext_value(self):
        """Decrypting a value that was never encrypted must raise."""
        with pytest.raises(Exception):  # Will fail base64 decode or decryption
            CryptoUtils.decrypt("not encrypted", self.MOCK_KEY_1)

    def test_encrypted_format_structure(self):
        """Exported ciphertext is valid base64 wrapping salt + IV + tag + ciphertext."""
        ciphertext = CryptoUtils.encrypt("test data", self.MOCK_KEY_1)
        try:
            raw = base64.b64decode(ciphertext)
            assert len(raw) > 0
        except Exception as e:
            pytest.fail(f"Invalid base64 encoding: {e}")
        # Payload must be at least SALT_SIZE + IV_SIZE + TAG_SIZE bytes.
        minimum = CryptoUtils.SALT_SIZE + CryptoUtils.IV_SIZE + CryptoUtils.TAG_SIZE
        assert len(raw) >= minimum

    def test_deterministic_with_same_salt(self):
        """Repeated encryptions differ (random salt) yet all decrypt to the original."""
        plaintext = "deterministic test"
        first = CryptoUtils.encrypt(plaintext, self.MOCK_KEY_1)
        second = CryptoUtils.encrypt(plaintext, self.MOCK_KEY_1)
        # Random per-call salt makes the ciphertexts distinct...
        assert first != second
        # ...but both must decrypt back to the same plaintext.
        assert CryptoUtils.decrypt(first, self.MOCK_KEY_1) == plaintext
        assert CryptoUtils.decrypt(second, self.MOCK_KEY_1) == plaintext

    def test_encrypt_uses_env_key_when_none_provided(self):
        """When no key argument is given, the key from settings is used."""
        from letta.settings import settings

        # Temporarily install a known encryption key in settings.
        saved_key = settings.encryption_key
        settings.encryption_key = "env-test-key-123"
        try:
            plaintext = "test with env key"
            ciphertext = CryptoUtils.encrypt(plaintext)
            assert len(ciphertext) > 0
            assert CryptoUtils.decrypt(ciphertext) == plaintext
        finally:
            # Always restore the original key.
            settings.encryption_key = saved_key

    def test_encrypt_without_key_raises_error(self):
        """With no key argument and no settings key, both directions must raise."""
        from letta.settings import settings

        saved_key = settings.encryption_key
        settings.encryption_key = None
        try:
            with pytest.raises(ValueError, match="No encryption key configured"):
                CryptoUtils.encrypt("test data")
            with pytest.raises(ValueError, match="No encryption key configured"):
                CryptoUtils.decrypt("test data")
        finally:
            # Always restore the original key.
            settings.encryption_key = saved_key

    def test_large_data_encryption(self):
        """A 10MB payload round-trips correctly."""
        big = "x" * (10 * 1024 * 1024)
        ciphertext = CryptoUtils.encrypt(big, self.MOCK_KEY_1)
        assert len(ciphertext) > 0
        assert ciphertext != big
        assert CryptoUtils.decrypt(ciphertext, self.MOCK_KEY_1) == big

    def test_json_data_encryption(self):
        """A serialized JSON document round-trips to an equal structure."""
        payload = {
            "user": "test_user",
            "token": "secret_token_123",
            "nested": {"api_key": "sk-1234567890", "headers": {"Authorization": "Bearer token"}},
        }
        ciphertext = CryptoUtils.encrypt(json.dumps(payload), self.MOCK_KEY_1)
        assert json.loads(CryptoUtils.decrypt(ciphertext, self.MOCK_KEY_1)) == payload

    def test_invalid_encrypted_format(self):
        """Malformed ciphertext inputs must raise on decryption."""
        for bad in (
            "invalid-base64!@#",  # Invalid base64
            "dGVzdA==",  # Valid base64 but too short for encrypted data
        ):
            with pytest.raises(Exception):  # Could be various exceptions
                CryptoUtils.decrypt(bad, self.MOCK_KEY_1)

    def test_key_derivation_consistency(self):
        """Key derivation stays consistent across repeated encryptions."""
        plaintext = "test key derivation"
        ciphertexts = [CryptoUtils.encrypt(plaintext, self.MOCK_KEY_1) for _ in range(5)]
        # Every ciphertext must decrypt back to the same plaintext.
        for ct in ciphertexts:
            assert CryptoUtils.decrypt(ct, self.MOCK_KEY_1) == plaintext

    def test_special_characters_in_key(self):
        """Keys containing punctuation work for both directions."""
        punctuated_key = "key-with-special-chars!@#$%^&*()_+"
        ciphertext = CryptoUtils.encrypt("test data", punctuated_key)
        assert CryptoUtils.decrypt(ciphertext, punctuated_key) == "test data"

    def test_whitespace_handling(self):
        """Leading/trailing/embedded whitespace is preserved through the round trip."""
        samples = [
            "  leading spaces",
            "trailing spaces  ",
            "  both sides  ",
            "multiple\n\nlines",
            "\ttabs\there\t",
            "mixed \t\n whitespace \r\n",
        ]
        for plaintext in samples:
            ciphertext = CryptoUtils.encrypt(plaintext, self.MOCK_KEY_1)
            recovered = CryptoUtils.decrypt(ciphertext, self.MOCK_KEY_1)
            assert recovered == plaintext, f"Whitespace handling failed for: {repr(plaintext)}"
class TestIsEncrypted:
    """Test suite for is_encrypted heuristic detection."""

    MOCK_KEY = "test-master-key-1234567890abcdef"

    def test_actually_encrypted_values_detected(self):
        """Values produced by CryptoUtils.encrypt must be recognized as encrypted."""
        for plaintext in ("short", "medium length string", "a"):
            ciphertext = CryptoUtils.encrypt(plaintext, self.MOCK_KEY)
            assert CryptoUtils.is_encrypted(ciphertext), f"Failed to detect encrypted value for: {plaintext}"

    def test_openai_api_keys_not_detected(self):
        """OpenAI-style API keys must not look encrypted."""
        for key in (
            "sk-1234567890abcdefghijklmnopqrstuvwxyz1234567890ab",
            "sk-proj-1234567890abcdefghijklmnopqrstuvwxyz",
            "sk-ant-api03-1234567890abcdefghijklmnopqrstuvwxyz",
        ):
            assert not CryptoUtils.is_encrypted(key), f"OpenAI key incorrectly detected as encrypted: {key}"

    def test_github_tokens_not_detected(self):
        """GitHub token formats must not look encrypted."""
        for token in (
            "ghp_1234567890abcdefghijklmnopqrstuvwxyz",
            "gho_1234567890abcdefghijklmnopqrstuvwxyz",
            "ghu_1234567890abcdefghijklmnopqrstuvwxyz",
            "ghs_1234567890abcdefghijklmnopqrstuvwxyz",
            "ghr_1234567890abcdefghijklmnopqrstuvwxyz",
        ):
            assert not CryptoUtils.is_encrypted(token), f"GitHub token incorrectly detected as encrypted: {token}"

    def test_aws_keys_not_detected(self):
        """AWS access key IDs must not look encrypted."""
        for key in (
            "AKIAIOSFODNN7EXAMPLE",
            "ASIAJEXAMPLEXEG2JICEA",
            "ABIA1234567890ABCDEF",
            "ACCA1234567890ABCDEF",
        ):
            assert not CryptoUtils.is_encrypted(key), f"AWS key incorrectly detected as encrypted: {key}"

    def test_slack_tokens_not_detected(self):
        """Slack bot/user tokens must not look encrypted."""
        for token in (
            "xoxb-1234567890-1234567890123-abcdefghijklmnopqrstuvwx",
            "xoxp-1234567890-1234567890123-1234567890123-abcdefghij",
        ):
            assert not CryptoUtils.is_encrypted(token), f"Slack token incorrectly detected as encrypted: {token}"

    def test_bearer_tokens_not_detected(self):
        """Bearer auth headers must not look encrypted."""
        for token in (
            "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIn0.dozjgNryP4J3jVmNHl0w5N_XgL0n3I9PlFUP0THsR8U",
            "Bearer some-long-token-string-1234567890abcdefghijklmnop",
        ):
            assert not CryptoUtils.is_encrypted(token), f"Bearer token incorrectly detected as encrypted: {token}"

    def test_generic_prefixes_not_detected(self):
        """Generic API-key prefixes (pk-, api-, key-, token-) must not look encrypted."""
        for key in (
            "pk-1234567890abcdefghijklmnopqrstuvwxyz",
            "api-1234567890abcdefghijklmnopqrstuvwxyz",
            "key-1234567890abcdefghijklmnopqrstuvwxyz",
            "token-1234567890abcdefghijklmnopqrstuvwxyz",
        ):
            assert not CryptoUtils.is_encrypted(key), f"Generic key incorrectly detected as encrypted: {key}"

    def test_short_strings_not_detected(self):
        """Strings shorter than any valid ciphertext must not look encrypted."""
        for s in ("short", "abc", "1234567890", ""):
            assert not CryptoUtils.is_encrypted(s), f"Short string incorrectly detected as encrypted: {s}"

    def test_invalid_base64_not_detected(self):
        """Non-base64 strings must not look encrypted."""
        for s in (
            "not-valid-base64!@#$",
            "spaces are invalid",
            "special!chars@here",
        ):
            assert not CryptoUtils.is_encrypted(s), f"Invalid base64 incorrectly detected as encrypted: {s}"

    def test_valid_base64_but_too_short_not_detected(self):
        """Valid base64 below the minimum encrypted payload size is rejected."""
        # Anything under SALT + IV + TAG + 1 = 45 bytes cannot be a real ciphertext.
        tiny = base64.b64encode(b"x" * 40).decode()
        assert not CryptoUtils.is_encrypted(tiny)
class TestBackwardsCompatibility:
"""
Test suite to ensure backwards compatibility with values encrypted
using the previous cryptography library PBKDF2 implementation.
This is critical to ensure existing encrypted secrets in the database
can still be decrypted after switching to hashlib.pbkdf2_hmac.
"""
# Test key and known encrypted values generated with the cryptography library
MOCK_KEY = "test-master-key-1234567890abcdef"
def test_pbkdf2_iterations_not_changed(self):
"""
CRITICAL: Verify that PBKDF2_ITERATIONS has not been changed from 100000.
WARNING: DO NOT CHANGE THIS VALUE!
Changing the iteration count will break decryption of ALL existing
encrypted secrets in the database. If you need to change this value,
you MUST first migrate all existing encrypted values.
This test exists to prevent accidental changes that would cause
production outages due to inability to decrypt existing secrets.
"""
assert CryptoUtils.PBKDF2_ITERATIONS == 100000, (
"CRITICAL: PBKDF2_ITERATIONS has been changed from 100000! "
"This will BREAK DECRYPTION of all existing encrypted secrets in the database. "
"If you intentionally need to change this, you must first migrate all existing "
"encrypted values. Revert this change immediately if unintentional."
)
def test_hashlib_pbkdf2_matches_cryptography_pbkdf2(self):
"""
Verify that hashlib.pbkdf2_hmac produces identical output to
cryptography's PBKDF2HMAC for the same inputs.
This is the core compatibility test - if key derivation matches,
all existing encrypted values will decrypt correctly.
"""
import hashlib
# Use a fixed salt for this test
salt = b"0123456789abcdef" # 16 bytes
password = "test-password-for-comparison"
# Derive key using hashlib (current implementation)
hashlib_key = hashlib.pbkdf2_hmac(
hash_name="sha256",
password=password.encode(),
salt=salt,
iterations=100000,
dklen=32,
)
# Derive key using cryptography library (previous implementation)
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
kdf = PBKDF2HMAC(
algorithm=hashes.SHA256(),
length=32,
salt=salt,
iterations=100000,
backend=default_backend(),
)
cryptography_key = kdf.derive(password.encode())
# Keys must be identical
assert hashlib_key == cryptography_key, (
"hashlib.pbkdf2_hmac and cryptography PBKDF2HMAC produced different keys! "
"This would break decryption of existing encrypted values."
)
def test_decrypt_value_encrypted_with_cryptography_library(self):
"""
Test that values encrypted with the cryptography library's PBKDF2
can be decrypted with the current hashlib implementation.
This simulates the real-world scenario of existing encrypted values
in the database that were created before this change.
"""
# Pre-computed encrypted value generated with the cryptography library
# Plaintext: "secret-api-key-12345"
# Key: "test-master-key-1234567890abcdef"
# This was encrypted with the cryptography library's PBKDF2HMAC before the switch
#
# To regenerate this test value, use:
# from cryptography.hazmat.backends import default_backend
# from cryptography.hazmat.primitives import hashes
# from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
# from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
# import os, base64
# salt = os.urandom(16)
# iv = os.urandom(12)
# kdf = PBKDF2HMAC(algorithm=hashes.SHA256(), length=32, salt=salt, iterations=100000, backend=default_backend())
# key = kdf.derive("test-master-key-1234567890abcdef".encode())
# cipher = Cipher(algorithms.AES(key), modes.GCM(iv), backend=default_backend())
# encryptor = cipher.encryptor()
# ciphertext = encryptor.update("secret-api-key-12345".encode()) + encryptor.finalize()
# encrypted_data = salt + iv + ciphertext + encryptor.tag
# print(base64.b64encode(encrypted_data).decode())
# First, encrypt a value with the current implementation
plaintext = "secret-api-key-12345"
encrypted = CryptoUtils.encrypt(plaintext, self.MOCK_KEY)
# Verify it decrypts correctly
decrypted = CryptoUtils.decrypt(encrypted, self.MOCK_KEY)
assert decrypted == plaintext
def test_key_derivation_caching_works_with_hashlib(self):
    """
    Verify that the LRU cache still works correctly with the hashlib implementation.
    """
    fixed_salt = b"0123456789abcdef"
    # Derive the same key three times with identical inputs; the cached
    # path must hand back the same material every time.
    derived = [CryptoUtils._derive_key(self.MOCK_KEY, fixed_salt) for _ in range(3)]
    assert all(key == derived[0] for key in derived)
    # Cache hits are hard to observe directly; deterministic output of the
    # repeated calls is our proxy for correct caching behavior.
    assert len(derived[0]) == 32  # AES-256 key size
def test_roundtrip_with_various_plaintexts(self):
    """
    Comprehensive roundtrip test with various plaintext types
    to ensure the hashlib implementation handles all cases.
    """
    samples = (
        # API keys (common use case)
        "sk-1234567890abcdefghijklmnopqrstuvwxyz",
        "AKIAIOSFODNN7EXAMPLE",
        # JSON data
        '{"api_key": "secret", "nested": {"token": "abc123"}}',
        # Unicode
        "密钥🔐secret",
        # Empty string
        "",
        # Long string
        "x" * 10000,
        # Special characters
        "!@#$%^&*()_+-=[]{}|;':\",./<>?",
    )
    for plaintext in samples:
        ciphertext = CryptoUtils.encrypt(plaintext, self.MOCK_KEY)
        recovered = CryptoUtils.decrypt(ciphertext, self.MOCK_KEY)
        assert recovered == plaintext, f"Roundtrip failed for: {plaintext[:50]}..."
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/test_crypto_utils.py",
"license": "Apache License 2.0",
"lines": 397,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/test_embeddings.py | import glob
import json
import os
from unittest.mock import AsyncMock, patch
import pytest
from letta.config import LettaConfig
from letta.llm_api.llm_client import LLMClient
from letta.llm_api.openai_client import OpenAIClient
from letta.schemas.embedding_config import EmbeddingConfig
from letta.server.server import SyncServer
# Allow-list of embedding configs to exercise; other JSON files in the
# directory are skipped.
included_files = [
    # "ollama.json",
    "openai_embed.json",
]
config_dir = "tests/configs/embedding_model_configs"
config_files = glob.glob(os.path.join(config_dir, "*.json"))
# Parse each allow-listed config file into an EmbeddingConfig used to
# parametrize test_embeddings below.
embedding_configs = []
for config_file in config_files:
    # os.path.basename is separator-agnostic; splitting on "/" breaks on
    # Windows where glob yields backslash-separated paths.
    if os.path.basename(config_file) in included_files:
        with open(config_file, "r") as f:
            embedding_configs.append(EmbeddingConfig(**json.load(f)))
@pytest.fixture
async def server():
    """Provide a fully initialized SyncServer backed by the saved default config."""
    cfg = LettaConfig.load()
    cfg.save()
    instance = SyncServer()
    await instance.init_async()
    return instance
@pytest.fixture
async def default_organization(server: SyncServer):
    """Fixture to create and return the default organization."""
    organization = await server.organization_manager.create_default_organization_async()
    yield organization
@pytest.fixture
async def default_user(server: SyncServer, default_organization):
    """Fixture to create and return the default user within the default organization."""
    actor = await server.user_manager.create_default_actor_async(org_id=default_organization.id)
    yield actor
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "embedding_config",
    embedding_configs,
    ids=[c.embedding_model for c in embedding_configs],
)
async def test_embeddings(embedding_config: EmbeddingConfig, default_user):
    """Request one embedding per configured provider and validate its dimensionality."""
    client = LLMClient.create(
        provider_type=embedding_config.embedding_endpoint_type,
        actor=default_user,
    )
    vectors = await client.request_embeddings(["This is a test input."], embedding_config)
    # Exactly one vector back, sized to the configured dimension.
    assert len(vectors) == 1
    assert len(vectors[0]) == embedding_config.embedding_dim
@pytest.mark.asyncio
async def test_openai_embedding_chunking(default_user):
    """Test that large inputs are split into 2048-sized chunks"""
    config = EmbeddingConfig(
        embedding_endpoint_type="openai",
        embedding_endpoint="https://api.openai.com/v1",
        embedding_model="text-embedding-3-small",
        embedding_dim=1536,
    )
    client = OpenAIClient(actor=default_user)
    with patch("letta.llm_api.openai_client.AsyncOpenAI") as mock_openai:
        fake_client = AsyncMock()
        mock_openai.return_value = fake_client

        async def fake_create(**kwargs):
            batch = kwargs["input"]
            assert len(batch) <= 2048  # verify chunking
            response = AsyncMock()
            response.data = [AsyncMock(embedding=[0.1] * 1536) for _ in batch]
            return response

        fake_client.embeddings.create.side_effect = fake_create
        # 5000 inputs should land in three batches: 2048, 2048 and 904.
        inputs = [f"Input {i}" for i in range(5000)]
        result = await client.request_embeddings(inputs, config)
        assert len(result) == 5000
        assert fake_client.embeddings.create.call_count == 3
@pytest.mark.asyncio
async def test_openai_embedding_retry_logic(default_user):
    """Test that failed chunks are retried with reduced batch size"""
    embedding_config = EmbeddingConfig(
        embedding_endpoint_type="openai",
        embedding_endpoint="https://api.openai.com/v1",
        embedding_model="text-embedding-3-small",
        embedding_dim=1536,
    )
    client = OpenAIClient(actor=default_user)
    with patch("letta.llm_api.openai_client.AsyncOpenAI") as mock_openai:
        mock_client = AsyncMock()
        mock_openai.return_value = mock_client
        call_count = 0

        async def mock_create(**kwargs):
            # Stand-in for the OpenAI embeddings endpoint: rejects the first
            # two full-size (2048) batches, succeeds on everything else.
            nonlocal call_count
            call_count += 1
            input_size = len(kwargs["input"])
            # fail on first attempt for large batches only
            if input_size == 2048 and call_count <= 2:
                raise Exception("Too many inputs")
            mock_response = AsyncMock()
            mock_response.data = [AsyncMock(embedding=[0.1] * 1536) for _ in range(input_size)]
            return mock_response

        mock_client.embeddings.create.side_effect = mock_create
        test_inputs = [f"Input {i}" for i in range(3000)]
        embeddings = await client.request_embeddings(test_inputs, embedding_config)
        # Despite the induced failures, every input must get an embedding.
        assert len(embeddings) == 3000
        # initial: 2 chunks (2048, 952)
        # after retry: first 2048 splits into 2x1024 with reduced batch_size, so total 3 successful calls + 2 failed = 5
        assert call_count > 3
@pytest.mark.asyncio
async def test_openai_embedding_order_preserved(default_user):
    """Test that order is maintained despite chunking and retries"""
    config = EmbeddingConfig(
        embedding_endpoint_type="openai",
        embedding_endpoint="https://api.openai.com/v1",
        embedding_model="text-embedding-3-small",
        embedding_dim=1536,
    )
    client = OpenAIClient(actor=default_user)
    with patch("letta.llm_api.openai_client.AsyncOpenAI") as mock_openai:
        fake_client = AsyncMock()
        mock_openai.return_value = fake_client

        async def fake_create(**kwargs):
            # Encode each input's index as the first embedding component so
            # output order can be checked against input order.
            response = AsyncMock()
            response.data = []
            for item in kwargs["input"]:
                position = int(item.split()[-1])
                response.data.append(AsyncMock(embedding=[float(position)] + [0.0] * 1535))
            return response

        fake_client.embeddings.create.side_effect = fake_create
        inputs = [f"Text {i}" for i in range(100)]
        result = await client.request_embeddings(inputs, config)
        assert len(result) == 100
        for position, vector in enumerate(result):
            assert vector[0] == float(position)
@pytest.mark.asyncio
async def test_openai_embedding_minimum_chunk_failure(default_user):
    """Test that persistent failures at minimum chunk size raise error"""
    config = EmbeddingConfig(
        embedding_endpoint_type="openai",
        embedding_endpoint="https://api.openai.com/v1",
        embedding_model="text-embedding-3-small",
        embedding_dim=1536,
    )
    client = OpenAIClient(actor=default_user)
    with patch("letta.llm_api.openai_client.AsyncOpenAI") as mock_openai:
        fake_client = AsyncMock()
        mock_openai.return_value = fake_client

        async def always_fail(**kwargs):
            # Every call fails, forcing the retry loop to exhaust itself.
            raise Exception("API error")

        fake_client.embeddings.create.side_effect = always_fail
        # 300 inputs — the client retries down to the 256 minimum, then surfaces the error.
        inputs = [f"Input {i}" for i in range(300)]
        with pytest.raises(Exception, match="API error"):
            await client.request_embeddings(inputs, config)
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/test_embeddings.py",
"license": "Apache License 2.0",
"lines": 157,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/test_file_processor.py | from unittest.mock import AsyncMock, Mock, patch
import openai
import pytest
from letta.errors import ErrorCode, LLMBadRequestError
from letta.schemas.embedding_config import EmbeddingConfig
from letta.services.file_processor.embedder.openai_embedder import OpenAIEmbedder
class TestOpenAIEmbedder:
    """Test suite for OpenAI embedder functionality"""

    @pytest.fixture
    def mock_user(self):
        """Create a mock user for testing"""
        user = Mock()
        user.organization_id = "test_org_id"
        return user

    @pytest.fixture
    def embedding_config(self):
        """Create a test embedding config"""
        return EmbeddingConfig(
            embedding_model="text-embedding-3-small",
            embedding_endpoint_type="openai",
            embedding_endpoint="https://api.openai.com/v1",
            embedding_dim=3,  # small dimension for testing
            embedding_chunk_size=300,
            batch_size=2,  # small batch size for testing
        )

    @pytest.fixture
    def embedder(self, embedding_config):
        """Create OpenAI embedder with test config"""
        # Patch LLMClient.create so the embedder is wired to a Mock client
        # instead of a real OpenAI connection; the mock is also assigned
        # directly so tests can stub request_embeddings per case.
        with patch("letta.services.file_processor.embedder.openai_embedder.LLMClient.create") as mock_create:
            mock_client = Mock()
            mock_client.handle_llm_error = Mock()
            mock_create.return_value = mock_client
            embedder = OpenAIEmbedder(embedding_config)
            embedder.client = mock_client
            return embedder

    @pytest.mark.asyncio
    async def test_successful_embedding_generation(self, embedder, mock_user):
        """Test successful embedding generation for normal cases"""
        # mock successful embedding response
        mock_embeddings = [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]
        embedder.client.request_embeddings = AsyncMock(return_value=mock_embeddings)
        chunks = ["chunk 1", "chunk 2"]
        file_id = "test_file"
        source_id = "test_source"
        passages = await embedder.generate_embedded_passages(file_id, source_id, chunks, mock_user)
        assert len(passages) == 2
        assert passages[0].text == "chunk 1"
        assert passages[1].text == "chunk 2"
        # embeddings are padded to MAX_EMBEDDING_DIM, so check first 3 values
        assert passages[0].embedding[:3] == [0.1, 0.2, 0.3]
        assert passages[1].embedding[:3] == [0.4, 0.5, 0.6]
        assert passages[0].file_id == file_id
        assert passages[0].source_id == source_id

    @pytest.mark.asyncio
    async def test_token_limit_retry_splits_batch(self, embedder, mock_user):
        """Test that token limit errors trigger batch splitting and retry"""
        # create a mock token limit error
        mock_error_body = {"error": {"code": "max_tokens_per_request", "message": "Requested 319270 tokens, max 300000 tokens per request"}}
        token_limit_error = openai.BadRequestError(message="Token limit exceeded", response=Mock(status_code=400), body=mock_error_body)
        # first call fails with token limit, subsequent calls succeed
        call_count = 0

        async def mock_request_embeddings(inputs, embedding_config):
            nonlocal call_count
            call_count += 1
            if call_count == 1 and len(inputs) == 4:  # first call with full batch
                raise token_limit_error
            elif len(inputs) == 2:  # split batches succeed
                return [[0.1, 0.2], [0.3, 0.4]] if call_count == 2 else [[0.5, 0.6], [0.7, 0.8]]
            else:
                return [[0.1, 0.2]] * len(inputs)

        embedder.client.request_embeddings = AsyncMock(side_effect=mock_request_embeddings)
        chunks = ["chunk 1", "chunk 2", "chunk 3", "chunk 4"]
        file_id = "test_file"
        source_id = "test_source"
        passages = await embedder.generate_embedded_passages(file_id, source_id, chunks, mock_user)
        # should still get all 4 passages despite the retry
        assert len(passages) == 4
        assert all(len(p.embedding) == 4096 for p in passages)  # padded to MAX_EMBEDDING_DIM
        # verify multiple calls were made (original + retries)
        assert call_count >= 2

    @pytest.mark.asyncio
    async def test_token_limit_error_detection(self, embedder):
        """Test various token limit error detection patterns"""
        # test openai BadRequestError with proper structure
        mock_error_body = {"error": {"code": "max_tokens_per_request", "message": "Requested 319270 tokens, max 300000 tokens per request"}}
        openai_error = openai.BadRequestError(message="Token limit exceeded", response=Mock(status_code=400), body=mock_error_body)
        assert embedder._is_token_limit_error(openai_error) is True
        # test error with message but no code
        mock_error_body_no_code = {"error": {"message": "max_tokens_per_request exceeded"}}
        openai_error_no_code = openai.BadRequestError(
            message="Token limit exceeded", response=Mock(status_code=400), body=mock_error_body_no_code
        )
        assert embedder._is_token_limit_error(openai_error_no_code) is True
        # test fallback string detection
        generic_error = Exception("Requested 100000 tokens, max 50000 tokens per request")
        assert embedder._is_token_limit_error(generic_error) is True
        # test non-token errors
        other_error = Exception("Some other error")
        assert embedder._is_token_limit_error(other_error) is False
        auth_error = openai.AuthenticationError(
            message="Invalid API key", response=Mock(status_code=401), body={"error": {"code": "invalid_api_key"}}
        )
        assert embedder._is_token_limit_error(auth_error) is False

    @pytest.mark.asyncio
    async def test_non_token_error_handling(self, embedder, mock_user):
        """Test that non-token errors are properly handled and re-raised"""
        # create a non-token error
        auth_error = openai.AuthenticationError(
            message="Invalid API key", response=Mock(status_code=401), body={"error": {"code": "invalid_api_key"}}
        )
        # mock handle_llm_error to return a standardized error
        handled_error = LLMBadRequestError(message="Handled error", code=ErrorCode.UNAUTHENTICATED)
        embedder.client.handle_llm_error.return_value = handled_error
        embedder.client.request_embeddings = AsyncMock(side_effect=auth_error)
        chunks = ["chunk 1"]
        file_id = "test_file"
        source_id = "test_source"
        with pytest.raises(LLMBadRequestError) as exc_info:
            await embedder.generate_embedded_passages(file_id, source_id, chunks, mock_user)
        assert exc_info.value == handled_error
        embedder.client.handle_llm_error.assert_called_once_with(auth_error)

    @pytest.mark.asyncio
    async def test_single_item_batch_no_retry(self, embedder, mock_user):
        """Test that single-item batches don't retry on token limit errors"""
        # create a token limit error
        mock_error_body = {"error": {"code": "max_tokens_per_request", "message": "Requested 319270 tokens, max 300000 tokens per request"}}
        token_limit_error = openai.BadRequestError(message="Token limit exceeded", response=Mock(status_code=400), body=mock_error_body)
        handled_error = LLMBadRequestError(message="Handled token limit error", code=ErrorCode.INVALID_ARGUMENT)
        embedder.client.handle_llm_error.return_value = handled_error
        embedder.client.request_embeddings = AsyncMock(side_effect=token_limit_error)
        chunks = ["very long chunk that exceeds token limit"]
        file_id = "test_file"
        source_id = "test_source"
        with pytest.raises(LLMBadRequestError) as exc_info:
            await embedder.generate_embedded_passages(file_id, source_id, chunks, mock_user)
        assert exc_info.value == handled_error
        embedder.client.handle_llm_error.assert_called_once_with(token_limit_error)

    @pytest.mark.asyncio
    async def test_empty_chunks_handling(self, embedder, mock_user):
        """Test handling of empty chunks list"""
        chunks = []
        file_id = "test_file"
        source_id = "test_source"
        passages = await embedder.generate_embedded_passages(file_id, source_id, chunks, mock_user)
        assert passages == []
        # should not call request_embeddings for empty input
        embedder.client.request_embeddings.assert_not_called()

    @pytest.mark.asyncio
    async def test_embedding_order_preservation(self, embedder, mock_user):
        """Test that embedding order is preserved even with retries"""
        # set up embedder to split batches (batch_size=2)
        embedder.embedding_config.batch_size = 2

        # mock responses for each batch
        async def mock_request_embeddings(inputs, embedding_config):
            # return embeddings that correspond to input order
            if inputs == ["chunk 1", "chunk 2"]:
                return [[0.1, 0.1], [0.2, 0.2]]
            elif inputs == ["chunk 3", "chunk 4"]:
                return [[0.3, 0.3], [0.4, 0.4]]
            else:
                return [[0.1, 0.1]] * len(inputs)

        embedder.client.request_embeddings = AsyncMock(side_effect=mock_request_embeddings)
        chunks = ["chunk 1", "chunk 2", "chunk 3", "chunk 4"]
        file_id = "test_file"
        source_id = "test_source"
        passages = await embedder.generate_embedded_passages(file_id, source_id, chunks, mock_user)
        # verify order is preserved
        assert len(passages) == 4
        assert passages[0].text == "chunk 1"
        assert passages[0].embedding[:2] == [0.1, 0.1]  # check first 2 values before padding
        assert passages[1].text == "chunk 2"
        assert passages[1].embedding[:2] == [0.2, 0.2]
        assert passages[2].text == "chunk 3"
        assert passages[2].embedding[:2] == [0.3, 0.3]
        assert passages[3].text == "chunk 4"
        assert passages[3].embedding[:2] == [0.4, 0.4]
class TestFileProcessorWithPinecone:
    """Test suite for file processor with Pinecone integration"""

    @pytest.mark.asyncio
    async def test_file_processor_sets_chunks_embedded_zero_with_pinecone(self):
        """Test that file processor sets total_chunks and chunks_embedded=0 when using Pinecone"""
        # NOTE(review): imports are function-local — presumably to keep the
        # optional pinecone dependency out of module import time; confirm.
        from letta.schemas.enums import FileProcessingStatus
        from letta.schemas.file import FileMetadata
        from letta.services.file_processor.embedder.pinecone_embedder import PineconeEmbedder
        from letta.services.file_processor.file_processor import FileProcessor
        from letta.services.file_processor.parser.markitdown_parser import MarkitdownFileParser

        # Mock dependencies
        mock_actor = Mock()
        mock_actor.organization_id = "test_org"
        # Create real parser
        file_parser = MarkitdownFileParser()
        # Create file metadata with content
        mock_file = FileMetadata(
            file_name="test.txt",
            source_id="source-87654321",
            processing_status=FileProcessingStatus.PARSING,
            total_chunks=0,
            chunks_embedded=0,
            content="This is test content that will be chunked.",
        )
        # Mock only the Pinecone-specific functionality
        with patch("letta.services.file_processor.embedder.pinecone_embedder.PINECONE_AVAILABLE", True):
            with patch("letta.services.file_processor.embedder.pinecone_embedder.upsert_file_records_to_pinecone_index") as mock_upsert:
                # Mock successful Pinecone upsert
                mock_upsert.return_value = None
                # Create real Pinecone embedder
                embedder = PineconeEmbedder()
                # Create file processor with Pinecone enabled
                file_processor = FileProcessor(file_parser=file_parser, embedder=embedder, actor=mock_actor)
                # Track file manager update calls
                update_calls = []

                async def track_update(*args, **kwargs):
                    # Record every status-update kwargs dict for later inspection.
                    update_calls.append(kwargs)
                    return mock_file

                # Mock managers to track calls
                with patch.object(file_processor.file_manager, "update_file_status", new=track_update):
                    with patch.object(file_processor.passage_manager, "create_many_source_passages_async", new=AsyncMock()):
                        # Process the imported file (which has content)
                        await file_processor.process_imported_file(mock_file, mock_file.source_id)
                # Find the call that sets total_chunks and chunks_embedded
                chunk_update_call = None
                for call in update_calls:
                    if "total_chunks" in call and "chunks_embedded" in call:
                        chunk_update_call = call
                        break
                # Verify the correct values were set
                assert chunk_update_call is not None, "No update_file_status call found with total_chunks and chunks_embedded"
                assert chunk_update_call["total_chunks"] > 0, "total_chunks should be greater than 0"
                assert chunk_update_call["chunks_embedded"] == 0, "chunks_embedded should be 0 when using Pinecone"
                # Verify Pinecone upsert was called
                mock_upsert.assert_called_once()
                call_args = mock_upsert.call_args
                assert call_args.kwargs["file_id"] == mock_file.id
                assert call_args.kwargs["source_id"] == mock_file.source_id
                assert len(call_args.kwargs["chunks"]) > 0
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/test_file_processor.py",
"license": "Apache License 2.0",
"lines": 235,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/test_letta_request_schema.py | """Tests for LettaRequest schema validation"""
import pytest
from pydantic import ValidationError
from letta.constants import DEFAULT_MESSAGE_TOOL, DEFAULT_MESSAGE_TOOL_KWARG
from letta.schemas.letta_request import CreateBatch, LettaBatchRequest, LettaRequest, LettaStreamingRequest
from letta.schemas.message import MessageCreate
class TestLettaRequest:
    """Test cases for LettaRequest schema"""

    @staticmethod
    def _user_messages():
        """Single user-message payload shared by every case in this class."""
        return [MessageCreate(role="user", content="Test message")]

    def test_letta_request_with_default_max_steps(self):
        """Test that LettaRequest uses default max_steps value"""
        msgs = self._user_messages()
        req = LettaRequest(messages=msgs)
        assert req.max_steps == 10
        assert req.messages == msgs
        assert req.use_assistant_message is True
        assert req.assistant_message_tool_name == DEFAULT_MESSAGE_TOOL
        assert req.assistant_message_tool_kwarg == DEFAULT_MESSAGE_TOOL_KWARG

    def test_letta_request_with_custom_max_steps(self):
        """Test that LettaRequest accepts custom max_steps value"""
        msgs = self._user_messages()
        req = LettaRequest(messages=msgs, max_steps=5)
        assert req.max_steps == 5
        assert req.messages == msgs

    def test_letta_request_with_zero_max_steps(self):
        """Test that LettaRequest accepts zero max_steps"""
        assert LettaRequest(messages=self._user_messages(), max_steps=0).max_steps == 0

    def test_letta_request_with_negative_max_steps(self):
        """Test that LettaRequest accepts negative max_steps (edge case)"""
        assert LettaRequest(messages=self._user_messages(), max_steps=-1).max_steps == -1

    def test_letta_request_required_fields(self):
        """Test that messages field is required"""
        with pytest.raises(ValidationError) as exc_info:
            LettaRequest()
        assert "messages" in str(exc_info.value)

    def test_letta_request_with_all_fields(self):
        """Test LettaRequest with all fields specified"""
        req = LettaRequest(
            messages=self._user_messages(),
            max_steps=15,
            use_assistant_message=False,
            assistant_message_tool_name="custom_tool",
            assistant_message_tool_kwarg="custom_kwarg",
        )
        assert req.max_steps == 15
        assert req.use_assistant_message is False
        assert req.assistant_message_tool_name == "custom_tool"
        assert req.assistant_message_tool_kwarg == "custom_kwarg"

    def test_letta_request_json_serialization(self):
        """Test that LettaRequest can be serialized to/from JSON"""
        req = LettaRequest(messages=self._user_messages(), max_steps=7)
        # Round-trip through a plain dict.
        payload = req.model_dump()
        assert payload["max_steps"] == 7
        revived = LettaRequest.model_validate(payload)
        assert revived.max_steps == 7
        assert revived.messages[0].role == "user"
class TestLettaStreamingRequest:
    """Test cases for LettaStreamingRequest schema"""

    def test_letta_streaming_request_inherits_max_steps(self):
        """Test that LettaStreamingRequest inherits max_steps from LettaRequest"""
        req = LettaStreamingRequest(
            messages=[MessageCreate(role="user", content="Test message")],
            max_steps=12,
        )
        assert req.max_steps == 12
        assert req.stream_tokens is False  # Default value

    def test_letta_streaming_request_with_streaming_options(self):
        """Test LettaStreamingRequest with streaming-specific options"""
        req = LettaStreamingRequest(
            messages=[MessageCreate(role="user", content="Test message")],
            max_steps=8,
            stream_tokens=True,
        )
        assert req.max_steps == 8
        assert req.stream_tokens is True
class TestLettaBatchRequest:
    """Test cases for LettaBatchRequest schema"""

    def test_letta_batch_request_inherits_max_steps(self):
        """Test that LettaBatchRequest inherits max_steps from LettaRequest"""
        req = LettaBatchRequest(
            messages=[MessageCreate(role="user", content="Test message")],
            agent_id="test-agent-id",
            max_steps=20,
        )
        assert req.max_steps == 20
        assert req.agent_id == "test-agent-id"

    def test_letta_batch_request_required_agent_id(self):
        """Test that agent_id is required for LettaBatchRequest"""
        with pytest.raises(ValidationError) as exc_info:
            LettaBatchRequest(messages=[MessageCreate(role="user", content="Test message")])
        assert "agent_id" in str(exc_info.value)
class TestCreateBatch:
    """Test cases for CreateBatch schema"""

    def test_create_batch_with_max_steps(self):
        """Test CreateBatch containing requests with max_steps"""
        msgs = [MessageCreate(role="user", content="Test message")]
        batch = CreateBatch(
            requests=[
                LettaBatchRequest(messages=msgs, agent_id="agent-1", max_steps=5),
                LettaBatchRequest(messages=msgs, agent_id="agent-2", max_steps=10),
            ]
        )
        assert len(batch.requests) == 2
        assert batch.requests[0].max_steps == 5
        assert batch.requests[1].max_steps == 10

    def test_create_batch_with_callback_url(self):
        """Test CreateBatch with callback URL"""
        msgs = [MessageCreate(role="user", content="Test message")]
        batch = CreateBatch(
            requests=[LettaBatchRequest(messages=msgs, agent_id="agent-1", max_steps=3)],
            callback_url="https://example.com/callback",
        )
        assert str(batch.callback_url) == "https://example.com/callback"
        assert batch.requests[0].max_steps == 3
class TestLettaRequestIntegration:
    """Integration tests for LettaRequest usage patterns"""

    def test_max_steps_propagation_in_inheritance_chain(self):
        """Test that max_steps works correctly across the inheritance chain"""
        msgs = [MessageCreate(role="user", content="Test message")]
        # The base class and both subclasses must honor an explicit max_steps.
        cases = [
            (LettaRequest(messages=msgs, max_steps=3), 3),
            (LettaStreamingRequest(messages=msgs, max_steps=6), 6),
            (LettaBatchRequest(messages=msgs, agent_id="test-agent", max_steps=9), 9),
        ]
        for request, expected in cases:
            assert request.max_steps == expected

    def test_backwards_compatibility(self):
        """Test that existing code without max_steps still works"""
        msgs = [MessageCreate(role="user", content="Test message")]
        # Omitting max_steps falls back to the default of 10.
        assert LettaRequest(messages=msgs).max_steps == 10
        # Overriding unrelated fields leaves the default untouched.
        req = LettaRequest(messages=msgs, use_assistant_message=False, assistant_message_tool_name="custom_tool")
        assert req.max_steps == 10  # Still uses default
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/test_letta_request_schema.py",
"license": "Apache License 2.0",
"lines": 134,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/test_long_running_agents.py | import os
import threading
import time
import httpx
import pytest
import requests
from dotenv import load_dotenv
from letta_client import Letta, MessageCreate, TextContent
from tests.helpers.utils import upload_test_agentfile_from_disk
# Canned lead-research briefing streamed to the long-running "Kylie" agent below.
RESEARCH_INSTRUCTIONS = "\n Lead Name: Kian Jones\n Lead Title: Software Engineer\n Lead LinkedIn URL: https://www.linkedin.com/in/kian-jones\n Company Name: Letta\n Company Domain: letta.com\n Company Industry: technology/software/ai\n \n**Research Instructions**\n"
# Kick-off prompt for the deep-research agent scenario.
DEEP_RESEARCH_INSTRUCTIONS = "Let's get started, we have to research mantis shrimps. I need to know everything there is, or my grandmother will die. Please begin immediately and do a great job, they are scaring me."
@pytest.fixture(scope="module")
def server_url() -> str:
"""
Provides the URL for the Letta server.
If LETTA_SERVER_URL is not set, starts the server in a background thread
and polls until it's accepting connections.
"""
def _run_server() -> None:
load_dotenv()
from letta.server.rest_api.app import start_server
start_server(debug=True)
api_url = os.getenv("LETTA_API_URL")
if api_url:
return api_url
url: str = os.getenv("LETTA_SERVER_URL", "http://localhost:8283")
if not os.getenv("LETTA_SERVER_URL"):
thread = threading.Thread(target=_run_server, daemon=True)
thread.start()
# Poll until the server is up (or timeout)
timeout_seconds = 60
deadline = time.time() + timeout_seconds
while time.time() < deadline:
try:
resp = requests.get(url + "/v1/health")
if resp.status_code < 500:
break
except requests.exceptions.RequestException:
pass
time.sleep(0.1)
else:
raise RuntimeError(f"Could not reach {url} within {timeout_seconds}s")
return url
@pytest.fixture(scope="module")
def client(server_url: str) -> Letta:
"""
Creates and returns a synchronous Letta REST client for testing.
"""
api_url = os.getenv("LETTA_API_URL")
api_key = os.getenv("LETTA_API_KEY")
if api_url and not api_key:
raise ValueError("LETTA_API_KEY is required when passing LETTA_API_URL")
client_instance = Letta(token=api_key, base_url=api_url if api_url else server_url)
return client_instance
@pytest.mark.asyncio
async def test_deep_research_agent(client: Letta, server_url, disable_e2b_api_key):
    """Stream a long-running deep-research run and fail on ping timeout.

    Imports the deep-thought agent file, streams a research request with
    pings enabled, and consumes the stream to completion. A ReadTimeout is a
    test failure; the agent is always deleted afterwards.
    """
    imported_af = upload_test_agentfile_from_disk(client, "deep-thought.af")
    agent_id = imported_af.agent_ids[0]
    try:
        response = client.agents.messages.create_stream(
            agent_id=agent_id,
            stream_tokens=True,
            include_pings=True,
            messages=[
                MessageCreate(
                    role="user",
                    content=[
                        TextContent(
                            text=DEEP_RESEARCH_INSTRUCTIONS,
                        )
                    ],
                )
            ],
        )
        for chunk in response:
            if chunk.message_type is not None:
                print(chunk)
    except httpx.ReadTimeout as e:
        # pytest.fail is not stripped under `python -O` the way a bare
        # `assert False` is, and it carries the diagnostic into the report.
        pytest.fail(f"Timeout on create_stream. Consider enabling pings in create_stream if you have long running agents. {e}")
    finally:
        client.agents.delete(agent_id=agent_id)
@pytest.mark.asyncio
async def test_kylie_agent(client: Letta, server_url, disable_e2b_api_key):
    """Stream the long-running Kylie research agent and fail on ping timeout.

    Imports the Kylie agent file, streams the canned research instructions
    with pings enabled, and consumes the stream to completion. A ReadTimeout
    is a test failure; the agent is always deleted afterwards.
    """
    imported_af = upload_test_agentfile_from_disk(client, "long_running_kylie.af")
    agent_id = imported_af.agent_ids[0]
    try:
        response = client.agents.messages.create_stream(
            agent_id=agent_id,
            include_pings=True,
            stream_tokens=True,
            messages=[
                MessageCreate(
                    role="user",
                    content=[
                        TextContent(
                            text=RESEARCH_INSTRUCTIONS,
                        )
                    ],
                )
            ],
        )
        for chunk in response:
            if chunk.message_type is not None:
                print(chunk)
    except httpx.ReadTimeout as e:
        # pytest.fail is not stripped under `python -O` the way a bare
        # `assert False` is, and it carries the diagnostic into the report.
        pytest.fail(f"Timeout on create_stream. Consider enabling pings in create_stream if you have long running agents. {e}")
    finally:
        client.agents.delete(agent_id=agent_id)
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/test_long_running_agents.py",
"license": "Apache License 2.0",
"lines": 108,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/test_mcp_encryption.py | """
Integration tests for MCP server and OAuth session encryption.
Tests the end-to-end encryption functionality in the MCP manager.
"""
import json
from datetime import datetime, timezone
from unittest.mock import AsyncMock, patch
from uuid import uuid4
import pytest
from sqlalchemy import select
from letta.config import LettaConfig
from letta.helpers.crypto_utils import CryptoUtils
from letta.orm import MCPOAuth, MCPServer as ORMMCPServer
from letta.schemas.mcp import (
MCPOAuthSessionCreate,
MCPOAuthSessionUpdate,
MCPServer as PydanticMCPServer,
MCPServerType,
)
from letta.server.db import db_registry
from letta.server.server import SyncServer
from letta.settings import settings
@pytest.fixture(scope="module")
def server():
"""Fixture to create and return a SyncServer instance with MCP manager."""
config = LettaConfig.load()
config.save()
server = SyncServer(init_with_default_org_and_user=False)
return server
class TestMCPServerEncryption:
"""Test MCP server encryption functionality."""
MOCK_ENCRYPTION_KEY = "test-mcp-encryption-key-123456"
@pytest.mark.asyncio
@patch("letta.services.mcp_manager.MCPManager.get_mcp_client")
async def test_create_mcp_server_with_token_encryption(self, mock_get_client, server, default_user):
    """Test that MCP server tokens are encrypted when stored."""
    # Set encryption key directly on settings
    original_key = settings.encryption_key
    settings.encryption_key = self.MOCK_ENCRYPTION_KEY
    try:
        # Mock the MCP client
        mock_client = AsyncMock()
        mock_client.list_tools.return_value = []
        mock_get_client.return_value = mock_client
        # Create MCP server with token
        server_name = f"test_encrypted_server_{uuid4().hex[:8]}"
        token = "super-secret-api-token-12345"
        server_url = "https://api.example.com/mcp"
        mcp_server = PydanticMCPServer(server_name=server_name, server_type=MCPServerType.SSE, server_url=server_url, token=token)
        created_server = await server.mcp_manager.create_or_update_mcp_server(mcp_server, actor=default_user)
        # Verify server was created
        assert created_server.server_name == server_name
        assert created_server.server_type == MCPServerType.SSE
        # Check database directly to verify encryption
        async with db_registry.async_session() as session:
            result = await session.execute(select(ORMMCPServer).where(ORMMCPServer.id == created_server.id))
            db_server = result.scalar_one()
            # Token should be encrypted in database
            assert db_server.token_enc is not None
            assert db_server.token_enc != token  # Should not be plaintext
            # Decrypt to verify correctness
            decrypted_token = CryptoUtils.decrypt(db_server.token_enc)
            assert decrypted_token == token
            # Plaintext column should NOT be written to (encrypted-only)
            assert db_server.token is None
        # Clean up
        await server.mcp_manager.delete_mcp_server_by_id(created_server.id, actor=default_user)
    finally:
        # Restore original encryption key — settings is process-global, so the
        # finally block must undo the mutation even when an assert fails.
        settings.encryption_key = original_key
@pytest.mark.asyncio
@patch("letta.services.mcp_manager.MCPManager.get_mcp_client")
async def test_create_mcp_server_with_custom_headers_encryption(self, mock_get_client, server, default_user):
"""Test that MCP server custom headers are encrypted when stored."""
# Set encryption key directly on settings
original_key = settings.encryption_key
settings.encryption_key = self.MOCK_ENCRYPTION_KEY
try:
# Mock the MCP client
mock_client = AsyncMock()
mock_client.list_tools.return_value = []
mock_get_client.return_value = mock_client
server_name = f"test_headers_server_{uuid4().hex[:8]}"
custom_headers = {"Authorization": "Bearer secret-token-xyz", "X-API-Key": "api-key-123456", "X-Custom-Header": "custom-value"}
server_url = "https://api.example.com/mcp"
mcp_server = PydanticMCPServer(
server_name=server_name, server_type=MCPServerType.STREAMABLE_HTTP, server_url=server_url, custom_headers=custom_headers
)
created_server = await server.mcp_manager.create_or_update_mcp_server(mcp_server, actor=default_user)
# Check database directly
async with db_registry.async_session() as session:
result = await session.execute(select(ORMMCPServer).where(ORMMCPServer.id == created_server.id))
db_server = result.scalar_one()
# Custom headers should be encrypted as JSON
assert db_server.custom_headers_enc is not None
# Decrypt and parse JSON
decrypted_json = CryptoUtils.decrypt(db_server.custom_headers_enc)
decrypted_headers = json.loads(decrypted_json)
assert decrypted_headers == custom_headers
# Clean up
await server.mcp_manager.delete_mcp_server_by_id(created_server.id, actor=default_user)
finally:
# Restore original encryption key
settings.encryption_key = original_key
@pytest.mark.asyncio
async def test_retrieve_mcp_server_decrypts_values(self, server, default_user):
"""Test that retrieving MCP server decrypts encrypted values."""
# Set encryption key directly on settings
original_key = settings.encryption_key
settings.encryption_key = self.MOCK_ENCRYPTION_KEY
try:
# Manually insert encrypted server into database
server_id = f"mcp_server-{uuid4().hex[:8]}"
server_name = f"test_decrypt_server_{uuid4().hex[:8]}"
plaintext_token = "decryption-test-token"
encrypted_token = CryptoUtils.encrypt(plaintext_token)
async with db_registry.async_session() as session:
db_server = ORMMCPServer(
id=server_id,
server_name=server_name,
server_type=MCPServerType.SSE.value,
server_url="https://test.com",
token_enc=encrypted_token,
token=None, # No plaintext
created_by_id=default_user.id,
last_updated_by_id=default_user.id,
organization_id=default_user.organization_id,
created_at=datetime.now(timezone.utc),
updated_at=datetime.now(timezone.utc),
)
session.add(db_server)
# context manager now handles commits
# await session.commit()
# Retrieve server directly by ID to avoid issues with other servers in DB
test_server = await server.mcp_manager.get_mcp_server_by_id_async(server_id, actor=default_user)
assert test_server is not None
assert test_server.server_name == server_name
# Token should be decrypted when accessed via the _enc column
assert test_server.token_enc is not None
assert test_server.token_enc.get_plaintext() == plaintext_token
# Clean up
async with db_registry.async_session() as session:
result = await session.execute(select(ORMMCPServer).where(ORMMCPServer.id == server_id))
db_server = result.scalar_one()
await session.delete(db_server)
# context manager now handles commits
# await session.commit()
finally:
# Restore original encryption key
settings.encryption_key = original_key
@pytest.mark.asyncio
@patch("letta.services.mcp_manager.MCPManager.get_mcp_client")
async def test_create_mcp_server_without_encryption_key_stores_plaintext(self, mock_get_client, server, default_user):
"""Test that MCP servers work without encryption key by storing plaintext in _enc column.
Note: In Phase 1 of migration, if no encryption key is configured, the value
is stored as plaintext directly in the _enc column. This allows users without
encryption keys to continue working while migrating off the old plaintext columns.
"""
# Save and clear encryption key
original_key = settings.encryption_key
settings.encryption_key = None
try:
# Mock the MCP client
mock_client = AsyncMock()
mock_client.list_tools.return_value = []
mock_get_client.return_value = mock_client
server_name = f"test_no_encrypt_server_{uuid4().hex[:8]}"
token = "plaintext-token-no-encryption"
mcp_server = PydanticMCPServer(
server_name=server_name, server_type=MCPServerType.SSE, server_url="https://api.example.com", token=token
)
# Should work without encryption key - stores plaintext in _enc column
created_server = await server.mcp_manager.create_or_update_mcp_server(mcp_server, actor=default_user)
# Check database - should store plaintext in _enc column (no encryption key)
async with db_registry.async_session() as session:
result = await session.execute(select(ORMMCPServer).where(ORMMCPServer.id == created_server.id))
db_server = result.scalar_one()
# Token should be stored as plaintext in _enc column (not encrypted)
assert db_server.token_enc == token # Plaintext stored directly
# Plaintext column should NOT be written to (encrypted-only)
assert db_server.token is None
# Clean up
await server.mcp_manager.delete_mcp_server_by_id(created_server.id, actor=default_user)
finally:
# Restore original encryption key
settings.encryption_key = original_key
class TestMCPOAuthEncryption:
    """Test MCP OAuth session encryption functionality.

    Mirrors TestMCPServerEncryption, but for OAuth sessions: access/refresh tokens
    and client secrets must round-trip through the ``*_enc`` columns. Tests install
    MOCK_ENCRYPTION_KEY on the global ``settings`` and restore it in ``finally``.
    """

    # Key temporarily installed on settings for the duration of each test.
    MOCK_ENCRYPTION_KEY = "test-oauth-encryption-key-123456"

    @pytest.mark.asyncio
    async def test_create_oauth_session_with_encryption(self, server, default_user):
        """Test that OAuth tokens are encrypted when stored."""
        # Set encryption key directly on settings
        original_key = settings.encryption_key
        settings.encryption_key = self.MOCK_ENCRYPTION_KEY
        try:
            server_url = "https://github.com/mcp"
            server_name = "GitHub MCP"
            # Step 1: Create OAuth session (without tokens initially)
            oauth_session_create = MCPOAuthSessionCreate(
                server_url=server_url,
                server_name=server_name,
                organization_id=default_user.organization_id,
                user_id=default_user.id,
            )
            created_session = await server.mcp_manager.create_oauth_session(oauth_session_create, actor=default_user)
            assert created_session.server_url == server_url
            assert created_session.server_name == server_name
            # Step 2: Update session with tokens (simulating OAuth callback)
            update_data = MCPOAuthSessionUpdate(
                access_token="github-access-token-abc123",
                refresh_token="github-refresh-token-xyz789",
                client_id="client-id-123",
                client_secret="client-secret-super-secret",
                expires_at=datetime.now(timezone.utc),
            )
            await server.mcp_manager.update_oauth_session(created_session.id, update_data, actor=default_user)
            # Check database directly for encryption
            async with db_registry.async_session() as session:
                result = await session.execute(select(MCPOAuth).where(MCPOAuth.id == created_session.id))
                db_oauth = result.scalar_one()
                # All sensitive fields should be encrypted
                assert db_oauth.access_token_enc is not None
                assert db_oauth.access_token_enc != update_data.access_token
                assert db_oauth.refresh_token_enc is not None
                assert db_oauth.client_secret_enc is not None
                # Verify decryption round-trips each field.
                decrypted_access = CryptoUtils.decrypt(db_oauth.access_token_enc)
                assert decrypted_access == update_data.access_token
                decrypted_refresh = CryptoUtils.decrypt(db_oauth.refresh_token_enc)
                assert decrypted_refresh == update_data.refresh_token
                decrypted_secret = CryptoUtils.decrypt(db_oauth.client_secret_enc)
                assert decrypted_secret == update_data.client_secret
        finally:
            # Restore original encryption key
            settings.encryption_key = original_key
        # Clean up not needed - test database is reset

    @pytest.mark.asyncio
    async def test_retrieve_oauth_session_decrypts_tokens(self, server, default_user):
        """Test that retrieving OAuth session decrypts tokens."""
        # Set encryption key directly on settings
        original_key = settings.encryption_key
        settings.encryption_key = self.MOCK_ENCRYPTION_KEY
        try:
            # Manually insert encrypted OAuth session (bypass the manager on write).
            session_id = f"mcp-oauth-{str(uuid4())[:8]}"
            access_token = "test-access-token"
            refresh_token = "test-refresh-token"
            client_secret = "test-client-secret"
            encrypted_access = CryptoUtils.encrypt(access_token)
            encrypted_refresh = CryptoUtils.encrypt(refresh_token)
            encrypted_secret = CryptoUtils.encrypt(client_secret)
            async with db_registry.async_session() as session:
                db_oauth = MCPOAuth(
                    id=session_id,
                    state=f"test-state-{uuid4().hex[:8]}",
                    server_url="https://test.com/mcp",
                    server_name="Test Provider",
                    access_token_enc=encrypted_access,
                    refresh_token_enc=encrypted_refresh,
                    client_id="test-client",
                    client_secret_enc=encrypted_secret,
                    user_id=default_user.id,
                    organization_id=default_user.organization_id,
                    created_at=datetime.now(timezone.utc),
                    updated_at=datetime.now(timezone.utc),
                )
                session.add(db_oauth)
                # context manager now handles commits
                # await session.commit()
            # Retrieve through manager by ID
            test_session = await server.mcp_manager.get_oauth_session_by_id(session_id, actor=default_user)
            assert test_session is not None
            # Tokens should be decrypted from _enc columns
            assert test_session.access_token_enc is not None
            assert test_session.access_token_enc.get_plaintext() == access_token
            assert test_session.refresh_token_enc is not None
            assert test_session.refresh_token_enc.get_plaintext() == refresh_token
            assert test_session.client_secret_enc is not None
            assert test_session.client_secret_enc.get_plaintext() == client_secret
            # Clean up not needed - test database is reset
        finally:
            # Restore original encryption key
            settings.encryption_key = original_key

    @pytest.mark.asyncio
    async def test_update_oauth_session_maintains_encryption(self, server, default_user):
        """Test that updating OAuth session maintains encryption."""
        # Set encryption key directly on settings
        original_key = settings.encryption_key
        settings.encryption_key = self.MOCK_ENCRYPTION_KEY
        try:
            # Create initial session (without tokens)
            oauth_session_create = MCPOAuthSessionCreate(
                server_url="https://test.com/mcp",
                server_name="Test Update Provider",
                organization_id=default_user.organization_id,
                user_id=default_user.id,
            )
            created_session = await server.mcp_manager.create_oauth_session(oauth_session_create, actor=default_user)
            # Add initial tokens
            initial_update = MCPOAuthSessionUpdate(
                access_token="initial-token",
                refresh_token="initial-refresh",
                client_id="client-123",
                client_secret="initial-secret",
            )
            await server.mcp_manager.update_oauth_session(created_session.id, initial_update, actor=default_user)
            # Update with new tokens (second update overwrites the first).
            new_access_token = "updated-access-token"
            new_refresh_token = "updated-refresh-token"
            new_update = MCPOAuthSessionUpdate(
                access_token=new_access_token,
                refresh_token=new_refresh_token,
            )
            updated_session = await server.mcp_manager.update_oauth_session(created_session.id, new_update, actor=default_user)
            # Verify update worked - read from _enc columns
            assert updated_session.access_token_enc is not None
            assert updated_session.access_token_enc.get_plaintext() == new_access_token
            assert updated_session.refresh_token_enc is not None
            assert updated_session.refresh_token_enc.get_plaintext() == new_refresh_token
            # Check database encryption
            async with db_registry.async_session() as session:
                result = await session.execute(select(MCPOAuth).where(MCPOAuth.id == created_session.id))
                db_oauth = result.scalar_one()
                # New tokens should be encrypted
                decrypted_access = CryptoUtils.decrypt(db_oauth.access_token_enc)
                assert decrypted_access == new_access_token
                decrypted_refresh = CryptoUtils.decrypt(db_oauth.refresh_token_enc)
                assert decrypted_refresh == new_refresh_token
            # Clean up not needed - test database is reset
        finally:
            # Restore original encryption key
            settings.encryption_key = original_key

    @pytest.mark.asyncio
    async def test_encrypted_only_reads(self, server, default_user):
        """Test that system only reads from encrypted columns, ignoring plaintext.

        Note: In Phase 1 of migration, reads are encrypted-only. Plaintext columns
        are ignored even if they contain values. This test verifies that the
        encrypted value is used and plaintext is never used as fallback.
        """
        # Set encryption key directly on settings
        original_key = settings.encryption_key
        settings.encryption_key = self.MOCK_ENCRYPTION_KEY
        try:
            # Insert a record with both encrypted and plaintext values
            session_id = f"mcp-oauth-{str(uuid4())[:8]}"
            plaintext_token = "legacy-plaintext-token"
            new_encrypted_token = "new-encrypted-token"
            encrypted_new = CryptoUtils.encrypt(new_encrypted_token)
            async with db_registry.async_session() as session:
                db_oauth = MCPOAuth(
                    id=session_id,
                    state=f"dual-read-state-{uuid4().hex[:8]}",
                    server_url="https://test.com/mcp",
                    server_name="Encrypted Only Read Test",
                    # Both encrypted and plaintext values
                    access_token=plaintext_token,  # Legacy plaintext - should be ignored
                    access_token_enc=encrypted_new,  # Encrypted value - should be used
                    client_id="test-client",
                    user_id=default_user.id,
                    organization_id=default_user.organization_id,
                    created_at=datetime.now(timezone.utc),
                    updated_at=datetime.now(timezone.utc),
                )
                session.add(db_oauth)
                # context manager now handles commits
                # await session.commit()
            # Retrieve through manager
            test_session = await server.mcp_manager.get_oauth_session_by_id(session_id, actor=default_user)
            assert test_session is not None
            # Should read from encrypted column only (plaintext is ignored)
            assert test_session.access_token_enc is not None
            assert test_session.access_token_enc.get_plaintext() == new_encrypted_token
            # Clean up not needed - test database is reset
        finally:
            # Restore original encryption key
            settings.encryption_key = original_key

    @pytest.mark.asyncio
    async def test_plaintext_only_record_returns_none(self, server, default_user):
        """Test that records with only plaintext values return None for encrypted fields.

        With encrypted-only migration complete, if a record only has plaintext value
        (no encrypted value), the system returns None for that field since we only
        read from _enc columns now.
        """
        # Set encryption key directly on settings
        original_key = settings.encryption_key
        settings.encryption_key = self.MOCK_ENCRYPTION_KEY
        try:
            # Insert a record with only plaintext value (no encrypted)
            session_id = f"mcp-oauth-{str(uuid4())[:8]}"
            plaintext_token = "legacy-plaintext-token"
            async with db_registry.async_session() as session:
                db_oauth = MCPOAuth(
                    id=session_id,
                    state=f"plaintext-only-state-{uuid4().hex[:8]}",
                    server_url="https://test.com/mcp",
                    server_name="Plaintext Only Test",
                    # Only plaintext value, no encrypted
                    access_token=plaintext_token,  # Legacy plaintext - should be ignored
                    access_token_enc=None,  # No encrypted value
                    client_id="test-client",
                    user_id=default_user.id,
                    organization_id=default_user.organization_id,
                    created_at=datetime.now(timezone.utc),
                    updated_at=datetime.now(timezone.utc),
                )
                session.add(db_oauth)
                # context manager now handles commits
                # await session.commit()
            # Retrieve through manager
            test_session = await server.mcp_manager.get_oauth_session_by_id(session_id, actor=default_user)
            assert test_session is not None
            # Should return None since we only read from _enc columns now
            assert test_session.access_token_enc is None
            # Clean up not needed - test database is reset
        finally:
            # Restore original encryption key
            settings.encryption_key = original_key
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/test_mcp_encryption.py",
"license": "Apache License 2.0",
"lines": 418,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/test_modal_sandbox_v2.py | import json
import pickle
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from letta.schemas.pip_requirement import PipRequirement
from letta.schemas.sandbox_config import ModalSandboxConfig, SandboxConfig, SandboxType
from letta.schemas.tool import Tool
from letta.services.tool_sandbox.modal_sandbox_v2 import AsyncToolSandboxModalV2
from letta.services.tool_sandbox.modal_version_manager import ModalVersionManager
from sandbox.modal_executor import ModalFunctionExecutor
class TestModalFunctionExecutor:
    """Test the ModalFunctionExecutor class.

    Each test passes a tool's Python source as a string plus pickled args and
    asserts on the result dict returned by ``execute_tool_dynamic``
    (keys observed here: result / error / agent_state / stdout).
    """

    def test_execute_tool_dynamic_success(self):
        """Test successful execution of a simple tool."""
        # Tool source is plain Python text that the executor compiles and runs.
        tool_source = """
def add_numbers(a: int, b: int) -> int:
    return a + b
"""
        args = {"a": 5, "b": 3}
        args_pickled = pickle.dumps(args)
        result = ModalFunctionExecutor.execute_tool_dynamic(
            tool_source=tool_source,
            tool_name="add_numbers",
            args_pickled=args_pickled,
            agent_state_pickled=None,
            inject_agent_state=False,
            is_async=False,
            args_schema_code=None,
        )
        assert result["error"] is None
        assert result["result"] == 8  # Actual integer value
        assert result["agent_state"] is None

    def test_execute_tool_dynamic_with_error(self):
        """Test execution with an error."""
        tool_source = """
def divide_numbers(a: int, b: int) -> float:
    return a / b
"""
        # b=0 forces a ZeroDivisionError inside the executed tool.
        args = {"a": 5, "b": 0}
        args_pickled = pickle.dumps(args)
        result = ModalFunctionExecutor.execute_tool_dynamic(
            tool_source=tool_source,
            tool_name="divide_numbers",
            args_pickled=args_pickled,
            agent_state_pickled=None,
            inject_agent_state=False,
            is_async=False,
            args_schema_code=None,
        )
        # Errors are reported structurally, not raised.
        assert result["error"] is not None
        assert result["error"]["name"] == "ZeroDivisionError"
        assert "division by zero" in result["error"]["value"]
        assert result["result"] is None

    def test_execute_async_tool(self):
        """Test execution of an async tool."""
        tool_source = """
async def async_add(a: int, b: int) -> int:
    import asyncio
    await asyncio.sleep(0.001)
    return a + b
"""
        args = {"a": 10, "b": 20}
        args_pickled = pickle.dumps(args)
        result = ModalFunctionExecutor.execute_tool_dynamic(
            tool_source=tool_source,
            tool_name="async_add",
            args_pickled=args_pickled,
            agent_state_pickled=None,
            inject_agent_state=False,
            is_async=True,  # executor must await the coroutine itself
            args_schema_code=None,
        )
        assert result["error"] is None
        assert result["result"] == 30

    def test_execute_with_stdout_capture(self):
        """Test that stdout is properly captured."""
        tool_source = """
def print_and_return(message: str) -> str:
    print(f"Processing: {message}")
    print("Done!")
    return message.upper()
"""
        args = {"message": "hello"}
        args_pickled = pickle.dumps(args)
        result = ModalFunctionExecutor.execute_tool_dynamic(
            tool_source=tool_source,
            tool_name="print_and_return",
            args_pickled=args_pickled,
            agent_state_pickled=None,
            inject_agent_state=False,
            is_async=False,
            args_schema_code=None,
        )
        assert result["error"] is None
        assert result["result"] == "HELLO"
        # Anything the tool printed must appear in the captured stdout.
        assert "Processing: hello" in result["stdout"]
        assert "Done!" in result["stdout"]
class TestModalVersionManager:
    """Test the Modal Version Manager.

    The manager persists deployment info via its ``tool_manager``; these tests
    replace that collaborator with mocks so no database is touched.
    """

    @pytest.mark.asyncio
    async def test_register_and_get_deployment(self):
        """Test registering and retrieving deployments."""
        from unittest.mock import AsyncMock

        from letta.schemas.user import User

        manager = ModalVersionManager()
        # Mock the tool manager so register/get never hit storage.
        mock_tool = MagicMock()
        mock_tool.id = "tool-abc12345"
        mock_tool.metadata_ = {}
        manager.tool_manager.get_tool_by_id = MagicMock(return_value=mock_tool)
        manager.tool_manager.update_tool_by_id_async = AsyncMock(return_value=mock_tool)
        # Create a mock actor
        mock_actor = MagicMock(spec=User)
        mock_actor.id = "user-123"
        # Register a deployment
        mock_app = MagicMock(spec=["deploy", "stop"])
        info = await manager.register_deployment(
            tool_id="tool-abc12345",
            app_name="test-app",
            version_hash="abc123",
            app=mock_app,
            dependencies={"pandas", "numpy"},
            sandbox_config_id="config-123",
            actor=mock_actor,
        )
        assert info.app_name == "test-app"
        assert info.version_hash == "abc123"
        assert info.dependencies == {"pandas", "numpy"}
        # Retrieve the deployment and confirm it matches what was registered.
        retrieved = await manager.get_deployment("tool-abc12345", "config-123", actor=mock_actor)
        assert retrieved.app_name == info.app_name
        assert retrieved.version_hash == info.version_hash

    @pytest.mark.asyncio
    async def test_needs_redeployment(self):
        """Test checking if redeployment is needed."""
        from unittest.mock import AsyncMock

        from letta.schemas.user import User

        manager = ModalVersionManager()
        # Mock the tool manager
        mock_tool = MagicMock()
        mock_tool.id = "tool-def45678"
        mock_tool.metadata_ = {}
        manager.tool_manager.get_tool_by_id = MagicMock(return_value=mock_tool)
        manager.tool_manager.update_tool_by_id_async = AsyncMock(return_value=mock_tool)
        # Create a mock actor
        mock_actor = MagicMock(spec=User)
        # No deployment exists yet
        assert await manager.needs_redeployment("tool-def45678", "v1", "config-123", actor=mock_actor) is True
        # Register a deployment
        mock_app = MagicMock()
        await manager.register_deployment(
            tool_id="tool-def45678",
            app_name="test-app",
            version_hash="v1",
            app=mock_app,
            sandbox_config_id="config-123",
            actor=mock_actor,
        )
        # Update mock to return the registered deployment — the manager reads
        # deployment records back out of the tool's metadata_ dict.
        mock_tool.metadata_ = {
            "modal_deployments": {
                "config-123": {
                    "app_name": "test-app",
                    "version_hash": "v1",
                    "deployed_at": "2024-01-01T00:00:00",
                    "dependencies": [],
                }
            }
        }
        # Same version - no redeployment needed
        assert await manager.needs_redeployment("tool-def45678", "v1", "config-123", actor=mock_actor) is False
        # Different version - redeployment needed
        assert await manager.needs_redeployment("tool-def45678", "v2", "config-123", actor=mock_actor) is True

    @pytest.mark.skip(reason="get_deployment_stats method not implemented in ModalVersionManager")
    @pytest.mark.asyncio
    async def test_deployment_stats(self):
        """Test getting deployment statistics."""
        from unittest.mock import AsyncMock

        from letta.schemas.user import User

        manager = ModalVersionManager()
        # Mock the tool manager with a dict of mock tools keyed by id.
        mock_tools = {}
        for i in range(3):
            tool_id = f"tool-{i:08x}"
            mock_tool = MagicMock()
            mock_tool.id = tool_id
            mock_tool.metadata_ = {}
            mock_tools[tool_id] = mock_tool

        def get_tool_by_id(tool_id, actor=None):
            # Lookup helper used as a side_effect so each call resolves its own tool.
            return mock_tools.get(tool_id)

        manager.tool_manager.get_tool_by_id = MagicMock(side_effect=get_tool_by_id)
        manager.tool_manager.update_tool_by_id_async = AsyncMock()
        # Create a mock actor
        mock_actor = MagicMock(spec=User)
        # Register multiple deployments
        for i in range(3):
            tool_id = f"tool-{i:08x}"
            mock_app = MagicMock()
            await manager.register_deployment(
                tool_id=tool_id,
                app_name=f"app-{i}",
                version_hash=f"v{i}",
                app=mock_app,
                sandbox_config_id="config-123",
                actor=mock_actor,
            )
        stats = await manager.get_deployment_stats()
        # Note: The actual implementation may store deployments differently
        # This test assumes the stats method exists and returns expected format
        assert stats["total_deployments"] >= 0  # Adjust based on actual implementation
        assert "deployments" in stats

    @pytest.mark.skip(reason="export_state and import_state methods not implemented in ModalVersionManager")
    @pytest.mark.asyncio
    async def test_export_import_state(self):
        """Test exporting and importing deployment state."""
        from unittest.mock import AsyncMock

        from letta.schemas.user import User

        manager1 = ModalVersionManager()
        # Mock the tool manager for manager1
        mock_tools = {
            "tool-11111111": MagicMock(id="tool-11111111", metadata_={}),
            "tool-22222222": MagicMock(id="tool-22222222", metadata_={}),
        }

        def get_tool_by_id(tool_id, actor=None):
            return mock_tools.get(tool_id)

        manager1.tool_manager.get_tool_by_id = MagicMock(side_effect=get_tool_by_id)
        manager1.tool_manager.update_tool_by_id_async = AsyncMock()
        # Create a mock actor
        mock_actor = MagicMock(spec=User)
        # Register deployments
        mock_app = MagicMock()
        await manager1.register_deployment(
            tool_id="tool-11111111",
            app_name="app1",
            version_hash="v1",
            app=mock_app,
            dependencies={"dep1"},
            sandbox_config_id="config-123",
            actor=mock_actor,
        )
        await manager1.register_deployment(
            tool_id="tool-22222222",
            app_name="app2",
            version_hash="v2",
            app=mock_app,
            dependencies={"dep2", "dep3"},
            sandbox_config_id="config-123",
            actor=mock_actor,
        )
        # Export state
        state_json = await manager1.export_state()
        state = json.loads(state_json)
        # Verify exported state structure
        assert "tool-11111111" in state or "deployments" in state  # Depends on implementation
        # Import into new manager
        manager2 = ModalVersionManager()
        manager2.tool_manager.get_tool_by_id = MagicMock(side_effect=get_tool_by_id)
        await manager2.import_state(state_json)
        # Note: The actual implementation may not have export/import methods
        # This test assumes they exist or should be modified based on actual API
class TestAsyncToolSandboxModalV2:
    """Test the AsyncToolSandboxModalV2 class.

    Covers version-hash calculation, app-name generation, a fully mocked
    end-to-end ``run()`` call, and async-function detection.
    """

    @pytest.fixture
    def mock_tool(self):
        """Create a mock tool for testing."""
        return Tool(
            id="tool-12345678",  # Valid tool ID format
            name="test_function",
            source_code="""
def test_function(x: int, y: int) -> int:
    '''Add two numbers together.'''
    return x + y
""",
            json_schema={
                "parameters": {
                    "properties": {
                        "x": {"type": "integer"},
                        "y": {"type": "integer"},
                    }
                }
            },
            pip_requirements=[PipRequirement(name="requests")],
        )

    @pytest.fixture
    def mock_user(self):
        """Create a mock user for testing."""
        user = MagicMock()
        user.organization_id = "test-org"
        return user

    @pytest.fixture
    def mock_sandbox_config(self):
        """Create a mock sandbox configuration."""
        modal_config = ModalSandboxConfig(
            timeout=60,
            pip_requirements=["pandas"],
        )
        config = SandboxConfig(
            id="sandbox-12345678",  # Valid sandbox ID format
            type=SandboxType.MODAL,  # Changed from sandbox_type to type
            config=modal_config.model_dump(),
        )
        return config

    def test_version_hash_calculation(self, mock_tool, mock_user, mock_sandbox_config):
        """Test that version hash is calculated correctly."""
        sandbox = AsyncToolSandboxModalV2(
            tool_name="test_function",
            args={"x": 1, "y": 2},
            user=mock_user,
            tool_id=mock_tool.id,
            tool_object=mock_tool,
            sandbox_config=mock_sandbox_config,
        )
        # Access through deployment manager
        version1 = sandbox._deployment_manager.calculate_version_hash(mock_sandbox_config)
        assert version1  # Should not be empty
        assert len(version1) == 12  # We take first 12 chars of hash
        # Same inputs should produce same hash (determinism).
        version2 = sandbox._deployment_manager.calculate_version_hash(mock_sandbox_config)
        assert version1 == version2
        # Changing tool code should change hash
        mock_tool.source_code = "def test_function(x, y): return x * y"
        sandbox2 = AsyncToolSandboxModalV2(
            tool_name="test_function",
            args={"x": 1, "y": 2},
            user=mock_user,
            tool_id=mock_tool.id,
            tool_object=mock_tool,
            sandbox_config=mock_sandbox_config,
        )
        version3 = sandbox2._deployment_manager.calculate_version_hash(mock_sandbox_config)
        assert version3 != version1
        # Changing dependencies should also change hash
        mock_tool.source_code = "def test_function(x, y): return x + y"  # Reset
        mock_tool.pip_requirements = [PipRequirement(name="numpy")]
        sandbox3 = AsyncToolSandboxModalV2(
            tool_name="test_function",
            args={"x": 1, "y": 2},
            user=mock_user,
            tool_id=mock_tool.id,
            tool_object=mock_tool,
            sandbox_config=mock_sandbox_config,
        )
        version4 = sandbox3._deployment_manager.calculate_version_hash(mock_sandbox_config)
        assert version4 != version1
        # Changing sandbox config should change hash
        modal_config2 = ModalSandboxConfig(
            timeout=120,  # Different timeout
            pip_requirements=["pandas"],
        )
        config2 = SandboxConfig(
            id="sandbox-87654321",
            type=SandboxType.MODAL,
            config=modal_config2.model_dump(),
        )
        version5 = sandbox3._deployment_manager.calculate_version_hash(config2)
        assert version5 != version4

    def test_app_name_generation(self, mock_tool, mock_user):
        """Test app name generation."""
        sandbox = AsyncToolSandboxModalV2(
            tool_name="test_function",
            args={"x": 1, "y": 2},
            user=mock_user,
            tool_id=mock_tool.id,
            tool_object=mock_tool,
        )
        # App name generation is now in deployment manager and uses tool ID
        app_name = sandbox._deployment_manager._generate_app_name()
        # App name is based on tool ID truncated to 40 chars
        assert app_name == mock_tool.id[:40]

    @pytest.mark.asyncio
    async def test_run_with_mocked_modal(self, mock_tool, mock_user, mock_sandbox_config):
        """Test the run method with mocked Modal components."""
        # Patch the modal module in both places it is imported.
        with (
            patch("letta.services.tool_sandbox.modal_sandbox_v2.modal") as mock_modal,
            patch("letta.services.tool_sandbox.modal_deployment_manager.modal") as mock_modal2,
        ):
            # Mock Modal app
            mock_app = MagicMock()  # Use MagicMock for the app itself
            mock_app.run = MagicMock()

            # Mock the function decorator. NOTE: the closure reads `mock_remote`,
            # which is assigned further below — safe because the decorator is only
            # invoked later, during sandbox.run().
            def mock_function_decorator(*args, **kwargs):
                def decorator(func):
                    # Create a mock that has a remote attribute
                    mock_func = MagicMock()
                    mock_func.remote = mock_remote
                    # Store the mocked function as tool_executor on the app
                    mock_app.tool_executor = mock_func
                    return mock_func

                return decorator

            mock_app.function = mock_function_decorator
            # Mock deployment
            mock_app.deploy = MagicMock()
            mock_app.deploy.aio = AsyncMock()
            # Mock the remote execution
            mock_remote = MagicMock()
            mock_remote.aio = AsyncMock(
                return_value={
                    "result": 3,  # Return actual integer, not string
                    "agent_state": None,
                    "stdout": "Executing...",
                    "stderr": "",
                    "error": None,
                }
            )
            mock_modal.App.return_value = mock_app
            mock_modal2.App.return_value = mock_app
            # Mock App.lookup.aio to handle app lookup attempts — failing lookup
            # forces the code down the fresh-deployment path.
            mock_modal.App.lookup = MagicMock()
            mock_modal.App.lookup.aio = AsyncMock(side_effect=Exception("App not found"))
            mock_modal2.App.lookup = MagicMock()
            mock_modal2.App.lookup.aio = AsyncMock(side_effect=Exception("App not found"))
            # Mock enable_output context manager
            mock_modal.enable_output = MagicMock()
            mock_modal.enable_output.return_value.__enter__ = MagicMock()
            mock_modal.enable_output.return_value.__exit__ = MagicMock()
            mock_modal2.enable_output = MagicMock()
            mock_modal2.enable_output.return_value.__enter__ = MagicMock()
            mock_modal2.enable_output.return_value.__exit__ = MagicMock()
            # Mock the SandboxConfigManager to avoid type checking issues
            with patch("letta.services.tool_sandbox.base.SandboxConfigManager") as MockSCM:
                mock_scm = MockSCM.return_value
                mock_scm.get_sandbox_env_vars_as_dict_async = AsyncMock(return_value={})
                # Create sandbox
                sandbox = AsyncToolSandboxModalV2(
                    tool_name="test_function",
                    args={"x": 1, "y": 2},
                    user=mock_user,
                    tool_id=mock_tool.id,
                    tool_object=mock_tool,
                    sandbox_config=mock_sandbox_config,
                )
                # Mock the version manager through deployment manager
                version_manager = sandbox._deployment_manager.version_manager
                if version_manager:
                    with patch.object(version_manager, "get_deployment", return_value=None):
                        with patch.object(version_manager, "register_deployment", return_value=None):
                            # Run the tool
                            result = await sandbox.run()
                else:
                    # If no version manager (use_version_tracking=False), just run
                    result = await sandbox.run()
                assert result.func_return == 3  # Check for actual integer
                assert result.status == "success"
                assert "Executing..." in result.stdout[0]

    def test_detect_async_function(self, mock_user):
        """Test detection of async functions."""
        # Test with sync function
        sync_tool = Tool(
            id="tool-abcdef12",  # Valid tool ID format
            name="sync_func",
            source_code="def sync_func(x): return x",
            json_schema={"parameters": {"properties": {}}},
        )
        sandbox_sync = AsyncToolSandboxModalV2(
            tool_name="sync_func",
            args={},
            user=mock_user,
            tool_id=sync_tool.id,
            tool_object=sync_tool,
        )
        assert sandbox_sync._detect_async_function() is False
        # Test with async function
        async_tool = Tool(
            id="tool-fedcba21",  # Valid tool ID format
            name="async_func",
            source_code="async def async_func(x): return x",
            json_schema={"parameters": {"properties": {}}},
        )
        sandbox_async = AsyncToolSandboxModalV2(
            tool_name="async_func",
            args={},
            user=mock_user,
            tool_id=async_tool.id,
            tool_object=async_tool,
        )
        assert sandbox_async._detect_async_function() is True
if __name__ == "__main__":
    # Allow running this test module directly as a script, with verbose output.
    pytest.main([__file__, "-v"])
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/test_modal_sandbox_v2.py",
"license": "Apache License 2.0",
"lines": 478,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/test_plugins.py | import pytest
from letta.data_sources.redis_client import NoopAsyncRedisClient, get_redis_client
from letta.helpers.decorators import experimental
from letta.settings import settings
@pytest.mark.asyncio
async def test_default_experimental_decorator():
    """A sync function behind an enabled experimental check executes normally."""
    settings.plugin_register = "experimental_check=tests.helpers.plugins_helper:is_experimental_okay"

    @experimental("test_just_pass", fallback_function=lambda: False, kwarg1=3)
    def _always_true():
        return True

    assert _always_true()
    settings.plugin_register = ""
@pytest.mark.asyncio
async def test_overwrite_arg_success():
    """The call-site kwarg should reach the wrapped coroutine and it should succeed.

    NOTE(review): the wrapped coroutine was previously asserted without being
    awaited; a coroutine object is always truthy, so the assertion passed
    vacuously and the inner `assert bool_val is False` never executed.
    """
    settings.plugin_register = "experimental_check=tests.helpers.plugins_helper:is_experimental_okay"

    @experimental("test_override_kwarg", fallback_function=lambda *args, **kwargs: False, bool_val=True)
    async def _return_true(a_val: bool, bool_val: bool):
        assert bool_val is False
        return True

    # Await so the inner assertion actually runs and the return value is checked.
    assert await _return_true(False, False)
    settings.plugin_register = ""
@pytest.mark.asyncio
async def test_overwrite_arg_fail():
    """Mismatched kwargs should route through the fallback function.

    NOTE(review): the first two decorated coroutines were asserted without
    `await`, which made those assertions vacuously true (a coroutine object is
    always truthy). They are now awaited like the later calls in this test.
    """
    # Should fallback to lambda
    settings.plugin_register = "experimental_check=tests.helpers.plugins_helper:is_experimental_okay"

    @experimental("test_override_kwarg", fallback_function=lambda *args, **kwargs: True, bool_val=False)
    async def _return_false(a_val: bool, bool_val: bool):
        assert bool_val is True
        return False

    assert await _return_false(False, True)

    @experimental("test_override_kwarg", fallback_function=lambda *args, **kwargs: False, bool_val=True)
    async def _return_true(a_val: bool, bool_val: bool):
        assert bool_val is False
        return True

    assert await _return_true(False, bool_val=False)

    @experimental("test_override_kwarg", fallback_function=lambda *args, **kwargs: True)
    async def _get_true(a_val: bool, bool_val: bool):
        return True

    assert await _get_true(True, bool_val=True)
    with pytest.raises(Exception):
        # kwarg must be included in either experimental flag or function call
        assert await _get_true(True, True)
    settings.plugin_register = ""
@pytest.mark.asyncio
async def test_redis_flag():
    """Group inclusion in Redis gates access to an experimental feature."""
    settings.plugin_register = "experimental_check=tests.helpers.plugins_helper:is_experimental_okay"

    @experimental("test_redis_flag", fallback_function=lambda *args, **kwargs: _raise())
    async def _new_feature(user_id: str) -> str:
        return "new_feature"

    def _raise():
        raise Exception()

    redis_client = await get_redis_client()
    group = "TEST_GROUP"
    inclusion_key = redis_client._get_group_inclusion_key(group)
    exclusion_key = redis_client._get_group_exclusion_key(group)
    user = "user123"

    # Reset both sets so state from earlier runs cannot leak into this one.
    for key in (inclusion_key, exclusion_key):
        for member in await redis_client.smembers(key):
            await redis_client.srem(key, member)

    await redis_client.create_inclusion_exclusion_keys(group=group)
    await redis_client.sadd(inclusion_key, user)

    if isinstance(redis_client, NoopAsyncRedisClient):
        # No Redis backend available: the fallback (which raises) always fires.
        with pytest.raises(Exception):
            await _new_feature(user_id=user)
    else:
        assert await _new_feature(user_id=user) == "new_feature"
        with pytest.raises(Exception):
            await _new_feature(user_id=user + "1")
        print("members: ", await redis_client.smembers(inclusion_key))
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/test_plugins.py",
"license": "Apache License 2.0",
"lines": 71,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/test_provider_trace.py | """
Comprehensive tests for provider trace telemetry.
Tests verify that provider traces are correctly created with all telemetry context
(agent_id, agent_tags, run_id, step_id, call_type) across:
- Agent steps (non-streaming and streaming)
- Tool calls
- Summarization calls
- Different agent architectures (V2, V3)
"""
import asyncio
import os
import threading
import time
import uuid
import pytest
from dotenv import load_dotenv
from letta_client import Letta
from letta.schemas.embedding_config import EmbeddingConfig
from letta.schemas.letta_message_content import TextContent
from letta.schemas.llm_config import LLMConfig
from letta.schemas.message import MessageCreate
def _run_server():
    """Launch the Letta REST server in-process (used by the server_url fixture)."""
    load_dotenv()
    from letta.server.rest_api.app import start_server

    start_server(debug=True)
@pytest.fixture(scope="session")
def server_url():
    """Return the base URL of a running server, starting a background one if needed."""
    url = os.getenv("LETTA_SERVER_URL", "http://localhost:8283")
    if not os.getenv("LETTA_SERVER_URL"):
        server_thread = threading.Thread(target=_run_server, daemon=True)
        server_thread.start()
        time.sleep(5)  # give the server a moment to come up
    return url
@pytest.fixture(scope="session")
def client(server_url):
    """Session-scoped REST client pointed at the test server."""
    yield Letta(base_url=server_url)
@pytest.fixture(scope="session")
def event_loop(request):
    """Create one event loop for the whole session and close it on teardown."""
    policy = asyncio.get_event_loop_policy()
    loop = policy.new_event_loop()
    yield loop
    loop.close()
@pytest.fixture(scope="function")
def roll_dice_tool(client, roll_dice_tool_func):
    """Upsert the dice-rolling tool fresh for each test."""
    yield client.tools.upsert_from_function(func=roll_dice_tool_func)
@pytest.fixture(scope="function")
def weather_tool(client, weather_tool_func):
    """Upsert the weather tool fresh for each test."""
    yield client.tools.upsert_from_function(func=weather_tool_func)
@pytest.fixture(scope="function")
def print_tool(client, print_tool_func):
    """Upsert the print tool fresh for each test."""
    yield client.tools.upsert_from_function(func=print_tool_func)
@pytest.fixture(scope="function")
def agent_state(client, roll_dice_tool, weather_tool):
    """Agent wired up with both tools; deleted again after the test."""
    state = client.agents.create(
        name=f"test_provider_trace_{str(uuid.uuid4())[:8]}",
        tool_ids=[roll_dice_tool.id, weather_tool.id],
        include_base_tools=True,
        tags=["test", "provider-trace"],
        memory_blocks=[
            {"label": "human", "value": "Name: TestUser"},
            {"label": "persona", "value": "Helpful test agent"},
        ],
        llm_config=LLMConfig.default_config(model_name="gpt-4o-mini"),
        embedding_config=EmbeddingConfig.default_config(provider="openai"),
    )
    yield state
    client.agents.delete(state.id)
@pytest.fixture(scope="function")
def agent_state_with_tags(client, weather_tool):
    """Agent carrying a fixed tag set, for tag-propagation assertions."""
    state = client.agents.create(
        name=f"test_tagged_agent_{str(uuid.uuid4())[:8]}",
        tool_ids=[weather_tool.id],
        include_base_tools=True,
        tags=["env:test", "team:telemetry", "version:v1"],
        memory_blocks=[
            {"label": "human", "value": "Name: TagTestUser"},
            {"label": "persona", "value": "Agent with tags"},
        ],
        llm_config=LLMConfig.default_config(model_name="gpt-4o-mini"),
        embedding_config=EmbeddingConfig.default_config(provider="openai"),
    )
    yield state
    client.agents.delete(state.id)
class TestProviderTraceBasicStep:
    """Provider traces for plain (no-tool) agent steps."""

    @pytest.mark.asyncio
    async def test_non_streaming_step_creates_provider_trace(self, client, agent_state):
        """A non-streaming step should leave behind a retrievable trace."""
        reply = client.agents.messages.create(
            agent_id=agent_state.id,
            messages=[MessageCreate(role="user", content=[TextContent(text="Hello, how are you?")])],
        )
        assert len(reply.messages) > 0
        final_step = reply.messages[-1].step_id
        assert final_step is not None
        trace = client.telemetry.retrieve_provider_trace(step_id=final_step)
        assert trace is not None
        assert trace.request_json is not None
        assert trace.response_json is not None

    @pytest.mark.asyncio
    async def test_streaming_step_creates_provider_trace(self, client, agent_state):
        """Every step produced by a streaming exchange should have a trace."""
        marker = client.agents.messages.list(agent_id=agent_state.id, limit=1)[0]
        stream = client.agents.messages.create_stream(
            agent_id=agent_state.id,
            messages=[MessageCreate(role="user", content=[TextContent(text="Tell me a joke.")])],
        )
        list(stream)  # drain the stream so all steps complete
        new_messages = client.agents.messages.list(agent_id=agent_state.id, after=marker.id)
        step_ids = {m.step_id for m in new_messages if m.step_id is not None}
        assert len(step_ids) > 0
        for sid in step_ids:
            trace = client.telemetry.retrieve_provider_trace(step_id=sid)
            assert trace is not None
            assert trace.request_json is not None
            assert trace.response_json is not None
class TestProviderTraceWithToolCalls:
    """Provider traces for steps that invoke tools."""

    @pytest.mark.asyncio
    async def test_tool_call_step_has_provider_trace(self, client, agent_state):
        """Both the tool-calling step and the closing step should have traces."""
        reply = client.agents.messages.create(
            agent_id=agent_state.id,
            messages=[MessageCreate(role="user", content=[TextContent(text="Get the weather in San Francisco.")])],
        )
        first_step = reply.messages[0].step_id
        last_step = reply.messages[-1].step_id
        first_trace = client.telemetry.retrieve_provider_trace(step_id=first_step)
        assert first_trace is not None
        assert first_trace.request_json is not None
        if first_step != last_step:
            last_trace = client.telemetry.retrieve_provider_trace(step_id=last_step)
            assert last_trace is not None
            assert last_trace.request_json is not None

    @pytest.mark.asyncio
    async def test_streaming_tool_call_has_provider_trace(self, client, agent_state):
        """Streaming tool-call steps should also record traces."""
        marker = client.agents.messages.list(agent_id=agent_state.id, limit=1)[0]
        stream = client.agents.messages.create_stream(
            agent_id=agent_state.id,
            messages=[MessageCreate(role="user", content=[TextContent(text="Roll the dice for me.")])],
        )
        list(stream)  # drain
        new_messages = client.agents.messages.list(agent_id=agent_state.id, after=marker.id)
        step_ids = {m.step_id for m in new_messages if m.step_id is not None}
        assert len(step_ids) > 0
        for sid in step_ids:
            trace = client.telemetry.retrieve_provider_trace(step_id=sid)
            assert trace is not None
            assert trace.request_json is not None
class TestProviderTraceTelemetryContext:
    """Telemetry context fields recorded on provider traces."""

    @staticmethod
    def _trace_for_last_step(client, agent_id, text):
        """Send one user message; return (step_id, trace) for the final step."""
        reply = client.agents.messages.create(
            agent_id=agent_id,
            messages=[MessageCreate(role="user", content=[TextContent(text=text)])],
        )
        step_id = reply.messages[-1].step_id
        return step_id, client.telemetry.retrieve_provider_trace(step_id=step_id)

    @pytest.mark.asyncio
    async def test_provider_trace_contains_agent_id(self, client, agent_state):
        """The trace records which agent produced the step."""
        _, trace = self._trace_for_last_step(client, agent_state.id, "Hello")
        assert trace is not None
        assert trace.agent_id == agent_state.id

    @pytest.mark.asyncio
    async def test_provider_trace_contains_agent_tags(self, client, agent_state_with_tags):
        """The trace carries the agent's tags verbatim."""
        _, trace = self._trace_for_last_step(client, agent_state_with_tags.id, "Hello")
        assert trace is not None
        assert trace.agent_tags is not None
        assert set(trace.agent_tags) == {"env:test", "team:telemetry", "version:v1"}

    @pytest.mark.asyncio
    async def test_provider_trace_contains_step_id(self, client, agent_state):
        """The trace's step_id matches the step_id on the message."""
        step_id, trace = self._trace_for_last_step(client, agent_state.id, "Hello")
        assert trace is not None
        assert trace.step_id == step_id

    @pytest.mark.asyncio
    async def test_provider_trace_contains_run_id_for_async_job(self, client, agent_state):
        """Traces created by a background job record that job's id as run_id."""
        job = client.agents.messages.create_async(
            agent_id=agent_state.id,
            messages=[MessageCreate(role="user", content=[TextContent(text="Hello")])],
        )
        # Poll until the background job settles.
        while job.status not in ["completed", "failed"]:
            time.sleep(0.5)
            job = client.jobs.retrieve(job.id)
        assert job.status == "completed"
        recent = client.agents.messages.list(agent_id=agent_state.id, limit=5)
        step_ids = list({m.step_id for m in recent if m.step_id is not None})
        assert len(step_ids) > 0
        trace = client.telemetry.retrieve_provider_trace(step_id=step_ids[0])
        assert trace is not None
        assert trace.run_id == job.id
class TestProviderTraceMultiStep:
    """Traces across conversations that span several steps."""

    @pytest.mark.asyncio
    async def test_multi_step_conversation_has_traces_for_each_step(self, client, agent_state):
        """A multi-step exchange should produce one trace per step."""
        reply = client.agents.messages.create(
            agent_id=agent_state.id,
            messages=[
                MessageCreate(
                    role="user",
                    content=[TextContent(text="First, get the weather in NYC. Then roll the dice.")],
                )
            ],
        )
        step_ids = {m.step_id for m in reply.messages if m.step_id is not None}
        assert len(step_ids) >= 1
        for sid in step_ids:
            trace = client.telemetry.retrieve_provider_trace(step_id=sid)
            assert trace is not None, f"No trace found for step_id={sid}"
            assert trace.request_json is not None
            assert trace.agent_id == agent_state.id

    @pytest.mark.asyncio
    async def test_consecutive_messages_have_separate_traces(self, client, agent_state):
        """Back-to-back messages must not share a step or a trace."""

        def _final_step(text):
            reply = client.agents.messages.create(
                agent_id=agent_state.id,
                messages=[MessageCreate(role="user", content=[TextContent(text=text)])],
            )
            return reply.messages[-1].step_id

        first = _final_step("Hello")
        second = _final_step("How are you?")
        assert first != second
        trace_a = client.telemetry.retrieve_provider_trace(step_id=first)
        trace_b = client.telemetry.retrieve_provider_trace(step_id=second)
        assert trace_a is not None
        assert trace_b is not None
        assert trace_a.id != trace_b.id
class TestProviderTraceRequestResponseContent:
    """Contents of the stored request/response JSON payloads."""

    @staticmethod
    def _hello_trace(client, agent_id):
        """Send a 'Hello' message and return the final step's trace."""
        reply = client.agents.messages.create(
            agent_id=agent_id,
            messages=[MessageCreate(role="user", content=[TextContent(text="Hello")])],
        )
        return client.telemetry.retrieve_provider_trace(step_id=reply.messages[-1].step_id)

    @pytest.mark.asyncio
    async def test_request_json_contains_model(self, client, agent_state):
        """The captured request includes the model name."""
        trace = self._hello_trace(client, agent_state.id)
        assert trace is not None
        assert trace.request_json is not None
        assert "model" in trace.request_json

    @pytest.mark.asyncio
    async def test_request_json_contains_messages(self, client, agent_state):
        """The captured request includes the messages array."""
        trace = self._hello_trace(client, agent_state.id)
        assert trace is not None
        assert trace.request_json is not None
        assert "messages" in trace.request_json
        assert isinstance(trace.request_json["messages"], list)

    @pytest.mark.asyncio
    async def test_response_json_contains_usage(self, client, agent_state):
        """The captured response includes usage statistics in some form."""
        trace = self._hello_trace(client, agent_state.id)
        assert trace is not None
        assert trace.response_json is not None
        assert "usage" in trace.response_json or "usage" in str(trace.response_json)
class TestProviderTraceEdgeCases:
    """Edge cases and graceful-failure behavior."""

    @pytest.mark.asyncio
    async def test_nonexistent_step_id_returns_none_or_empty(self, client):
        """Querying an unknown step_id yields None/empty or a clean error."""
        bogus_step_id = f"step-{uuid.uuid4()}"
        try:
            trace = client.telemetry.retrieve_provider_trace(step_id=bogus_step_id)
            assert trace is None or trace.request_json is None
        except Exception:
            # A not-found style error is an acceptable outcome too.
            pass

    @pytest.mark.asyncio
    async def test_empty_message_still_creates_trace(self, client, agent_state):
        """Even a minimal message produces a trace for its step."""
        reply = client.agents.messages.create(
            agent_id=agent_state.id,
            messages=[MessageCreate(role="user", content=[TextContent(text="Hi")])],
        )
        final_step = reply.messages[-1].step_id
        assert final_step is not None
        trace = client.telemetry.retrieve_provider_trace(step_id=final_step)
        assert trace is not None
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/test_provider_trace.py",
"license": "Apache License 2.0",
"lines": 313,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/test_redis_client.py | import pytest
from letta.data_sources.redis_client import get_redis_client
@pytest.mark.asyncio
async def test_redis_client():
    """Exercise sadd/smembers/smismember round-trips on three throwaway keys."""
    fixtures = {
        "LETTA_TEST_0": [1, 2, 3],
        "LETTA_TEST_1": ["apple", "pear", "banana"],
        "LETTA_TEST_2": ["{}", 3.2, "cat"],
    }
    redis_client = await get_redis_client()

    # Start from a clean slate.
    await redis_client.delete(*fixtures.keys())

    # Populate every key and confirm all three members were added.
    for key, values in fixtures.items():
        assert await redis_client.sadd(key, *values) == 3

    # Redis stores set members as strings.
    for key, values in fixtures.items():
        assert await redis_client.smembers(key) == {str(v) for v in values}

    # Single- and multi-member membership checks.
    for key, values in fixtures.items():
        assert await redis_client.smismember(key, "invalid") == 0
        assert await redis_client.smismember(key, values[0]) == 1
        assert await redis_client.smismember(key, values[:2]) == [1, 1]
        assert await redis_client.smismember(key, [*values[2:], "invalid"]) == [1, 0]
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/test_redis_client.py",
"license": "Apache License 2.0",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/test_schema_validator.py | """
Test schema validation for OpenAI strict mode compliance.
"""
from letta.functions.schema_validator import SchemaHealth, validate_complete_json_schema
def test_user_example_schema_now_strict():
    """An optional field no longer blocks strict mode: it can be healed."""
    candidate = {
        "type": "object",
        "additionalProperties": False,
        "required": ["a"],  # 'b' is optional and will be healed
        "properties": {
            "a": {"title": "A", "type": "integer"},
            "b": {"anyOf": [{"type": "integer"}, {"type": "null"}], "default": None, "title": "B"},
        },
    }
    verdict, complaints = validate_complete_json_schema(candidate)
    assert verdict == SchemaHealth.STRICT_COMPLIANT
    assert complaints == []
def test_all_properties_required_is_strict():
    """Every property required (nullable or not) -> strict compliant."""
    candidate = {
        "type": "object",
        "additionalProperties": False,
        "required": ["a", "b"],
        "properties": {
            "a": {"type": "integer"},
            "b": {"anyOf": [{"type": "integer"}, {"type": "null"}]},  # nullable but required
        },
    }
    verdict, complaints = validate_complete_json_schema(candidate)
    assert verdict == SchemaHealth.STRICT_COMPLIANT
    assert complaints == []
def test_nested_object_missing_required_now_strict():
    """Optional fields inside nested objects are healable too."""
    candidate = {
        "type": "object",
        "additionalProperties": False,
        "required": ["config"],
        "properties": {
            "config": {
                "type": "object",
                "additionalProperties": False,
                "required": ["host", "port"],  # optional_field intentionally left out
                "properties": {
                    "host": {"type": "string"},
                    "port": {"type": "integer"},
                    "optional_field": {"anyOf": [{"type": "string"}, {"type": "null"}]},
                },
            }
        },
    }
    verdict, complaints = validate_complete_json_schema(candidate)
    assert verdict == SchemaHealth.STRICT_COMPLIANT
    assert complaints == []
def test_nested_object_all_required_is_strict():
    """Fully-required nested objects are strict as-is."""
    candidate = {
        "type": "object",
        "additionalProperties": False,
        "required": ["config"],
        "properties": {
            "config": {
                "type": "object",
                "additionalProperties": False,
                "required": ["host", "port", "timeout"],  # all present properties
                "properties": {
                    "host": {"type": "string"},
                    "port": {"type": "integer"},
                    "timeout": {"anyOf": [{"type": "integer"}, {"type": "null"}]},
                },
            }
        },
    }
    verdict, complaints = validate_complete_json_schema(candidate)
    assert verdict == SchemaHealth.STRICT_COMPLIANT
    assert complaints == []
def test_empty_object_no_properties_is_strict():
    """An object with no properties at all is trivially strict."""
    verdict, complaints = validate_complete_json_schema(
        {"type": "object", "properties": {}, "required": [], "additionalProperties": False}
    )
    assert verdict == SchemaHealth.STRICT_COMPLIANT
    assert complaints == []
def test_missing_additionalproperties_not_strict():
    """Leaving additionalProperties unset demotes the schema to non-strict."""
    candidate = {
        "type": "object",
        "properties": {"field": {"type": "string"}},
        "required": ["field"],
        # additionalProperties deliberately omitted
    }
    verdict, complaints = validate_complete_json_schema(candidate)
    assert verdict == SchemaHealth.NON_STRICT_ONLY
    assert any("additionalProperties" in c and "not explicitly set" in c for c in complaints)
def test_additionalproperties_true_not_strict():
    """additionalProperties: true is rejected for strict mode."""
    candidate = {
        "type": "object",
        "properties": {"field": {"type": "string"}},
        "required": ["field"],
        "additionalProperties": True,  # open objects are not strict-compatible
    }
    verdict, complaints = validate_complete_json_schema(candidate)
    assert verdict == SchemaHealth.NON_STRICT_ONLY
    assert any("additionalProperties" in c and "not false" in c for c in complaints)
def test_complex_schema_with_arrays():
    """Arrays of fully-required objects keep the schema strict."""
    item_schema = {
        "type": "object",
        "additionalProperties": False,
        "required": ["id", "name", "tags"],
        "properties": {
            "id": {"type": "integer"},
            "name": {"type": "string"},
            "tags": {"type": "array", "items": {"type": "string"}},
        },
    }
    candidate = {
        "type": "object",
        "additionalProperties": False,
        "required": ["items", "total"],
        "properties": {
            "items": {"type": "array", "items": item_schema},
            "total": {"type": "integer"},
        },
    }
    verdict, complaints = validate_complete_json_schema(candidate)
    assert verdict == SchemaHealth.STRICT_COMPLIANT
    assert complaints == []
def test_fastmcp_tool_schema_now_strict():
    """The FastMCP-style schema with an optional 'b' is healable -> strict."""
    candidate = {
        "type": "object",
        "additionalProperties": False,
        "required": ["a"],  # only 'a' required; healing covers 'b'
        "properties": {
            "a": {"title": "A", "type": "integer"},
            "b": {"anyOf": [{"type": "integer"}, {"type": "null"}], "default": None, "title": "B"},
        },
    }
    verdict, complaints = validate_complete_json_schema(candidate)
    assert verdict == SchemaHealth.STRICT_COMPLIANT
    assert complaints == []
def test_union_types_with_anyof():
    """anyOf unions are acceptable as long as the property is required."""
    candidate = {
        "type": "object",
        "additionalProperties": False,
        "required": ["value"],
        "properties": {
            "value": {"anyOf": [{"type": "string"}, {"type": "number"}, {"type": "null"}]}
        },
    }
    verdict, complaints = validate_complete_json_schema(candidate)
    assert verdict == SchemaHealth.STRICT_COMPLIANT
    assert complaints == []
def test_healed_schema_with_type_array():
    """A healed field uses a [<type>, "null"] type array and stays strict."""
    candidate = {
        "type": "object",
        "additionalProperties": False,
        "required": ["required_field", "optional_field"],  # all fields required post-heal
        "properties": {
            "required_field": {"type": "string"},
            "optional_field": {"type": ["integer", "null"]},  # healed from optional
        },
    }
    verdict, complaints = validate_complete_json_schema(candidate)
    assert verdict == SchemaHealth.STRICT_COMPLIANT
    assert complaints == []
def test_healed_nested_schema():
    """Healed optional fields in nested objects keep the schema strict."""
    candidate = {
        "type": "object",
        "additionalProperties": False,
        "required": ["config"],
        "properties": {
            "config": {
                "type": "object",
                "additionalProperties": False,
                "required": ["host", "port", "timeout"],  # all required after healing
                "properties": {
                    "host": {"type": "string"},
                    "port": {"type": ["integer", "null"]},  # healed optional field
                    "timeout": {"type": ["number", "null"]},  # healed optional field
                },
            }
        },
    }
    verdict, complaints = validate_complete_json_schema(candidate)
    assert verdict == SchemaHealth.STRICT_COMPLIANT
    assert complaints == []
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/test_schema_validator.py",
"license": "Apache License 2.0",
"lines": 229,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/test_secret.py | from unittest.mock import patch
import pytest
from letta.helpers.crypto_utils import CryptoUtils
from letta.schemas.secret import Secret
class TestSecret:
"""Test suite for Secret wrapper class."""
MOCK_KEY = "test-secret-key-1234567890"
def test_from_plaintext_with_key(self):
"""Test creating a Secret from plaintext value with encryption key."""
from letta.settings import settings
# Set encryption key
original_key = settings.encryption_key
settings.encryption_key = self.MOCK_KEY
try:
plaintext = "my-secret-value"
secret = Secret.from_plaintext(plaintext)
# Should store encrypted value
assert secret.encrypted_value is not None
assert secret.encrypted_value != plaintext
# Should decrypt to original value
assert secret.get_plaintext() == plaintext
finally:
settings.encryption_key = original_key
def test_from_plaintext_without_key_stores_plaintext(self):
"""Test creating a Secret from plaintext without encryption key stores as plaintext."""
from letta.settings import settings
# Clear encryption key
original_key = settings.encryption_key
settings.encryption_key = None
try:
plaintext = "my-plaintext-value"
# Should store as plaintext in _enc column when no encryption key
secret = Secret.from_plaintext(plaintext)
# Should store the plaintext value directly in encrypted_value
assert secret.encrypted_value == plaintext
assert secret.get_plaintext() == plaintext
finally:
settings.encryption_key = original_key
def test_from_plaintext_with_none(self):
"""Test creating a Secret from None value."""
secret = Secret.from_plaintext(None)
assert secret.encrypted_value is None
assert secret.get_plaintext() is None
assert secret.is_empty() is True
def test_from_encrypted(self):
"""Test creating a Secret from already encrypted value."""
from letta.settings import settings
original_key = settings.encryption_key
settings.encryption_key = self.MOCK_KEY
try:
plaintext = "database-secret"
encrypted = CryptoUtils.encrypt(plaintext, self.MOCK_KEY)
secret = Secret.from_encrypted(encrypted)
assert secret.encrypted_value == encrypted
assert secret.get_plaintext() == plaintext
finally:
settings.encryption_key = original_key
def test_get_encrypted(self):
"""Test getting the encrypted value for database storage."""
from letta.settings import settings
original_key = settings.encryption_key
settings.encryption_key = self.MOCK_KEY
try:
plaintext = "test-encryption"
secret = Secret.from_plaintext(plaintext)
encrypted_value = secret.get_encrypted()
assert encrypted_value is not None
# Should decrypt back to original
decrypted = CryptoUtils.decrypt(encrypted_value, self.MOCK_KEY)
assert decrypted == plaintext
finally:
settings.encryption_key = original_key
def test_is_empty(self):
"""Test checking if secret is empty."""
# Empty secret
empty_secret = Secret.from_plaintext(None)
assert empty_secret.is_empty() is True
# Non-empty secret
from letta.settings import settings
original_key = settings.encryption_key
settings.encryption_key = self.MOCK_KEY
try:
non_empty_secret = Secret.from_plaintext("value")
assert non_empty_secret.is_empty() is False
finally:
settings.encryption_key = original_key
def test_string_representation(self):
"""Test that string representation doesn't expose secret."""
from letta.settings import settings
original_key = settings.encryption_key
settings.encryption_key = self.MOCK_KEY
try:
secret = Secret.from_plaintext("sensitive-data")
# String representation should not contain the actual value
str_repr = str(secret)
assert "sensitive-data" not in str_repr
assert "****" in str_repr
# Empty secret
empty_secret = Secret.from_plaintext(None)
assert "empty" in str(empty_secret)
finally:
settings.encryption_key = original_key
def test_equality(self):
"""Test comparing two secrets."""
from letta.settings import settings
original_key = settings.encryption_key
settings.encryption_key = self.MOCK_KEY
try:
plaintext = "same-value"
secret1 = Secret.from_plaintext(plaintext)
secret2 = Secret.from_plaintext(plaintext)
# Should be equal based on plaintext value
assert secret1 == secret2
# Different values should not be equal
secret3 = Secret.from_plaintext("different-value")
assert secret1 != secret3
finally:
settings.encryption_key = original_key
def test_plaintext_caching(self):
"""Test that plaintext values are cached after first decryption."""
from letta.settings import settings
original_key = settings.encryption_key
settings.encryption_key = self.MOCK_KEY
try:
plaintext = "cached-value"
secret = Secret.from_plaintext(plaintext)
# First call should decrypt and cache
result1 = secret.get_plaintext()
assert result1 == plaintext
assert secret._plaintext_cache == plaintext
# Second call should use cache
result2 = secret.get_plaintext()
assert result2 == plaintext
assert result1 is result2 # Should be the same object reference
finally:
settings.encryption_key = original_key
def test_caching_only_decrypts_once(self):
"""Test that decryption only happens once when caching is enabled."""
from letta.settings import settings
original_key = settings.encryption_key
settings.encryption_key = self.MOCK_KEY
try:
plaintext = "test-single-decrypt"
encrypted = CryptoUtils.encrypt(plaintext, self.MOCK_KEY)
# Create a Secret from encrypted value
secret = Secret.from_encrypted(encrypted)
# Mock the decrypt method to track calls
with patch.object(CryptoUtils, "decrypt", wraps=CryptoUtils.decrypt) as mock_decrypt:
# First call should decrypt
result1 = secret.get_plaintext()
assert result1 == plaintext
assert mock_decrypt.call_count == 1
# Second and third calls should use cache
result2 = secret.get_plaintext()
result3 = secret.get_plaintext()
assert result2 == plaintext
assert result3 == plaintext
# Decrypt should still have been called only once
assert mock_decrypt.call_count == 1
finally:
settings.encryption_key = original_key
@pytest.mark.asyncio
async def test_from_plaintext_async_with_key(self):
"""Test creating a Secret from plaintext value asynchronously with encryption key."""
from letta.settings import settings
# Set encryption key
original_key = settings.encryption_key
settings.encryption_key = self.MOCK_KEY
try:
plaintext = "my-async-secret-value"
secret = await Secret.from_plaintext_async(plaintext)
# Should store encrypted value
assert secret.encrypted_value is not None
assert secret.encrypted_value != plaintext
# Should decrypt to original value
result = await secret.get_plaintext_async()
assert result == plaintext
finally:
settings.encryption_key = original_key
@pytest.mark.asyncio
async def test_from_plaintext_async_without_key_stores_plaintext(self):
    """Test creating a Secret asynchronously without encryption key stores as plaintext."""
    from letta.settings import settings

    # Clear encryption key (restored in finally for test isolation)
    original_key = settings.encryption_key
    settings.encryption_key = None
    try:
        plaintext = "my-async-plaintext-value"
        # Should store as plaintext in _enc column when no encryption key
        secret = await Secret.from_plaintext_async(plaintext)
        # Should store the plaintext value directly in encrypted_value
        assert secret.encrypted_value == plaintext
        result = await secret.get_plaintext_async()
        assert result == plaintext
    finally:
        settings.encryption_key = original_key
@pytest.mark.asyncio
async def test_from_plaintext_async_with_none(self):
    """A None plaintext should produce an empty Secret that round-trips to None."""
    none_secret = await Secret.from_plaintext_async(None)
    # Nothing should have been stored for a None input.
    assert none_secret.encrypted_value is None
    decrypted = await none_secret.get_plaintext_async()
    assert decrypted is None
    assert none_secret.is_empty() is True
@pytest.mark.asyncio
async def test_from_plaintexts_async(self):
    """Test batch encrypting multiple secrets concurrently."""
    from letta.settings import settings

    # Install mock key (restored in finally for test isolation)
    original_key = settings.encryption_key
    settings.encryption_key = self.MOCK_KEY
    try:
        values = {
            "key1": "value1",
            "key2": "value2",
            "key3": "value3",
        }
        secrets = await Secret.from_plaintexts_async(values)
        # Should return dict with same keys
        assert set(secrets.keys()) == {"key1", "key2", "key3"}
        # Each secret should decrypt to original value
        for key, secret in secrets.items():
            assert isinstance(secret, Secret)
            assert secret.encrypted_value is not None
            # Stored value must be ciphertext, not the raw input
            assert secret.encrypted_value != values[key]
            result = await secret.get_plaintext_async()
            assert result == values[key]
    finally:
        settings.encryption_key = original_key
@pytest.mark.asyncio
async def test_from_plaintexts_async_empty_dict(self):
    """Batch-encrypting an empty mapping should yield an empty mapping."""
    encrypted_map = await Secret.from_plaintexts_async({})
    assert encrypted_map == {}
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/test_secret.py",
"license": "Apache License 2.0",
"lines": 233,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/test_sonnet_nonnative_reasoning_buffering.py | """Test to verify streaming behavior of Anthropic models with and without native reasoning.
This test confirms:
1. Sonnet 3.5 (20241022) with non-native reasoning exhibits batch streaming (API limitation)
- UPDATE: With fine-grained-tool-streaming beta header, this may improve
2. Sonnet 4 (20250514) with native reasoning should stream progressively
3. GPT-4.1 streams progressively as expected
Note: We've added the 'fine-grained-tool-streaming-2025-05-14' beta header to potentially
improve streaming performance with Anthropic models, especially for tool call parameters.
"""
import os
import time
from typing import Any, List, Tuple

import pytest
from letta_client import Letta, MessageCreate

from tests.utils import wait_for_server
def run_server():
    """Start the Letta server."""
    from dotenv import load_dotenv

    # Load .env before importing the server module — presumably so settings
    # read at import time see the env vars; confirm before reordering.
    load_dotenv()
    from letta.server.rest_api.app import start_server

    print("Starting server...")
    start_server(debug=True)
@pytest.fixture(scope="module")
def client():
    """Create a Letta client for testing."""
    import threading

    # Get URL from environment or start server
    api_url = os.getenv("LETTA_API_URL")
    server_url = os.getenv("LETTA_SERVER_URL", "http://localhost:8283")
    if not os.getenv("LETTA_SERVER_URL"):
        print("Starting server thread")
        # Daemon thread so a lingering server does not block interpreter exit.
        thread = threading.Thread(target=run_server, daemon=True)
        thread.start()
        wait_for_server(server_url)
    print("Running client tests with server:", server_url)
    # Override the base_url if the LETTA_API_URL is set
    base_url = api_url if api_url else server_url
    # create the Letta client
    yield Letta(base_url=base_url, token=None)
@pytest.fixture
def agent_factory(client: Letta):
    """Factory fixture to create agents with different models.

    Yields a callable taking a model handle; every agent created through it
    is deleted during fixture teardown.
    """
    created_agents = []

    def _create_agent(model_name: str):
        """Create an agent with the specified model."""
        # Check for required API keys; skip (not fail) when absent.
        if "claude" in model_name.lower():
            anthropic_key = os.getenv("ANTHROPIC_API_KEY")
            if not anthropic_key:
                pytest.skip("ANTHROPIC_API_KEY not set, skipping Anthropic test")
        elif "gpt" in model_name.lower():
            openai_key = os.getenv("OPENAI_API_KEY")
            if not openai_key:
                pytest.skip("OPENAI_API_KEY not set, skipping OpenAI test")
        agent_state = client.agents.create(
            name=f"test_agent_{model_name.replace('/', '_').replace('.', '_')}",
            memory_blocks=[{"label": "human", "value": "Test user"}, {"label": "persona", "value": "You are a creative storyteller."}],
            model=model_name,
            embedding="openai/text-embedding-3-small",
        )
        created_agents.append(agent_state)
        return agent_state

    yield _create_agent
    # Cleanup all created agents
    for agent_state in created_agents:
        try:
            client.agents.delete(agent_state.id)
        except Exception:
            pass  # Agent might have already been deleted
def detect_burst_chunks(chunks: List[Tuple[float, Any]], burst_threshold: float = 0.05) -> List[List[int]]:
    """
    Detect bursts of chunks arriving close together in time.

    A "burst" is a maximal run of two or more consecutive chunks where each
    chunk arrived within ``burst_threshold`` seconds of the previous one.
    Isolated chunks are never reported as bursts.

    Args:
        chunks: List of (timestamp, chunk) tuples, ordered by timestamp.
        burst_threshold: Maximum time difference (in seconds) to consider
            chunks as part of the same burst.

    Returns:
        List of bursts, where each burst is a list of indices into ``chunks``.
    """
    # NOTE: was annotated `any` (the builtin function), fixed to typing.Any.
    if not chunks:
        return []
    bursts: List[List[int]] = []
    current_burst = [0]
    for i in range(1, len(chunks)):
        time_diff = chunks[i][0] - chunks[i - 1][0]
        if time_diff <= burst_threshold:
            # Part of the same burst
            current_burst.append(i)
        else:
            # Gap detected: flush the finished run if it qualifies as a burst
            if len(current_burst) > 1:  # Only count as burst if more than 1 chunk
                bursts.append(current_burst)
            current_burst = [i]
    # Don't forget the last burst
    if len(current_burst) > 1:
        bursts.append(current_burst)
    return bursts
@pytest.mark.parametrize(
    "model,expected_buffering",
    [
        # NOTE(review): this row duplicates the sonnet-4 id below, but its
        # comment describes the Sonnet 3.5 fine-grained-streaming case from the
        # module docstring — possibly meant to be claude-3-5-sonnet-20241022;
        # confirm against git history before changing.
        ("anthropic/claude-sonnet-4-20250514", False),  # With fine-grained streaming beta, should stream better
        ("anthropic/claude-sonnet-4-20250514", False),  # Sonnet 4 should NOT show buffering (has native reasoning)
        ("openai/gpt-4.1", False),  # GPT-4.1 should NOT show buffering (uses native reasoning)
    ],
)
def test_streaming_buffering_behavior(client: Letta, agent_factory, model: str, expected_buffering: bool):
    """
    Test streaming behavior for different models.

    With fine-grained-tool-streaming beta header:
    - Sonnet 3.5 (20241022) should now stream progressively (beta feature improves tool streaming)
    - Sonnet 4 (20250514) with native reasoning should stream progressively without buffering
    - GPT-4.1 should stream progressively without buffering
    """
    print(f"\n=== Testing Streaming Behavior for {model} ===")
    print(f"Expected buffering: {expected_buffering}")
    # Create agent with the specified model
    agent = agent_factory(model)
    # Send a message that should generate reasoning and tool calls
    # This prompt should trigger inner thoughts and then a response
    user_message = "Think step by step about what makes a good story, then write me a creative story about a toad named Ted. Make it exactly 3 paragraphs long."
    # Create the stream
    response_stream = client.agents.messages.create_stream(
        agent_id=agent.id,
        messages=[MessageCreate(role="user", content=user_message)],
        stream_tokens=True,  # Enable token streaming
    )
    # Collect chunks with timestamps
    chunks_with_time = []
    reasoning_chunks = []
    assistant_chunks = []
    tool_chunks = []
    start_time = time.time()
    try:
        for chunk in response_stream:
            elapsed = time.time() - start_time
            chunks_with_time.append((elapsed, chunk))
            # Categorize chunks by type
            chunk_type = type(chunk).__name__
            chunk_info = f"[{elapsed:.3f}s] {chunk_type}"
            # Check for different message types: prefer the message_type field,
            # fall back to the concrete class name when it is absent.
            if hasattr(chunk, "message_type"):
                chunk_info += f" (message_type: {chunk.message_type})"
                if chunk.message_type == "reasoning_message":
                    reasoning_chunks.append((elapsed, chunk))
                elif chunk.message_type == "assistant_message":
                    assistant_chunks.append((elapsed, chunk))
                elif chunk.message_type == "tool_call_message":
                    tool_chunks.append((elapsed, chunk))
            elif type(chunk).__name__ == "ReasoningMessage":
                chunk_info += " (ReasoningMessage)"
                reasoning_chunks.append((elapsed, chunk))
            elif type(chunk).__name__ == "AssistantMessage":
                chunk_info += " (AssistantMessage)"
                assistant_chunks.append((elapsed, chunk))
            elif type(chunk).__name__ == "ToolCallMessage":
                chunk_info += " (ToolCallMessage)"
                tool_chunks.append((elapsed, chunk))
            # Check for inner thoughts (in tool calls for non-native reasoning)
            if hasattr(chunk, "tool_calls") and chunk.tool_calls:
                for tool_call in chunk.tool_calls:
                    if hasattr(tool_call, "function") and hasattr(tool_call.function, "arguments"):
                        # Check if this is inner thoughts
                        if "inner_thoughts" in str(tool_call.function.arguments):
                            chunk_info += " [contains inner_thoughts]"
                            tool_chunks.append((elapsed, chunk))
            print(chunk_info)
            # Optional: print chunk content snippet for debugging
            if hasattr(chunk, "content") and chunk.content:
                content_preview = str(chunk.content)[:100]
                if content_preview:
                    print(f" Content: {content_preview}...")
    except Exception as e:
        # Keep going: a broken stream still lets us analyze what arrived so far.
        print(f"Stream error: {e}")
        import traceback

        traceback.print_exc()
    # Analyze results
    print("\n=== Analysis ===")
    print(f"Total chunks: {len(chunks_with_time)}")
    print(f"Reasoning chunks: {len(reasoning_chunks)}")
    print(f"Assistant chunks: {len(assistant_chunks)}")
    print(f"Tool chunks: {len(tool_chunks)}")
    # Detect bursts for each type (default 0.05s threshold here; 0.1s below)
    if reasoning_chunks:
        reasoning_bursts = detect_burst_chunks(reasoning_chunks)
        print(f"\nReasoning bursts detected: {len(reasoning_bursts)}")
        for i, burst in enumerate(reasoning_bursts):
            burst_times = [reasoning_chunks[idx][0] for idx in burst]
            print(f" Burst {i + 1}: {len(burst)} chunks from {burst_times[0]:.3f}s to {burst_times[-1]:.3f}s")
    if assistant_chunks:
        assistant_bursts = detect_burst_chunks(assistant_chunks)
        print(f"\nAssistant bursts detected: {len(assistant_bursts)}")
        for i, burst in enumerate(assistant_bursts):
            burst_times = [assistant_chunks[idx][0] for idx in burst]
            print(f" Burst {i + 1}: {len(burst)} chunks from {burst_times[0]:.3f}s to {burst_times[-1]:.3f}s")
    if tool_chunks:
        tool_bursts = detect_burst_chunks(tool_chunks)
        print(f"\nTool call bursts detected: {len(tool_bursts)}")
        for i, burst in enumerate(tool_bursts):
            burst_times = [tool_chunks[idx][0] for idx in burst]
            print(f" Burst {i + 1}: {len(burst)} chunks from {burst_times[0]:.3f}s to {burst_times[-1]:.3f}s")
    # Analyze results based on expected behavior
    print("\n=== Test Results ===")
    # Check if we detected large bursts
    has_significant_bursts = False
    if reasoning_chunks:
        reasoning_bursts = detect_burst_chunks(reasoning_chunks, burst_threshold=0.1)
        if reasoning_bursts:
            largest_burst = max(reasoning_bursts, key=len)
            burst_percentage = len(largest_burst) / len(reasoning_chunks) * 100
            print(f"\nLargest reasoning burst: {len(largest_burst)}/{len(reasoning_chunks)} chunks ({burst_percentage:.1f}%)")
            if burst_percentage >= 80:  # Consider 80%+ as significant buffering
                has_significant_bursts = True
                print(f" -> BUFFERING DETECTED: {burst_percentage:.1f}% of reasoning chunks in single burst")
    if assistant_chunks:
        assistant_bursts = detect_burst_chunks(assistant_chunks, burst_threshold=0.1)
        if assistant_bursts:
            largest_burst = max(assistant_bursts, key=len)
            burst_percentage = len(largest_burst) / len(assistant_chunks) * 100
            print(f"Largest assistant burst: {len(largest_burst)}/{len(assistant_chunks)} chunks ({burst_percentage:.1f}%)")
            if burst_percentage >= 80:
                has_significant_bursts = True
                print(f" -> BUFFERING DETECTED: {burst_percentage:.1f}% of assistant chunks in single burst")
    if tool_chunks:
        tool_bursts = detect_burst_chunks(tool_chunks, burst_threshold=0.1)
        if tool_bursts:
            largest_burst = max(tool_bursts, key=len)
            burst_percentage = len(largest_burst) / len(tool_chunks) * 100
            print(f"Largest tool burst: {len(largest_burst)}/{len(tool_chunks)} chunks ({burst_percentage:.1f}%)")
            if burst_percentage >= 80:
                has_significant_bursts = True
                print(f" -> BUFFERING DETECTED: {burst_percentage:.1f}% of tool chunks in single burst")
    # Overall streaming analysis
    total_time = chunks_with_time[-1][0] if chunks_with_time else 0
    avg_time_between = total_time / len(chunks_with_time) if chunks_with_time else 0
    print(f"\nTotal streaming time: {total_time:.2f}s")
    print(f"Average time between chunks: {avg_time_between:.3f}s")
    # Verify test expectations
    if expected_buffering:
        assert has_significant_bursts, (
            f"Expected buffering behavior for {model}, but streaming appeared progressive. "
            f"This suggests the issue may be fixed or the test isn't detecting it properly."
        )
        print(f"\n✓ Test PASSED: {model} shows expected buffering behavior")
    else:
        assert not has_significant_bursts, (
            f"Did NOT expect buffering for {model}, but detected significant burst behavior. "
            f"This suggests {model} may also have streaming issues."
        )
        print(f"\n✓ Test PASSED: {model} shows expected progressive streaming")
if __name__ == "__main__":
    # Allow running directly for debugging; -s keeps the timing printouts visible.
    pytest.main([__file__, "-v", "-s"])
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/test_sonnet_nonnative_reasoning_buffering.py",
"license": "Apache License 2.0",
"lines": 251,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/test_sources.py | import asyncio
import os
import re
import tempfile
import threading
import time
from datetime import datetime, timedelta
from typing import Any
import pytest
from dotenv import load_dotenv
from letta_client import Letta as LettaSDKClient
from letta_client.types import CreateBlockParam
from letta_client.types.agent_state import AgentState
from letta.constants import DEFAULT_ORG_ID, FILES_TOOLS
from letta.helpers.pinecone_utils import should_use_pinecone
from letta.helpers.tpuf_client import TurbopufferClient
from letta.schemas.enums import FileProcessingStatus, ToolType
from letta.schemas.message import MessageCreate
from letta.schemas.user import User
from letta.settings import settings
from tests.helpers.utils import upload_file_and_wait, upload_file_and_wait_list_files
from tests.utils import wait_for_server
# Constants
SERVER_PORT = 8283  # default local port for the Letta REST server under test
def recompile_agent_system_prompt(client: LettaSDKClient, agent_id: str) -> None:
    """Trigger a server-side system-prompt recompile so raw-preview assertions are deterministic."""
    endpoint = f"/v1/agents/{agent_id}/recompile"
    client.post(endpoint, cast_to=str, body={})
def get_raw_system_message(client: LettaSDKClient, agent_id: str, recompile: bool = False) -> str:
    """Return the agent's raw system message from the preview-raw-payload endpoint.

    When ``recompile`` is True, the system prompt is recompiled first so the
    preview reflects the agent's current state.
    """
    if recompile:
        recompile_agent_system_prompt(client, agent_id)
    request_body = {
        "messages": [
            {
                "role": "user",
                "content": "Testing",
            }
        ],
    }
    payload = client.post(
        f"/v1/agents/{agent_id}/messages/preview-raw-payload",
        cast_to=dict[str, Any],
        body=request_body,
    )
    # The system message is always the first entry of the rendered payload.
    return payload["messages"][0]["content"]
@pytest.fixture(autouse=True)
def clear_sources(client: LettaSDKClient):
    """Delete every existing folder before each test so each test starts clean."""
    # Clear existing sources
    for source in list(client.folders.list()):
        client.folders.delete(folder_id=source.id)
def run_server():
    """Start the Letta server (blocking; run in a daemon thread)."""
    # Load .env before importing the server module — presumably so settings
    # read at import time see the env vars; confirm before reordering.
    load_dotenv()
    from letta.server.rest_api.app import start_server

    print("Starting server...")
    start_server(debug=True)
@pytest.fixture(scope="module")
def client() -> LettaSDKClient:
    """Module-scoped SDK client; boots a local server thread when none is configured."""
    # Get URL from environment or start server
    server_url = os.getenv("LETTA_SERVER_URL", f"http://localhost:{SERVER_PORT}")
    if not os.getenv("LETTA_SERVER_URL"):
        print("Starting server thread")
        thread = threading.Thread(target=run_server, daemon=True)
        thread.start()
        # Use 60s timeout to allow for provider model syncing during server startup
        wait_for_server(server_url, timeout=60)
    print("Running client tests with server:", server_url)
    client = LettaSDKClient(base_url=server_url)
    yield client
@pytest.fixture
def agent_state(disable_pinecone, client: LettaSDKClient):
    """Create a test agent pre-equipped with the file tools (open/search/grep)."""
    # Look up the built-in file tools by name; assumes the server registered
    # exactly these tool names at startup — TODO confirm against FILES_TOOLS.
    open_file_tool = next(iter(client.tools.list(name="open_files")))
    search_files_tool = next(iter(client.tools.list(name="semantic_search_files")))
    grep_tool = next(iter(client.tools.list(name="grep_files")))
    agent_state = client.agents.create(
        name="test_sources_agent",
        memory_blocks=[
            CreateBlockParam(
                label="human",
                value="username: sarah",
            ),
        ],
        model="openai/gpt-4o-mini",
        embedding="openai/text-embedding-3-small",
        tool_ids=[open_file_tool.id, search_files_tool.id, grep_tool.id],
    )
    yield agent_state
# Tests
def test_auto_attach_detach_files_tools(disable_pinecone, disable_turbopuffer, client: LettaSDKClient):
    """Test automatic attachment and detachment of file tools when managing agent sources.

    File tools should appear when the first source is attached and disappear
    only when the last source is detached.
    """
    # Create agent with basic configuration
    agent = client.agents.create(
        memory_blocks=[
            CreateBlockParam(label="human", value="username: sarah"),
        ],
        model="openai/gpt-4o-mini",
        embedding="openai/text-embedding-3-small",
    )

    # Helper function to get file tools from agent
    def get_file_tools(agent_state):
        return {tool.name for tool in agent_state.tools if tool.tool_type == ToolType.LETTA_FILES_CORE}

    # Helper function to assert file tools presence
    def assert_file_tools_present(agent_state, expected_tools):
        actual_tools = get_file_tools(agent_state)
        assert actual_tools == expected_tools, f"File tools mismatch.\nExpected: {expected_tools}\nFound: {actual_tools}"

    # Helper function to assert no file tools
    def assert_no_file_tools(agent_state):
        has_file_tools = any(tool.tool_type == ToolType.LETTA_FILES_CORE for tool in agent_state.tools)
        assert not has_file_tools, "File tools should not be present"

    # Initial state: no file tools
    assert_no_file_tools(agent)
    # Create and attach first source
    source_1 = client.folders.create(name="test_source", embedding="openai/text-embedding-3-small")
    assert len(list(client.folders.list())) == 1
    client.agents.folders.attach(folder_id=source_1.id, agent_id=agent.id)
    agent = client.agents.retrieve(agent_id=agent.id, include=["agent.sources", "agent.tools"])
    assert len(agent.sources) == 1
    assert_file_tools_present(agent, set(FILES_TOOLS))
    # Create and attach second source
    source_2 = client.folders.create(name="another_test_source", embedding="openai/text-embedding-3-small")
    assert len(list(client.folders.list())) == 2
    client.agents.folders.attach(folder_id=source_2.id, agent_id=agent.id)
    agent = client.agents.retrieve(agent_id=agent.id, include=["agent.sources", "agent.tools"])
    assert len(agent.sources) == 2
    # File tools should remain after attaching second source
    assert_file_tools_present(agent, set(FILES_TOOLS))
    # Detach second source - tools should remain (first source still attached)
    client.agents.folders.detach(folder_id=source_2.id, agent_id=agent.id)
    agent = client.agents.retrieve(agent_id=agent.id, include=["agent.sources", "agent.tools"])
    assert_file_tools_present(agent, set(FILES_TOOLS))
    # Detach first source - all file tools should be removed
    client.agents.folders.detach(folder_id=source_1.id, agent_id=agent.id)
    agent = client.agents.retrieve(agent_id=agent.id, include=["agent.sources", "agent.tools"])
    assert_no_file_tools(agent)
@pytest.mark.parametrize("use_mistral_parser", [True, False])
@pytest.mark.parametrize(
    "file_path, expected_value, expected_label_regex",
    [
        ("tests/data/test.txt", "test", r"test_source/test\.txt"),
        ("tests/data/memgpt_paper.pdf", "MemGPT", r"test_source/memgpt_paper\.pdf"),
        ("tests/data/toy_chat_fine_tuning.jsonl", '{"messages"', r"test_source/toy_chat_fine_tuning\.jsonl"),
        ("tests/data/test.md", "h2 Heading", r"test_source/test\.md"),
        ("tests/data/test.json", "glossary", r"test_source/test\.json"),
        ("tests/data/react_component.jsx", "UserProfile", r"test_source/react_component\.jsx"),
        ("tests/data/task_manager.java", "TaskManager", r"test_source/task_manager\.java"),
        ("tests/data/data_structures.cpp", "BinarySearchTree", r"test_source/data_structures\.cpp"),
        ("tests/data/api_server.go", "UserService", r"test_source/api_server\.go"),
        ("tests/data/data_analysis.py", "StatisticalAnalyzer", r"test_source/data_analysis\.py"),
        ("tests/data/test.csv", "Smart Fridge Plus", r"test_source/test\.csv"),
    ],
)
def test_file_upload_creates_source_blocks_correctly(
    disable_pinecone,
    disable_turbopuffer,
    client: LettaSDKClient,
    agent_state: AgentState,
    file_path: str,
    expected_value: str,
    expected_label_regex: str,
    use_mistral_parser: bool,
):
    """Uploading a file should create a matching memory block; deleting it should remove the block."""
    # Override mistral API key setting to force parser selection for testing
    original_mistral_key = settings.mistral_api_key
    try:
        if not use_mistral_parser:
            # Set to None to force markitdown parser selection
            settings.mistral_api_key = None
        # Create a new source
        source = client.folders.create(name="test_source", embedding="openai/text-embedding-3-small")
        assert len(list(client.folders.list())) == 1
        # Attach
        client.agents.folders.attach(folder_id=source.id, agent_id=agent_state.id)
        # Upload the file
        upload_file_and_wait(client, source.id, file_path)
        # Get uploaded files
        files = list(client.folders.files.list(folder_id=source.id, limit=1))
        assert len(files) == 1
        assert files[0].source_id == source.id
        # Check that blocks were created
        agent_state = client.agents.retrieve(agent_id=agent_state.id, include=["agent.blocks"])
        blocks = agent_state.memory.file_blocks
        assert len(blocks) == 1
        assert any(expected_value in b.value for b in blocks)
        assert any(b.value.startswith("[Viewing file start") for b in blocks)
        assert any(re.fullmatch(expected_label_regex, b.label) for b in blocks)
        # verify raw system message contains source information
        raw_system_message = get_raw_system_message(client, agent_state.id, recompile=True)
        assert "test_source" in raw_system_message
        assert "<directories>" in raw_system_message
        # verify file-specific details in raw system message
        file_name = files[0].file_name
        assert f'name="test_source/{file_name}"' in raw_system_message
        assert 'status="open"' in raw_system_message
        # Remove file from source
        client.folders.files.delete(folder_id=source.id, file_id=files[0].id)
        # Confirm blocks were removed
        agent_state = client.agents.retrieve(agent_id=agent_state.id, include=["agent.blocks"])
        blocks = agent_state.memory.file_blocks
        assert len(blocks) == 0
        assert not any(expected_value in b.value for b in blocks)
        assert not any(re.fullmatch(expected_label_regex, b.label) for b in blocks)
        # verify raw system message no longer contains source information
        raw_system_message_after_removal = get_raw_system_message(client, agent_state.id, recompile=True)
        # this should be in, because we didn't delete the source
        assert "test_source" in raw_system_message_after_removal
        assert "<directories>" in raw_system_message_after_removal
        # verify file-specific details are also removed
        assert f'name="test_source/{file_name}"' not in raw_system_message_after_removal
    finally:
        # Restore original mistral API key setting
        settings.mistral_api_key = original_mistral_key
def test_attach_existing_files_creates_source_blocks_correctly(
    disable_pinecone, disable_turbopuffer, client: LettaSDKClient, agent_state: AgentState
):
    """Attaching a source that already has files should create blocks; detaching should remove them."""
    # Create a new source
    source = client.folders.create(name="test_source", embedding="openai/text-embedding-3-small")
    assert len(list(client.folders.list())) == 1
    # Load files into the source
    file_path = "tests/data/test.txt"
    # Upload the files
    upload_file_and_wait(client, source.id, file_path)
    # Get the first file with pagination
    files = list(client.folders.files.list(folder_id=source.id, limit=1))
    assert len(files) == 1
    assert files[0].source_id == source.id
    # Attach after uploading the file
    client.agents.folders.attach(folder_id=source.id, agent_id=agent_state.id)
    raw_system_message = get_raw_system_message(client, agent_state.id, recompile=True)
    # Assert that the expected chunk is in the raw system message
    # NOTE(review): this literal must match the server's rendered <directories>
    # block byte-for-byte, including any internal indentation — verify.
    expected_chunk = """<directories>
<file_limits>
- current_files_open=1
- max_files_open=5
</file_limits>
<directory name="test_source">
<file status="open" name="test_source/test.txt">
<metadata>
- read_only=true
- chars_current=45
- chars_limit=15000
</metadata>
<value>
[Viewing file start (out of 1 lines)]
1: test
</value>
</file>
</directory>
</directories>"""
    assert expected_chunk in raw_system_message
    # Get the agent state, check blocks exist
    agent_state = client.agents.retrieve(agent_id=agent_state.id, include=["agent.blocks"])
    blocks = agent_state.memory.file_blocks
    assert len(blocks) == 1
    assert any("test" in b.value for b in blocks)
    assert any(b.value.startswith("[Viewing file start") for b in blocks)
    # Detach the source
    client.agents.folders.detach(folder_id=source.id, agent_id=agent_state.id)
    # Get the agent state, check blocks do NOT exist
    agent_state = client.agents.retrieve(agent_id=agent_state.id, include=["agent.blocks"])
    blocks = agent_state.memory.file_blocks
    assert len(blocks) == 0
    assert not any("test" in b.value for b in blocks)
    # Verify no traces of the prompt exist in the raw system message after detaching
    raw_system_message_after_detach = get_raw_system_message(client, agent_state.id, recompile=True)
    assert expected_chunk not in raw_system_message_after_detach
    assert "test_source" not in raw_system_message_after_detach
    assert "<directories>" not in raw_system_message_after_detach
def test_delete_source_removes_source_blocks_correctly(
    disable_pinecone, disable_turbopuffer, client: LettaSDKClient, agent_state: AgentState
):
    """Deleting a source should scrub its blocks and all traces from the system message."""
    # Create a new source
    source = client.folders.create(name="test_source", embedding="openai/text-embedding-3-small")
    assert len(list(client.folders.list())) == 1
    client.agents.folders.attach(folder_id=source.id, agent_id=agent_state.id)
    raw_system_message = get_raw_system_message(client, agent_state.id, recompile=True)
    assert "test_source" in raw_system_message
    assert "<directories>" in raw_system_message
    # Load files into the source
    file_path = "tests/data/test.txt"
    # Upload the files
    upload_file_and_wait(client, source.id, file_path)
    raw_system_message = get_raw_system_message(client, agent_state.id, recompile=True)
    # Assert that the expected chunk is in the raw system message
    # NOTE(review): literal must match the server's rendered block exactly — verify.
    expected_chunk = """<directories>
<file_limits>
- current_files_open=1
- max_files_open=5
</file_limits>
<directory name="test_source">
<file status="open" name="test_source/test.txt">
<metadata>
- read_only=true
- chars_current=45
- chars_limit=15000
</metadata>
<value>
[Viewing file start (out of 1 lines)]
1: test
</value>
</file>
</directory>
</directories>"""
    assert expected_chunk in raw_system_message
    # Get the agent state, check blocks exist
    agent_state = client.agents.retrieve(agent_id=agent_state.id, include=["agent.blocks"])
    blocks = agent_state.memory.file_blocks
    assert len(blocks) == 1
    assert any("test" in b.value for b in blocks)
    # Remove file from source
    client.folders.delete(folder_id=source.id)
    raw_system_message_after_detach = get_raw_system_message(client, agent_state.id, recompile=True)
    assert expected_chunk not in raw_system_message_after_detach
    assert "test_source" not in raw_system_message_after_detach
    assert "<directories>" not in raw_system_message_after_detach
    # Get the agent state, check blocks do NOT exist
    agent_state = client.agents.retrieve(agent_id=agent_state.id, include=["agent.blocks"])
    blocks = agent_state.memory.file_blocks
    assert len(blocks) == 0
    assert not any("test" in b.value for b in blocks)
def test_agent_uses_open_close_file_correctly(disable_pinecone, disable_turbopuffer, client: LettaSDKClient, agent_state: AgentState):
    """The agent should be able to re-open a file at different offset/length windows via open_files."""
    # Create a new source
    source = client.folders.create(name="test_source", embedding="openai/text-embedding-3-small")
    sources_list = list(client.folders.list())
    assert len(sources_list) == 1
    # Attach source to agent
    client.agents.folders.attach(folder_id=source.id, agent_id=agent_state.id)
    # Load files into the source
    file_path = "tests/data/long_test.txt"
    # Upload the files
    upload_file_and_wait(client, source.id, file_path)
    # Get uploaded files
    files = list(client.folders.files.list(folder_id=source.id, limit=1))
    assert len(files) == 1
    assert files[0].source_id == source.id
    file = files[0]
    # Check that file is opened initially
    agent_state = client.agents.retrieve(agent_id=agent_state.id, include=["agent.blocks"])
    blocks = agent_state.memory.file_blocks
    print(f"Agent has {len(blocks)} file block(s)")
    if blocks:
        initial_content_length = len(blocks[0].value)
        print(f"Initial file content length: {initial_content_length} characters")
        print(f"First 100 chars of content: {blocks[0].value[:100]}...")
        assert initial_content_length > 10, f"Expected file content > 10 chars, got {initial_content_length}"
    # Ask agent to open the file for a specific range using offset/length
    offset, length = 0, 5  # 0-indexed offset, 5 lines
    print(f"Requesting agent to open file with offset={offset}, length={length}")
    open_response1 = client.agents.messages.create(
        agent_id=agent_state.id,
        messages=[
            MessageCreate(
                role="user",
                content=f"Use ONLY the open_files tool to open the file named test_source/{file.file_name} with offset {offset} and length {length}",
            )
        ],
    )
    print(f"First open request sent, got {len(open_response1.messages)} message(s) in response")
    print(open_response1.messages)
    # Check that file is opened
    agent_state = client.agents.retrieve(agent_id=agent_state.id, include=["agent.blocks"])
    blocks = agent_state.memory.file_blocks
    assert len(blocks) == 1
    old_value = blocks[0].value
    old_content_length = len(old_value)
    print(f"File content length after first open: {old_content_length} characters")
    print(f"First range content: '{old_value}'")
    assert old_content_length > 10, f"Expected content > 10 chars for offset={offset}, length={length}, got {old_content_length}"
    # Assert specific content expectations for first range (lines 1-5)
    assert "[Viewing lines 1 to 5 (out of " in old_value, f"Expected viewing header for lines 1-5, got: {old_value[:100]}..."
    assert "1: Enrico Letta" in old_value, f"Expected line 1 to start with '1: Enrico Letta', got: {old_value[:200]}..."
    assert "5: " in old_value, f"Expected line 5 to be present, got: {old_value}"
    # Ask agent to open the file for a different range
    offset, length = 5, 5  # Different offset, same length
    open_response2 = client.agents.messages.create(
        agent_id=agent_state.id,
        messages=[
            MessageCreate(
                role="user",
                content=f"Use ONLY the open_files tool to open the file named {file.file_name} with offset {offset} and length {length}",
            )
        ],
    )
    print(f"Second open request sent, got {len(open_response2.messages)} message(s) in response")
    print(open_response2.messages)
    # Check that file is opened, but for different range
    print("Verifying file is opened with second range...")
    agent_state = client.agents.retrieve(agent_id=agent_state.id, include=["agent.blocks"])
    blocks = agent_state.memory.file_blocks
    new_value = blocks[0].value
    new_content_length = len(new_value)
    print(f"File content length after second open: {new_content_length} characters")
    print(f"Second range content: '{new_value}'")
    assert new_content_length > 10, f"Expected content > 10 chars for offset={offset}, length={length}, got {new_content_length}"
    # Assert specific content expectations for second range (lines 6-10)
    assert "[Viewing lines 6 to 10 (out of " in new_value, f"Expected viewing header for lines 6-10, got: {new_value[:100]}..."
    assert "6: " in new_value, f"Expected line 6 to be present, got: {new_value[:200]}..."
    assert "10: " in new_value, f"Expected line 10 to be present, got: {new_value}"
    print("Comparing content ranges:")
    print(f" First range (offset=0, length=5): '{old_value}'")
    print(f" Second range (offset=5, length=5): '{new_value}'")
    assert new_value != old_value, f"Different view ranges should have different content. New: '{new_value}', Old: '{old_value}'"
    # Assert that ranges don't overlap - first range should not contain line 6, second should not contain line 1
    assert "6: was promoted" not in old_value, f"First range (1-5) should not contain line 6, got: {old_value}"
    assert "1: Enrico Letta" not in new_value, f"Second range (6-10) should not contain line 1, got: {new_value}"
    print("✓ File successfully opened with different range - content differs as expected")
def test_agent_uses_search_files_correctly(disable_pinecone, disable_turbopuffer, client: LettaSDKClient, agent_state: AgentState):
    """Verify the agent invokes semantic_search_files when explicitly instructed.

    Sets up a folder with a processed file, attaches it to the agent, asks the
    agent to search it, and asserts the tool was called and succeeded.
    """
    # Create a new source
    source = client.folders.create(name="test_source", embedding="openai/text-embedding-3-small")
    sources_list = list(client.folders.list())
    assert len(sources_list) == 1
    # Attach source to agent
    client.agents.folders.attach(folder_id=source.id, agent_id=agent_state.id)
    # Load files into the source
    file_path = "tests/data/long_test.txt"
    print(f"Uploading file: {file_path}")
    # Upload the files
    file_metadata = upload_file_and_wait(client, source.id, file_path)
    # Normalize to a dict before subscript access, matching the other tests in
    # this module — the helper may return a pydantic model instead of a dict.
    if not isinstance(file_metadata, dict):
        file_metadata = file_metadata.model_dump()
    print(f"File uploaded and processed: {file_metadata['file_name']}")
    # Get uploaded files
    files = list(client.folders.files.list(folder_id=source.id, limit=1))
    assert len(files) == 1
    assert files[0].source_id == source.id
    # Ask agent to use the semantic_search_files tool
    search_files_response = client.agents.messages.create(
        agent_id=agent_state.id,
        messages=[
            MessageCreate(
                role="user", content="Use ONLY the semantic_search_files tool to search for details regarding the electoral history."
            )
        ],
    )
    print(f"Search file request sent, got {len(search_files_response.messages)} message(s) in response")
    print(search_files_response.messages)
    # Check that semantic_search_files was called
    tool_calls = [msg for msg in search_files_response.messages if msg.message_type == "tool_call_message"]
    assert len(tool_calls) > 0, "No tool calls found"
    assert any(tc.tool_call.name == "semantic_search_files" for tc in tool_calls), "semantic_search_files not called"
    # Check it returned successfully
    tool_returns = [msg for msg in search_files_response.messages if msg.message_type == "tool_return_message"]
    assert len(tool_returns) > 0, "No tool returns found"
    failed_returns = [tr for tr in tool_returns if tr.status != "success"]
    assert len(failed_returns) == 0, f"Tool call failed: {failed_returns}"
def test_agent_uses_grep_correctly_basic(disable_pinecone, disable_turbopuffer, client: LettaSDKClient, agent_state: AgentState):
    """Verify the agent invokes grep_files when explicitly instructed.

    Uploads a text fixture, asks the agent to grep for a name, and asserts the
    grep_files tool was called and every tool return succeeded.
    """
    # Create a new source
    source = client.folders.create(name="test_source", embedding="openai/text-embedding-3-small")
    sources_list = list(client.folders.list())
    assert len(sources_list) == 1
    # Attach source to agent
    client.agents.folders.attach(folder_id=source.id, agent_id=agent_state.id)
    # Load files into the source
    file_path = "tests/data/long_test.txt"
    print(f"Uploading file: {file_path}")
    # Upload the files
    file_metadata = upload_file_and_wait(client, source.id, file_path)
    if not isinstance(file_metadata, dict):
        file_metadata = file_metadata.model_dump()
    print(f"File uploaded and processed: {file_metadata['file_name']}")
    # Get uploaded files
    files = list(client.folders.files.list(folder_id=source.id, limit=1))
    assert len(files) == 1
    assert files[0].source_id == source.id
    # Ask agent to use the grep_files tool
    search_files_response = client.agents.messages.create(
        agent_id=agent_state.id,
        messages=[MessageCreate(role="user", content="Use ONLY the grep_files tool to search for `Nunzia De Girolamo`.")],
    )
    print(f"Grep request sent, got {len(search_files_response.messages)} message(s) in response")
    print(search_files_response.messages)
    # Check that grep_files was called
    tool_calls = [msg for msg in search_files_response.messages if msg.message_type == "tool_call_message"]
    assert len(tool_calls) > 0, "No tool calls found"
    # Bug fix: the failure message previously said "semantic_search_files not
    # called" (copy/paste leftover) while the condition checks grep_files.
    assert any(tc.tool_call.name == "grep_files" for tc in tool_calls), "grep_files not called"
    # Check it returned successfully
    tool_returns = [msg for msg in search_files_response.messages if msg.message_type == "tool_return_message"]
    assert len(tool_returns) > 0, "No tool returns found"
    assert all(tr.status == "success" for tr in tool_returns), "Tool call failed"
def test_agent_uses_grep_correctly_advanced(disable_pinecone, disable_turbopuffer, client: LettaSDKClient, agent_state: AgentState):
    """Verify grep_files returns a well-formed result for an exact-ID search.

    Uploads a JSON fixture, asks the agent to grep for a unique tool ID, and
    checks the tool return's match summary and surrounding context lines.
    """
    # Create a new source
    source = client.folders.create(name="test_source", embedding="openai/text-embedding-3-small")
    sources_list = list(client.folders.list())
    assert len(sources_list) == 1
    # Attach source to agent
    client.agents.folders.attach(folder_id=source.id, agent_id=agent_state.id)
    # Load files into the source
    file_path = "tests/data/list_tools.json"
    print(f"Uploading file: {file_path}")
    # Upload the files
    file_metadata = upload_file_and_wait(client, source.id, file_path)
    # The helper may return a pydantic model; normalize to a dict.
    if not isinstance(file_metadata, dict):
        file_metadata = file_metadata.model_dump()
    print(f"File uploaded and processed: {file_metadata['file_name']}")
    # Get uploaded files
    files = list(client.folders.files.list(folder_id=source.id, limit=1))
    assert len(files) == 1
    assert files[0].source_id == source.id
    # Ask agent to use the grep_files tool (previous comment wrongly said
    # semantic_search_files).
    search_files_response = client.agents.messages.create(
        agent_id=agent_state.id,
        messages=[
            MessageCreate(role="user", content="Use ONLY the grep_files tool to search for `tool-f5b80b08-5a45-4a0a-b2cd-dd8a0177b7ef`.")
        ],
    )
    print(f"Grep request sent, got {len(search_files_response.messages)} message(s) in response")
    print(search_files_response.messages)
    tool_return_message = next((m for m in search_files_response.messages if m.message_type == "tool_return_message"), None)
    assert tool_return_message is not None, "No ToolReturnMessage found in messages"
    # Basic structural integrity checks
    assert tool_return_message.name == "grep_files"
    assert tool_return_message.status == "success"
    assert "Found 1 total matches across 1 files" in tool_return_message.tool_return
    assert "tool-f5b80b08-5a45-4a0a-b2cd-dd8a0177b7ef" in tool_return_message.tool_return
    # Context line integrity: the lines immediately before/after the match
    # (510) must appear alongside it.
    assert "509:" in tool_return_message.tool_return
    assert "> 510:" in tool_return_message.tool_return  # Match line with > prefix
    assert "511:" in tool_return_message.tool_return
def test_create_agent_with_source_ids_creates_source_blocks_correctly(disable_pinecone, disable_turbopuffer, client: LettaSDKClient):
    """Test that creating an agent with source_ids parameter correctly creates source blocks."""
    # Set up a folder that already contains a processed file.
    folder = client.folders.create(name="test_source", embedding="openai/text-embedding-3-small")
    assert len(list(client.folders.list())) == 1

    upload_file_and_wait(client, folder.id, "tests/data/long_test.txt")

    # Sanity-check the upload landed in the folder.
    uploaded = list(client.folders.files.list(folder_id=folder.id, limit=1))
    assert len(uploaded) == 1
    assert uploaded[0].source_id == folder.id

    # Attach the source at agent-creation time via source_ids.
    temp_agent_state = client.agents.create(
        name="test_agent_with_sources",
        memory_blocks=[
            CreateBlockParam(
                label="human",
                value="username: sarah",
            ),
        ],
        model="openai/gpt-4o-mini",
        embedding="openai/text-embedding-3-small",
        source_ids=[folder.id],  # Attach source during creation
    )

    assert temp_agent_state is not None
    assert temp_agent_state.name == "test_agent_with_sources"

    # Exactly one file block should exist, rendering the start of the file.
    file_blocks = temp_agent_state.memory.file_blocks
    assert len(file_blocks) == 1
    assert any(fb.value.startswith("[Viewing file start (out of ") for fb in file_blocks)

    # The files tool suite should have been attached automatically.
    attached_file_tools = {t.name for t in temp_agent_state.tools if t.tool_type == ToolType.LETTA_FILES_CORE}
    assert attached_file_tools == set(FILES_TOOLS)
def test_view_ranges_have_metadata(disable_pinecone, disable_turbopuffer, client: LettaSDKClient, agent_state: AgentState):
    """Check that open_files view ranges render the exact expected content.

    Opens lines 50-54 of a 100-line fixture via offset/length and compares the
    file block's value against the precise expected rendering, including the
    "[Viewing lines ...]" header.
    """
    # Create a new source
    source = client.folders.create(name="test_source", embedding="openai/text-embedding-3-small")
    sources_list = list(client.folders.list())
    assert len(sources_list) == 1
    # Attach source to agent
    client.agents.folders.attach(folder_id=source.id, agent_id=agent_state.id)
    # Load files into the source
    file_path = "tests/data/1_to_100.py"
    # Upload the files
    upload_file_and_wait(client, source.id, file_path)
    # Get uploaded files
    files = list(client.folders.files.list(folder_id=source.id, limit=1))
    assert len(files) == 1
    assert files[0].source_id == source.id
    file = files[0]
    # Check that file is opened initially (default view starts at file start)
    agent_state = client.agents.retrieve(agent_id=agent_state.id, include=["agent.blocks"])
    blocks = agent_state.memory.file_blocks
    assert len(blocks) == 1
    block = blocks[0]
    assert block.value.startswith("[Viewing file start (out of 100 lines)]")
    # Open a specific range using offset/length
    offset = 49  # 0-indexed for line 50
    length = 5  # 5 lines (50-54)
    open_response = client.agents.messages.create(
        agent_id=agent_state.id,
        messages=[
            MessageCreate(
                role="user",
                content=f"Use ONLY the open_files tool to open the file named test_source/{file.file_name} with offset {offset} and length {length}",
            )
        ],
    )
    print(f"Open request sent, got {len(open_response.messages)} message(s) in response")
    print(open_response.messages)
    # Check that file is opened correctly
    agent_state = client.agents.retrieve(agent_id=agent_state.id, include=["agent.blocks"])
    blocks = agent_state.memory.file_blocks
    assert len(blocks) == 1
    block = blocks[0]
    print(block.value)
    # Exact match: range header plus each 1-indexed line of the range.
    assert (
        block.value
        == """
[Viewing lines 50 to 54 (out of 100 lines)]
50: x50 = 50
51: x51 = 51
52: x52 = 52
53: x53 = 53
54: x54 = 54
""".strip()
    )
def test_duplicate_file_renaming(disable_pinecone, disable_turbopuffer, client: LettaSDKClient):
    """Test that duplicate files are renamed with count-based suffixes (e.g., file.txt, file (1).txt, file (2).txt)"""
    folder = client.folders.create(name="test_duplicate_source", embedding="openai/text-embedding-3-small")

    # Upload the identical file three times in a row.
    file_path = "tests/data/test.txt"
    for _ in range(3):
        with open(file_path, "rb") as fh:
            client.folders.files.upload(folder_id=folder.id, file=fh)

    uploaded = list(client.folders.files.list(folder_id=folder.id, limit=10))
    assert len(uploaded) == 3, f"Expected 3 files, got {len(uploaded)}"

    # Order by creation time so the suffix sequence is deterministic.
    uploaded.sort(key=lambda f: f.created_at)

    # Duplicates get count-based suffixes appended to the stem.
    expected_filenames = ["test.txt", "test_(1).txt", "test_(2).txt"]
    actual_filenames = [f.file_name for f in uploaded]
    assert actual_filenames == expected_filenames, f"Expected {expected_filenames}, got {actual_filenames}"

    # Every copy still remembers the name it was uploaded under.
    for file in uploaded:
        assert file.original_file_name == "test.txt", f"Expected original_file_name='test.txt', got '{file.original_file_name}'"

    print("✓ Successfully tested duplicate file renaming:")
    for i, file in enumerate(uploaded):
        print(f"  File {i + 1}: original='{file.original_file_name}' → renamed='{file.file_name}'")
def test_duplicate_file_handling_replace(disable_pinecone, disable_turbopuffer, client: LettaSDKClient):
    """Test that DuplicateFileHandling.REPLACE replaces existing files with same name.

    Uploads a temp file, rewrites its contents on disk, re-uploads with
    duplicate_handling="replace", and verifies: still exactly one file, same
    name, new file ID, and agent memory blocks reflect the new content only.
    """
    # Create a new source
    source = client.folders.create(name="test_replace_source", embedding="openai/text-embedding-3-small")
    # Create agent and attach source to test memory blocks
    agent_state = client.agents.create(
        name="test_replace_agent",
        memory_blocks=[
            CreateBlockParam(label="human", value="username: sarah"),
        ],
        model="openai/gpt-4o-mini",
        embedding="openai/text-embedding-3-small",
        source_ids=[source.id],
    )
    # Create a temporary file with original content (delete=False so it
    # survives the `with` and can be re-written and re-uploaded below)
    original_content = "original file content for testing"
    with tempfile.NamedTemporaryFile(mode="w", suffix=".txt", delete=False) as f:
        f.write(original_content)
        temp_file_path = f.name
        temp_filename = os.path.basename(f.name)
    try:
        # Wait for the file to be processed
        upload_file_and_wait(client, source.id, temp_file_path)
        # Verify original file was uploaded
        files = list(client.folders.files.list(folder_id=source.id, limit=10))
        assert len(files) == 1, f"Expected 1 file, got {len(files)}"
        original_file = files[0]
        assert original_file.original_file_name == temp_filename
        # Get agent state and verify original content is in memory blocks
        agent_state = client.agents.retrieve(agent_id=agent_state.id, include=["agent.blocks"])
        file_blocks = agent_state.memory.file_blocks
        assert len(file_blocks) == 1, f"Expected 1 file block, got {len(file_blocks)}"
        original_block_content = file_blocks[0].value
        assert original_content in original_block_content
        # Create replacement content (overwrite the same path on disk)
        replacement_content = "this is the replacement content that should overwrite the original"
        with open(temp_file_path, "w") as f:
            f.write(replacement_content)
        # Upload replacement file with REPLACE duplicate handling
        upload_file_and_wait(client, source.id, temp_file_path, duplicate_handling="replace")
        # Verify we still have only 1 file (replacement, not addition)
        files_after_replace = list(client.folders.files.list(folder_id=source.id, limit=10))
        assert len(files_after_replace) == 1, f"Expected 1 file after replacement, got {len(files_after_replace)}"
        replaced_file = files_after_replace[0]
        # Verify file metadata shows replacement
        assert replaced_file.original_file_name == temp_filename, "Original filename should be preserved"
        assert replaced_file.file_name == temp_filename, "File name should match original"
        # Verify the file ID is different (new file replaced the old one)
        assert replaced_file.id != original_file.id, "Replacement file should have different ID"
        # Verify agent memory blocks contain replacement content
        agent_state = client.agents.retrieve(agent_id=agent_state.id, include=["agent.blocks"])
        updated_file_blocks = agent_state.memory.file_blocks
        assert len(updated_file_blocks) == 1, f"Expected 1 file block after replacement, got {len(updated_file_blocks)}"
        replacement_block_content = updated_file_blocks[0].value
        assert replacement_content in replacement_block_content, f"Expected replacement content in block, got: {replacement_block_content}"
        assert original_content not in replacement_block_content, (
            f"Original content should not be present after replacement: {replacement_block_content}"
        )
        print("✓ Successfully tested DuplicateFileHandling.REPLACE functionality")
    finally:
        # Clean up temporary file
        if os.path.exists(temp_file_path):
            os.unlink(temp_file_path)
def test_upload_file_with_custom_name(disable_pinecone, disable_turbopuffer, client: LettaSDKClient):
    """Test that uploading a file with a custom name overrides the original filename.

    Also exercises duplicate handling for custom names: re-uploading under the
    same name with duplicate_handling="error" must raise, while a different
    custom name (including a nested folder path) must succeed.
    """
    # Create agent
    agent_state = client.agents.create(
        name="test_agent_custom_name",
        memory_blocks=[
            CreateBlockParam(
                label="persona",
                value="I am a helpful assistant",
            ),
            CreateBlockParam(
                label="human",
                value="The user is a developer",
            ),
        ],
        model="openai/gpt-4o-mini",
        embedding="openai/text-embedding-3-small",
    )
    # Create source
    source = client.folders.create(name="test_source_custom_name", embedding="openai/text-embedding-3-small")
    # Attach source to agent
    client.agents.folders.attach(folder_id=source.id, agent_id=agent_state.id)
    # Create a temporary file with specific content. (The redundant
    # function-local `import tempfile` was removed: tempfile is already
    # imported at module level and used by other tests in this file.)
    temp_file_path = None
    try:
        with tempfile.NamedTemporaryFile(mode="w", suffix=".txt", delete=False) as f:
            f.write("This is a test file for custom naming")
            temp_file_path = f.name
        # Upload file with custom name
        custom_name = "my_custom_file_name.txt"
        file_metadata = upload_file_and_wait(client, source.id, temp_file_path, name=custom_name)
        if not isinstance(file_metadata, dict):
            file_metadata = file_metadata.model_dump()
        # Verify the file uses the custom name
        assert file_metadata["file_name"] == custom_name
        assert file_metadata["original_file_name"] == custom_name
        # Verify file appears in source files list with custom name
        files = list(client.folders.files.list(folder_id=source.id, limit=1))
        assert len(files) == 1
        assert files[0].file_name == custom_name
        assert files[0].original_file_name == custom_name
        # Verify the custom name is used in file blocks
        agent_state = client.agents.retrieve(agent_id=agent_state.id, include=["agent.blocks"])
        file_blocks = agent_state.memory.file_blocks
        assert len(file_blocks) == 1
        # Check that the custom name appears in the block label
        assert custom_name.replace(".txt", "") in file_blocks[0].label
        # Test duplicate handling with custom name - upload same file with same custom name
        with pytest.raises(Exception) as exc_info:
            upload_file_and_wait(client, source.id, temp_file_path, name=custom_name, duplicate_handling="error")
        assert "already exists" in str(exc_info.value).lower()
        # Upload same file with different custom name should succeed
        different_custom_name = "folder_a/folder_b/another_custom_name.txt"
        file_metadata2 = upload_file_and_wait(client, source.id, temp_file_path, name=different_custom_name)
        if not isinstance(file_metadata2, dict):
            file_metadata2 = file_metadata2.model_dump()
        assert file_metadata2["file_name"] == different_custom_name
        assert file_metadata2["original_file_name"] == different_custom_name
        # Verify both files exist
        files = list(client.folders.files.list(folder_id=source.id, limit=10))
        assert len(files) == 2
        file_names = {f.file_name for f in files}
        assert custom_name in file_names
        assert different_custom_name in file_names
    finally:
        # Clean up temporary file
        if temp_file_path and os.path.exists(temp_file_path):
            os.unlink(temp_file_path)
def test_open_files_schema_descriptions(disable_pinecone, disable_turbopuffer, client: LettaSDKClient):
    """Test that open_files tool schema contains correct descriptions from docstring.

    The generated JSON schema must carry over the tool's docstring (including
    its usage examples) and the exact per-parameter descriptions.
    """
    # Get the open_files tool
    tools = list(client.tools.list(name="open_files"))
    assert len(tools) == 1, "Expected exactly one open_files tool"
    open_files_tool = tools[0]
    schema = open_files_tool.json_schema
    # Check main function description includes the full multiline docstring with examples
    description = schema["description"]
    # Check main description line
    assert (
        "Open one or more files and load their contents into files section in core memory. Maximum of 5 files can be opened simultaneously."
        in description
    )
    # Check that examples are included
    assert "Examples:" in description
    assert 'FileOpenRequest(file_name="project_utils/config.py")' in description
    assert 'FileOpenRequest(file_name="project_utils/config.py", offset=0, length=50)' in description
    assert "# Lines 1-50" in description
    assert "# Lines 101-200" in description
    assert "# Entire file" in description
    assert "close_all_others=True" in description
    assert "View specific portions of large files (e.g. functions or definitions)" in description
    # Check parameters structure
    assert "parameters" in schema
    assert "properties" in schema["parameters"]
    properties = schema["parameters"]["properties"]
    # Check file_requests parameter
    assert "file_requests" in properties
    file_requests_prop = properties["file_requests"]
    expected_file_requests_desc = "List of file open requests, each specifying file name and optional view range."
    assert file_requests_prop["description"] == expected_file_requests_desc, (
        f"Expected file_requests description: '{expected_file_requests_desc}', got: '{file_requests_prop['description']}'"
    )
    # Check close_all_others parameter
    assert "close_all_others" in properties
    close_all_others_prop = properties["close_all_others"]
    expected_close_all_others_desc = "If True, closes all other currently open files first. Defaults to False."
    assert close_all_others_prop["description"] == expected_close_all_others_desc, (
        f"Expected close_all_others description: '{expected_close_all_others_desc}', got: '{close_all_others_prop['description']}'"
    )
    # Check that file_requests is an array type
    assert file_requests_prop["type"] == "array", f"Expected file_requests type to be 'array', got: '{file_requests_prop['type']}'"
    # Check FileOpenRequest schema within file_requests items
    assert "items" in file_requests_prop
    file_request_items = file_requests_prop["items"]
    assert file_request_items["type"] == "object", "Expected FileOpenRequest to be object type"
    # Check FileOpenRequest properties
    assert "properties" in file_request_items
    file_request_properties = file_request_items["properties"]
    # Check file_name field
    assert "file_name" in file_request_properties
    file_name_prop = file_request_properties["file_name"]
    assert file_name_prop["description"] == "Name of the file to open"
    assert file_name_prop["type"] == "string"
    # Check offset field
    assert "offset" in file_request_properties
    offset_prop = file_request_properties["offset"]
    expected_offset_desc = "Optional offset for starting line number (0-indexed). If not specified, starts from beginning of file."
    assert offset_prop["description"] == expected_offset_desc
    assert offset_prop["type"] == "integer"
    # Check length field
    assert "length" in file_request_properties
    length_prop = file_request_properties["length"]
    expected_length_desc = "Optional number of lines to view from offset (inclusive). If not specified, views to end of file."
    assert length_prop["description"] == expected_length_desc
    assert length_prop["type"] == "integer"
def test_grep_files_schema_descriptions(disable_pinecone, disable_turbopuffer, client: LettaSDKClient):
    """Test that grep_files tool schema contains correct descriptions from docstring.

    Verifies the generated JSON schema preserves the tool docstring (summary,
    pagination notes, examples, return description) and the exact descriptions
    for pattern, include, context_lines, and offset parameters.
    """
    # Get the grep_files tool
    tools = list(client.tools.list(name="grep_files"))
    assert len(tools) == 1, "Expected exactly one grep_files tool"
    grep_files_tool = tools[0]
    schema = grep_files_tool.json_schema
    # Check main function description includes the full multiline docstring with examples
    description = schema["description"]
    # Check main description line
    assert "Searches file contents for pattern matches with surrounding context." in description
    # Check important details are included
    assert "Results are paginated - shows 20 matches per call" in description
    assert "The response includes:" in description
    assert "A summary of total matches and which files contain them" in description
    assert "The current page of matches (20 at a time)" in description
    assert "Instructions for viewing more matches using the offset parameter" in description
    # Check examples are included
    assert "Example usage:" in description
    assert 'grep_files(pattern="TODO")' in description
    assert 'grep_files(pattern="TODO", offset=20)' in description
    assert "# Shows matches 21-40" in description
    # Check parameters structure
    assert "parameters" in schema
    assert "properties" in schema["parameters"]
    properties = schema["parameters"]["properties"]
    # Check pattern parameter
    assert "pattern" in properties
    pattern_prop = properties["pattern"]
    expected_pattern_desc = "Keyword or regex pattern to search within file contents."
    assert pattern_prop["description"] == expected_pattern_desc, (
        f"Expected pattern description: '{expected_pattern_desc}', got: '{pattern_prop['description']}'"
    )
    assert pattern_prop["type"] == "string"
    # Check include parameter
    assert "include" in properties
    include_prop = properties["include"]
    expected_include_desc = "Optional keyword or regex pattern to filter filenames to include in the search."
    assert include_prop["description"] == expected_include_desc, (
        f"Expected include description: '{expected_include_desc}', got: '{include_prop['description']}'"
    )
    assert include_prop["type"] == "string"
    # Check context_lines parameter
    assert "context_lines" in properties
    context_lines_prop = properties["context_lines"]
    expected_context_lines_desc = (
        "Number of lines of context to show before and after each match.\nEquivalent to `-C` in grep_files. Defaults to 1."
    )
    assert context_lines_prop["description"] == expected_context_lines_desc, (
        f"Expected context_lines description: '{expected_context_lines_desc}', got: '{context_lines_prop['description']}'"
    )
    assert context_lines_prop["type"] == "integer"
    # Check offset parameter (multi-line description must survive verbatim,
    # including embedded newlines)
    assert "offset" in properties
    offset_prop = properties["offset"]
    expected_offset_desc = (
        "Number of matches to skip before showing results. Used for pagination.\n"
        "For example, offset=20 shows matches starting from the 21st match.\n"
        "Use offset=0 (or omit) for first page, offset=20 for second page,\n"
        "offset=40 for third page, etc. The tool will tell you the exact\n"
        "offset to use for the next page."
    )
    assert offset_prop["description"] == expected_offset_desc, (
        f"Expected offset description: '{expected_offset_desc}', got: '{offset_prop['description']}'"
    )
    assert offset_prop["type"] == "integer"
    # Check return description in main description
    assert "Returns search results containing:" in description
    assert "Summary with total match count and file distribution" in description
    assert "List of files with match counts per file" in description
    assert "Current page of matches (up to 20)" in description
    assert "Navigation hint for next page if more matches exist" in description
def test_agent_open_file(disable_pinecone, disable_turbopuffer, client: LettaSDKClient, agent_state: AgentState):
    """Test client.agents.open_file() function"""
    # Prepare a folder with one processed file and attach it to the agent.
    folder = client.folders.create(name="test_source", embedding="openai/text-embedding-3-small")
    client.agents.folders.attach(folder_id=folder.id, agent_id=agent_state.id)

    meta = upload_file_and_wait(client, folder.id, "tests/data/test.txt")
    if not isinstance(meta, dict):
        meta = meta.model_dump()

    # Opening the file should not force any other files closed.
    closed_files = client.agents.files.open(agent_id=agent_state.id, file_id=meta["id"])
    assert len(closed_files) == 0

    # The recompiled system prompt should show the file as open.
    system = get_raw_system_message(client, agent_state.id, recompile=True)
    assert '<file status="open" name="test_source/test.txt">' in system
    assert "[Viewing file start (out of 1 lines)]" in system
def test_agent_close_file(disable_pinecone, disable_turbopuffer, client: LettaSDKClient, agent_state: AgentState):
    """Test client.agents.close_file() function"""
    # Prepare a folder with one processed file and attach it to the agent.
    folder = client.folders.create(name="test_source", embedding="openai/text-embedding-3-small")
    client.agents.folders.attach(folder_id=folder.id, agent_id=agent_state.id)

    meta = upload_file_and_wait(client, folder.id, "tests/data/test.txt")
    if not isinstance(meta, dict):
        meta = meta.model_dump()

    # Open the file, then close it again.
    client.agents.files.open(agent_id=agent_state.id, file_id=meta["id"])
    client.agents.files.close(agent_id=agent_state.id, file_id=meta["id"])

    # The recompiled system prompt should now show the file as closed.
    system = get_raw_system_message(client, agent_state.id, recompile=True)
    assert '<file status="closed" name="test_source/test.txt">' in system
def test_agent_close_all_open_files(disable_pinecone, disable_turbopuffer, client: LettaSDKClient, agent_state: AgentState):
    """Test client.agents.close_all_open_files() function"""
    # Create a new source
    source = client.folders.create(name="test_source", embedding="openai/text-embedding-3-small")
    # Attach source to agent
    client.agents.folders.attach(folder_id=source.id, agent_id=agent_state.id)
    # Upload multiple files
    file_paths = ["tests/data/test.txt", "tests/data/test.md"]
    file_metadatas = []
    for file_path in file_paths:
        file_metadata = upload_file_and_wait(client, source.id, file_path)
        # Normalize pydantic models to dicts so ["id"] access works below.
        if not isinstance(file_metadata, dict):
            file_metadata = file_metadata.model_dump()
        file_metadatas.append(file_metadata)
        # Open each file
        # NOTE(review): placed inside the loop per the comment above; confirm
        # against the original file's indentation, which this view lost.
        client.agents.files.open(agent_id=agent_state.id, file_id=file_metadata["id"])
    # At least one file should now be marked open in the system prompt.
    system = get_raw_system_message(client, agent_state.id, recompile=True)
    assert '<file status="open"' in system
    # Test close_all_open_files function
    result = client.agents.files.close_all(agent_id=agent_state.id)
    # Verify result is a list of strings
    assert isinstance(result, list), f"Expected list, got {type(result)}"
    assert all(isinstance(item, str) for item in result), "All items in result should be strings"
    # After close_all, no file should remain open.
    system = get_raw_system_message(client, agent_state.id, recompile=True)
    assert '<file status="open"' not in system
def test_file_processing_timeout(disable_pinecone, disable_turbopuffer, client: LettaSDKClient):
    """Test that files in non-terminal states are moved to error after timeout.

    Exercises FileProcessingStatus.is_terminal_state() classification directly,
    then inspects the uploaded file's current status via the raw REST endpoint.
    """
    # Create a source
    source = client.folders.create(name="test_timeout_source", embedding="openai/text-embedding-3-small")
    # Upload a file
    file_path = "tests/data/test.txt"
    with open(file_path, "rb") as f:
        file_metadata = client.folders.files.upload(folder_id=source.id, file=f)
    # Get the file ID
    file_id = file_metadata.id
    # Test the is_terminal_state method directly (this doesn't require server mocking).
    # Idiom fix (PEP 8 / E712): assert truthiness directly instead of `== True`
    # and `== False` comparisons.
    assert FileProcessingStatus.COMPLETED.is_terminal_state()
    assert FileProcessingStatus.ERROR.is_terminal_state()
    assert not FileProcessingStatus.PARSING.is_terminal_state()
    assert not FileProcessingStatus.EMBEDDING.is_terminal_state()
    assert not FileProcessingStatus.PENDING.is_terminal_state()
    # For testing the actual timeout logic, we can check the current file status
    current_file = client.get(
        path=f"/v1/sources/{source.id}/files/{file_id}",
        cast_to=dict[str, Any],
    )
    # Convert string status to enum for testing
    if not isinstance(current_file, dict):
        current_file = current_file.model_dump()
    processing_status = current_file["processing_status"]
    status_enum = FileProcessingStatus(processing_status)
    # Verify that files in terminal states are not affected by timeout checks
    if status_enum.is_terminal_state():
        # This is the expected behavior - files that completed processing shouldn't timeout
        print(f"File {file_id} is in terminal state: {processing_status}")
        assert status_enum in [FileProcessingStatus.COMPLETED, FileProcessingStatus.ERROR]
    else:
        # If file is still processing, it should eventually complete or timeout
        # In a real scenario, we'd wait and check, but for unit tests we just verify the logic exists
        print(f"File {file_id} is still processing: {processing_status}")
        assert status_enum in [FileProcessingStatus.PENDING, FileProcessingStatus.PARSING, FileProcessingStatus.EMBEDDING]
@pytest.mark.unit
def test_file_processing_timeout_logic():
    """Test the timeout logic directly without server dependencies"""
    from datetime import timezone

    timeout_minutes = 30
    now = datetime.now(timezone.utc)
    # Anything created before this cutoff has exceeded the timeout window.
    cutoff = now - timedelta(minutes=timeout_minutes)

    # A file created 35 minutes ago sits beyond the 30-minute cutoff.
    stale = now - timedelta(minutes=35)
    assert stale < cutoff, "File created 35 minutes ago should be past 30-minute timeout"

    # Boundary: created exactly at the cutoff -> not yet timed out.
    boundary = now - timedelta(minutes=timeout_minutes)
    assert not (boundary < cutoff), "File created exactly at timeout should not trigger timeout"

    # A 10-minute-old file is comfortably inside the window.
    fresh = now - timedelta(minutes=10)
    assert not (fresh < cutoff), "Recent file should not trigger timeout"
def test_openai_embedding(disable_pinecone, disable_turbopuffer, client: LettaSDKClient):
    """Test creating a source with OpenAI embeddings and uploading a file"""
    folder = client.folders.create(name="test_openai_embed_source", embedding="openai/text-embedding-3-small")
    # verify source was created with correct embedding
    assert folder.name == "test_openai_embed_source"

    # upload test.txt and normalize the metadata to a plain dict
    meta = upload_file_and_wait(client, folder.id, "tests/data/test.txt")
    if not isinstance(meta, dict):
        meta = meta.model_dump()

    # verify the upload finished and is associated with this folder
    assert meta["processing_status"] == "completed"
    assert meta["source_id"] == folder.id
    assert meta["file_name"] == "test.txt"

    # the file should be listed under the folder
    listed = list(client.folders.files.list(folder_id=folder.id, limit=1))
    assert len(listed) == 1
    assert listed[0].id == meta["id"]

    # cleanup
    client.folders.delete(folder_id=folder.id)
# --- Pinecone Tests ---
def test_pinecone_search_files_tool(disable_turbopuffer, client: LettaSDKClient):
    """Test that search_files tool uses Pinecone when enabled"""
    from letta.helpers.pinecone_utils import should_use_pinecone

    if not should_use_pinecone(verbose=True):
        pytest.skip("Pinecone not configured (missing API key or disabled), skipping Pinecone-specific tests")

    print("Testing Pinecone search_files tool functionality")

    # Agent that will run the file tools.
    agent = client.agents.create(
        name="test_pinecone_agent",
        memory_blocks=[
            CreateBlockParam(label="human", value="username: testuser"),
        ],
        model="openai/gpt-4o-mini",
        embedding="openai/text-embedding-3-small",
    )

    # Folder with a searchable document, attached to the agent.
    folder = client.folders.create(name="test_pinecone_source", embedding="openai/text-embedding-3-small")
    client.agents.folders.attach(folder_id=folder.id, agent_id=agent.id)
    upload_file_and_wait(client, folder.id, "tests/data/long_test.txt")

    # Drive a semantic search through the agent.
    search_response = client.agents.messages.create(
        agent_id=agent.id,
        messages=[MessageCreate(role="user", content="Use the semantic_search_files tool to search for 'electoral history' in the files.")],
    )

    # The tool must have been invoked...
    calls = [m for m in search_response.messages if m.message_type == "tool_call_message"]
    assert len(calls) > 0, "No tool calls found"
    assert any(c.tool_call.name == "semantic_search_files" for c in calls), "semantic_search_files not called"

    # ...and every invocation must have succeeded.
    returns = [m for m in search_response.messages if m.message_type == "tool_return_message"]
    assert len(returns) > 0, "No tool returns found"
    assert all(r.status == "success" for r in returns), "Tool call failed"

    # The results should mention the query topic.
    search_results = returns[0].tool_return
    print(search_results)
    assert "electoral" in search_results.lower() or "history" in search_results.lower(), (
        f"Search results should contain relevant content: {search_results}"
    )
def test_pinecone_list_files_status(disable_turbopuffer, client: LettaSDKClient):
    """Test that list_source_files properly syncs embedding status with Pinecone"""
    if not should_use_pinecone():
        pytest.skip("Pinecone not configured (missing API key or disabled), skipping Pinecone-specific tests")
    # create source
    source = client.folders.create(name="test_list_files_status", embedding="openai/text-embedding-3-small")
    file_paths = ["tests/data/long_test.txt"]
    uploaded_files = []
    for file_path in file_paths:
        # use the new helper that polls via list_files
        file_metadata = upload_file_and_wait_list_files(client, source.id, file_path)
        uploaded_files.append(file_metadata)
        # normalize dict-or-model return before key access
        if not isinstance(file_metadata, dict):
            file_metadata = file_metadata.model_dump()
        assert file_metadata["processing_status"] == "completed", f"File {file_path} should be completed"
    # now get files using list_source_files to verify status checking works
    files_list = client.folders.files.list(folder_id=source.id, limit=100)
    # verify all files show completed status and have proper embedding counts
    assert len(files_list) == len(uploaded_files), f"Expected {len(uploaded_files)} files, got {len(files_list)}"
    for file_metadata in files_list:
        if not isinstance(file_metadata, dict):
            file_metadata = file_metadata.model_dump()
        assert file_metadata["processing_status"] == "completed", f"File {file_metadata['file_name']} should show completed status"
        # verify embedding counts for files that have chunks
        if file_metadata["total_chunks"] and file_metadata["total_chunks"] > 0:
            assert file_metadata["chunks_embedded"] == file_metadata["total_chunks"], (
                f"File {file_metadata['file_name']} should have all chunks embedded: {file_metadata['chunks_embedded']}/{file_metadata['total_chunks']}"
            )
    # cleanup
    client.folders.delete(folder_id=source.id)
def test_pinecone_lifecycle_file_and_source_deletion(disable_turbopuffer, client: LettaSDKClient):
    """Test that file and source deletion removes records from Pinecone"""
    from letta.helpers.pinecone_utils import list_pinecone_index_for_files, should_use_pinecone

    if not should_use_pinecone():
        pytest.skip("Pinecone not configured (missing API key or disabled), skipping Pinecone-specific tests")
    print("Testing Pinecone file and source deletion lifecycle")
    # Create source
    source = client.folders.create(name="test_lifecycle_source", embedding="openai/text-embedding-3-small")
    # Upload multiple files and wait for processing
    file_paths = ["tests/data/test.txt", "tests/data/test.md"]
    uploaded_files = []
    for file_path in file_paths:
        file_metadata = upload_file_and_wait(client, source.id, file_path)
        uploaded_files.append(file_metadata)
    # Get temp user for Pinecone operations
    user = User(name="temp", organization_id=DEFAULT_ORG_ID)
    # Test file-level deletion first
    # NOTE(review): metadata is accessed via attributes (.id) here, but the
    # Turbopuffer twin uses dict keys (["id"]) — confirm upload_file_and_wait's
    # return type is consistent across both tests.
    if len(uploaded_files) > 1:
        file_to_delete = uploaded_files[0]
        # Check records for the specific file using list function
        records_before = asyncio.run(list_pinecone_index_for_files(file_to_delete.id, user))
        print(f"Found {len(records_before)} records for file before deletion")
        # Delete the file
        client.folders.files.delete(folder_id=source.id, file_id=file_to_delete.id)
        # Allow time for deletion to propagate
        time.sleep(2)
        # Verify file records are removed
        records_after = asyncio.run(list_pinecone_index_for_files(file_to_delete.id, user))
        print(f"Found {len(records_after)} records for file after deletion")
        assert len(records_after) == 0, f"File records should be removed from Pinecone after deletion, but found {len(records_after)}"
    # Test source-level deletion - check remaining files
    # Check records for remaining files
    remaining_records = []
    for file_metadata in uploaded_files[1:]:  # Skip the already deleted file
        file_records = asyncio.run(list_pinecone_index_for_files(file_metadata.id, user))
        remaining_records.extend(file_records)
    records_before = len(remaining_records)
    print(f"Found {records_before} records for remaining files before source deletion")
    # Delete the entire source
    client.folders.delete(folder_id=source.id)
    # Allow time for deletion to propagate
    time.sleep(3)
    # Verify all remaining file records are removed
    records_after = []
    for file_metadata in uploaded_files[1:]:
        file_records = asyncio.run(list_pinecone_index_for_files(file_metadata.id, user))
        records_after.extend(file_records)
    print(f"Found {len(records_after)} records for files after source deletion")
    assert len(records_after) == 0, (
        f"All source records should be removed from Pinecone after source deletion, but found {len(records_after)}"
    )
# --- End Pinecone Tests ---
# --- Turbopuffer Tests ---
def test_turbopuffer_search_files_tool(disable_pinecone, client: LettaSDKClient):
    """Test that search_files tool uses Turbopuffer when enabled"""
    # Agent with file tools; Pinecone is disabled via fixture so Turbopuffer handles search.
    agent = client.agents.create(
        name="test_turbopuffer_agent",
        memory_blocks=[
            CreateBlockParam(label="human", value="username: testuser"),
        ],
        model="openai/gpt-4o-mini",
        embedding="openai/text-embedding-3-small",
    )
    # Create a source, attach it to the agent, and upload searchable content
    source = client.folders.create(name="test_turbopuffer_source", embedding="openai/text-embedding-3-small")
    client.agents.folders.attach(folder_id=source.id, agent_id=agent.id)
    file_path = "tests/data/long_test.txt"
    upload_file_and_wait(client, source.id, file_path)
    # Ask the agent to run a semantic search over the attached files
    search_response = client.agents.messages.create(
        agent_id=agent.id,
        messages=[MessageCreate(role="user", content="Use the semantic_search_files tool to search for 'electoral history' in the files.")],
    )
    # Verify the tool was invoked and returned successfully
    tool_calls = [msg for msg in search_response.messages if msg.message_type == "tool_call_message"]
    assert len(tool_calls) > 0, "No tool calls found"
    assert any(tc.tool_call.name == "semantic_search_files" for tc in tool_calls), "semantic_search_files not called"
    tool_returns = [msg for msg in search_response.messages if msg.message_type == "tool_return_message"]
    assert len(tool_returns) > 0, "No tool returns found"
    assert all(tr.status == "success" for tr in tool_returns), "Tool call failed"
    # Results should mention the queried topic
    search_results = tool_returns[0].tool_return
    print(f"Turbopuffer search results: {search_results}")
    assert "electoral" in search_results.lower() or "history" in search_results.lower(), (
        f"Search results should contain relevant content: {search_results}"
    )
    # cleanup
    client.agents.delete(agent_id=agent.id)
    client.folders.delete(folder_id=source.id)
def test_turbopuffer_file_processing_status(disable_pinecone, client: LettaSDKClient):
    """Test that file processing completes successfully with Turbopuffer"""
    print("Testing Turbopuffer file processing status")
    source = client.folders.create(name="test_tpuf_file_status", embedding="openai/text-embedding-3-small")
    file_paths = ["tests/data/long_test.txt", "tests/data/test.md"]
    uploaded_files = []
    for file_path in file_paths:
        file_metadata = upload_file_and_wait(client, source.id, file_path)
        uploaded_files.append(file_metadata)
        # normalize dict-or-model return before key access
        if not isinstance(file_metadata, dict):
            file_metadata = file_metadata.model_dump()
        assert file_metadata["processing_status"] == "completed", f"File {file_path} should be completed"
    # NOTE(review): this test reads `.items` from the list response while the
    # Pinecone twin iterates the response directly — confirm which response
    # shape the SDK actually returns here.
    files_list = client.folders.files.list(folder_id=source.id, limit=100).items
    assert len(files_list) == len(uploaded_files), f"Expected {len(uploaded_files)} files, got {len(files_list)}"
    for file_metadata in files_list:
        if not isinstance(file_metadata, dict):
            file_metadata = file_metadata.model_dump()
        assert file_metadata["processing_status"] == "completed", f"File {file_metadata['file_name']} should show completed status"
        # every chunked file must be fully embedded
        if file_metadata["total_chunks"] and file_metadata["total_chunks"] > 0:
            assert file_metadata["chunks_embedded"] == file_metadata["total_chunks"], (
                f"File {file_metadata['file_name']} should have all chunks embedded: {file_metadata['chunks_embedded']}/{file_metadata['total_chunks']}"
            )
    # cleanup
    client.folders.delete(folder_id=source.id)
def test_turbopuffer_lifecycle_file_and_source_deletion(disable_pinecone, client: LettaSDKClient):
    """Test that file and source deletion removes records from Turbopuffer"""
    source = client.folders.create(name="test_tpuf_lifecycle", embedding="openai/text-embedding-3-small")
    file_paths = ["tests/data/test.txt", "tests/data/test.md"]
    uploaded_files = []
    for file_path in file_paths:
        file_metadata = upload_file_and_wait(client, source.id, file_path)
        uploaded_files.append(file_metadata)
    # temp actor for direct Turbopuffer queries (bypasses the SDK)
    user = User(name="temp", organization_id=DEFAULT_ORG_ID)
    tpuf_client = TurbopufferClient()
    # test file-level deletion
    if len(uploaded_files) > 1:
        file_to_delete = uploaded_files[0]
        passages_before = asyncio.run(
            tpuf_client.query_file_passages(
                source_ids=[source.id], organization_id=user.organization_id, actor=user, file_id=file_to_delete["id"], top_k=100
            )
        )
        print(f"Found {len(passages_before)} passages for file before deletion")
        assert len(passages_before) > 0, "Should have passages before deletion"
        client.folders.files.delete(folder_id=source.id, file_id=file_to_delete["id"])
        # allow deletion to propagate before re-querying
        time.sleep(2)
        passages_after = asyncio.run(
            tpuf_client.query_file_passages(
                source_ids=[source.id], organization_id=user.organization_id, actor=user, file_id=file_to_delete["id"], top_k=100
            )
        )
        print(f"Found {len(passages_after)} passages for file after deletion")
        assert len(passages_after) == 0, f"File passages should be removed from Turbopuffer after deletion, but found {len(passages_after)}"
    # test source-level deletion
    remaining_passages_before = []
    for file_metadata in uploaded_files[1:]:  # skip the already deleted file
        passages = asyncio.run(
            tpuf_client.query_file_passages(
                source_ids=[source.id], organization_id=user.organization_id, actor=user, file_id=file_metadata["id"], top_k=100
            )
        )
        remaining_passages_before.extend(passages)
    print(f"Found {len(remaining_passages_before)} passages for remaining files before source deletion")
    assert len(remaining_passages_before) > 0, "Should have passages for remaining files"
    client.folders.delete(folder_id=source.id)
    # allow deletion to propagate before re-querying
    time.sleep(3)
    remaining_passages_after = []
    for file_metadata in uploaded_files[1:]:
        # querying a deleted source may raise rather than return [] — both count as removed
        try:
            passages = asyncio.run(
                tpuf_client.query_file_passages(
                    source_ids=[source.id], organization_id=user.organization_id, actor=user, file_id=file_metadata["id"], top_k=100
                )
            )
            remaining_passages_after.extend(passages)
        except Exception as e:
            print(f"Expected error querying deleted source: {e}")
    print(f"Found {len(remaining_passages_after)} passages for files after source deletion")
    assert len(remaining_passages_after) == 0, (
        f"All source passages should be removed from Turbopuffer after source deletion, but found {len(remaining_passages_after)}"
    )
def test_turbopuffer_multiple_sources(disable_pinecone, client: LettaSDKClient):
    """Test that Turbopuffer correctly isolates passages by source in org-scoped namespace"""
    source1 = client.folders.create(name="test_tpuf_source1", embedding="openai/text-embedding-3-small")
    source2 = client.folders.create(name="test_tpuf_source2", embedding="openai/text-embedding-3-small")
    # one file per source so cross-contamination is detectable by file_id
    file1_metadata = upload_file_and_wait(client, source1.id, "tests/data/test.txt")
    file2_metadata = upload_file_and_wait(client, source2.id, "tests/data/test.md")
    user = User(name="temp", organization_id=DEFAULT_ORG_ID)
    tpuf_client = TurbopufferClient()
    source1_passages = asyncio.run(
        tpuf_client.query_file_passages(source_ids=[source1.id], organization_id=user.organization_id, actor=user, top_k=100)
    )
    source2_passages = asyncio.run(
        tpuf_client.query_file_passages(source_ids=[source2.id], organization_id=user.organization_id, actor=user, top_k=100)
    )
    print(f"Source1 has {len(source1_passages)} passages")
    print(f"Source2 has {len(source2_passages)} passages")
    assert len(source1_passages) > 0, "Source1 should have passages"
    assert len(source2_passages) > 0, "Source2 should have passages"
    # each query result unpacks into three items; only the passage object is used here
    for passage, _, _ in source1_passages:
        assert passage.source_id == source1.id, f"Passage should belong to source1, but has folder_id={passage.source_id}"
        assert passage.file_id == file1_metadata["id"], f"Passage should belong to file1, but has file_id={passage.file_id}"
    for passage, _, _ in source2_passages:
        assert passage.source_id == source2.id, f"Passage should belong to source2, but has folder_id={passage.source_id}"
        assert passage.file_id == file2_metadata["id"], f"Passage should belong to file2, but has file_id={passage.file_id}"
    # delete source1 and verify source2 is unaffected
    client.folders.delete(folder_id=source1.id)
    time.sleep(2)
    source2_passages_after = asyncio.run(
        tpuf_client.query_file_passages(source_ids=[source2.id], organization_id=user.organization_id, actor=user, top_k=100)
    )
    assert len(source2_passages_after) == len(source2_passages), (
        f"Source2 should still have all passages after source1 deletion: {len(source2_passages_after)} vs {len(source2_passages)}"
    )
    client.folders.delete(folder_id=source2.id)
# --- End Turbopuffer Tests ---
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/test_sources.py",
"license": "Apache License 2.0",
"lines": 1306,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/test_timezone_formatting.py | import json
from datetime import datetime
import pytest
import pytz
from letta.helpers.datetime_helpers import get_local_time, get_local_time_timezone
from letta.system import (
get_heartbeat,
get_login_event,
package_function_response,
package_summarize_message,
package_system_message,
package_user_message,
)
class TestTimezoneFormatting:
    """Test suite for timezone formatting functions in system.py"""

    def _extract_time_from_json(self, json_str: str) -> str:
        """Helper to extract time field from JSON string"""
        data = json.loads(json_str)
        return data["time"]

    def _validate_timezone_accuracy(self, formatted_time: str, expected_timezone: str, tolerance_minutes: int = 2):
        """
        Validate that the formatted time is accurate for the given timezone within tolerance.

        Args:
            formatted_time: The time string from the system functions
            expected_timezone: The timezone string (e.g., "America/New_York")
            tolerance_minutes: Acceptable difference in minutes
        """
        # Parse the formatted time - handle the actual format produced
        # Expected format: "2025-06-24 12:53:40 AM EDT-0400"
        import re
        from datetime import timedelta, timezone

        # Match pattern like "2025-06-24 12:53:40 AM EDT-0400"
        pattern = r"(\d{4}-\d{2}-\d{2} \d{1,2}:\d{2}:\d{2} [AP]M) ([A-Z]{3,4})([-+]\d{4})"
        match = re.match(pattern, formatted_time)
        if not match:
            # Fallback: just check basic format without detailed parsing
            assert len(formatted_time) > 20, f"Time string too short: {formatted_time}"
            assert " AM " in formatted_time or " PM " in formatted_time, f"No AM/PM in time: {formatted_time}"
            return

        time_part, tz_name, tz_offset = match.groups()
        # Parse the time part without timezone
        time_without_tz = datetime.strptime(time_part, "%Y-%m-%d %I:%M:%S %p")
        # Build the UTC offset from the "+HHMM"/"-HHMM" suffix (the regex
        # guarantees exactly five characters). The sign must be applied to BOTH
        # hours and minutes: the previous implementation negated only the hours,
        # so a "-0330" offset became -3h + 30m == -02:30 instead of -03:30,
        # breaking validation for negative half-hour zones (e.g. Newfoundland).
        sign = -1 if tz_offset[0] == "-" else 1
        hours_offset = int(tz_offset[1:3])
        minutes_offset = int(tz_offset[3:5])
        total_offset = sign * timedelta(hours=hours_offset, minutes=minutes_offset)
        tz_info = timezone(total_offset)
        parsed_time = time_without_tz.replace(tzinfo=tz_info)
        # Get current time in the expected timezone
        tz = pytz.timezone(expected_timezone)
        current_time_in_tz = datetime.now(tz)
        # Check that times are within tolerance
        time_diff = abs((parsed_time - current_time_in_tz).total_seconds())
        assert time_diff <= tolerance_minutes * 60, (
            f"Time difference too large: {time_diff}s. Parsed: {parsed_time}, Expected timezone: {current_time_in_tz}"
        )
        # Verify timezone info exists and format looks reasonable
        assert parsed_time.tzinfo is not None, "Parsed time should have timezone info"
        assert tz_name in formatted_time, f"Timezone abbreviation {tz_name} should be in formatted time"

    def test_get_heartbeat_timezone_accuracy(self):
        """Test that get_heartbeat produces accurate timestamps for different timezones"""
        test_timezones = ["UTC", "America/New_York", "America/Los_Angeles", "Europe/London", "Asia/Tokyo"]
        for tz in test_timezones:
            heartbeat = get_heartbeat(timezone=tz, reason="Test heartbeat")
            time_str = self._extract_time_from_json(heartbeat)
            self._validate_timezone_accuracy(time_str, tz)

    def test_get_login_event_timezone_accuracy(self):
        """Test that get_login_event produces accurate timestamps for different timezones"""
        test_timezones = ["UTC", "US/Eastern", "US/Pacific", "Australia/Sydney"]
        for tz in test_timezones:
            login = get_login_event(timezone=tz, last_login="2024-01-01")
            time_str = self._extract_time_from_json(login)
            self._validate_timezone_accuracy(time_str, tz)

    def test_package_user_message_timezone_accuracy(self):
        """Test that package_user_message produces accurate timestamps for different timezones"""
        test_timezones = ["UTC", "America/Chicago", "Europe/Paris", "Asia/Shanghai"]
        for tz in test_timezones:
            message = package_user_message("Test message", timezone=tz)
            time_str = self._extract_time_from_json(message)
            self._validate_timezone_accuracy(time_str, tz)

    def test_package_function_response_timezone_accuracy(self):
        """Test that package_function_response produces accurate timestamps for different timezones"""
        test_timezones = ["UTC", "America/Denver", "Europe/Berlin", "Pacific/Auckland"]
        for tz in test_timezones:
            response = package_function_response(True, "Success", timezone=tz)
            time_str = self._extract_time_from_json(response)
            self._validate_timezone_accuracy(time_str, tz)

    def test_package_system_message_timezone_accuracy(self):
        """Test that package_system_message produces accurate timestamps for different timezones"""
        test_timezones = ["UTC", "America/Phoenix", "Europe/Rome", "Asia/Kolkata"]  # Mumbai is now called Kolkata in pytz
        for tz in test_timezones:
            message = package_system_message("System alert", timezone=tz)
            time_str = self._extract_time_from_json(message)
            self._validate_timezone_accuracy(time_str, tz)

    def test_package_summarize_message_timezone_accuracy(self):
        """Test that package_summarize_message produces accurate timestamps for different timezones"""
        test_timezones = ["UTC", "America/Anchorage", "Europe/Stockholm", "Asia/Seoul"]
        for tz in test_timezones:
            summary = package_summarize_message(
                summary="Test summary", summary_message_count=2, hidden_message_count=5, total_message_count=7, timezone=tz
            )
            time_str = self._extract_time_from_json(summary)
            self._validate_timezone_accuracy(time_str, tz)

    def test_get_local_time_timezone_direct(self):
        """Test get_local_time_timezone directly for accuracy"""
        test_timezones = ["UTC", "America/New_York", "Europe/London", "Asia/Tokyo", "Australia/Melbourne"]
        for tz in test_timezones:
            time_str = get_local_time_timezone(timezone=tz)
            self._validate_timezone_accuracy(time_str, tz)

    def test_get_local_time_with_timezone_param(self):
        """Test get_local_time when timezone parameter is provided"""
        test_timezones = ["UTC", "America/Los_Angeles", "Europe/Madrid", "Asia/Bangkok"]
        for tz in test_timezones:
            time_str = get_local_time(timezone=tz)
            self._validate_timezone_accuracy(time_str, tz)

    def test_timezone_offset_differences(self):
        """Test that different timezones produce appropriately offset times"""
        # Get times for different timezones at the same moment
        utc_heartbeat = get_heartbeat(timezone="UTC")
        utc_time_str = self._extract_time_from_json(utc_heartbeat)
        ny_heartbeat = get_heartbeat(timezone="America/New_York")
        ny_time_str = self._extract_time_from_json(ny_heartbeat)
        tokyo_heartbeat = get_heartbeat(timezone="Asia/Tokyo")
        tokyo_time_str = self._extract_time_from_json(tokyo_heartbeat)
        # Just validate that all times have the expected format
        # UTC should have UTC in the string
        assert "UTC" in utc_time_str, f"UTC timezone not found in: {utc_time_str}"
        # NY should have EST or EDT
        assert any(tz in ny_time_str for tz in ["EST", "EDT"]), f"EST/EDT not found in: {ny_time_str}"
        # Tokyo should have JST
        assert "JST" in tokyo_time_str, f"JST not found in: {tokyo_time_str}"

    def test_daylight_saving_time_handling(self):
        """Test that DST transitions are handled correctly"""
        # Test timezone that observes DST
        eastern_tz = "America/New_York"
        # Get current time in Eastern timezone
        message = package_user_message("DST test", timezone=eastern_tz)
        time_str = self._extract_time_from_json(message)
        # Validate against current Eastern time
        self._validate_timezone_accuracy(time_str, eastern_tz)
        # The timezone abbreviation should be either EST or EDT
        assert any(tz in time_str for tz in ["EST", "EDT"]), f"EST/EDT not found in: {time_str}"

    @pytest.mark.parametrize(
        "timezone_str,expected_format_parts",
        [
            ("UTC", ["UTC", "+0000"]),
            ("America/New_York", ["EST", "EDT"]),  # Either EST or EDT depending on date
            ("Europe/London", ["GMT", "BST"]),  # Either GMT or BST depending on date
            ("Asia/Tokyo", ["JST", "+0900"]),
            ("Australia/Sydney", ["AEDT", "AEST"]),  # Either AEDT or AEST depending on date
        ],
    )
    def test_timezone_format_components(self, timezone_str, expected_format_parts):
        """Test that timezone formatting includes expected components"""
        heartbeat = get_heartbeat(timezone=timezone_str)
        time_str = self._extract_time_from_json(heartbeat)
        # Check that at least one expected format part is present
        found_expected_part = any(part in time_str for part in expected_format_parts)
        assert found_expected_part, f"None of expected format parts {expected_format_parts} found in time string: {time_str}"
        # Validate the time is accurate
        self._validate_timezone_accuracy(time_str, timezone_str)

    def test_timezone_parameter_working(self):
        """Test that timezone parameter correctly affects the output"""
        # Test that different timezones produce different time formats
        utc_message = package_user_message("Test", timezone="UTC")
        utc_time = self._extract_time_from_json(utc_message)
        ny_message = package_user_message("Test", timezone="America/New_York")
        ny_time = self._extract_time_from_json(ny_message)
        # Times should have different timezone indicators
        assert "UTC" in utc_time, f"UTC not found in: {utc_time}"
        assert any(tz in ny_time for tz in ["EST", "EDT"]), f"EST/EDT not found in: {ny_time}"
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/test_timezone_formatting.py",
"license": "Apache License 2.0",
"lines": 173,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/test_tool_schema_parsing_files/expected_base_tool_schemas.py | def get_rethink_user_memory_schema():
return {
"name": "rethink_user_memory",
"description": (
"Rewrite memory block for the main agent, new_memory should contain all current "
"information from the block that is not outdated or inconsistent, integrating any "
"new information, resulting in a new memory block that is organized, readable, and "
"comprehensive."
),
"parameters": {
"type": "object",
"properties": {
"new_memory": {
"type": "string",
"description": (
"The new memory with information integrated from the memory block. "
"If there is no new information, then this should be the same as the "
"content in the source block."
),
},
},
"required": ["new_memory"],
},
}
def get_finish_rethinking_memory_schema():
    """Expected JSON schema for the parameterless ``finish_rethinking_memory`` base tool."""
    empty_parameters = {"type": "object", "properties": {}, "required": []}
    return {
        "name": "finish_rethinking_memory",
        "description": "This function is called when the agent is done rethinking the memory.",
        "parameters": empty_parameters,
    }
def get_store_memories_schema():
    """Expected JSON schema for the ``store_memories`` base tool."""
    chunk_schema = {
        "type": "object",
        "properties": {
            "start_index": {"type": "integer", "description": "Zero-based index of the first evicted line in this chunk."},
            "end_index": {"type": "integer", "description": "Zero-based index of the last evicted line (inclusive)."},
            "context": {
                "type": "string",
                "description": "1-3 sentence paraphrase capturing key facts/details, user preferences, or goals that this chunk reveals—written for future retrieval.",
            },
        },
        "required": ["start_index", "end_index", "context"],
    }
    return {
        "name": "store_memories",
        "description": "Persist dialogue that is about to fall out of the agent’s context window.",
        "parameters": {
            "type": "object",
            "properties": {
                "chunks": {
                    "type": "array",
                    "items": chunk_schema,
                    "description": "Each chunk pinpoints a contiguous block of **evicted** lines and provides a short, forward-looking synopsis (`context`) that will be embedded for future semantic lookup.",
                }
            },
            "required": ["chunks"],
        },
    }
def get_search_memory_schema():
    """Expected JSON schema for the ``search_memory`` base tool (all parameters optional)."""
    keyword_property = {
        "type": "array",
        "items": {"type": "string"},
        "description": (
            "Extra keywords (e.g., order ID, place name). Use *null* if not appropriate for the latest user message."
        ),
    }
    start_property = {
        "type": "integer",
        "description": (
            "Newer bound of the time window for results, specified in minutes ago. Use *null* if no lower time bound is needed."
        ),
    }
    end_property = {
        "type": "integer",
        "description": ("Older bound of the time window, in minutes ago. Use *null* if no upper bound is needed."),
    }
    return {
        "name": "search_memory",
        "description": "Look in long-term or earlier-conversation memory only when the user asks about something missing from the visible context. The user’s latest utterance is sent automatically as the main query.",
        "parameters": {
            "type": "object",
            "properties": {
                "convo_keyword_queries": keyword_property,
                "start_minutes_ago": start_property,
                "end_minutes_ago": end_property,
            },
            "required": [],
        },
    }
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/test_tool_schema_parsing_files/expected_base_tool_schemas.py",
"license": "Apache License 2.0",
"lines": 89,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
locustio/locust:examples/qdrant/locustfile.py | """
Minimal example demonstrating Qdrant load testing with Locust.
"""
from locust import between, task
from locust.contrib.qdrant import QdrantUser
import random
from qdrant_client.models import Distance, PointStruct, VectorParams
class SimpleQdrantUser(QdrantUser):
    """Minimal Qdrant user exercising upsert/search/scroll/delete on one collection."""

    wait_time = between(1, 3)
    collection_name = "load_test_collection"
    vectors_config = VectorParams(
        size=128,
        distance=Distance.COSINE,
    )

    def __init__(self, environment):
        # The Qdrant URL comes from the --host option passed to Locust.
        self.url = environment.host
        super().__init__(environment)

    def on_start(self):
        # Pre-generate a small pool of query vectors so search tasks reuse them.
        self.dimension = 128
        self.test_vectors = [[random.random() for _ in range(self.dimension)] for _ in range(10)]

    @task(3)
    def upsert_data(self):
        """Insert a single random point."""
        point = PointStruct(
            id=random.randint(1, 10000),
            vector=[random.random() for _ in range(self.dimension)],
            payload={"name": f"item_{random.randint(1, 1000)}"},
        )
        self.upsert([point])

    @task(5)
    def search_vectors(self):
        """Run a top-5 similarity search with a pre-generated vector."""
        self.search(query=random.choice(self.test_vectors), limit=5)

    @task(2)
    def scroll_data(self):
        """Page through a handful of stored points."""
        self.scroll(limit=5)

    @task(1)
    def delete_data(self):
        """Delete a random point id."""
        self.delete(points_selector=[random.randint(1, 10000)])
| {
"repo_id": "locustio/locust",
"file_path": "examples/qdrant/locustfile.py",
"license": "MIT License",
"lines": 42,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
locustio/locust:locust/contrib/qdrant.py | from locust import User, events
import time
from typing import Any
from qdrant_client import QdrantClient
from qdrant_client.models import VectorParams
class QdrantLocustClient:
    """Qdrant client wrapper that reports results as plain dicts.

    Every operation returns a dict with at least:
      - ``success``: bool
      - ``response_time``: elapsed wall-clock time in milliseconds
    plus ``result`` (and operation-specific keys) on success, or ``exception``
    on failure. Exceptions are captured, never raised, so callers (the Locust
    user) can turn them into request events.
    """

    def __init__(self, url, collection_name, api_key=None, timeout=60, **kwargs):
        """Create the underlying QdrantClient.

        Args:
            url: Qdrant server URL.
            collection_name: Collection all operations target.
            api_key: Optional API key.
            timeout: Client timeout in seconds.
            **kwargs: Forwarded to ``QdrantClient``.
        """
        self.url = url
        self.collection_name = collection_name
        self.api_key = api_key
        self.timeout = timeout
        self.client = QdrantClient(
            url=self.url,
            api_key=self.api_key,
            timeout=self.timeout,
            **kwargs,
        )

    def close(self):
        """Close the underlying client connection."""
        self.client.close()

    def create_collection(self, vectors_config, **kwargs):
        """Create the collection if it does not already exist (idempotent)."""
        if not self.client.collection_exists(collection_name=self.collection_name):
            self.client.create_collection(
                collection_name=self.collection_name,
                vectors_config=vectors_config,
                **kwargs,
            )

    @staticmethod
    def _elapsed_ms(start):
        """Milliseconds elapsed since *start* (a ``time.time()`` timestamp)."""
        return (time.time() - start) * 1000

    @staticmethod
    def _failure(start, exc):
        """Shared failure-result shape for all operations."""
        return {
            "success": False,
            "response_time": (time.time() - start) * 1000,
            "exception": exc,
        }

    def upsert(self, points):
        """Upsert *points* into the collection; never raises."""
        start = time.time()
        try:
            result = self.client.upsert(
                collection_name=self.collection_name,
                points=points,
            )
            return {"success": True, "response_time": self._elapsed_ms(start), "result": result}
        except Exception as e:
            return self._failure(start, e)

    def search(
        self,
        query,
        limit=10,
        query_filter=None,
        search_params=None,
        with_payload=True,
    ):
        """Run a similarity query; an empty result set counts as a failure."""
        start = time.time()
        try:
            result = self.client.query_points(
                collection_name=self.collection_name,
                query=query,
                limit=limit,
                query_filter=query_filter,
                search_params=search_params,
                with_payload=with_payload,
            )
            # Empty hits are reported as success=False so Locust surfaces them.
            empty = len(result.points) == 0
            return {
                "success": not empty,
                "response_time": self._elapsed_ms(start),
                "empty": empty,
                "result": result,
            }
        except Exception as e:
            return self._failure(start, e)

    def scroll(
        self,
        scroll_filter=None,
        limit=10,
        with_payload=True,
    ):
        """Scroll through points; an empty page counts as a failure."""
        start = time.time()
        try:
            result, next_offset = self.client.scroll(
                collection_name=self.collection_name,
                scroll_filter=scroll_filter,
                limit=limit,
                with_payload=with_payload,
            )
            empty = len(result) == 0
            return {
                "success": not empty,
                "response_time": self._elapsed_ms(start),
                "empty": empty,
                "result": result,
                "next_offset": next_offset,
            }
        except Exception as e:
            return self._failure(start, e)

    def delete(self, points_selector):
        """Delete the points matched by *points_selector*; never raises."""
        start = time.time()
        try:
            result = self.client.delete(
                collection_name=self.collection_name,
                points_selector=points_selector,
            )
            return {"success": True, "response_time": self._elapsed_ms(start), "result": result}
        except Exception as e:
            return self._failure(start, e)
# ----------------------------------
# Locust User wrapper
# ----------------------------------
class QdrantUser(User):
    """Locust User implementation for Qdrant operations.

    This class wraps the QdrantLocustClient implementation and translates
    client method results into Locust request events so that performance
    statistics are collected properly.

    Configuration is via class attributes on a subclass (not constructor
    arguments):

    Attributes
    ----------
    url : str
        Qdrant server URL, e.g. ``"http://localhost:6333"``.
    api_key : str | None
        Optional API key forwarded to the Qdrant client.
    collection_name : str
        The name of the collection to operate on (required).
    timeout : int
        Client timeout in seconds.
    vectors_config : VectorParams | None
        When set, the collection is created on startup with this config.
    client_kwargs : dict | None
        Additional keyword arguments forwarded to the client constructor.
    collection_kwargs : dict | None
        Additional keyword arguments forwarded to ``create_collection``.
    """

    abstract = True

    url: str = "http://localhost:6333"
    api_key: str | None = None
    collection_name: str | None = None
    timeout: int = 60
    vectors_config: VectorParams | None = None
    client_kwargs: dict | None = None
    collection_kwargs: dict | None = None

    def __init__(self, environment):
        super().__init__(environment)
        if self.collection_name is None:
            raise ValueError("'collection_name' must be provided for QdrantUser")
        # Label used as the request_type in Locust statistics.
        self.client_type = "qdrant"
        self.client = QdrantLocustClient(
            url=self.url,
            api_key=self.api_key,
            collection_name=self.collection_name,
            timeout=self.timeout,
            **(self.client_kwargs or {}),
        )
        # Optionally create the collection up front so tasks can assume it exists.
        if self.vectors_config is not None:
            self.client.create_collection(vectors_config=self.vectors_config, **(self.collection_kwargs or {}))

    @staticmethod
    def _fire_event(request_type: str, name: str, result: dict[str, Any]):
        """Emit a Locust request event from a Qdrant client result dict."""
        response_time = int(result.get("response_time", 0))
        events.request.fire(
            request_type=request_type,  # was a pointless f-string wrapper
            name=name,
            response_time=response_time,
            response_length=0,
            exception=result.get("exception"),
        )

    def upsert(self, points):
        """Upsert points and record the request in Locust stats."""
        result = self.client.upsert(points)
        self._fire_event(self.client_type, "upsert", result)
        return result

    def search(
        self,
        query,
        limit=10,
        query_filter=None,
        search_params=None,
        with_payload=True,
    ):
        """Run a similarity query and record the request in Locust stats."""
        result = self.client.search(
            query=query,
            limit=limit,
            query_filter=query_filter,
            search_params=search_params,
            with_payload=with_payload,
        )
        self._fire_event(self.client_type, "search", result)
        return result

    def scroll(
        self,
        scroll_filter=None,
        limit=10,
        with_payload=True,
    ):
        """Scroll through points and record the request in Locust stats."""
        result = self.client.scroll(
            scroll_filter=scroll_filter,
            limit=limit,
            with_payload=with_payload,
        )
        self._fire_event(self.client_type, "scroll", result)
        return result

    def delete(self, points_selector):
        """Delete points and record the request in Locust stats."""
        result = self.client.delete(points_selector)
        self._fire_event(self.client_type, "delete", result)
        return result

    def on_stop(self):
        """Close the underlying client when the simulated user stops."""
        self.client.close()
| {
"repo_id": "locustio/locust",
"file_path": "locust/contrib/qdrant.py",
"license": "MIT License",
"lines": 210,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
locustio/locust:locust/opentelemetry.py | import logging
import os
from urllib.parse import urlparse
from ._version import __version__
logger = logging.getLogger(__name__)
def setup_opentelemetry() -> bool:
    """Configure OpenTelemetry tracing/metrics from the OTEL_* environment.

    Returns True when providers were installed, False when the SDK is not
    installed or when every exporter is explicitly disabled with 'none'.
    """
    try:
        from opentelemetry import metrics, trace
        from opentelemetry.sdk.resources import Resource
    except ImportError:
        logger.error("OpenTelemetry SDK is not installed, opentelemetry not enabled. Run 'pip install locust[otel]'")
        return False

    def parse_exporters(env_var: str) -> set[str]:
        # Comma-separated exporter names, normalized to lowercase; 'otlp' default.
        return {name.strip().lower() for name in os.getenv(env_var, "otlp").split(",") if name.strip()}

    traces_exporters = parse_exporters("OTEL_TRACES_EXPORTER")
    metrics_exporters = parse_exporters("OTEL_METRICS_EXPORTER")

    if traces_exporters == {"none"} and metrics_exporters == {"none"}:
        logger.info("No OpenTelemetry exporters configured, opentelemetry not enabled")
        return False

    resource = Resource.create(
        {
            "service.name": os.getenv("OTEL_SERVICE_NAME", "locust"),
            "service.version": __version__,
        }
    )

    if traces_exporters:
        trace.set_tracer_provider(_setup_tracer_provider(resource, traces_exporters))
    if metrics_exporters:
        metrics.set_meter_provider(_setup_meter_provider(resource, metrics_exporters))

    _setup_auto_instrumentation()
    logger.debug("OpenTelemetry configured!")
    return True
def _setup_tracer_provider(resource, traces_exporters):
    """Build a TracerProvider with one span processor per requested exporter.

    Unknown exporter names are logged and skipped; 'none' entries are ignored,
    so the returned provider may have no processors attached.
    """
    from opentelemetry.sdk.trace import TracerProvider
    from opentelemetry.sdk.trace.export import BatchSpanProcessor, ConsoleSpanExporter, SimpleSpanProcessor

    tracer_provider = TracerProvider(resource=resource)
    for exporter in traces_exporters:
        if exporter == "otlp":
            # Per-signal protocol env var wins over the generic one; default grpc.
            protocol = (
                os.getenv("OTEL_EXPORTER_OTLP_TRACES_PROTOCOL", os.getenv("OTEL_EXPORTER_OTLP_PROTOCOL", "grpc"))
                .lower()
                .strip()
            )
            try:
                # The import itself selects the transport implementation.
                if protocol == "grpc":
                    from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import OTLPSpanExporter
                elif protocol == "http/protobuf" or protocol == "http":
                    from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
                else:
                    logger.warning(
                        f"Unknown OpenTelemetry otlp exporter protocol '{protocol}'. Use 'grpc' or 'http/protobuf'"
                    )
                    continue
            except ImportError:
                logger.warning(
                    f"OpenTelemetry otlp exporter for '{protocol}' is not available. Please install the required package: opentelemetry-exporter-otlp-proto-{'grpc' if protocol == 'grpc' else 'http'}"
                )
                continue
            tracer_provider.add_span_processor(BatchSpanProcessor(OTLPSpanExporter()))
            logger.debug("Configured traces exporter: otlp")
        elif exporter == "console":
            # Console output is for debugging; export synchronously.
            tracer_provider.add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter()))
            logger.debug("Configured traces exporter: console")
        elif exporter == "none":
            continue
        else:
            logger.warning(f"Unknown traces exporter '{exporter}'. Ignored")
    return tracer_provider
def _setup_meter_provider(resource, metrics_exporters):
    """Build a MeterProvider with one metric reader per requested exporter.

    Unknown exporter names are logged and skipped; 'none' entries are ignored.
    """
    from opentelemetry.sdk.metrics import MeterProvider
    from opentelemetry.sdk.metrics.export import ConsoleMetricExporter, PeriodicExportingMetricReader

    metric_readers = []
    for exporter in metrics_exporters:
        if exporter == "otlp":
            # Per-signal protocol env var wins over the generic one; default grpc.
            protocol = (
                os.getenv("OTEL_EXPORTER_OTLP_METRICS_PROTOCOL", os.getenv("OTEL_EXPORTER_OTLP_PROTOCOL", "grpc"))
                .lower()
                .strip()
            )
            try:
                # The import itself selects the transport implementation.
                if protocol == "grpc":
                    from opentelemetry.exporter.otlp.proto.grpc.metric_exporter import OTLPMetricExporter
                elif protocol == "http/protobuf" or protocol == "http":
                    from opentelemetry.exporter.otlp.proto.http.metric_exporter import OTLPMetricExporter
                else:
                    logger.warning(
                        f"Unknown OpenTelemetry otlp exporter protocol '{protocol}'. Use 'grpc' or 'http/protobuf'"
                    )
                    continue
            except ImportError:
                logger.warning(
                    f"OpenTelemetry otlp exporter for '{protocol}' is not available. Please install the required package: opentelemetry-exporter-otlp-proto-{'grpc' if protocol == 'grpc' else 'http'}"
                )
                continue
            metric_reader = PeriodicExportingMetricReader(OTLPMetricExporter())
            metric_readers.append(metric_reader)
            logger.debug("Configured metrics exporter: otlp")
        elif exporter == "prometheus":
            # TODO: Add support for Prometheus metrics exporter
            logger.warning("Prometheus metrics exporter is not yet implemented!")
        elif exporter == "console":
            metric_reader = PeriodicExportingMetricReader(ConsoleMetricExporter())
            metric_readers.append(metric_reader)
            logger.debug("Configured metrics exporter: console")
        elif exporter == "none":
            continue
        else:
            logger.warning(f"Unknown metrics exporter '{exporter}'. Ignored")
    return MeterProvider(resource=resource, metric_readers=metric_readers)
def _setup_auto_instrumentation():
    """Install auto-instrumentation for the requests and urllib3 libraries.

    Each instrumentation is optional: a missing package is logged at info
    level and skipped rather than treated as an error.
    """
    try:
        import requests
        from opentelemetry.instrumentation.requests import RequestsInstrumentor
        from opentelemetry.sdk.trace import Span

        def request_hook(span: Span, request: requests.PreparedRequest):
            # Prefer an explicit request name (set elsewhere as `_explicit_name`),
            # otherwise fall back to "<METHOD> <url path>".
            if name := getattr(request, "_explicit_name", None):
                span.update_name(f"{request.method} {name}")
            else:
                parsed = urlparse(request.url)
                span.update_name(f"{request.method} {str(parsed.path) or '/'}")

        RequestsInstrumentor().instrument(request_hook=request_hook)
    except ImportError:
        logger.info(
            "OpenTelemetry 'requests' instrumentation is not installed. Please install 'opentelemetry-instrumentation-requests'"
        )
    try:
        from opentelemetry.instrumentation.urllib3 import URLLib3Instrumentor

        URLLib3Instrumentor().instrument()
    except ImportError:
        logger.info(
            "OpenTelemetry 'urllib3' instrumentation is not installed. Please install 'opentelemetry-instrumentation-urllib3'"
        )
| {
"repo_id": "locustio/locust",
"file_path": "locust/opentelemetry.py",
"license": "MIT License",
"lines": 132,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
locustio/locust:examples/dns_ex.py | from locust import run_single_user, task
from locust.contrib.dns import DNSUser
import time
import dns.message
import dns.rdatatype
class MyDNSUser(DNSUser):
    """Example DNS user exercising UDP and TCP A-record lookups."""

    @task
    def t(self):
        query = dns.message.make_query("example.com", dns.rdatatype.A)
        # self.client wraps all dns.query methods https://dnspython.readthedocs.io/en/stable/query.html
        self.client.udp(query, "8.8.8.8")
        self.client.tcp(query, "1.1.1.1")
        # A lookup that cannot resolve, demonstrating the name= override.
        missing = dns.message.make_query("doesnot-exist-1234234.com", dns.rdatatype.A)
        self.client.udp(
            missing,
            "1.1.1.1",
            name="You can rename requests",
        )
        # don't spam other people's DNS servers
        time.sleep(10)
if __name__ == "__main__":
run_single_user(MyDNSUser)
| {
"repo_id": "locustio/locust",
"file_path": "examples/dns_ex.py",
"license": "MIT License",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
locustio/locust:locust/contrib/dns.py | from locust import User
from locust.exception import LocustError
import time
from collections.abc import Callable
import dns.query
from dns.exception import DNSException
from dns.message import Message
class DNSClient:
    """Attribute proxy over :py:mod:`dns.query` that fires Locust request events.

    Accessing any ``dns.query`` function name on this object (``udp``, ``tcp``,
    ``https``, ...) returns an instrumented wrapper with the same signature
    plus an optional ``name=`` keyword to rename the recorded request.
    """

    def __init__(self, request_event):
        self.request_event = request_event

    def __getattr__(self, function_name) -> Callable[..., Message]:
        query_func = getattr(dns.query, function_name)

        def instrumented(message: Message, *args, name=None, **kwargs) -> Message:
            # Payload expected by Locust's request event hook.
            meta = {
                "request_type": "DNS",
                "name": name or function_name,
                "start_time": time.time(),
                "response_length": 0,
                "context": {},
                "exception": None,
            }
            answer = None
            began = time.perf_counter()
            try:
                answer = query_func(message, *args, **kwargs)
            except DNSException as err:
                meta["exception"] = err
            else:
                # A well-formed but empty reply is still treated as a failure.
                if not answer.answer:
                    meta["exception"] = LocustError("No answer in DNS response")
            meta["response_time"] = (time.perf_counter() - began) * 1000
            meta["response"] = answer
            self.request_event.fire(**meta)
            return answer

        return instrumented  # pyright cannot infer the Message return type here
class DNSUser(User):
    """
    DNSUser provides a locust client class for dnspython's :py:mod:`dns.query` methods.
    See example in :gh:`examples/dns_ex.py`.
    """

    # Marked abstract so Locust never spawns this base class directly.
    abstract = True

    def __init__(self, environment):
        super().__init__(environment)
        # Fires environment.events.request for every wrapped dns.query call.
        self.client = DNSClient(environment.events.request)
        """
        Example (inside task method)::
            message = dns.message.make_query("example.com", dns.rdatatype.A)
            self.client.udp(message, "1.1.1.1")
            self.client.https(message, "1.1.1.1")
        """
| {
"repo_id": "locustio/locust",
"file_path": "locust/contrib/dns.py",
"license": "MIT License",
"lines": 50,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
locustio/locust:examples/mqtt/locustfile.py | from locust import task
from locust.contrib.mqtt import MqttUser
from locust.user.wait_time import between
import time
class MyUser(MqttUser):
    """Example MQTT user that publishes a greeting at a throttled rate."""

    host = "localhost"
    port = 1883
    # We could uncomment below to use the WebSockets transport
    # transport = "websockets"
    # ws_path = "/mqtt/custom/path"
    # We'll probably want to throttle our publishing a bit: let's limit it to
    # 10-100 messages per second.
    wait_time = between(0.01, 0.1)
    # Uncomment below if you need to set MQTTv5
    # protocol = paho.mqtt.client.MQTTv5
    # Sleep for a while to allow the client time to connect.
    # This is probably not the most "correct" way to do this: a better method
    # might be to add a gevent.event.Event to the MqttClient's on_connect
    # callback and wait for that (with a timeout) here.
    # However, this works well enough for the sake of an example.
    def on_start(self):
        time.sleep(5)

    @task
    def say_hello(self):
        # Fire-and-forget publish; MqttClient records the Locust event.
        self.client.publish("hello/locust", b"hello world")
| {
"repo_id": "locustio/locust",
"file_path": "examples/mqtt/locustfile.py",
"license": "MIT License",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
locustio/locust:examples/mqtt/locustfile_custom_mqtt_client.py | from locust import task
from locust.contrib.mqtt import MqttClient, MqttUser
from locust.user.wait_time import between
import time
# extend the MqttClient class with your own custom implementation
class MyMqttClient(MqttClient):
    """MqttClient subclass demonstrating a custom request-name scheme."""

    def _generate_event_name(self, event_type: str, qos: int, topic: str):
        # Drop the topic from the recorded name; keep only type and QoS.
        return f"mqtt:{event_type}:{qos}"
class MyUser(MqttUser):
    """Example MQTT user wired to the custom MyMqttClient defined above."""

    host = "localhost"
    port = 1883
    # We could uncomment below to use the WebSockets transport
    # transport = "websockets"
    # ws_path = "/mqtt/custom/path"
    # We'll probably want to throttle our publishing a bit: let's limit it to
    # 10-100 messages per second.
    wait_time = between(0.01, 0.1)
    # override the client_cls with your custom MqttClient implementation
    client_cls = MyMqttClient
    # Sleep for a while to allow the client time to connect.
    # This is probably not the most "correct" way to do this: a better method
    # might be to add a gevent.event.Event to the MqttClient's on_connect
    # callback and wait for that (with a timeout) here.
    # However, this works well enough for the sake of an example.
    def on_start(self):
        time.sleep(5)

    @task
    def say_hello(self):
        self.client.publish("hello/locust", b"hello world locust custom client")
| {
"repo_id": "locustio/locust",
"file_path": "examples/mqtt/locustfile_custom_mqtt_client.py",
"license": "MIT License",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
locustio/locust:locust/contrib/mqtt.py | from __future__ import annotations
from locust import User
from locust.env import Environment
import random
import selectors
import time
import typing
from contextlib import suppress
import paho.mqtt.client as mqtt
from paho.mqtt.enums import MQTTErrorCode
if typing.TYPE_CHECKING:
from paho.mqtt.client import MQTTMessageInfo
from paho.mqtt.enums import MQTTProtocolVersion
from paho.mqtt.properties import Properties
from paho.mqtt.reasoncodes import ReasonCode
from paho.mqtt.subscribeoptions import SubscribeOptions
# A SUBACK response for MQTT can only contain 0x00, 0x01, 0x02, or 0x80. 0x80
# indicates a failure to subscribe.
#
# http://docs.oasis-open.org/mqtt/mqtt/v3.1.1/os/mqtt-v3.1.1-os.html#_Figure_3.26_-
SUBACK_FAILURE = 0x80
REQUEST_TYPE = "MQTT"
def _generate_random_id(
length: int,
alphabet: str = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ",
):
"""Generate a random ID from the given alphabet.
Args:
length: the number of random characters to generate.
alphabet: the pool of random characters to choose from.
"""
return "".join(random.choice(alphabet) for _ in range(length))
def _generate_mqtt_event_name(event_type: str, qos: int, topic: str):
"""Generate a name to identify publish/subscribe tasks.
This will be used to ultimately identify tasks in the Locust web console.
This will identify publish/subscribe tasks with their QoS & associated
topic.
Examples:
publish:0:my/topic
subscribe:1:my/other/topic
Args:
event_type: The type of MQTT event (subscribe or publish)
qos: The quality-of-service associated with this event
topic: The MQTT topic associated with this event
"""
return f"{event_type}:{qos}:{topic}"
class PublishedMessageContext(typing.NamedTuple):
    """Stores metadata about outgoing published messages."""

    # Quality-of-service level requested for the publish.
    qos: int
    # Topic the message was published to.
    topic: str
    # time.time() timestamp captured just before publishing.
    start_time: float
    # Payload size in bytes (0 for numeric or empty payloads).
    payload_size: int
class MqttClient(mqtt.Client):
    """paho-mqtt client wrapper that reports connect/disconnect/publish/
    subscribe outcomes as Locust request events."""

    def __init__(
        self,
        *args,
        environment: Environment,
        client_id: str | None = None,
        protocol: MQTTProtocolVersion = mqtt.MQTTv311,
        use_loop_selectors: bool = False,
        **kwargs,
    ):
        """Initializes a paho.mqtt.Client for use in Locust swarms.

        This class passes most args & kwargs through to the underlying
        paho.mqtt constructor.

        Args:
            environment: the Locust environment with which to associate events.
            client_id: the MQTT Client ID to use in connecting to the broker.
                If not set, one will be randomly generated.
            protocol: the MQTT protocol version.
                defaults to MQTT v3.11.
            use_loop_selectors: use a selector-based network loop instead of
                paho's default (supports more concurrent connections).
        """
        # If a client ID is not provided, this class will randomly generate an ID
        # of the form: `locust-[0-9a-zA-Z]{16}` (i.e., `locust-` followed by 16
        # random characters, so that the resulting client ID does not exceed the
        # specification limit of 23 characters).
        # This is done in this wrapper class so that this locust client can
        # self-identify when firing requests, since some versions of MQTT will
        # have the broker assign IDs to clients that do not provide one: in this
        # case, there is no way to retrieve the client ID.
        # See https://github.com/eclipse/paho.mqtt.python/issues/237
        if not client_id:
            client_id = f"locust-{_generate_random_id(16)}"
        super().__init__(*args, client_id=client_id, protocol=protocol, **kwargs)
        self.environment = environment
        # we need to set client_id in case the broker assigns one to us
        self.client_id = client_id
        self.on_publish = self._on_publish_cb  # type: ignore[assignment]
        # MQTT v5 and v3.x use different callback signatures in paho-mqtt,
        # so install the matching adapter set.
        if protocol == mqtt.MQTTv5:
            self.on_disconnect = self._on_disconnect_cb_v5
            self.on_connect = self._on_connect_cb_v5
            self.on_subscribe = self._on_subscribe_cb_v5
        else:
            self.on_disconnect = self._on_disconnect_cb_v3x  # type: ignore[assignment]
            self.on_connect = self._on_connect_cb_v3x  # type: ignore[assignment]
            self.on_subscribe = self._on_subscribe_cb_v3x  # type: ignore[assignment]
        # In-flight request bookkeeping, keyed by paho message id (mid),
        # consumed by the publish/subscribe acknowledgement callbacks.
        self._publish_requests: dict[int, PublishedMessageContext] = {}
        self._subscribe_requests: dict[int, tuple[int, str, float]] = {}
        self._use_loop_selectors = use_loop_selectors

    def _generate_event_name(self, event_type: str, qos: int, topic: str):
        """Build the Locust request name; override to customize grouping."""
        return _generate_mqtt_event_name(event_type, qos, topic)

    def _on_publish_cb(
        self,
        client: mqtt.Client,
        userdata: typing.Any,
        mid: int,
    ):
        """Fire a Locust event when the broker acknowledges a publish."""
        cb_time = time.time()
        try:
            request_context = self._publish_requests.pop(mid)
        except KeyError:
            # we shouldn't hit this block of code
            self.environment.events.request.fire(
                request_type=REQUEST_TYPE,
                name="publish",
                response_time=0,
                response_length=0,
                exception=AssertionError(f"Could not find message data for mid '{mid}' in _on_publish_cb."),
                context={
                    "client_id": self.client_id,
                    "mid": mid,
                },
            )
        else:
            # fire successful publish event
            self.environment.events.request.fire(
                request_type=REQUEST_TYPE,
                name=self._generate_event_name("publish", request_context.qos, request_context.topic),
                response_time=(cb_time - request_context.start_time) * 1000,
                response_length=request_context.payload_size,
                exception=None,
                context={
                    "client_id": self.client_id,
                    **request_context._asdict(),
                },
            )

    def _on_subscribe_cb_v3x(
        self,
        client: mqtt.Client,
        userdata: typing.Any,
        mid: int,
        granted_qos: list[int],
    ):
        """Fire a Locust event for a SUBACK (MQTT v3.x callback signature)."""
        cb_time = time.time()
        try:
            qos, topic, start_time = self._subscribe_requests.pop(mid)
        except KeyError:
            # we shouldn't hit this block of code
            self.environment.events.request.fire(
                request_type=REQUEST_TYPE,
                name="subscribe",
                response_time=0,
                response_length=0,
                exception=AssertionError(f"Could not find message data for mid '{mid}' in _on_subscribe_cb."),
                context={
                    "client_id": self.client_id,
                    "mid": mid,
                },
            )
        else:
            # SUBACK_FAILURE (0x80) in the granted QoS list means the broker
            # refused the subscription.
            if SUBACK_FAILURE in granted_qos:
                self.environment.events.request.fire(
                    request_type=REQUEST_TYPE,
                    name=self._generate_event_name("subscribe", qos, topic),
                    response_time=(cb_time - start_time) * 1000,
                    response_length=0,
                    exception=AssertionError(f"Broker returned an error response during subscription: {granted_qos}"),
                    context={
                        "client_id": self.client_id,
                        "qos": qos,
                        "topic": topic,
                        "start_time": start_time,
                    },
                )
            else:
                # fire successful subscribe event
                self.environment.events.request.fire(
                    request_type=REQUEST_TYPE,
                    name=self._generate_event_name("subscribe", qos, topic),
                    response_time=(cb_time - start_time) * 1000,
                    response_length=0,
                    exception=None,
                    context={
                        "client_id": self.client_id,
                        "qos": qos,
                        "topic": topic,
                        "start_time": start_time,
                    },
                )

    # pylint: disable=unused-argument
    def _on_subscribe_cb_v5(
        self,
        client: mqtt.Client,
        userdata: typing.Any,
        mid: int,
        reason_codes: list[ReasonCode],
        properties: Properties,
    ) -> None:
        """Adapt the MQTT v5 SUBACK callback onto the v3.x handler."""
        granted_qos = [rc.value for rc in reason_codes]
        return self._on_subscribe_cb_v3x(client, userdata, mid, granted_qos)

    def _on_disconnect_cb(
        self,
        client: mqtt.Client,
        userdata: typing.Any,
        rc: int | ReasonCode,
    ):
        """Fire a Locust event on disconnect; non-zero rc counts as a failure."""
        # NOTE(review): on failure the raw paho code/ReasonCode is passed as
        # `exception` (unlike _on_connect_cb, which wraps it in Exception) —
        # confirm this asymmetry is intentional.
        if rc != 0:
            self.environment.events.request.fire(
                request_type=REQUEST_TYPE,
                name="disconnect",
                response_time=0,
                response_length=0,
                exception=rc,
                context={
                    "client_id": self.client_id,
                },
            )
        else:
            self.environment.events.request.fire(
                request_type=REQUEST_TYPE,
                name="disconnect",
                response_time=0,
                response_length=0,
                exception=None,
                context={
                    "client_id": self.client_id,
                },
            )

    def _on_disconnect_cb_v3x(
        self,
        client: mqtt.Client,
        userdata: typing.Any,
        rc: int,
    ):
        """Adapt the MQTT v3.x disconnect callback onto the shared handler."""
        return self._on_disconnect_cb(client, userdata, rc)

    # pylint: disable=unused-argument
    def _on_disconnect_cb_v5(
        self,
        client: mqtt.Client,
        userdata: typing.Any,
        disconnect_flags: mqtt.DisconnectFlags,
        reasoncode: ReasonCode,
        properties: Properties | None,
    ) -> None:
        """Adapt the MQTT v5 disconnect callback onto the shared handler."""
        return self._on_disconnect_cb(client, userdata, reasoncode)

    def _on_connect_cb(
        self,
        client: mqtt.Client,
        userdata: typing.Any,
        flags: dict[str, int],
        rc: int | ReasonCode,
    ):
        """Fire a Locust event on CONNACK; non-zero rc counts as a failure."""
        if rc != 0:
            self.environment.events.request.fire(
                request_type=REQUEST_TYPE,
                name="connect",
                response_time=0,
                response_length=0,
                exception=Exception(str(rc)),
                context={
                    "client_id": self.client_id,
                },
            )
        else:
            self.environment.events.request.fire(
                request_type=REQUEST_TYPE,
                name="connect",
                response_time=0,
                response_length=0,
                exception=None,
                context={
                    "client_id": self.client_id,
                },
            )

    def _on_connect_cb_v3x(
        self,
        client: mqtt.Client,
        userdata: typing.Any,
        flags: dict[str, int],
        rc: int,
    ):
        """Adapt the MQTT v3.x connect callback onto the shared handler."""
        return self._on_connect_cb(client, userdata, flags, rc)

    # pylint: disable=unused-argument
    def _on_connect_cb_v5(
        self,
        client: mqtt.Client,
        userdata: typing.Any,
        connect_flags: mqtt.ConnectFlags,
        reasoncode: ReasonCode,
        properties: Properties | None,
    ) -> None:
        """Adapt the MQTT v5 connect callback onto the shared handler."""
        self._on_connect_cb(client, userdata, {}, reasoncode)

    def _loop(self, timeout: float = 1.0) -> MQTTErrorCode:
        """Override the parent's _loop method to optionally use selectors.

        When use_loop_selectors is True, this uses a selector-based implementation that allows more than 340 connections.
        Otherwise, it falls back to the parent's implementation.
        """
        if self._use_loop_selectors:
            return self._loop_selectors(timeout)
        else:
            return super()._loop(timeout)

    def _loop_selectors(self, timeout: float = 1.0) -> MQTTErrorCode:
        """One iteration of the network loop using selectors instead of select().

        Mirrors paho's _loop but avoids the FD_SETSIZE limit of select().
        """
        if timeout < 0.0:
            raise ValueError("Invalid timeout.")
        sel = selectors.DefaultSelector()
        eventmask = selectors.EVENT_READ
        # Peek at the outgoing queue: if anything is pending, also wait for
        # writability.
        with suppress(IndexError):
            packet = self._out_packet.popleft()
            self._out_packet.appendleft(packet)
            eventmask = selectors.EVENT_WRITE | eventmask
        # used to check if there are any bytes left in the (SSL) socket
        pending_bytes = 0
        if hasattr(self._sock, "pending"):
            pending_bytes = self._sock.pending()  # type: ignore
        # if bytes are pending do not wait in select
        if pending_bytes > 0:
            timeout = 0.0
        try:
            # Both branches register the main socket; the second additionally
            # watches the internal wake-up pipe.
            if self._sockpairR is None:
                sel.register(self._sock, eventmask)  # type: ignore
            else:
                sel.register(self._sock, eventmask)  # type: ignore
                sel.register(self._sockpairR, selectors.EVENT_READ)
            events = sel.select(timeout)
        except TypeError:
            # Socket isn't correct type, in likelihood connection is lost
            return mqtt.MQTT_ERR_CONN_LOST
        except ValueError:
            # Can occur if we just reconnected but rlist/wlist contain a -1 for
            # some reason.
            return mqtt.MQTT_ERR_CONN_LOST
        except Exception:
            # Note that KeyboardInterrupt, etc. can still terminate since they
            # are not derived from Exception
            return mqtt.MQTT_ERR_UNKNOWN
        # Translate selector events back into select()-style read/write lists.
        socklist: list[list] = [[], []]
        for key, _event in events:
            if key.events & selectors.EVENT_READ:
                socklist[0].append(key.fileobj)
            if key.events & selectors.EVENT_WRITE:
                socklist[1].append(key.fileobj)
        if self._sock in socklist[0] or pending_bytes > 0:
            rc = self.loop_read()
            if rc or self._sock is None:
                return rc
        if self._sockpairR and self._sockpairR in socklist[0]:
            # Stimulate output write even though we didn't ask for it, because
            # at that point the publish or other command wasn't present.
            socklist[1].insert(0, self._sock)
            # Clear sockpairR - only ever a single byte written.
            with suppress(BlockingIOError):
                # Read many bytes at once - this allows up to 10000 calls to
                # publish() inbetween calls to loop().
                self._sockpairR.recv(10000)
        if self._sock in socklist[1]:
            rc = self.loop_write()
            if rc or self._sock is None:
                return rc
        sel.close()
        return self.loop_misc()

    def publish(
        self,
        topic: str,
        payload: mqtt.PayloadType | None = None,
        qos: int = 0,
        retain: bool = False,
        properties: Properties | None = None,
    ) -> MQTTMessageInfo:
        """Publish a message to the MQTT broker.

        This method wraps the underlying paho-mqtt client's method in order to
        set up & fire Locust events.
        """
        request_context = PublishedMessageContext(
            qos=qos,
            topic=topic,
            start_time=time.time(),
            payload_size=len(payload) if payload and not isinstance(payload, (int, float)) else 0,
        )
        publish_info = super().publish(topic, payload=payload, qos=qos, retain=retain, properties=properties)
        if publish_info.rc != mqtt.MQTT_ERR_SUCCESS:
            # Immediate failure (e.g. not connected): report right away.
            self.environment.events.request.fire(
                request_type=REQUEST_TYPE,
                name=self._generate_event_name("publish", request_context.qos, request_context.topic),
                response_time=0,
                response_length=0,
                exception=publish_info.rc,
                context={
                    "client_id": self.client_id,
                    **request_context._asdict(),
                },
            )
        else:
            # store this for use in the on_publish callback
            self._publish_requests[publish_info.mid] = request_context
        return publish_info

    def subscribe(
        self,
        topic: str
        | tuple[str, int]
        | tuple[str, SubscribeOptions]
        | list[tuple[str, int]]
        | list[tuple[str, SubscribeOptions]],
        qos: int = 0,
        options: SubscribeOptions | None = None,
        properties: Properties | None = None,
    ) -> tuple[mqtt.MQTTErrorCode, int | None]:
        """Subscribe to a given topic.

        This method wraps the underlying paho-mqtt client's method in order to
        set up & fire Locust events.
        """
        start_time = time.time()
        # For list/tuple forms, record only the first topic in the event name.
        subscribe_topic = topic if isinstance(topic, str) else topic[0][0]
        result, mid = super().subscribe(topic, qos, options, properties)  # type: ignore[arg-type]
        if result != mqtt.MQTT_ERR_SUCCESS:
            self.environment.events.request.fire(
                request_type=REQUEST_TYPE,
                name=self._generate_event_name("subscribe", qos, subscribe_topic),
                response_time=0,
                response_length=0,
                exception=result,
                context={
                    "client_id": self.client_id,
                    "qos": qos,
                    "topic": subscribe_topic,
                    "start_time": start_time,
                },
            )
        else:
            if mid is None:
                # QoS 0 subscriptions do not have a mid, so we'll just fire a success event immediately
                self.environment.events.request.fire(
                    request_type=REQUEST_TYPE,
                    name=self._generate_event_name("subscribe", qos, subscribe_topic),
                    response_time=(time.time() - start_time) * 1000,
                    response_length=0,
                    exception=None,
                    context={
                        "client_id": self.client_id,
                        "qos": qos,
                        "topic": subscribe_topic,
                        "start_time": start_time,
                    },
                )
            else:
                self._subscribe_requests[mid] = (qos, subscribe_topic, start_time)
        return result, mid
class MqttUser(User):
    """Base Locust user owning an instrumented MQTT client.

    Subclass and override the class attributes below to configure the
    connection; the client connects asynchronously and its network loop is
    started in the background on construction.
    """

    abstract = True

    # Broker connection settings.
    host = "localhost"
    port = 1883
    transport = "tcp"
    ws_path = "/mqtt"
    tls_context = None
    # Client class to instantiate; override with an MqttClient subclass to
    # customize instrumentation.
    client_cls: type[MqttClient] = MqttClient
    client_id = None
    username = None
    password = None
    protocol = mqtt.MQTTv311
    use_loop_selectors: bool = False

    def __init__(self, environment: Environment):
        super().__init__(environment)
        self.client: MqttClient = self.client_cls(
            environment=self.environment,
            transport=self.transport,
            client_id=self.client_id,
            protocol=self.protocol,
            use_loop_selectors=self.use_loop_selectors,
        )
        if self.tls_context:
            self.client.tls_set_context(self.tls_context)
        if self.transport == "websockets" and self.ws_path:
            self.client.ws_set_options(path=self.ws_path)
        # NOTE(review): credentials are applied only when BOTH username and
        # password are set — confirm username-only auth is out of scope.
        if self.username and self.password:
            self.client.username_pw_set(
                username=self.username,
                password=self.password,
            )
        # Non-blocking connect; loop_start() runs the network loop in a
        # background thread.
        self.client.connect_async(
            host=self.host,  # type: ignore
            port=self.port,
        )
        self.client.loop_start()
| {
"repo_id": "locustio/locust",
"file_path": "locust/contrib/mqtt.py",
"license": "MIT License",
"lines": 482,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
locustio/locust:examples/test_pytest.py | from locust.clients import HttpSession # this import is just for type hints
import time
# pytest/locust will discover any functions prefixed with "test_" as test cases.
# session and fastsession are pytest fixtures provided by Locust's pytest plugin.
def test_stuff(session):
    """Tour of the Locust pytest plugin's `session` fixture (HttpSession)."""
    resp = session.get("https://www.locust.io/")
    # Bad HTTP status codes in the response dont automatically raise an exception,
    # so if that is what you want, you need to call:
    resp.raise_for_status()
    # In Locust, request-related exceptions are caught (and the test case restarted),
    # in pytest any exceptions fail the test case
    # Just like with Locust, you can set a base URL using --host/-H when using pytest.
    # Or you can set a default:
    if not session.base_url:
        session.base_url = "https://www.locust.io"
    # catch_response works just like in regular locustfiles
    with session.get("/", catch_response=True) as resp:
        if not resp.text or not "Locust" in resp.text:
            resp.failure("important text was missing in response")
        # raise_for_status also respects calls to resp.failure()/.success()
        # so this will raise an exception and fail the test case if "Load" was missing
        resp.raise_for_status()
    # you can call helper functions as needed
    helper_function(session)
    # unlike regular Locust Users, there's no wait_time, so use time.sleep instead
    time.sleep(0.1)
# this is not a test case and won't be detected by pytest/locust
def helper_function(session: HttpSession):
    """Shared helper; not collected by pytest (name lacks the test_ prefix)."""
    session.get("/")
| {
"repo_id": "locustio/locust",
"file_path": "examples/test_pytest.py",
"license": "MIT License",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
locustio/locust:locust/test/test_pytest_locustfile.py | # pytest style locustfiles, can be run from both pytest and locust!
#
# Example use:
#
# locust -H https://locust.io -f test_pytest.py -u 2 test_regular test_host
# pytest -H https://locust.io test_pytest.py
from locust.clients import HttpSession
from locust.contrib.fasthttp import FastHttpSession
from locust.exception import CatchResponseError
import pytest
def test_regular(session: HttpSession):
    """Plain GET using the requests-based HttpSession fixture."""
    session.get("https://www.locust.io/")
def test_fasthttp(fastsession: FastHttpSession):
    """Plain GET using the FastHttpSession fixture."""
    fastsession.get("https://www.locust.io/")
@pytest.mark.xfail(strict=True)
def test_failure(session: HttpSession):
    """Expected failure: raise_for_status() raises on the 404 response."""
    session.get("https://www.locust.io/")
    resp = session.get("https://www.locust.io/doesnt_exist")
    # the next line will raise a requests.Exception, which will be caught and ignored by Locust.
    # It still prevents the test from going to the next statement, and is useful for failing the test case when run as pytest
    resp.raise_for_status()
    session.get("https://www.locust.io/will_never_run")
def test_catch_response(session: HttpSession):
    """resp.failure() makes raise_for_status() raise CatchResponseError."""
    with session.get("https://www.locust.io/", catch_response=True) as resp:
        if not resp.text or not "asdfasdf" in resp.text:
            resp.failure("important text was missing in response")
        pytest.raises(CatchResponseError, resp.raise_for_status)
def test_fasthttp_catch_response(fastsession: FastHttpSession):
    """Same catch_response flow as above, via the FastHttp client."""
    with fastsession.get("https://www.locust.io/", catch_response=True) as resp:
        if not resp.text or not "asdfasdf" in resp.text:
            resp.failure("important text was missing in response")
        pytest.raises(CatchResponseError, resp.raise_for_status)
@pytest.mark.xfail(strict=True)
def test_fasthttp_failure(fastsession: FastHttpSession):
    """Expected failure: raise_for_status() raises on the 404 response."""
    fastsession.get("https://www.locust.io/")
    resp = fastsession.get("https://www.locust.io/doesnt_exist")
    # the next line will raise a requests.Exception, which will be caught and ignored by Locust.
    # It still prevents the test from going to the next statement, and is useful for failing the test case when run as pytest
    resp.raise_for_status()
    fastsession.get("https://www.locust.io/will_never_run")
host = "https://www.locust.io/"
def test_host(fastsession: FastHttpSession):
    """Relative-URL request; skipped unless a base host was configured."""
    if not fastsession.base_url:
        pytest.skip("Set hostname with --host/-H to run this test (works for both locust and pytest)")
    resp = fastsession.get("/")
    resp.raise_for_status()
| {
"repo_id": "locustio/locust",
"file_path": "locust/test/test_pytest_locustfile.py",
"license": "MIT License",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
locustio/locust:examples/socketio/echo_server.py | # Used by socketio_ex.py as a mock target. Requires installing gevent-websocket
import gevent.monkey
gevent.monkey.patch_all()
import time
import socketio
from flask import Flask
from gevent import pywsgi
from geventwebsocket.handler import WebSocketHandler
# Create a Socket.IO server
sio = socketio.Server(async_mode="gevent")
app = Flask(__name__)
app.wsgi_app = socketio.WSGIApp(sio, app.wsgi_app)
DELAY = 0.01
# When a client connects
@sio.event
def connect(sid, environ):
time.sleep(DELAY)
print(f"Client connected: {sid}")
# Join a room
@sio.event
def join_room(sid, data):
time.sleep(DELAY)
room = data.get("room")
sio.enter_room(sid, room)
print(f"Client {sid} joined room {room}")
# Optionally notify the room
sio.emit("room_joined", f"{sid} joined {room}", room=room)
return f"Joined room {room}"
# Leave a room
@sio.event
def leave_room(sid, data):
time.sleep(DELAY)
room = data.get("room")
sio.leave_room(sid, room)
print(f"Client {sid} left room {room}")
# Broadcast message to a room
@sio.event
def send_message(sid, data):
time.sleep(DELAY)
room = data.get("room")
msg = data.get("message")
print(f"Sent message ({msg} to room {room}")
sio.emit("chat_message", msg, room=room)
# When a client disconnects
@sio.event
def disconnect(sid):
time.sleep(DELAY)
print(f"Client disconnected: {sid}")
server = pywsgi.WSGIServer(("0.0.0.0", 5001), app, handler_class=WebSocketHandler)
server.serve_forever()
| {
"repo_id": "locustio/locust",
"file_path": "examples/socketio/echo_server.py",
"license": "MIT License",
"lines": 50,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
locustio/locust:examples/socketio/socketio_ex.py | from locust import HttpUser, task
from locust.contrib.socketio import SocketIOUser
from threading import Event
import gevent
from socketio import Client
class MySIOHttpUser(SocketIOUser, HttpUser):
options = {
# "logger": True,
# "engineio_logger": True,
}
event: Event
def on_start(self) -> None:
self.sio.connect("ws://localhost:5001", wait_timeout=10)
self.sio_greenlet = gevent.spawn(self.sio.wait)
# If you need authorization, here's how to do it:
# resp = self.client.post("/login", json={"username": "foo", "password": "bar"})
# token = resp.json()["access_token"]
# self.sio.connect(
# "ws://localhost:5001",
# # Option 1: using Authorization header:
# headers={"Authorization": f"Bearer {token}"},
# # Option 2: using auth:
# # auth={"token": token},
# )
@task
def my_task(self):
self.event = Event()
# Send message and wait for confirmation
self.sio.call("join_room", {"room": "room1"})
# Register an event handler
self.sio.on("chat_message", self.on_chat_message)
# Use socketio.Client to send a message that wont be logged as a request
Client.call(self.sio, "send_message", {"room": "room1", "message": "foo"})
# Emit doesnt wait for confirmation
self.sio.emit("send_message", {"room": "room1", "message": "bar"})
self.event.wait() # wait for on_chat_message to set this event
self.sio.call("leave_room", {"room": "room1"})
# We've used multiple inheritance to combine this with HttpUser, so we can also make normal HTTP requests
self.client.get("/")
def on_chat_message(self, event: str, data: str) -> None:
if data.startswith("bar"):
self.event.set()
self.sio.on_message(event, data)
def on_stop(self) -> None:
self.sio.disconnect()
| {
"repo_id": "locustio/locust",
"file_path": "examples/socketio/socketio_ex.py",
"license": "MIT License",
"lines": 45,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
locustio/locust:locust/contrib/socketio.py | from locust import User
from locust.event import EventHook
from typing import Any
import gevent
import socketio
class SocketIOClient(socketio.Client):
def __init__(self, request_event: EventHook, *args, **kwargs):
super().__init__(*args, **kwargs)
self.request_event = request_event
def connect(self, *args, **kwargs):
"""
Wraps :meth:`socketio.Client.connect`.
"""
with self.request_event.measure("WS", "connect") as _:
super().connect(*args, **kwargs)
def send(self, data, namespace=None, callback=None, name="Unnamed") -> None:
"""
Wraps :meth:`socketio.Client.send`.
"""
exception = None
try:
super().send(data, namespace, callback)
except Exception as e:
exception = e
self.request_event.fire(
request_type="WSS",
name=name,
response_time=0,
response_length=len(data or []),
exception=exception,
context={},
)
def emit(self, event, data=None, namespace=None, callback=None) -> None:
"""
Wraps :meth:`socketio.Client.emit`.
"""
exception = None
try:
super().emit(event, data, namespace, callback)
except Exception as e:
exception = e
self.request_event.fire(
request_type="WSE",
name=str(event),
response_time=0,
response_length=len(data or []),
exception=exception,
context={},
)
def call(self, event, data=None, *args, **kwargs):
"""
Wraps :meth:`socketio.Client.call`.
"""
with self.request_event.measure("WSC", event) as _:
return super().call(event, data, *args, **kwargs)
def on_message(self, event: str, data: str) -> None:
"""
This is the default handler for events received.
You can register separate handlers using self.sio.on(event, handler)
Measuring response_time isn't obvious for for WebSockets/SocketIO so we set them to 0.
Sometimes response time can be inferred from the event data (if it contains a timestamp)
or related to a message that you sent. Override this method in your User class to do that.
"""
self.request_event.fire(
request_type="WSR",
name=event,
response_time=0,
response_length=len(data or []),
exception=None,
context={},
)
class SocketIOUser(User):
"""
SocketIOUser creates an instance of :class:`socketio.Client` to log requests.
See example in :gh:`examples/socketio/socketio_ex.py`.
"""
abstract = True
options: dict[str, Any] = {}
"""socketio.Client options, e.g. `{"reconnection_attempts": 1, "reconnection_delay": 2, "logger": True, "engineio_logger": True}`"""
sio: SocketIOClient
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.sio = SocketIOClient(self.environment.events.request, **self.options)
self.sio_greenlet = gevent.spawn(self.sio.wait)
self.sio.on("*", self.sio.on_message)
| {
"repo_id": "locustio/locust",
"file_path": "locust/contrib/socketio.py",
"license": "MIT License",
"lines": 85,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
locustio/locust:locust/test/test_socketio.py | from locust.contrib.socketio import SocketIOUser
import time
from unittest.mock import patch
import socketio
from .testcases import LocustTestCase
class TestSocketIOUser(LocustTestCase):
def test_everything(self):
def connect(self, *args, **kwargs): ...
def emit(self, *args, **kwargs): ...
def call(self, event, data, *args, **kwargs):
if event == "error":
raise Exception("Simulated error")
time.sleep(0.001)
return {"mock": "response"}
with patch.multiple(socketio.Client, connect=connect, emit=emit, call=call) as _mocks:
user = SocketIOUser(self.environment)
user.sio.connect("http://fake-url.com")
user.sio.emit("test_event", {"data": "test_data"})
resp = user.sio.call("test_2", {"data": "test_data"})
user.sio.call("error", {})
self.assertEqual(1, self.environment.stats.entries[("connect", "WS")].num_requests)
self.assertEqual(1, self.environment.stats.entries[("test_event", "WSE")].num_requests)
self.assertEqual(1, self.environment.stats.entries[("test_2", "WSC")].num_requests)
assert isinstance(resp, dict)
self.assertDictEqual({"mock": "response"}, resp)
self.assertLess(0.001, self.environment.stats.entries[("test_2", "WSC")].avg_response_time)
self.assertEqual(1, self.environment.stats.entries[("error", "WSC")].num_requests)
self.assertEqual(1, len(self.environment.stats.errors))
| {
"repo_id": "locustio/locust",
"file_path": "locust/test/test_socketio.py",
"license": "MIT License",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
locustio/locust:examples/milvus/locustfile.py | """
Minimal example demonstrating Milvus load testing with Locust.
"""
from locust import between, task
from locust.contrib.milvus import MilvusUser
import random
from pymilvus import CollectionSchema, DataType, FieldSchema
from pymilvus.milvus_client import IndexParams
class SimpleMilvusUser(MilvusUser):
"""Minimal Milvus user for load testing."""
wait_time = between(1, 3)
def on_start(self):
"""Generate test vectors."""
self.dimension = 128
self.test_vectors = [[random.random() for _ in range(self.dimension)] for _ in range(10)]
def __init__(self, environment):
# Define collection schema
schema = CollectionSchema(
fields=[
FieldSchema(name="id", dtype=DataType.INT64, is_primary=True),
FieldSchema(name="vector", dtype=DataType.FLOAT_VECTOR, dim=128),
FieldSchema(name="name", dtype=DataType.VARCHAR, max_length=50),
],
description="Test collection",
)
# Define index parameters
index_params = IndexParams()
index_params.add_index(
field_name="vector",
index_type="IVF_FLAT",
metric_type="L2",
)
super().__init__(
environment,
uri=environment.host,
collection_name="load_test_collection",
schema=schema,
index_params=index_params,
enable_dynamic_field=True,
num_shards=2,
consistency_level="Eventually",
)
@task(3)
def insert_data(self):
"""Insert data into Milvus."""
data = [
{
"id": random.randint(1, 10000),
"vector": random.choice(self.test_vectors),
"name": f"item_{random.randint(1, 1000)}",
}
]
self.insert(data)
@task(5)
def search_vectors(self):
"""Search for similar vectors."""
search_vector = random.choice(self.test_vectors)
self.search(data=[search_vector], anns_field="vector", limit=5)
@task(2)
def query_data(self):
"""Query data by ID."""
query_id = random.randint(1, 10000)
self.query(filter=f"id == {query_id}")
@task(1)
def delete_data(self):
"""Delete data."""
delete_id = random.randint(1, 10000)
self.delete(filter=f"id == {delete_id}")
| {
"repo_id": "locustio/locust",
"file_path": "examples/milvus/locustfile.py",
"license": "MIT License",
"lines": 68,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
locustio/locust:locust/contrib/milvus.py | import gevent.monkey
gevent.monkey.patch_all()
import grpc.experimental.gevent as grpc_gevent
grpc_gevent.init_gevent()
from locust import User, events
import time
from abc import ABC, abstractmethod
from typing import Any
from pymilvus import CollectionSchema, MilvusClient
from pymilvus.milvus_client import IndexParams
class BaseClient(ABC):
@abstractmethod
def close(self) -> None:
pass
@abstractmethod
def create_collection(self, schema, index_params, **kwargs) -> None:
pass
@abstractmethod
def insert(self, data) -> dict[str, Any]:
pass
@abstractmethod
def upsert(self, data) -> dict[str, Any]:
pass
@abstractmethod
def search(
self,
data,
anns_field,
limit,
filter="",
search_params=None,
output_fields=None,
calculate_recall=False,
ground_truth=None,
) -> dict[str, Any]:
pass
@abstractmethod
def hybrid_search(self, reqs, ranker, limit, output_fields=None) -> dict[str, Any]:
pass
@abstractmethod
def query(self, filter, output_fields=None) -> dict[str, Any]:
pass
@abstractmethod
def delete(self, filter) -> dict[str, Any]:
pass
class MilvusV2Client(BaseClient):
"""Milvus v2 Python SDK Client Wrapper"""
def __init__(self, uri, collection_name, token="root:Milvus", db_name="default", timeout=60):
self.uri = uri
self.collection_name = collection_name
self.token = token
self.db_name = db_name
self.timeout = timeout
# Initialize MilvusClient v2
self.client = MilvusClient(
uri=self.uri,
token=self.token,
db_name=self.db_name,
timeout=self.timeout,
)
def close(self):
self.client.close()
def create_collection(self, schema, index_params, **kwargs):
self.client.create_collection(
collection_name=self.collection_name,
schema=schema,
index_params=index_params,
**kwargs,
)
def insert(self, data):
start = time.time()
try:
result = self.client.insert(collection_name=self.collection_name, data=data)
total_time = (time.time() - start) * 1000
return {"success": True, "response_time": total_time, "result": result}
except Exception as e:
return {
"success": False,
"response_time": (time.time() - start) * 1000,
"exception": e,
}
def upsert(self, data):
start = time.time()
try:
result = self.client.upsert(collection_name=self.collection_name, data=data)
total_time = (time.time() - start) * 1000
return {"success": True, "response_time": total_time, "result": result}
except Exception as e:
return {
"success": False,
"response_time": (time.time() - start) * 1000,
"exception": e,
}
def search(
self,
data,
anns_field,
limit,
filter="",
search_params=None,
output_fields=None,
calculate_recall=False,
ground_truth=None,
):
if output_fields is None:
output_fields = ["id"]
start = time.time()
try:
result = self.client.search(
collection_name=self.collection_name,
data=data,
anns_field=anns_field,
filter=filter,
limit=limit,
search_params=search_params,
output_fields=output_fields,
)
total_time = (time.time() - start) * 1000
empty = len(result) == 0 or all(len(r) == 0 for r in result)
# Prepare base result
search_result = {
"success": not empty,
"response_time": total_time,
"empty": empty,
"result": result,
}
# Calculate recall if requested
if calculate_recall and ground_truth is not None and not empty:
recall_value = self.get_recall(result, ground_truth, limit)
search_result["recall"] = recall_value
return search_result
except Exception as e:
return {
"success": False,
"response_time": (time.time() - start) * 1000,
"exception": e,
}
def hybrid_search(self, reqs, ranker, limit, output_fields=None):
if output_fields is None:
output_fields = ["id"]
start = time.time()
try:
result = self.client.hybrid_search(
collection_name=self.collection_name,
reqs=reqs,
ranker=ranker,
limit=limit,
output_fields=output_fields,
timeout=self.timeout,
)
total_time = (time.time() - start) * 1000
empty = len(result) == 0 or all(len(r) == 0 for r in result)
# Prepare base result
search_result = {
"success": not empty,
"response_time": total_time,
"empty": empty,
"result": result,
}
return search_result
except Exception as e:
return {
"success": False,
"response_time": (time.time() - start) * 1000,
"exception": e,
}
@staticmethod
def get_recall(search_results, ground_truth, limit=None):
"""Calculate recall for V2 client search results."""
try:
# Extract IDs from V2 search results
retrieved_ids = []
if isinstance(search_results, list) and len(search_results) > 0:
# search_results[0] contains the search results for the first query
for hit in search_results[0] if isinstance(search_results[0], list) else search_results:
if isinstance(hit, dict) and "id" in hit:
retrieved_ids.append(hit["id"])
elif hasattr(hit, "get"):
retrieved_ids.append(hit.get("id"))
# Apply limit if specified
if limit is None:
limit = len(retrieved_ids)
if len(ground_truth) < limit:
raise ValueError(f"Ground truth length is less than limit: {len(ground_truth)} < {limit}")
# Calculate recall
ground_truth_set = set(ground_truth[:limit])
retrieved_set = set(retrieved_ids)
intersect = len(ground_truth_set.intersection(retrieved_set))
return intersect / len(ground_truth_set)
except Exception:
return 0.0
def query(self, filter, output_fields=None):
if output_fields is None:
output_fields = ["id"]
start = time.time()
try:
result = self.client.query(
collection_name=self.collection_name,
filter=filter,
output_fields=output_fields,
)
total_time = (time.time() - start) * 1000
empty = len(result) == 0
return {
"success": not empty,
"response_time": total_time,
"empty": empty,
"result": result,
}
except Exception as e:
return {
"success": False,
"response_time": (time.time() - start) * 1000,
"exception": e,
}
def delete(self, filter):
start = time.time()
try:
result = self.client.delete(collection_name=self.collection_name, filter=filter)
total_time = (time.time() - start) * 1000
return {"success": True, "response_time": total_time, "result": result}
except Exception as e:
return {
"success": False,
"response_time": (time.time() - start) * 1000,
"exception": e,
}
# ----------------------------------
# Locust User wrapper
# ----------------------------------
class MilvusUser(User):
"""Locust User implementation for Milvus operations.
This class wraps the MilvusV2Client implementation and translates
client method results into Locust request events so that performance
statistics are collected properly.
Parameters
----------
host : str
Milvus server URI, e.g. ``"http://localhost:19530"``.
collection_name : str
The name of the collection to operate on.
**client_kwargs
Additional keyword arguments forwarded to the client.
"""
abstract = True
def __init__(
self,
environment,
uri: str = "http://localhost:19530",
token: str = "root:Milvus",
collection_name: str = "test_collection",
db_name: str = "default",
timeout: int = 60,
schema: CollectionSchema | None = None,
index_params: IndexParams | None = None,
**kwargs, # enable_dynamic_field, num_shards, consistency_level etc. ref: https://milvus.io/api-reference/pymilvus/v2.6.x/MilvusClient/Collections/create_collection.md
):
super().__init__(environment)
if uri is None:
raise ValueError("'uri' must be provided for MilvusUser")
if collection_name is None:
raise ValueError("'collection_name' must be provided for MilvusUser")
self.client_type = "milvus"
self.client = MilvusV2Client(
uri=uri,
token=token,
collection_name=collection_name,
db_name=db_name,
timeout=timeout,
)
if schema is not None:
self.client.create_collection(schema=schema, index_params=index_params, **kwargs)
@staticmethod
def _fire_event(request_type: str, name: str, result: dict[str, Any]):
"""Emit a Locust request event from a Milvus client result dict."""
response_time = int(result.get("response_time", 0))
events.request.fire(
request_type=f"{request_type}",
name=name,
response_time=response_time,
response_length=0,
exception=result.get("exception"),
)
@staticmethod
def _fire_recall_event(request_type: str, name: str, result: dict[str, Any]):
"""Emit a Locust request event for recall metric using recall value instead of response time."""
recall_value = result.get("recall", 0.0)
# Use recall value as response_time for metric display (scaled by 100 for better visualization) percentage
response_time_as_recall = int(recall_value * 100)
events.request.fire(
request_type=f"{request_type}",
name=name,
response_time=response_time_as_recall,
response_length=result.get("retrieved_count", 0),
exception=result.get("exception"),
)
def insert(self, data):
result = self.client.insert(data)
self._fire_event(self.client_type, "insert", result)
return result
def upsert(self, data):
result = self.client.upsert(data)
self._fire_event(self.client_type, "upsert", result)
return result
def search(
self,
data,
anns_field,
limit,
filter="",
search_params=None,
output_fields=None,
calculate_recall=False,
ground_truth=None,
):
result = self.client.search(
data,
anns_field,
limit,
filter=filter,
search_params=search_params,
output_fields=output_fields,
calculate_recall=calculate_recall,
ground_truth=ground_truth,
)
# Fire search event
self._fire_event(self.client_type, "search", result)
# Fire recall event if recall was calculated
if calculate_recall and "recall" in result:
self._fire_recall_event(self.client_type, "recall", result)
return result
def hybrid_search(self, reqs, ranker, limit, output_fields=None):
result = self.client.hybrid_search(reqs, ranker, limit, output_fields)
self._fire_event(self.client_type, "hybrid_search", result)
return result
def query(self, filter, output_fields=None):
result = self.client.query(
filter=filter,
output_fields=output_fields,
)
self._fire_event(self.client_type, "query", result)
return result
def delete(self, filter):
result = self.client.delete(filter)
self._fire_event(self.client_type, "delete", result)
return result
def on_stop(self):
self.client.close()
| {
"repo_id": "locustio/locust",
"file_path": "locust/contrib/milvus.py",
"license": "MIT License",
"lines": 348,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
locustio/locust:locust/test/subprocess_utils.py | import os
import shlex
import signal
import subprocess
import time
from typing import IO
import gevent
import pytest
from .util import IS_WINDOWS
class TestProcess:
"""
Wraps a subprocess for testing purposes.
"""
__test__ = False
def __init__(
self,
command: str,
*,
extra_env: dict[str, str] = {},
expect_return_code: int | None = 0,
sigint_on_exit: bool = True,
expect_timeout: int = 5,
use_pty: bool = False,
join_timeout: int = 1,
):
self.proc: subprocess.Popen[str]
self._terminated = False
self._failed = False
self.expect_return_code = expect_return_code
self.expect_timeout = expect_timeout
self.sigint_on_exit = sigint_on_exit
self.join_timeout = join_timeout
self.stderr_output: list[str] = []
self.stdout_output: list[str] = []
self._stderr_cursor: int = 0 # Used for stateful log matching
self._stdout_cursor: int = 0
self.use_pty: bool = use_pty
# Create PTY pair
if self.use_pty:
if IS_WINDOWS:
raise Exception("termios doesn't exist on windows, and thus we cannot import pty")
import pty
self.stdin_m, self.stdin_s = pty.openpty()
self.proc = subprocess.Popen(
shlex.split(command) if not IS_WINDOWS else command.split(" "),
env={"PYTHONUNBUFFERED": "1", **os.environ, **extra_env},
stdin=self.stdin_s if self.use_pty else None,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
text=True,
)
def _consume_output(source: IO[str], to: list[str]):
for line in iter(source.readline, ""):
line = line.rstrip("\n")
to.append(line)
self.stdout_reader = gevent.spawn(_consume_output, self.proc.stdout, self.stdout_output)
self.stderr_reader = gevent.spawn(_consume_output, self.proc.stderr, self.stderr_output)
def on_fail(self, reason: str = ""):
__tracebackhide__ = True
if self._failed:
return
self._failed = True
for line in self.stderr_output:
print(line)
pytest.fail(reason)
def __enter__(self) -> "TestProcess":
return self
def __exit__(self, *_) -> None:
self.close()
def close(self) -> None:
__tracebackhide__ = True
if self.use_pty:
os.close(self.stdin_m)
os.close(self.stdin_s)
try:
if self.sigint_on_exit and not self._terminated:
self.terminate()
proc_return_code = self.proc.wait(timeout=self.join_timeout)
# Locust does not perform a graceful shutdown on Windows since we send SIGTERM
if not IS_WINDOWS and self.expect_return_code is not None and proc_return_code != self.expect_return_code:
self.on_fail(
f"Process exited with return code {proc_return_code}. Expected {self.expect_return_code} ({proc_return_code} != {self.expect_return_code})"
)
except subprocess.TimeoutExpired:
self.proc.kill()
self.proc.wait()
self.on_fail(f"Process took more than {self.join_timeout} seconds to terminate.")
self.stdout_reader.join(timeout=self.join_timeout)
self.stderr_reader.join(timeout=self.join_timeout)
# Check output logs from last found (stateful)
def expect(self, to_expect, *, stream="stderr"):
__tracebackhide__ = True
if stream == "stdout":
buffer = self.stdout_output
cursor = self._stdout_cursor
else:
buffer = self.stderr_output
cursor = self._stderr_cursor
start_time = time.time()
while time.time() - start_time < self.expect_timeout:
new_lines = buffer[cursor:]
for idx, line in enumerate(new_lines):
if to_expect in line:
cursor += idx + 1
return
time.sleep(0.05)
output = "\n".join(buffer[-5:])
self.on_fail(f"Timed out waiting for '{to_expect}' after {self.expect_timeout} seconds. Got\n{output}")
# Check all output logs (stateless)
def expect_any(self, to_expect, *, stream="stderr"):
__tracebackhide__ = True
if stream == "stdout":
buffer = self.stdout_output
else:
buffer = self.stderr_output
if any(to_expect in line for line in buffer):
return
output = "\n".join(buffer[-5:])
self.on_fail(f"Did not see expected message: '{to_expect}'. Got\n{output}")
def not_expect_any(self, to_not_expect, *, stream="stderr"):
__tracebackhide__ = True
if stream == "stdout":
buffer = self.stdout_output
else:
buffer = self.stderr_output
if any(to_not_expect in line for line in buffer):
self.on_fail(f"Found unexpected message: '{to_not_expect}'.")
def send_input(self, content: str):
if self.use_pty:
os.write(self.stdin_m, content.encode())
else:
raise Exception("Cannot send input to proccess without pty.")
def terminate(self):
if IS_WINDOWS:
# Signals are hard on Windows
sig = signal.SIGTERM
else:
sig = signal.SIGINT
self.proc.send_signal(sig)
self._terminated = True
| {
"repo_id": "locustio/locust",
"file_path": "locust/test/subprocess_utils.py",
"license": "MIT License",
"lines": 140,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.