sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
letta-ai/letta:tests/test_provider_trace_agents.py | """
Unit tests for provider trace telemetry across agent versions and adapters.
Tests verify that telemetry context is correctly passed through:
- Tool generation endpoint
- LettaAgent (v1), LettaAgentV2, LettaAgentV3
- Streaming and non-streaming paths
- Different stream adapters
"""
import uuid
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from letta.schemas.llm_config import LLMConfig
@pytest.fixture
def mock_llm_config():
    """Provide an OpenAI-flavored LLMConfig for tests."""
    config_kwargs = {
        "model": "gpt-4o-mini",
        "model_endpoint_type": "openai",
        "model_endpoint": "https://api.openai.com/v1",
        "context_window": 8000,
    }
    return LLMConfig(**config_kwargs)
class TestToolGenerationTelemetry:
    """Tests for tool generation endpoint telemetry.

    Both tests patch ``LLMClient.create`` so the endpoint runs against a
    mocked client, capture the kwargs handed to ``set_telemetry_context``,
    and assert on the captured telemetry fields.
    """

    # Canned tool-call arguments returned by the mocked chat completion so the
    # endpoint's response parsing has something well-formed to consume.
    _TOOL_ARGS = '{"raw_source_code": "def test(): pass", "sample_args_json": "{}", "pip_requirements_json": "{}"}'

    @staticmethod
    def _make_actor():
        """Build a throwaway User actor with unique ids."""
        from letta.schemas.user import User

        return User(
            id=f"user-{uuid.uuid4()}",
            organization_id=f"org-{uuid.uuid4()}",
            name="test_user",
        )

    @classmethod
    def _make_mock_client(cls, capture_telemetry):
        """Build a mock LLM client whose set_telemetry_context is `capture_telemetry`."""
        mock_client = MagicMock()
        mock_client.set_telemetry_context = capture_telemetry
        mock_client.build_request_data = MagicMock(return_value={})
        mock_client.request_async_with_telemetry = AsyncMock(return_value={})
        mock_client.convert_response_to_chat_completion = AsyncMock(
            return_value=MagicMock(
                choices=[
                    MagicMock(
                        message=MagicMock(
                            tool_calls=[MagicMock(function=MagicMock(arguments=cls._TOOL_ARGS))],
                            content=None,
                        )
                    )
                ]
            )
        )
        return mock_client

    async def _run_generate_tool(self, mock_llm_config, prompt, tool_name, schema):
        """Run the generate_tool endpoint against mocks; return captured telemetry kwargs."""
        from letta.llm_api.llm_client import LLMClient

        mock_actor = self._make_actor()
        captured_telemetry = {}

        def capture_telemetry(**kwargs):
            captured_telemetry.update(kwargs)

        with patch.object(LLMClient, "create") as mock_create:
            mock_create.return_value = self._make_mock_client(capture_telemetry)
            from letta.server.rest_api.routers.v1.tools import GenerateToolInput, generate_tool_from_prompt

            mock_server = MagicMock()
            mock_server.user_manager.get_actor_or_default_async = AsyncMock(return_value=mock_actor)
            mock_server.get_llm_config_from_handle_async = AsyncMock(return_value=mock_llm_config)
            mock_headers = MagicMock()
            mock_headers.actor_id = mock_actor.id
            request = GenerateToolInput(prompt=prompt, tool_name=tool_name, validation_errors=[])
            with patch("letta.server.rest_api.routers.v1.tools.derive_openai_json_schema") as mock_schema:
                mock_schema.return_value = schema
                try:
                    await generate_tool_from_prompt(request=request, server=mock_server, headers=mock_headers)
                except Exception:
                    # Best-effort: the telemetry context is set before any
                    # downstream failure, so later errors don't matter here.
                    pass
        return captured_telemetry

    @pytest.mark.asyncio
    async def test_generate_tool_sets_call_type(self, mock_llm_config):
        """Verify generate_tool endpoint sets call_type='tool_generation'."""
        captured = await self._run_generate_tool(
            mock_llm_config,
            prompt="Create a function that adds two numbers",
            tool_name="add_numbers",
            schema={"name": "add_numbers", "parameters": {}},
        )
        assert captured.get("call_type") == "tool_generation"

    @pytest.mark.asyncio
    async def test_generate_tool_has_no_agent_context(self, mock_llm_config):
        """Verify generate_tool doesn't have agent_id since it's not agent-bound."""
        captured = await self._run_generate_tool(
            mock_llm_config,
            prompt="Create a function",
            tool_name="test_func",
            schema={"name": "test_func", "parameters": {}},
        )
        assert captured.get("agent_id") is None
        assert captured.get("step_id") is None
        assert captured.get("run_id") is None
class TestLLMClientTelemetryContext:
    """Tests for LLMClient telemetry context methods."""

    def test_llm_client_has_set_telemetry_context_method(self):
        """Verify LLMClient exposes set_telemetry_context."""
        from letta.llm_api.llm_client import LLMClient

        client = LLMClient.create(provider_type="openai", put_inner_thoughts_first=True)
        method = getattr(client, "set_telemetry_context", None)
        assert method is not None
        assert callable(method)

    def test_llm_client_set_telemetry_context_accepts_all_fields(self):
        """Verify set_telemetry_context accepts all telemetry fields."""
        from letta.llm_api.llm_client import LLMClient

        client = LLMClient.create(provider_type="openai", put_inner_thoughts_first=True)
        telemetry = {
            "agent_id": f"agent-{uuid.uuid4()}",
            "agent_tags": ["tag1", "tag2"],
            "run_id": f"run-{uuid.uuid4()}",
            "step_id": f"step-{uuid.uuid4()}",
            "call_type": "summarization",
        }
        # Should accept the full set of telemetry kwargs without raising.
        client.set_telemetry_context(**telemetry)
class TestAdapterTelemetryAttributes:
    """Tests for adapter telemetry attribute support.

    Each adapter class is constructed with the same telemetry kwargs and must
    expose them verbatim as instance attributes.
    """

    @staticmethod
    def _build_adapter(adapter_cls, mock_llm_config, tags):
        """Construct `adapter_cls` with fresh telemetry values; return (adapter, expected)."""
        from letta.llm_api.llm_client import LLMClient
        from letta.schemas.enums import LLMCallType

        client = LLMClient.create(provider_type="openai", put_inner_thoughts_first=True)
        expected = {
            "agent_id": f"agent-{uuid.uuid4()}",
            "agent_tags": tags,
            "run_id": f"run-{uuid.uuid4()}",
        }
        adapter = adapter_cls(
            llm_client=client,
            llm_config=mock_llm_config,
            call_type=LLMCallType.agent_step,
            **expected,
        )
        return adapter, expected

    def test_base_adapter_has_telemetry_attributes(self, mock_llm_config):
        """Verify base LettaLLMAdapter has telemetry attributes."""
        from letta.adapters.letta_llm_adapter import LettaLLMAdapter
        from letta.schemas.enums import LLMCallType

        # Minimal concrete subclass: the base class is abstract on invoke_llm.
        class TestAdapter(LettaLLMAdapter):
            async def invoke_llm(self, *args, **kwargs):
                pass

        adapter, expected = self._build_adapter(TestAdapter, mock_llm_config, ["test-tag"])
        assert adapter.agent_id == expected["agent_id"]
        assert adapter.agent_tags == expected["agent_tags"]
        assert adapter.run_id == expected["run_id"]
        assert adapter.call_type == LLMCallType.agent_step

    def test_request_adapter_inherits_telemetry_attributes(self, mock_llm_config):
        """Verify LettaLLMRequestAdapter inherits telemetry attributes."""
        from letta.adapters.letta_llm_request_adapter import LettaLLMRequestAdapter

        adapter, expected = self._build_adapter(LettaLLMRequestAdapter, mock_llm_config, ["request-tag"])
        assert adapter.agent_id == expected["agent_id"]
        assert adapter.agent_tags == expected["agent_tags"]
        assert adapter.run_id == expected["run_id"]

    def test_stream_adapter_inherits_telemetry_attributes(self, mock_llm_config):
        """Verify LettaLLMStreamAdapter inherits telemetry attributes."""
        from letta.adapters.letta_llm_stream_adapter import LettaLLMStreamAdapter

        adapter, expected = self._build_adapter(LettaLLMStreamAdapter, mock_llm_config, ["stream-tag"])
        assert adapter.agent_id == expected["agent_id"]
        assert adapter.agent_tags == expected["agent_tags"]
        assert adapter.run_id == expected["run_id"]

    def test_request_and_stream_adapters_have_consistent_interface(self, mock_llm_config):
        """Verify both adapter types have the same telemetry interface."""
        from letta.adapters.letta_llm_request_adapter import LettaLLMRequestAdapter
        from letta.adapters.letta_llm_stream_adapter import LettaLLMStreamAdapter
        from letta.llm_api.llm_client import LLMClient
        from letta.schemas.enums import LLMCallType

        client = LLMClient.create(provider_type="openai", put_inner_thoughts_first=True)
        adapters = {
            "LettaLLMRequestAdapter": LettaLLMRequestAdapter(llm_client=client, llm_config=mock_llm_config, call_type=LLMCallType.agent_step),
            "LettaLLMStreamAdapter": LettaLLMStreamAdapter(llm_client=client, llm_config=mock_llm_config, call_type=LLMCallType.agent_step),
        }
        for name, adapter in adapters.items():
            for attr in ["agent_id", "agent_tags", "run_id", "call_type"]:
                assert hasattr(adapter, attr), f"{name} missing {attr}"
class TestSummarizerTelemetry:
    """Tests for Summarizer class telemetry context."""

    def test_summarizer_stores_telemetry_context(self):
        """Verify Summarizer stores telemetry context from constructor."""
        from letta.schemas.user import User
        from letta.services.summarizer.enums import SummarizationMode
        from letta.services.summarizer.summarizer import Summarizer

        mock_actor = User(
            id=f"user-{uuid.uuid4()}",
            organization_id=f"org-{uuid.uuid4()}",
            name="test_user",
        )
        agent_id = f"agent-{uuid.uuid4()}"
        run_id = f"run-{uuid.uuid4()}"
        step_id = f"step-{uuid.uuid4()}"
        # Managers are plain MagicMocks: this is a constructor-only test, so
        # nothing on them should ever be invoked.
        summarizer = Summarizer(
            mode=SummarizationMode.PARTIAL_EVICT_MESSAGE_BUFFER,
            summarizer_agent=None,
            message_buffer_limit=100,
            message_buffer_min=10,
            partial_evict_summarizer_percentage=0.5,
            agent_manager=MagicMock(),
            message_manager=MagicMock(),
            actor=mock_actor,
            agent_id=agent_id,
            run_id=run_id,
            step_id=step_id,
        )
        # Constructor telemetry args must be stored verbatim on the instance.
        assert summarizer.agent_id == agent_id
        assert summarizer.run_id == run_id
        assert summarizer.step_id == step_id

    @pytest.mark.asyncio
    async def test_summarize_method_accepts_runtime_telemetry(self):
        """Verify summarize() method accepts runtime run_id/step_id."""
        from letta.schemas.enums import MessageRole
        from letta.schemas.message import Message
        from letta.schemas.user import User
        from letta.services.summarizer.enums import SummarizationMode
        from letta.services.summarizer.summarizer import Summarizer

        mock_actor = User(
            id=f"user-{uuid.uuid4()}",
            organization_id=f"org-{uuid.uuid4()}",
            name="test_user",
        )
        agent_id = f"agent-{uuid.uuid4()}"
        mock_messages = [
            Message(
                id=f"message-{uuid.uuid4()}",
                role=MessageRole.user,
                content=[{"type": "text", "text": "Hello"}],
                agent_id=agent_id,
            )
        ]
        summarizer = Summarizer(
            mode=SummarizationMode.PARTIAL_EVICT_MESSAGE_BUFFER,
            summarizer_agent=None,
            message_buffer_limit=100,
            message_buffer_min=10,
            partial_evict_summarizer_percentage=0.5,
            agent_manager=MagicMock(),
            message_manager=MagicMock(),
            actor=mock_actor,
            agent_id=agent_id,
        )
        run_id = f"run-{uuid.uuid4()}"
        step_id = f"step-{uuid.uuid4()}"
        # NOTE(review): with force=False and a single message this presumably
        # short-circuits before any LLM call — confirm; the test only exercises
        # that summarize() accepts run_id/step_id at call time.
        result = await summarizer.summarize(
            in_context_messages=mock_messages,
            new_letta_messages=[],
            force=False,
            run_id=run_id,
            step_id=step_id,
        )
        assert result is not None
class TestAgentAdapterInstantiation:
    """Tests verifying agents instantiate adapters with telemetry context."""

    def test_agent_v2_creates_summarizer_with_agent_id(self, mock_llm_config):
        """Verify LettaAgentV2 creates Summarizer with correct agent_id."""
        from letta.agents.letta_agent_v2 import LettaAgentV2
        from letta.schemas.agent import AgentState, AgentType
        from letta.schemas.embedding_config import EmbeddingConfig
        from letta.schemas.memory import Memory
        from letta.schemas.user import User

        mock_actor = User(
            id=f"user-{uuid.uuid4()}",
            organization_id=f"org-{uuid.uuid4()}",
            name="test_user",
        )
        agent_id = f"agent-{uuid.uuid4()}"
        # Minimal-but-valid AgentState with empty tool/source/block lists.
        agent_state = AgentState(
            id=agent_id,
            name="test_agent",
            agent_type=AgentType.letta_v1_agent,
            llm_config=mock_llm_config,
            embedding_config=EmbeddingConfig.default_config(provider="openai"),
            tags=["test"],
            memory=Memory(blocks=[]),
            system="You are a helpful assistant.",
            tools=[],
            sources=[],
            blocks=[],
        )
        # NOTE(review): constructs a real LettaAgentV2 — assumes its __init__
        # builds a Summarizer eagerly without doing I/O; confirm if this flakes.
        agent = LettaAgentV2(agent_state=agent_state, actor=mock_actor)
        assert agent.summarizer.agent_id == agent_id
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/test_provider_trace_agents.py",
"license": "Apache License 2.0",
"lines": 336,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/test_provider_trace_summarization.py | """
Unit tests for summarization provider trace telemetry context.
These tests verify that summarization LLM calls correctly pass telemetry context
(agent_id, agent_tags, run_id, step_id) to the provider trace system.
"""
import uuid
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from letta.schemas.agent import AgentState
from letta.schemas.embedding_config import EmbeddingConfig
from letta.schemas.enums import MessageRole
from letta.schemas.llm_config import LLMConfig
from letta.schemas.message import Message
from letta.schemas.user import User
from letta.services.summarizer import summarizer_all, summarizer_sliding_window
from letta.services.summarizer.enums import SummarizationMode
from letta.services.summarizer.summarizer import Summarizer, simple_summary
from letta.services.summarizer.summarizer_config import CompactionSettings
@pytest.fixture
def mock_actor():
    """Create a mock user/actor."""
    user_id = f"user-{uuid.uuid4()}"
    org_id = f"org-{uuid.uuid4()}"
    return User(id=user_id, organization_id=org_id, name="test_user")
@pytest.fixture
def mock_llm_config():
    """Provide an OpenAI-flavored LLMConfig for tests."""
    openai_config = LLMConfig(
        model="gpt-4o-mini",
        model_endpoint="https://api.openai.com/v1",
        model_endpoint_type="openai",
        context_window=8000,
    )
    return openai_config
@pytest.fixture
def mock_agent_state(mock_llm_config):
    """Create a mock agent state."""
    # Memory is mocked out; tests only rely on its compile() return value.
    fake_memory = MagicMock(compile=MagicMock(return_value="Memory content"))
    return AgentState(
        id=f"agent-{uuid.uuid4()}",
        name="test_agent",
        llm_config=mock_llm_config,
        embedding_config=EmbeddingConfig.default_config(provider="openai"),
        tags=["env:test", "team:ml"],
        memory=fake_memory,
        message_ids=[],
        tool_ids=[],
        system="You are a helpful assistant.",
    )
@pytest.fixture
def mock_messages():
    """Create mock messages for summarization.

    Returns ten messages with alternating user/assistant roles (even indexes
    are user turns), all bound to one freshly generated agent id.
    """
    agent_id = f"agent-{uuid.uuid4()}"
    # Comprehension replaces the original append loop (same messages, idiomatic).
    return [
        Message(
            id=f"message-{uuid.uuid4()}",
            role=MessageRole.user if i % 2 == 0 else MessageRole.assistant,
            content=[{"type": "text", "text": f"Message content {i}"}],
            agent_id=agent_id,
        )
        for i in range(10)
    ]
class TestSimpleSummaryTelemetryContext:
    """Tests for simple_summary telemetry context passing."""

    @pytest.mark.asyncio
    async def test_simple_summary_accepts_telemetry_params(self, mock_messages, mock_llm_config, mock_actor):
        """Verify simple_summary accepts all telemetry context parameters."""
        agent_id = f"agent-{uuid.uuid4()}"
        agent_tags = ["tag1", "tag2"]
        run_id = f"run-{uuid.uuid4()}"
        step_id = f"step-{uuid.uuid4()}"
        # Patch the LLMClient factory referenced inside the summarizer module so
        # no real provider call happens; only the telemetry hand-off is observed.
        with patch("letta.services.summarizer.summarizer.LLMClient") as mock_client_class:
            mock_client = MagicMock()
            mock_client.set_telemetry_context = MagicMock()
            mock_client.send_llm_request_async = AsyncMock(return_value=MagicMock(content="Summary of conversation"))
            mock_client_class.create.return_value = mock_client
            try:
                await simple_summary(
                    messages=mock_messages,
                    llm_config=mock_llm_config,
                    actor=mock_actor,
                    agent_id=agent_id,
                    agent_tags=agent_tags,
                    run_id=run_id,
                    step_id=step_id,
                )
            except Exception:
                # Best-effort: the telemetry context is asserted below even if
                # later stages of simple_summary fail against the mocks.
                pass
            # The client must receive exactly one telemetry context carrying
            # every field plus the summarization call type.
            mock_client.set_telemetry_context.assert_called_once()
            call_kwargs = mock_client.set_telemetry_context.call_args[1]
            assert call_kwargs["agent_id"] == agent_id
            assert call_kwargs["agent_tags"] == agent_tags
            assert call_kwargs["run_id"] == run_id
            assert call_kwargs["step_id"] == step_id
            assert call_kwargs["call_type"] == "summarization"
class TestSummarizeAllTelemetryContext:
    """Tests for summarize_all telemetry context passing."""

    @pytest.fixture
    def mock_compaction_settings(self):
        """Create mock compaction settings."""
        return CompactionSettings(model="openai/gpt-4o-mini")

    @pytest.mark.asyncio
    async def test_summarize_all_passes_telemetry_to_simple_summary(
        self, mock_messages, mock_llm_config, mock_actor, mock_compaction_settings
    ):
        """Verify summarize_all passes telemetry context to simple_summary."""
        agent_id = f"agent-{uuid.uuid4()}"
        agent_tags = ["env:prod", "team:core"]
        run_id = f"run-{uuid.uuid4()}"
        step_id = f"step-{uuid.uuid4()}"
        captured_kwargs = {}

        # Replacement for simple_summary that records the kwargs it receives.
        async def capture_simple_summary(*args, **kwargs):
            captured_kwargs.update(kwargs)
            return "Mocked summary"

        # Patch on the summarizer_all module object so its local reference to
        # simple_summary is replaced, not the original definition.
        with patch.object(summarizer_all, "simple_summary", new=capture_simple_summary):
            await summarizer_all.summarize_all(
                actor=mock_actor,
                llm_config=mock_llm_config,
                summarizer_config=mock_compaction_settings,
                in_context_messages=mock_messages,
                agent_id=agent_id,
                agent_tags=agent_tags,
                run_id=run_id,
                step_id=step_id,
            )
        assert captured_kwargs.get("agent_id") == agent_id
        assert captured_kwargs.get("agent_tags") == agent_tags
        assert captured_kwargs.get("run_id") == run_id
        assert captured_kwargs.get("step_id") == step_id

    @pytest.mark.asyncio
    async def test_summarize_all_without_telemetry_params(self, mock_messages, mock_llm_config, mock_actor, mock_compaction_settings):
        """Verify summarize_all works without telemetry params (backwards compatible)."""
        captured_kwargs = {}

        async def capture_simple_summary(*args, **kwargs):
            captured_kwargs.update(kwargs)
            return "Mocked summary"

        with patch.object(summarizer_all, "simple_summary", new=capture_simple_summary):
            await summarizer_all.summarize_all(
                actor=mock_actor,
                llm_config=mock_llm_config,
                summarizer_config=mock_compaction_settings,
                in_context_messages=mock_messages,
            )
        # With no telemetry supplied, every telemetry field forwarded to
        # simple_summary must be absent or None.
        assert captured_kwargs.get("agent_id") is None
        assert captured_kwargs.get("agent_tags") is None
        assert captured_kwargs.get("run_id") is None
        assert captured_kwargs.get("step_id") is None
class TestSummarizeSlidingWindowTelemetryContext:
    """Tests for summarize_via_sliding_window telemetry context passing."""

    @pytest.fixture
    def mock_compaction_settings(self):
        """Create mock compaction settings."""
        return CompactionSettings(model="openai/gpt-4o-mini")

    @pytest.mark.asyncio
    async def test_sliding_window_passes_telemetry_to_simple_summary(
        self, mock_messages, mock_llm_config, mock_actor, mock_compaction_settings
    ):
        """Verify summarize_via_sliding_window passes telemetry context to simple_summary."""
        agent_id = f"agent-{uuid.uuid4()}"
        agent_tags = ["version:v2"]
        run_id = f"run-{uuid.uuid4()}"
        step_id = f"step-{uuid.uuid4()}"
        captured_kwargs = {}

        # Replacement for simple_summary that records the kwargs it receives.
        async def capture_simple_summary(*args, **kwargs):
            captured_kwargs.update(kwargs)
            return "Mocked summary"

        with patch.object(summarizer_sliding_window, "simple_summary", new=capture_simple_summary):
            await summarizer_sliding_window.summarize_via_sliding_window(
                actor=mock_actor,
                llm_config=mock_llm_config,
                agent_llm_config=mock_llm_config,  # case where agent and summarizer have same config
                summarizer_config=mock_compaction_settings,
                in_context_messages=mock_messages,
                agent_id=agent_id,
                agent_tags=agent_tags,
                run_id=run_id,
                step_id=step_id,
            )
        assert captured_kwargs.get("agent_id") == agent_id
        assert captured_kwargs.get("agent_tags") == agent_tags
        assert captured_kwargs.get("run_id") == run_id
        assert captured_kwargs.get("step_id") == step_id
class TestSummarizerClassTelemetryContext:
    """Tests for Summarizer class telemetry context passing.

    Each test patches simple_summary with a kwargs-capturing coroutine, runs
    Summarizer.summarize(), and checks which run_id/step_id values were
    forwarded. The previously triplicated mock scaffolding is factored into
    private helpers.
    """

    @staticmethod
    def _make_agent_manager():
        """Mock agent manager returning an agent with a real llm_config and tags."""
        manager = MagicMock()
        manager.get_agent_by_id_async = AsyncMock(
            return_value=MagicMock(
                llm_config=LLMConfig(
                    model="gpt-4o-mini",
                    model_endpoint_type="openai",
                    model_endpoint="https://api.openai.com/v1",
                    context_window=8000,
                ),
                tags=["test-tag"],
            )
        )
        return manager

    def _make_summarizer(self, mock_actor, agent_id, **telemetry):
        """Build a Summarizer wired to mocked managers; telemetry kwargs pass through."""
        return Summarizer(
            mode=SummarizationMode.PARTIAL_EVICT_MESSAGE_BUFFER,
            summarizer_agent=None,
            message_buffer_limit=100,
            message_buffer_min=10,
            partial_evict_summarizer_percentage=0.5,
            agent_manager=self._make_agent_manager(),
            message_manager=MagicMock(),
            actor=mock_actor,
            agent_id=agent_id,
            **telemetry,
        )

    @staticmethod
    async def _capture_forwarded_kwargs(summarizer, mock_messages, **summarize_kwargs):
        """Run summarize() with simple_summary patched; return the kwargs it received."""
        captured_kwargs = {}

        async def capture_simple_summary(*args, **kwargs):
            captured_kwargs.update(kwargs)
            return "Mocked summary"

        with patch("letta.services.summarizer.summarizer.simple_summary", new=capture_simple_summary):
            try:
                await summarizer.summarize(
                    in_context_messages=mock_messages,
                    new_letta_messages=[],
                    force=True,
                    **summarize_kwargs,
                )
            except Exception:
                # Best-effort: downstream persistence may fail on MagicMock
                # managers after simple_summary was already invoked.
                pass
        return captured_kwargs

    @pytest.mark.asyncio
    async def test_summarizer_summarize_passes_runtime_telemetry(self, mock_messages, mock_actor):
        """Verify Summarizer.summarize() passes runtime run_id/step_id to the underlying call."""
        run_id = f"run-{uuid.uuid4()}"
        step_id = f"step-{uuid.uuid4()}"
        summarizer = self._make_summarizer(mock_actor, f"agent-{uuid.uuid4()}")
        captured = await self._capture_forwarded_kwargs(summarizer, mock_messages, run_id=run_id, step_id=step_id)
        # NOTE(review): guarded because summarize() may raise before reaching
        # simple_summary; when nothing is captured the test passes vacuously.
        if captured:
            assert captured.get("run_id") == run_id
            assert captured.get("step_id") == step_id

    @pytest.mark.asyncio
    async def test_summarizer_uses_constructor_telemetry_as_default(self, mock_messages, mock_actor):
        """Verify Summarizer uses constructor run_id/step_id when not passed to summarize()."""
        constructor_run_id = f"run-{uuid.uuid4()}"
        constructor_step_id = f"step-{uuid.uuid4()}"
        summarizer = self._make_summarizer(
            mock_actor,
            f"agent-{uuid.uuid4()}",
            run_id=constructor_run_id,
            step_id=constructor_step_id,
        )
        captured = await self._capture_forwarded_kwargs(summarizer, mock_messages)
        # NOTE(review): same vacuous-pass guard as above.
        if captured:
            assert captured.get("run_id") == constructor_run_id
            assert captured.get("step_id") == constructor_step_id

    @pytest.mark.asyncio
    async def test_summarizer_runtime_overrides_constructor_telemetry(self, mock_messages, mock_actor):
        """Verify runtime run_id/step_id override constructor values."""
        runtime_run_id = f"run-runtime-{uuid.uuid4()}"
        runtime_step_id = f"step-runtime-{uuid.uuid4()}"
        summarizer = self._make_summarizer(
            mock_actor,
            f"agent-{uuid.uuid4()}",
            run_id=f"run-constructor-{uuid.uuid4()}",
            step_id=f"step-constructor-{uuid.uuid4()}",
        )
        captured = await self._capture_forwarded_kwargs(summarizer, mock_messages, run_id=runtime_run_id, step_id=runtime_step_id)
        # NOTE(review): same vacuous-pass guard as above.
        if captured:
            assert captured.get("run_id") == runtime_run_id
            assert captured.get("step_id") == runtime_step_id
class TestLLMClientTelemetryContext:
    """Tests for LLM client telemetry context setting."""

    def test_llm_client_set_telemetry_context_method_exists(self):
        """Verify LLMClient has set_telemetry_context method."""
        from letta.llm_api.llm_client import LLMClient

        client = LLMClient.create(provider_type="openai", put_inner_thoughts_first=True)
        assert hasattr(client, "set_telemetry_context")

    def test_llm_client_set_telemetry_context_accepts_all_params(self):
        """Verify set_telemetry_context accepts all telemetry parameters."""
        from letta.llm_api.llm_client import LLMClient

        client = LLMClient.create(provider_type="openai", put_inner_thoughts_first=True)
        telemetry_kwargs = {
            "agent_id": f"agent-{uuid.uuid4()}",
            "agent_tags": ["tag1", "tag2"],
            "run_id": f"run-{uuid.uuid4()}",
            "step_id": f"step-{uuid.uuid4()}",
            "call_type": "summarization",
        }
        # Should accept the full parameter set without raising.
        client.set_telemetry_context(**telemetry_kwargs)
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/test_provider_trace_summarization.py",
"license": "Apache License 2.0",
"lines": 362,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/test_usage_parsing.py | """
Tests for usage statistics parsing through the production adapter path.
These tests verify that SimpleLLMRequestAdapter correctly extracts usage statistics
from LLM responses, including:
1. Basic usage (prompt_tokens, completion_tokens, total_tokens)
2. Cache-related fields (cached_input_tokens, cache_write_tokens)
3. Reasoning tokens (for models that support it)
This tests the actual production code path:
SimpleLLMRequestAdapter.invoke_llm()
→ llm_client.request_async_with_telemetry()
→ llm_client.convert_response_to_chat_completion()
→ adapter extracts from chat_completions_response.usage
→ normalize_cache_tokens() / normalize_reasoning_tokens()
"""
import os
import pytest
from letta.adapters.simple_llm_request_adapter import SimpleLLMRequestAdapter
from letta.errors import LLMAuthenticationError
from letta.llm_api.anthropic_client import AnthropicClient
from letta.llm_api.google_ai_client import GoogleAIClient
from letta.llm_api.openai_client import OpenAIClient
from letta.schemas.enums import AgentType, LLMCallType, MessageRole
from letta.schemas.letta_message_content import TextContent
from letta.schemas.llm_config import LLMConfig
from letta.schemas.message import Message
from letta.settings import model_settings
def _has_openai_credentials() -> bool:
    """Return True when an OpenAI API key is configured via settings or environment."""
    key = model_settings.openai_api_key or os.environ.get("OPENAI_API_KEY")
    return bool(key)
def _has_anthropic_credentials() -> bool:
    """Return True when an Anthropic API key is configured via settings or environment."""
    key = model_settings.anthropic_api_key or os.environ.get("ANTHROPIC_API_KEY")
    return bool(key)
def _has_gemini_credentials() -> bool:
    """Return True when a Gemini API key is configured via settings or environment."""
    key = model_settings.gemini_api_key or os.environ.get("GEMINI_API_KEY")
    return bool(key)
def _build_simple_messages(user_content: str) -> list[Message]:
    """Build a minimal single-user-turn message list for testing."""
    user_message = Message(
        role=MessageRole.user,
        content=[TextContent(text=user_content)],
    )
    return [user_message]
# Large system prompt to exceed caching thresholds (>1024 tokens).
# NOTE(review): provider prompt-caching minimums vary by model; this prompt is
# assumed to be comfortably above them — confirm if cache assertions flake.
LARGE_SYSTEM_PROMPT = """You are an advanced AI assistant with extensive knowledge across multiple domains.
# Core Capabilities
## Technical Knowledge
- Software Engineering: Expert in Python, JavaScript, TypeScript, Go, Rust, and many other languages
- System Design: Deep understanding of distributed systems, microservices, and cloud architecture
- DevOps: Proficient in Docker, Kubernetes, CI/CD pipelines, and infrastructure as code
- Databases: Experience with SQL (PostgreSQL, MySQL) and NoSQL (MongoDB, Redis, Cassandra) databases
- Machine Learning: Knowledge of neural networks, transformers, and modern ML frameworks
## Problem Solving Approach
When tackling problems, you follow a structured methodology:
1. Understand the requirements thoroughly
2. Break down complex problems into manageable components
3. Consider multiple solution approaches
4. Evaluate trade-offs between different options
5. Implement solutions with clean, maintainable code
6. Test thoroughly and iterate based on feedback
## Communication Style
- Clear and concise explanations
- Use examples and analogies when helpful
- Adapt technical depth to the audience
- Ask clarifying questions when requirements are ambiguous
- Provide context and rationale for recommendations
# Domain Expertise
## Web Development
You have deep knowledge of:
- Frontend: React, Vue, Angular, Next.js, modern CSS frameworks
- Backend: Node.js, Express, FastAPI, Django, Flask
- API Design: REST, GraphQL, gRPC
- Authentication: OAuth, JWT, session management
- Performance: Caching strategies, CDNs, lazy loading
## Data Engineering
You understand:
- ETL pipelines and data transformation
- Data warehousing concepts (Snowflake, BigQuery, Redshift)
- Stream processing (Kafka, Kinesis)
- Data modeling and schema design
- Data quality and validation
## Cloud Platforms
You're familiar with:
- AWS: EC2, S3, Lambda, RDS, DynamoDB, CloudFormation
- GCP: Compute Engine, Cloud Storage, Cloud Functions, BigQuery
- Azure: Virtual Machines, Blob Storage, Azure Functions
- Serverless architectures and best practices
- Cost optimization strategies
## Security
You consider:
- Common vulnerabilities (OWASP Top 10)
- Secure coding practices
- Encryption and key management
- Access control and authorization patterns
- Security audit and compliance requirements
# Interaction Principles
## Helpfulness
- Provide actionable guidance
- Share relevant resources and documentation
- Offer multiple approaches when appropriate
- Point out potential pitfalls and edge cases
- Follow up to ensure understanding
## Accuracy
- Acknowledge limitations and uncertainties
- Distinguish between facts and opinions
- Cite sources when making specific claims
- Correct mistakes promptly when identified
- Stay current with latest developments
## Respect
- Value diverse perspectives and approaches
- Maintain professional boundaries
- Protect user privacy and confidentiality
- Avoid assumptions about user background
- Be patient with varying skill levels
Remember: Your goal is to empower users to solve problems and learn, not just to provide answers."""
@pytest.mark.asyncio
async def test_openai_usage_via_adapter():
    """Test OpenAI usage extraction through SimpleLLMRequestAdapter.

    This tests the actual production code path used by letta_agent_v3.
    Skipped when no OpenAI credentials are configured.
    """
    if not _has_openai_credentials():
        pytest.skip("OpenAI credentials not configured")
    client = OpenAIClient()
    llm_config = LLMConfig.default_config("gpt-4o-mini")
    adapter = SimpleLLMRequestAdapter(
        llm_client=client,
        llm_config=llm_config,
        call_type=LLMCallType.agent_step,
    )
    messages = _build_simple_messages("Say hello in exactly 5 words.")
    request_data = client.build_request_data(AgentType.letta_v1_agent, messages, llm_config)
    # Call through the adapter (production path); the generator is drained only
    # for its side effect of populating adapter.usage.
    try:
        async for _ in adapter.invoke_llm(
            request_data=request_data,
            messages=messages,
            tools=[],
            use_assistant_message=False,
        ):
            pass
    except LLMAuthenticationError:
        pytest.skip("OpenAI credentials invalid")
    # Verify usage was extracted
    assert adapter.usage is not None, "adapter.usage should not be None"
    assert adapter.usage.prompt_tokens > 0, f"prompt_tokens should be > 0, got {adapter.usage.prompt_tokens}"
    assert adapter.usage.completion_tokens > 0, f"completion_tokens should be > 0, got {adapter.usage.completion_tokens}"
    assert adapter.usage.total_tokens > 0, f"total_tokens should be > 0, got {adapter.usage.total_tokens}"
    assert adapter.usage.step_count == 1, f"step_count should be 1, got {adapter.usage.step_count}"
    print(f"OpenAI usage: prompt={adapter.usage.prompt_tokens}, completion={adapter.usage.completion_tokens}")
    print(f"OpenAI cache: cached_input={adapter.usage.cached_input_tokens}, cache_write={adapter.usage.cache_write_tokens}")
    print(f"OpenAI reasoning: {adapter.usage.reasoning_tokens}")
@pytest.mark.asyncio
async def test_anthropic_usage_via_adapter():
    """Exercise Anthropic usage extraction through SimpleLLMRequestAdapter.

    Drives the production code path used by letta_agent_v3. Anthropic's
    input_tokens counts NON-cached tokens only, so the adapter is expected
    to report prompt_tokens = input_tokens + cache_read + cache_creation.
    """
    if not _has_anthropic_credentials():
        pytest.skip("Anthropic credentials not configured")
    anthropic_client = AnthropicClient()
    config = LLMConfig(
        model="claude-haiku-4-5-20251001",
        model_endpoint_type="anthropic",
        model_endpoint="https://api.anthropic.com/v1",
        context_window=200000,
        max_tokens=256,
    )
    request_adapter = SimpleLLMRequestAdapter(
        llm_client=anthropic_client,
        llm_config=config,
        call_type=LLMCallType.agent_step,
    )
    # Anthropic requires the system message to come first.
    input_messages = [
        Message(role=MessageRole.system, content=[TextContent(text="You are a helpful assistant.")]),
        Message(role=MessageRole.user, content=[TextContent(text="Say hello in exactly 5 words.")]),
    ]
    payload = anthropic_client.build_request_data(AgentType.letta_v1_agent, input_messages, config, tools=[])
    # Invoke through the adapter, mirroring the production call site.
    try:
        async for _ in request_adapter.invoke_llm(
            request_data=payload,
            messages=input_messages,
            tools=[],
            use_assistant_message=False,
        ):
            pass
    except LLMAuthenticationError:
        pytest.skip("Anthropic credentials invalid")
    # The adapter should have populated usage from the response.
    usage = request_adapter.usage
    assert usage is not None, "adapter.usage should not be None"
    assert usage.prompt_tokens > 0, f"prompt_tokens should be > 0, got {usage.prompt_tokens}"
    assert usage.completion_tokens > 0, f"completion_tokens should be > 0, got {usage.completion_tokens}"
    assert usage.total_tokens > 0, f"total_tokens should be > 0, got {usage.total_tokens}"
    assert usage.step_count == 1, f"step_count should be 1, got {usage.step_count}"
    print(f"Anthropic usage: prompt={usage.prompt_tokens}, completion={usage.completion_tokens}")
    print(f"Anthropic cache: cached_input={usage.cached_input_tokens}, cache_write={usage.cache_write_tokens}")
@pytest.mark.asyncio
async def test_gemini_usage_via_adapter():
    """Exercise Gemini usage extraction through SimpleLLMRequestAdapter.

    Drives the same production code path that letta_agent_v3 uses.
    """
    if not _has_gemini_credentials():
        pytest.skip("Gemini credentials not configured")
    gemini_client = GoogleAIClient()
    config = LLMConfig(
        model="gemini-2.0-flash",
        model_endpoint_type="google_ai",
        model_endpoint="https://generativelanguage.googleapis.com",
        context_window=1048576,
        max_tokens=256,
    )
    request_adapter = SimpleLLMRequestAdapter(
        llm_client=gemini_client,
        llm_config=config,
        call_type=LLMCallType.agent_step,
    )
    input_messages = _build_simple_messages("Say hello in exactly 5 words.")
    payload = gemini_client.build_request_data(AgentType.letta_v1_agent, input_messages, config, tools=[])
    # Invoke through the adapter, mirroring the production call site.
    try:
        async for _ in request_adapter.invoke_llm(
            request_data=payload,
            messages=input_messages,
            tools=[],
            use_assistant_message=False,
        ):
            pass
    except LLMAuthenticationError:
        pytest.skip("Gemini credentials invalid")
    # The adapter should have populated usage from the response.
    usage = request_adapter.usage
    assert usage is not None, "adapter.usage should not be None"
    assert usage.prompt_tokens > 0, f"prompt_tokens should be > 0, got {usage.prompt_tokens}"
    assert usage.completion_tokens > 0, f"completion_tokens should be > 0, got {usage.completion_tokens}"
    assert usage.total_tokens > 0, f"total_tokens should be > 0, got {usage.total_tokens}"
    assert usage.step_count == 1, f"step_count should be 1, got {usage.step_count}"
    print(f"Gemini usage: prompt={usage.prompt_tokens}, completion={usage.completion_tokens}")
    print(f"Gemini cache: cached_input={usage.cached_input_tokens}")
    print(f"Gemini reasoning: {usage.reasoning_tokens}")
@pytest.mark.asyncio
async def test_openai_prefix_caching_via_adapter():
    """Exercise OpenAI prefix caching through SimpleLLMRequestAdapter.

    Sends two requests that share one large system prompt and checks whether
    cached_input_tokens is populated on the second. Prefix caching is
    probabilistic and depends on server-side state, so a miss is tolerated.
    """
    if not _has_openai_credentials():
        pytest.skip("OpenAI credentials not configured")
    openai_client = OpenAIClient()
    config = LLMConfig.default_config("gpt-4o-mini")

    def _prompt(question):
        # Identical large system prompt each time so the prefix is cacheable.
        return [
            Message(role=MessageRole.system, content=[TextContent(text=LARGE_SYSTEM_PROMPT)]),
            Message(role=MessageRole.user, content=[TextContent(text=question)]),
        ]

    def _fresh_adapter():
        return SimpleLLMRequestAdapter(llm_client=openai_client, llm_config=config, call_type=LLMCallType.agent_step)

    # First request - should populate the server-side cache.
    adapter1 = _fresh_adapter()
    messages1 = _prompt("What is 2+2?")
    payload1 = openai_client.build_request_data(AgentType.letta_v1_agent, messages1, config)
    try:
        async for _ in adapter1.invoke_llm(request_data=payload1, messages=messages1, tools=[], use_assistant_message=False):
            pass
    except LLMAuthenticationError:
        pytest.skip("OpenAI credentials invalid")
    print(f"Request 1 - prompt={adapter1.usage.prompt_tokens}, cached={adapter1.usage.cached_input_tokens}")

    # Second request - same system prompt, may hit the cache.
    adapter2 = _fresh_adapter()
    messages2 = _prompt("What is 3+3?")
    payload2 = openai_client.build_request_data(AgentType.letta_v1_agent, messages2, config)
    async for _ in adapter2.invoke_llm(request_data=payload2, messages=messages2, tools=[], use_assistant_message=False):
        pass
    print(f"Request 2 - prompt={adapter2.usage.prompt_tokens}, cached={adapter2.usage.cached_input_tokens}")

    # Verify basic usage; a cache hit cannot be guaranteed, so just report it.
    assert adapter2.usage.prompt_tokens > 0
    assert adapter2.usage.completion_tokens > 0
    if adapter2.usage.cached_input_tokens and adapter2.usage.cached_input_tokens > 0:
        print(f"SUCCESS: OpenAI cache hit! cached_input_tokens={adapter2.usage.cached_input_tokens}")
    else:
        print("INFO: No cache hit (cache may not have been populated yet)")
@pytest.mark.asyncio
async def test_anthropic_prefix_caching_via_adapter():
    """Exercise Anthropic prefix caching through SimpleLLMRequestAdapter.

    Sends two requests sharing one large system prompt and reports cache
    read/write token counts. Anthropic requires explicit cache_control
    breakpoints, so cache activity is reported rather than asserted.
    """
    if not _has_anthropic_credentials():
        pytest.skip("Anthropic credentials not configured")
    anthropic_client = AnthropicClient()
    config = LLMConfig(
        model="claude-haiku-4-5-20251001",
        model_endpoint_type="anthropic",
        model_endpoint="https://api.anthropic.com/v1",
        context_window=200000,
        max_tokens=256,
    )

    def _prompt(question):
        # Identical large system prompt each time so the prefix is cacheable.
        return [
            Message(role=MessageRole.system, content=[TextContent(text=LARGE_SYSTEM_PROMPT)]),
            Message(role=MessageRole.user, content=[TextContent(text=question)]),
        ]

    def _fresh_adapter():
        return SimpleLLMRequestAdapter(llm_client=anthropic_client, llm_config=config, call_type=LLMCallType.agent_step)

    # First request - expected to write to the cache.
    adapter1 = _fresh_adapter()
    messages1 = _prompt("What is 2+2?")
    payload1 = anthropic_client.build_request_data(AgentType.letta_v1_agent, messages1, config, tools=[])
    try:
        async for _ in adapter1.invoke_llm(request_data=payload1, messages=messages1, tools=[], use_assistant_message=False):
            pass
    except LLMAuthenticationError:
        pytest.skip("Anthropic credentials invalid")
    print(
        f"Request 1 - prompt={adapter1.usage.prompt_tokens}, cached={adapter1.usage.cached_input_tokens}, cache_write={adapter1.usage.cache_write_tokens}"
    )

    # Second request - may read back the cached prefix.
    adapter2 = _fresh_adapter()
    messages2 = _prompt("What is 3+3?")
    payload2 = anthropic_client.build_request_data(AgentType.letta_v1_agent, messages2, config, tools=[])
    async for _ in adapter2.invoke_llm(request_data=payload2, messages=messages2, tools=[], use_assistant_message=False):
        pass
    print(
        f"Request 2 - prompt={adapter2.usage.prompt_tokens}, cached={adapter2.usage.cached_input_tokens}, cache_write={adapter2.usage.cache_write_tokens}"
    )

    # Verify basic usage, then report whichever cache outcome occurred.
    assert adapter2.usage.prompt_tokens > 0
    assert adapter2.usage.completion_tokens > 0
    if adapter2.usage.cached_input_tokens and adapter2.usage.cached_input_tokens > 0:
        print(f"SUCCESS: Anthropic cache hit! cached_input_tokens={adapter2.usage.cached_input_tokens}")
    elif adapter2.usage.cache_write_tokens and adapter2.usage.cache_write_tokens > 0:
        print(f"INFO: Anthropic cache write! cache_write_tokens={adapter2.usage.cache_write_tokens}")
    else:
        print("INFO: No cache activity detected")
@pytest.mark.asyncio
async def test_gemini_prefix_caching_via_adapter():
    """Exercise Gemini prefix caching through SimpleLLMRequestAdapter.

    Sends two requests sharing one large system prompt and checks whether
    cached_input_tokens is populated. Gemini 2.0+ caches implicitly, so a
    miss is tolerated.
    """
    if not _has_gemini_credentials():
        pytest.skip("Gemini credentials not configured")
    gemini_client = GoogleAIClient()
    config = LLMConfig(
        model="gemini-2.0-flash",
        model_endpoint_type="google_ai",
        model_endpoint="https://generativelanguage.googleapis.com",
        context_window=1048576,
        max_tokens=256,
    )

    def _prompt(question):
        # Identical large system prompt each time so the prefix is cacheable.
        return [
            Message(role=MessageRole.system, content=[TextContent(text=LARGE_SYSTEM_PROMPT)]),
            Message(role=MessageRole.user, content=[TextContent(text=question)]),
        ]

    def _fresh_adapter():
        return SimpleLLMRequestAdapter(llm_client=gemini_client, llm_config=config, call_type=LLMCallType.agent_step)

    # First request - should populate the implicit cache.
    adapter1 = _fresh_adapter()
    messages1 = _prompt("What is 2+2?")
    payload1 = gemini_client.build_request_data(AgentType.letta_v1_agent, messages1, config, tools=[])
    try:
        async for _ in adapter1.invoke_llm(request_data=payload1, messages=messages1, tools=[], use_assistant_message=False):
            pass
    except LLMAuthenticationError:
        pytest.skip("Gemini credentials invalid")
    print(f"Request 1 - prompt={adapter1.usage.prompt_tokens}, cached={adapter1.usage.cached_input_tokens}")

    # Second request - same system prompt, may hit the cache.
    adapter2 = _fresh_adapter()
    messages2 = _prompt("What is 3+3?")
    payload2 = gemini_client.build_request_data(AgentType.letta_v1_agent, messages2, config, tools=[])
    async for _ in adapter2.invoke_llm(request_data=payload2, messages=messages2, tools=[], use_assistant_message=False):
        pass
    print(f"Request 2 - prompt={adapter2.usage.prompt_tokens}, cached={adapter2.usage.cached_input_tokens}")

    # Verify basic usage; a cache hit cannot be guaranteed, so just report it.
    assert adapter2.usage.prompt_tokens > 0
    assert adapter2.usage.completion_tokens > 0
    if adapter2.usage.cached_input_tokens and adapter2.usage.cached_input_tokens > 0:
        print(f"SUCCESS: Gemini cache hit! cached_input_tokens={adapter2.usage.cached_input_tokens}")
    else:
        print("INFO: No cache hit detected")
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/test_usage_parsing.py",
"license": "Apache License 2.0",
"lines": 380,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:alembic/versions/539afa667cff_add_telemetry_context_fields_to_.py | """add telemetry context fields to provider_traces
Revision ID: 539afa667cff
Revises: a1b2c3d4e5f7
Create Date: 2026-01-16 18:29:29.811385
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "539afa667cff"
down_revision: Union[str, None] = "a1b2c3d4e5f7"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Add telemetry context columns to the provider_traces table."""
    # All columns are nullable so existing rows remain valid.
    for column_name, column_type in (
        ("agent_id", sa.String()),
        ("agent_tags", sa.JSON()),
        ("call_type", sa.String()),
        ("run_id", sa.String()),
    ):
        op.add_column("provider_traces", sa.Column(column_name, column_type, nullable=True))
def downgrade() -> None:
    """Remove the telemetry context columns added in upgrade()."""
    # Drop in reverse order of creation.
    for column_name in ("run_id", "call_type", "agent_tags", "agent_id"):
        op.drop_column("provider_traces", column_name)
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/539afa667cff_add_telemetry_context_fields_to_.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/82feb220a9b8_add_source_column_to_provider_traces.py | """add source column to provider_traces
Revision ID: 82feb220a9b8
Revises: 539afa667cff
Create Date: 2026-01-18 21:09:59.529688
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "82feb220a9b8"
down_revision: Union[str, None] = "539afa667cff"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Add a nullable `source` column to provider_traces."""
    source_column = sa.Column("source", sa.String(), nullable=True)
    op.add_column("provider_traces", source_column)
def downgrade() -> None:
    """Drop the `source` column added in upgrade()."""
    op.drop_column("provider_traces", "source")
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/82feb220a9b8_add_source_column_to_provider_traces.py",
"license": "Apache License 2.0",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/a1b2c3d4e5f7_add_blocks_conversations_table.py | """Add blocks_conversations table for conversation-isolated blocks
Revision ID: a1b2c3d4e5f7
Revises: cf3c4d025dbc
Create Date: 2026-01-14 02:22:00.000000
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "a1b2c3d4e5f7"
down_revision: Union[str, None] = "cf3c4d025dbc"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Create the blocks_conversations junction table.

    Each row pins one block (by id and label) to one conversation; CASCADE
    deletes keep the table consistent when either side is removed.
    """
    # Create blocks_conversations junction table
    op.create_table(
        "blocks_conversations",
        sa.Column("conversation_id", sa.String(), nullable=False),
        sa.Column("block_id", sa.String(), nullable=False),
        sa.Column("block_label", sa.String(), nullable=False),
        # Deleting a conversation or a block removes its junction rows.
        sa.ForeignKeyConstraint(
            ["conversation_id"],
            ["conversations.id"],
            ondelete="CASCADE",
        ),
        sa.ForeignKeyConstraint(
            ["block_id"],
            ["block.id"],
            ondelete="CASCADE",
        ),
        sa.PrimaryKeyConstraint("conversation_id", "block_id", "block_label"),
        # Within one conversation, each label and each block may appear once.
        sa.UniqueConstraint("conversation_id", "block_label", name="unique_label_per_conversation"),
        sa.UniqueConstraint("conversation_id", "block_id", name="unique_conversation_block"),
    )
    # Support reverse lookups: find conversations that use a given block.
    op.create_index("ix_blocks_conversations_block_id", "blocks_conversations", ["block_id"], unique=False)
def downgrade() -> None:
    """Tear down blocks_conversations: index first, then the table itself."""
    op.drop_index("ix_blocks_conversations_block_id", table_name="blocks_conversations")
    op.drop_table("blocks_conversations")
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/a1b2c3d4e5f7_add_blocks_conversations_table.py",
"license": "Apache License 2.0",
"lines": 38,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/cf3c4d025dbc_add_blocks_tags_table.py | """Add blocks tags table
Revision ID: cf3c4d025dbc
Revises: 27de0f58e076
Create Date: 2026-01-08 23:36:00.000000
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
from letta.settings import settings
# revision identifiers, used by Alembic.
revision: str = "cf3c4d025dbc"
down_revision: Union[str, None] = "27de0f58e076"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Create the blocks_tags table (Postgres deployments only).

    Mirrors the agents_tags structure but uses a composite primary key
    instead of a surrogate id column, matching the SQLite baseline pattern.
    """
    # Skip this migration for SQLite
    if not settings.letta_pg_uri_no_default:
        return
    # Create blocks_tags table with timestamps and org scoping for filtering
    # Note: Matches agents_tags structure but follows SQLite baseline pattern (no separate id column)
    op.create_table(
        "blocks_tags",
        sa.Column("block_id", sa.String(), nullable=False),
        sa.Column("tag", sa.String(), nullable=False),
        # Audit columns; nullable timestamps default to the insert time.
        sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=True),
        sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=True),
        sa.Column("is_deleted", sa.Boolean(), server_default=sa.text("FALSE"), nullable=False),
        sa.Column("_created_by_id", sa.String(), nullable=True),
        sa.Column("_last_updated_by_id", sa.String(), nullable=True),
        sa.Column("organization_id", sa.String(), nullable=False),
        sa.ForeignKeyConstraint(
            ["block_id"],
            ["block.id"],
        ),
        sa.ForeignKeyConstraint(
            ["organization_id"],
            ["organizations.id"],
        ),
        sa.PrimaryKeyConstraint("block_id", "tag"),
        # Redundant with the PK, but gives the constraint an explicit name.
        sa.UniqueConstraint("block_id", "tag", name="unique_block_tag"),
    )
def downgrade() -> None:
    """Drop blocks_tags; a no-op on SQLite, mirroring upgrade()."""
    # Postgres-only: the table was never created for SQLite deployments.
    if not settings.letta_pg_uri_no_default:
        return
    op.drop_table("blocks_tags")
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/cf3c4d025dbc_add_blocks_tags_table.py",
"license": "Apache License 2.0",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:letta/llm_api/chatgpt_oauth_client.py | """ChatGPT OAuth Client - handles requests to chatgpt.com/backend-api/codex/responses."""
import asyncio
import json
from typing import Any, AsyncIterator, Dict, List, Optional
import httpx
from openai.types.responses import (
Response,
ResponseCompletedEvent,
ResponseContentPartAddedEvent,
ResponseContentPartDoneEvent,
ResponseCreatedEvent,
ResponseFunctionCallArgumentsDeltaEvent,
ResponseFunctionCallArgumentsDoneEvent,
ResponseFunctionToolCall,
ResponseInProgressEvent,
ResponseOutputItemAddedEvent,
ResponseOutputItemDoneEvent,
ResponseOutputMessage,
ResponseOutputText,
ResponseReasoningItem,
ResponseReasoningSummaryPartAddedEvent,
ResponseReasoningSummaryPartDoneEvent,
ResponseReasoningSummaryTextDeltaEvent,
ResponseReasoningSummaryTextDoneEvent,
ResponseTextDeltaEvent,
ResponseTextDoneEvent,
)
from openai.types.responses.response_stream_event import ResponseStreamEvent
from letta.errors import (
ContextWindowExceededError,
ErrorCode,
LettaError,
LLMAuthenticationError,
LLMBadRequestError,
LLMConnectionError,
LLMRateLimitError,
LLMServerError,
LLMTimeoutError,
)
from letta.helpers.json_helpers import sanitize_unicode_surrogates
from letta.llm_api.llm_client_base import LLMClientBase
from letta.log import get_logger
from letta.otel.tracing import trace_method
from letta.schemas.enums import AgentType, ProviderCategory
from letta.schemas.llm_config import LLMConfig
from letta.schemas.message import Message as PydanticMessage
from letta.schemas.openai.chat_completion_response import (
ChatCompletionResponse,
)
from letta.schemas.providers.chatgpt_oauth import ChatGPTOAuthCredentials, ChatGPTOAuthProvider
from letta.schemas.usage import LettaUsageStatistics
logger = get_logger(__name__)
# ChatGPT Backend API endpoint
CHATGPT_CODEX_ENDPOINT = "https://chatgpt.com/backend-api/codex/responses"
class AsyncStreamWrapper:
    """Adapts a bare async generator to the async context-manager protocol.

    The OpenAI SDK's AsyncStream implements __aenter__ and __aexit__, while
    our custom SSE handler returns a plain async generator. Wrapping the
    generator lets both be consumed through the same
    `async with ... async for ...` pattern.
    """

    def __init__(self, generator: AsyncIterator[ResponseStreamEvent]):
        self._generator = generator

    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        # Best-effort cleanup: only generator objects expose aclose().
        aclose = getattr(self._generator, "aclose", None)
        if aclose is not None:
            await aclose()
        return False

    def __aiter__(self):
        return self

    async def __anext__(self) -> ResponseStreamEvent:
        return await self._generator.__anext__()
class ChatGPTOAuthClient(LLMClientBase):
"""
LLM client for ChatGPT OAuth provider.
This client:
1. Transforms standard OpenAI chat format to ChatGPT backend Responses API format
2. Adds required headers (Authorization, ChatGPT-Account-Id, OpenAI-Beta, OpenAI-Originator)
3. Makes requests to chatgpt.com/backend-api/codex/responses
4. Transforms responses back to OpenAI ChatCompletion format
"""
MAX_RETRIES = 3
# Transient httpx errors that are safe to retry (connection drops, transport-level failures)
_RETRYABLE_ERRORS = (httpx.ReadError, httpx.WriteError, httpx.ConnectError, httpx.RemoteProtocolError, LLMConnectionError)
@trace_method
async def _get_provider_and_credentials_async(self, llm_config: LLMConfig) -> tuple[ChatGPTOAuthProvider, ChatGPTOAuthCredentials]:
"""Get the ChatGPT OAuth provider and credentials with automatic refresh if needed.
Args:
llm_config: The LLM configuration containing provider info.
Returns:
Tuple of (provider, credentials).
Raises:
LLMAuthenticationError: If credentials cannot be obtained.
"""
from letta.services.provider_manager import ProviderManager
if llm_config.provider_category != ProviderCategory.byok:
raise ValueError("ChatGPT OAuth requires BYOK provider credentials")
# Get provider
provider_manager = ProviderManager()
providers = await provider_manager.list_providers_async(
name=llm_config.provider_name,
actor=self.actor,
provider_category=[ProviderCategory.byok],
)
if not providers:
raise LLMAuthenticationError(
message=f"ChatGPT OAuth provider '{llm_config.provider_name}' not found",
code=ErrorCode.UNAUTHENTICATED,
)
provider: ChatGPTOAuthProvider = providers[0].cast_to_subtype()
# Get credentials with automatic refresh (pass actor for persistence)
creds = await provider.refresh_token_if_needed(actor=self.actor)
if not creds:
raise LLMAuthenticationError(
message="Failed to obtain valid ChatGPT OAuth credentials",
code=ErrorCode.UNAUTHENTICATED,
)
return provider, creds
def _build_headers(self, creds: ChatGPTOAuthCredentials) -> Dict[str, str]:
"""Build required headers for ChatGPT backend API.
Args:
creds: OAuth credentials containing access_token and account_id.
Returns:
Dictionary of HTTP headers.
"""
if not creds.access_token:
raise LLMAuthenticationError(
message="ChatGPT OAuth access_token is empty or missing",
code=ErrorCode.UNAUTHENTICATED,
)
return {
"Authorization": f"Bearer {creds.access_token}",
"ChatGPT-Account-Id": creds.account_id,
"OpenAI-Beta": "responses=v1",
"OpenAI-Originator": "codex",
"Content-Type": "application/json",
"accept": "text/event-stream",
}
@trace_method
def build_request_data(
self,
agent_type: AgentType,
messages: List[PydanticMessage],
llm_config: LLMConfig,
tools: Optional[List[dict]] = None,
force_tool_call: Optional[str] = None,
requires_subsequent_tool_call: bool = False,
tool_return_truncation_chars: Optional[int] = None,
) -> dict:
"""
Build request data for ChatGPT backend API in Responses API format.
The ChatGPT backend uses the OpenAI Responses API format:
- `input` array instead of `messages`
- `role: "developer"` instead of `role: "system"`
- Structured content arrays
"""
# Use the existing method to convert messages to Responses API format
input_messages = PydanticMessage.to_openai_responses_dicts_from_list(
messages,
tool_return_truncation_chars=tool_return_truncation_chars,
)
# Extract system message as instructions
instructions = None
filtered_input = []
for msg in input_messages:
if msg.get("role") == "developer":
# First developer message becomes instructions
if instructions is None:
content = msg.get("content", [])
if isinstance(content, list) and content:
instructions = content[0].get("text", "")
elif isinstance(content, str):
instructions = content
else:
filtered_input.append(msg)
else:
filtered_input.append(msg)
# Build tool_choice
tool_choice = None
if tools:
if force_tool_call is not None:
tool_choice = {"type": "function", "name": force_tool_call}
elif requires_subsequent_tool_call:
tool_choice = "required"
else:
tool_choice = "auto"
# Build request payload for ChatGPT backend
data: Dict[str, Any] = {
"model": llm_config.model,
"input": filtered_input,
"store": False, # Required for stateless operation
"stream": True, # ChatGPT backend requires streaming
}
if instructions:
data["instructions"] = instructions
if tools:
# Convert tools to Responses API format
responses_tools = [
{
"type": "function",
"name": t.get("name"),
"description": t.get("description"),
"parameters": t.get("parameters"),
}
for t in tools
]
data["tools"] = responses_tools
data["tool_choice"] = tool_choice
# Note: ChatGPT backend does NOT support max_output_tokens parameter
# Add reasoning effort for reasoning models (GPT-5.x, o-series)
if self.is_reasoning_model(llm_config) and llm_config.reasoning_effort:
data["reasoning"] = {
"effort": llm_config.reasoning_effort,
"summary": "detailed",
}
return data
def _transform_response_from_chatgpt_backend(self, response_data: dict) -> dict:
"""Transform ChatGPT backend response to standard OpenAI ChatCompletion format.
The ChatGPT backend returns responses in Responses API format.
This method normalizes them to ChatCompletion format.
Args:
response_data: Raw response from ChatGPT backend.
Returns:
Response in OpenAI ChatCompletion format.
"""
# If response is already in ChatCompletion format, return as-is
if "choices" in response_data:
return response_data
# Extract from Responses API format
output = response_data.get("output", [])
message_content = ""
tool_calls = None
reasoning_content = ""
for item in output:
item_type = item.get("type")
if item_type == "message":
content_parts = item.get("content", [])
for part in content_parts:
if part.get("type") in ("output_text", "text"):
message_content += part.get("text", "")
elif part.get("type") == "refusal":
message_content += part.get("refusal", "")
elif item_type == "function_call":
if tool_calls is None:
tool_calls = []
tool_calls.append(
{
"id": item.get("call_id", item.get("id", "")),
"type": "function",
"function": {
"name": item.get("name", ""),
"arguments": item.get("arguments", ""),
},
}
)
elif item_type == "reasoning":
# Capture reasoning/thinking content if present
summary = item.get("summary", [])
for s in summary:
if s.get("type") == "summary_text":
reasoning_content += s.get("text", "")
# Build ChatCompletion response
finish_reason = "stop"
if tool_calls:
finish_reason = "tool_calls"
transformed = {
"id": response_data.get("id", "chatgpt-response"),
"object": "chat.completion",
"created": response_data.get("created_at", 0),
"model": response_data.get("model", ""),
"choices": [
{
"index": 0,
"message": {
"role": "assistant",
"content": message_content or None,
"tool_calls": tool_calls,
},
"finish_reason": finish_reason,
}
],
"usage": self._transform_usage(response_data.get("usage", {})),
}
return transformed
def _transform_usage(self, usage: dict) -> dict:
"""Transform usage statistics from Responses API format."""
return {
"prompt_tokens": usage.get("input_tokens", 0),
"completion_tokens": usage.get("output_tokens", 0),
"total_tokens": usage.get("input_tokens", 0) + usage.get("output_tokens", 0),
}
@trace_method
def request(self, request_data: dict, llm_config: LLMConfig) -> dict:
"""Synchronous request - not recommended for ChatGPT OAuth."""
import asyncio
return asyncio.run(self.request_async(request_data, llm_config))
    @trace_method
    async def request_async(self, request_data: dict, llm_config: LLMConfig) -> dict:
        """Make asynchronous request to ChatGPT backend API.

        The backend only serves SSE, so the stream is consumed and folded
        into a single ChatCompletion-shaped dict before returning.

        Args:
            request_data: Request payload in Responses API format.
            llm_config: LLM configuration.

        Returns:
            Response data in OpenAI ChatCompletion format.

        Raises:
            LLMTimeoutError: On request timeout (not retried).
            LLMConnectionError: After exhausting retries on transient errors,
                or on other request-level failures.
        """
        request_data = sanitize_unicode_surrogates(request_data)
        _, creds = await self._get_provider_and_credentials_async(llm_config)
        headers = self._build_headers(creds)
        endpoint = llm_config.model_endpoint or CHATGPT_CODEX_ENDPOINT
        # ChatGPT backend requires streaming, so we use client.stream() to handle SSE
        # Retry on transient network errors with exponential backoff
        for attempt in range(self.MAX_RETRIES):
            try:
                async with httpx.AsyncClient() as client:
                    async with client.stream(
                        "POST",
                        endpoint,
                        json=request_data,
                        headers=headers,
                        timeout=120.0,
                    ) as response:
                        response.raise_for_status()
                        # Accumulate SSE events into a final response
                        return await self._accumulate_sse_response(response)
            except httpx.HTTPStatusError as e:
                # Map the status error to a Letta error type; only retry when
                # the mapped type is in the retryable set and attempts remain.
                mapped = self._handle_http_error(e)
                if isinstance(mapped, tuple(self._RETRYABLE_ERRORS)) and attempt < self.MAX_RETRIES - 1:
                    wait = 2**attempt  # exponential backoff: 1s, 2s, 4s, ...
                    logger.warning(
                        f"[ChatGPT] Retryable HTTP error on request (attempt {attempt + 1}/{self.MAX_RETRIES}), "
                        f"retrying in {wait}s: {type(mapped).__name__}: {mapped}"
                    )
                    await asyncio.sleep(wait)
                    continue
                raise mapped
            except httpx.TimeoutException:
                # Timeouts surface immediately; they are never retried.
                raise LLMTimeoutError(
                    message="ChatGPT backend request timed out",
                    code=ErrorCode.TIMEOUT,
                )
            except self._RETRYABLE_ERRORS as e:
                # Transport-level failures (connection drops etc.) get the same
                # backoff; the final failure is wrapped with its cause.
                if attempt < self.MAX_RETRIES - 1:
                    wait = 2**attempt
                    logger.warning(
                        f"[ChatGPT] Transient error on request (attempt {attempt + 1}/{self.MAX_RETRIES}), "
                        f"retrying in {wait}s: {type(e).__name__}: {e}"
                    )
                    await asyncio.sleep(wait)
                    continue
                raise LLMConnectionError(
                    message=f"Failed to connect to ChatGPT backend after {self.MAX_RETRIES} attempts: {str(e)}",
                    code=ErrorCode.INTERNAL_SERVER_ERROR,
                    details={"cause": str(e.__cause__) if e.__cause__ else None, "error_type": type(e).__name__},
                )
            except httpx.RequestError as e:
                # Any other request-level failure (not retryable).
                raise LLMConnectionError(
                    message=f"Failed to connect to ChatGPT backend: {str(e)}",
                    code=ErrorCode.INTERNAL_SERVER_ERROR,
                )
        # Should not be reached, but satisfy type checker
        raise LLMConnectionError(message="ChatGPT request failed after all retries", code=ErrorCode.INTERNAL_SERVER_ERROR)
    async def _accumulate_sse_response(self, response: httpx.Response) -> dict:
        """Accumulate SSE stream into a final response.

        ChatGPT backend may return SSE even for non-streaming requests.
        This method accumulates all events into a single response.

        Args:
            response: httpx Response object with SSE content.

        Returns:
            Accumulated response data in OpenAI ChatCompletion format.
        """
        accumulated_content = ""
        accumulated_tool_calls: List[Dict[str, Any]] = []
        model = ""
        response_id = ""
        usage = {}
        async for line in response.aiter_lines():
            # Only SSE data frames are relevant; other lines are skipped.
            if not line.startswith("data: "):
                continue
            data_str = line[6:]  # Remove "data: " prefix
            if data_str == "[DONE]":
                break
            try:
                event = json.loads(data_str)
            except json.JSONDecodeError:
                # Skip malformed frames rather than failing the whole request.
                continue
            # Extract response metadata (first id/model seen wins; usage is
            # overwritten by later events so the most recent value sticks).
            if not response_id and event.get("id"):
                response_id = event["id"]
            if not model and event.get("model"):
                model = event["model"]
            if event.get("usage"):
                usage = event["usage"]
            # Handle different event types
            event_type = event.get("type")
            if event_type == "response.output_item.done":
                # A completed output item: either a text message or a tool call.
                item = event.get("item", {})
                item_type = item.get("type")
                if item_type == "message":
                    for content in item.get("content", []):
                        if content.get("type") in ("output_text", "text"):
                            accumulated_content += content.get("text", "")
                elif item_type == "function_call":
                    accumulated_tool_calls.append(
                        {
                            "id": item.get("call_id", item.get("id", "")),
                            "type": "function",
                            "function": {
                                "name": item.get("name", ""),
                                "arguments": item.get("arguments", ""),
                            },
                        }
                    )
            elif event_type == "response.content_part.delta":
                # Incremental text deltas.
                # NOTE(review): if the backend ever emits both text deltas and
                # a final message item for the same text, this would append the
                # text twice - confirm the backend's actual event mix.
                delta = event.get("delta", {})
                if delta.get("type") == "text_delta":
                    accumulated_content += delta.get("text", "")
            elif event_type == "response.done":
                # Final response event carries the authoritative usage block.
                if event.get("response", {}).get("usage"):
                    usage = event["response"]["usage"]
        # Build final response in ChatCompletion shape.
        finish_reason = "stop" if not accumulated_tool_calls else "tool_calls"
        return {
            "id": response_id or "chatgpt-response",
            "object": "chat.completion",
            "created": 0,
            "model": model,
            "choices": [
                {
                    "index": 0,
                    "message": {
                        "role": "assistant",
                        "content": accumulated_content or None,
                        "tool_calls": accumulated_tool_calls if accumulated_tool_calls else None,
                    },
                    "finish_reason": finish_reason,
                }
            ],
            "usage": self._transform_usage(usage),
        }
@trace_method
async def request_embeddings(
    self,
    texts: List[str],
    embedding_config,
) -> List[List[float]]:
    """Embeddings are not available through the ChatGPT OAuth backend.

    Args:
        texts: Texts that would be embedded (ignored).
        embedding_config: Embedding configuration (ignored).

    Raises:
        NotImplementedError: always — callers must use a different provider
            for embedding requests.
    """
    raise NotImplementedError("ChatGPT OAuth does not support embeddings")
@trace_method
async def convert_response_to_chat_completion(
    self,
    response_data: dict,
    input_messages: List[PydanticMessage],
    llm_config: LLMConfig,
) -> ChatCompletionResponse:
    """Wrap an already-normalized payload in a ChatCompletionResponse.

    Args:
        response_data: Payload already shaped like an OpenAI ChatCompletion
            (the request/stream paths transform it before this point).
        input_messages: Original input messages (unused here).
        llm_config: LLM configuration (unused here).

    Returns:
        ChatCompletionResponse validated from ``response_data``.
    """
    # No transformation needed — upstream code has already converted the
    # Responses-API shape into ChatCompletion format.
    return ChatCompletionResponse(**response_data)
def extract_usage_statistics(self, response_data: dict | None, llm_config: LLMConfig) -> LettaUsageStatistics:
    """Extract usage statistics from ChatGPT OAuth response and return as LettaUsageStatistics."""
    # Missing response or missing usage section both yield zeroed statistics.
    usage = (response_data or {}).get("usage") or {}
    if not usage:
        return LettaUsageStatistics()

    # `or 0` also guards against explicit None values in the payload.
    prompt = usage.get("prompt_tokens") or 0
    completion = usage.get("completion_tokens") or 0
    # Fall back to summing the parts when the backend omits the total.
    total = usage.get("total_tokens") or (prompt + completion)
    return LettaUsageStatistics(
        prompt_tokens=prompt,
        completion_tokens=completion,
        total_tokens=total,
    )
@trace_method
async def stream_async(
    self,
    request_data: dict,
    llm_config: LLMConfig,
) -> AsyncStreamWrapper:
    """Stream response from ChatGPT backend.

    Note: ChatGPT backend uses SSE by default. This returns a custom
    async generator that yields ResponseStreamEvent objects compatible
    with the OpenAI SDK.

    Transient errors (self._RETRYABLE_ERRORS) are retried with exponential
    backoff, but only while no event has been yielded yet — once the caller
    has consumed partial data a retry would duplicate it, so errors are
    propagated instead.

    Args:
        request_data: Request payload.
        llm_config: LLM configuration.

    Returns:
        Async generator yielding ResponseStreamEvent objects.
    """
    # Strip invalid surrogate code points that would break JSON serialization.
    request_data = sanitize_unicode_surrogates(request_data)
    _, creds = await self._get_provider_and_credentials_async(llm_config)
    headers = self._build_headers(creds)
    endpoint = llm_config.model_endpoint or CHATGPT_CODEX_ENDPOINT

    async def stream_generator():
        # Track output item index for proper event construction
        output_index = 0
        # Track sequence_number in case backend doesn't provide it
        # (OpenAI SDK expects incrementing sequence numbers starting at 0)
        sequence_counter = 0
        # Track whether we've yielded any events — once we have, we can't
        # transparently retry because the caller has already consumed partial data.
        has_yielded = False
        for attempt in range(self.MAX_RETRIES):
            try:
                async with httpx.AsyncClient() as client:
                    async with client.stream(
                        "POST",
                        endpoint,
                        json=request_data,
                        headers=headers,
                        timeout=120.0,
                    ) as response:
                        # Check for error status
                        if response.status_code != 200:
                            error_body = await response.aread()
                            logger.error(f"ChatGPT SSE error: {response.status_code} - {error_body}")
                            raise self._handle_http_error_from_status(response.status_code, error_body.decode())
                        async for line in response.aiter_lines():
                            if not line or not line.startswith("data: "):
                                continue
                            data_str = line[6:]
                            if data_str == "[DONE]":
                                break
                            try:
                                raw_event = json.loads(data_str)
                                event_type = raw_event.get("type")
                                # Check for error events from the API (context window, rate limit, etc.)
                                if event_type == "error":
                                    logger.error(f"ChatGPT SSE error event: {json.dumps(raw_event, default=str)[:1000]}")
                                    raise self._handle_sse_error_event(raw_event)
                                # Check for response.failed or response.incomplete events
                                if event_type in ("response.failed", "response.incomplete"):
                                    logger.error(f"ChatGPT SSE {event_type} event: {json.dumps(raw_event, default=str)[:1000]}")
                                    resp_obj = raw_event.get("response", {})
                                    error_info = resp_obj.get("error", {})
                                    if error_info:
                                        raise self._handle_sse_error_event({"error": error_info, "type": event_type})
                                    else:
                                        raise LLMBadRequestError(
                                            message=f"ChatGPT request failed with status '{event_type}' (no error details provided)",
                                            code=ErrorCode.INTERNAL_SERVER_ERROR,
                                        )
                                # Use backend-provided sequence_number if available, else use counter
                                # This ensures proper ordering even if backend doesn't provide it
                                if "sequence_number" not in raw_event:
                                    raw_event["sequence_number"] = sequence_counter
                                sequence_counter = raw_event["sequence_number"] + 1
                                # Track output index for output_item.added events
                                if event_type == "response.output_item.added":
                                    output_index = raw_event.get("output_index", output_index)
                                # Convert to OpenAI SDK ResponseStreamEvent
                                sdk_event = self._convert_to_sdk_event(raw_event, output_index)
                                if sdk_event:
                                    yield sdk_event
                                    has_yielded = True
                            except json.JSONDecodeError:
                                # Malformed SSE payloads are skipped, not fatal.
                                logger.warning(f"Failed to parse SSE event: {data_str[:100]}")
                                continue
                        # Stream completed successfully
                        return
            except self._RETRYABLE_ERRORS as e:
                if has_yielded or attempt >= self.MAX_RETRIES - 1:
                    # Already yielded partial data or exhausted retries — must propagate
                    raise
                # Exponential backoff: 1s, 2s, 4s, ...
                wait = 2**attempt
                logger.warning(
                    f"[ChatGPT] Transient error on stream (attempt {attempt + 1}/{self.MAX_RETRIES}), "
                    f"retrying in {wait}s: {type(e).__name__}: {e}"
                )
                await asyncio.sleep(wait)

    # Wrap the async generator in AsyncStreamWrapper to provide context manager protocol
    return AsyncStreamWrapper(stream_generator())
def _convert_to_sdk_event(
    self,
    raw_event: dict,
    output_index: int = 0,
) -> Optional[ResponseStreamEvent]:
    """Convert raw ChatGPT backend SSE event to OpenAI SDK ResponseStreamEvent.

    Uses model_construct() to bypass validation since ChatGPT backend doesn't
    provide all fields required by OpenAI SDK (e.g., sequence_number).

    This is a pure dispatch on ``raw_event["type"]``; every branch builds the
    corresponding SDK event from the raw dict, defaulting missing fields.
    Unhandled event types are logged and dropped (None is returned).

    Args:
        raw_event: Raw SSE event data from ChatGPT backend.
        output_index: Current output item index.

    Returns:
        OpenAI SDK ResponseStreamEvent or None if event type not handled.
    """
    event_type = raw_event.get("type")
    response_id = raw_event.get("response_id", "")
    seq_num = raw_event.get("sequence_number", 0)

    # response.created -> ResponseCreatedEvent
    if event_type == "response.created":
        response_data = raw_event.get("response", {})
        return ResponseCreatedEvent.model_construct(
            type="response.created",
            sequence_number=seq_num,
            response=Response.model_construct(
                id=response_data.get("id", response_id),
                created_at=response_data.get("created_at", 0),
                model=response_data.get("model", ""),
                object="response",
                output=[],
                status=response_data.get("status", "in_progress"),
                parallel_tool_calls=response_data.get("parallel_tool_calls", True),
            ),
        )
    # response.in_progress -> ResponseInProgressEvent
    elif event_type == "response.in_progress":
        response_data = raw_event.get("response", {})
        return ResponseInProgressEvent.model_construct(
            type="response.in_progress",
            sequence_number=seq_num,
            response=Response.model_construct(
                id=response_data.get("id", response_id),
                created_at=response_data.get("created_at", 0),
                model=response_data.get("model", ""),
                object="response",
                output=[],
                status="in_progress",
                parallel_tool_calls=response_data.get("parallel_tool_calls", True),
            ),
        )
    # response.output_item.added -> ResponseOutputItemAddedEvent
    elif event_type == "response.output_item.added":
        item_data = raw_event.get("item", {})
        item_type = item_data.get("type")
        idx = raw_event.get("output_index", output_index)
        if item_type == "message":
            item = ResponseOutputMessage.model_construct(
                id=item_data.get("id", ""),
                type="message",
                role=item_data.get("role", "assistant"),
                content=[],
                status=item_data.get("status", "in_progress"),
            )
        elif item_type == "function_call":
            item = ResponseFunctionToolCall.model_construct(
                id=item_data.get("id", ""),
                type="function_call",
                call_id=item_data.get("call_id", ""),
                name=item_data.get("name", ""),
                arguments=item_data.get("arguments", ""),
                status=item_data.get("status", "in_progress"),
            )
        elif item_type == "reasoning":
            # Reasoning item for o-series, GPT-5 models
            item = ResponseReasoningItem.model_construct(
                id=item_data.get("id", ""),
                type="reasoning",
                summary=item_data.get("summary", []),
                status=item_data.get("status", "in_progress"),
            )
        else:
            # Unknown item type, skip
            return None
        return ResponseOutputItemAddedEvent.model_construct(
            type="response.output_item.added",
            sequence_number=seq_num,
            output_index=idx,
            item=item,
        )
    # response.content_part.added -> ResponseContentPartAddedEvent
    elif event_type == "response.content_part.added":
        part_data = raw_event.get("part", {})
        return ResponseContentPartAddedEvent.model_construct(
            type="response.content_part.added",
            sequence_number=seq_num,
            item_id=raw_event.get("item_id", ""),
            output_index=raw_event.get("output_index", output_index),
            content_index=raw_event.get("content_index", 0),
            part=ResponseOutputText.model_construct(
                type="output_text",
                text=part_data.get("text", ""),
                annotations=[],
            ),
        )
    # response.output_text.delta -> ResponseTextDeltaEvent
    # Note: OpenAI SDK uses "response.output_text.delta" (matching ChatGPT backend)
    elif event_type == "response.output_text.delta":
        return ResponseTextDeltaEvent.model_construct(
            type="response.output_text.delta",
            sequence_number=seq_num,
            item_id=raw_event.get("item_id", ""),
            output_index=raw_event.get("output_index", output_index),
            content_index=raw_event.get("content_index", 0),
            delta=raw_event.get("delta", ""),
        )
    # response.output_text.done -> ResponseTextDoneEvent
    elif event_type == "response.output_text.done":
        return ResponseTextDoneEvent.model_construct(
            type="response.output_text.done",
            sequence_number=seq_num,
            item_id=raw_event.get("item_id", ""),
            output_index=raw_event.get("output_index", output_index),
            content_index=raw_event.get("content_index", 0),
            text=raw_event.get("text", ""),
        )
    # response.function_call_arguments.delta -> ResponseFunctionCallArgumentsDeltaEvent
    elif event_type == "response.function_call_arguments.delta":
        return ResponseFunctionCallArgumentsDeltaEvent.model_construct(
            type="response.function_call_arguments.delta",
            sequence_number=seq_num,
            item_id=raw_event.get("item_id", ""),
            output_index=raw_event.get("output_index", output_index),
            call_id=raw_event.get("call_id", ""),
            delta=raw_event.get("delta", ""),
        )
    # response.function_call_arguments.done -> ResponseFunctionCallArgumentsDoneEvent
    elif event_type == "response.function_call_arguments.done":
        return ResponseFunctionCallArgumentsDoneEvent.model_construct(
            type="response.function_call_arguments.done",
            sequence_number=seq_num,
            item_id=raw_event.get("item_id", ""),
            output_index=raw_event.get("output_index", output_index),
            call_id=raw_event.get("call_id", ""),
            arguments=raw_event.get("arguments", ""),
        )
    # response.content_part.done -> ResponseContentPartDoneEvent
    elif event_type == "response.content_part.done":
        part_data = raw_event.get("part", {})
        return ResponseContentPartDoneEvent.model_construct(
            type="response.content_part.done",
            sequence_number=seq_num,
            item_id=raw_event.get("item_id", ""),
            output_index=raw_event.get("output_index", output_index),
            content_index=raw_event.get("content_index", 0),
            part=ResponseOutputText.model_construct(
                type="output_text",
                text=part_data.get("text", ""),
                annotations=[],
            ),
        )
    # response.output_item.done -> ResponseOutputItemDoneEvent
    elif event_type == "response.output_item.done":
        item_data = raw_event.get("item", {})
        item_type = item_data.get("type")
        idx = raw_event.get("output_index", output_index)
        if item_type == "message":
            # Build content from item data
            content_list = []
            for c in item_data.get("content", []):
                if c.get("type") in ("output_text", "text"):
                    content_list.append(
                        ResponseOutputText.model_construct(
                            type="output_text",
                            text=c.get("text", ""),
                            annotations=[],
                        )
                    )
            item = ResponseOutputMessage.model_construct(
                id=item_data.get("id", ""),
                type="message",
                role=item_data.get("role", "assistant"),
                content=content_list,
                status=item_data.get("status", "completed"),
            )
        elif item_type == "function_call":
            item = ResponseFunctionToolCall.model_construct(
                id=item_data.get("id", ""),
                type="function_call",
                call_id=item_data.get("call_id", ""),
                name=item_data.get("name", ""),
                arguments=item_data.get("arguments", ""),
                status=item_data.get("status", "completed"),
            )
        elif item_type == "reasoning":
            # Build summary from item data
            summary_list = item_data.get("summary", [])
            item = ResponseReasoningItem.model_construct(
                id=item_data.get("id", ""),
                type="reasoning",
                summary=summary_list,
                status=item_data.get("status", "completed"),
            )
        else:
            return None
        return ResponseOutputItemDoneEvent.model_construct(
            type="response.output_item.done",
            sequence_number=seq_num,
            output_index=idx,
            item=item,
        )
    # response.completed or response.done -> ResponseCompletedEvent
    elif event_type in ("response.completed", "response.done"):
        response_data = raw_event.get("response", {})
        # Build output items from response data
        output_items = []
        for out in response_data.get("output", []):
            out_type = out.get("type")
            if out_type == "message":
                content_list = []
                for c in out.get("content", []):
                    if c.get("type") in ("output_text", "text"):
                        content_list.append(
                            ResponseOutputText.model_construct(
                                type="output_text",
                                text=c.get("text", ""),
                                annotations=[],
                            )
                        )
                output_items.append(
                    ResponseOutputMessage.model_construct(
                        id=out.get("id", ""),
                        type="message",
                        role=out.get("role", "assistant"),
                        content=content_list,
                        status=out.get("status", "completed"),
                    )
                )
            elif out_type == "function_call":
                output_items.append(
                    ResponseFunctionToolCall.model_construct(
                        id=out.get("id", ""),
                        type="function_call",
                        call_id=out.get("call_id", ""),
                        name=out.get("name", ""),
                        arguments=out.get("arguments", ""),
                        status=out.get("status", "completed"),
                    )
                )
        return ResponseCompletedEvent.model_construct(
            type="response.completed",
            sequence_number=seq_num,
            response=Response.model_construct(
                id=response_data.get("id", response_id),
                created_at=response_data.get("created_at", 0),
                model=response_data.get("model", ""),
                object="response",
                output=output_items,
                status=response_data.get("status", "completed"),
                parallel_tool_calls=response_data.get("parallel_tool_calls", True),
                usage=response_data.get("usage"),
            ),
        )
    # Reasoning events (for o-series, GPT-5 models)
    # response.reasoning_summary_part.added -> ResponseReasoningSummaryPartAddedEvent
    elif event_type == "response.reasoning_summary_part.added":
        part_data = raw_event.get("part", {})
        # Use a simple dict for Part since we use model_construct
        part = {"text": part_data.get("text", ""), "type": part_data.get("type", "summary_text")}
        return ResponseReasoningSummaryPartAddedEvent.model_construct(
            type="response.reasoning_summary_part.added",
            sequence_number=seq_num,
            item_id=raw_event.get("item_id", ""),
            output_index=raw_event.get("output_index", output_index),
            summary_index=raw_event.get("summary_index", 0),
            part=part,
        )
    # response.reasoning_summary_text.delta -> ResponseReasoningSummaryTextDeltaEvent
    elif event_type == "response.reasoning_summary_text.delta":
        return ResponseReasoningSummaryTextDeltaEvent.model_construct(
            type="response.reasoning_summary_text.delta",
            sequence_number=seq_num,
            item_id=raw_event.get("item_id", ""),
            output_index=raw_event.get("output_index", output_index),
            summary_index=raw_event.get("summary_index", 0),
            delta=raw_event.get("delta", ""),
        )
    # response.reasoning_summary_text.done -> ResponseReasoningSummaryTextDoneEvent
    elif event_type == "response.reasoning_summary_text.done":
        return ResponseReasoningSummaryTextDoneEvent.model_construct(
            type="response.reasoning_summary_text.done",
            sequence_number=seq_num,
            item_id=raw_event.get("item_id", ""),
            output_index=raw_event.get("output_index", output_index),
            summary_index=raw_event.get("summary_index", 0),
            text=raw_event.get("text", ""),
        )
    # response.reasoning_summary_part.done -> ResponseReasoningSummaryPartDoneEvent
    elif event_type == "response.reasoning_summary_part.done":
        part_data = raw_event.get("part", {})
        part = {"text": part_data.get("text", ""), "type": part_data.get("type", "summary_text")}
        return ResponseReasoningSummaryPartDoneEvent.model_construct(
            type="response.reasoning_summary_part.done",
            sequence_number=seq_num,
            item_id=raw_event.get("item_id", ""),
            output_index=raw_event.get("output_index", output_index),
            summary_index=raw_event.get("summary_index", 0),
            part=part,
        )

    # Unhandled event types
    logger.warning(f"Unhandled ChatGPT SSE event type: {event_type}")
    return None
@staticmethod
def _is_upstream_connection_error(error_body: str) -> bool:
"""Check if an error body indicates an upstream connection/proxy failure."""
lower = error_body.lower()
return "upstream connect error" in lower or "reset before headers" in lower or "connection termination" in lower
def _handle_http_error_from_status(self, status_code: int, error_body: str) -> Exception:
    """Create appropriate exception from HTTP status code.

    Mirrors the mapping in `_handle_http_error` so the streaming path (which
    only has a raw status code and body, not an httpx error object)
    classifies failures the same way. In particular, a 400 whose body
    mentions context/token limits is surfaced as ContextWindowExceededError
    so callers can react (e.g. trigger summarization) instead of treating it
    as a generic bad request.

    Args:
        status_code: HTTP status code.
        error_body: Error response body.

    Returns:
        Appropriate LLM exception.
    """
    if status_code == 401:
        return LLMAuthenticationError(
            message=f"ChatGPT authentication failed: {error_body}",
            code=ErrorCode.UNAUTHENTICATED,
        )
    elif status_code == 429:
        return LLMRateLimitError(
            message=f"ChatGPT rate limit exceeded: {error_body}",
            code=ErrorCode.RATE_LIMIT_EXCEEDED,
        )
    elif status_code == 400:
        # Keep consistent with _handle_http_error: detect context-window
        # overflows hidden inside a generic 400 response.
        lowered = error_body.lower()
        if "context" in lowered or "token" in lowered:
            return ContextWindowExceededError(
                message=f"ChatGPT context window exceeded: {error_body}",
            )
        return LLMBadRequestError(
            message=f"ChatGPT bad request: {error_body}",
            code=ErrorCode.INVALID_ARGUMENT,
        )
    elif status_code == 502 or (status_code >= 500 and self._is_upstream_connection_error(error_body)):
        # 502s (and 5xx bodies that look like proxy resets) are
        # connection-level failures, distinct from application errors.
        return LLMConnectionError(
            message=f"ChatGPT upstream connection error: {error_body}",
            code=ErrorCode.INTERNAL_SERVER_ERROR,
        )
    elif status_code >= 500:
        return LLMServerError(
            message=f"ChatGPT API error: {error_body}",
            code=ErrorCode.INTERNAL_SERVER_ERROR,
        )
    else:
        return LLMBadRequestError(
            message=f"ChatGPT request failed ({status_code}): {error_body}",
            code=ErrorCode.INTERNAL_SERVER_ERROR,
        )
def is_reasoning_model(self, llm_config: LLMConfig) -> bool:
    """Check if model is a reasoning model.

    Args:
        llm_config: LLM configuration.

    Returns:
        True if model supports extended reasoning.
    """
    # Substring match against the reasoning model families (o-series, GPT-5).
    name = llm_config.model.lower()
    return any(marker in name for marker in ("o1", "o3", "o4", "gpt-5"))
@trace_method
def handle_llm_error(self, e: Exception, llm_config: Optional[LLMConfig] = None) -> Exception:
    """Map ChatGPT-specific errors to common LLMError types.

    Args:
        e: Original exception.
        llm_config: Optional LLM config to determine if this is a BYOK key.

    Returns:
        Mapped LLMError subclass.
    """
    is_byok = None
    if llm_config:
        is_byok = llm_config.provider_category == ProviderCategory.byok

    # Typed LLM/Letta errors (e.g. produced by SSE error handling) pass through untouched.
    if isinstance(e, LettaError):
        return e

    # HTTP-level failures get the full status-code mapping.
    if isinstance(e, httpx.HTTPStatusError):
        return self._handle_http_error(e, is_byok=is_byok)

    # Network errors can surface mid-stream when the connection is closed
    # unexpectedly while reading or writing; classify as connection errors.
    if isinstance(e, (httpx.ReadError, httpx.WriteError, httpx.ConnectError)):
        logger.warning(f"[ChatGPT] Network error during streaming: {type(e).__name__}: {e}")
        return LLMConnectionError(
            message=f"Network error during ChatGPT streaming: {str(e)}",
            code=ErrorCode.INTERNAL_SERVER_ERROR,
            details={"cause": str(e.__cause__) if e.__cause__ else None, "error_type": type(e).__name__, "is_byok": is_byok},
        )

    # Anything else falls back to the generic provider mapping.
    return super().handle_llm_error(e, llm_config=llm_config)
def _handle_http_error(self, e: httpx.HTTPStatusError, is_byok: bool | None = None) -> Exception:
    """Handle HTTP status errors from ChatGPT backend.

    Args:
        e: HTTP status error.
        is_byok: Whether the request used a BYOK key.

    Returns:
        Appropriate LLMError subclass.
    """
    status = e.response.status_code
    # Prefer the structured error message from the JSON body; fall back to str(e).
    try:
        payload = e.response.json()
        message = payload.get("error", {}).get("message", str(e))
    except Exception:
        message = str(e)
    details = {"is_byok": is_byok}

    if status == 401:
        return LLMAuthenticationError(
            message=f"ChatGPT authentication failed: {message}",
            code=ErrorCode.UNAUTHENTICATED,
            details=details,
        )
    if status == 429:
        return LLMRateLimitError(
            message=f"ChatGPT rate limit exceeded: {message}",
            code=ErrorCode.RATE_LIMIT_EXCEEDED,
            details=details,
        )
    if status == 400:
        # A 400 mentioning context/token limits is really a context overflow.
        lowered = message.lower()
        if "context" in lowered or "token" in lowered:
            return ContextWindowExceededError(
                message=f"ChatGPT context window exceeded: {message}",
                details=details,
            )
        return LLMBadRequestError(
            message=f"ChatGPT bad request: {message}",
            code=ErrorCode.INVALID_ARGUMENT,
            details=details,
        )
    if status == 502 or (status >= 500 and self._is_upstream_connection_error(message)):
        # Proxy/connection-level failure rather than an application error.
        return LLMConnectionError(
            message=f"ChatGPT upstream connection error: {message}",
            code=ErrorCode.INTERNAL_SERVER_ERROR,
            details=details,
        )
    if status >= 500:
        return LLMServerError(
            message=f"ChatGPT API error: {message}",
            code=ErrorCode.INTERNAL_SERVER_ERROR,
            details=details,
        )
    return LLMBadRequestError(
        message=f"ChatGPT request failed ({status}): {message}",
        code=ErrorCode.INTERNAL_SERVER_ERROR,
        details=details,
    )
def _handle_sse_error_event(self, raw_event: dict) -> Exception:
    """Create appropriate exception from an SSE error or response.failed event.

    The ChatGPT backend can return errors as SSE events within a 200 OK stream,
    e.g. {"type": "error", "error": {"type": "invalid_request_error",
    "code": "context_length_exceeded", "message": "..."}}.

    Args:
        raw_event: Raw SSE event data containing an error.

    Returns:
        Appropriate LLM exception.
    """
    err = raw_event.get("error", {})
    # The error payload may be a bare string or a structured object.
    if isinstance(err, str):
        error_message, error_code = err, None
    else:
        error_message = err.get("message", "Unknown ChatGPT SSE error")
        error_code = err.get("code") or None

    if error_code == "context_length_exceeded":
        return ContextWindowExceededError(
            message=f"ChatGPT context window exceeded: {error_message}",
        )
    if error_code == "rate_limit_exceeded":
        return LLMRateLimitError(
            message=f"ChatGPT rate limit exceeded: {error_message}",
            code=ErrorCode.RATE_LIMIT_EXCEEDED,
        )
    if error_code == "authentication_error":
        return LLMAuthenticationError(
            message=f"ChatGPT authentication failed: {error_message}",
            code=ErrorCode.UNAUTHENTICATED,
        )
    if error_code == "server_error":
        return LLMServerError(
            message=f"ChatGPT API error: {error_message}",
            code=ErrorCode.INTERNAL_SERVER_ERROR,
        )
    # Unknown or missing code: treat as a bad request with the raw code attached.
    return LLMBadRequestError(
        message=f"ChatGPT SSE error ({error_code or 'unknown'}): {error_message}",
        code=ErrorCode.INVALID_ARGUMENT,
    )
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/llm_api/chatgpt_oauth_client.py",
"license": "Apache License 2.0",
"lines": 1059,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/orm/blocks_conversations.py | from sqlalchemy import ForeignKey, Index, String, UniqueConstraint
from sqlalchemy.orm import Mapped, mapped_column
from letta.orm.base import Base
class BlocksConversations(Base):
    """Tracks conversation-specific blocks that override agent defaults for isolated memory."""

    __tablename__ = "blocks_conversations"
    __table_args__ = (
        # A conversation can hold at most one override block per label.
        UniqueConstraint("conversation_id", "block_label", name="unique_label_per_conversation"),
        # A given block may be attached to a conversation at most once.
        UniqueConstraint("conversation_id", "block_id", name="unique_conversation_block"),
        # Supports reverse lookups: which conversations reference a block.
        Index("ix_blocks_conversations_block_id", "block_id"),
    )

    # Composite primary key (conversation_id, block_id, block_label); deleting
    # the parent conversation or block cascades to this association row.
    conversation_id: Mapped[str] = mapped_column(String, ForeignKey("conversations.id", ondelete="CASCADE"), primary_key=True)
    block_id: Mapped[str] = mapped_column(String, ForeignKey("block.id", ondelete="CASCADE"), primary_key=True)
    block_label: Mapped[str] = mapped_column(String, primary_key=True)
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/orm/blocks_conversations.py",
"license": "Apache License 2.0",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:letta/orm/blocks_tags.py | from datetime import datetime
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from letta.orm.block import Block
from sqlalchemy import Boolean, DateTime, ForeignKey, Index, String, UniqueConstraint, func, text
from sqlalchemy.orm import Mapped, mapped_column, relationship
from letta.orm.base import Base
class BlocksTags(Base):
    """Association table mapping memory blocks to string tags, scoped to an organization."""

    __tablename__ = "blocks_tags"
    __table_args__ = (
        UniqueConstraint("block_id", "tag", name="unique_block_tag"),
        # Composite indexes in both directions: block -> tags and tag -> blocks.
        Index("ix_blocks_tags_block_id_tag", "block_id", "tag"),
        Index("ix_blocks_tags_tag_block_id", "tag", "block_id"),
    )

    # Primary key columns
    # NOTE(review): annotation is Mapped[String] (the SQLAlchemy type) rather
    # than Mapped[str] as used below — confirm this is intentional.
    block_id: Mapped[String] = mapped_column(String, ForeignKey("block.id"), primary_key=True)
    tag: Mapped[str] = mapped_column(String, doc="The name of the tag associated with the block.", primary_key=True)

    # Organization scoping for filtering
    organization_id: Mapped[str] = mapped_column(String, ForeignKey("organizations.id"), nullable=False)

    # Timestamps for filtering by date
    created_at: Mapped[Optional[datetime]] = mapped_column(DateTime(timezone=True), server_default=func.now())
    updated_at: Mapped[Optional[datetime]] = mapped_column(DateTime(timezone=True), server_default=func.now())

    # Soft delete support
    is_deleted: Mapped[bool] = mapped_column(Boolean, server_default=text("FALSE"))

    # Audit fields
    _created_by_id: Mapped[Optional[str]] = mapped_column(String, nullable=True)
    _last_updated_by_id: Mapped[Optional[str]] = mapped_column(String, nullable=True)

    # Relationships
    block: Mapped["Block"] = relationship("Block", back_populates="tags")
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/orm/blocks_tags.py",
"license": "Apache License 2.0",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:letta/schemas/providers/chatgpt_oauth.py | """ChatGPT OAuth Provider - uses chatgpt.com/backend-api/codex with OAuth authentication."""
import json
from datetime import datetime, timezone
from typing import TYPE_CHECKING, Literal, Optional
import httpx
from pydantic import BaseModel, Field
from letta.errors import ErrorCode, LLMAuthenticationError, LLMError
from letta.log import get_logger
from letta.schemas.enums import ProviderCategory, ProviderType
from letta.schemas.llm_config import LLMConfig
from letta.schemas.providers.base import Provider
from letta.schemas.secret import Secret
if TYPE_CHECKING:
from letta.orm import User
logger = get_logger(__name__)
# ChatGPT Backend API Configuration
CHATGPT_CODEX_ENDPOINT = "https://chatgpt.com/backend-api/codex/responses"
CHATGPT_TOKEN_REFRESH_URL = "https://auth.openai.com/oauth/token"

# OAuth client_id for Codex CLI (required for token refresh)
# Must match the client_id used in the initial OAuth authorization flow
# This is the public client_id used by Codex CLI / Letta Code
CHATGPT_OAUTH_CLIENT_ID = "app_EMoamEEZ73f0CkXaXp7hrann"

# Token refresh buffer (refresh 5 minutes before expiry)
TOKEN_REFRESH_BUFFER_SECONDS = 300

# Hardcoded models available via ChatGPT backend
# These are models that can be accessed through ChatGPT Plus/Pro subscriptions
# Model list based on opencode-openai-codex-auth plugin presets
# Reasoning effort levels are configured via llm_config.reasoning_effort
# Each entry: model name plus its context window in tokens.
CHATGPT_MODELS = [
    # GPT-5.3 codex
    {"name": "gpt-5.3-codex", "context_window": 272000},
    # GPT-5.2 models (supports none/low/medium/high/xhigh reasoning)
    {"name": "gpt-5.2", "context_window": 272000},
    {"name": "gpt-5.2-codex", "context_window": 272000},
    # GPT-5.1 models
    {"name": "gpt-5.1", "context_window": 272000},
    {"name": "gpt-5.1-codex", "context_window": 272000},
    {"name": "gpt-5.1-codex-mini", "context_window": 272000},
    {"name": "gpt-5.1-codex-max", "context_window": 272000},
    # GPT-5 Codex models (original)
    {"name": "gpt-5-codex-mini", "context_window": 272000},
    # GPT-4 models (for ChatGPT Plus users)
    {"name": "gpt-4o", "context_window": 128000},
    {"name": "gpt-4o-mini", "context_window": 128000},
    {"name": "o1", "context_window": 200000},
    {"name": "o1-pro", "context_window": 200000},
    {"name": "o3", "context_window": 200000},
    {"name": "o3-mini", "context_window": 200000},
    {"name": "o4-mini", "context_window": 200000},
]
class ChatGPTOAuthCredentials(BaseModel):
    """OAuth credentials for ChatGPT backend API access.

    These credentials are stored as JSON in the provider's api_key_enc field.
    """

    access_token: str = Field(..., description="OAuth access token for ChatGPT API")
    refresh_token: str = Field(..., description="OAuth refresh token for obtaining new access tokens")
    account_id: str = Field(..., description="ChatGPT account ID for the ChatGPT-Account-Id header")
    expires_at: int = Field(..., description="Unix timestamp when the access_token expires")

    def is_expired(self, buffer_seconds: int = TOKEN_REFRESH_BUFFER_SECONDS) -> bool:
        """Check if token is expired or will expire within buffer_seconds.

        Handles both seconds and milliseconds timestamps (auto-detects based on magnitude).
        """
        deadline = self.expires_at
        # Auto-detect milliseconds vs seconds: anything above 10^12 cannot be
        # a seconds timestamp (that would be year 33658), so treat it as ms.
        if deadline > 10**12:
            deadline //= 1000
        now = datetime.now(timezone.utc).timestamp()
        expired = now >= (deadline - buffer_seconds)
        logger.debug(f"Token expiry check: current={now}, expires_at={deadline}, buffer={buffer_seconds}, expired={expired}")
        return expired

    def to_json(self) -> str:
        """Serialize to JSON string for storage in api_key_enc."""
        return self.model_dump_json()

    @classmethod
    def from_json(cls, json_str: str) -> "ChatGPTOAuthCredentials":
        """Deserialize from JSON string stored in api_key_enc."""
        return cls(**json.loads(json_str))
class ChatGPTOAuthProvider(Provider):
"""
ChatGPT OAuth Provider for accessing ChatGPT's backend-api with OAuth tokens.
This provider enables using ChatGPT Plus/Pro subscription credentials to access
OpenAI models through the ChatGPT backend API at chatgpt.com/backend-api/codex.
OAuth credentials are stored as JSON in the api_key_enc field:
{
"access_token": "...",
"refresh_token": "...",
"account_id": "...",
"expires_at": 1234567890
}
The client (e.g., Letta Code) performs the OAuth flow and sends the credentials
to the backend via the provider creation API.
"""
provider_type: Literal[ProviderType.chatgpt_oauth] = Field(
ProviderType.chatgpt_oauth,
description="The type of the provider.",
)
provider_category: ProviderCategory = Field(
ProviderCategory.byok, # Always BYOK since it uses user's OAuth credentials
description="The category of the provider (always byok for OAuth)",
)
base_url: str = Field(
CHATGPT_CODEX_ENDPOINT,
description="Base URL for the ChatGPT backend API.",
)
async def get_oauth_credentials(self) -> Optional[ChatGPTOAuthCredentials]:
    """Retrieve and parse OAuth credentials from api_key_enc.

    Returns:
        ChatGPTOAuthCredentials if valid credentials exist, None otherwise.
    """
    secret = self.api_key_enc
    if not secret:
        return None

    raw = await secret.get_plaintext_async()
    if not raw:
        return None

    # Stored value is the JSON serialization produced by ChatGPTOAuthCredentials.to_json().
    try:
        return ChatGPTOAuthCredentials.from_json(raw)
    except (json.JSONDecodeError, ValueError) as e:
        logger.error(f"Failed to parse ChatGPT OAuth credentials: {e}")
        return None
async def refresh_token_if_needed(
    self, actor: Optional["User"] = None, force_refresh: bool = False
) -> Optional[ChatGPTOAuthCredentials]:
    """Check if token needs refresh and refresh if necessary.

    This method is called before each API request to ensure valid credentials.
    Tokens are refreshed 5 minutes before expiry to avoid edge cases.

    Args:
        actor: The user performing the action. Required for persisting refreshed credentials.
        force_refresh: If True, always refresh the token regardless of expiry. For testing only.

    Returns:
        Updated credentials if successful, None on failure.
    """
    creds = await self.get_oauth_credentials()
    if not creds:
        # No credentials stored (or they failed to parse) — nothing to refresh.
        return None
    if not creds.is_expired() and not force_refresh:
        # Fast path: current access token is still valid, no refresh needed.
        return creds

    # Token needs refresh
    logger.debug(f"ChatGPT OAuth token refresh triggered (expired={creds.is_expired()}, force={force_refresh})")
    try:
        new_creds = await self._perform_token_refresh(creds)
        # Update stored credentials in memory and persist to database
        await self._update_stored_credentials(new_creds, actor=actor)
        return new_creds
    except Exception as e:
        logger.error(f"Failed to refresh ChatGPT OAuth token: {e}")
        # If refresh fails but original access_token is still valid, use it
        # (only reachable when force_refresh was set on a non-expired token).
        if not creds.is_expired():
            logger.warning("Token refresh failed, but original access_token is still valid - using existing token")
            return creds
        # Both refresh failed AND token is expired - return None to trigger auth error
        return None
    async def _perform_token_refresh(self, creds: ChatGPTOAuthCredentials) -> ChatGPTOAuthCredentials:
        """Perform OAuth token refresh with OpenAI's token endpoint.

        Args:
            creds: Current credentials containing the refresh_token.

        Returns:
            New ChatGPTOAuthCredentials with refreshed access_token.

        Raises:
            LLMAuthenticationError: If refresh fails due to invalid credentials.
            LLMError: If refresh fails due to network or server error.
        """
        async with httpx.AsyncClient() as client:
            try:
                response = await client.post(
                    CHATGPT_TOKEN_REFRESH_URL,
                    data={
                        "grant_type": "refresh_token",
                        "refresh_token": creds.refresh_token,
                        "client_id": CHATGPT_OAUTH_CLIENT_ID,
                    },
                    headers={
                        "Content-Type": "application/x-www-form-urlencoded",
                    },
                    timeout=30.0,
                )
                response.raise_for_status()
                data = response.json()
                # Calculate new expiry time; default to 1 hour if the server omits expires_in.
                expires_in = data.get("expires_in", 3600)
                new_expires_at = int(datetime.now(timezone.utc).timestamp()) + expires_in
                new_access_token = data["access_token"]
                # Some servers rotate the refresh token; keep the old one if none returned.
                new_refresh_token = data.get("refresh_token", creds.refresh_token)
                logger.debug(f"ChatGPT OAuth token refreshed, expires_in={expires_in}s")
                return ChatGPTOAuthCredentials(
                    access_token=new_access_token,
                    refresh_token=new_refresh_token,
                    account_id=creds.account_id,  # Account ID doesn't change
                    expires_at=new_expires_at,
                )
            except httpx.HTTPStatusError as e:
                # Log full error details for debugging
                try:
                    error_body = e.response.json()
                    logger.error(f"Token refresh HTTP error: {e.response.status_code} - JSON: {error_body}")
                except Exception:
                    logger.error(f"Token refresh HTTP error: {e.response.status_code} - Text: {e.response.text}")
                if e.response.status_code == 401:
                    raise LLMAuthenticationError(
                        message="Failed to refresh ChatGPT OAuth token: refresh token is invalid or expired",
                        code=ErrorCode.UNAUTHENTICATED,
                    )
                raise LLMError(
                    message=f"Failed to refresh ChatGPT OAuth token: {e}",
                    code=ErrorCode.INTERNAL_SERVER_ERROR,
                )
            except Exception as e:
                # Network errors, JSON decode failures, etc.
                logger.error(f"Token refresh error: {type(e).__name__}: {e}")
                raise LLMError(
                    message=f"Failed to refresh ChatGPT OAuth token: {e}",
                    code=ErrorCode.INTERNAL_SERVER_ERROR,
                )
    async def _update_stored_credentials(self, creds: ChatGPTOAuthCredentials, actor: Optional["User"] = None) -> None:
        """Update stored credentials in memory and persist to database.

        Args:
            creds: New credentials to store.
            actor: The user performing the action. Required for database persistence.
        """
        new_secret = await Secret.from_plaintext_async(creds.to_json())
        # Update in-memory value
        # NOTE(review): object.__setattr__ bypasses normal attribute handling —
        # presumably the schema model is frozen/immutable; confirm.
        object.__setattr__(self, "api_key_enc", new_secret)
        # Persist to database if we have an actor and provider ID
        if actor and self.id:
            try:
                # Lazy imports — presumably to avoid a circular import at module load; confirm.
                from letta.schemas.providers.base import ProviderUpdate
                from letta.services.provider_manager import ProviderManager

                provider_manager = ProviderManager()
                await provider_manager.update_provider_async(
                    provider_id=self.id,
                    provider_update=ProviderUpdate(api_key=creds.to_json()),
                    actor=actor,
                )
            except Exception as e:
                logger.error(f"Failed to persist refreshed credentials to database: {e}")
                # Don't fail the request - we have valid credentials in memory
async def check_api_key(self):
"""Validate the OAuth credentials by checking token validity.
Raises:
ValueError: If no credentials are configured.
LLMAuthenticationError: If credentials are invalid.
"""
creds = await self.get_oauth_credentials()
if not creds:
raise ValueError("No ChatGPT OAuth credentials configured")
# Try to refresh if needed
creds = await self.refresh_token_if_needed()
if not creds:
raise LLMAuthenticationError(
message="Failed to obtain valid ChatGPT OAuth credentials",
code=ErrorCode.UNAUTHENTICATED,
)
# Optionally make a test request to validate
# For now, we just verify we have valid-looking credentials
if not creds.access_token or not creds.account_id:
raise LLMAuthenticationError(
message="ChatGPT OAuth credentials are incomplete",
code=ErrorCode.UNAUTHENTICATED,
)
def get_default_max_output_tokens(self, model_name: str) -> int:
"""Get the default max output tokens for ChatGPT models.
References:
- https://developers.openai.com/api/docs/models/gpt-5
- https://developers.openai.com/api/docs/models/gpt-5-codex
- https://developers.openai.com/api/docs/models/gpt-5.1-codex-max
"""
# GPT-5 family (gpt-5, gpt-5.x, codex variants): 128k max output tokens
if "gpt-5" in model_name:
return 128000
# Reasoning models (o-series) have higher limits
if model_name.startswith("o1") or model_name.startswith("o3") or model_name.startswith("o4"):
return 100000
# GPT-4 models
if "gpt-4" in model_name:
return 16384
return 4096
async def list_llm_models_async(self) -> list[LLMConfig]:
"""List available models from ChatGPT backend.
Returns a hardcoded list of models available via ChatGPT Plus/Pro subscriptions.
"""
creds = await self.get_oauth_credentials()
if not creds:
logger.warning("Cannot list models: no valid ChatGPT OAuth credentials")
return []
configs = []
for model in CHATGPT_MODELS:
model_name = model["name"]
context_window = model["context_window"]
configs.append(
LLMConfig(
model=model_name,
model_endpoint_type="chatgpt_oauth",
model_endpoint=self.base_url,
context_window=context_window,
handle=self.get_handle(model_name),
max_tokens=self.get_default_max_output_tokens(model_name),
provider_name=self.name,
provider_category=self.provider_category,
)
)
return configs
async def list_embedding_models_async(self) -> list:
"""ChatGPT backend does not support embedding models."""
return []
def get_model_context_window(self, model_name: str) -> int | None:
"""Get the context window for a model."""
for model in CHATGPT_MODELS:
if model["name"] == model_name:
return model["context_window"]
return 128000 # Default
    async def get_model_context_window_async(self, model_name: str) -> int | None:
        """Get the context window for a model (async version).

        Thin async wrapper over the synchronous lookup; performs no I/O.
        """
        return self.get_model_context_window(model_name)
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/schemas/providers/chatgpt_oauth.py",
"license": "Apache License 2.0",
"lines": 313,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/services/clickhouse_otel_traces.py | import asyncio
from typing import Any
from urllib.parse import urlparse
from letta.helpers.singleton import singleton
from letta.settings import settings
def _parse_clickhouse_endpoint(endpoint: str) -> tuple[str, int, bool]:
parsed = urlparse(endpoint)
if parsed.scheme in ("http", "https"):
host = parsed.hostname or ""
port = parsed.port or (8443 if parsed.scheme == "https" else 8123)
secure = parsed.scheme == "https"
return host, port, secure
# Fallback: accept raw hostname (possibly with :port)
if ":" in endpoint:
host, port_str = endpoint.rsplit(":", 1)
return host, int(port_str), True
return endpoint, 8443, True
@singleton
class ClickhouseOtelTracesReader:
    """Read-only accessor for OpenTelemetry spans stored in ClickHouse.

    A fresh clickhouse_connect client is built per query (no connection reuse);
    blocking driver calls are offloaded to a worker thread for async callers.
    """

    def __init__(self):
        # Stateless: connection parameters come from settings at query time.
        pass

    def _get_client(self):
        """Build a ClickHouse client from settings.

        Raises:
            ValueError: If the endpoint or password setting is missing/invalid.
        """
        # Imported lazily so the driver is only required when this reader is used.
        import clickhouse_connect

        if not settings.clickhouse_endpoint:
            raise ValueError("CLICKHOUSE_ENDPOINT is required")
        host, port, secure = _parse_clickhouse_endpoint(settings.clickhouse_endpoint)
        if not host:
            raise ValueError("Invalid CLICKHOUSE_ENDPOINT")
        database = settings.clickhouse_database or "otel"
        username = settings.clickhouse_username or "default"
        password = settings.clickhouse_password
        if not password:
            raise ValueError("CLICKHOUSE_PASSWORD is required")
        return clickhouse_connect.get_client(
            host=host,
            port=port,
            username=username,
            password=password,
            database=database,
            secure=secure,
            verify=True,
        )

    def _get_traces_by_trace_id_sync(self, trace_id: str, limit: int, filter_ui_spans: bool = False) -> list[dict[str, Any]]:
        """Fetch spans for a trace id, oldest first, as column-name dicts."""
        client = self._get_client()
        if filter_ui_spans:
            # Only return spans used by the trace viewer UI:
            # - agent_step: step events
            # - *._execute_tool: tool execution details
            # - root spans (no parent): request info
            # - time_to_first_token: TTFT measurement
            query = """
                SELECT *
                FROM otel_traces
                WHERE TraceId = %(trace_id)s
                AND (
                    SpanName = 'agent_step'
                    OR SpanName LIKE '%%._execute_tool'
                    OR ParentSpanId = ''
                    OR SpanName = 'time_to_first_token'
                )
                ORDER BY Timestamp ASC
                LIMIT %(limit)s
            """
        else:
            query = """
                SELECT *
                FROM otel_traces
                WHERE TraceId = %(trace_id)s
                ORDER BY Timestamp ASC
                LIMIT %(limit)s
            """
        result = client.query(query, parameters={"trace_id": trace_id, "limit": limit})
        if not result or not result.result_rows:
            return []
        # Zip each row with the result's column names to produce dicts.
        cols = list(result.column_names)
        return [dict(zip(cols, row)) for row in result.result_rows]

    async def get_traces_by_trace_id_async(
        self, *, trace_id: str, limit: int = 1000, filter_ui_spans: bool = False
    ) -> list[dict[str, Any]]:
        """Async wrapper: run the blocking ClickHouse query in a thread."""
        return await asyncio.to_thread(self._get_traces_by_trace_id_sync, trace_id, limit, filter_ui_spans)
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/clickhouse_otel_traces.py",
"license": "Apache License 2.0",
"lines": 80,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/services/clickhouse_provider_traces.py | import asyncio
import json
from dataclasses import dataclass
from typing import Any
from urllib.parse import urlparse
from letta.helpers.singleton import singleton
from letta.schemas.provider_trace import ProviderTrace
from letta.settings import settings
def _parse_json_maybe(value: str | None) -> dict[str, Any]:
if not value:
return {}
try:
parsed = json.loads(value)
return parsed if isinstance(parsed, dict) else {"_value": parsed}
except Exception:
# Preserve the raw payload if parsing fails (e.g. non-JSON string)
return {"_raw": value}
def _parse_clickhouse_endpoint(endpoint: str) -> tuple[str, int, bool]:
"""Return (host, port, secure) for clickhouse_connect.get_client."""
parsed = urlparse(endpoint)
if parsed.scheme in ("http", "https"):
host = parsed.hostname or ""
port = parsed.port or (8443 if parsed.scheme == "https" else 8123)
secure = parsed.scheme == "https"
return host, port, secure
# Fallback: accept raw hostname (possibly with :port)
if ":" in endpoint:
host, port_str = endpoint.rsplit(":", 1)
return host, int(port_str), True
return endpoint, 8443, True
@dataclass(frozen=True)
class ClickhouseProviderTraceRow:
    """Immutable view of one row selected from the ClickHouse ``llm_traces`` table."""

    # Row timestamp as returned by the driver (type not pinned here).
    created_at: Any
    # Stored id is the bare value; the "provider_trace-" prefix is re-attached on read.
    id: str
    step_id: str
    # Raw JSON payload columns; nullable in ClickHouse, hence Optional.
    request_json: str | None
    response_json: str | None
@singleton
class ClickhouseProviderTraceReader:
    """Read-side accessor for provider traces stored in ClickHouse ``llm_traces``.

    The clickhouse_connect client is created lazily on first use and cached on
    the instance; blocking queries are offloaded to a thread for async callers.
    """

    def __init__(self):
        # Lazily-initialized client; see _get_client.
        self._client = None

    def _get_client(self):
        """Return the cached ClickHouse client, creating it on first call.

        Raises:
            ValueError: If required connection settings are missing or invalid.
        """
        if self._client is not None:
            return self._client
        # Import lazily so OSS users who never enable this flag don't pay import cost.
        import clickhouse_connect

        if not settings.clickhouse_endpoint:
            raise ValueError("CLICKHOUSE_ENDPOINT is required")
        host, port, secure = _parse_clickhouse_endpoint(settings.clickhouse_endpoint)
        if not host:
            raise ValueError("Invalid CLICKHOUSE_ENDPOINT")
        database = settings.clickhouse_database or "otel"
        username = settings.clickhouse_username or "default"
        password = settings.clickhouse_password
        if not password:
            raise ValueError("CLICKHOUSE_PASSWORD is required")
        self._client = clickhouse_connect.get_client(
            host=host,
            port=port,
            username=username,
            password=password,
            database=database,
            secure=secure,
            verify=True,
        )
        return self._client

    def _query_latest_row_for_step_id_sync(self, step_id: str, organization_id: str) -> ClickhouseProviderTraceRow | None:
        """Fetch the newest llm_traces row for (step_id, organization_id), or None."""
        client = self._get_client()
        query = """
            SELECT
                created_at,
                id,
                step_id,
                request_json,
                response_json
            FROM llm_traces
            WHERE step_id = %(step_id)s
            AND organization_id = %(organization_id)s
            ORDER BY created_at DESC
            LIMIT 1
        """
        result = client.query(
            query,
            parameters={
                "step_id": step_id,
                "organization_id": organization_id,
            },
        )
        if not result or not result.result_rows:
            return None
        row = result.result_rows[0]
        # Positional indices match the SELECT column order above.
        return ClickhouseProviderTraceRow(
            created_at=row[0],
            id=row[1],
            step_id=row[2],
            request_json=row[3],
            response_json=row[4],
        )

    async def get_provider_trace_by_step_id_async(self, *, step_id: str, organization_id: str) -> ProviderTrace | None:
        """Async wrapper: run the blocking query in a thread and map the row to a ProviderTrace."""
        row = await asyncio.to_thread(self._query_latest_row_for_step_id_sync, step_id, organization_id)
        if row is None:
            return None
        return ProviderTrace(
            # Re-attach the schema id prefix; ClickHouse stores the bare value.
            id=f"provider_trace-{row.id}",
            step_id=row.step_id,
            request_json=_parse_json_maybe(row.request_json),
            response_json=_parse_json_maybe(row.response_json),
            created_at=row.created_at,
        )
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/clickhouse_provider_traces.py",
"license": "Apache License 2.0",
"lines": 109,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/services/provider_trace_backends/base.py | """Base class for provider trace backends."""
from abc import ABC, abstractmethod
from enum import Enum
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from letta.schemas.provider_trace import ProviderTrace
from letta.schemas.user import User
class ProviderTraceBackend(str, Enum):
    """Supported provider trace storage backends.

    Inherits from ``str`` so members compare equal to (and serialize as) their
    plain-string configuration values, e.g. ``"postgres"``.
    """

    POSTGRES = "postgres"
    CLICKHOUSE = "clickhouse"
    SOCKET = "socket"
class ProviderTraceBackendClient(ABC):
    """Abstract base class for provider trace storage backends.

    Concrete implementations must provide the async create/read pair; the
    synchronous write hook is optional and defaults to a no-op.
    """

    @abstractmethod
    async def create_async(
        self,
        actor: "User",
        provider_trace: "ProviderTrace",
    ) -> "ProviderTrace | None":
        """
        Store a provider trace record.

        Args:
            actor: The user/actor creating the trace
            provider_trace: The trace data to store

        Returns:
            The created ProviderTrace, or None if the backend doesn't return it
        """
        raise NotImplementedError

    @abstractmethod
    async def get_by_step_id_async(
        self,
        step_id: str,
        actor: "User",
    ) -> "ProviderTrace | None":
        """
        Retrieve a provider trace by step ID.

        Args:
            step_id: The step ID to look up
            actor: The user/actor requesting the trace

        Returns:
            The ProviderTrace if found, None otherwise
        """
        raise NotImplementedError

    def create_sync(
        self,
        actor: "User",
        provider_trace: "ProviderTrace",
    ) -> "ProviderTrace | None":
        """
        Synchronous version of create_async.

        Default implementation does nothing. Override if sync support is needed
        (e.g. write-only backends that can send without an event loop).
        """
        return None
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/provider_trace_backends/base.py",
"license": "Apache License 2.0",
"lines": 54,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
letta-ai/letta:letta/services/provider_trace_backends/clickhouse.py | """ClickHouse provider trace backend.
Writes and reads from the llm_traces table with denormalized columns for cost analytics.
"""
import json
import uuid
from typing import TYPE_CHECKING, Optional
from letta.log import get_logger
from letta.schemas.provider_trace import ProviderTrace
from letta.schemas.user import User
from letta.services.clickhouse_provider_traces import ClickhouseProviderTraceReader
from letta.services.provider_trace_backends.base import ProviderTraceBackendClient
from letta.settings import settings
if TYPE_CHECKING:
from letta.schemas.llm_trace import LLMTrace
logger = get_logger(__name__)
class ClickhouseProviderTraceBackend(ProviderTraceBackendClient):
    """ClickHouse backend for provider traces (reads and writes from llm_traces table).

    Writes are best-effort analytics records: any conversion or enqueue failure
    is logged at debug level and swallowed so telemetry can never fail a request.
    """

    def __init__(self):
        # Reader handles all SELECTs against llm_traces.
        self._reader = ClickhouseProviderTraceReader()

    async def create_async(
        self,
        actor: User,
        provider_trace: ProviderTrace,
    ) -> ProviderTrace | None:
        """Write provider trace to ClickHouse llm_traces table.

        Returns a minimal ProviderTrace echoing the input ids/payloads whether
        or not the write happened, for consistency with the other backends.
        """
        if not settings.store_llm_traces:
            # Return minimal trace for consistency if writes disabled
            return ProviderTrace(
                id=provider_trace.id,
                step_id=provider_trace.step_id,
                request_json=provider_trace.request_json or {},
                response_json=provider_trace.response_json or {},
            )

        try:
            # Imported lazily so the writer machinery is only loaded when enabled.
            from letta.services.llm_trace_writer import get_llm_trace_writer

            trace = self._convert_to_trace(actor, provider_trace)
            if trace:
                writer = get_llm_trace_writer()
                await writer.write_async(trace)
        except Exception as e:
            # Best-effort: never let telemetry failures propagate to the request path.
            logger.debug(f"Failed to write trace to ClickHouse: {e}")

        return ProviderTrace(
            id=provider_trace.id,
            step_id=provider_trace.step_id,
            request_json=provider_trace.request_json or {},
            response_json=provider_trace.response_json or {},
        )

    async def get_by_step_id_async(
        self,
        step_id: str,
        actor: User,
    ) -> ProviderTrace | None:
        """Read provider trace from llm_traces table by step_id."""
        return await self._reader.get_provider_trace_by_step_id_async(
            step_id=step_id,
            organization_id=actor.organization_id,
        )

    def _convert_to_trace(
        self,
        actor: User,
        provider_trace: ProviderTrace,
    ) -> Optional["LLMTrace"]:
        """Convert ProviderTrace to LLMTrace for analytics storage.

        Callers wrap this in a broad try/except (see create_async), so any
        malformed payload simply results in a skipped analytics row.
        """
        from letta.schemas.llm_trace import LLMTrace

        # Serialize JSON fields (default=str tolerates datetimes/UUIDs etc.)
        request_json_str = json.dumps(provider_trace.request_json, default=str)
        response_json_str = json.dumps(provider_trace.response_json, default=str)
        llm_config_json_str = json.dumps(provider_trace.llm_config, default=str) if provider_trace.llm_config else "{}"

        # Extract provider and model from llm_config
        llm_config = provider_trace.llm_config or {}
        provider = llm_config.get("model_endpoint_type", "unknown")
        model = llm_config.get("model", "unknown")
        is_byok = llm_config.get("provider_category") == "byok"

        # Extract usage from response (generic parsing for common formats)
        usage = self._extract_usage(provider_trace.response_json, provider)

        # Check for error in response - must have actual error content, not just null
        # OpenAI Responses API returns {"error": null} on success
        error_data = provider_trace.response_json.get("error")
        error_type = provider_trace.response_json.get("error_type")
        error_message = None
        is_error = bool(error_data) or bool(error_type)
        if is_error:
            if isinstance(error_data, dict):
                error_type = error_type or error_data.get("type")
                # Use `or` (not dict.get's default) so an explicit {"message": null}
                # falls back to the dict repr instead of raising on None[:1000].
                error_message = (error_data.get("message") or str(error_data))[:1000]
            elif error_data:
                error_message = str(error_data)[:1000]

        # Extract UUID from provider_trace.id (strip "provider_trace-" prefix)
        trace_id = provider_trace.id
        if not trace_id:
            logger.warning("ProviderTrace missing id - trace correlation across backends will fail")
            trace_id = str(uuid.uuid4())
        elif trace_id.startswith("provider_trace-"):
            trace_id = trace_id[len("provider_trace-") :]

        return LLMTrace(
            id=trace_id,
            organization_id=provider_trace.org_id or actor.organization_id,
            project_id=None,
            agent_id=provider_trace.agent_id,
            agent_tags=provider_trace.agent_tags or [],
            run_id=provider_trace.run_id,
            step_id=provider_trace.step_id,
            trace_id=None,
            call_type=provider_trace.call_type or "unknown",
            provider=provider,
            model=model,
            is_byok=is_byok,
            request_size_bytes=len(request_json_str.encode("utf-8")),
            response_size_bytes=len(response_json_str.encode("utf-8")),
            prompt_tokens=usage.get("prompt_tokens", 0),
            completion_tokens=usage.get("completion_tokens", 0),
            total_tokens=usage.get("total_tokens", 0),
            cached_input_tokens=usage.get("cached_input_tokens"),
            cache_write_tokens=usage.get("cache_write_tokens"),
            reasoning_tokens=usage.get("reasoning_tokens"),
            latency_ms=0,  # Not available in ProviderTrace
            is_error=is_error,
            error_type=error_type,
            error_message=error_message,
            request_json=request_json_str,
            response_json=response_json_str,
            llm_config_json=llm_config_json_str,
            billing_plan_type=provider_trace.billing_context.plan_type if provider_trace.billing_context else None,
            billing_cost_source=provider_trace.billing_context.cost_source if provider_trace.billing_context else None,
            billing_customer_id=provider_trace.billing_context.customer_id if provider_trace.billing_context else None,
        )

    def _extract_usage(self, response_json: dict, provider: str) -> dict:
        """Extract usage statistics from response JSON.

        Handles common formats from OpenAI, Anthropic, and other providers.
        """
        usage = {}

        # OpenAI format: response.usage
        if "usage" in response_json:
            u = response_json["usage"]
            usage["prompt_tokens"] = u.get("prompt_tokens", 0)
            usage["completion_tokens"] = u.get("completion_tokens", 0)
            usage["total_tokens"] = u.get("total_tokens", 0)

            # OpenAI reasoning tokens
            if "completion_tokens_details" in u:
                details = u["completion_tokens_details"]
                usage["reasoning_tokens"] = details.get("reasoning_tokens")

            # OpenAI cached tokens
            if "prompt_tokens_details" in u:
                details = u["prompt_tokens_details"]
                usage["cached_input_tokens"] = details.get("cached_tokens")

        # Anthropic format: response.usage with cache fields
        if provider == "anthropic" and "usage" in response_json:
            u = response_json["usage"]
            # input_tokens can be 0 when all tokens come from cache
            input_tokens = u.get("input_tokens", 0)
            cache_read = u.get("cache_read_input_tokens", 0)
            cache_write = u.get("cache_creation_input_tokens", 0)
            # Total prompt = input + cached (for cost analytics)
            usage["prompt_tokens"] = input_tokens + cache_read + cache_write
            usage["completion_tokens"] = u.get("output_tokens", usage.get("completion_tokens", 0))
            usage["cached_input_tokens"] = cache_read if cache_read else None
            usage["cache_write_tokens"] = cache_write if cache_write else None

        # Recalculate total if not present
        if "total_tokens" not in usage or usage["total_tokens"] == 0:
            usage["total_tokens"] = usage.get("prompt_tokens", 0) + usage.get("completion_tokens", 0)

        return usage
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/provider_trace_backends/clickhouse.py",
"license": "Apache License 2.0",
"lines": 161,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/services/provider_trace_backends/factory.py | """Factory for creating provider trace backends."""
from functools import lru_cache
from letta.services.provider_trace_backends.base import ProviderTraceBackend, ProviderTraceBackendClient
def _create_backend(backend: ProviderTraceBackend | str) -> ProviderTraceBackendClient:
    """Create a single backend instance."""
    from letta.settings import telemetry_settings

    name = backend.value if isinstance(backend, ProviderTraceBackend) else backend
    if name == "clickhouse":
        from letta.services.provider_trace_backends.clickhouse import ClickhouseProviderTraceBackend

        return ClickhouseProviderTraceBackend()
    if name == "socket":
        from letta.services.provider_trace_backends.socket import SocketProviderTraceBackend

        return SocketProviderTraceBackend(socket_path=telemetry_settings.socket_path)
    # "postgres" and any unrecognized value fall back to the Postgres backend.
    from letta.services.provider_trace_backends.postgres import PostgresProviderTraceBackend

    return PostgresProviderTraceBackend()
@lru_cache(maxsize=1)
def get_provider_trace_backends() -> list[ProviderTraceBackendClient]:
    """
    Get all configured provider trace backends.

    Returns cached singleton instances for each configured backend.
    Supports multiple backends for dual-write scenarios (e.g., migration).
    """
    from letta.settings import telemetry_settings

    configured = telemetry_settings.provider_trace_backends
    return list(map(_create_backend, configured))
def get_provider_trace_backend() -> ProviderTraceBackendClient:
    """
    Get the primary (first) configured provider trace backend.

    For backwards compatibility and read operations.
    """
    configured = get_provider_trace_backends()
    if configured:
        return configured[0]
    # Nothing configured at all: fall back to a fresh Postgres backend.
    return _create_backend("postgres")
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/provider_trace_backends/factory.py",
"license": "Apache License 2.0",
"lines": 34,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:letta/services/provider_trace_backends/postgres.py | """PostgreSQL provider trace backend."""
from letta.helpers.json_helpers import json_dumps, json_loads
from letta.orm.provider_trace import ProviderTrace as ProviderTraceModel
from letta.orm.provider_trace_metadata import ProviderTraceMetadata as ProviderTraceMetadataModel
from letta.schemas.provider_trace import ProviderTrace, ProviderTraceMetadata
from letta.schemas.user import User
from letta.server.db import db_registry
from letta.services.provider_trace_backends.base import ProviderTraceBackendClient
from letta.settings import telemetry_settings
class PostgresProviderTraceBackend(ProviderTraceBackendClient):
    """Store provider traces in PostgreSQL.

    Writes go to the full ``provider_traces`` table or a slim metadata-only
    table depending on ``telemetry_settings.provider_trace_pg_metadata_only``;
    reads always come from the full table.
    """

    async def create_async(
        self,
        actor: User,
        provider_trace: ProviderTrace,
    ) -> ProviderTrace | ProviderTraceMetadata:
        """Persist the trace, routing to full or metadata-only storage by config."""
        if telemetry_settings.provider_trace_pg_metadata_only:
            return await self._create_metadata_only_async(actor, provider_trace)
        return await self._create_full_async(actor, provider_trace)

    async def _create_full_async(
        self,
        actor: User,
        provider_trace: ProviderTrace,
    ) -> ProviderTrace:
        """Write full provider trace to provider_traces table."""
        async with db_registry.async_session() as session:
            provider_trace_model = ProviderTraceModel(**provider_trace.model_dump(exclude={"billing_context"}))
            provider_trace_model.organization_id = actor.organization_id
            # Round-trip payloads through the project's JSON helpers —
            # presumably to coerce non-JSON-native values into a storable form; confirm.
            if provider_trace.request_json:
                request_json_str = json_dumps(provider_trace.request_json)
                provider_trace_model.request_json = json_loads(request_json_str)
            if provider_trace.response_json:
                response_json_str = json_dumps(provider_trace.response_json)
                provider_trace_model.response_json = json_loads(response_json_str)
            await provider_trace_model.create_async(session, actor=actor, no_commit=True, no_refresh=True)
            return provider_trace_model.to_pydantic()

    async def _create_metadata_only_async(
        self,
        actor: User,
        provider_trace: ProviderTrace,
    ) -> ProviderTraceMetadata:
        """Write metadata-only trace to provider_trace_metadata table."""
        # Project only the identifying fields; request/response bodies are dropped.
        metadata = ProviderTraceMetadata(
            id=provider_trace.id,
            step_id=provider_trace.step_id,
            agent_id=provider_trace.agent_id,
            agent_tags=provider_trace.agent_tags,
            call_type=provider_trace.call_type,
            run_id=provider_trace.run_id,
            source=provider_trace.source,
            org_id=provider_trace.org_id,
            user_id=provider_trace.user_id,
        )
        metadata_model = ProviderTraceMetadataModel(**metadata.model_dump())
        metadata_model.organization_id = actor.organization_id
        async with db_registry.async_session() as session:
            await metadata_model.create_async(session, actor=actor, no_commit=True, no_refresh=True)
            return metadata_model.to_pydantic()

    async def get_by_step_id_async(
        self,
        step_id: str,
        actor: User,
    ) -> ProviderTrace | None:
        """Read from provider_traces table. Always reads from full table regardless of write flag."""
        return await self._get_full_by_step_id_async(step_id, actor)

    async def _get_full_by_step_id_async(
        self,
        step_id: str,
        actor: User,
    ) -> ProviderTrace | None:
        """Read from provider_traces table."""
        async with db_registry.async_session() as session:
            provider_trace_model = await ProviderTraceModel.read_async(
                db_session=session,
                step_id=step_id,
                actor=actor,
            )
            return provider_trace_model.to_pydantic() if provider_trace_model else None

    async def _get_metadata_by_step_id_async(
        self,
        step_id: str,
        actor: User,
    ) -> ProviderTraceMetadata | None:
        """Read from provider_trace_metadata table."""
        async with db_registry.async_session() as session:
            metadata_model = await ProviderTraceMetadataModel.read_async(
                db_session=session,
                step_id=step_id,
                actor=actor,
            )
            return metadata_model.to_pydantic() if metadata_model else None
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/provider_trace_backends/postgres.py",
"license": "Apache License 2.0",
"lines": 91,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:letta/services/provider_trace_backends/socket.py | """Unix socket provider trace backend."""
import json
import os
import socket as socket_module
import threading
import time
from datetime import datetime, timezone
from typing import Any
from letta.log import get_logger
from letta.schemas.provider_trace import ProviderTrace
from letta.schemas.user import User
from letta.services.provider_trace_backends.base import ProviderTraceBackendClient
logger = get_logger(__name__)
# Protocol version for crouton communication.
# Bump this when making breaking changes to the record schema.
# Must match ProtocolVersion in apps/crouton/main.go.
# v2: Added user_id, compaction_settings (summarization), llm_config (non-summarization)
# v3: Increased buffer to 128MB, native sidecar for deterministic startup
PROTOCOL_VERSION: int = 3
class SocketProviderTraceBackend(ProviderTraceBackendClient):
    """
    Store provider traces via Unix socket.

    Sends NDJSON telemetry records to a Unix socket. The receiving service
    (sidecar) is responsible for storage (e.g., GCS, S3, local filesystem).

    This is a write-only backend - reads are not supported.
    """

    def __init__(self, socket_path: str = "/var/run/telemetry/telemetry.sock"):
        # Filesystem path of the sidecar's Unix domain socket.
        self.socket_path = socket_path

    async def create_async(
        self,
        actor: User,
        provider_trace: ProviderTrace,
    ) -> ProviderTrace | None:
        """Queue the trace for the sidecar and echo back a minimal ProviderTrace."""
        self._send_to_crouton(provider_trace)
        # Return a ProviderTrace with the same ID for consistency across backends
        return ProviderTrace(
            id=provider_trace.id,
            step_id=provider_trace.step_id,
            request_json=provider_trace.request_json or {},
            response_json=provider_trace.response_json or {},
        )

    def create_sync(
        self,
        actor: User,
        provider_trace: ProviderTrace,
    ) -> ProviderTrace | None:
        """Synchronous variant: fire-and-forget send, returns nothing."""
        self._send_to_crouton(provider_trace)
        return None

    async def get_by_step_id_async(
        self,
        step_id: str,
        actor: User,
    ) -> ProviderTrace | None:
        """Always returns None: this backend cannot read traces back."""
        # Socket backend is write-only - reads should go through the storage backend directly.
        logger.warning("Socket backend does not support reads")
        return None

    def _send_to_crouton(self, provider_trace: ProviderTrace) -> None:
        """Build telemetry record and send to Crouton sidecar (fire-and-forget)."""
        response = provider_trace.response_json or {}
        request = provider_trace.request_json or {}
        # Extract error if present - handles both {"error": "msg"} and {"error": {"message": "msg"}}
        raw_error = response.get("error")
        if isinstance(raw_error, dict):
            error = raw_error.get("message")
        elif isinstance(raw_error, str):
            error = raw_error
        else:
            error = None
        error_type = response.get("error_type")
        record = {
            "protocol_version": PROTOCOL_VERSION,
            "provider_trace_id": provider_trace.id,
            "agent_id": provider_trace.agent_id,
            "run_id": provider_trace.run_id,
            "step_id": provider_trace.step_id,
            "tags": provider_trace.agent_tags or [],
            "type": provider_trace.call_type or "agent_step",
            "source": provider_trace.source,
            "request": request,
            # On error the response body is dropped; the error fields carry the detail.
            "response": response if not error else None,
            "error": error,
            "error_type": error_type,
            "timestamp": datetime.now(timezone.utc).isoformat(),
            # v2 protocol fields
            "org_id": provider_trace.org_id,
            "user_id": provider_trace.user_id,
            "compaction_settings": provider_trace.compaction_settings,
            "llm_config": provider_trace.llm_config,
        }
        # Fire-and-forget in background thread
        thread = threading.Thread(target=self._send_async, args=(record,), daemon=True)
        thread.start()

    def _send_async(self, record: dict[str, Any], max_retries: int = 3) -> None:
        """Send record to Unix socket (runs in background thread).

        Retries with exponential backoff (0.5s, 1s, ...) when the socket file is
        missing or the pipe breaks; any other error is logged once and dropped.
        """
        base_delay = 0.5
        for attempt in range(max_retries):
            try:
                if not os.path.exists(self.socket_path):
                    if attempt < max_retries - 1:
                        time.sleep(base_delay * (2**attempt))
                        continue
                    logger.warning(f"Crouton socket not found at {self.socket_path} after {max_retries} attempts")
                    return
                with socket_module.socket(socket_module.AF_UNIX, socket_module.SOCK_STREAM) as sock:
                    sock.settimeout(60.0)  # Match crouton's connectionTimeout for large payloads
                    sock.connect(self.socket_path)
                    # NDJSON framing: one JSON object per line, newline-terminated.
                    payload = json.dumps(record, default=str) + "\n"
                    sock.sendall(payload.encode())
                    return
            except BrokenPipeError:
                if attempt < max_retries - 1:
                    time.sleep(base_delay * (2**attempt))
                    continue
                logger.warning(f"Failed to send telemetry to Crouton: broken pipe after {max_retries} attempts")
            except Exception as e:
                # Other errors (e.g. connection refused) are not retried.
                logger.warning(f"Failed to send telemetry to Crouton: {e}")
                return
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/provider_trace_backends/socket.py",
"license": "Apache License 2.0",
"lines": 117,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/services/tool_sandbox/typescript_generator.py | """TypeScript execution script generator for sandbox execution."""
import json
import re
from typing import Any, Dict, Optional
from letta.types import JsonDict, JsonValue
def convert_param_to_ts_value(param_type: Optional[str], raw_value: "JsonValue") -> str:
    """
    Convert parameter to TypeScript code representation based on JSON schema type.

    Args:
        param_type: The JSON schema type (string, number, integer, boolean, array, object)
        raw_value: The raw value to convert

    Returns:
        A string representation of the value in TypeScript syntax

    Raises:
        ValueError: If the value cannot be rendered for the declared type
            (non-numeric value for number/integer, unrecognized boolean).
    """
    # Handle null values first - return TypeScript null (not Python's "None")
    if raw_value is None:
        return "null"
    if param_type == "string":
        # Use JSON serialization for proper string quoting/escaping
        return json.dumps(raw_value)
    if param_type in ("number", "integer"):
        # bool is a subclass of int in Python: str(True) would emit "True",
        # which is not a valid TypeScript literal, so reject it explicitly.
        if isinstance(raw_value, bool):
            raise ValueError(f"Invalid numeric value: {raw_value!r}")
        if isinstance(raw_value, (int, float)):
            return str(raw_value)
        if isinstance(raw_value, str):
            # Numeric strings are emitted verbatim (backward compatible), but
            # arbitrary text is refused: it would previously have been spliced
            # unquoted into the generated script (broken / injectable code).
            try:
                float(raw_value)
            except ValueError:
                raise ValueError(f"Invalid numeric value: {raw_value!r}") from None
            return raw_value
        raise ValueError(f"Invalid numeric value: {raw_value!r}")
    if param_type == "boolean":
        if isinstance(raw_value, bool):
            return "true" if raw_value else "false"
        if isinstance(raw_value, int) and raw_value in (0, 1):
            return "true" if raw_value else "false"
        if isinstance(raw_value, str) and raw_value.strip().lower() in ("true", "false"):
            return raw_value.strip().lower()
        raise ValueError(f"Invalid boolean value: {raw_value}")
    if param_type in ("array", "object"):
        return json.dumps(raw_value)
    # Default: use JSON serialization
    return json.dumps(raw_value)
def extract_typescript_function_name(source_code: str) -> Optional[str]:
    """Locate the exported function name in TypeScript source.

    A plain ``export function`` declaration is searched for first; only if none
    exists is the ``export async function`` form considered.

    Args:
        source_code: TypeScript source code

    Returns:
        The function name if found, None otherwise
    """
    plain_decl = re.search(r"export\s+function\s+(\w+)", source_code)
    if plain_decl is not None:
        return plain_decl.group(1)
    async_decl = re.search(r"export\s+async\s+function\s+(\w+)", source_code)
    return async_decl.group(1) if async_decl is not None else None
def is_async_typescript_function(source_code: str, function_name: str) -> bool:
    """Report whether *function_name* is declared async in *source_code*.

    Two forms are recognized: ``export async function name`` declarations and
    ``export const name = async`` arrow-function assignments.

    Args:
        source_code: TypeScript source code
        function_name: The function name to check

    Returns:
        True if the function is async, False otherwise
    """
    escaped_name = re.escape(function_name)
    async_forms = (
        rf"export\s+async\s+function\s+{escaped_name}",  # async declaration
        rf"export\s+const\s+{escaped_name}\s*=\s*async",  # async arrow assignment
    )
    return any(re.search(form, source_code) for form in async_forms)
def generate_typescript_execution_script(
    tool_name: str,
    tool_source_code: str,
    args: JsonDict,
    json_schema: Dict[str, Any],
    env_vars_to_inject: Optional[Dict[str, str]] = None,
) -> str:
    """
    Generate a TypeScript execution script for running a tool in E2B sandbox.

    The generated script:
    1. Imports and initializes the Letta client (available as `client` variable)
    2. Initializes arguments as TypeScript constants
    3. Includes the user's tool source code
    4. Calls the function and serializes the result as JSON

    Note: TypeScript tools do NOT support agent_state injection (legacy Python feature).
    The agent_id is available via process.env.LETTA_AGENT_ID environment variable.

    Args:
        tool_name: Name of the tool function
        tool_source_code: The TypeScript source code of the tool
        args: Arguments to pass to the function
        json_schema: JSON schema describing the function parameters
        env_vars_to_inject: Optional environment variables to inject
    Returns:
        Generated TypeScript code ready for execution
    """
    # NOTE(review): env_vars_to_inject is accepted but never used in this body —
    # confirm whether env-var injection is handled by the sandbox runner instead.
    lines: list[str] = []
    # Extract user's import statements - they must be at the top of the file for ESM
    import_pattern = r"^import\s+.+?['\"];?\s*$"
    user_imports = re.findall(import_pattern, tool_source_code, re.MULTILINE)
    source_without_imports = re.sub(import_pattern, "", tool_source_code, flags=re.MULTILINE)
    # Add user imports at the very top (ESM requires imports at top of file)
    if user_imports:
        for imp in user_imports:
            lines.append(imp.strip())
        lines.append("")
    # Import and initialize Letta client (similar to Python's letta_client injection)
    # The client is available as `client` variable in the tool's scope
    # Use dynamic import with try/catch to gracefully handle missing package
    lines.extend(
        [
            "// Initialize Letta client for TypeScript tool execution",
            "let client: any = null;",
            "try {",
            "  const { LettaClient } = await import('@letta-ai/letta-client');",
            "  const apiKey = process.env.LETTA_API_KEY;",
            "  if (apiKey) {",
            "    client = new LettaClient({ apiKey });",
            "  }",
            "} catch (e) {",
            "  // Package not available - client remains null",
            "}",
            "",
        ]
    )
    # Initialize arguments
    # Handle null json_schema (can happen if ToolUpdate sets source_type without schema)
    if json_schema is None:
        json_schema = {}
    properties = json_schema.get("parameters", {}).get("properties", {})
    for param_name, param_value in args.items():
        param_spec = properties.get(param_name, {})
        param_type = param_spec.get("type")
        ts_value = convert_param_to_ts_value(param_type, param_value)
        # NOTE(review): param_name is emitted verbatim as a TS identifier;
        # assumes schema param names are valid identifiers — confirm upstream validation.
        lines.append(f"const {param_name} = {ts_value};")
    if args:
        lines.append("")
    # Add the user's source code (imports already extracted), stripping 'export' keywords
    # NOTE(review): this regex also strips the word 'export ' if it appears inside a
    # string literal in the user's source — known edge case.
    stripped_source = re.sub(r"\bexport\s+", "", source_without_imports)
    lines.append(stripped_source.strip())
    lines.append("")
    # Detect if function is async
    is_async = is_async_typescript_function(tool_source_code, tool_name)
    # Generate function call with arguments in correct order
    # Use the order from json_schema (required + optional) to ensure positional args are correct
    parameters = json_schema.get("parameters", {})
    required_params = parameters.get("required", [])
    schema_properties = parameters.get("properties", {})
    # Build ordered param list: required params first (in order), then any remaining args
    ordered_params = []
    for param in required_params:
        if param in args:
            ordered_params.append(param)
    # Add any remaining params that weren't in required (optional params)
    for param in schema_properties.keys():
        if param in args and param not in ordered_params:
            ordered_params.append(param)
    # Fallback: add any args not in schema (shouldn't happen, but be safe)
    for param in args.keys():
        if param not in ordered_params:
            ordered_params.append(param)
    params_str = ", ".join(ordered_params)
    func_call = f"{tool_name}({params_str})"
    # Execute the function and output result as JSON
    # E2B supports top-level await for TypeScript
    if is_async:
        lines.append(f"const _result = await {func_call};")
    else:
        lines.append(f"const _result = {func_call};")
    # Serialize the result - we use JSON for TypeScript (not pickle like Python)
    # The output format matches what the Python sandbox expects
    # Note: agent_state is always null for TypeScript tools (not supported)
    lines.append("const _output = { results: _result, agent_state: null };")
    lines.append("JSON.stringify(_output);")
    return "\n".join(lines) + "\n"
def parse_typescript_result(result_text: str) -> tuple[Any, None]:
    """Decode the JSON payload emitted by a TypeScript tool execution.

    TypeScript tools return JSON-serialized results instead of pickle.

    Args:
        result_text: The JSON string output from the TypeScript execution

    Returns:
        Tuple of (function_return_value, agent_state)
        Note: agent_state is always None for TypeScript tools; unparseable
        output is returned back verbatim as the function value.
    """
    if not result_text:
        return None, None
    try:
        decoded = json.loads(result_text)
    except json.JSONDecodeError:
        # Not valid JSON — hand the raw text back so the caller still sees output.
        return result_text, None
    return decoded.get("results"), decoded.get("agent_state")
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/tool_sandbox/typescript_generator.py",
"license": "Apache License 2.0",
"lines": 192,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:tests/integration_test_override_model.py | """
Integration tests for the override_model functionality.
Tests the ability to send messages to agents using a different model than the agent's default
configured model, without permanently changing the agent's configuration.
Note: Some type: ignore comments are present because the SDK types haven't been regenerated
to include the new override_model parameter yet.
"""
import logging
import os
import threading
import time
import uuid
from typing import Generator, List
import pytest
import requests
from dotenv import load_dotenv
from letta_client import APIError, AsyncLetta, Letta
from letta_client.types import AgentState, MessageCreateParam
# Module logger; used by fixtures for best-effort cleanup warnings.
logger = logging.getLogger(__name__)

# Test message that forces a simple response
# One OTID is generated per test session and reused for every send below —
# presumably safe for these tests; confirm if OTID-based dedup is ever enabled.
USER_MESSAGE_OTID = str(uuid.uuid4())
USER_MESSAGE_SIMPLE: List[MessageCreateParam] = [
    MessageCreateParam(
        role="user",
        content="This is an automated test. Please respond with exactly: 'Test successful'",
        otid=USER_MESSAGE_OTID,
    )
]
# ------------------------------
# Fixtures
# ------------------------------
@pytest.fixture(scope="module")
def server_url() -> str:
    """
    Provides the URL for the Letta server.

    If LETTA_SERVER_URL is not set, starts the server in a background thread
    and polls until it's accepting connections.
    """

    def _run_server() -> None:
        # Load .env (API keys etc.) inside the thread before booting the app.
        load_dotenv()
        from letta.server.rest_api.app import start_server

        start_server(debug=True)

    url: str = os.getenv("LETTA_SERVER_URL", "http://localhost:8283")
    if not os.getenv("LETTA_SERVER_URL"):
        # Daemon thread so the embedded server dies with the test process.
        thread = threading.Thread(target=_run_server, daemon=True)
        thread.start()
        # Poll until the server is up (or timeout)
        timeout_seconds = 60
        deadline = time.time() + timeout_seconds
        while time.time() < deadline:
            try:
                resp = requests.get(url + "/v1/health")
                # Any non-5xx response means the app is serving requests.
                if resp.status_code < 500:
                    break
            except requests.exceptions.RequestException:
                # Not accepting connections yet — keep polling.
                pass
            time.sleep(0.1)
        else:
            # while-else: runs only when the deadline expired without a break.
            raise RuntimeError(f"Could not reach {url} within {timeout_seconds}s")
    return url
@pytest.fixture(scope="module")
def client(server_url: str) -> Generator[Letta, None, None]:
    """Module-scoped synchronous Letta REST client bound to the test server."""
    yield Letta(base_url=server_url)
@pytest.fixture(scope="function")
def async_client(server_url: str) -> Generator[AsyncLetta, None, None]:
    """Function-scoped asynchronous Letta REST client bound to the test server."""
    yield AsyncLetta(base_url=server_url)
@pytest.fixture(scope="function")
def agent_with_gpt4o_mini(client: Letta) -> Generator[AgentState, None, None]:
    """Yield a throwaway agent running gpt-4o-mini for model-override tests."""
    created = client.agents.create(
        name=f"override_model_test_{uuid.uuid4().hex[:8]}",
        model="openai/gpt-4o-mini",
        embedding="openai/text-embedding-3-small",
        tags=["override_model_test"],
        memory_blocks=[
            {"label": "human", "value": "Test user"},
            {"label": "persona", "value": "You are a helpful assistant."},
        ],
    )
    yield created
    # Best-effort teardown: a failed delete must not fail the test itself.
    try:
        client.agents.delete(created.id)
    except Exception as e:
        logger.warning(f"Failed to delete agent {created.id}: {e}")
@pytest.fixture(scope="function")
def agent_with_gemini(client: Letta) -> Generator[AgentState, None, None]:
    """Yield a throwaway agent running Gemini for model-override tests."""
    created = client.agents.create(
        name=f"override_model_test_gemini_{uuid.uuid4().hex[:8]}",
        model="google_ai/gemini-2.0-flash",
        embedding="openai/text-embedding-3-small",
        tags=["override_model_test"],
        memory_blocks=[
            {"label": "human", "value": "Test user"},
            {"label": "persona", "value": "You are a helpful assistant."},
        ],
    )
    yield created
    # Best-effort teardown: a failed delete must not fail the test itself.
    try:
        client.agents.delete(created.id)
    except Exception as e:
        logger.warning(f"Failed to delete agent {created.id}: {e}")
# ------------------------------
# Test Cases
# ------------------------------
class TestOverrideModelSync:
    """Tests for override_model with synchronous message sending."""

    def test_override_model_changes_model_used(
        self,
        client: Letta,
        agent_with_gpt4o_mini: AgentState,
    ) -> None:
        """
        Test that override_model causes the message to be processed by a different model.

        Agent is configured with gpt-4o-mini, but we override with gpt-4o.
        """
        agent = agent_with_gpt4o_mini
        # Verify agent's default model
        assert agent.model is not None
        assert "gpt-4o-mini" in agent.model
        # Send message with override model
        # (override_model travels via extra_body until the SDK types are regenerated)
        response = client.agents.messages.create(
            agent_id=agent.id,
            messages=USER_MESSAGE_SIMPLE,
            extra_body={"override_model": "openai/gpt-4o"},
        )
        # Verify we got a response
        assert response.messages is not None
        assert len(response.messages) > 0
        # Verify agent's model was not permanently changed
        agent_after = client.agents.retrieve(agent.id)
        assert agent_after.model is not None
        assert "gpt-4o-mini" in agent_after.model, "Agent's model should not be permanently changed"

    def test_override_model_cross_provider(
        self,
        client: Letta,
        agent_with_gpt4o_mini: AgentState,
    ) -> None:
        """
        Test overriding from one provider to another (OpenAI -> Google AI).
        """
        agent = agent_with_gpt4o_mini
        # Verify agent's default model is OpenAI
        assert agent.model is not None
        assert "openai" in agent.model.lower() or "gpt" in agent.model.lower()
        # Send message with Google AI model override
        response = client.agents.messages.create(
            agent_id=agent.id,
            messages=USER_MESSAGE_SIMPLE,
            extra_body={"override_model": "google_ai/gemini-2.0-flash"},
        )
        # Verify we got a response
        assert response.messages is not None
        assert len(response.messages) > 0
        # Verify agent's model was not permanently changed
        agent_after = client.agents.retrieve(agent.id)
        assert agent_after.model is not None
        assert "gpt-4o-mini" in agent_after.model, "Agent's model should not be permanently changed"

    def test_override_model_with_none_uses_default(
        self,
        client: Letta,
        agent_with_gpt4o_mini: AgentState,
    ) -> None:
        """
        Test that not setting override_model (None) uses the agent's default model.
        """
        agent = agent_with_gpt4o_mini
        # Send message without override_model
        response = client.agents.messages.create(
            agent_id=agent.id,
            messages=USER_MESSAGE_SIMPLE,
        )
        # Verify we got a response
        assert response.messages is not None
        assert len(response.messages) > 0

    def test_override_model_invalid_handle(
        self,
        client: Letta,
        agent_with_gpt4o_mini: AgentState,
    ) -> None:
        """
        Test that an invalid override_model handle raises an appropriate error.
        """
        agent = agent_with_gpt4o_mini
        with pytest.raises(APIError) as exc_info:
            client.agents.messages.create(
                agent_id=agent.id,
                messages=USER_MESSAGE_SIMPLE,
                extra_body={"override_model": "invalid/nonexistent-model-xyz"},
            )
        # Verify the error is related to the invalid model handle
        # The error could be a 400, 404, or 422 depending on implementation
        error = exc_info.value
        # APIError should have status_code attribute
        assert hasattr(error, "status_code") and error.status_code in [400, 404, 422]  # type: ignore[attr-defined]
class TestOverrideModelStreaming:
    """Streaming variants of the override_model tests."""

    def test_override_model_streaming(
        self,
        client: Letta,
        agent_with_gpt4o_mini: AgentState,
    ) -> None:
        """override_model should be honored when streaming is enabled."""
        subject = agent_with_gpt4o_mini

        # Stream through a different OpenAI model than the agent's default.
        # Note: messages.create with streaming=True, not create_stream.
        stream_response = client.agents.messages.create(
            agent_id=subject.id,
            messages=USER_MESSAGE_SIMPLE,
            extra_body={"override_model": "openai/gpt-4o"},
            streaming=True,
        )
        # Streamed messages are accumulated internally, so a response object
        # should still come back.
        assert stream_response is not None

        # The override must not persist on the agent.
        refreshed = client.agents.retrieve(subject.id)
        assert refreshed.model is not None
        assert "gpt-4o-mini" in refreshed.model

    def test_override_model_streaming_cross_provider(
        self,
        client: Letta,
        agent_with_gpt4o_mini: AgentState,
    ) -> None:
        """Streaming with a cross-provider override (OpenAI -> Google AI)."""
        subject = agent_with_gpt4o_mini

        stream_response = client.agents.messages.create(
            agent_id=subject.id,
            messages=USER_MESSAGE_SIMPLE,
            extra_body={"override_model": "google_ai/gemini-2.0-flash"},
            streaming=True,
        )
        # A response object should be returned even for the streaming path.
        assert stream_response is not None
class TestOverrideModelAsync:
    """Tests for override_model with async message sending."""

    @pytest.mark.asyncio
    async def test_override_model_async(
        self,
        async_client: AsyncLetta,
        client: Letta,
        agent_with_gpt4o_mini: AgentState,
    ) -> None:
        """
        Test that override_model works correctly with async message sending.
        """
        agent = agent_with_gpt4o_mini
        # Send message asynchronously with override model
        run = await async_client.agents.messages.create_async(
            agent_id=agent.id,
            messages=USER_MESSAGE_SIMPLE,
            extra_body={"override_model": "openai/gpt-4o"},
        )
        # Verify we got a run object
        assert run is not None
        assert run.id is not None
        # Wait for the run to complete (poll status)
        # Polling uses the *sync* client; the async call only enqueues the run.
        max_wait = 60  # seconds
        poll_interval = 1  # second
        elapsed = 0
        run_status = None
        while elapsed < max_wait:
            run_status = client.runs.retrieve(run.id)
            # Any terminal state stops the poll; success is asserted below.
            if run_status.status in ["completed", "failed", "cancelled"]:
                break
            time.sleep(poll_interval)
            elapsed += poll_interval
        # Verify run completed
        assert run_status is not None
        assert run_status.status == "completed", f"Run failed with status: {run_status.status}"
        # Verify agent's model was not permanently changed
        agent_after = client.agents.retrieve(agent.id)
        assert agent_after.model is not None
        assert "gpt-4o-mini" in agent_after.model
class TestOverrideModelConversation:
    """Conversation-endpoint variants of the override_model tests."""

    def test_override_model_conversation(
        self,
        client: Letta,
        agent_with_gpt4o_mini: AgentState,
    ) -> None:
        """override_model should be honored on conversation message sends."""
        subject = agent_with_gpt4o_mini

        # A conversation must exist before messages can flow through it.
        convo = client.conversations.create(agent_id=subject.id)
        assert convo is not None
        assert convo.id is not None

        # Send through the conversation endpoint with a model override.
        reply = client.conversations.messages.create(
            conversation_id=convo.id,
            messages=USER_MESSAGE_SIMPLE,
            extra_body={"override_model": "openai/gpt-4o"},
        )
        assert reply is not None

        # The agent keeps its originally configured model afterwards.
        refreshed = client.agents.retrieve(subject.id)
        assert refreshed.model is not None
        assert "gpt-4o-mini" in refreshed.model
class TestOverrideModelConsistency:
    """Tests to ensure override_model doesn't affect agent state persistently."""

    def test_multiple_override_models_in_sequence(
        self,
        client: Letta,
        agent_with_gpt4o_mini: AgentState,
    ) -> None:
        """
        Test sending multiple messages with different override models.

        Agent's default model should remain unchanged throughout.
        """
        agent = agent_with_gpt4o_mini
        original_model = agent.model
        # First message with gpt-4o
        response1 = client.agents.messages.create(
            agent_id=agent.id,
            messages=USER_MESSAGE_SIMPLE,
            extra_body={"override_model": "openai/gpt-4o"},
        )
        assert response1.messages is not None
        # Second message with Gemini
        response2 = client.agents.messages.create(
            agent_id=agent.id,
            messages=USER_MESSAGE_SIMPLE,
            extra_body={"override_model": "google_ai/gemini-2.0-flash"},
        )
        assert response2.messages is not None
        # Third message without override (should use default)
        response3 = client.agents.messages.create(
            agent_id=agent.id,
            messages=USER_MESSAGE_SIMPLE,
        )
        assert response3.messages is not None
        # Verify agent's model is still the original
        agent_after = client.agents.retrieve(agent.id)
        assert agent_after.model == original_model

    def test_override_model_does_not_modify_agent_state(
        self,
        client: Letta,
        agent_with_gpt4o_mini: AgentState,
    ) -> None:
        """
        Test that using override_model doesn't modify any part of the agent state.
        """
        agent = agent_with_gpt4o_mini
        # Get full agent state before
        agent_before = client.agents.retrieve(agent.id)
        # Send message with override
        response = client.agents.messages.create(
            agent_id=agent.id,
            messages=USER_MESSAGE_SIMPLE,
            extra_body={"override_model": "openai/gpt-4o"},
        )
        assert response.messages is not None
        # Get full agent state after
        agent_after = client.agents.retrieve(agent.id)
        # Verify key fields are unchanged (model, name, type — a representative
        # subset of the agent state, not an exhaustive field-by-field diff)
        assert agent_after.model == agent_before.model
        assert agent_after.name == agent_before.name
        assert agent_after.agent_type == agent_before.agent_type
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/integration_test_override_model.py",
"license": "Apache License 2.0",
"lines": 380,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/integration_test_typescript_tool_execution_sandbox.py | """Integration tests for TypeScript tool execution in E2B sandbox."""
import uuid
import pytest
from sqlalchemy import delete
from letta.config import LettaConfig
from letta.orm.sandbox_config import SandboxConfig, SandboxEnvironmentVariable
from letta.schemas.enums import ToolType
from letta.schemas.npm_requirement import NpmRequirement
from letta.schemas.organization import Organization
from letta.schemas.tool import Tool as PydanticTool, ToolCreate
from letta.schemas.user import User
from letta.server.server import SyncServer
from letta.services.organization_manager import OrganizationManager
from letta.services.tool_manager import ToolManager
from letta.services.tool_sandbox.e2b_sandbox import AsyncToolSandboxE2B
from letta.services.user_manager import UserManager
# Constants
# Deterministic names (uuid5 over the DNS namespace) so repeated runs map to
# the same org/user identifiers.
namespace = uuid.NAMESPACE_DNS
org_name = str(uuid.uuid5(namespace, "test-typescript-tool-execution-org"))
user_name = str(uuid.uuid5(namespace, "test-typescript-tool-execution-user"))
# Fixtures
@pytest.fixture(scope="module")
def server():
    """Module-scoped SyncServer backed by a freshly persisted LettaConfig."""
    # Load-then-save ensures the config exists on disk before the server boots.
    LettaConfig.load().save()
    yield SyncServer(init_with_default_org_and_user=True)
@pytest.fixture(autouse=True)
async def clear_tables():
    """Wipe sandbox config/env-var tables so each test starts from a clean slate."""
    from letta.server.db import db_registry

    async with db_registry.async_session() as session:
        # Env vars are deleted before configs (presumably child rows first —
        # confirm against the FK layout).
        await session.execute(delete(SandboxEnvironmentVariable))
        await session.execute(delete(SandboxConfig))
@pytest.fixture
async def test_organization():
    """Create and yield the organization all tests in this module share."""
    yield await OrganizationManager().create_organization_async(Organization(name=org_name))
@pytest.fixture
async def test_user(test_organization):
    """Create and yield a user inside the module's test organization."""
    actor = User(name=user_name, organization_id=test_organization.id)
    yield await UserManager().create_actor_async(actor)
# TypeScript Tool Fixtures
@pytest.fixture
async def add_numbers_ts_tool(test_user):
    """Simple TypeScript tool that adds two numbers."""
    # TypeScript tools must carry an explicit json_schema (no auto-generation,
    # unlike Python tools — see TestTypescriptToolValidation).
    tool = PydanticTool(
        name="add_numbers",
        description="Add two numbers together",
        source_code="""
export function add_numbers(x: number, y: number): number {
    return x + y;
}
""",
        source_type="typescript",
        tool_type=ToolType.CUSTOM,
        json_schema={
            "name": "add_numbers",
            "description": "Add two numbers together",
            "parameters": {
                "type": "object",
                "properties": {
                    "x": {"type": "number", "description": "First number"},
                    "y": {"type": "number", "description": "Second number"},
                },
                "required": ["x", "y"],
            },
        },
    )
    tool = await ToolManager().create_or_update_tool_async(tool, test_user)
    yield tool
@pytest.fixture
async def string_concat_ts_tool(test_user):
    """TypeScript tool that concatenates two strings (string-arg path)."""
    tool = PydanticTool(
        name="concat_strings",
        description="Concatenate two strings",
        source_code="""
export function concat_strings(a: string, b: string): string {
    return a + b;
}
""",
        source_type="typescript",
        tool_type=ToolType.CUSTOM,
        json_schema={
            "name": "concat_strings",
            "description": "Concatenate two strings",
            "parameters": {
                "type": "object",
                "properties": {
                    "a": {"type": "string", "description": "First string"},
                    "b": {"type": "string", "description": "Second string"},
                },
                "required": ["a", "b"],
            },
        },
    )
    tool = await ToolManager().create_or_update_tool_async(tool, test_user)
    yield tool
@pytest.fixture
async def async_ts_tool(test_user):
    """Async TypeScript tool (exercises the `export async function` await path)."""
    tool = PydanticTool(
        name="async_delay",
        description="An async function that returns after a small delay",
        source_code="""
export async function async_delay(message: string): Promise<string> {
    await new Promise(resolve => setTimeout(resolve, 100));
    return `Delayed: ${message}`;
}
""",
        source_type="typescript",
        tool_type=ToolType.CUSTOM,
        json_schema={
            "name": "async_delay",
            "description": "An async function that returns after a small delay",
            "parameters": {
                "type": "object",
                "properties": {
                    "message": {"type": "string", "description": "Message to return"},
                },
                "required": ["message"],
            },
        },
    )
    tool = await ToolManager().create_or_update_tool_async(tool, test_user)
    yield tool
@pytest.fixture
async def error_ts_tool(test_user):
    """TypeScript tool that throws, for exercising the sandbox error path."""
    tool = PydanticTool(
        name="throw_error",
        description="A function that always throws an error",
        source_code="""
export function throw_error(): never {
    throw new Error("This is an intentional test error");
}
""",
        source_type="typescript",
        tool_type=ToolType.CUSTOM,
        json_schema={
            "name": "throw_error",
            "description": "A function that always throws an error",
            "parameters": {
                "type": "object",
                "properties": {},
                "required": [],
            },
        },
    )
    tool = await ToolManager().create_or_update_tool_async(tool, test_user)
    yield tool
@pytest.fixture
async def array_ts_tool(test_user):
    """TypeScript tool taking an array argument (array-serialization path)."""
    tool = PydanticTool(
        name="sum_array",
        description="Sum all numbers in an array",
        source_code="""
export function sum_array(numbers: number[]): number {
    return numbers.reduce((acc, curr) => acc + curr, 0);
}
""",
        source_type="typescript",
        tool_type=ToolType.CUSTOM,
        json_schema={
            "name": "sum_array",
            "description": "Sum all numbers in an array",
            "parameters": {
                "type": "object",
                "properties": {
                    "numbers": {
                        "type": "array",
                        "items": {"type": "number"},
                        "description": "Array of numbers to sum",
                    },
                },
                "required": ["numbers"],
            },
        },
    )
    tool = await ToolManager().create_or_update_tool_async(tool, test_user)
    yield tool
@pytest.fixture
async def object_ts_tool(test_user):
    """TypeScript tool taking a nested object argument (object-serialization path)."""
    tool = PydanticTool(
        name="get_name",
        description="Extract name from a person object",
        source_code="""
export function get_name(person: { firstName: string; lastName: string }): string {
    return `${person.firstName} ${person.lastName}`;
}
""",
        source_type="typescript",
        tool_type=ToolType.CUSTOM,
        json_schema={
            "name": "get_name",
            "description": "Extract name from a person object",
            "parameters": {
                "type": "object",
                "properties": {
                    "person": {
                        "type": "object",
                        "properties": {
                            "firstName": {"type": "string"},
                            "lastName": {"type": "string"},
                        },
                        "required": ["firstName", "lastName"],
                        "description": "Person object with name fields",
                    },
                },
                "required": ["person"],
            },
        },
    )
    tool = await ToolManager().create_or_update_tool_async(tool, test_user)
    yield tool
# Tests
class TestTypescriptToolValidation:
    """Tests for TypeScript tool validation (no sandbox required)."""

    def test_typescript_tool_requires_json_schema(self):
        """Test that TypeScript tools require explicit json_schema."""
        with pytest.raises(ValueError, match="TypeScript tools require an explicit json_schema"):
            ToolCreate(
                source_code='export function test(): string { return "hello"; }',
                source_type="typescript",
                # Deliberately not providing json_schema
            )

    def test_typescript_tool_with_schema_is_valid(self, test_user):
        """Test that TypeScript tools with json_schema are valid."""
        tool_create = ToolCreate(
            source_code='export function test(): string { return "hello"; }',
            source_type="typescript",
            json_schema={
                "name": "test",
                "description": "Test function",
                "parameters": {"type": "object", "properties": {}, "required": []},
            },
        )
        assert tool_create.source_type == "typescript"
        assert tool_create.json_schema is not None

    def test_python_tool_without_schema_is_valid(self):
        """Test that Python tools can still be created without explicit json_schema."""
        tool_create = ToolCreate(
            source_code='def test(): return "hello"',
            source_type="python",
            # No json_schema - should be auto-generated for Python
        )
        assert tool_create.source_type == "python"

    @pytest.mark.asyncio
    async def test_typescript_tool_does_not_inject_agent_state(self, test_user):
        """Test that TypeScript tools do not support agent_state injection (legacy Python feature)."""
        # Create a TypeScript tool that has 'agent_state' in its parameters
        # (this shouldn't happen in practice, but we test the sandbox behavior)
        tool = PydanticTool(
            name="test_no_agent_state",
            description="Test tool",
            source_code="""
export function test_no_agent_state(x: number): number {
    return x;
}
""",
            source_type="typescript",
            tool_type=ToolType.CUSTOM,
            json_schema={
                "name": "test_no_agent_state",
                "description": "Test tool",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "x": {"type": "number"},
                    },
                    "required": ["x"],
                },
            },
        )
        tool = await ToolManager().create_or_update_tool_async(tool, test_user)
        sandbox = AsyncToolSandboxE2B(
            tool_name=tool.name,
            args={"x": 42},
            user=test_user,
            tool_id=tool.id,
            tool_object=tool,
        )
        # Initialize the sandbox to trigger the _init_async method
        # (no actual E2B execution happens here).
        await sandbox._init_async()
        # Verify agent_state injection is disabled for TypeScript tools
        assert sandbox.inject_agent_state is False
@pytest.mark.e2b_sandbox
class TestTypescriptToolExecution:
    """Tests for TypeScript tool execution in E2B sandbox.

    Each test builds an AsyncToolSandboxE2B around one of the module's tool
    fixtures, runs it, and checks the deserialized result. Requires an E2B key
    (enforced by the check_e2b_key_is_set fixture).
    """

    @pytest.mark.asyncio
    async def test_e2b_typescript_add_numbers(self, check_e2b_key_is_set, add_numbers_ts_tool, test_user):
        """Test basic TypeScript tool execution with number arguments."""
        sandbox = AsyncToolSandboxE2B(
            tool_name=add_numbers_ts_tool.name,
            args={"x": 10, "y": 5},
            user=test_user,
            tool_id=add_numbers_ts_tool.id,
            tool_object=add_numbers_ts_tool,
        )
        result = await sandbox.run()
        assert result.status == "success"
        assert result.func_return == 15

    @pytest.mark.asyncio
    async def test_e2b_typescript_string_concat(self, check_e2b_key_is_set, string_concat_ts_tool, test_user):
        """Test TypeScript tool execution with string arguments."""
        sandbox = AsyncToolSandboxE2B(
            tool_name=string_concat_ts_tool.name,
            args={"a": "Hello, ", "b": "World!"},
            user=test_user,
            tool_id=string_concat_ts_tool.id,
            tool_object=string_concat_ts_tool,
        )
        result = await sandbox.run()
        assert result.status == "success"
        assert result.func_return == "Hello, World!"

    @pytest.mark.asyncio
    async def test_e2b_typescript_async_function(self, check_e2b_key_is_set, async_ts_tool, test_user):
        """Test async TypeScript tool execution (awaited by the generated script)."""
        sandbox = AsyncToolSandboxE2B(
            tool_name=async_ts_tool.name,
            args={"message": "test"},
            user=test_user,
            tool_id=async_ts_tool.id,
            tool_object=async_ts_tool,
        )
        result = await sandbox.run()
        assert result.status == "success"
        assert result.func_return == "Delayed: test"

    @pytest.mark.asyncio
    async def test_e2b_typescript_error_handling(self, check_e2b_key_is_set, error_ts_tool, test_user):
        """Test TypeScript tool error handling."""
        sandbox = AsyncToolSandboxE2B(
            tool_name=error_ts_tool.name,
            args={},
            user=test_user,
            tool_id=error_ts_tool.id,
            tool_object=error_ts_tool,
        )
        result = await sandbox.run()
        assert result.status == "error"
        # The error may surface either in the function return or on stderr.
        assert "error" in result.func_return.lower() or "Error" in str(result.stderr)

    @pytest.mark.asyncio
    async def test_e2b_typescript_array_argument(self, check_e2b_key_is_set, array_ts_tool, test_user):
        """Test TypeScript tool with array argument."""
        sandbox = AsyncToolSandboxE2B(
            tool_name=array_ts_tool.name,
            args={"numbers": [1, 2, 3, 4, 5]},
            user=test_user,
            tool_id=array_ts_tool.id,
            tool_object=array_ts_tool,
        )
        result = await sandbox.run()
        assert result.status == "success"
        assert result.func_return == 15

    @pytest.mark.asyncio
    async def test_e2b_typescript_object_argument(self, check_e2b_key_is_set, object_ts_tool, test_user):
        """Test TypeScript tool with object argument."""
        sandbox = AsyncToolSandboxE2B(
            tool_name=object_ts_tool.name,
            args={"person": {"firstName": "John", "lastName": "Doe"}},
            user=test_user,
            tool_id=object_ts_tool.id,
            tool_object=object_ts_tool,
        )
        result = await sandbox.run()
        assert result.status == "success"
        assert result.func_return == "John Doe"
@pytest.mark.e2b_sandbox
class TestTypescriptToolWithLettaClient:
    """Tests for TypeScript tools with Letta client integration."""

    @pytest.mark.asyncio
    async def test_e2b_typescript_letta_client_available(self, check_e2b_key_is_set, test_user):
        """Test that the Letta client is available in TypeScript sandbox (as null when no API key)."""
        # Create a tool that checks if the client variable exists
        tool = PydanticTool(
            name="check_client",
            description="Check if Letta client is available",
            source_code="""
export function check_client(): string {
    // client is injected by the sandbox - it will be null if no API key
    return client === null ? "client is null (no API key)" : "client is available";
}
""",
            source_type="typescript",
            tool_type=ToolType.CUSTOM,
            json_schema={
                "name": "check_client",
                "description": "Check if Letta client is available",
                "parameters": {
                    "type": "object",
                    "properties": {},
                    "required": [],
                },
            },
        )
        tool = await ToolManager().create_or_update_tool_async(tool, test_user)
        sandbox = AsyncToolSandboxE2B(
            tool_name=tool.name,
            args={},
            user=test_user,
            tool_id=tool.id,
            tool_object=tool,
        )
        result = await sandbox.run()
        assert result.status == "success"
        # Without LETTA_API_KEY, client should be null
        # (assertion is loose on purpose: either branch mentions "client").
        assert "client" in result.func_return.lower()
@pytest.mark.e2b_sandbox
class TestTypescriptToolWithNpmPackages:
    """Tests for TypeScript tools with npm package dependencies."""

    @pytest.mark.asyncio
    async def test_e2b_typescript_with_npm_package(self, check_e2b_key_is_set, test_user):
        """Test TypeScript tool execution with npm package dependency."""
        # Tool source uses the 'lodash' npm package, which must be installed in the sandbox.
        lodash_source = """
import _ from 'lodash';
export function lodash_capitalize(text: string): string {
    return _.capitalize(text);
}
"""
        lodash_schema = {
            "name": "lodash_capitalize",
            "description": "Capitalize a string using lodash",
            "parameters": {
                "type": "object",
                "properties": {
                    "text": {"type": "string", "description": "Text to capitalize"},
                },
                "required": ["text"],
            },
        }
        tool_def = PydanticTool(
            name="lodash_capitalize",
            description="Capitalize a string using lodash",
            source_code=lodash_source,
            source_type="typescript",
            tool_type=ToolType.CUSTOM,
            npm_requirements=[NpmRequirement(name="lodash"), NpmRequirement(name="@types/lodash")],
            json_schema=lodash_schema,
        )
        tool_def = await ToolManager().create_or_update_tool_async(tool_def, test_user)
        sbx = AsyncToolSandboxE2B(
            tool_name=tool_def.name,
            tool_id=tool_def.id,
            tool_object=tool_def,
            user=test_user,
            args={"text": "hello world"},
        )
        outcome = await sbx.run()
        assert outcome.status == "success"
        assert outcome.func_return == "Hello world"
class TestTypescriptGeneratorUnit:
    """Unit tests for the TypeScript code-generation helpers."""

    def test_convert_param_to_ts_value_string(self):
        """Strings are JSON-quoted, with embedded quotes escaped."""
        from letta.services.tool_sandbox.typescript_generator import convert_param_to_ts_value as to_ts

        assert to_ts("string", "hello") == '"hello"'
        assert to_ts("string", 'hello "world"') == '"hello \\"world\\""'

    def test_convert_param_to_ts_value_number(self):
        """Ints and floats render as bare numeric literals; 'integer' is accepted too."""
        from letta.services.tool_sandbox.typescript_generator import convert_param_to_ts_value as to_ts

        assert to_ts("number", 42) == "42"
        assert to_ts("number", 3.14) == "3.14"
        assert to_ts("integer", 100) == "100"

    def test_convert_param_to_ts_value_boolean(self):
        """Python booleans become lowercase TS literals."""
        from letta.services.tool_sandbox.typescript_generator import convert_param_to_ts_value as to_ts

        assert to_ts("boolean", True) == "true"
        assert to_ts("boolean", False) == "false"

    def test_convert_param_to_ts_value_array(self):
        """Lists render as TS array literals."""
        from letta.services.tool_sandbox.typescript_generator import convert_param_to_ts_value as to_ts

        assert to_ts("array", [1, 2, 3]) == "[1, 2, 3]"

    def test_convert_param_to_ts_value_object(self):
        """Dicts render as JSON object literals."""
        from letta.services.tool_sandbox.typescript_generator import convert_param_to_ts_value as to_ts

        assert to_ts("object", {"key": "value"}) == '{"key": "value"}'

    def test_extract_typescript_function_name(self):
        """Only exported (sync or async) functions yield a name; others give None."""
        from letta.services.tool_sandbox.typescript_generator import extract_typescript_function_name as fn_name

        assert fn_name("export function myFunc(): void {}") == "myFunc"
        assert fn_name("export async function asyncFunc(): Promise<void> {}") == "asyncFunc"
        assert fn_name("function notExported(): void {}") is None

    def test_is_async_typescript_function(self):
        """Async detection keys off the `async` keyword on the exported function."""
        from letta.services.tool_sandbox.typescript_generator import is_async_typescript_function as is_async

        assert is_async("export async function test(): Promise<void> {}", "test") is True
        assert is_async("export function test(): void {}", "test") is False

    def test_generate_typescript_execution_script(self):
        """The generated runner wires up client init, args, the tool body, and JSON output."""
        from letta.services.tool_sandbox.typescript_generator import generate_typescript_execution_script as gen_script

        add_schema = {
            "name": "add",
            "parameters": {
                "type": "object",
                "properties": {
                    "x": {"type": "number"},
                    "y": {"type": "number"},
                },
            },
        }
        script = gen_script(
            tool_name="add",
            tool_source_code="export function add(x: number, y: number): number { return x + y; }",
            args={"x": 1, "y": 2},
            json_schema=add_schema,
        )
        # Letta client initialization uses a dynamic import wrapped in try/catch.
        assert "let client: any = null;" in script
        assert "await import('@letta-ai/letta-client')" in script
        assert "process.env.LETTA_API_KEY" in script
        assert "} catch (e) {" in script  # Graceful fallback if package not available
        # Arguments are bound as consts before the call.
        assert "const x = 1;" in script
        assert "const y = 2;" in script
        assert "function add" in script  # 'export' is stripped for inline execution
        assert "const _result = add(x, y);" in script
        assert "JSON.stringify(_output);" in script
        # agent_state is null (not supported for TypeScript).
        assert "agent_state: null" in script

    def test_parse_typescript_result(self):
        """Valid JSON unwraps results/agent_state; non-JSON and empty input degrade gracefully."""
        from letta.services.tool_sandbox.typescript_generator import parse_typescript_result as parse

        parsed, state = parse('{"results": 42, "agent_state": null}')
        assert parsed == 42
        assert state is None

        parsed, state = parse("not json")
        assert parsed == "not json"
        assert state is None

        parsed, state = parse("")
        assert parsed is None
        assert state is None

    def test_sandbox_tool_executor_skips_ast_for_typescript(self):
        """SandboxToolExecutor._prepare_function_args must not AST-parse TypeScript source."""
        from letta.schemas.tool import Tool as PydanticTool
        from letta.services.tool_executor.sandbox_tool_executor import SandboxToolExecutor

        ts_schema = {
            "name": "ts_func",
            "parameters": {
                "type": "object",
                "properties": {
                    "a": {"type": "number"},
                    "b": {"type": "string"},
                },
                "required": ["a", "b"],
            },
        }
        tool_def = PydanticTool(
            name="ts_func",
            description="Test TypeScript tool",
            source_code="""
export function ts_func(a: number, b: string): string {
    return b.repeat(a);
}
""",
            source_type="typescript",
            tool_type=ToolType.CUSTOM,
            json_schema=ts_schema,
        )
        # Must NOT raise SyntaxError: AST parsing is a Python-only step.
        prepared = SandboxToolExecutor._prepare_function_args(
            function_args={"a": 3, "b": "test"},
            tool=tool_def,
            function_name="ts_func",
        )
        # Args pass through unchanged (no type coercion for TypeScript).
        assert prepared == {"a": 3, "b": "test"}
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/integration_test_typescript_tool_execution_sandbox.py",
"license": "Apache License 2.0",
"lines": 573,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/test_provider_trace_backends.py | """Unit tests for provider trace backends."""
import json
import os
import socket
import tempfile
import threading
from unittest.mock import patch
import pytest
from letta.schemas.provider_trace import ProviderTrace
from letta.schemas.user import User
from letta.services.provider_trace_backends.base import ProviderTraceBackend
from letta.services.provider_trace_backends.socket import SocketProviderTraceBackend
@pytest.fixture
def mock_actor():
    """Fixture: a deterministic mock user/actor with fixed UUID-style ids."""
    return User(
        name="test_user",
        id="user-00000000-0000-4000-8000-000000000000",
        organization_id="org-00000000-0000-4000-8000-000000000000",
    )
@pytest.fixture
def sample_provider_trace():
    """Fixture: a representative ProviderTrace with OpenAI-shaped request/response payloads."""
    request_payload = {
        "model": "gpt-4o-mini",
        "messages": [{"role": "user", "content": "Hello"}],
    }
    response_payload = {
        "id": "chatcmpl-xyz",
        "model": "gpt-4o-mini",
        "choices": [{"message": {"content": "Hi!"}}],
        "usage": {"prompt_tokens": 10, "completion_tokens": 5},
    }
    return ProviderTrace(
        request_json=request_payload,
        response_json=response_payload,
        step_id="step-test-789",
        run_id="run-test-abc",
    )
class TestProviderTraceBackendEnum:
    """Tests for the ProviderTraceBackend enum."""

    def test_enum_values(self):
        """Each member's .value is the lowercase backend name."""
        expected = {
            ProviderTraceBackend.POSTGRES: "postgres",
            ProviderTraceBackend.CLICKHOUSE: "clickhouse",
            ProviderTraceBackend.SOCKET: "socket",
        }
        for member, value in expected.items():
            assert member.value == value

    def test_enum_string_comparison(self):
        """Members compare equal to their plain-string values (str-enum behavior)."""
        assert ProviderTraceBackend.POSTGRES == "postgres"
        assert ProviderTraceBackend.SOCKET == "socket"
class TestProviderTrace:
    """Tests for the ProviderTrace schema."""

    def test_id_generation(self):
        """An ID is auto-generated with the 'provider_trace-' prefix."""
        sample = ProviderTrace(
            request_json={"model": "test"},
            response_json={"id": "test"},
            step_id="step-123",
        )
        assert sample.id.startswith("provider_trace-")

    def test_id_uniqueness(self):
        """Two instances never share an auto-generated ID."""
        first = ProviderTrace(request_json={}, response_json={}, step_id="step-1")
        second = ProviderTrace(request_json={}, response_json={}, step_id="step-2")
        assert first.id != second.id

    def test_optional_fields(self):
        """Optional telemetry fields round-trip through the model."""
        sample = ProviderTrace(
            request_json={},
            response_json={},
            step_id="step-123",
            agent_id="agent-456",
            agent_tags=["env:dev", "team:ml"],
            call_type="summarization",
            run_id="run-789",
        )
        assert sample.agent_id == "agent-456"
        assert sample.agent_tags == ["env:dev", "team:ml"]
        assert sample.call_type == "summarization"
        assert sample.run_id == "run-789"

    def test_v2_protocol_fields(self):
        """v2 protocol fields (org_id, user_id, compaction_settings, llm_config) round-trip."""
        compaction = {"mode": "sliding_window", "target_message_count": 50}
        config = {"model": "gpt-4", "temperature": 0.7}
        sample = ProviderTrace(
            request_json={},
            response_json={},
            step_id="step-123",
            org_id="org-123",
            user_id="user-123",
            compaction_settings=compaction,
            llm_config=config,
        )
        assert sample.org_id == "org-123"
        assert sample.user_id == "user-123"
        assert sample.compaction_settings == compaction
        assert sample.llm_config == config

    def test_v2_fields_mutually_exclusive_by_convention(self):
        """compaction_settings accompanies summarization; llm_config accompanies other call types."""
        summarization = ProviderTrace(
            request_json={},
            response_json={},
            step_id="step-123",
            call_type="summarization",
            compaction_settings={"mode": "partial_evict"},
            llm_config=None,
        )
        assert summarization.call_type == "summarization"
        assert summarization.compaction_settings is not None
        assert summarization.llm_config is None

        agent_step = ProviderTrace(
            request_json={},
            response_json={},
            step_id="step-456",
            call_type="agent_step",
            compaction_settings=None,
            llm_config={"model": "claude-3"},
        )
        assert agent_step.call_type == "agent_step"
        assert agent_step.compaction_settings is None
        assert agent_step.llm_config is not None
class TestSocketProviderTraceBackend:
    """Tests for SocketProviderTraceBackend.

    The socket backend is a write-only, fire-and-forget sink: it serializes a
    trace record to a Unix domain socket and never raises on delivery failure.
    """

    def test_init_default_path(self):
        """Test default socket path."""
        backend = SocketProviderTraceBackend()
        assert backend.socket_path == "/var/run/telemetry/telemetry.sock"

    def test_init_custom_path(self):
        """Test custom socket path."""
        backend = SocketProviderTraceBackend(socket_path="/tmp/custom.sock")
        assert backend.socket_path == "/tmp/custom.sock"

    @pytest.mark.asyncio
    async def test_create_async_returns_provider_trace(self, mock_actor, sample_provider_trace):
        """Test that create_async returns a ProviderTrace."""
        # A nonexistent socket path must not break create_async: the backend
        # still returns the trace even when delivery silently fails.
        backend = SocketProviderTraceBackend(socket_path="/nonexistent/path.sock")
        result = await backend.create_async(
            actor=mock_actor,
            provider_trace=sample_provider_trace,
        )
        assert isinstance(result, ProviderTrace)
        assert result.id == sample_provider_trace.id
        assert result.step_id == sample_provider_trace.step_id

    @pytest.mark.asyncio
    async def test_get_by_step_id_returns_none(self, mock_actor):
        """Test that read operations return None (write-only backend)."""
        backend = SocketProviderTraceBackend()
        result = await backend.get_by_step_id_async(
            step_id="step-123",
            actor=mock_actor,
        )
        assert result is None

    def test_send_to_socket_with_real_socket(self, sample_provider_trace):
        """Test sending data to a real Unix socket."""
        received_data = []
        with tempfile.TemporaryDirectory() as tmpdir:
            socket_path = os.path.join(tmpdir, "test.sock")
            # Create a simple socket server
            server_sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            server_sock.bind(socket_path)
            server_sock.listen(1)
            # Timeout bounds the accept() so a failed send can't hang the test.
            server_sock.settimeout(5.0)

            def accept_connection():
                # Runs on a background thread; collects one payload then exits.
                try:
                    conn, _ = server_sock.accept()
                    data = conn.recv(65536)
                    received_data.append(data.decode())
                    conn.close()
                except socket.timeout:
                    pass  # Expected - test socket has short timeout, data may not arrive
                finally:
                    server_sock.close()

            # Start server in background
            server_thread = threading.Thread(target=accept_connection)
            server_thread.start()
            # Send data via backend
            backend = SocketProviderTraceBackend(socket_path=socket_path)
            backend._send_to_crouton(sample_provider_trace)
            # Wait for send to complete
            server_thread.join(timeout=5.0)
            # Verify data was received
            assert len(received_data) == 1
            # Payload is newline-terminated JSON; strip before parsing.
            record = json.loads(received_data[0].strip())
            assert record["provider_trace_id"] == sample_provider_trace.id
            assert record["step_id"] == "step-test-789"
            assert record["run_id"] == "run-test-abc"
            assert record["request"]["model"] == "gpt-4o-mini"
            assert record["response"]["usage"]["prompt_tokens"] == 10
            assert record["response"]["usage"]["completion_tokens"] == 5

    def test_send_to_nonexistent_socket_does_not_raise(self, sample_provider_trace):
        """Test that sending to nonexistent socket fails silently."""
        backend = SocketProviderTraceBackend(socket_path="/nonexistent/path.sock")
        # Should not raise
        backend._send_to_crouton(sample_provider_trace)

    def test_record_extracts_usage_from_openai_response(self):
        """Test usage extraction from OpenAI-style response (prompt/completion_tokens keys)."""
        trace = ProviderTrace(
            request_json={"model": "gpt-4"},
            response_json={
                "usage": {
                    "prompt_tokens": 100,
                    "completion_tokens": 50,
                }
            },
            step_id="step-123",
        )
        backend = SocketProviderTraceBackend(socket_path="/fake/path")
        # Access internal method to build record; _send_async is patched so no
        # socket I/O happens. This is a smoke test that record-building doesn't raise.
        with patch.object(backend, "_send_async"):
            backend._send_to_crouton(trace)

    def test_record_extracts_usage_from_anthropic_response(self):
        """Test usage extraction from Anthropic-style response (input/output_tokens keys)."""
        trace = ProviderTrace(
            request_json={"model": "claude-3"},
            response_json={
                "usage": {
                    "input_tokens": 100,
                    "output_tokens": 50,
                }
            },
            step_id="step-123",
        )
        backend = SocketProviderTraceBackend(socket_path="/fake/path")
        # Smoke test: record-building for Anthropic-shaped usage must not raise.
        with patch.object(backend, "_send_async"):
            backend._send_to_crouton(trace)

    def test_record_extracts_error_from_response(self):
        """Test error extraction from response."""
        trace = ProviderTrace(
            request_json={"model": "gpt-4"},
            response_json={
                "error": {"message": "Rate limit exceeded"},
            },
            step_id="step-123",
        )
        backend = SocketProviderTraceBackend(socket_path="/fake/path")
        # Capture the record sent to _send_async
        captured_records = []

        def capture_record(record):
            captured_records.append(record)

        with patch.object(backend, "_send_async", side_effect=capture_record):
            backend._send_to_crouton(trace)
        assert len(captured_records) == 1
        # On error, the message is lifted into "error" and "response" is nulled.
        assert captured_records[0]["error"] == "Rate limit exceeded"
        assert captured_records[0]["response"] is None

    def test_record_includes_v3_protocol_fields(self):
        """Test that v3 protocol fields are included in the socket record."""
        trace = ProviderTrace(
            request_json={"model": "gpt-4"},
            response_json={"id": "test"},
            step_id="step-123",
            org_id="org-456",
            user_id="user-456",
            compaction_settings={"mode": "sliding_window"},
            llm_config={"model": "gpt-4", "temperature": 0.5},
        )
        backend = SocketProviderTraceBackend(socket_path="/fake/path")
        captured_records = []

        def capture_record(record):
            captured_records.append(record)

        with patch.object(backend, "_send_async", side_effect=capture_record):
            backend._send_to_crouton(trace)
        assert len(captured_records) == 1
        record = captured_records[0]
        assert record["protocol_version"] == 3
        assert record["org_id"] == "org-456"
        assert record["user_id"] == "user-456"
        assert record["compaction_settings"] == {"mode": "sliding_window"}
        assert record["llm_config"] == {"model": "gpt-4", "temperature": 0.5}
class TestBackendFactory:
    """Tests for the provider-trace backend factory."""

    def test_get_postgres_backend(self):
        """'postgres' yields a PostgresProviderTraceBackend."""
        from letta.services.provider_trace_backends.factory import _create_backend

        impl = _create_backend("postgres")
        assert impl.__class__.__name__ == "PostgresProviderTraceBackend"

    def test_get_socket_backend(self):
        """'socket' yields a SocketProviderTraceBackend."""
        with patch("letta.settings.telemetry_settings") as mock_settings:
            mock_settings.socket_path = "/tmp/test.sock"
            from letta.services.provider_trace_backends.factory import _create_backend

            impl = _create_backend("socket")
            assert impl.__class__.__name__ == "SocketProviderTraceBackend"

    def test_get_multiple_backends(self):
        """The factory always yields at least one backend implementing the interface."""
        from letta.services.provider_trace_backends.factory import get_provider_trace_backends

        # Clear the lru cache so this test is independent of prior calls.
        get_provider_trace_backends.cache_clear()
        # The actual list depends on env var LETTA_TELEMETRY_PROVIDER_TRACE_BACKEND;
        # we only verify the factory produces valid backend objects.
        instances = get_provider_trace_backends()
        assert len(instances) >= 1
        for impl in instances:
            assert hasattr(impl, "create_async")
            assert hasattr(impl, "get_by_step_id_async")

    def test_unknown_backend_defaults_to_postgres(self):
        """An unrecognized backend name falls back to postgres."""
        from letta.services.provider_trace_backends.factory import _create_backend

        impl = _create_backend("unknown_backend")
        assert impl.__class__.__name__ == "PostgresProviderTraceBackend"
class TestTelemetrySettings:
    """Tests for telemetry settings parsing."""

    def test_provider_trace_backends_parsing(self):
        """A comma-separated backend string splits into a list."""
        from letta.settings import TelemetrySettings

        cfg = TelemetrySettings(provider_trace_backend="postgres,socket")
        assert cfg.provider_trace_backends == ["postgres", "socket"]

    def test_provider_trace_backends_single(self):
        """A single backend name yields a one-element list."""
        from letta.settings import TelemetrySettings

        cfg = TelemetrySettings(provider_trace_backend="postgres")
        assert cfg.provider_trace_backends == ["postgres"]

    def test_provider_trace_backends_with_whitespace(self):
        """Whitespace around the separators is stripped."""
        from letta.settings import TelemetrySettings

        cfg = TelemetrySettings(provider_trace_backend="postgres , socket , clickhouse")
        assert cfg.provider_trace_backends == ["postgres", "socket", "clickhouse"]

    def test_socket_backend_enabled(self):
        """socket_backend_enabled reflects whether 'socket' appears in the list."""
        from letta.settings import TelemetrySettings

        without_socket = TelemetrySettings(provider_trace_backend="postgres")
        assert without_socket.socket_backend_enabled is False
        with_socket = TelemetrySettings(provider_trace_backend="postgres,socket")
        assert with_socket.socket_backend_enabled is True
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/test_provider_trace_backends.py",
"license": "Apache License 2.0",
"lines": 320,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:alembic/versions/27de0f58e076_add_conversations_tables_and_run_.py | """add conversations tables and run conversation_id
Revision ID: 27de0f58e076
Revises: ee2b43eea55e
Create Date: 2026-01-01 20:36:09.101274
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "27de0f58e076"
down_revision: Union[str, None] = "ee2b43eea55e"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Create the conversations / conversation_messages tables and add a
    conversation_id column (with FK and index) to messages and runs.

    NOTE(review): the two op.create_foreign_key(None, ...) calls below create
    UNNAMED foreign keys, so the database generates the constraint names. This
    makes a symmetric downgrade via op.drop_constraint impossible (Alembic
    requires a constraint name) — confirm the downgrade strategy relies on
    column drops instead.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        "conversations",
        sa.Column("id", sa.String(), nullable=False),
        sa.Column("agent_id", sa.String(), nullable=False),
        sa.Column("summary", sa.String(), nullable=True),
        sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=True),
        sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=True),
        sa.Column("is_deleted", sa.Boolean(), server_default=sa.text("FALSE"), nullable=False),
        sa.Column("_created_by_id", sa.String(), nullable=True),
        sa.Column("_last_updated_by_id", sa.String(), nullable=True),
        sa.Column("organization_id", sa.String(), nullable=False),
        # Deleting an agent cascades to its conversations.
        sa.ForeignKeyConstraint(["agent_id"], ["agents.id"], ondelete="CASCADE"),
        sa.ForeignKeyConstraint(
            ["organization_id"],
            ["organizations.id"],
        ),
        sa.PrimaryKeyConstraint("id"),
    )
    op.create_index("ix_conversations_agent_id", "conversations", ["agent_id"], unique=False)
    op.create_index("ix_conversations_org_agent", "conversations", ["organization_id", "agent_id"], unique=False)
    # Join table linking messages into conversations with an explicit ordering.
    op.create_table(
        "conversation_messages",
        sa.Column("id", sa.String(), nullable=False),
        sa.Column("conversation_id", sa.String(), nullable=True),
        sa.Column("agent_id", sa.String(), nullable=False),
        sa.Column("message_id", sa.String(), nullable=False),
        sa.Column("position", sa.Integer(), nullable=False),
        sa.Column("in_context", sa.Boolean(), nullable=False),
        sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=True),
        sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=True),
        sa.Column("is_deleted", sa.Boolean(), server_default=sa.text("FALSE"), nullable=False),
        sa.Column("_created_by_id", sa.String(), nullable=True),
        sa.Column("_last_updated_by_id", sa.String(), nullable=True),
        sa.Column("organization_id", sa.String(), nullable=False),
        sa.ForeignKeyConstraint(["agent_id"], ["agents.id"], ondelete="CASCADE"),
        sa.ForeignKeyConstraint(["conversation_id"], ["conversations.id"], ondelete="CASCADE"),
        sa.ForeignKeyConstraint(["message_id"], ["messages.id"], ondelete="CASCADE"),
        sa.ForeignKeyConstraint(
            ["organization_id"],
            ["organizations.id"],
        ),
        sa.PrimaryKeyConstraint("id"),
        # A message may appear at most once per conversation.
        sa.UniqueConstraint("conversation_id", "message_id", name="unique_conversation_message"),
    )
    op.create_index("ix_conv_msg_agent_conversation", "conversation_messages", ["agent_id", "conversation_id"], unique=False)
    op.create_index("ix_conv_msg_agent_id", "conversation_messages", ["agent_id"], unique=False)
    op.create_index("ix_conv_msg_conversation_position", "conversation_messages", ["conversation_id", "position"], unique=False)
    op.create_index("ix_conv_msg_message_id", "conversation_messages", ["message_id"], unique=False)
    # Denormalized back-references: messages and runs remember their conversation.
    op.add_column("messages", sa.Column("conversation_id", sa.String(), nullable=True))
    op.create_index(op.f("ix_messages_conversation_id"), "messages", ["conversation_id"], unique=False)
    op.create_foreign_key(None, "messages", "conversations", ["conversation_id"], ["id"], ondelete="SET NULL")
    op.add_column("runs", sa.Column("conversation_id", sa.String(), nullable=True))
    op.create_index("ix_runs_conversation_id", "runs", ["conversation_id"], unique=False)
    op.create_foreign_key(None, "runs", "conversations", ["conversation_id"], ["id"], ondelete="SET NULL")
    # ### end Alembic commands ###
def downgrade() -> None:
    """Revert the conversations schema: drop the conversation_id columns from
    runs and messages, then drop the conversation tables and their indexes.

    Bug fix: the auto-generated version called ``op.drop_constraint(None, ...)``,
    but Alembic raises a ValueError when asked to drop a constraint without a
    name (the upgrade created these foreign keys unnamed, so no name is known
    here). Dropping the column removes the dependent foreign-key constraint
    along with it, so the explicit drop_constraint calls are removed.
    """
    op.drop_index("ix_runs_conversation_id", table_name="runs")
    # Dropping the column also drops its unnamed FK to conversations.
    op.drop_column("runs", "conversation_id")
    op.drop_index(op.f("ix_messages_conversation_id"), table_name="messages")
    op.drop_column("messages", "conversation_id")
    op.drop_index("ix_conv_msg_message_id", table_name="conversation_messages")
    op.drop_index("ix_conv_msg_conversation_position", table_name="conversation_messages")
    op.drop_index("ix_conv_msg_agent_id", table_name="conversation_messages")
    op.drop_index("ix_conv_msg_agent_conversation", table_name="conversation_messages")
    op.drop_table("conversation_messages")
    op.drop_index("ix_conversations_org_agent", table_name="conversations")
    op.drop_index("ix_conversations_agent_id", table_name="conversations")
    op.drop_table("conversations")
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/27de0f58e076_add_conversations_tables_and_run_.py",
"license": "Apache License 2.0",
"lines": 87,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/ee2b43eea55e_add_request_id_to_steps_table.py | """add request_id to steps table
Revision ID: ee2b43eea55e
Revises: 39577145c45d
Create Date: 2025-12-17 13:48:08.642245
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "ee2b43eea55e"
down_revision: Union[str, None] = "39577145c45d"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Add a nullable request_id column to steps for correlating a step with
    the originating API request."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column("steps", sa.Column("request_id", sa.String(), nullable=True))
    # ### end Alembic commands ###
def downgrade() -> None:
    """Remove the request_id column from steps."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column("steps", "request_id")
    # ### end Alembic commands ###
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/ee2b43eea55e_add_request_id_to_steps_table.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:letta/llm_api/anthropic_constants.py | # Anthropic-specific constants for the Letta LLM API
# Allowlist of simple tools that work with Anthropic's structured outputs (strict mode).
# These tools have few parameters and no complex nesting, making them safe for strict mode.
# Tools with many optional params or deeply nested structures should use non-strict mode.
#
# Anthropic limitations for strict mode:
# - Max 15 tools can use strict mode per request
# - Max 24 optional parameters per tool (counted recursively in undocumented ways)
# - Schema complexity limits
#
# Rather than trying to count parameters correctly, we allowlist simple tools that we know work.
ANTHROPIC_STRICT_MODE_ALLOWLIST = {
"Write", # 2 required params, no optional
"Read", # 1 required, 2 simple optional
"Edit", # 3 required, 1 simple optional
"Glob", # 1 required, 1 simple optional
"KillBash", # 1 required, no optional
"fetch_webpage", # 1 required, no optional
"EnterPlanMode", # no params
"ExitPlanMode", # no params
"Skill", # 1 required, 1 optional array
"conversation_search", # 1 required, 4 simple optional
}
# Maximum number of tools that can use strict mode in a single request
ANTHROPIC_MAX_STRICT_TOOLS = 15
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/llm_api/anthropic_constants.py",
"license": "Apache License 2.0",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:letta/llm_api/zai_client.py | from typing import List, Optional
from openai import AsyncOpenAI, AsyncStream, OpenAI
from openai.types.chat.chat_completion import ChatCompletion
from openai.types.chat.chat_completion_chunk import ChatCompletionChunk
from letta.helpers.json_helpers import sanitize_unicode_surrogates
from letta.llm_api.openai_client import OpenAIClient
from letta.otel.tracing import trace_method
from letta.schemas.embedding_config import EmbeddingConfig
from letta.schemas.enums import AgentType
from letta.schemas.llm_config import LLMConfig
from letta.schemas.message import Message as PydanticMessage
from letta.schemas.openai.chat_completion_response import ChatCompletionResponse
from letta.settings import model_settings
def is_zai_reasoning_model(model_name: str) -> bool:
    """Return True if ``model_name`` is a ZAI reasoning model (GLM-4.5 and newer).

    Matching is prefix-based, so dated or suffixed variants (e.g. "glm-4.6-flash")
    are included.

    Args:
        model_name: The model identifier string as configured by the caller.

    Returns:
        True when the name starts with a GLM-4.5+/GLM-5 prefix, else False.
    """
    # str.startswith accepts a tuple of prefixes — one call instead of a chain
    # of `or` comparisons; behavior is identical to the previous per-prefix checks.
    return model_name.startswith(("glm-4.5", "glm-4.6", "glm-4.7", "glm-5"))
class ZAIClient(OpenAIClient):
"""Z.ai (ZhipuAI) client - uses OpenAI-compatible API."""
    def requires_auto_tool_choice(self, llm_config: LLMConfig) -> bool:
        """Z.ai does not require tool_choice to be coerced to "auto"; explicit
        tool-choice values from the base payload are passed through as-is."""
        return False
    def supports_structured_output(self, llm_config: LLMConfig) -> bool:
        """Strict structured (JSON-schema) output is disabled for Z.ai models."""
        return False
    def is_reasoning_model(self, llm_config: LLMConfig) -> bool:
        """Returns True if the model is a ZAI reasoning model (GLM-4.5+).

        Delegates to the module-level is_zai_reasoning_model prefix check.
        """
        return is_zai_reasoning_model(llm_config.model)
@trace_method
def build_request_data(
self,
agent_type: AgentType,
messages: List[PydanticMessage],
llm_config: LLMConfig,
tools: Optional[List[dict]] = None,
force_tool_call: Optional[str] = None,
requires_subsequent_tool_call: bool = False,
tool_return_truncation_chars: Optional[int] = None,
) -> dict:
data = super().build_request_data(agent_type, messages, llm_config, tools, force_tool_call, requires_subsequent_tool_call)
# Add thinking configuration for ZAI GLM-4.5+ models
# Must explicitly send type: "disabled" when reasoning is off, as GLM-4.7 has thinking on by default
if self.is_reasoning_model(llm_config):
if llm_config.enable_reasoner:
data["extra_body"] = {
"thinking": {
"type": "enabled",
"clear_thinking": False, # Preserved thinking for agents
}
}
else:
data["extra_body"] = {
"thinking": {
"type": "disabled",
}
}
# Z.ai's API uses max_tokens, not max_completion_tokens.
# If max_completion_tokens is sent, Z.ai ignores it and falls back to its
# default of 65536, silently truncating input to ~137K of the 200K context window.
if "max_completion_tokens" in data:
data["max_tokens"] = data.pop("max_completion_tokens")
# Sanitize empty text content — ZAI rejects empty text blocks
if "messages" in data:
for msg in data["messages"]:
content = msg.get("content") if isinstance(msg, dict) else getattr(msg, "content", None)
# String content: replace empty with None (assistant+tool_calls) or "."
if isinstance(content, str) and not content.strip():
role = msg.get("role") if isinstance(msg, dict) else getattr(msg, "role", None)
has_tool_calls = msg.get("tool_calls") if isinstance(msg, dict) else getattr(msg, "tool_calls", None)
if role == "assistant" and has_tool_calls:
# assistant + tool_calls: null content is valid in OpenAI format
if isinstance(msg, dict):
msg["content"] = None
else:
msg.content = None
else:
if isinstance(msg, dict):
msg["content"] = "."
else:
msg.content = "."
# List content: fix empty text blocks within arrays
elif isinstance(content, list):
for block in content:
if isinstance(block, dict) and block.get("type") == "text":
if not block.get("text", "").strip():
block["text"] = "."
return data
@trace_method
def request(self, request_data: dict, llm_config: LLMConfig) -> dict:
"""
Performs underlying synchronous request to Z.ai API and returns raw response dict.
"""
api_key = model_settings.zai_api_key
client = OpenAI(api_key=api_key, base_url=llm_config.model_endpoint)
response: ChatCompletion = client.chat.completions.create(**request_data)
return response.model_dump()
@trace_method
async def request_async(self, request_data: dict, llm_config: LLMConfig) -> dict:
    """
    Performs underlying asynchronous request to Z.ai API and returns raw response dict.
    """
    # Drop unpaired Unicode surrogates so the JSON payload serializes cleanly.
    payload = sanitize_unicode_surrogates(request_data)
    async_client = AsyncOpenAI(
        api_key=model_settings.zai_api_key,
        base_url=llm_config.model_endpoint,
    )
    raw_response: ChatCompletion = await async_client.chat.completions.create(**payload)
    return raw_response.model_dump()
@trace_method
async def stream_async(self, request_data: dict, llm_config: LLMConfig) -> AsyncStream[ChatCompletionChunk]:
    """
    Performs underlying asynchronous streaming request to Z.ai and returns the async stream iterator.
    """
    # Sanitize the payload the same way as the non-streaming async path.
    payload = sanitize_unicode_surrogates(request_data)
    async_client = AsyncOpenAI(api_key=model_settings.zai_api_key, base_url=llm_config.model_endpoint)
    # include_usage makes the final chunk carry token-usage statistics.
    return await async_client.chat.completions.create(
        **payload,
        stream=True,
        stream_options={"include_usage": True},
    )
@trace_method
async def request_embeddings(self, inputs: List[str], embedding_config: EmbeddingConfig) -> List[List[float]]:
    """Request embeddings given texts and embedding config"""
    async_client = AsyncOpenAI(
        api_key=model_settings.zai_api_key,
        base_url=embedding_config.embedding_endpoint,
    )
    result = await async_client.embeddings.create(
        model=embedding_config.embedding_model,
        input=inputs,
    )
    # Unwrap the SDK objects down to plain float vectors.
    return [item.embedding for item in result.data]
@trace_method
async def convert_response_to_chat_completion(
    self,
    response_data: dict,
    input_messages: List[PydanticMessage],
    llm_config: LLMConfig,
) -> ChatCompletionResponse:
    """
    Converts raw ZAI response dict into the ChatCompletionResponse Pydantic model.
    Handles extraction of reasoning_content from ZAI GLM-4.5+ responses.
    """
    # Use parent class conversion first
    chat_completion_response = await super().convert_response_to_chat_completion(response_data, input_messages, llm_config)
    # Parse reasoning_content from ZAI responses (similar to OpenAI pattern)
    # ZAI returns reasoning_content in delta.reasoning_content (streaming) or message.reasoning_content
    # Only backfill when the base conversion left reasoning_content empty.
    if (
        chat_completion_response.choices
        and len(chat_completion_response.choices) > 0
        and chat_completion_response.choices[0].message
        and not chat_completion_response.choices[0].message.reasoning_content
    ):
        if "choices" in response_data and len(response_data["choices"]) > 0:
            choice_data = response_data["choices"][0]
            if "message" in choice_data and "reasoning_content" in choice_data["message"]:
                reasoning_content = choice_data["message"]["reasoning_content"]
                if reasoning_content:
                    chat_completion_response.choices[0].message.reasoning_content = reasoning_content
                    # No signature concept for Z.ai reasoning output.
                    chat_completion_response.choices[0].message.reasoning_content_signature = None
    # If we used a reasoning model, mark that reasoning content was used
    # NOTE(review): this flag is set whenever the reasoner is enabled, even if no
    # reasoning_content was found above, and assumes choices is non-empty — confirm intended.
    if self.is_reasoning_model(llm_config) and llm_config.enable_reasoner:
        chat_completion_response.choices[0].message.omitted_reasoning_content = True
    return chat_completion_response
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/llm_api/zai_client.py",
"license": "Apache License 2.0",
"lines": 159,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/orm/conversation.py | import uuid
from typing import TYPE_CHECKING, List, Optional
from pydantic import TypeAdapter
from sqlalchemy import JSON, ForeignKey, Index, String
from sqlalchemy.orm import Mapped, mapped_column, relationship
from letta.orm.mixins import OrganizationMixin
from letta.orm.sqlalchemy_base import SqlalchemyBase
from letta.schemas.conversation import Conversation as PydanticConversation
from letta.schemas.model import ModelSettingsUnion
if TYPE_CHECKING:
from letta.orm.agent import Agent
from letta.orm.block import Block
from letta.orm.conversation_messages import ConversationMessage
_model_settings_adapter = TypeAdapter(ModelSettingsUnion)
class Conversation(SqlalchemyBase, OrganizationMixin):
    """Conversations that can be created on an agent for concurrent messaging."""

    __tablename__ = "conversations"
    __pydantic_model__ = PydanticConversation
    __table_args__ = (
        Index("ix_conversations_agent_id", "agent_id"),
        Index("ix_conversations_org_agent", "organization_id", "agent_id"),
    )

    # Primary key: client-side generated "conv-" prefixed UUID.
    id: Mapped[str] = mapped_column(String, primary_key=True, default=lambda: f"conv-{uuid.uuid4()}")
    # Owning agent; the row is removed when the agent is deleted (ON DELETE CASCADE).
    agent_id: Mapped[str] = mapped_column(String, ForeignKey("agents.id", ondelete="CASCADE"), nullable=False)
    summary: Mapped[Optional[str]] = mapped_column(String, nullable=True, doc="Summary of the conversation")
    model: Mapped[Optional[str]] = mapped_column(
        String, nullable=True, doc="Model handle override for this conversation (format: provider/model-name)"
    )
    # Stored as raw JSON; re-validated into ModelSettingsUnion in to_pydantic().
    model_settings: Mapped[Optional[dict]] = mapped_column(
        JSON, nullable=True, doc="Model settings override for this conversation (provider-specific settings)"
    )

    # Relationships
    agent: Mapped["Agent"] = relationship("Agent", back_populates="conversations", lazy="raise")
    # lazy="raise" forces callers to load these explicitly rather than via implicit lazy loads.
    message_associations: Mapped[List["ConversationMessage"]] = relationship(
        "ConversationMessage",
        back_populates="conversation",
        cascade="all, delete-orphan",
        lazy="raise",
    )
    isolated_blocks: Mapped[List["Block"]] = relationship(
        "Block",
        secondary="blocks_conversations",
        lazy="selectin",
        passive_deletes=True,
        doc="Conversation-specific blocks that override agent defaults for isolated memory.",
    )

    def to_pydantic(self) -> PydanticConversation:
        """Converts the SQLAlchemy model to its Pydantic counterpart."""
        return self.__pydantic_model__(
            id=self.id,
            agent_id=self.agent_id,
            summary=self.summary,
            created_at=self.created_at,
            updated_at=self.updated_at,
            created_by_id=self.created_by_id,
            last_updated_by_id=self.last_updated_by_id,
            isolated_block_ids=[b.id for b in self.isolated_blocks] if self.isolated_blocks else [],
            model=self.model,
            # JSON column re-validated into the ModelSettingsUnion pydantic type.
            model_settings=_model_settings_adapter.validate_python(self.model_settings) if self.model_settings else None,
        )
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/orm/conversation.py",
"license": "Apache License 2.0",
"lines": 60,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:letta/orm/conversation_messages.py | import uuid
from typing import TYPE_CHECKING, Optional
from sqlalchemy import Boolean, ForeignKey, Index, Integer, String, UniqueConstraint
from sqlalchemy.orm import Mapped, mapped_column, relationship
from letta.orm.mixins import OrganizationMixin
from letta.orm.sqlalchemy_base import SqlalchemyBase
if TYPE_CHECKING:
from letta.orm.conversation import Conversation
from letta.orm.message import Message
class ConversationMessage(SqlalchemyBase, OrganizationMixin):
    """
    Track in-context messages for a conversation.
    This replaces the message_ids JSON list on agents with proper relational modeling.
    - conversation_id=NULL represents the "default" conversation (backward compatible)
    - conversation_id=<id> represents a named conversation for concurrent messaging
    """

    __tablename__ = "conversation_messages"
    __table_args__ = (
        Index("ix_conv_msg_conversation_position", "conversation_id", "position"),
        Index("ix_conv_msg_message_id", "message_id"),
        Index("ix_conv_msg_agent_id", "agent_id"),
        Index("ix_conv_msg_agent_conversation", "agent_id", "conversation_id"),
        # A message can appear at most once per conversation.
        UniqueConstraint("conversation_id", "message_id", name="unique_conversation_message"),
    )

    # Primary key: client-side generated "conv_msg-" prefixed UUID.
    id: Mapped[str] = mapped_column(String, primary_key=True, default=lambda: f"conv_msg-{uuid.uuid4()}")
    conversation_id: Mapped[Optional[str]] = mapped_column(
        String,
        ForeignKey("conversations.id", ondelete="CASCADE"),
        nullable=True,
        doc="NULL for default conversation, otherwise FK to conversation",
    )
    agent_id: Mapped[str] = mapped_column(
        String,
        ForeignKey("agents.id", ondelete="CASCADE"),
        nullable=False,
        doc="The agent this message association belongs to",
    )
    message_id: Mapped[str] = mapped_column(
        String,
        ForeignKey("messages.id", ondelete="CASCADE"),
        nullable=False,
        doc="The message being tracked",
    )
    position: Mapped[int] = mapped_column(
        Integer,
        nullable=False,
        doc="Position in conversation (for ordering)",
    )
    in_context: Mapped[bool] = mapped_column(
        Boolean,
        default=True,
        nullable=False,
        doc="Whether message is currently in the agent's context window",
    )

    # Relationships — lazy="raise" forces explicit eager loading by callers.
    conversation: Mapped[Optional["Conversation"]] = relationship(
        "Conversation",
        back_populates="message_associations",
        lazy="raise",
    )
    message: Mapped["Message"] = relationship(
        "Message",
        lazy="raise",
    )
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/orm/conversation_messages.py",
"license": "Apache License 2.0",
"lines": 64,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:letta/schemas/conversation.py | from typing import List, Optional
from pydantic import BaseModel, Field, field_validator
from letta.errors import LettaInvalidArgumentError
from letta.schemas.letta_base import OrmMetadataBase
from letta.schemas.model import ModelSettingsUnion
class Conversation(OrmMetadataBase):
    """Represents a conversation on an agent for concurrent messaging."""

    # IDs are generated with this prefix, e.g. "conv-<uuid>".
    __id_prefix__ = "conv"

    id: str = Field(..., description="The unique identifier of the conversation.")
    agent_id: str = Field(..., description="The ID of the agent this conversation belongs to.")
    summary: Optional[str] = Field(None, description="A summary of the conversation.")
    in_context_message_ids: List[str] = Field(default_factory=list, description="The IDs of in-context messages for the conversation.")
    isolated_block_ids: List[str] = Field(
        default_factory=list,
        description="IDs of blocks that are isolated (specific to this conversation, overriding agent defaults).",
    )
    # Per-conversation overrides of the agent's model configuration.
    model: Optional[str] = Field(
        None,
        description="The model handle for this conversation (overrides agent's model). Format: provider/model-name.",
    )
    model_settings: Optional[ModelSettingsUnion] = Field(
        None,
        description="The model settings for this conversation (overrides agent's model settings).",
    )
class CreateConversation(BaseModel):
    """Request model for creating a new conversation."""

    summary: Optional[str] = Field(None, description="A summary of the conversation.")
    isolated_block_labels: Optional[List[str]] = Field(
        None,
        description="List of block labels that should be isolated (conversation-specific) rather than shared across conversations. "
        "New blocks will be created as copies of the agent's blocks with these labels.",
    )
    model: Optional[str] = Field(
        None,
        description="The model handle for this conversation (overrides agent's model). Format: provider/model-name.",
    )
    model_settings: Optional[ModelSettingsUnion] = Field(
        None,
        description="The model settings for this conversation (overrides agent's model settings).",
    )

    @field_validator("model")
    @classmethod
    def validate_model(cls, model: Optional[str]) -> Optional[str]:
        """Require the provider/model-name handle format when a model is supplied."""
        if not model:
            return model
        # Both halves of the handle must be non-empty and separated by "/".
        provider_part, separator, model_part = model.partition("/")
        if not separator or not provider_part or not model_part:
            raise LettaInvalidArgumentError(
                "The model handle should be in the format provider/model-name", argument_name="model"
            )
        return model
class UpdateConversation(BaseModel):
    """Request model for updating a conversation."""

    summary: Optional[str] = Field(None, description="A summary of the conversation.")
    model: Optional[str] = Field(
        None,
        description="The model handle for this conversation (overrides agent's model). Format: provider/model-name.",
    )
    model_settings: Optional[ModelSettingsUnion] = Field(
        None,
        description="The model settings for this conversation (overrides agent's model settings).",
    )

    @field_validator("model")
    @classmethod
    def validate_model(cls, model: Optional[str]) -> Optional[str]:
        """Require the provider/model-name handle format when a model is supplied."""
        if not model:
            return model
        # Both halves of the handle must be non-empty and separated by "/".
        provider_part, separator, model_part = model.partition("/")
        if not separator or not provider_part or not model_part:
            raise LettaInvalidArgumentError(
                "The model handle should be in the format provider/model-name", argument_name="model"
            )
        return model
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/schemas/conversation.py",
"license": "Apache License 2.0",
"lines": 73,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/schemas/providers/zai.py | from typing import Literal
from letta.log import get_logger
logger = get_logger(__name__)
from pydantic import Field
from letta.schemas.enums import ProviderCategory, ProviderType
from letta.schemas.llm_config import LLMConfig
from letta.schemas.providers.openai import OpenAIProvider
# Z.ai model context windows, in tokens.
# Reference: https://docs.z.ai/
# GLM-5 max context window is 200K tokens but max_output_tokens (default 16k) counts against that --> 180k
MODEL_CONTEXT_WINDOWS = {
    "glm-4.5": 128000,
    "glm-4.6": 180000,
    "glm-4.7": 180000,
    "glm-5": 180000,
    "glm-5-code": 180000,
}
class ZAIProvider(OpenAIProvider):
    """Z.ai (ZhipuAI) provider - https://docs.z.ai/"""

    provider_type: Literal[ProviderType.zai] = Field(ProviderType.zai, description="The type of the provider.")
    provider_category: ProviderCategory = Field(ProviderCategory.base, description="The category of the provider (base or byok)")
    api_key: str | None = Field(None, description="API key for the Z.ai API.", deprecated=True)
    base_url: str = Field("https://api.z.ai/api/paas/v4/", description="Base URL for the Z.ai API.")

    def get_model_context_window_size(self, model_name: str) -> int | None:
        """Return the context window for a known Z.ai model, or None if unknown.

        Z.ai doesn't return context window in the model listing, so this is
        hardcoded from documentation.
        """
        return MODEL_CONTEXT_WINDOWS.get(model_name)

    async def list_llm_models_async(self) -> list[LLMConfig]:
        """Fetch the Z.ai model listing and build an LLMConfig per usable model.

        Models without a resolvable context window are skipped with a warning.
        """
        from letta.llm_api.openai import openai_get_model_list_async

        api_key = await self.api_key_enc.get_plaintext_async() if self.api_key_enc else None
        response = await openai_get_model_list_async(self.base_url, api_key=api_key)
        # Some deployments return the list bare, others under a "data" key.
        data = response.get("data", response)
        configs = []
        for model in data:
            # Explicit check instead of `assert`: asserts are stripped under
            # `python -O` (which would turn this into a KeyError), and one
            # malformed listing entry should not abort the whole sync.
            if "id" not in model:
                logger.warning(f"Z.ai model missing 'id' field, skipping: {model}")
                continue
            model_name = model["id"]
            # In case Z.ai starts supporting it in the future:
            if "context_length" in model:
                context_window_size = model["context_length"]
            else:
                context_window_size = self.get_model_context_window_size(model_name)
            if not context_window_size:
                logger.warning(f"Couldn't find context window size for model {model_name}")
                continue
            configs.append(
                LLMConfig(
                    model=model_name,
                    model_endpoint_type=self.provider_type.value,
                    model_endpoint=self.base_url,
                    context_window=context_window_size,
                    handle=self.get_handle(model_name),
                    max_tokens=self.get_default_max_output_tokens(model_name),
                    provider_name=self.name,
                    provider_category=self.provider_category,
                )
            )
        return configs
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/schemas/providers/zai.py",
"license": "Apache License 2.0",
"lines": 57,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:letta/server/rest_api/middleware/request_id.py | """
Middleware for extracting and propagating API request IDs from cloud-api.
Uses a pure ASGI middleware pattern to properly propagate the request_id
to streaming responses. BaseHTTPMiddleware has a known limitation where
contextvars are not propagated to streaming response generators.
See: https://github.com/encode/starlette/discussions/1729
This middleware:
1. Extracts the x-api-request-log-id header from cloud-api
2. Sets it in the contextvar (for non-streaming code)
3. Stores it in request.state (for streaming responses where contextvars don't propagate)
"""
from contextvars import ContextVar
from typing import Optional
from starlette.requests import Request
from starlette.types import ASGIApp, Receive, Scope, Send
from letta.otel.tracing import tracer
# Contextvar for storing the request ID across async boundaries.
# Defaults to None so get_request_id() is safe to call outside a request.
request_id_var: ContextVar[Optional[str]] = ContextVar("request_id", default=None)
def get_request_id() -> Optional[str]:
    """Return the API request ID bound to the current context, or None if unset."""
    current_id = request_id_var.get()
    return current_id
class RequestIdMiddleware:
    """
    Pure ASGI middleware that extracts and propagates the API request ID.

    The request ID comes from cloud-api via the x-api-request-log-id header
    and is used to correlate steps with API request logs. It is stored in:
    - the request_id_var contextvar (works for non-streaming responses)
    - request.state.request_id (works for streaming responses where
      contextvars may not propagate to the response generator)
    """

    def __init__(self, app: ASGIApp) -> None:
        self.app = app

    async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
        # Only HTTP requests carry the header; pass everything else straight through.
        if scope["type"] != "http":
            await self.app(scope, receive, send)
            return
        with tracer.start_as_current_span("middleware.request_id"):
            # Wrap the raw scope for convenient header access.
            incoming = Request(scope)
            log_id = incoming.headers.get("x-api-request-log-id")
            # Contextvar covers the non-streaming code paths...
            request_id_var.set(log_id)
            # ...while request.state covers streaming responses, where the
            # contextvar may not reach the response generator.
            incoming.state.request_id = log_id
            await self.app(scope, receive, send)
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/server/rest_api/middleware/request_id.py",
"license": "Apache License 2.0",
"lines": 47,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
letta-ai/letta:letta/server/rest_api/routers/v1/conversations.py | from datetime import timedelta
from typing import Annotated, List, Literal, Optional
from uuid import uuid4
from fastapi import APIRouter, Body, Depends, HTTPException, Query, status
from pydantic import BaseModel, Field
from starlette.responses import StreamingResponse
from letta.agents.agent_loop import AgentLoop
from letta.agents.letta_agent_v3 import LettaAgentV3
from letta.constants import REDIS_RUN_ID_PREFIX
from letta.data_sources.redis_client import NoopAsyncRedisClient, get_redis_client
from letta.errors import LettaExpiredError, LettaInvalidArgumentError, NoActiveRunsToCancelError
from letta.helpers.datetime_helpers import get_utc_time
from letta.log import get_logger
from letta.schemas.conversation import Conversation, CreateConversation, UpdateConversation
from letta.schemas.enums import RunStatus
from letta.schemas.job import LettaRequestConfig
from letta.schemas.letta_message import LettaMessageUnion
from letta.schemas.letta_request import ConversationMessageRequest, LettaStreamingRequest, RetrieveStreamRequest
from letta.schemas.letta_response import LettaResponse
from letta.schemas.provider_trace import BillingContext
from letta.schemas.run import Run as PydanticRun
from letta.server.rest_api.dependencies import HeaderParams, get_headers, get_letta_server
from letta.server.rest_api.redis_stream_manager import redis_sse_stream_generator
from letta.server.rest_api.streaming_response import (
StreamingResponseWithStatusCode,
add_keepalive_to_stream,
)
from letta.server.server import SyncServer
from letta.services.conversation_manager import ConversationManager
from letta.services.lettuce import LettuceClient
from letta.services.run_manager import RunManager
from letta.services.streaming_service import StreamingService
from letta.services.summarizer.summarizer_config import CompactionSettings
from letta.settings import settings
from letta.validators import ConversationId, ConversationIdOrDefault
router = APIRouter(prefix="/conversations", tags=["conversations"])
logger = get_logger(__name__)

# Single module-level manager instance shared by every endpoint below.
conversation_manager = ConversationManager()
@router.post("/", response_model=Conversation, operation_id="create_conversation")
async def create_conversation(
    agent_id: str = Query(..., description="The agent ID to create a conversation for"),
    conversation_create: CreateConversation = Body(default_factory=CreateConversation),
    server: SyncServer = Depends(get_letta_server),
    headers: HeaderParams = Depends(get_headers),
):
    """Create a new conversation for an agent."""
    # Resolve the acting user from headers (falls back to the default actor).
    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
    return await conversation_manager.create_conversation(
        agent_id=agent_id,
        conversation_create=conversation_create,
        actor=actor,
    )
@router.get("/", response_model=List[Conversation], operation_id="list_conversations")
async def list_conversations(
    agent_id: Optional[str] = Query(
        None, description="The agent ID to list conversations for (optional - returns all conversations if not provided)"
    ),
    limit: int = Query(50, description="Maximum number of conversations to return"),
    after: Optional[str] = Query(None, description="Cursor for pagination (conversation ID)"),
    summary_search: Optional[str] = Query(None, description="Search for text within conversation summaries"),
    order: Literal["asc", "desc"] = Query(
        "desc", description="Sort order for conversations. 'asc' for oldest first, 'desc' for newest first"
    ),
    order_by: Literal["created_at", "last_run_completion"] = Query("created_at", description="Field to sort by"),
    server: SyncServer = Depends(get_letta_server),
    headers: HeaderParams = Depends(get_headers),
):
    """List all conversations for an agent (or all conversations if agent_id not provided)."""
    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
    # Translate the public "asc"/"desc" query value into the boolean the manager expects.
    ascending = order == "asc"
    return await conversation_manager.list_conversations(
        agent_id=agent_id,
        actor=actor,
        limit=limit,
        after=after,
        summary_search=summary_search,
        ascending=ascending,
        sort_by=order_by,
    )
@router.get("/{conversation_id}", response_model=Conversation, operation_id="retrieve_conversation")
async def retrieve_conversation(
    conversation_id: ConversationId,
    server: SyncServer = Depends(get_letta_server),
    headers: HeaderParams = Depends(get_headers),
):
    """Retrieve a specific conversation."""
    # Resolve the acting user; the manager enforces ownership/visibility.
    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
    return await conversation_manager.get_conversation_by_id(
        conversation_id=conversation_id,
        actor=actor,
    )
@router.patch("/{conversation_id}", response_model=Conversation, operation_id="update_conversation")
async def update_conversation(
    conversation_id: ConversationId,
    conversation_update: UpdateConversation = Body(...),
    server: SyncServer = Depends(get_letta_server),
    headers: HeaderParams = Depends(get_headers),
):
    """Update a conversation."""
    # Resolve the acting user; the manager applies the partial update.
    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
    return await conversation_manager.update_conversation(
        conversation_id=conversation_id,
        conversation_update=conversation_update,
        actor=actor,
    )
@router.delete("/{conversation_id}", response_model=None, operation_id="delete_conversation")
async def delete_conversation(
    conversation_id: ConversationId,
    server: SyncServer = Depends(get_letta_server),
    headers: HeaderParams = Depends(get_headers),
):
    """
    Delete a conversation (soft delete).
    This marks the conversation as deleted but does not permanently remove it from the database.
    The conversation will no longer appear in list operations.
    Any isolated blocks associated with the conversation will be permanently deleted.
    """
    # Returns no body (response_model=None); errors surface via the manager.
    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
    await conversation_manager.delete_conversation(
        conversation_id=conversation_id,
        actor=actor,
    )
# Response alias: a JSON array of LettaMessageUnion items. The json_schema_extra
# forces the OpenAPI schema to reference the union component directly.
ConversationMessagesResponse = Annotated[
    List[LettaMessageUnion], Field(json_schema_extra={"type": "array", "items": {"$ref": "#/components/schemas/LettaMessageUnion"}})
]
@router.get(
    "/{conversation_id}/messages",
    response_model=ConversationMessagesResponse,
    operation_id="list_conversation_messages",
)
async def list_conversation_messages(
    conversation_id: ConversationIdOrDefault,
    agent_id: Optional[str] = Query(None, description="Agent ID for agent-direct mode with 'default' conversation"),
    server: SyncServer = Depends(get_letta_server),
    headers: HeaderParams = Depends(get_headers),
    before: Optional[str] = Query(
        None, description="Message ID cursor for pagination. Returns messages that come before this message ID in the specified sort order"
    ),
    after: Optional[str] = Query(
        None, description="Message ID cursor for pagination. Returns messages that come after this message ID in the specified sort order"
    ),
    limit: Optional[int] = Query(100, description="Maximum number of messages to return"),
    order: Literal["asc", "desc"] = Query(
        "desc", description="Sort order for messages by creation time. 'asc' for oldest first, 'desc' for newest first"
    ),
    order_by: Literal["created_at"] = Query("created_at", description="Field to sort by"),
    group_id: Optional[str] = Query(None, description="Group ID to filter messages by."),
    include_err: Optional[bool] = Query(
        None, description="Whether to include error messages and error statuses. For debugging purposes only."
    ),
):
    """
    List all messages in a conversation.
    Returns LettaMessage objects (UserMessage, AssistantMessage, etc.) for all
    messages in the conversation, with support for cursor-based pagination.
    **Agent-direct mode**: Pass conversation_id="default" with agent_id parameter
    to list messages from the agent's default conversation.
    **Deprecated**: Passing an agent ID as conversation_id still works but will be removed.
    """
    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
    # Agent-direct mode: conversation_id="default" + agent_id param (preferred)
    # OR conversation_id="agent-*" (backwards compat, deprecated)
    resolved_agent_id = None
    if conversation_id == "default" and agent_id:
        resolved_agent_id = agent_id
    elif conversation_id.startswith("agent-"):
        resolved_agent_id = conversation_id
    if resolved_agent_id:
        # Agent path: read recall memory directly from the agent's default conversation.
        return await server.get_agent_recall_async(
            agent_id=resolved_agent_id,
            after=after,
            before=before,
            limit=limit,
            group_id=group_id,
            conversation_id=None,  # Default conversation (no isolation)
            reverse=(order == "desc"),
            return_message_object=False,
            include_err=include_err,
            actor=actor,
        )
    # Named-conversation path: page through the conversation's own messages.
    return await conversation_manager.list_conversation_messages(
        conversation_id=conversation_id,
        actor=actor,
        limit=limit,
        before=before,
        after=after,
        reverse=(order == "desc"),
        group_id=group_id,
        include_err=include_err,
    )
async def _send_agent_direct_message(
    agent_id: str,
    request: ConversationMessageRequest,
    server: SyncServer,
    actor,
    billing_context: "BillingContext | None" = None,
) -> StreamingResponse | LettaResponse:
    """
    Handle agent-direct messaging with locking but without conversation features.
    This is used when the conversation_id in the URL is actually an agent ID,
    providing a unified endpoint while maintaining agent-level locking.
    """
    redis_client = await get_redis_client()
    # Streaming mode (default)
    if request.streaming:
        # Re-shape the conversation request into the streaming request schema
        # expected by StreamingService; locking is delegated via should_lock=True.
        streaming_request = LettaStreamingRequest(
            messages=request.messages,
            streaming=True,
            stream_tokens=request.stream_tokens,
            include_pings=request.include_pings,
            background=request.background,
            max_steps=request.max_steps,
            use_assistant_message=request.use_assistant_message,
            assistant_message_tool_name=request.assistant_message_tool_name,
            assistant_message_tool_kwarg=request.assistant_message_tool_kwarg,
            include_return_message_types=request.include_return_message_types,
            override_model=request.override_model,
            client_tools=request.client_tools,
        )
        streaming_service = StreamingService(server)
        run, result = await streaming_service.create_agent_stream(
            agent_id=agent_id,
            actor=actor,
            request=streaming_request,
            run_type="send_message",
            conversation_id=None,
            should_lock=True,
            billing_context=billing_context,
        )
        return result
    # Non-streaming mode with locking
    agent = await server.agent_manager.get_agent_by_id_async(
        agent_id,
        actor,
        include_relationships=["memory", "multi_agent_group", "sources", "tool_exec_environment_variables", "tools", "tags"],
    )
    # Handle model override if specified in the request
    if request.override_model:
        override_llm_config = await server.get_llm_config_from_handle_async(
            actor=actor,
            handle=request.override_model,
        )
        # Swap the llm_config on a copy of the agent state; the stored agent is untouched.
        agent = agent.model_copy(update={"llm_config": override_llm_config})
    # Acquire lock using agent_id as lock key
    if not isinstance(redis_client, NoopAsyncRedisClient):
        await redis_client.acquire_conversation_lock(
            conversation_id=agent_id,
            token=str(uuid4()),
        )
    try:
        # Create a run for execution tracking
        run = None
        if settings.track_agent_run:
            runs_manager = RunManager()
            run = await runs_manager.create_run(
                pydantic_run=PydanticRun(
                    agent_id=agent_id,
                    background=False,
                    metadata={
                        "run_type": "send_message",
                    },
                    request_config=LettaRequestConfig.from_letta_request(request),
                ),
                actor=actor,
            )
        # Set run_id in Redis for cancellation support
        await redis_client.set(f"{REDIS_RUN_ID_PREFIX}:{agent_id}", run.id if run else None)
        agent_loop = AgentLoop.load(agent_state=agent, actor=actor)
        return await agent_loop.step(
            request.messages,
            max_steps=request.max_steps,
            run_id=run.id if run else None,
            use_assistant_message=request.use_assistant_message,
            include_return_message_types=request.include_return_message_types,
            client_tools=request.client_tools,
            conversation_id=None,
            include_compaction_messages=request.include_compaction_messages,
            billing_context=billing_context,
        )
    finally:
        # Release lock
        # NOTE(review): released unconditionally even when acquisition was skipped
        # for NoopAsyncRedisClient — presumably a no-op in that case; confirm.
        await redis_client.release_conversation_lock(agent_id)
@router.post(
    "/{conversation_id}/messages",
    response_model=LettaResponse,
    operation_id="send_conversation_message",
    responses={
        200: {
            "description": "Successful response",
            "content": {
                "text/event-stream": {"description": "Server-Sent Events stream (default, when streaming=true)"},
                "application/json": {"description": "JSON response (when streaming=false)"},
            },
        }
    },
)
async def send_conversation_message(
    conversation_id: ConversationIdOrDefault,
    request: ConversationMessageRequest = Body(...),
    server: SyncServer = Depends(get_letta_server),
    headers: HeaderParams = Depends(get_headers),
) -> StreamingResponse | LettaResponse:
    """
    Send a message to a conversation and get a response.
    This endpoint sends a message to an existing conversation.
    By default (streaming=true), returns a streaming response (Server-Sent Events).
    Set streaming=false to get a complete JSON response.
    **Agent-direct mode**: Pass conversation_id="default" with agent_id in request body
    to send messages to the agent's default conversation with locking.
    **Deprecated**: Passing an agent ID as conversation_id still works but will be removed.
    """
    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
    # An empty message list has nothing to execute; reject before any DB work.
    if not request.messages or len(request.messages) == 0:
        raise HTTPException(status_code=422, detail="Messages must not be empty")
    # Agent-direct mode: conversation_id="default" + agent_id in body (preferred)
    # OR conversation_id="agent-*" (backwards compat, deprecated)
    resolved_agent_id = None
    if conversation_id == "default" and request.agent_id:
        resolved_agent_id = request.agent_id
    elif conversation_id.startswith("agent-"):
        resolved_agent_id = conversation_id
    if resolved_agent_id:
        # Agent-direct mode: use agent ID, enable locking, skip conversation features
        return await _send_agent_direct_message(
            agent_id=resolved_agent_id,
            request=request,
            server=server,
            actor=actor,
            billing_context=headers.billing_context,
        )
    # Normal conversation mode
    conversation = await conversation_manager.get_conversation_by_id(
        conversation_id=conversation_id,
        actor=actor,
    )
    # Streaming mode (default)
    if request.streaming:
        # Convert to LettaStreamingRequest for StreamingService compatibility.
        # Fields are copied one-to-one from the conversation-level request.
        streaming_request = LettaStreamingRequest(
            messages=request.messages,
            streaming=True,
            stream_tokens=request.stream_tokens,
            include_pings=request.include_pings,
            background=request.background,
            max_steps=request.max_steps,
            use_assistant_message=request.use_assistant_message,
            assistant_message_tool_name=request.assistant_message_tool_name,
            assistant_message_tool_kwarg=request.assistant_message_tool_kwarg,
            include_return_message_types=request.include_return_message_types,
            override_model=request.override_model,
            client_tools=request.client_tools,
        )
        streaming_service = StreamingService(server)
        # The StreamingService creates the run and the SSE response itself.
        run, result = await streaming_service.create_agent_stream(
            agent_id=conversation.agent_id,
            actor=actor,
            request=streaming_request,
            run_type="send_conversation_message",
            conversation_id=conversation_id,
            billing_context=headers.billing_context,
        )
        return result
    # Non-streaming mode
    agent = await server.agent_manager.get_agent_by_id_async(
        conversation.agent_id,
        actor,
        include_relationships=["memory", "multi_agent_group", "sources", "tool_exec_environment_variables", "tools", "tags"],
    )
    # Apply conversation-level model override if set (lower priority than request override)
    if conversation.model and not request.override_model:
        conversation_llm_config = await server.get_llm_config_from_handle_async(
            actor=actor,
            handle=conversation.model,
        )
        if conversation.model_settings is not None:
            update_params = conversation.model_settings._to_legacy_config_params()
            # Don't clobber max_tokens with the Pydantic default when the caller
            # didn't explicitly provide max_output_tokens.
            if "max_output_tokens" not in conversation.model_settings.model_fields_set:
                update_params.pop("max_tokens", None)
            conversation_llm_config = conversation_llm_config.model_copy(update=update_params)
        agent = agent.model_copy(update={"llm_config": conversation_llm_config})
    # Request-level override wins over the conversation-level one above.
    if request.override_model:
        override_llm_config = await server.get_llm_config_from_handle_async(
            actor=actor,
            handle=request.override_model,
        )
        agent = agent.model_copy(update={"llm_config": override_llm_config})
    # Create a run for execution tracking
    run = None
    if settings.track_agent_run:
        runs_manager = RunManager()
        run = await runs_manager.create_run(
            pydantic_run=PydanticRun(
                agent_id=conversation.agent_id,
                background=False,
                metadata={
                    "run_type": "send_conversation_message",
                },
                request_config=LettaRequestConfig.from_letta_request(request),
            ),
            actor=actor,
        )
    # Set run_id in Redis for cancellation support
    redis_client = await get_redis_client()
    await redis_client.set(f"{REDIS_RUN_ID_PREFIX}:{conversation.agent_id}", run.id if run else None)
    # Execute the agent loop synchronously and return the full response.
    agent_loop = AgentLoop.load(agent_state=agent, actor=actor)
    return await agent_loop.step(
        request.messages,
        max_steps=request.max_steps,
        run_id=run.id if run else None,
        use_assistant_message=request.use_assistant_message,
        include_return_message_types=request.include_return_message_types,
        client_tools=request.client_tools,
        conversation_id=conversation_id,
        include_compaction_messages=request.include_compaction_messages,
        billing_context=headers.billing_context,
    )
@router.post(
    "/{conversation_id}/stream",
    response_model=None,
    operation_id="retrieve_conversation_stream",
    responses={
        200: {
            "description": "Successful response",
            "content": {
                "text/event-stream": {
                    "description": "Server-Sent Events stream",
                    "schema": {
                        "oneOf": [
                            {"$ref": "#/components/schemas/SystemMessage"},
                            {"$ref": "#/components/schemas/UserMessage"},
                            {"$ref": "#/components/schemas/ReasoningMessage"},
                            {"$ref": "#/components/schemas/HiddenReasoningMessage"},
                            {"$ref": "#/components/schemas/ToolCallMessage"},
                            {"$ref": "#/components/schemas/ToolReturnMessage"},
                            {"$ref": "#/components/schemas/AssistantMessage"},
                            {"$ref": "#/components/schemas/ApprovalRequestMessage"},
                            {"$ref": "#/components/schemas/ApprovalResponseMessage"},
                            {"$ref": "#/components/schemas/LettaPing"},
                            {"$ref": "#/components/schemas/LettaErrorMessage"},
                            {"$ref": "#/components/schemas/LettaStopReason"},
                            {"$ref": "#/components/schemas/LettaUsageStatistics"},
                        ]
                    },
                },
            },
        }
    },
)
async def retrieve_conversation_stream(
    conversation_id: ConversationIdOrDefault,
    request: RetrieveStreamRequest = Body(None),
    headers: HeaderParams = Depends(get_headers),
    server: SyncServer = Depends(get_letta_server),
):
    """
    Resume the stream for the most recent active run in a conversation.
    This endpoint allows you to reconnect to an active background stream
    for a conversation, enabling recovery from network interruptions.
    **Agent-direct mode**: Pass conversation_id="default" with agent_id in request body
    to retrieve the stream for the agent's most recent active run.
    **Deprecated**: Passing an agent ID as conversation_id still works but will be removed.
    """
    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
    runs_manager = RunManager()
    # Agent-direct mode: conversation_id="default" + agent_id in body (preferred)
    # OR conversation_id="agent-*" (backwards compat, deprecated)
    resolved_agent_id = None
    if conversation_id == "default" and request and request.agent_id:
        resolved_agent_id = request.agent_id
    elif conversation_id.startswith("agent-"):
        resolved_agent_id = conversation_id
    # Find the most recent active run (limit=1, newest first)
    if resolved_agent_id:
        # Agent-direct mode: find runs by agent_id
        active_runs = await runs_manager.list_runs(
            actor=actor,
            agent_id=resolved_agent_id,
            statuses=[RunStatus.created, RunStatus.running],
            limit=1,
            ascending=False,
        )
    else:
        # Normal mode: find runs by conversation_id
        active_runs = await runs_manager.list_runs(
            actor=actor,
            conversation_id=conversation_id,
            statuses=[RunStatus.created, RunStatus.running],
            limit=1,
            ascending=False,
        )
    if not active_runs:
        raise LettaInvalidArgumentError("No active runs found for this conversation.")
    run = active_runs[0]
    # Only background runs persist their stream to Redis, so only they can be resumed.
    if not run.background:
        raise LettaInvalidArgumentError("Run was not created in background mode, so it cannot be retrieved.")
    # Streams are only kept for a bounded window; reject stale runs.
    if run.created_at < get_utc_time() - timedelta(hours=3):
        raise LettaExpiredError("Run was created more than 3 hours ago, and is now expired.")
    redis_client = await get_redis_client()
    # A no-op client means Redis isn't configured — resume is impossible without it.
    if isinstance(redis_client, NoopAsyncRedisClient):
        raise HTTPException(
            status_code=503,
            detail=(
                "Background streaming requires Redis to be running. "
                "Please ensure Redis is properly configured. "
                f"LETTA_REDIS_HOST: {settings.redis_host}, LETTA_REDIS_PORT: {settings.redis_port}"
            ),
        )
    # Base stream replays buffered SSE events from Redis, optionally from a cursor.
    stream = redis_sse_stream_generator(
        redis_client=redis_client,
        run_id=run.id,
        starting_after=request.starting_after if request else None,
        poll_interval=request.poll_interval if request else None,
        batch_size=request.batch_size if request else None,
    )
    # Wrapper layering: cancellation-awareness first, then keepalive pings on top.
    if settings.enable_cancellation_aware_streaming:
        from letta.server.rest_api.streaming_response import cancellation_aware_stream_wrapper, get_cancellation_event_for_run
        stream = cancellation_aware_stream_wrapper(
            stream_generator=stream,
            run_manager=server.run_manager,
            run_id=run.id,
            actor=actor,
            cancellation_event=get_cancellation_event_for_run(run.id),
        )
    if request and request.include_pings and settings.enable_keepalive:
        stream = add_keepalive_to_stream(stream, keepalive_interval=settings.keepalive_interval, run_id=run.id)
    return StreamingResponseWithStatusCode(
        stream,
        media_type="text/event-stream",
    )
@router.post("/{conversation_id}/cancel", operation_id="cancel_conversation")
async def cancel_conversation(
    conversation_id: ConversationIdOrDefault,
    agent_id: Optional[str] = Query(None, description="Agent ID for agent-direct mode with 'default' conversation"),
    server: SyncServer = Depends(get_letta_server),
    headers: HeaderParams = Depends(get_headers),
) -> dict:
    """
    Cancel runs associated with a conversation.
    Note: To cancel active runs, Redis is required.
    **Agent-direct mode**: Pass conversation_id="default" with agent_id query parameter
    to cancel runs for the agent's default conversation.
    **Deprecated**: Passing an agent ID as conversation_id still works but will be removed.

    Returns a dict mapping run_id -> "cancelled" or "failed".
    """
    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
    logger.info(
        "[Interrupt] Cancel request received for conversation=%s by actor=%s (org=%s)",
        conversation_id,
        actor.id,
        actor.organization_id,
    )
    if not settings.track_agent_run:
        raise HTTPException(status_code=400, detail="Agent run tracking is disabled")
    # Agent-direct mode: conversation_id="default" + agent_id param (preferred)
    # OR conversation_id="agent-*" (backwards compat, deprecated)
    resolved_agent_id = None
    if conversation_id == "default" and agent_id:
        resolved_agent_id = agent_id
    elif conversation_id.startswith("agent-"):
        resolved_agent_id = conversation_id
    if resolved_agent_id:
        # BUGFIX: in the deprecated "agent-*" path the agent_id query parameter is
        # None; cancel_run below needs the real agent id, so populate it here.
        # (No-op in the "default"+agent_id case, where they already match.)
        agent_id = resolved_agent_id
        # Agent-direct mode: use agent_id directly, skip conversation lookup
        # Find active runs for this agent (default conversation has conversation_id=None)
        runs = await server.run_manager.list_runs(
            actor=actor,
            agent_id=resolved_agent_id,
            statuses=[RunStatus.created, RunStatus.running],
            ascending=False,
            limit=100,
        )
    else:
        # Verify conversation exists and get agent_id
        conversation = await conversation_manager.get_conversation_by_id(
            conversation_id=conversation_id,
            actor=actor,
        )
        agent_id = conversation.agent_id
        # Find active runs for this conversation
        runs = await server.run_manager.list_runs(
            actor=actor,
            statuses=[RunStatus.created, RunStatus.running],
            ascending=False,
            conversation_id=conversation_id,
            limit=100,
        )
    run_ids = [run.id for run in runs]
    if not run_ids:
        raise NoActiveRunsToCancelError(conversation_id=conversation_id)
    results = {}
    for run_id in run_ids:
        try:
            run = await server.run_manager.get_run_by_id(run_id=run_id, actor=actor)
            # Runs delegated to Lettuce need a best-effort remote cancel first;
            # a failure there should not prevent the local cancellation below.
            if run.metadata and run.metadata.get("lettuce"):
                try:
                    lettuce_client = await LettuceClient.create()
                    await lettuce_client.cancel(run_id)
                except Exception as e:
                    logger.error(f"Failed to cancel Lettuce run {run_id}: {e}")
            await server.run_manager.cancel_run(actor=actor, agent_id=agent_id, run_id=run_id)
        except Exception as e:
            # Record the failure and keep cancelling the remaining runs.
            results[run_id] = "failed"
            logger.error(f"Failed to cancel run {run_id}: {str(e)}")
            continue
        results[run_id] = "cancelled"
        logger.info(f"Cancelled run {run_id}")
    return results
class CompactionRequest(BaseModel):
    """Request body for the conversation compaction endpoint.

    Both fields are optional: agent_id enables agent-direct mode, and
    compaction_settings overrides the agent's stored settings for one request.
    """

    agent_id: Optional[str] = Field(
        default=None,
        description="Agent ID for agent-direct mode with 'default' conversation. Use with conversation_id='default' in the URL path.",
    )
    compaction_settings: Optional[CompactionSettings] = Field(
        default=None,
        description="Optional compaction settings to use for this summarization request. If not provided, the agent's default settings will be used.",
    )
class CompactionResponse(BaseModel):
    """Result of compacting a conversation's message history."""

    summary: str  # the generated summary text
    num_messages_before: int  # in-context message count before compaction
    num_messages_after: int  # in-context message count after compaction
@router.post("/{conversation_id}/compact", response_model=CompactionResponse, operation_id="compact_conversation")
async def compact_conversation(
    conversation_id: ConversationIdOrDefault,
    request: Optional[CompactionRequest] = Body(default=None),
    server: SyncServer = Depends(get_letta_server),
    headers: HeaderParams = Depends(get_headers),
):
    """
    Compact (summarize) a conversation's message history.
    This endpoint summarizes the in-context messages for a specific conversation,
    reducing the message count while preserving important context.
    **Agent-direct mode**: Pass conversation_id="default" with agent_id in request body
    to compact the agent's default conversation messages.
    **Deprecated**: Passing an agent ID as conversation_id still works but will be removed.
    """
    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
    # Agent-direct mode: conversation_id="default" + agent_id in body (preferred)
    # OR conversation_id="agent-*" (backwards compat, deprecated)
    resolved_agent_id = None
    if conversation_id == "default" and request and request.agent_id:
        resolved_agent_id = request.agent_id
    elif conversation_id.startswith("agent-"):
        resolved_agent_id = conversation_id
    if resolved_agent_id:
        # Agent-direct mode: compact agent's default conversation
        agent = await server.agent_manager.get_agent_by_id_async(resolved_agent_id, actor, include_relationships=["multi_agent_group"])
        in_context_messages = await server.message_manager.get_messages_by_ids_async(message_ids=agent.message_ids, actor=actor)
        agent_loop = LettaAgentV3(agent_state=agent, actor=actor)
    else:
        # Get the conversation to find the agent_id
        conversation = await conversation_manager.get_conversation_by_id(
            conversation_id=conversation_id,
            actor=actor,
        )
        # Get the agent state
        agent = await server.agent_manager.get_agent_by_id_async(conversation.agent_id, actor, include_relationships=["multi_agent_group"])
        # Get in-context messages for this conversation
        in_context_messages = await conversation_manager.get_messages_for_conversation(
            conversation_id=conversation_id,
            actor=actor,
        )
        # Create agent loop with conversation context
        agent_loop = LettaAgentV3(agent_state=agent, actor=actor, conversation_id=conversation_id)
    if not in_context_messages:
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail="No in-context messages found for this conversation.",
        )
    # Merge request compaction_settings with agent's settings (request overrides agent)
    if agent.compaction_settings and request and request.compaction_settings:
        # Start with agent's settings, override with new values from request
        # Use model_fields_set to get the fields that were changed in the request (want to ignore the defaults that get set automatically)
        # model_copy() (pydantic v2) instead of the deprecated .copy(), consistent
        # with the rest of this module; do not mutate the agent's stored settings.
        compaction_settings = agent.compaction_settings.model_copy()
        changed_fields = request.compaction_settings.model_fields_set
        for field in changed_fields:
            setattr(compaction_settings, field, getattr(request.compaction_settings, field))
        # If mode changed from agent's original settings and prompt not explicitly set in request, then use the default prompt for the new mode
        # Ex: previously was sliding_window, now is all, so we need to use the default prompt for all mode
        if (
            "mode" in changed_fields
            and "prompt" not in changed_fields
            and agent.compaction_settings.mode != request.compaction_settings.mode
        ):
            from letta.services.summarizer.summarizer_config import get_default_prompt_for_mode

            compaction_settings.prompt = get_default_prompt_for_mode(compaction_settings.mode)
    else:
        # No merge needed: request settings win outright if present, else the agent's.
        compaction_settings = (request and request.compaction_settings) or agent.compaction_settings
    num_messages_before = len(in_context_messages)
    # Run compaction
    summary_message, messages, summary = await agent_loop.compact(
        messages=in_context_messages,
        compaction_settings=compaction_settings,
        use_summary_role=True,
    )
    num_messages_after = len(messages)
    # Validate compaction reduced messages
    if num_messages_before <= num_messages_after:
        logger.warning(f"Summarization failed to reduce the number of messages. {num_messages_before} messages -> {num_messages_after}.")
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail="Summarization failed to reduce the number of messages. You may not have enough messages to compact or need to use a different CompactionSettings (e.g. using `all` mode).",
        )
    # Checkpoint the messages (this will update the conversation_messages table)
    await agent_loop._checkpoint_messages(run_id=None, step_id=None, new_messages=[summary_message], in_context_messages=messages)
    logger.info(f"Compacted conversation {conversation_id}: {num_messages_before} messages -> {num_messages_after}")
    return CompactionResponse(
        summary=summary,
        num_messages_before=num_messages_before,
        num_messages_after=num_messages_after,
    )
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/server/rest_api/routers/v1/conversations.py",
"license": "Apache License 2.0",
"lines": 711,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/services/conversation_manager.py | from typing import TYPE_CHECKING, Dict, List, Optional
if TYPE_CHECKING:
pass
# Import AgentState outside TYPE_CHECKING for @enforce_types decorator
from sqlalchemy import and_, asc, delete, desc, func, nulls_last, or_, select
from letta.errors import LettaInvalidArgumentError
from letta.helpers.datetime_helpers import get_utc_time
from letta.orm.agent import Agent as AgentModel
from letta.orm.block import Block as BlockModel
from letta.orm.blocks_conversations import BlocksConversations
from letta.orm.conversation import Conversation as ConversationModel
from letta.orm.conversation_messages import ConversationMessage as ConversationMessageModel
from letta.orm.message import Message as MessageModel
from letta.orm.run import Run as RunModel
from letta.otel.tracing import trace_method
from letta.schemas.agent import AgentState
from letta.schemas.block import Block as PydanticBlock
from letta.schemas.conversation import Conversation as PydanticConversation, CreateConversation, UpdateConversation
from letta.schemas.letta_message import LettaMessage
from letta.schemas.message import Message as PydanticMessage
from letta.schemas.user import User as PydanticUser
from letta.server.db import db_registry
from letta.services.helpers.agent_manager_helper import validate_agent_exists_async
from letta.utils import enforce_types
class ConversationManager:
"""Manager class to handle business logic related to Conversations."""
@staticmethod
def _serialize_model_settings(model_settings) -> Optional[dict]:
"""Serialize model settings for DB storage, stripping max_output_tokens if not explicitly set.
Uses model_dump() to preserve all fields (including the provider_type discriminator),
but removes max_output_tokens when it wasn't explicitly provided by the caller so we
don't persist the Pydantic default (4096) and later overwrite the agent's own value.
"""
if model_settings is None:
return None
data = model_settings.model_dump()
if "max_output_tokens" not in model_settings.model_fields_set:
data.pop("max_output_tokens", None)
return data
    @enforce_types
    @trace_method
    async def create_conversation(
        self,
        agent_id: str,
        conversation_create: CreateConversation,
        actor: PydanticUser,
    ) -> PydanticConversation:
        """Create a new conversation for an agent.
        Args:
            agent_id: The ID of the agent this conversation belongs to
            conversation_create: The conversation creation request, optionally including
                isolated_block_labels for conversation-specific memory blocks
            actor: The user performing the action
        Returns:
            The created conversation with isolated_block_ids if any were created
        """
        async with db_registry.async_session() as session:
            # Validate that the agent exists before creating the conversation
            await validate_agent_exists_async(session, agent_id, actor)
            conversation = ConversationModel(
                agent_id=agent_id,
                summary=conversation_create.summary,
                organization_id=actor.organization_id,
                model=conversation_create.model,
                # Stored as a JSON dict; implicit max_output_tokens default is stripped
                model_settings=self._serialize_model_settings(conversation_create.model_settings),
            )
            await conversation.create_async(session, actor=actor)
            # Handle isolated blocks if requested
            isolated_block_ids = []
            if conversation_create.isolated_block_labels:
                isolated_block_ids = await self._create_isolated_blocks(
                    session=session,
                    conversation=conversation,
                    agent_id=agent_id,
                    isolated_block_labels=conversation_create.isolated_block_labels,
                    actor=actor,
                )
            pydantic_conversation = conversation.to_pydantic()
            pydantic_conversation.isolated_block_ids = isolated_block_ids
        # Compile and persist the initial system message for this conversation
        # This ensures the conversation captures the latest memory block state at creation time
        # (runs after the session above commits, in its own sessions)
        await self.compile_and_save_system_message_for_conversation(
            conversation_id=pydantic_conversation.id,
            agent_id=agent_id,
            actor=actor,
        )
        return pydantic_conversation
    @trace_method
    async def compile_and_save_system_message_for_conversation(
        self,
        conversation_id: str,
        agent_id: str,
        actor: PydanticUser,
        agent_state: Optional["AgentState"] = None,
        message_manager: Optional[object] = None,
    ) -> PydanticMessage:
        """Compile and persist the initial system message for a conversation.
        This recompiles the system prompt with the latest memory block values
        and metadata, ensuring the conversation starts with an up-to-date
        system message.
        This is the single source of truth for creating a conversation's system
        message — used both at conversation creation time and as a fallback
        when a conversation has no messages yet.
        Args:
            conversation_id: The conversation to add the system message to
            agent_id: The agent this conversation belongs to
            actor: The user performing the action
            agent_state: Optional pre-loaded agent state (avoids redundant DB load)
            message_manager: Optional pre-loaded MessageManager instance
        Returns:
            The persisted system message
        """
        # Lazy imports to avoid circular dependencies
        from letta.prompts.prompt_generator import PromptGenerator
        from letta.services.message_manager import MessageManager
        from letta.services.passage_manager import PassageManager

        if message_manager is None:
            message_manager = MessageManager()
        if agent_state is None:
            from letta.services.agent_manager import AgentManager

            # "memory" and "sources" relationships are required by the prompt compiler below
            agent_state = await AgentManager().get_agent_by_id_async(
                agent_id=agent_id,
                include_relationships=["memory", "sources"],
                actor=actor,
            )
        passage_manager = PassageManager()
        # Counts are embedded in the compiled prompt's metadata section
        num_messages = await message_manager.size_async(actor=actor, agent_id=agent_id)
        num_archival_memories = await passage_manager.agent_passage_size_async(actor=actor, agent_id=agent_id)
        # Compile the system message with current memory state
        system_message_str = await PromptGenerator.compile_system_message_async(
            system_prompt=agent_state.system,
            in_context_memory=agent_state.memory,
            in_context_memory_last_edit=get_utc_time(),
            timezone=agent_state.timezone,
            user_defined_variables=None,
            append_icm_if_missing=True,
            previous_message_count=num_messages,
            archival_memory_size=num_archival_memories,
            sources=agent_state.sources,
            max_files_open=agent_state.max_files_open,
        )
        system_message = PydanticMessage.dict_to_message(
            agent_id=agent_id,
            model=agent_state.llm_config.model,
            openai_message_dict={"role": "system", "content": system_message_str},
        )
        # Persist the new system message
        persisted_messages = await message_manager.create_many_messages_async([system_message], actor=actor)
        system_message = persisted_messages[0]
        # Add it to the conversation tracking at position 0
        # (system message must be the first in-context message of the conversation)
        await self.add_messages_to_conversation(
            conversation_id=conversation_id,
            agent_id=agent_id,
            message_ids=[system_message.id],
            actor=actor,
            starting_position=0,
        )
        return system_message
@enforce_types
@trace_method
async def get_conversation_by_id(
self,
conversation_id: str,
actor: PydanticUser,
) -> PydanticConversation:
"""Retrieve a conversation by its ID, including in-context message IDs."""
async with db_registry.async_session() as session:
conversation = await ConversationModel.read_async(
db_session=session,
identifier=conversation_id,
actor=actor,
check_is_deleted=True,
)
# Get the in-context message IDs for this conversation
message_ids = await self.get_message_ids_for_conversation(
conversation_id=conversation_id,
actor=actor,
)
# Build the pydantic model with in_context_message_ids
pydantic_conversation = conversation.to_pydantic()
pydantic_conversation.in_context_message_ids = message_ids
return pydantic_conversation
    @enforce_types
    @trace_method
    async def list_conversations(
        self,
        agent_id: Optional[str],
        actor: PydanticUser,
        limit: int = 50,
        after: Optional[str] = None,
        summary_search: Optional[str] = None,
        ascending: bool = False,
        sort_by: str = "created_at",
    ) -> List[PydanticConversation]:
        """List conversations for an agent (or all conversations) with cursor-based pagination.
        Args:
            agent_id: The agent ID to list conversations for (optional - returns all if not provided)
            actor: The user performing the action
            limit: Maximum number of conversations to return
            after: Cursor for pagination (conversation ID)
            summary_search: Optional text to search for within the summary field
            ascending: Sort order (True for oldest first, False for newest first)
            sort_by: Field to sort by ("created_at" or "last_run_completion")
        Returns:
            List of conversations matching the criteria
        """
        async with db_registry.async_session() as session:
            # Build base query with optional join for last_run_completion
            if sort_by == "last_run_completion":
                # Subquery to get the latest completed_at for each conversation
                latest_run_subquery = (
                    select(RunModel.conversation_id, func.max(RunModel.completed_at).label("last_run_completion"))
                    .where(RunModel.conversation_id.isnot(None))
                    .group_by(RunModel.conversation_id)
                    .subquery()
                )
                # Join conversations with the subquery
                # (outer join: conversations with no completed runs sort as NULL)
                stmt = select(ConversationModel).outerjoin(
                    latest_run_subquery, ConversationModel.id == latest_run_subquery.c.conversation_id
                )
                sort_column = latest_run_subquery.c.last_run_completion
                sort_nulls_last = True
            else:
                # Simple query for created_at
                stmt = select(ConversationModel)
                sort_column = ConversationModel.created_at
                sort_nulls_last = False
            # Build where conditions
            conditions = [
                ConversationModel.organization_id == actor.organization_id,
                ConversationModel.is_deleted == False,
            ]
            # Add agent_id filter if provided
            if agent_id is not None:
                conditions.append(ConversationModel.agent_id == agent_id)
            # Add summary search filter if provided
            if summary_search:
                conditions.extend(
                    [
                        ConversationModel.summary.isnot(None),
                        ConversationModel.summary.contains(summary_search),
                    ]
                )
            stmt = stmt.where(and_(*conditions))
            # Handle cursor pagination (keyset-style: filter relative to the cursor
            # row's sort value, with conversation ID as the tie-breaker)
            if after:
                # Get the sort value for the cursor conversation
                if sort_by == "last_run_completion":
                    cursor_query = (
                        select(ConversationModel.id, func.max(RunModel.completed_at).label("last_run_completion"))
                        .outerjoin(RunModel, ConversationModel.id == RunModel.conversation_id)
                        .where(ConversationModel.id == after)
                        .group_by(ConversationModel.id)
                    )
                    result = (await session.execute(cursor_query)).first()
                    if result:
                        after_id, after_sort_value = result
                        # Apply cursor filter
                        # NULL sort values need special handling because NULLs are
                        # placed last regardless of direction (see ordering below)
                        if after_sort_value is None:
                            # Cursor is at NULL - if ascending, get non-NULLs or NULLs with greater ID
                            if ascending:
                                stmt = stmt.where(
                                    or_(and_(sort_column.is_(None), ConversationModel.id > after_id), sort_column.isnot(None))
                                )
                            else:
                                # If descending, get NULLs with smaller ID
                                stmt = stmt.where(and_(sort_column.is_(None), ConversationModel.id < after_id))
                        else:
                            # Cursor is at non-NULL
                            if ascending:
                                # Moving forward: greater values or same value with greater ID
                                stmt = stmt.where(
                                    and_(
                                        sort_column.isnot(None),
                                        or_(
                                            sort_column > after_sort_value,
                                            and_(sort_column == after_sort_value, ConversationModel.id > after_id),
                                        ),
                                    )
                                )
                            else:
                                # Moving backward: smaller values or NULLs or same value with smaller ID
                                stmt = stmt.where(
                                    or_(
                                        sort_column.is_(None),
                                        sort_column < after_sort_value,
                                        and_(sort_column == after_sort_value, ConversationModel.id < after_id),
                                    )
                                )
                else:
                    # Simple created_at cursor
                    after_conv = await ConversationModel.read_async(
                        db_session=session,
                        identifier=after,
                        actor=actor,
                    )
                    if ascending:
                        stmt = stmt.where(ConversationModel.created_at > after_conv.created_at)
                    else:
                        stmt = stmt.where(ConversationModel.created_at < after_conv.created_at)
            # Apply ordering
            order_fn = asc if ascending else desc
            if sort_nulls_last:
                stmt = stmt.order_by(nulls_last(order_fn(sort_column)), order_fn(ConversationModel.id))
            else:
                stmt = stmt.order_by(order_fn(sort_column), order_fn(ConversationModel.id))
            stmt = stmt.limit(limit)
            result = await session.execute(stmt)
            conversations = result.scalars().all()
            return [conv.to_pydantic() for conv in conversations]
@enforce_types
@trace_method
async def update_conversation(
self,
conversation_id: str,
conversation_update: UpdateConversation,
actor: PydanticUser,
) -> PydanticConversation:
"""Update a conversation."""
async with db_registry.async_session() as session:
conversation = await ConversationModel.read_async(
db_session=session,
identifier=conversation_id,
actor=actor,
check_is_deleted=True,
)
# Set attributes on the model
update_data = conversation_update.model_dump(exclude_none=True)
for key, value in update_data.items():
# model_settings needs to be serialized to dict for the JSON column
if key == "model_settings" and value is not None:
setattr(
conversation,
key,
self._serialize_model_settings(conversation_update.model_settings) if conversation_update.model_settings else value,
)
else:
setattr(conversation, key, value)
# Commit the update
updated_conversation = await conversation.update_async(
db_session=session,
actor=actor,
)
return updated_conversation.to_pydantic()
@enforce_types
@trace_method
async def delete_conversation(
self,
conversation_id: str,
actor: PydanticUser,
) -> None:
"""Soft delete a conversation and hard-delete its isolated blocks."""
async with db_registry.async_session() as session:
conversation = await ConversationModel.read_async(
db_session=session,
identifier=conversation_id,
actor=actor,
check_is_deleted=True,
)
# Get isolated blocks before modifying conversation
isolated_blocks = list(conversation.isolated_blocks)
# Soft delete the conversation first
conversation.is_deleted = True
await conversation.update_async(db_session=session, actor=actor)
# Hard-delete isolated blocks (Block model doesn't support soft-delete)
# Following same pattern as block_manager.delete_block_async
for block in isolated_blocks:
# Delete junction table entry first
await session.execute(delete(BlocksConversations).where(BlocksConversations.block_id == block.id))
await session.flush()
# Then hard-delete the block
await block.hard_delete_async(db_session=session, actor=actor)
# ==================== Message Management Methods ====================
@enforce_types
@trace_method
async def get_message_ids_for_conversation(
self,
conversation_id: str,
actor: PydanticUser,
) -> List[str]:
"""
Get ordered message IDs for a conversation.
Returns message IDs ordered by position in the conversation.
Only returns messages that are currently in_context.
"""
async with db_registry.async_session() as session:
query = (
select(ConversationMessageModel.message_id)
.where(
ConversationMessageModel.conversation_id == conversation_id,
ConversationMessageModel.organization_id == actor.organization_id,
ConversationMessageModel.in_context == True,
ConversationMessageModel.is_deleted == False,
)
.order_by(ConversationMessageModel.position)
)
result = await session.execute(query)
return list(result.scalars().all())
@enforce_types
@trace_method
async def get_messages_for_conversation(
    self,
    conversation_id: str,
    actor: PydanticUser,
) -> List[PydanticMessage]:
    """Return the ordered in-context Message objects for a conversation.

    Joins messages to the conversation tracking table and returns full
    Message objects sorted by position. Only in-context, non-deleted
    entries are included.
    """
    async with db_registry.async_session() as session:
        stmt = (
            select(MessageModel)
            .join(
                ConversationMessageModel,
                MessageModel.id == ConversationMessageModel.message_id,
            )
            .where(
                ConversationMessageModel.conversation_id == conversation_id,
                ConversationMessageModel.organization_id == actor.organization_id,
                ConversationMessageModel.in_context == True,
                ConversationMessageModel.is_deleted == False,
            )
            .order_by(ConversationMessageModel.position)
        )
        rows = await session.execute(stmt)
        pydantic_messages: List[PydanticMessage] = []
        for orm_message in rows.scalars().all():
            pydantic_messages.append(orm_message.to_pydantic())
        return pydantic_messages
@enforce_types
@trace_method
async def add_messages_to_conversation(
    self,
    conversation_id: str,
    agent_id: str,
    message_ids: List[str],
    actor: PydanticUser,
    starting_position: Optional[int] = None,
) -> None:
    """
    Add messages to a conversation's tracking table.

    Creates ConversationMessage entries with auto-incrementing positions.

    Args:
        conversation_id: The conversation to add messages to
        agent_id: The agent ID
        message_ids: List of message IDs to add
        actor: The user performing the action
        starting_position: Optional starting position (defaults to next available)
    """
    if not message_ids:
        # Nothing to track; avoid opening a session for a no-op.
        return
    async with db_registry.async_session() as session:
        # Derive the starting position if the caller didn't supply one.
        if starting_position is None:
            # COALESCE(MAX(position), -1) always produces exactly one row,
            # even for an empty conversation, so the scalar is never None —
            # no post-hoc None check is needed here.
            query = select(func.coalesce(func.max(ConversationMessageModel.position), -1)).where(
                ConversationMessageModel.conversation_id == conversation_id,
                ConversationMessageModel.organization_id == actor.organization_id,
            )
            result = await session.execute(query)
            starting_position = result.scalar_one() + 1
        # Create ConversationMessage entries in bulk, preserving input order.
        session.add_all(
            [
                ConversationMessageModel(
                    conversation_id=conversation_id,
                    agent_id=agent_id,
                    message_id=message_id,
                    position=starting_position + offset,
                    in_context=True,
                    organization_id=actor.organization_id,
                )
                for offset, message_id in enumerate(message_ids)
            ]
        )
        await session.commit()
@enforce_types
@trace_method
async def update_in_context_messages(
    self,
    conversation_id: str,
    in_context_message_ids: List[str],
    actor: PydanticUser,
) -> None:
    """
    Update which messages are in context for a conversation.

    Marks messages in the given list as in_context and everything else as
    out of context. Positions are rewritten to match the order of
    in_context_message_ids: when summarization inserts a summary message
    that must precede an approval request, ORDER BY position has to reflect
    the intended order, not the insertion order.

    Args:
        conversation_id: The conversation to update
        in_context_message_ids: List of message IDs in the desired order
        actor: The user performing the action
    """
    async with db_registry.async_session() as session:
        # Load every tracking row for this conversation.
        stmt = select(ConversationMessageModel).where(
            ConversationMessageModel.conversation_id == conversation_id,
            ConversationMessageModel.organization_id == actor.organization_id,
            ConversationMessageModel.is_deleted == False,
        )
        rows = (await session.execute(stmt)).scalars().all()
        by_message_id = {row.message_id: row for row in rows}
        # Flip the in_context flag based on membership.
        keep = set(in_context_message_ids)
        for row in rows:
            row.in_context = row.message_id in keep
        # Rewrite positions to follow the caller-specified order so that
        # ORDER BY position returns messages in the correct sequence.
        for new_position, message_id in enumerate(in_context_message_ids):
            row = by_message_id.get(message_id)
            if row is not None:
                row.position = new_position
        await session.commit()
@enforce_types
@trace_method
async def list_conversation_messages(
    self,
    conversation_id: str,
    actor: PydanticUser,
    limit: Optional[int] = 100,
    before: Optional[str] = None,
    after: Optional[str] = None,
    reverse: bool = False,
    group_id: Optional[str] = None,
    include_err: Optional[bool] = None,
) -> List[LettaMessage]:
    """
    List all messages in a conversation with pagination support.

    Unlike get_messages_for_conversation, this returns ALL messages
    (not just in_context) and supports cursor-based pagination.

    Args:
        conversation_id: The conversation to list messages for
        actor: The user performing the action
        limit: Maximum number of messages to return
        before: Return messages before this message ID
        after: Return messages after this message ID
        reverse: If True, return messages in descending order (newest first)
        group_id: Optional group ID to filter messages by
        include_err: Optional boolean to include error messages and error statuses
    Returns:
        List of LettaMessage objects
    """
    async with db_registry.async_session() as session:

        async def _cursor_position(message_id: str) -> Optional[int]:
            # Resolve a cursor message ID to its position in this conversation.
            # Scoped to the actor's organization for consistency with the main
            # query below (prevents cross-organization cursor probing).
            cursor_query = select(ConversationMessageModel.position).where(
                ConversationMessageModel.conversation_id == conversation_id,
                ConversationMessageModel.organization_id == actor.organization_id,
                ConversationMessageModel.message_id == message_id,
            )
            cursor_result = await session.execute(cursor_query)
            return cursor_result.scalar_one_or_none()

        # Build base query joining Message with ConversationMessage
        query = (
            select(MessageModel)
            .join(
                ConversationMessageModel,
                MessageModel.id == ConversationMessageModel.message_id,
            )
            .where(
                ConversationMessageModel.conversation_id == conversation_id,
                ConversationMessageModel.organization_id == actor.organization_id,
                ConversationMessageModel.is_deleted == False,
            )
        )
        # Filter by group_id if provided
        if group_id:
            query = query.where(MessageModel.group_id == group_id)
        # Handle cursor-based pagination; unknown cursors are ignored so a
        # stale cursor degrades to an unfiltered page rather than an error.
        if before:
            position = await _cursor_position(before)
            if position is not None:
                query = query.where(ConversationMessageModel.position < position)
        if after:
            position = await _cursor_position(after)
            if position is not None:
                query = query.where(ConversationMessageModel.position > position)
        # Order by position
        if reverse:
            query = query.order_by(ConversationMessageModel.position.desc())
        else:
            query = query.order_by(ConversationMessageModel.position.asc())
        # Apply limit
        if limit is not None:
            query = query.limit(limit)
        result = await session.execute(query)
        messages = [msg.to_pydantic() for msg in result.scalars().all()]
        # Convert to LettaMessages (reverse=False keeps sub-messages in natural order)
        return PydanticMessage.to_letta_messages_from_list(
            messages, reverse=False, include_err=include_err, text_is_assistant_message=True
        )
# ==================== Isolated Blocks Methods ====================
async def _create_isolated_blocks(
    self,
    session,
    conversation: ConversationModel,
    agent_id: str,
    isolated_block_labels: List[str],
    actor: PydanticUser,
) -> List[str]:
    """Create conversation-specific copies of blocks for isolated labels.

    Args:
        session: The database session
        conversation: The conversation model (must be created but not yet committed)
        agent_id: The agent ID to get source blocks from
        isolated_block_labels: List of block labels to isolate
        actor: The user performing the action

    Returns:
        List of created block IDs

    Raises:
        LettaInvalidArgumentError: If a block label is not found on the agent
    """
    # Load the agent so we can copy from its attached core-memory blocks.
    agent = await AgentModel.read_async(db_session=session, identifier=agent_id, actor=actor)
    blocks_by_label = {agent_block.label: agent_block for agent_block in agent.core_memory}
    new_block_ids: List[str] = []
    for label in isolated_block_labels:
        source_block = blocks_by_label.get(label)
        if source_block is None:
            raise LettaInvalidArgumentError(
                f"Block with label '{label}' not found on agent '{agent_id}'",
                argument_name="isolated_block_labels",
            )
        # Build the copy via the Pydantic schema so a fresh ID is generated.
        copy_schema = PydanticBlock(
            label=source_block.label,
            description=source_block.description,
            value=source_block.value,
            limit=source_block.limit,
            metadata=source_block.metadata_,
            read_only=source_block.read_only,
        )
        # Materialize the copy as an ORM row owned by the actor's org.
        orm_fields = copy_schema.model_dump(to_orm=True, exclude_none=True)
        block_copy = BlockModel(**orm_fields, organization_id=actor.organization_id)
        await block_copy.create_async(session, actor=actor)
        # Link the copy to the conversation through the junction table.
        session.add(
            BlocksConversations(
                conversation_id=conversation.id,
                block_id=block_copy.id,
                block_label=label,
            )
        )
        new_block_ids.append(block_copy.id)
    return new_block_ids
@enforce_types
@trace_method
async def get_isolated_blocks_for_conversation(
    self,
    conversation_id: str,
    actor: PydanticUser,
) -> Dict[str, PydanticBlock]:
    """Get isolated blocks for a conversation, keyed by label.

    Args:
        conversation_id: The conversation ID
        actor: The user performing the action

    Returns:
        Dictionary mapping block labels to their conversation-specific blocks
    """
    async with db_registry.async_session() as session:
        conversation = await ConversationModel.read_async(
            db_session=session,
            identifier=conversation_id,
            actor=actor,
            check_is_deleted=True,
        )
        blocks_by_label: Dict[str, PydanticBlock] = {}
        for isolated_block in conversation.isolated_blocks:
            blocks_by_label[isolated_block.label] = isolated_block.to_pydantic()
        return blocks_by_label
@enforce_types
@trace_method
async def apply_isolated_blocks_to_agent_state(
    self,
    agent_state: "AgentState",
    conversation_id: str,
    actor: PydanticUser,
) -> "AgentState":
    """Apply conversation-specific block overrides to an agent state.

    Replaces any block in agent_state.memory that has a conversation-specific
    isolated copy with that copy; blocks without overrides pass through.
    The agent state is modified in place.

    Args:
        agent_state: The agent state to modify (will be modified in place)
        conversation_id: The conversation ID to get isolated blocks from
        actor: The user performing the action

    Returns:
        The modified agent state (same object, modified in place)
    """
    from letta.schemas.memory import Memory

    # Fetch the conversation's isolated block copies, keyed by label.
    overrides = await self.get_isolated_blocks_for_conversation(
        conversation_id=conversation_id,
        actor=actor,
    )
    if not overrides:
        # No isolated blocks for this conversation — nothing to swap.
        return agent_state
    # Substitute each block with its override when one exists.
    replacement_blocks = [overrides.get(block.label, block) for block in agent_state.memory.blocks]
    # Rebuild Memory so the override set is reflected in the agent state.
    agent_state.memory = Memory(
        blocks=replacement_blocks,
        file_blocks=agent_state.memory.file_blocks,
        agent_type=agent_state.memory.agent_type,
        git_enabled=agent_state.memory.git_enabled,
    )
    return agent_state
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/conversation_manager.py",
"license": "Apache License 2.0",
"lines": 714,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/services/mcp/fastmcp_client.py | """FastMCP-based MCP clients with server-side OAuth support.
This module provides MCP client implementations using the FastMCP library,
with support for server-side OAuth flows where authorization URLs are
forwarded to web clients instead of opening a browser.
These clients replace the existing AsyncSSEMCPClient and AsyncStreamableHTTPMCPClient
implementations that used the lower-level MCP SDK directly.
"""
from contextlib import AsyncExitStack
from typing import List, Optional, Tuple
import httpx
from fastmcp import Client
from fastmcp.client.transports import SSETransport, StreamableHttpTransport
from mcp import Tool as MCPTool
from letta.errors import LettaMCPConnectionError
from letta.functions.mcp_client.types import SSEServerConfig, StreamableHTTPServerConfig
from letta.log import get_logger
from letta.services.mcp.base_client import _log_mcp_tool_error
from letta.services.mcp.server_side_oauth import ServerSideOAuth
logger = get_logger(__name__)
class AsyncFastMCPSSEClient:
    """SSE MCP client built on FastMCP, with server-side OAuth support.

    Connects to MCP servers over Server-Sent Events (SSE). Static headers
    come from the server config; interactive OAuth is delegated to a
    ServerSideOAuth instance when one is supplied, so no browser is opened.

    Args:
        server_config: SSE server configuration including URL, headers, and auth settings
        oauth: Optional ServerSideOAuth instance for OAuth authentication
        agent_id: Optional agent ID to include in request headers
    """

    AGENT_ID_HEADER = "X-Agent-Id"

    def __init__(
        self,
        server_config: SSEServerConfig,
        oauth: Optional[ServerSideOAuth] = None,
        agent_id: Optional[str] = None,
    ):
        self.server_config = server_config
        self.oauth = oauth
        self.agent_id = agent_id
        self.client: Optional[Client] = None
        self.initialized = False
        self.exit_stack = AsyncExitStack()

    def _request_headers(self) -> Optional[dict]:
        """Assemble the HTTP headers for this connection, or None if none apply."""
        headers: dict = {}
        if self.server_config.custom_headers:
            headers.update(self.server_config.custom_headers)
        if self.server_config.auth_header and self.server_config.auth_token:
            headers[self.server_config.auth_header] = self.server_config.auth_token
        if self.agent_id:
            headers[self.AGENT_ID_HEADER] = self.agent_id
        return headers or None

    async def connect_to_server(self):
        """Establish connection to the MCP server.

        Raises:
            LettaMCPConnectionError: If connection to the server fails
        """
        try:
            transport = SSETransport(
                url=self.server_config.server_url,
                headers=self._request_headers(),
                auth=self.oauth,  # ServerSideOAuth instance, or None for no OAuth
            )
            self.client = Client(transport)
            await self.client._connect()
            self.initialized = True
        except httpx.HTTPStatusError as exc:
            # Surface 401s distinctly so callers can trigger an OAuth flow.
            if exc.response.status_code == 401:
                raise LettaMCPConnectionError(message="401 Unauthorized", server_name=self.server_config.server_name) from exc
            raise LettaMCPConnectionError(
                message=f"HTTP error connecting to MCP server at {self.server_config.server_url}: {exc}",
                server_name=self.server_config.server_name,
            ) from exc
        except LettaMCPConnectionError:
            # Already in our error type — don't double-wrap.
            raise
        except ConnectionError as exc:
            raise LettaMCPConnectionError(message=str(exc), server_name=self.server_config.server_name) from exc
        except Exception as exc:
            logger.warning(
                f"Connecting to MCP server failed. Please review your server config: {self.server_config.model_dump_json(indent=4)}. Error: {str(exc)}"
            )
            raise LettaMCPConnectionError(
                message=f"Failed to connect to MCP server at '{self.server_config.server_url}'. "
                f"Please check your configuration and ensure the server is accessible. Error: {str(exc)}",
                server_name=self.server_config.server_name,
            ) from exc

    async def list_tools(self, serialize: bool = False) -> List[MCPTool]:
        """List available tools from the MCP server.

        Args:
            serialize: If True, return tools as dictionaries instead of MCPTool objects

        Returns:
            List of tools available on the server

        Raises:
            RuntimeError: If client has not been initialized
        """
        self._check_initialized()
        tools = await self.client.list_tools()
        if not serialize:
            return tools

        def _as_serializable(tool):
            # Prefer pydantic v2, then v1, then raw attributes, then repr.
            if hasattr(tool, "model_dump"):
                return tool.model_dump()
            if hasattr(tool, "dict"):
                return tool.dict()
            if hasattr(tool, "__dict__"):
                return tool.__dict__
            return str(tool)

        return [_as_serializable(tool) for tool in tools]

    async def execute_tool(self, tool_name: str, tool_args: dict) -> Tuple[str, bool]:
        """Execute a tool on the MCP server.

        Args:
            tool_name: Name of the tool to execute
            tool_args: Arguments to pass to the tool

        Returns:
            Tuple of (result_content, success_flag)

        Raises:
            RuntimeError: If client has not been initialized
        """
        self._check_initialized()
        try:
            result = await self.client.call_tool(tool_name, tool_args)
        except Exception as exc:
            # Unwrap single-member exception groups so callers see the real cause.
            cause = exc
            if hasattr(exc, "exceptions") and exc.exceptions and len(exc.exceptions) == 1:
                cause = exc.exceptions[0]
            _log_mcp_tool_error(logger, tool_name, cause)
            return str(cause), False
        # Flatten the result content into a single string, preferring text pieces.
        pieces = []
        for item in result.content:
            if hasattr(item, "text"):
                pieces.append(item.text)
                logger.debug(f"MCP tool result parsed content (text): {pieces}")
            else:
                pieces.append(str(item))
                logger.debug(f"MCP tool result parsed content (other): {pieces}")
        final_content = " ".join(pieces) if pieces else "Empty response from tool"
        return final_content, not result.is_error

    def _check_initialized(self):
        """Raise if connect_to_server() has not completed successfully."""
        if not self.initialized:
            logger.error("MCPClient has not been initialized")
            raise RuntimeError("MCPClient has not been initialized")

    async def cleanup(self):
        """Release client resources; safe to call even if never connected."""
        if self.client:
            try:
                await self.client.close()
            except Exception as exc:
                logger.warning(f"Error during FastMCP client cleanup: {exc}")
        self.initialized = False
class AsyncFastMCPStreamableHTTPClient:
    """Streamable HTTP MCP client built on FastMCP, with server-side OAuth support.

    Connects to MCP servers over Streamable HTTP transport. Static headers
    come from the server config; interactive OAuth is delegated to a
    ServerSideOAuth instance when one is supplied, so no browser is opened.

    Args:
        server_config: Streamable HTTP server configuration
        oauth: Optional ServerSideOAuth instance for OAuth authentication
        agent_id: Optional agent ID to include in request headers
    """

    AGENT_ID_HEADER = "X-Agent-Id"

    def __init__(
        self,
        server_config: StreamableHTTPServerConfig,
        oauth: Optional[ServerSideOAuth] = None,
        agent_id: Optional[str] = None,
    ):
        self.server_config = server_config
        self.oauth = oauth
        self.agent_id = agent_id
        self.client: Optional[Client] = None
        self.initialized = False
        self.exit_stack = AsyncExitStack()

    def _request_headers(self) -> Optional[dict]:
        """Assemble the HTTP headers for this connection, or None if none apply."""
        headers: dict = {}
        if self.server_config.custom_headers:
            headers.update(self.server_config.custom_headers)
        if self.server_config.auth_header and self.server_config.auth_token:
            headers[self.server_config.auth_header] = self.server_config.auth_token
        if self.agent_id:
            headers[self.AGENT_ID_HEADER] = self.agent_id
        return headers or None

    async def connect_to_server(self):
        """Establish connection to the MCP server.

        Raises:
            LettaMCPConnectionError: If connection to the server fails
        """
        try:
            transport = StreamableHttpTransport(
                url=self.server_config.server_url,
                headers=self._request_headers(),
                auth=self.oauth,  # ServerSideOAuth instance, or None for no OAuth
            )
            self.client = Client(transport)
            await self.client._connect()
            self.initialized = True
        except httpx.HTTPStatusError as exc:
            # Surface 401s distinctly so callers can trigger an OAuth flow.
            if exc.response.status_code == 401:
                raise LettaMCPConnectionError(message="401 Unauthorized", server_name=self.server_config.server_name) from exc
            raise LettaMCPConnectionError(
                message=f"HTTP error connecting to MCP server at {self.server_config.server_url}: {exc}",
                server_name=self.server_config.server_name,
            ) from exc
        except LettaMCPConnectionError:
            # Already in our error type — don't double-wrap.
            raise
        except ConnectionError as exc:
            raise LettaMCPConnectionError(message=str(exc), server_name=self.server_config.server_name) from exc
        except Exception as exc:
            logger.warning(
                f"Connecting to MCP server failed. Please review your server config: {self.server_config.model_dump_json(indent=4)}. Error: {str(exc)}"
            )
            raise LettaMCPConnectionError(
                message=f"Failed to connect to MCP server at '{self.server_config.server_url}'. "
                f"Please check your configuration and ensure the server is accessible. Error: {str(exc)}",
                server_name=self.server_config.server_name,
            ) from exc

    async def list_tools(self, serialize: bool = False) -> List[MCPTool]:
        """List available tools from the MCP server.

        Args:
            serialize: If True, return tools as dictionaries instead of MCPTool objects

        Returns:
            List of tools available on the server

        Raises:
            RuntimeError: If client has not been initialized
        """
        self._check_initialized()
        tools = await self.client.list_tools()
        if not serialize:
            return tools

        def _as_serializable(tool):
            # Prefer pydantic v2, then v1, then raw attributes, then repr.
            if hasattr(tool, "model_dump"):
                return tool.model_dump()
            if hasattr(tool, "dict"):
                return tool.dict()
            if hasattr(tool, "__dict__"):
                return tool.__dict__
            return str(tool)

        return [_as_serializable(tool) for tool in tools]

    async def execute_tool(self, tool_name: str, tool_args: dict) -> Tuple[str, bool]:
        """Execute a tool on the MCP server.

        Args:
            tool_name: Name of the tool to execute
            tool_args: Arguments to pass to the tool

        Returns:
            Tuple of (result_content, success_flag)

        Raises:
            RuntimeError: If client has not been initialized
        """
        self._check_initialized()
        try:
            result = await self.client.call_tool(tool_name, tool_args)
        except Exception as exc:
            # Unwrap single-member exception groups so callers see the real cause.
            cause = exc
            if hasattr(exc, "exceptions") and exc.exceptions and len(exc.exceptions) == 1:
                cause = exc.exceptions[0]
            _log_mcp_tool_error(logger, tool_name, cause)
            return str(cause), False
        # Flatten the result content into a single string, preferring text pieces.
        pieces = []
        for item in result.content:
            if hasattr(item, "text"):
                pieces.append(item.text)
                logger.debug(f"MCP tool result parsed content (text): {pieces}")
            else:
                pieces.append(str(item))
                logger.debug(f"MCP tool result parsed content (other): {pieces}")
        final_content = " ".join(pieces) if pieces else "Empty response from tool"
        return final_content, not result.is_error

    def _check_initialized(self):
        """Raise if connect_to_server() has not completed successfully."""
        if not self.initialized:
            logger.error("MCPClient has not been initialized")
            raise RuntimeError("MCPClient has not been initialized")

    async def cleanup(self):
        """Release client resources; safe to call even if never connected."""
        if self.client:
            try:
                await self.client.close()
            except Exception as exc:
                logger.warning(f"Error during FastMCP client cleanup: {exc}")
        self.initialized = False
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/mcp/fastmcp_client.py",
"license": "Apache License 2.0",
"lines": 287,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/services/mcp/server_side_oauth.py | """Server-side OAuth for FastMCP client that works with web app flows.
This module provides a custom OAuth implementation that:
1. Forwards authorization URLs via callback instead of opening a browser
2. Receives auth codes from an external source (web app callback) instead of running a local server
This is designed for server-side applications where the OAuth flow must be handled
by a web frontend rather than opening a local browser.
"""
import asyncio
import time
from typing import Callable, Optional, Tuple
from urllib.parse import parse_qs, urlencode, urlparse, urlunparse
import httpx
from fastmcp.client.auth.oauth import OAuth
from pydantic import AnyHttpUrl
from letta.log import get_logger
from letta.orm.mcp_oauth import OAuthSessionStatus
from letta.schemas.mcp import MCPOAuthSessionUpdate
from letta.schemas.user import User as PydanticUser
from letta.services.mcp.oauth_utils import DatabaseTokenStorage
logger = get_logger(__name__)
# Type alias for the MCPServerManager to avoid circular imports
# The actual type is letta.services.mcp_server_manager.MCPServerManager
MCPManagerType = "MCPServerManager"
class ServerSideOAuth(OAuth):
    """
    OAuth client that forwards authorization URL via callback instead of opening browser,
    and receives auth code from external source instead of running local callback server.

    This class extends FastMCP's OAuth class to:
    - Use DatabaseTokenStorage for persistent token storage instead of file-based storage
    - Override redirect_handler to store URLs in the database instead of opening a browser
    - Override callback_handler to poll database for auth codes instead of running a local server

    By extending FastMCP's OAuth, we inherit its _initialize() fix that properly sets
    token_expiry_time, enabling automatic token refresh when tokens expire.

    Args:
        mcp_url: The MCP server URL to authenticate against
        session_id: The OAuth session ID for tracking this flow in the database
        mcp_manager: The MCP manager instance for database operations
        actor: The user making the OAuth request
        redirect_uri: The redirect URI for the OAuth callback (web app endpoint)
        url_callback: Optional callback function called with the authorization URL
        logo_uri: Optional logo URI to include in OAuth client metadata
        scopes: OAuth scopes to request
        exclude_resource_param: If True, prevents the RFC 8707 resource parameter from being
            added to OAuth requests. Some servers (like Supabase) reject this parameter.
    """

    def __init__(
        self,
        mcp_url: str,
        session_id: str,
        mcp_manager: MCPManagerType,
        actor: PydanticUser,
        redirect_uri: str,
        url_callback: Optional[Callable[[str], None]] = None,
        logo_uri: Optional[str] = None,
        scopes: Optional[str | list[str]] = None,
        exclude_resource_param: bool = True,
    ):
        self.session_id = session_id
        self.mcp_manager = mcp_manager
        self.actor = actor
        self._redirect_uri = redirect_uri
        self._url_callback = url_callback
        self._exclude_resource_param = exclude_resource_param
        # Initialize parent OAuth class (this creates FileTokenStorage internally)
        super().__init__(
            mcp_url=mcp_url,
            scopes=scopes,
            client_name="Letta",
        )
        # Replace the file-based storage with database storage
        # This must be done after super().__init__ since it creates the context
        self.context.storage = DatabaseTokenStorage(session_id, mcp_manager, actor)
        # Override redirect URI in client metadata to use our web app's callback
        self.context.client_metadata.redirect_uris = [AnyHttpUrl(redirect_uri)]
        # Clear empty scope - some OAuth servers (like Supabase) reject empty scope strings
        # Setting to None lets the server use its default scopes
        if not scopes:
            self.context.client_metadata.scope = None
        # Set logo URI if provided
        if logo_uri:
            self.context.client_metadata.logo_uri = logo_uri

    async def _initialize(self) -> None:
        """Load stored tokens and client info, properly setting token expiry."""
        await super()._initialize()
        # Some OAuth servers (like Supabase) don't accept the RFC 8707 resource parameter
        # Clear protected_resource_metadata to prevent the SDK from adding it to requests
        if self._exclude_resource_param:
            self.context.protected_resource_metadata = None

    async def _handle_protected_resource_response(self, response: httpx.Response) -> None:
        """Handle protected resource metadata response.

        This overrides the parent's method to:
        1. Let OAuth server discovery work (extracts auth_server_url from metadata)
        2. Then clear protected_resource_metadata to prevent RFC 8707 resource parameter
           from being added to token exchange and other requests.

        Some OAuth servers (like Supabase) reject the resource parameter entirely.
        """
        # Call parent to process metadata and extract auth_server_url
        await super()._handle_protected_resource_response(response)
        # Clear the metadata to prevent resource parameter in subsequent requests
        # The auth_server_url is already extracted, so OAuth discovery still works
        if self._exclude_resource_param:
            logger.debug("Clearing protected_resource_metadata to prevent resource parameter in token exchange")
            self.context.protected_resource_metadata = None

    async def _handle_token_response(self, response: httpx.Response) -> None:
        """Handle token exchange response, accepting both 200 and 201 status codes.

        Some OAuth servers (like Supabase) return 201 Created instead of 200 OK
        for successful token exchange. The MCP SDK only accepts 200, so we override
        this method to accept both.
        """
        # Accept both 200 and 201 as success (Supabase returns 201)
        if response.status_code == 201:
            logger.debug("Token exchange returned 201 Created, treating as success")
            # Monkey-patch the status code to 200 so parent method accepts it
            response.status_code = 200
        await super()._handle_token_response(response)

    async def redirect_handler(self, authorization_url: str) -> None:
        """Store authorization URL in database and call optional callback.

        This overrides the parent's redirect_handler which would open a browser.
        Instead, we:
        1. Extract the state from the authorization URL (generated by MCP SDK)
        2. Optionally strip the resource parameter (some servers reject it)
        3. Store the URL and state in the database for the API to return
        4. Call an optional callback (e.g., to yield to an SSE stream)

        Args:
            authorization_url: The OAuth authorization URL to redirect the user to
        """
        logger.info(f"OAuth redirect handler called with URL: {authorization_url}")
        # Strip the resource parameter if exclude_resource_param is True
        # Some OAuth servers (like Supabase) reject the RFC 8707 resource parameter
        if self._exclude_resource_param:
            parsed_url = urlparse(authorization_url)
            query_params = parse_qs(parsed_url.query, keep_blank_values=True)
            if "resource" in query_params:
                logger.debug(f"Stripping resource parameter from authorization URL: {query_params['resource']}")
                del query_params["resource"]
                # Rebuild the URL without the resource parameter
                # parse_qs returns lists, so flatten them for urlencode
                flat_params = {k: v[0] if len(v) == 1 else v for k, v in query_params.items()}
                new_query = urlencode(flat_params, doseq=True)
                authorization_url = urlunparse(
                    (
                        parsed_url.scheme,
                        parsed_url.netloc,
                        parsed_url.path,
                        parsed_url.params,
                        new_query,
                        parsed_url.fragment,
                    )
                )
                logger.info(f"Authorization URL after stripping resource: {authorization_url}")
        # Extract the state parameter from the authorization URL
        parsed_url = urlparse(authorization_url)
        query_params = parse_qs(parsed_url.query)
        oauth_state = query_params.get("state", [None])[0]
        # Store URL and state in database for API response
        session_update = MCPOAuthSessionUpdate(authorization_url=authorization_url, state=oauth_state)
        await self.mcp_manager.update_oauth_session(self.session_id, session_update, self.actor)
        logger.info(f"OAuth authorization URL stored for session {self.session_id} with state {oauth_state}")
        # Call the callback if provided (e.g., to yield URL to SSE stream)
        if self._url_callback:
            self._url_callback(authorization_url)

    async def callback_handler(self) -> Tuple[str, Optional[str]]:
        """Poll database for authorization code set by web app callback.

        This overrides the parent's callback_handler which would run a local server.
        Instead, we poll the database waiting for the authorization code to be set
        by the web app's callback endpoint.

        Returns:
            Tuple of (authorization_code, state)

        Raises:
            Exception: If OAuth authorization failed or timed out
        """
        timeout = 300  # 5 minutes
        # Use a monotonic clock for the timeout: time.time() is wall-clock and
        # can jump backwards/forwards on NTP or manual clock adjustments, which
        # would stretch or truncate the 5-minute window.
        start_time = time.monotonic()
        logger.info(f"Waiting for authorization code for session {self.session_id}")
        while time.monotonic() - start_time < timeout:
            oauth_session = await self.mcp_manager.get_oauth_session_by_id(self.session_id, self.actor)
            if oauth_session and oauth_session.authorization_code_enc:
                # Read authorization code directly from _enc column
                auth_code = await oauth_session.authorization_code_enc.get_plaintext_async()
                logger.info(f"Authorization code received for session {self.session_id}")
                return auth_code, oauth_session.state
            if oauth_session and oauth_session.status == OAuthSessionStatus.ERROR:
                raise Exception("OAuth authorization failed")
            # Back off briefly between polls to avoid hammering the database.
            await asyncio.sleep(1)
        raise Exception(f"Timeout waiting for OAuth callback after {timeout} seconds")
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/mcp/server_side_oauth.py",
"license": "Apache License 2.0",
"lines": 185,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:tests/adapters/test_letta_llm_stream_adapter_error_handling.py | import anthropic
import httpx
import openai
import pytest
from anthropic.types.beta import (
BetaMessage,
BetaRawMessageStartEvent,
BetaRawMessageStopEvent,
BetaUsage,
)
from google.genai import errors as google_errors
from letta.adapters.letta_llm_stream_adapter import LettaLLMStreamAdapter
from letta.errors import (
ContextWindowExceededError,
LLMBadRequestError,
LLMConnectionError,
LLMEmptyResponseError,
LLMInsufficientCreditsError,
LLMServerError,
)
from letta.llm_api.anthropic_client import AnthropicClient
from letta.llm_api.google_vertex_client import GoogleVertexClient
from letta.schemas.enums import LLMCallType
from letta.schemas.llm_config import LLMConfig
def _make_raising_stream(exc: BaseException):
    """Build a fake ``anthropic.AsyncStream`` whose iteration immediately raises *exc*.

    Mimics just enough of the AsyncStream surface for AnthropicStreamingInterface:
    async context manager + async iterator.
    """

    class FakeAsyncStream:
        async def __aenter__(self):
            return self

        async def __aexit__(self, exc_type, exc_val, tb):
            return None

        def __aiter__(self):
            return self

        async def __anext__(self):
            # Fail on the very first iteration step, as a provider would mid-stream.
            raise exc

    return FakeAsyncStream()


def _make_adapter(monkeypatch, stream_exc: BaseException) -> LettaLLMStreamAdapter:
    """Patch ``AnthropicClient.stream_async`` to return a stream that raises *stream_exc*,
    and build a stream adapter wired to that patched client."""

    async def fake_stream_async(self, request_data: dict, llm_config: LLMConfig):
        return _make_raising_stream(stream_exc)

    monkeypatch.setattr(AnthropicClient, "stream_async", fake_stream_async, raising=True)
    llm_config = LLMConfig(model="claude-sonnet-4-5-20250929", model_endpoint_type="anthropic", context_window=200000)
    return LettaLLMStreamAdapter(llm_client=AnthropicClient(), llm_config=llm_config, call_type=LLMCallType.agent_step)


async def _drain_expecting(adapter: LettaLLMStreamAdapter, expected_error: type) -> None:
    """Consume the adapter's stream and assert it surfaces *expected_error*."""
    gen = adapter.invoke_llm(request_data={}, messages=[], tools=[], use_assistant_message=True)
    with pytest.raises(expected_error):
        async for _ in gen:
            pass


@pytest.mark.asyncio
async def test_letta_llm_stream_adapter_converts_anthropic_streaming_api_status_error(monkeypatch):
    """Regression: provider APIStatusError raised *during* streaming iteration should be converted via handle_llm_error."""
    request = httpx.Request("POST", "https://api.anthropic.com/v1/messages")
    response = httpx.Response(status_code=500, request=request)
    body = {
        "type": "error",
        "error": {"details": None, "type": "api_error", "message": "Internal server error"},
        "request_id": "req_011CWSBmrUwW5xdcqjfkUFS4",
    }
    adapter = _make_adapter(monkeypatch, anthropic.APIStatusError("INTERNAL_SERVER_ERROR", response=response, body=body))
    await _drain_expecting(adapter, LLMServerError)


@pytest.mark.asyncio
async def test_letta_llm_stream_adapter_converts_anthropic_413_request_too_large(monkeypatch):
    """Regression: 413 request_too_large errors should be converted to ContextWindowExceededError."""
    request = httpx.Request("POST", "https://api.anthropic.com/v1/messages")
    response = httpx.Response(status_code=413, request=request)
    body = {
        "type": "error",
        "error": {"type": "request_too_large", "message": "Request exceeds the maximum size"},
    }
    adapter = _make_adapter(monkeypatch, anthropic.APIStatusError("REQUEST_TOO_LARGE", response=response, body=body))
    await _drain_expecting(adapter, ContextWindowExceededError)


@pytest.mark.asyncio
async def test_letta_llm_stream_adapter_converts_httpx_read_error(monkeypatch):
    """Regression: httpx.ReadError raised during streaming should be converted to LLMConnectionError."""
    adapter = _make_adapter(monkeypatch, httpx.ReadError("Connection closed unexpectedly"))
    await _drain_expecting(adapter, LLMConnectionError)


@pytest.mark.asyncio
async def test_letta_llm_stream_adapter_converts_httpx_write_error(monkeypatch):
    """Regression: httpx.WriteError raised during streaming should be converted to LLMConnectionError."""
    adapter = _make_adapter(monkeypatch, httpx.WriteError("Failed to write to connection"))
    await _drain_expecting(adapter, LLMConnectionError)
def test_anthropic_client_handle_llm_error_413_status_code():
    """handle_llm_error maps a 413 APIStatusError to ContextWindowExceededError."""
    err = anthropic.APIStatusError(
        "REQUEST_TOO_LARGE",
        response=httpx.Response(
            status_code=413,
            request=httpx.Request("POST", "https://api.anthropic.com/v1/messages"),
        ),
        body={
            "type": "error",
            "error": {"type": "request_too_large", "message": "Request exceeds the maximum size"},
        },
    )
    converted = AnthropicClient().handle_llm_error(err)
    assert isinstance(converted, ContextWindowExceededError)
    # Either the status code or the provider error type should survive in the message
    assert "413" in converted.message or "request_too_large" in converted.message.lower()
def test_anthropic_client_handle_llm_error_request_too_large_string():
    """A generic exception carrying the request_too_large marker string also converts to ContextWindowExceededError."""
    raw = "Error code: 413 - {'error': {'type': 'request_too_large', 'message': 'Request exceeds the maximum size'}}"
    converted = AnthropicClient().handle_llm_error(Exception(raw))
    assert isinstance(converted, ContextWindowExceededError)
    lowered = converted.message.lower()
    assert "request_too_large" in lowered or "context window exceeded" in lowered
@pytest.mark.parametrize(
    "error_message",
    [
        "The input token count exceeds the maximum number of tokens allowed 1048576.",
        "Token count of 1500000 exceeds the model limit of 1048576 tokens allowed.",
    ],
    ids=["gemini-token-count-exceeds", "gemini-tokens-allowed-limit"],
)
def test_google_client_handle_llm_error_token_limit_returns_context_window_exceeded(error_message):
    """Google 400 errors about token limits should map to ContextWindowExceededError."""
    # __new__ skips GoogleVertexClient.__init__ (no credentials needed for error mapping)
    vertex_client = GoogleVertexClient.__new__(GoogleVertexClient)
    payload = {
        "message": f'{{"error": {{"code": 400, "message": "{error_message}", "status": "INVALID_ARGUMENT"}}}}',
        "status": "Bad Request",
    }
    converted = vertex_client.handle_llm_error(google_errors.ClientError(400, payload))
    assert isinstance(converted, ContextWindowExceededError)
def test_google_client_handle_llm_error_context_exceeded_returns_context_window_exceeded():
    """Google 400 errors with 'context' + 'exceeded' should map to ContextWindowExceededError."""
    # __new__ skips GoogleVertexClient.__init__ (no credentials needed for error mapping)
    vertex_client = GoogleVertexClient.__new__(GoogleVertexClient)
    payload = {
        "message": '{"error": {"code": 400, "message": "Request context window exceeded the limit.", "status": "INVALID_ARGUMENT"}}',
        "status": "Bad Request",
    }
    converted = vertex_client.handle_llm_error(google_errors.ClientError(400, payload))
    assert isinstance(converted, ContextWindowExceededError)
def test_google_client_handle_llm_error_generic_400_returns_bad_request():
    """Google 400 errors without token/context keywords should map to LLMBadRequestError."""
    # __new__ skips GoogleVertexClient.__init__ (no credentials needed for error mapping)
    vertex_client = GoogleVertexClient.__new__(GoogleVertexClient)
    payload = {
        "message": '{"error": {"code": 400, "message": "Invalid argument: unsupported parameter.", "status": "INVALID_ARGUMENT"}}',
        "status": "Bad Request",
    }
    converted = vertex_client.handle_llm_error(google_errors.ClientError(400, payload))
    # A generic bad request, and specifically NOT the context-window subclass
    assert isinstance(converted, LLMBadRequestError)
    assert not isinstance(converted, ContextWindowExceededError)
@pytest.mark.parametrize(
    "error_message",
    [
        "Insufficient credits. Add more using https://openrouter.ai/settings/credits",
        "This request requires more credits, or fewer max_tokens. You requested up to 65536 tokens, but can only afford 2679.",
        "You exceeded your current quota, please check your plan and billing details.",
    ],
    ids=["openrouter-402", "openrouter-streaming-afford", "openai-quota-exceeded"],
)
def test_openai_client_handle_llm_error_insufficient_credits(error_message):
    """Credit/quota errors should map to LLMInsufficientCreditsError."""
    from letta.llm_api.openai_client import OpenAIClient

    api_error = openai.APIError(
        message=error_message,
        request=httpx.Request("POST", "https://api.openai.com/v1/chat/completions"),
        body=None,
    )
    assert isinstance(OpenAIClient().handle_llm_error(api_error), LLMInsufficientCreditsError)
def test_openai_client_handle_llm_error_402_status_code():
    """402 APIStatusError should map to LLMInsufficientCreditsError."""
    from letta.llm_api.openai_client import OpenAIClient

    status_error = openai.APIStatusError(
        "Insufficient credits",
        response=httpx.Response(
            status_code=402,
            request=httpx.Request("POST", "https://openrouter.ai/api/v1/chat/completions"),
        ),
        body={"error": {"message": "Insufficient credits", "code": 402}},
    )
    assert isinstance(OpenAIClient().handle_llm_error(status_error), LLMInsufficientCreditsError)
def test_openai_client_handle_llm_error_non_credit_api_error():
    """Non-credit bare APIError should map to LLMBadRequestError, not LLMInsufficientCreditsError."""
    from letta.llm_api.openai_client import OpenAIClient

    api_error = openai.APIError(
        message="Some other API error occurred",
        request=httpx.Request("POST", "https://api.openai.com/v1/chat/completions"),
        body=None,
    )
    converted = OpenAIClient().handle_llm_error(api_error)
    assert isinstance(converted, LLMBadRequestError)
    assert not isinstance(converted, LLMInsufficientCreditsError)
@pytest.mark.asyncio
async def test_letta_llm_stream_adapter_raises_empty_response_error_for_anthropic(monkeypatch):
    """LET-7679: Empty streaming responses (no content blocks) should raise LLMEmptyResponseError.

    This tests the case where Opus 4.6 returns a response with:
    - BetaRawMessageStartEvent (with usage tokens)
    - BetaRawMessageStopEvent (end_turn)
    - NO content blocks in between

    This should raise LLMEmptyResponseError, not complete successfully with stop_reason=end_turn.
    """

    class FakeAsyncStream:
        """Mimics anthropic.AsyncStream that returns empty content (no content blocks)."""

        def __init__(self):
            # Canned event sequence: start immediately followed by stop, with
            # no content-block events in between. This is exactly the shape
            # observed in the LET-7679 bug report.
            self.events = [
                # Message start with some usage info
                BetaRawMessageStartEvent(
                    type="message_start",
                    message=BetaMessage(
                        id="msg_test_empty",
                        type="message",
                        role="assistant",
                        content=[],  # Empty content
                        model="claude-opus-4-6",
                        stop_reason="end_turn",
                        stop_sequence=None,
                        usage=BetaUsage(input_tokens=1000, output_tokens=26, cache_creation_input_tokens=0, cache_read_input_tokens=0),
                    ),
                ),
                # Message stop immediately after start - no content blocks
                BetaRawMessageStopEvent(type="message_stop"),
            ]
            self.index = 0  # cursor into self.events

        async def __aenter__(self):
            return self

        async def __aexit__(self, exc_type, exc, tb):
            return None

        def __aiter__(self):
            return self

        async def __anext__(self):
            # Replay the canned events in order, then end the stream.
            if self.index >= len(self.events):
                raise StopAsyncIteration
            event = self.events[self.index]
            self.index += 1
            return event

    async def fake_stream_async(self, request_data: dict, llm_config):
        return FakeAsyncStream()

    # Swap the real network call for the canned stream above.
    monkeypatch.setattr(AnthropicClient, "stream_async", fake_stream_async, raising=True)
    llm_client = AnthropicClient()
    llm_config = LLMConfig(model="claude-opus-4-6", model_endpoint_type="anthropic", context_window=200000)
    adapter = LettaLLMStreamAdapter(llm_client=llm_client, llm_config=llm_config, call_type=LLMCallType.agent_step)
    gen = adapter.invoke_llm(request_data={}, messages=[], tools=[], use_assistant_message=True)
    # Draining the stream must surface LLMEmptyResponseError rather than
    # completing "successfully" with an empty message.
    with pytest.raises(LLMEmptyResponseError):
        async for _ in gen:
            pass
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/adapters/test_letta_llm_stream_adapter_error_handling.py",
"license": "Apache License 2.0",
"lines": 278,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/integration_test_client_side_tools.py | """
Integration tests for client-side tools passed in the request.
These tests verify that:
1. Client-side tools can be specified in the request without pre-registration on the server
2. When the agent calls a client-side tool, execution pauses (stop_reason=requires_approval)
3. Client can provide tool returns via the approval response mechanism
4. Agent continues execution after receiving tool returns
"""
import uuid
import pytest
from letta_client import Letta
# ------------------------------
# Constants
# ------------------------------

# Sentinel value the simulated client-side tool "returns"; tests assert the
# agent echoes this exact string back later in the conversation.
SECRET_CODE = "CLIENT_SIDE_SECRET_12345"

# Models to test - both Anthropic and OpenAI
TEST_MODELS = [
    "anthropic/claude-sonnet-4-5-20250929",
    "openai/gpt-4o-mini",
]
def get_client_tool_schema():
    """Build the client-side tool schema shared by these tests.

    The schema follows the OpenAI-style function-tool format: a name, a
    description, and a JSON-schema ``parameters`` object.
    """
    parameters = {
        "type": "object",
        "properties": {
            "input_text": {
                "type": "string",
                "description": "The input text to process",
            }
        },
        "required": ["input_text"],
    }
    return {
        "name": "get_secret_code",
        "description": "Returns a secret code for the given input text. This tool is executed client-side. You MUST call this tool when the user asks for a secret code.",
        "parameters": parameters,
    }
# ------------------------------
# Fixtures
# ------------------------------
@pytest.fixture
def client(server_url: str) -> Letta:
    """Create a Letta SDK client pointed at the test server.

    ``server_url`` is provided by a fixture defined elsewhere (conftest).
    """
    return Letta(base_url=server_url)
# ------------------------------
# Test Cases
# ------------------------------
class TestClientSideTools:
    """Test client-side tools using the SDK client.

    Each test creates its own throwaway agent and deletes it in ``finally``
    so the tests are independent across parametrized models.
    """

    @pytest.mark.parametrize("model", TEST_MODELS)
    def test_client_side_tool_full_flow(self, client: Letta, model: str) -> None:
        """
        Test the complete end-to-end flow:
        1. User asks agent to get a secret code
        2. Agent calls client-side tool, execution pauses
        3. Client provides the tool return with the secret code
        4. Agent processes the result and continues execution
        5. User asks what the code was
        6. Agent recalls and reports the secret code
        """
        # Create agent for this test (no server-side tools at all)
        agent = client.agents.create(
            name=f"client_tools_test_{uuid.uuid4().hex[:8]}",
            model=model,
            embedding="openai/text-embedding-3-small",
            include_base_tools=False,
            tool_ids=[],
            include_base_tool_rules=False,
            tool_rules=[],
        )
        try:
            tool_schema = get_client_tool_schema()
            print(f"\n=== Testing with model: {model} ===")

            # Step 1: User asks for the secret code - agent should call the tool
            print("\nStep 1: Asking agent to call get_secret_code tool...")
            response1 = client.agents.messages.create(
                agent_id=agent.id,
                messages=[{"role": "user", "content": "Please call the get_secret_code tool with input 'hello world'."}],
                client_tools=[tool_schema],
            )

            # Validate Step 1: Should pause with approval request
            assert response1.stop_reason.stop_reason == "requires_approval", f"Expected requires_approval, got {response1.stop_reason}"
            assert response1.messages[-1].message_type == "approval_request_message"
            assert response1.messages[-1].tool_call is not None
            assert response1.messages[-1].tool_call.name == "get_secret_code"
            tool_call_id = response1.messages[-1].tool_call.tool_call_id
            print(f" ✓ Agent called get_secret_code tool (call_id: {tool_call_id})")

            # Step 2: Provide the tool return (simulating client-side execution)
            print(f"\nStep 2: Providing tool return with secret code: {SECRET_CODE}")
            response2 = client.agents.messages.create(
                agent_id=agent.id,
                messages=[
                    {
                        "type": "approval",
                        "approvals": [
                            {
                                "type": "tool",
                                "tool_call_id": tool_call_id,
                                "tool_return": SECRET_CODE,
                                "status": "success",
                            }
                        ],
                    }
                ],
                client_tools=[tool_schema],
            )

            # Validate Step 2: Agent should receive tool return and CONTINUE execution
            assert response2.messages is not None
            assert len(response2.messages) >= 1
            # First message should be the tool return
            assert response2.messages[0].message_type == "tool_return_message"
            assert response2.messages[0].status == "success"
            assert response2.messages[0].tool_return == SECRET_CODE
            print(" ✓ Tool return message received with secret code")

            # Agent should continue and eventually end turn (not require more approval)
            assert response2.stop_reason.stop_reason in [
                "end_turn",
                "tool_rule",
                "max_steps",
            ], f"Expected end_turn/tool_rule/max_steps, got {response2.stop_reason}"
            print(f" ✓ Agent continued execution (stop_reason: {response2.stop_reason})")

            # Check that agent produced a response after the tool return
            assistant_messages_step2 = [msg for msg in response2.messages if msg.message_type == "assistant_message"]
            assert len(assistant_messages_step2) > 0, "Agent should produce an assistant message after receiving tool return"
            print(f" ✓ Agent produced {len(assistant_messages_step2)} assistant message(s)")

            # Step 3: Ask the agent what the secret code was (testing memory/context)
            print("\nStep 3: Asking agent to recall the secret code...")
            response3 = client.agents.messages.create(
                agent_id=agent.id,
                messages=[{"role": "user", "content": "What was the exact secret code that the tool returned? Please repeat it."}],
                client_tools=[tool_schema],
            )

            # Validate Step 3: Agent should recall and report the secret code
            assert response3.stop_reason.stop_reason in ["end_turn", "tool_rule", "max_steps"]
            # Find the assistant message in the response
            assistant_messages = [msg for msg in response3.messages if msg.message_type == "assistant_message"]
            assert len(assistant_messages) > 0, "Agent should have responded with an assistant message"
            # The agent should mention the secret code in its response
            assistant_content = " ".join([msg.content for msg in assistant_messages if msg.content])
            print(f" ✓ Agent response: {assistant_content[:200]}...")
            assert SECRET_CODE in assistant_content, f"Agent should mention '{SECRET_CODE}' in response. Got: {assistant_content}"
            print(" ✓ Agent correctly recalled the secret code!")

            # Step 4: Validate the full conversation history makes sense
            print("\nStep 4: Validating conversation history...")
            all_messages = client.agents.messages.list(agent_id=agent.id, limit=100).items
            message_types = [msg.message_type for msg in all_messages]
            assert "user_message" in message_types, "Should have user messages"
            assert "tool_return_message" in message_types, "Should have tool return message"
            assert "assistant_message" in message_types, "Should have assistant messages"
            # Verify the tool return message contains our secret code
            tool_return_msgs = [msg for msg in all_messages if msg.message_type == "tool_return_message"]
            assert any(msg.tool_return == SECRET_CODE for msg in tool_return_msgs), "Tool return should contain secret code"
            print(f"\n✓ Full flow validated successfully for {model}!")
        finally:
            # Cleanup
            client.agents.delete(agent_id=agent.id)

    @pytest.mark.parametrize("model", TEST_MODELS)
    def test_client_tool_overrides_server_tool(self, client: Letta, model: str) -> None:
        """
        Test that a client-side tool with the same name as a server-side tool
        overrides the server-side tool.

        Flow:
        1. Create a server-side tool named 'get_secret_code' that returns a DIFFERENT value
        2. Create agent with that server-side tool attached
        3. Send request with client-side tool with same name 'get_secret_code'
        4. Verify execution pauses (requires_approval) instead of server-side execution
        5. Provide client-side tool return and verify it's used
        """
        SERVER_TOOL_RETURN = "SERVER_SIDE_VALUE_999"

        # Define server-side tool source code that returns a different value
        server_tool_source = f'''
def get_secret_code(input_text: str) -> str:
    """
    Returns a secret code for the given input text.

    Args:
        input_text: The input text to process

    Returns:
        str: The secret code
    """
    return "{SERVER_TOOL_RETURN}"
'''

        # Create the server-side tool
        server_tool = client.tools.create(source_code=server_tool_source)
        assert server_tool.name == "get_secret_code"

        # Create agent with the server-side tool attached
        agent = client.agents.create(
            name=f"client_override_test_{uuid.uuid4().hex[:8]}",
            model=model,
            embedding="openai/text-embedding-3-small",
            include_base_tools=False,
            tool_ids=[server_tool.id],
            include_base_tool_rules=False,
            tool_rules=[],
        )
        try:
            # Define client-side tool schema with same name but different behavior
            client_tool_schema = get_client_tool_schema()  # name='get_secret_code'
            print(f"\n=== Testing client tool override with model: {model} ===")

            # Step 1: Call the tool WITH client_tools specified - should pause for approval
            print("\nStep 1: Calling tool with client_tools specified (should override server tool)...")
            response1 = client.agents.messages.create(
                agent_id=agent.id,
                messages=[{"role": "user", "content": "Please call the get_secret_code tool with input 'test'."}],
                client_tools=[client_tool_schema],
            )

            # Should pause with requires_approval because client tool overrides server tool
            assert response1.stop_reason.stop_reason == "requires_approval", (
                f"Expected requires_approval (client tool override), got {response1.stop_reason}. "
                f"Server tool may have executed instead of client tool."
            )
            print(" ✓ Execution paused with requires_approval (client tool took precedence)")
            tool_call_id = response1.messages[-1].tool_call.tool_call_id
            assert response1.messages[-1].tool_call.name == "get_secret_code"
            print(f" ✓ Tool call is for 'get_secret_code' (call_id: {tool_call_id})")

            # Step 2: Provide client-side tool return
            print(f"\nStep 2: Providing client-side tool return with: {SECRET_CODE}")
            response2 = client.agents.messages.create(
                agent_id=agent.id,
                messages=[
                    {
                        "type": "approval",
                        "approvals": [
                            {
                                "type": "tool",
                                "tool_call_id": tool_call_id,
                                "tool_return": SECRET_CODE,
                                "status": "success",
                            }
                        ],
                    }
                ],
                client_tools=[client_tool_schema],
            )

            # Agent should continue with the client-provided value
            assert response2.messages[0].message_type == "tool_return_message"
            assert response2.messages[0].tool_return == SECRET_CODE
            print(f" ✓ Tool return contains client-provided value: {SECRET_CODE}")

            # Step 3: Verify the client value was used, not the server value
            print("\nStep 3: Asking agent what the secret code was...")
            response3 = client.agents.messages.create(
                agent_id=agent.id,
                messages=[{"role": "user", "content": "What was the exact secret code returned by the tool?"}],
                client_tools=[client_tool_schema],
            )
            assistant_messages = [msg for msg in response3.messages if msg.message_type == "assistant_message"]
            assistant_content = " ".join([msg.content for msg in assistant_messages if msg.content])
            # Should contain the CLIENT value, not the SERVER value
            assert SECRET_CODE in assistant_content, (
                f"Agent should have used client-side value '{SECRET_CODE}', not server value. Got: {assistant_content}"
            )
            assert SERVER_TOOL_RETURN not in assistant_content, (
                f"Agent should NOT have used server-side value '{SERVER_TOOL_RETURN}'. Got: {assistant_content}"
            )
            print(f" ✓ Agent used client-side value '{SECRET_CODE}' (not server value '{SERVER_TOOL_RETURN}')")

            # Step 4: Test that WITHOUT client_tools, server tool executes directly
            print("\nStep 4: Calling tool WITHOUT client_tools (server tool should execute)...")
            response4 = client.agents.messages.create(
                agent_id=agent.id,
                messages=[{"role": "user", "content": "Please call get_secret_code again with input 'verify'."}],
                # No client_tools - server tool should execute
            )

            # Should NOT pause for approval - server tool executes directly
            assert response4.stop_reason.stop_reason != "requires_approval", (
                f"Without client_tools, server tool should execute directly. Got: {response4.stop_reason}"
            )
            print(" ✓ Without client_tools, server tool executed directly (no approval required)")
            # Note: a dead `" ".join(...)` expression whose result was discarded
            # used to sit here; it was removed as it had no effect.
            tool_returns = [msg for msg in response4.messages if msg.message_type == "tool_return_message"]
            if tool_returns:
                server_return_value = tool_returns[0].tool_return
                print(f" ✓ Server tool returned: {server_return_value}")
            print(f"\n✓ Client tool override test passed for {model}!")
        finally:
            client.agents.delete(agent_id=agent.id)
            client.tools.delete(tool_id=server_tool.id)

    @pytest.mark.parametrize("model", TEST_MODELS)
    def test_client_side_tool_error_return(self, client: Letta, model: str) -> None:
        """
        Test providing an error status for a client-side tool return.
        The agent should handle the error gracefully and continue execution.
        """
        # Create agent for this test
        agent = client.agents.create(
            name=f"client_tools_error_test_{uuid.uuid4().hex[:8]}",
            model=model,
            embedding="openai/text-embedding-3-small",
            include_base_tools=False,
            tool_ids=[],
            include_base_tool_rules=False,
            tool_rules=[],
        )
        try:
            tool_schema = get_client_tool_schema()
            print(f"\n=== Testing error return with model: {model} ===")

            # Step 1: Trigger the client-side tool call
            print("\nStep 1: Triggering tool call...")
            response1 = client.agents.messages.create(
                agent_id=agent.id,
                messages=[{"role": "user", "content": "Please call the get_secret_code tool with input 'hello'."}],
                client_tools=[tool_schema],
            )
            assert response1.stop_reason.stop_reason == "requires_approval"
            tool_call_id = response1.messages[-1].tool_call.tool_call_id
            print(f" ✓ Agent called tool (call_id: {tool_call_id})")

            # Step 2: Provide an error response
            error_message = "Error: Unable to compute secret code - service unavailable"
            print(f"\nStep 2: Providing error response: {error_message}")
            response2 = client.agents.messages.create(
                agent_id=agent.id,
                messages=[
                    {
                        "type": "approval",
                        "approvals": [
                            {
                                "type": "tool",
                                "tool_call_id": tool_call_id,
                                "tool_return": error_message,
                                "status": "error",
                            }
                        ],
                    }
                ],
                client_tools=[tool_schema],
            )
            messages = response2.messages
            assert messages is not None
            assert messages[0].message_type == "tool_return_message"
            assert messages[0].status == "error"
            print(" ✓ Tool return shows error status")

            # Agent should continue execution even after error
            assert response2.stop_reason.stop_reason in ["end_turn", "tool_rule", "max_steps"], (
                f"Expected agent to continue, got {response2.stop_reason}"
            )
            print(f" ✓ Agent continued execution after error (stop_reason: {response2.stop_reason})")

            # Agent should have produced a response acknowledging the error
            assistant_messages = [msg for msg in messages if msg.message_type == "assistant_message"]
            assert len(assistant_messages) > 0, "Agent should respond after receiving error"
            print(" ✓ Agent produced response after error")
            print(f"\n✓ Error handling validated successfully for {model}!")
        finally:
            # Cleanup
            client.agents.delete(agent_id=agent.id)
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/integration_test_client_side_tools.py",
"license": "Apache License 2.0",
"lines": 342,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/integration_test_conversations_sdk.py | """
Integration tests for the Conversations API using the SDK.
"""
import uuid
import pytest
import requests
from letta_client import Letta
@pytest.fixture
def client(server_url: str) -> Letta:
    """Create a Letta SDK client pointed at the test server.

    ``server_url`` is provided by a fixture defined elsewhere (conftest).
    """
    return Letta(base_url=server_url)
@pytest.fixture
def agent(client: Letta):
    """Create a test agent with human/persona memory blocks; delete it after the test."""
    agent_state = client.agents.create(
        # Random suffix keeps concurrent test runs from colliding on the name.
        name=f"test_conversations_{uuid.uuid4().hex[:8]}",
        model="openai/gpt-4o-mini",
        embedding="openai/text-embedding-3-small",
        memory_blocks=[
            {"label": "human", "value": "Test user"},
            {"label": "persona", "value": "You are a helpful assistant."},
        ],
    )
    yield agent_state
    # Cleanup
    client.agents.delete(agent_id=agent_state.id)
class TestConversationsSDK:
"""Test conversations using the SDK client."""
def test_create_conversation(self, client: Letta, agent):
"""Test creating a conversation for an agent."""
conversation = client.conversations.create(agent_id=agent.id)
assert conversation.id is not None
assert conversation.id.startswith("conv-")
assert conversation.agent_id == agent.id
def test_list_conversations(self, client: Letta, agent):
"""Test listing conversations for an agent."""
# Create multiple conversations
conv1 = client.conversations.create(agent_id=agent.id)
conv2 = client.conversations.create(agent_id=agent.id)
# List conversations
conversations = client.conversations.list(agent_id=agent.id)
assert len(conversations) >= 2
conv_ids = [c.id for c in conversations]
assert conv1.id in conv_ids
assert conv2.id in conv_ids
    def test_retrieve_conversation(self, client: Letta, agent):
        """Test retrieving a specific conversation."""
        # Create a conversation
        created = client.conversations.create(agent_id=agent.id)
        # Retrieve it (should have system message from creation)
        retrieved = client.conversations.retrieve(conversation_id=created.id)
        assert retrieved.id == created.id
        assert retrieved.agent_id == created.agent_id
        # Conversation should have 1 system message immediately after creation
        assert len(retrieved.in_context_message_ids) == 1
        assert retrieved.in_context_message_ids[0].startswith("message-")
        # Send a message to the conversation (list(...) consumes the returned stream)
        list(
            client.conversations.messages.create(
                conversation_id=created.id,
                messages=[{"role": "user", "content": "Hello!"}],
            )
        )
        # Retrieve again and check in_context_message_ids is populated
        retrieved_with_messages = client.conversations.retrieve(conversation_id=created.id)
        # System message + user + assistant messages should be in the conversation
        assert len(retrieved_with_messages.in_context_message_ids) >= 3  # system + user + assistant
        # All IDs should be strings starting with "message-"
        for msg_id in retrieved_with_messages.in_context_message_ids:
            assert isinstance(msg_id, str)
            assert msg_id.startswith("message-")
        # Verify message ordering by listing messages
        messages = client.conversations.messages.list(conversation_id=created.id)
        assert len(messages) >= 3  # system + user + assistant
        # First message should be system message (shared across conversations)
        assert messages[0].message_type == "system_message", f"First message should be system_message, got {messages[0].message_type}"
        # Second message should be user message
        assert messages[1].message_type == "user_message", f"Second message should be user_message, got {messages[1].message_type}"
def test_send_message_to_conversation(self, client: Letta, agent):
"""Test sending a message to a conversation."""
# Create a conversation
conversation = client.conversations.create(agent_id=agent.id)
# Send a message (returns a stream)
stream = client.conversations.messages.create(
conversation_id=conversation.id,
messages=[{"role": "user", "content": "Hello, how are you?"}],
)
# Consume the stream to get messages
messages = list(stream)
# Check response contains messages
assert len(messages) > 0
# Should have at least an assistant message
message_types = [m.message_type for m in messages if hasattr(m, "message_type")]
assert "assistant_message" in message_types
def test_list_conversation_messages(self, client: Letta, agent):
    """Test listing messages from a conversation."""
    conv = client.conversations.create(agent_id=agent.id)
    # Seed some history; drain the stream so the run completes.
    list(
        client.conversations.messages.create(
            conversation_id=conv.id,
            messages=[{"role": "user", "content": "Say 'test response' back to me."}],
        )
    )
    listed = client.conversations.messages.list(conversation_id=conv.id)
    assert len(listed) >= 2  # at least the user turn plus an assistant turn
    kinds = [m.message_type for m in listed]
    assert "user_message" in kinds
    assert "assistant_message" in kinds
    # A follow-up turn should append to — not replace — the history.
    baseline = len(listed)
    list(
        client.conversations.messages.create(
            conversation_id=conv.id,
            messages=[{"role": "user", "content": "This is a follow-up message."}],
        )
    )
    relisted = client.conversations.messages.list(conversation_id=conv.id)
    # At least two more messages now (the new user turn + assistant reply).
    assert len(relisted) >= baseline + 2
def test_conversation_isolation(self, client: Letta, agent):
    """Test that conversations are isolated from each other.

    Two conversations on the same agent are each told a different secret
    word; neither word may leak into the other conversation's history or
    recall, and each conversation gets its own system message.
    """
    # Create two conversations
    conv1 = client.conversations.create(agent_id=agent.id)
    conv2 = client.conversations.create(agent_id=agent.id)
    # Send different messages to each (consume streams)
    list(
        client.conversations.messages.create(
            conversation_id=conv1.id,
            messages=[{"role": "user", "content": "Remember the word: APPLE"}],
        )
    )
    list(
        client.conversations.messages.create(
            conversation_id=conv2.id,
            messages=[{"role": "user", "content": "Remember the word: BANANA"}],
        )
    )
    # List messages from each conversation
    conv1_messages = client.conversations.messages.list(conversation_id=conv1.id)
    conv2_messages = client.conversations.messages.list(conversation_id=conv2.id)
    # Check messages are separate
    conv1_content = " ".join([m.content for m in conv1_messages if hasattr(m, "content") and m.content])
    conv2_content = " ".join([m.content for m in conv2_messages if hasattr(m, "content") and m.content])
    assert "APPLE" in conv1_content
    assert "BANANA" in conv2_content
    # Each conversation must only contain its own word. (Previously this was a
    # single `or` assertion, which passed as long as EITHER side was clean and
    # could therefore miss a one-way leak between conversations.)
    assert "BANANA" not in conv1_content, "Conv2's word leaked into conv1 history"
    assert "APPLE" not in conv2_content, "Conv1's word leaked into conv2 history"
    # Ask what word was remembered and make sure it's different for each conversation
    conv1_recall = list(
        client.conversations.messages.create(
            conversation_id=conv1.id,
            messages=[{"role": "user", "content": "What word did I ask you to remember? Reply with just the word."}],
        )
    )
    conv2_recall = list(
        client.conversations.messages.create(
            conversation_id=conv2.id,
            messages=[{"role": "user", "content": "What word did I ask you to remember? Reply with just the word."}],
        )
    )
    # Get the assistant responses
    conv1_response = " ".join([m.content for m in conv1_recall if hasattr(m, "message_type") and m.message_type == "assistant_message"])
    conv2_response = " ".join([m.content for m in conv2_recall if hasattr(m, "message_type") and m.message_type == "assistant_message"])
    assert "APPLE" in conv1_response.upper(), f"Conv1 should remember APPLE, got: {conv1_response}"
    assert "BANANA" in conv2_response.upper(), f"Conv2 should remember BANANA, got: {conv2_response}"
    # Each conversation has its own system message (created on first message)
    conv1_system_id = conv1_messages[0].id
    conv2_system_id = conv2_messages[0].id
    assert conv1_system_id != conv2_system_id, "System messages should have different IDs for different conversations"
def test_conversation_messages_pagination(self, client: Letta, agent):
    """Test pagination when listing conversation messages."""
    conv = client.conversations.create(agent_id=agent.id)
    # Build several turns of history (drain each stream so the run finishes).
    for i in range(3):
        list(
            client.conversations.messages.create(
                conversation_id=conv.id,
                messages=[{"role": "user", "content": f"Message number {i}"}],
            )
        )
    # A limited listing must never exceed the requested page size.
    page = client.conversations.messages.list(
        conversation_id=conv.id,
        limit=2,
    )
    assert len(page) <= 2
def test_retrieve_conversation_stream_no_active_run(self, client: Letta, agent):
    """Test that retrieve_conversation_stream returns error when no active run exists."""
    from letta_client import BadRequestError

    conv = client.conversations.create(agent_id=agent.id)
    # With no run ever started, attaching to the stream must fail with 400.
    with pytest.raises(BadRequestError) as exc_info:
        # Draining the SDK stream is what actually surfaces the error.
        list(client.conversations.messages.stream(conversation_id=conv.id))
    assert "No active runs found" in str(exc_info.value)
def test_retrieve_conversation_stream_after_completed_run(self, client: Letta, agent):
    """Test that retrieve_conversation_stream returns error when run is completed."""
    from letta_client import BadRequestError

    conv = client.conversations.create(agent_id=agent.id)
    # Run one turn to completion by draining its stream.
    list(
        client.conversations.messages.create(
            conversation_id=conv.id,
            messages=[{"role": "user", "content": "Hello"}],
        )
    )
    # With the run finished there is nothing live to attach to: expect 400.
    with pytest.raises(BadRequestError) as exc_info:
        list(client.conversations.messages.stream(conversation_id=conv.id))
    assert "No active runs found" in str(exc_info.value)
def test_conversation_lock_released_after_completion(self, client: Letta, agent):
    """Test that lock is released after request completes by sending sequential messages."""
    from letta.settings import settings

    # The conversation lock lives in Redis; nothing to test without it.
    if settings.redis_host is None or settings.redis_port is None:
        pytest.skip("Redis not configured - skipping conversation lock test")
    conv = client.conversations.create(agent_id=agent.id)
    # Two back-to-back requests: if the first leaked its lock, the second
    # would be rejected with a conflict instead of streaming a reply.
    for content in ("Hello", "Hello again"):
        drained = list(
            client.conversations.messages.create(
                conversation_id=conv.id,
                messages=[{"role": "user", "content": content}],
            )
        )
        assert len(drained) > 0
def test_conversation_lock_released_on_error(self, client: Letta, agent):
    """Test that lock is released even when the run encounters an error.

    Sends a message designed to blow past the context window so the run
    errors out mid-stream, then proves the lock was released by
    successfully sending a normal follow-up message.
    """
    from letta.settings import settings

    # The conversation lock lives in Redis; nothing to test without it.
    if settings.redis_host is None or settings.redis_port is None:
        pytest.skip("Redis not configured - skipping conversation lock test")
    conv = client.conversations.create(agent_id=agent.id)
    # An oversized payload should trigger a context-window error during processing.
    oversized = "Hello " * 100000  # Very long message to exceed context window
    try:
        list(
            client.conversations.messages.create(
                conversation_id=conv.id,
                messages=[{"role": "user", "content": oversized}],
            )
        )
    except Exception:
        pass  # Expected to fail due to context window exceeded
    # The failed run must not leave the conversation locked.
    follow_up = list(
        client.conversations.messages.create(
            conversation_id=conv.id,
            messages=[{"role": "user", "content": "Hello after error"}],
        )
    )
    assert len(follow_up) > 0, "Lock should be released even after run error"
def test_concurrent_messages_to_same_conversation(self, client: Letta, agent):
    """Test that concurrent messages to the same conversation are properly serialized.
    One request should succeed and one should get a 409 CONVERSATION_BUSY error.
    After both return, a subsequent message should succeed.
    """
    import concurrent.futures

    from letta_client import ConflictError

    from letta.settings import settings

    # The conversation lock lives in Redis; nothing to test without it.
    if settings.redis_host is None or settings.redis_port is None:
        pytest.skip("Redis not configured - skipping conversation lock test")
    conv = client.conversations.create(agent_id=agent.id)

    def send_message(msg: str):
        # Classify each request's outcome: success / lock conflict / other.
        try:
            drained = list(
                client.conversations.messages.create(
                    conversation_id=conv.id,
                    messages=[{"role": "user", "content": msg}],
                )
            )
            return ("success", drained)
        except ConflictError:
            return ("conflict", None)
        except Exception as e:
            return ("other_error", str(e))

    # Race two requests against the same conversation lock.
    with concurrent.futures.ThreadPoolExecutor(max_workers=2) as pool:
        pending = [pool.submit(send_message, "Message 1"), pool.submit(send_message, "Message 2")]
        outcomes = [f.result() for f in pending]
    tallies = {"success": 0, "conflict": 0, "other_error": 0}
    for kind, _ in outcomes:
        tallies[kind] += 1
    # Exactly one winner and one 409; anything else is a bug.
    assert tallies["success"] == 1, f"Expected 1 success, got {tallies['success']}"
    assert tallies["conflict"] == 1, f"Expected 1 conflict, got {tallies['conflict']}"
    assert tallies["other_error"] == 0, f"Unexpected errors: {tallies['other_error']}"
    # With both requests finished, the lock must be free again.
    drained = list(
        client.conversations.messages.create(
            conversation_id=conv.id,
            messages=[{"role": "user", "content": "Message after concurrent requests"}],
        )
    )
    assert len(drained) > 0, "Should be able to send message after concurrent requests complete"
def test_list_conversation_messages_order_asc(self, client: Letta, agent):
    """Test listing messages in ascending order (oldest first)."""
    conv = client.conversations.create(agent_id=agent.id)
    # Two turns of history, sent in a known order.
    for text in ("First message", "Second message"):
        list(
            client.conversations.messages.create(
                conversation_id=conv.id,
                messages=[{"role": "user", "content": text}],
            )
        )
    ascending = client.conversations.messages.list(
        conversation_id=conv.id,
        order="asc",
    )
    # The oldest entry is the conversation's system message.
    assert ascending[0].message_type == "system_message"
    # User turns must come back in send order.
    user_turns = [m for m in ascending if m.message_type == "user_message"]
    assert len(user_turns) >= 2
    assert "First" in user_turns[0].content
def test_list_conversation_messages_order_desc(self, client: Letta, agent):
    """Test listing messages in descending order (newest first)."""
    conv = client.conversations.create(agent_id=agent.id)
    # Two turns of history, sent in a known order.
    for text in ("First message", "Second message"):
        list(
            client.conversations.messages.create(
                conversation_id=conv.id,
                messages=[{"role": "user", "content": text}],
            )
        )
    # Descending order (also the default) puts the newest entries first.
    descending = client.conversations.messages.list(
        conversation_id=conv.id,
        order="desc",
    )
    user_turns = [m for m in descending if m.message_type == "user_message"]
    assert len(user_turns) >= 2
    # Newest user turn leads the list in desc order.
    assert "Second" in user_turns[0].content
def test_list_conversation_messages_order_affects_pagination(self, client: Letta, agent):
    """Test that order parameter affects pagination correctly."""
    conv = client.conversations.create(agent_id=agent.id)
    # Build several turns of history.
    for i in range(3):
        list(
            client.conversations.messages.create(
                conversation_id=conv.id,
                messages=[{"role": "user", "content": f"Message {i}"}],
            )
        )
    # Same limit, opposite orders: the two pages must start from
    # opposite ends of the history.
    newest_first = client.conversations.messages.list(
        conversation_id=conv.id,
        order="desc",
        limit=5,
    )
    oldest_first = client.conversations.messages.list(
        conversation_id=conv.id,
        order="asc",
        limit=5,
    )
    assert newest_first[0].id != oldest_first[0].id
def test_list_conversation_messages_with_before_cursor(self, client: Letta, agent):
    """Test pagination with before cursor."""
    conv = client.conversations.create(agent_id=agent.id)
    # Two turns of history to paginate over.
    for text in ("First message", "Second message"):
        list(
            client.conversations.messages.create(
                conversation_id=conv.id,
                messages=[{"role": "user", "content": text}],
            )
        )
    everything = client.conversations.messages.list(
        conversation_id=conv.id,
        order="asc",
    )
    assert len(everything) >= 4  # system + user + assistant + user + assistant
    # Page with before=<newest id>: the cursor row itself must be excluded.
    cursor = everything[-1].id
    page = client.conversations.messages.list(
        conversation_id=conv.id,
        order="asc",
        before=cursor,
    )
    assert len(page) < len(everything)
    assert cursor not in [m.id for m in page]
def test_list_conversation_messages_with_after_cursor(self, client: Letta, agent):
    """Test pagination with after cursor."""
    conv = client.conversations.create(agent_id=agent.id)
    # Two turns of history to paginate over.
    for text in ("First message", "Second message"):
        list(
            client.conversations.messages.create(
                conversation_id=conv.id,
                messages=[{"role": "user", "content": text}],
            )
        )
    everything = client.conversations.messages.list(
        conversation_id=conv.id,
        order="asc",
    )
    assert len(everything) >= 4
    # Page with after=<oldest id>: the cursor row itself must be excluded.
    cursor = everything[0].id
    page = client.conversations.messages.list(
        conversation_id=conv.id,
        order="asc",
        after=cursor,
    )
    assert len(page) < len(everything)
    assert cursor not in [m.id for m in page]
def test_agent_direct_messaging_via_conversations_endpoint(self, client: Letta, agent):
    """Test sending messages using agent ID as conversation_id (agent-direct mode).
    This allows clients to use a unified endpoint pattern without managing conversation IDs.
    """
    # Passing the agent's own ID where a conversation ID is expected
    # routes the request through agent-direct mode (with locking).
    replies = list(
        client.conversations.messages.create(
            conversation_id=agent.id,  # Using agent ID instead of conversation ID
            messages=[{"role": "user", "content": "Hello via agent-direct mode!"}],
        )
    )
    assert len(replies) > 0, "Should receive response messages"
    # At least one assistant reply must come back.
    assistant_replies = [m for m in replies if hasattr(m, "message_type") and m.message_type == "assistant_message"]
    assert len(assistant_replies) > 0, "Should receive at least one assistant message"
def test_agent_direct_messaging_with_locking(self, client: Letta, agent):
    """Test that agent-direct mode properly acquires and releases locks.
    Sequential requests should both succeed if locks are properly released.
    """
    from letta.settings import settings

    # The lock lives in Redis; nothing to test without it.
    if settings.redis_host is None or settings.redis_port is None:
        pytest.skip("Redis not configured - skipping agent-direct lock test")
    # Two back-to-back agent-direct requests; both must go through.
    first = list(
        client.conversations.messages.create(
            conversation_id=agent.id,
            messages=[{"role": "user", "content": "First message"}],
        )
    )
    assert len(first) > 0, "First message should succeed"
    second = list(
        client.conversations.messages.create(
            conversation_id=agent.id,
            messages=[{"role": "user", "content": "Second message"}],
        )
    )
    assert len(second) > 0, "Second message should succeed after lock released"
def test_agent_direct_concurrent_requests_blocked(self, client: Letta, agent):
    """Test that concurrent requests to agent-direct mode are properly serialized.
    One request should succeed and one should get a 409 CONVERSATION_BUSY error.
    """
    import concurrent.futures

    from letta_client import ConflictError

    from letta.settings import settings

    # The lock lives in Redis; nothing to test without it.
    if settings.redis_host is None or settings.redis_port is None:
        pytest.skip("Redis not configured - skipping agent-direct lock test")

    def send_message(msg: str):
        # Classify each request's outcome: success / lock conflict / other.
        try:
            drained = list(
                client.conversations.messages.create(
                    conversation_id=agent.id,  # Agent-direct mode
                    messages=[{"role": "user", "content": msg}],
                )
            )
            return ("success", drained)
        except ConflictError:
            return ("conflict", None)
        except Exception as e:
            return ("other_error", str(e))

    # Race two agent-direct requests against each other.
    with concurrent.futures.ThreadPoolExecutor(max_workers=2) as pool:
        pending = [pool.submit(send_message, "Concurrent message 1"), pool.submit(send_message, "Concurrent message 2")]
        outcomes = [f.result() for f in pending]
    tallies = {"success": 0, "conflict": 0, "other_error": 0}
    for kind, _ in outcomes:
        tallies[kind] += 1
    # Exactly one winner and one 409; anything else is a bug.
    assert tallies["success"] == 1, f"Expected 1 success, got {tallies['success']}"
    assert tallies["conflict"] == 1, f"Expected 1 conflict, got {tallies['conflict']}"
    assert tallies["other_error"] == 0, f"Unexpected errors: {tallies['other_error']}"
    # The lock must be free once both requests have returned.
    drained = list(
        client.conversations.messages.create(
            conversation_id=agent.id,
            messages=[{"role": "user", "content": "Message after concurrent requests"}],
        )
    )
    assert len(drained) > 0, "Should be able to send message after concurrent requests complete"
def test_agent_direct_list_messages(self, client: Letta, agent):
    """Test listing messages using agent ID as conversation_id."""
    # Populate history through agent-direct mode first.
    list(
        client.conversations.messages.create(
            conversation_id=agent.id,
            messages=[{"role": "user", "content": "Test message for listing"}],
        )
    )
    # Listing accepts the agent ID in place of a conversation ID too.
    listed = list(client.conversations.messages.list(conversation_id=agent.id))
    # Expect at least system + user + assistant.
    assert len(listed) >= 3, f"Expected at least 3 messages, got {len(listed)}"
    # The message we just sent must be retrievable.
    user_turns = [m for m in listed if hasattr(m, "message_type") and m.message_type == "user_message"]
    assert any("Test message for listing" in str(m.content) for m in user_turns), "Should find our test message"
def test_agent_direct_cancel(self, client: Letta, agent):
    """Test canceling runs using agent ID as conversation_id."""
    from letta.settings import settings

    # Cancel needs run tracking to find anything to cancel.
    if not settings.track_agent_run:
        pytest.skip("Run tracking disabled - skipping cancel test")
    try:
        # Launch a background run so there is something to cancel.
        stream = client.conversations.messages.create(
            conversation_id=agent.id,
            messages=[{"role": "user", "content": "Background message to cancel"}],
            background=True,
        )
        # Pull one event to make sure the run actually started.
        next(iter(stream), None)
        # Cancellation also accepts the agent ID as the conversation ID.
        outcome = client.conversations.cancel(conversation_id=agent.id)
        # Results dict may be empty if the run finished before cancellation.
        assert isinstance(outcome, dict), "Cancel should return a dict of results"
    except Exception as e:
        # A fast run may already be done; only "no active runs" is tolerated.
        if "No active runs" not in str(e):
            raise
def test_backwards_compatibility_old_pattern(self, client: Letta, agent, server_url: str):
    """Test that the old pattern (agent_id as conversation_id) still works for backwards compatibility."""
    # Exercise the raw HTTP surface directly (the SDK may lag behind).
    # Old pattern, part 1: POST a message with the agent ID in the path.
    send_resp = requests.post(
        f"{server_url}/v1/conversations/{agent.id}/messages",
        json={
            "messages": [{"role": "user", "content": "Testing old pattern still works"}],
            "streaming": False,
        },
    )
    assert send_resp.status_code == 200, f"Old pattern should work for sending messages: {send_resp.text}"
    payload = send_resp.json()
    assert "messages" in payload, "Response should contain messages"
    assert len(payload["messages"]) > 0, "Should receive response messages"
    # Old pattern, part 2: GET the message list with the agent ID in the path.
    list_resp = requests.get(f"{server_url}/v1/conversations/{agent.id}/messages")
    assert list_resp.status_code == 200, f"Old pattern should work for listing messages: {list_resp.text}"
    payload = list_resp.json()
    # The listing endpoint returns a bare list of messages.
    assert isinstance(payload, list), "Response should be a list of messages"
    assert len(payload) >= 3, "Should have at least system + user + assistant messages"
    # The message we just sent must be retrievable.
    user_turns = [m for m in payload if m.get("message_type") == "user_message"]
    assert any("Testing old pattern still works" in str(m.get("content", "")) for m in user_turns), "Should find our test message"
def test_new_pattern_send_message(self, client: Letta, agent, server_url: str):
    """Test sending messages using the new pattern: conversation_id='default' + agent_id in body."""
    # New pattern: the path segment is the literal 'default' and the
    # agent is identified in the request body instead.
    resp = requests.post(
        f"{server_url}/v1/conversations/default/messages",
        json={
            "agent_id": agent.id,
            "messages": [{"role": "user", "content": "Testing new pattern send message"}],
            "streaming": False,
        },
    )
    assert resp.status_code == 200, f"New pattern should work for sending messages: {resp.text}"
    body = resp.json()
    assert "messages" in body, "Response should contain messages"
    assert len(body["messages"]) > 0, "Should receive response messages"
    # At least one assistant reply must come back.
    assistant_turns = [m for m in body["messages"] if m.get("message_type") == "assistant_message"]
    assert len(assistant_turns) > 0, "Should receive at least one assistant message"
def test_new_pattern_list_messages(self, client: Letta, agent, server_url: str):
    """Test listing messages using the new pattern: conversation_id='default' + agent_id query param."""
    # Seed the conversation via the old-pattern send endpoint.
    requests.post(
        f"{server_url}/v1/conversations/{agent.id}/messages",
        json={
            "messages": [{"role": "user", "content": "Setup message for list test"}],
            "streaming": False,
        },
    )
    # New pattern: 'default' in the path, agent_id as a query parameter.
    resp = requests.get(
        f"{server_url}/v1/conversations/default/messages",
        params={"agent_id": agent.id},
    )
    assert resp.status_code == 200, f"New pattern should work for listing messages: {resp.text}"
    body = resp.json()
    # The listing endpoint returns a bare list of messages.
    assert isinstance(body, list), "Response should be a list of messages"
    assert len(body) >= 3, "Should have at least system + user + assistant messages"
def test_new_pattern_cancel(self, client: Letta, agent, server_url: str):
    """Test canceling runs using the new pattern: conversation_id='default' + agent_id query param."""
    from letta.settings import settings

    if not settings.track_agent_run:
        pytest.skip("Run tracking disabled - skipping cancel test")
    # New pattern: 'default' in the path, agent_id as a query parameter.
    resp = requests.post(
        f"{server_url}/v1/conversations/default/cancel",
        params={"agent_id": agent.id},
    )
    # 200 => runs were cancelled; 409 => nothing was running. Both are fine.
    assert resp.status_code in [200, 409], f"New pattern should work for cancel: {resp.text}"
    if resp.status_code == 200:
        assert isinstance(resp.json(), dict), "Cancel should return a dict"
def test_new_pattern_compact(self, client: Letta, agent, server_url: str):
    """Test compacting conversation using the new pattern: conversation_id='default' + agent_id in body."""
    # Build up enough history that compaction has something to fold.
    for i in range(10):
        requests.post(
            f"{server_url}/v1/conversations/{agent.id}/messages",
            json={
                "messages": [{"role": "user", "content": f"Message {i} for compaction test"}],
                "streaming": False,
            },
        )
    # New pattern: 'default' in the path, agent_id in the request body.
    resp = requests.post(
        f"{server_url}/v1/conversations/default/compact",
        json={"agent_id": agent.id},
    )
    # 200 => compacted; 400 => not enough history. Both are acceptable here.
    assert resp.status_code in [200, 400], f"New pattern should accept agent_id parameter: {resp.text}"
    if resp.status_code == 200:
        body = resp.json()
        assert "summary" in body, "Response should contain summary"
        assert "num_messages_before" in body, "Response should contain num_messages_before"
        assert "num_messages_after" in body, "Response should contain num_messages_after"
def test_new_pattern_stream_retrieve(self, client: Letta, agent, server_url: str):
    """Test retrieving stream using the new pattern: conversation_id='default' + agent_id in body."""
    # New pattern: 'default' in the path, agent_id in the request body.
    # With no run in flight this is expected to 400; 200 means a run existed.
    resp = requests.post(
        f"{server_url}/v1/conversations/default/stream",
        json={"agent_id": agent.id},
    )
    assert resp.status_code in [200, 400], f"Stream retrieve should accept new pattern: {resp.text}"
class TestConversationDelete:
    """Tests for the conversation delete endpoint."""

    def test_delete_conversation(self, client: Letta, agent, server_url: str):
        """Test soft deleting a conversation."""
        conv = client.conversations.create(agent_id=agent.id)
        assert conv.id is not None
        # DELETE should succeed...
        delete_resp = requests.delete(
            f"{server_url}/v1/conversations/{conv.id}",
        )
        assert delete_resp.status_code == 200, f"Expected 200, got {delete_resp.status_code}: {delete_resp.text}"
        # ...and subsequent GETs must treat the conversation as gone.
        get_resp = requests.get(
            f"{server_url}/v1/conversations/{conv.id}",
        )
        assert get_resp.status_code == 404, f"Expected 404 for deleted conversation, got {get_resp.status_code}"

    def test_delete_conversation_removes_from_list(self, client: Letta, agent, server_url: str):
        """Test that deleted conversations don't appear in list."""
        conv1 = client.conversations.create(agent_id=agent.id)
        conv2 = client.conversations.create(agent_id=agent.id)
        # Both must be visible before the delete.
        visible = [c.id for c in client.conversations.list(agent_id=agent.id)]
        assert conv1.id in visible
        assert conv2.id in visible
        # Delete only the first one.
        delete_resp = requests.delete(
            f"{server_url}/v1/conversations/{conv1.id}",
        )
        assert delete_resp.status_code == 200
        # The listing must now show only the survivor.
        visible = [c.id for c in client.conversations.list(agent_id=agent.id)]
        assert conv1.id not in visible, "Deleted conversation should not appear in list"
        assert conv2.id in visible, "Non-deleted conversation should still appear"

    def test_delete_conversation_not_found(self, client: Letta, agent, server_url: str):
        """Test that deleting a non-existent conversation returns 404."""
        missing_id = "conv-00000000-0000-0000-0000-000000000000"
        resp = requests.delete(
            f"{server_url}/v1/conversations/{missing_id}",
        )
        assert resp.status_code == 404

    def test_delete_conversation_double_delete(self, client: Letta, agent, server_url: str):
        """Test that deleting an already-deleted conversation returns 404."""
        conv = client.conversations.create(agent_id=agent.id)
        # First delete succeeds.
        resp = requests.delete(
            f"{server_url}/v1/conversations/{conv.id}",
        )
        assert resp.status_code == 200
        # Repeating the delete must report the resource as gone.
        resp = requests.delete(
            f"{server_url}/v1/conversations/{conv.id}",
        )
        assert resp.status_code == 404, "Double delete should return 404"

    def test_update_deleted_conversation_fails(self, client: Letta, agent, server_url: str):
        """Test that updating a deleted conversation returns 404."""
        conv = client.conversations.create(agent_id=agent.id)
        resp = requests.delete(
            f"{server_url}/v1/conversations/{conv.id}",
        )
        assert resp.status_code == 200
        # A PATCH against the soft-deleted conversation must 404.
        resp = requests.patch(
            f"{server_url}/v1/conversations/{conv.id}",
            json={"summary": "Updated summary"},
        )
        assert resp.status_code == 404, "Updating deleted conversation should return 404"
class TestConversationCompact:
"""Tests for the conversation compact (summarization) endpoint."""
def test_compact_conversation_basic(self, client: Letta, agent, server_url: str):
    """Test basic conversation compaction via the REST endpoint."""
    conv = client.conversations.create(agent_id=agent.id)
    # Build a history large enough that compaction is meaningful.
    for i in range(5):
        list(
            client.conversations.messages.create(
                conversation_id=conv.id,
                messages=[{"role": "user", "content": f"Message {i}: Tell me about topic {i}."}],
            )
        )
    before = client.conversations.messages.list(
        conversation_id=conv.id,
        order="asc",
    )
    count_before = len(before)
    assert count_before >= 10  # at least 5 user + 5 assistant messages
    # Compact over HTTP with default settings.
    resp = requests.post(
        f"{server_url}/v1/conversations/{conv.id}/compact",
        json={},
    )
    assert resp.status_code == 200, f"Expected 200, got {resp.status_code}: {resp.text}"
    body = resp.json()
    # Response contract: a non-empty summary plus before/after counts.
    assert "summary" in body
    assert "num_messages_before" in body
    assert "num_messages_after" in body
    assert isinstance(body["summary"], str)
    assert len(body["summary"]) > 0
    assert body["num_messages_before"] > body["num_messages_after"]
    # The stored history must actually have shrunk.
    after = client.conversations.messages.list(
        conversation_id=conv.id,
        order="asc",
    )
    assert len(after) < count_before
def test_compact_conversation_creates_summary_role_message(self, client: Letta, agent, server_url: str):
    """Test that compaction creates a summary message with role='summary'."""
    conv = client.conversations.create(agent_id=agent.id)
    # Build a history large enough that compaction is meaningful.
    for i in range(5):
        list(
            client.conversations.messages.create(
                conversation_id=conv.id,
                messages=[{"role": "user", "content": f"Message {i}: Tell me about topic {i}."}],
            )
        )
    # 'all' mode folds the entire history into a single summary.
    resp = requests.post(
        f"{server_url}/v1/conversations/{conv.id}/compact",
        json={
            "compaction_settings": {
                "mode": "all",
            }
        },
    )
    assert resp.status_code == 200, f"Expected 200, got {resp.status_code}: {resp.text}"
    remaining = client.conversations.messages.list(
        conversation_id=conv.id,
        order="asc",
    )
    # Expect exactly one role='summary' message left alongside the system message.
    summaries = [msg for msg in remaining if msg.role == "summary"]
    assert len(summaries) == 1, (
        f"Expected exactly 1 summary message after compaction, found {len(summaries)}. "
        f"Message roles: {[msg.role for msg in remaining]}"
    )
def test_compact_conversation_with_settings(self, client: Letta, agent, server_url: str):
"""Test conversation compaction with custom compaction settings."""
# Create a conversation with multiple messages
conversation = client.conversations.create(agent_id=agent.id)
for i in range(5):
list(
client.conversations.messages.create(
conversation_id=conversation.id,
messages=[{"role": "user", "content": f"Remember fact {i}: The number {i} is important."}],
)
)
# Call compact with 'all' mode
response = requests.post(
f"{server_url}/v1/conversations/{conversation.id}/compact",
json={
"compaction_settings": {
"mode": "all",
}
},
)
assert response.status_code == 200, f"Expected 200, got {response.status_code}: {response.text}"
result = response.json()
assert result["num_messages_before"] > result["num_messages_after"]
def test_compact_conversation_preserves_conversation_isolation(self, client: Letta, agent, server_url: str):
"""Test that compacting one conversation doesn't affect another."""
# Create two conversations
conv1 = client.conversations.create(agent_id=agent.id)
conv2 = client.conversations.create(agent_id=agent.id)
# Add messages to both
for i in range(5):
list(
client.conversations.messages.create(
conversation_id=conv1.id,
messages=[{"role": "user", "content": f"Conv1 message {i}"}],
)
)
list(
client.conversations.messages.create(
conversation_id=conv2.id,
messages=[{"role": "user", "content": f"Conv2 message {i}"}],
)
)
# Get initial counts
conv1_initial = len(client.conversations.messages.list(conversation_id=conv1.id))
conv2_initial = len(client.conversations.messages.list(conversation_id=conv2.id))
# Compact only conv1
response = requests.post(
f"{server_url}/v1/conversations/{conv1.id}/compact",
json={},
)
assert response.status_code == 200
# Conv1 should be compacted
conv1_after = len(client.conversations.messages.list(conversation_id=conv1.id))
assert conv1_after < conv1_initial
# Conv2 should be unchanged
conv2_after = len(client.conversations.messages.list(conversation_id=conv2.id))
assert conv2_after == conv2_initial
def test_compact_conversation_empty_fails(self, client: Letta, agent, server_url: str):
"""Test that compacting an empty conversation fails gracefully."""
# Create a new conversation without messages
conversation = client.conversations.create(agent_id=agent.id)
# Try to compact - should fail since no messages exist
response = requests.post(
f"{server_url}/v1/conversations/{conversation.id}/compact",
json={},
)
# Should return 400 because there are no in-context messages
assert response.status_code == 400
def test_compact_conversation_invalid_id(self, client: Letta, agent, server_url: str):
"""Test that compacting with invalid conversation ID returns 404."""
fake_id = "conv-00000000-0000-0000-0000-000000000000"
response = requests.post(
f"{server_url}/v1/conversations/{fake_id}/compact",
json={},
)
assert response.status_code == 404
class TestConversationSystemMessageRecompilation:
    """Verify that each new conversation gets a system message compiled from the latest memory state."""

    def test_new_conversation_recompiles_system_message_with_updated_memory(self, client: Letta, server_url: str):
        """End-to-end check of system-message recompilation:
        1. Agent is created
        2. Send message to agent (through a conversation)
        3. Modify the memory block -> check system message is NOT updated with the modified value
        4. Create a new conversation
        5. Check new conversation system message DOES have the modified value
        """
        marker = f"UNIQUE_MARKER_{uuid.uuid4().hex[:8]}"
        # Step 1: agent with two well-known memory blocks.
        agent = client.agents.create(
            name=f"test_sys_msg_recompile_{uuid.uuid4().hex[:8]}",
            model="openai/gpt-4o-mini",
            embedding="openai/text-embedding-3-small",
            memory_blocks=[
                {"label": "human", "value": "The user is a test user."},
                {"label": "persona", "value": "You are a helpful assistant."},
            ],
        )
        try:
            # Step 2: open a conversation and exchange one message.
            first_convo = client.conversations.create(agent_id=agent.id)
            list(
                client.conversations.messages.create(
                    conversation_id=first_convo.id,
                    messages=[{"role": "user", "content": "Hello, just a quick test."}],
                )
            )
            # Sanity-check the conversation begins with a system message.
            first_messages = client.conversations.messages.list(
                conversation_id=first_convo.id,
                order="asc",
            )
            assert len(first_messages) >= 3  # system + user + assistant
            assert first_messages[0].message_type == "system_message"
            baseline_system = first_messages[0].content
            assert marker not in baseline_system, "Marker should not be in original system message"
            # Step 3: edit the 'human' block so it carries the unique marker.
            client.agents.blocks.update(
                agent_id=agent.id,
                block_label="human",
                value=f"The user is a test user. {marker}",
            )
            refreshed_block = client.agents.blocks.retrieve(agent_id=agent.id, block_label="human")
            assert marker in refreshed_block.value
            # The pre-existing conversation's system message must remain stale.
            stale_messages = client.conversations.messages.list(
                conversation_id=first_convo.id,
                order="asc",
            )
            stale_system = stale_messages[0].content
            assert marker not in stale_system, "Old conversation system message should NOT contain the updated memory value"
            # Steps 4 & 5: a brand-new conversation should compile the updated memory in.
            second_convo = client.conversations.create(agent_id=agent.id)
            second_retrieved = client.conversations.retrieve(conversation_id=second_convo.id)
            assert len(second_retrieved.in_context_message_ids) == 1, (
                f"New conversation should have exactly 1 system message, got {len(second_retrieved.in_context_message_ids)}"
            )
            second_messages = client.conversations.messages.list(
                conversation_id=second_convo.id,
                order="asc",
            )
            assert len(second_messages) >= 1
            assert second_messages[0].message_type == "system_message"
            fresh_system = second_messages[0].content
            assert marker in fresh_system, (
                f"New conversation system message should contain the updated memory value '{marker}', "
                f"but system message content did not include it"
            )
        finally:
            client.agents.delete(agent_id=agent.id)

    def test_conversation_creation_initializes_system_message(self, client: Letta, server_url: str):
        """A brand-new conversation should immediately hold exactly one compiled system message."""
        agent = client.agents.create(
            name=f"test_conv_init_{uuid.uuid4().hex[:8]}",
            model="openai/gpt-4o-mini",
            embedding="openai/text-embedding-3-small",
            memory_blocks=[
                {"label": "human", "value": "Test user for system message init."},
                {"label": "persona", "value": "You are a helpful assistant."},
            ],
        )
        try:
            # No messages are sent — creation alone must install the system message.
            convo = client.conversations.create(agent_id=agent.id)
            retrieved = client.conversations.retrieve(conversation_id=convo.id)
            assert len(retrieved.in_context_message_ids) == 1, (
                f"Expected 1 system message after conversation creation, got {len(retrieved.in_context_message_ids)}"
            )
            # The compiled system prompt should embed the memory block's value.
            messages = client.conversations.messages.list(
                conversation_id=convo.id,
                order="asc",
            )
            assert len(messages) == 1
            assert messages[0].message_type == "system_message"
            assert "Test user for system message init." in messages[0].content
        finally:
            client.agents.delete(agent_id=agent.id)
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/integration_test_conversations_sdk.py",
"license": "Apache License 2.0",
"lines": 1035,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/managers/test_agent_manager_block_update.py | import pytest
# Import shared fixtures and constants from conftest
from conftest import DEFAULT_EMBEDDING_CONFIG
from letta.orm.errors import NoResultFound
from letta.schemas.agent import CreateAgent
from letta.schemas.block import Block as PydanticBlock, BlockUpdate
from letta.schemas.llm_config import LLMConfig
from letta.server.server import SyncServer
from letta.services.block_manager import BlockManager
@pytest.mark.asyncio
async def test_modify_nonexistent_block_raises_error(server: SyncServer, default_user):
    """
    Updating a block label the agent does not have must raise NoResultFound
    rather than silently mutating an unrelated block.

    Regression test for bug where `block = block` was a no-op, causing the loop
    variable to end as the last block in core_memory, which then got incorrectly updated.
    """
    # Base tools must exist before agent creation.
    await server.tool_manager.upsert_base_tools_async(actor=default_user)
    # Two real blocks: 'human' and 'persona'. No 'skills' block is created.
    block_manager = BlockManager()
    human = await block_manager.create_or_update_block_async(
        PydanticBlock(label="human", value="Test user context", limit=2000), actor=default_user
    )
    persona = await block_manager.create_or_update_block_async(
        PydanticBlock(label="persona", value="Test persona context", limit=2000), actor=default_user
    )
    # Agent wired to exactly those two blocks.
    agent = await server.agent_manager.create_agent_async(
        CreateAgent(
            name="test_block_update_agent",
            agent_type="memgpt_v2_agent",
            system="test system",
            llm_config=LLMConfig.default_config("gpt-4o-mini"),
            embedding_config=DEFAULT_EMBEDDING_CONFIG,
            block_ids=[human.id, persona.id],
        ),
        actor=default_user,
    )
    # Targeting the absent 'skills' label must fail loudly, not fall through
    # to the last block in core_memory.
    with pytest.raises(NoResultFound, match="No block with label 'skills' found"):
        await server.agent_manager.modify_block_by_label_async(
            agent_id=agent.id,
            block_label="skills",
            block_update=BlockUpdate(value="Skills directory content that should not overwrite human block"),
            actor=default_user,
        )
    # Both real blocks must be untouched by the failed update.
    for label, expected, message in (
        ("human", "Test user context", "Human block should not be modified"),
        ("persona", "Test persona context", "Persona block should not be modified"),
    ):
        current = await server.agent_manager.get_block_with_label_async(
            agent_id=agent.id,
            block_label=label,
            actor=default_user,
        )
        assert current.value == expected, message
    # Clean up
    await server.agent_manager.delete_agent_async(agent.id, default_user)
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/managers/test_agent_manager_block_update.py",
"license": "Apache License 2.0",
"lines": 62,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/managers/test_conversation_manager.py | """
Tests for ConversationManager.
"""
import pytest
from letta.orm.errors import NoResultFound
from letta.schemas.conversation import CreateConversation, UpdateConversation
from letta.server.server import SyncServer
from letta.services.conversation_manager import ConversationManager
# ======================================================================================================================
# ConversationManager Tests
# ======================================================================================================================
@pytest.fixture
def conversation_manager():
    """Provide a fresh ConversationManager for each test."""
    return ConversationManager()
@pytest.mark.asyncio
async def test_create_conversation(conversation_manager, server: SyncServer, sarah_agent, default_user):
    """A newly created conversation carries an id, the owning agent id, and the summary."""
    convo = await conversation_manager.create_conversation(
        agent_id=sarah_agent.id,
        conversation_create=CreateConversation(summary="Test conversation"),
        actor=default_user,
    )
    assert convo.id is not None
    assert convo.id.startswith("conv-")
    assert convo.agent_id == sarah_agent.id
    assert convo.summary == "Test conversation"
@pytest.mark.asyncio
async def test_create_conversation_no_summary(conversation_manager, server: SyncServer, sarah_agent, default_user):
    """Omitting the summary leaves it as None on the created conversation."""
    convo = await conversation_manager.create_conversation(
        agent_id=sarah_agent.id,
        conversation_create=CreateConversation(),
        actor=default_user,
    )
    assert convo.id is not None
    assert convo.agent_id == sarah_agent.id
    assert convo.summary is None
@pytest.mark.asyncio
async def test_get_conversation_by_id(conversation_manager, server: SyncServer, sarah_agent, default_user):
    """Fetching by id returns the same conversation that was created."""
    original = await conversation_manager.create_conversation(
        agent_id=sarah_agent.id,
        conversation_create=CreateConversation(summary="Test"),
        actor=default_user,
    )
    fetched = await conversation_manager.get_conversation_by_id(
        conversation_id=original.id,
        actor=default_user,
    )
    # id, owner, and summary should round-trip unchanged.
    assert (fetched.id, fetched.agent_id, fetched.summary) == (original.id, original.agent_id, original.summary)
@pytest.mark.asyncio
async def test_get_conversation_not_found(conversation_manager, server: SyncServer, default_user):
    """Looking up an id that was never created raises NoResultFound."""
    with pytest.raises(NoResultFound):
        await conversation_manager.get_conversation_by_id(
            conversation_id="conv-nonexistent",
            actor=default_user,
        )
@pytest.mark.asyncio
async def test_list_conversations(conversation_manager, server: SyncServer, sarah_agent, default_user):
    """Listing returns every conversation created for the agent."""
    # Create three conversations for the same agent.
    for idx in range(3):
        await conversation_manager.create_conversation(
            agent_id=sarah_agent.id,
            conversation_create=CreateConversation(summary=f"Conversation {idx}"),
            actor=default_user,
        )
    listed = await conversation_manager.list_conversations(
        agent_id=sarah_agent.id,
        actor=default_user,
    )
    assert len(listed) == 3
@pytest.mark.asyncio
async def test_list_conversations_with_limit(conversation_manager, server: SyncServer, sarah_agent, default_user):
    """The limit parameter caps how many conversations come back."""
    # Five conversations exist, but we only ask for two.
    for idx in range(5):
        await conversation_manager.create_conversation(
            agent_id=sarah_agent.id,
            conversation_create=CreateConversation(summary=f"Conversation {idx}"),
            actor=default_user,
        )
    page = await conversation_manager.list_conversations(
        agent_id=sarah_agent.id,
        actor=default_user,
        limit=2,
    )
    assert len(page) == 2
@pytest.mark.asyncio
async def test_update_conversation(conversation_manager, server: SyncServer, sarah_agent, default_user):
    """Updating the summary keeps the id and persists the new text."""
    convo = await conversation_manager.create_conversation(
        agent_id=sarah_agent.id,
        conversation_create=CreateConversation(summary="Original"),
        actor=default_user,
    )
    result = await conversation_manager.update_conversation(
        conversation_id=convo.id,
        conversation_update=UpdateConversation(summary="Updated summary"),
        actor=default_user,
    )
    assert result.id == convo.id
    assert result.summary == "Updated summary"
@pytest.mark.asyncio
async def test_delete_conversation(conversation_manager, server: SyncServer, sarah_agent, default_user):
    """A soft-deleted conversation is no longer retrievable by id."""
    convo = await conversation_manager.create_conversation(
        agent_id=sarah_agent.id,
        conversation_create=CreateConversation(summary="To delete"),
        actor=default_user,
    )
    await conversation_manager.delete_conversation(
        conversation_id=convo.id,
        actor=default_user,
    )
    # Lookup after deletion must fail.
    with pytest.raises(NoResultFound):
        await conversation_manager.get_conversation_by_id(
            conversation_id=convo.id,
            actor=default_user,
        )
@pytest.mark.asyncio
async def test_delete_conversation_removes_from_list(conversation_manager, server: SyncServer, sarah_agent, default_user):
    """Soft-deleted conversations must be filtered out of list results."""
    kept = await conversation_manager.create_conversation(
        agent_id=sarah_agent.id,
        conversation_create=CreateConversation(summary="Keep me"),
        actor=default_user,
    )
    removed = await conversation_manager.create_conversation(
        agent_id=sarah_agent.id,
        conversation_create=CreateConversation(summary="Delete me"),
        actor=default_user,
    )
    # Delete only the second one.
    await conversation_manager.delete_conversation(
        conversation_id=removed.id,
        actor=default_user,
    )
    remaining = await conversation_manager.list_conversations(
        agent_id=sarah_agent.id,
        actor=default_user,
    )
    remaining_ids = {c.id for c in remaining}
    assert kept.id in remaining_ids
    assert removed.id not in remaining_ids
@pytest.mark.asyncio
async def test_delete_conversation_double_delete_raises(conversation_manager, server: SyncServer, sarah_agent, default_user):
    """Deleting the same conversation twice raises NoResultFound on the second attempt."""
    convo = await conversation_manager.create_conversation(
        agent_id=sarah_agent.id,
        conversation_create=CreateConversation(summary="Delete me twice"),
        actor=default_user,
    )
    # First delete succeeds.
    await conversation_manager.delete_conversation(
        conversation_id=convo.id,
        actor=default_user,
    )
    # Second delete hits an already-soft-deleted row and must raise.
    with pytest.raises(NoResultFound):
        await conversation_manager.delete_conversation(
            conversation_id=convo.id,
            actor=default_user,
        )
@pytest.mark.asyncio
async def test_update_deleted_conversation_raises(conversation_manager, server: SyncServer, sarah_agent, default_user):
    """Attempting to update a soft-deleted conversation raises NoResultFound."""
    convo = await conversation_manager.create_conversation(
        agent_id=sarah_agent.id,
        conversation_create=CreateConversation(summary="Original"),
        actor=default_user,
    )
    await conversation_manager.delete_conversation(
        conversation_id=convo.id,
        actor=default_user,
    )
    # The deleted row is invisible to updates.
    with pytest.raises(NoResultFound):
        await conversation_manager.update_conversation(
            conversation_id=convo.id,
            conversation_update=UpdateConversation(summary="Should fail"),
            actor=default_user,
        )
@pytest.mark.asyncio
async def test_delete_conversation_excluded_from_summary_search(conversation_manager, server: SyncServer, sarah_agent, default_user):
    """Summary search must skip soft-deleted conversations even when their text matches."""
    # One live conversation and one that will be deleted; both match 'alpha'.
    await conversation_manager.create_conversation(
        agent_id=sarah_agent.id,
        conversation_create=CreateConversation(summary="alpha search term"),
        actor=default_user,
    )
    doomed = await conversation_manager.create_conversation(
        agent_id=sarah_agent.id,
        conversation_create=CreateConversation(summary="alpha deleted term"),
        actor=default_user,
    )
    await conversation_manager.delete_conversation(
        conversation_id=doomed.id,
        actor=default_user,
    )
    hits = await conversation_manager.list_conversations(
        agent_id=sarah_agent.id,
        actor=default_user,
        summary_search="alpha",
    )
    assert doomed.id not in [c.id for c in hits]
    assert len(hits) == 1
@pytest.mark.asyncio
async def test_conversation_isolation_by_agent(conversation_manager, server: SyncServer, sarah_agent, charles_agent, default_user):
    """Each agent only sees its own conversations in list results."""
    # One conversation per agent.
    await conversation_manager.create_conversation(
        agent_id=sarah_agent.id,
        conversation_create=CreateConversation(summary="Sarah's conversation"),
        actor=default_user,
    )
    await conversation_manager.create_conversation(
        agent_id=charles_agent.id,
        conversation_create=CreateConversation(summary="Charles's conversation"),
        actor=default_user,
    )
    # Sarah's listing contains only her conversation.
    sarah_listing = await conversation_manager.list_conversations(
        agent_id=sarah_agent.id,
        actor=default_user,
    )
    assert len(sarah_listing) == 1
    assert sarah_listing[0].summary == "Sarah's conversation"
    # Charles's listing contains only his.
    charles_listing = await conversation_manager.list_conversations(
        agent_id=charles_agent.id,
        actor=default_user,
    )
    assert len(charles_listing) == 1
    assert charles_listing[0].summary == "Charles's conversation"
@pytest.mark.asyncio
async def test_conversation_isolation_by_organization(
    conversation_manager, server: SyncServer, sarah_agent, default_user, other_user_different_org
):
    """Users from a different organization cannot read another org's conversations."""
    convo = await conversation_manager.create_conversation(
        agent_id=sarah_agent.id,
        conversation_create=CreateConversation(summary="Test"),
        actor=default_user,
    )
    # Cross-org access must behave as if the conversation does not exist.
    with pytest.raises(NoResultFound):
        await conversation_manager.get_conversation_by_id(
            conversation_id=convo.id,
            actor=other_user_different_org,
        )
# ======================================================================================================================
# Conversation Message Management Tests
# ======================================================================================================================
@pytest.mark.asyncio
async def test_add_messages_to_conversation(
    conversation_manager, server: SyncServer, sarah_agent, default_user, hello_world_message_fixture
):
    """An added message appears in the conversation's message-id list."""
    convo = await conversation_manager.create_conversation(
        agent_id=sarah_agent.id,
        conversation_create=CreateConversation(summary="Test"),
        actor=default_user,
    )
    await conversation_manager.add_messages_to_conversation(
        conversation_id=convo.id,
        agent_id=sarah_agent.id,
        message_ids=[hello_world_message_fixture.id],
        actor=default_user,
    )
    ids = await conversation_manager.get_message_ids_for_conversation(
        conversation_id=convo.id,
        actor=default_user,
    )
    # create_conversation auto-creates a system message at position 0,
    # so the list holds the system message plus our added one.
    assert len(ids) == 2
    assert hello_world_message_fixture.id in ids
@pytest.mark.asyncio
async def test_get_messages_for_conversation(
    conversation_manager, server: SyncServer, sarah_agent, default_user, hello_world_message_fixture
):
    """Full message objects for the conversation include the added message."""
    convo = await conversation_manager.create_conversation(
        agent_id=sarah_agent.id,
        conversation_create=CreateConversation(summary="Test"),
        actor=default_user,
    )
    await conversation_manager.add_messages_to_conversation(
        conversation_id=convo.id,
        agent_id=sarah_agent.id,
        message_ids=[hello_world_message_fixture.id],
        actor=default_user,
    )
    fetched = await conversation_manager.get_messages_for_conversation(
        conversation_id=convo.id,
        actor=default_user,
    )
    # System message (auto-created at position 0) + the fixture message.
    assert len(fetched) == 2
    assert hello_world_message_fixture.id in {m.id for m in fetched}
@pytest.mark.asyncio
async def test_message_ordering_in_conversation(conversation_manager, server: SyncServer, sarah_agent, default_user):
    """Messages added in a given order come back in that same order."""
    from letta.schemas.letta_message_content import TextContent
    from letta.schemas.message import Message as PydanticMessage

    convo = await conversation_manager.create_conversation(
        agent_id=sarah_agent.id,
        conversation_create=CreateConversation(summary="Test"),
        actor=default_user,
    )
    # Persist three user messages with distinguishable content.
    drafts = [
        PydanticMessage(
            agent_id=sarah_agent.id,
            role="user",
            content=[TextContent(text=f"Message {n}")],
        )
        for n in range(3)
    ]
    stored = await server.message_manager.create_many_messages_async(
        drafts,
        actor=default_user,
    )
    expected_order = [m.id for m in stored]
    await conversation_manager.add_messages_to_conversation(
        conversation_id=convo.id,
        agent_id=sarah_agent.id,
        message_ids=expected_order,
        actor=default_user,
    )
    actual = await conversation_manager.get_message_ids_for_conversation(
        conversation_id=convo.id,
        actor=default_user,
    )
    # create_conversation auto-creates a system message at position 0,
    # so the user messages start at index 1.
    assert len(actual) == len(stored) + 1
    assert actual[1:] == expected_order
@pytest.mark.asyncio
async def test_update_in_context_messages(conversation_manager, server: SyncServer, sarah_agent, default_user):
    """Replacing the in-context set keeps only the chosen message ids."""
    from letta.schemas.letta_message_content import TextContent
    from letta.schemas.message import Message as PydanticMessage

    convo = await conversation_manager.create_conversation(
        agent_id=sarah_agent.id,
        conversation_create=CreateConversation(summary="Test"),
        actor=default_user,
    )
    drafts = [
        PydanticMessage(
            agent_id=sarah_agent.id,
            role="user",
            content=[TextContent(text=f"Message {n}")],
        )
        for n in range(3)
    ]
    stored = await server.message_manager.create_many_messages_async(
        drafts,
        actor=default_user,
    )
    # Start with all three in context.
    await conversation_manager.add_messages_to_conversation(
        conversation_id=convo.id,
        agent_id=sarah_agent.id,
        message_ids=[m.id for m in stored],
        actor=default_user,
    )
    # Shrink the context window to just the first and last.
    await conversation_manager.update_in_context_messages(
        conversation_id=convo.id,
        in_context_message_ids=[stored[0].id, stored[2].id],
        actor=default_user,
    )
    in_context = await conversation_manager.get_message_ids_for_conversation(
        conversation_id=convo.id,
        actor=default_user,
    )
    assert len(in_context) == 2
    assert stored[0].id in in_context
    assert stored[2].id in in_context
    assert stored[1].id not in in_context
@pytest.mark.asyncio
async def test_empty_conversation_message_ids(conversation_manager, server: SyncServer, sarah_agent, default_user):
    """A conversation with no user messages still holds its auto-created system message."""
    convo = await conversation_manager.create_conversation(
        agent_id=sarah_agent.id,
        conversation_create=CreateConversation(summary="Empty"),
        actor=default_user,
    )
    ids = await conversation_manager.get_message_ids_for_conversation(
        conversation_id=convo.id,
        actor=default_user,
    )
    # create_conversation auto-creates a system message at position 0,
    # so even an "empty" conversation reports exactly one id.
    assert len(ids) == 1
@pytest.mark.asyncio
async def test_list_conversation_messages(conversation_manager, server: SyncServer, sarah_agent, default_user):
    """Listing conversation messages returns LettaMessages with the expected types."""
    from letta.schemas.letta_message_content import TextContent
    from letta.schemas.message import Message as PydanticMessage

    convo = await conversation_manager.create_conversation(
        agent_id=sarah_agent.id,
        conversation_create=CreateConversation(summary="Test"),
        actor=default_user,
    )
    # One user turn and one assistant turn.
    drafts = [
        PydanticMessage(
            agent_id=sarah_agent.id,
            role="user",
            content=[TextContent(text="Hello!")],
        ),
        PydanticMessage(
            agent_id=sarah_agent.id,
            role="assistant",
            content=[TextContent(text="Hi there!")],
        ),
    ]
    stored = await server.message_manager.create_many_messages_async(
        drafts,
        actor=default_user,
    )
    await conversation_manager.add_messages_to_conversation(
        conversation_id=convo.id,
        agent_id=sarah_agent.id,
        message_ids=[m.id for m in stored],
        actor=default_user,
    )
    letta_messages = await conversation_manager.list_conversation_messages(
        conversation_id=convo.id,
        actor=default_user,
    )
    # The auto-created system message brings the total to three.
    assert len(letta_messages) == 3
    seen_types = {m.message_type for m in letta_messages}
    assert "system_message" in seen_types
    assert "user_message" in seen_types
    assert "assistant_message" in seen_types
@pytest.mark.asyncio
async def test_list_conversation_messages_pagination(conversation_manager, server: SyncServer, sarah_agent, default_user):
    """`limit` and `after` paginate the conversation message listing."""
    from letta.schemas.letta_message_content import TextContent
    from letta.schemas.message import Message as PydanticMessage

    convo = await conversation_manager.create_conversation(
        agent_id=sarah_agent.id,
        conversation_create=CreateConversation(summary="Test"),
        actor=default_user,
    )
    drafts = [
        PydanticMessage(
            agent_id=sarah_agent.id,
            role="user",
            content=[TextContent(text=f"Message {n}")],
        )
        for n in range(5)
    ]
    stored = await server.message_manager.create_many_messages_async(
        drafts,
        actor=default_user,
    )
    await conversation_manager.add_messages_to_conversation(
        conversation_id=convo.id,
        agent_id=sarah_agent.id,
        message_ids=[m.id for m in stored],
        actor=default_user,
    )
    # A limit of 2 caps the page size.
    first_page = await conversation_manager.list_conversation_messages(
        conversation_id=convo.id,
        actor=default_user,
        limit=2,
    )
    assert len(first_page) == 2
    # The `after` cursor skips everything up to and including the first stored message.
    tail = await conversation_manager.list_conversation_messages(
        conversation_id=convo.id,
        actor=default_user,
        after=stored[0].id,
    )
    assert len(tail) == 4  # Should get messages 1-4
# ======================================================================================================================
# Isolated Blocks Tests
# ======================================================================================================================
@pytest.mark.asyncio
async def test_create_conversation_with_isolated_blocks(conversation_manager, server: SyncServer, charles_agent, default_user):
    """Creating a conversation with isolated_block_labels materializes a per-conversation block."""
    # Discover which block labels the agent actually has.
    agent_state = await server.agent_manager.get_agent_by_id_async(charles_agent.id, default_user, include_relationships=["memory"])
    labels = [blk.label for blk in agent_state.memory.blocks]
    assert len(labels) > 0, "Agent should have at least one block"
    chosen_label = labels[0]
    convo = await conversation_manager.create_conversation(
        agent_id=charles_agent.id,
        conversation_create=CreateConversation(
            summary="Test with isolated blocks",
            isolated_block_labels=[chosen_label],
        ),
        actor=default_user,
    )
    assert convo.id is not None
    assert convo.agent_id == charles_agent.id
    assert len(convo.isolated_block_ids) == 1
    # The isolated copy must be retrievable under the same label.
    isolated = await conversation_manager.get_isolated_blocks_for_conversation(
        conversation_id=convo.id,
        actor=default_user,
    )
    assert chosen_label in isolated
    assert isolated[chosen_label].label == chosen_label
@pytest.mark.asyncio
async def test_isolated_blocks_have_different_ids(conversation_manager, server: SyncServer, charles_agent, default_user):
    """An isolated block is a copy: new id, same label, same starting value."""
    agent_state = await server.agent_manager.get_agent_by_id_async(charles_agent.id, default_user, include_relationships=["memory"])
    source_block = agent_state.memory.blocks[0]
    convo = await conversation_manager.create_conversation(
        agent_id=charles_agent.id,
        conversation_create=CreateConversation(
            summary="Test isolated block IDs",
            isolated_block_labels=[source_block.label],
        ),
        actor=default_user,
    )
    isolated = await conversation_manager.get_isolated_blocks_for_conversation(
        conversation_id=convo.id,
        actor=default_user,
    )
    copy = isolated[source_block.label]
    # A genuinely separate block under the hood...
    assert copy.id != source_block.id
    # ...but label and initial value mirror the original.
    assert copy.label == source_block.label
    assert copy.value == source_block.value
@pytest.mark.asyncio
async def test_isolated_blocks_are_conversation_specific(conversation_manager, server: SyncServer, charles_agent, default_user):
    """Two conversations isolating the same label each get their own block copy."""
    state = await server.agent_manager.get_agent_by_id_async(charles_agent.id, default_user, include_relationships=["memory"])
    label = state.memory.blocks[0].label
    # Spin up two conversations that both isolate the same label.
    conversations = []
    for title in ("Conversation 1", "Conversation 2"):
        conversations.append(
            await conversation_manager.create_conversation(
                agent_id=charles_agent.id,
                conversation_create=CreateConversation(
                    summary=title,
                    isolated_block_labels=[label],
                ),
                actor=default_user,
            )
        )
    # Resolve each conversation's isolated copy of the block.
    copies = []
    for conv in conversations:
        blocks = await conversation_manager.get_isolated_blocks_for_conversation(
            conversation_id=conv.id,
            actor=default_user,
        )
        copies.append(blocks[label])
    # The two copies must not share an identity.
    assert copies[0].id != copies[1].id
@pytest.mark.asyncio
async def test_create_conversation_invalid_block_label(conversation_manager, server: SyncServer, charles_agent, default_user):
    """Isolating a label the agent does not have must fail loudly."""
    from letta.errors import LettaInvalidArgumentError

    with pytest.raises(LettaInvalidArgumentError) as err:
        await conversation_manager.create_conversation(
            agent_id=charles_agent.id,
            conversation_create=CreateConversation(
                summary="Test invalid label",
                isolated_block_labels=["nonexistent_block_label"],
            ),
            actor=default_user,
        )
    # The offending label should be surfaced in the error text.
    assert "nonexistent_block_label" in str(err.value)
@pytest.mark.asyncio
async def test_apply_isolated_blocks_to_agent_state(conversation_manager, server: SyncServer, charles_agent, default_user):
    """apply_isolated_blocks_to_agent_state swaps in the conversation's block copy."""
    baseline = await server.agent_manager.get_agent_by_id_async(
        charles_agent.id, default_user, include_relationships=["memory"]
    )
    target = baseline.memory.blocks[0]
    conv = await conversation_manager.create_conversation(
        agent_id=charles_agent.id,
        conversation_create=CreateConversation(
            summary="Test apply isolated blocks",
            isolated_block_labels=[target.label],
        ),
        actor=default_user,
    )
    # Re-read the agent so we apply against a fresh state object.
    fresh = await server.agent_manager.get_agent_by_id_async(charles_agent.id, default_user, include_relationships=["memory"])
    patched = await conversation_manager.apply_isolated_blocks_to_agent_state(
        agent_state=fresh,
        conversation_id=conv.id,
        actor=default_user,
    )
    # The label now resolves to the isolated copy, not the agent's shared block.
    swapped = patched.memory.get_block(target.label)
    assert swapped.id != target.id
    assert swapped.label == target.label
    assert swapped.id in conv.isolated_block_ids
@pytest.mark.asyncio
async def test_conversation_without_isolated_blocks(conversation_manager, server: SyncServer, sarah_agent, default_user):
    """Omitting isolated_block_labels yields a conversation with no block copies."""
    conv = await conversation_manager.create_conversation(
        agent_id=sarah_agent.id,
        conversation_create=CreateConversation(summary="No isolated blocks"),
        actor=default_user,
    )
    assert conv.id is not None
    assert conv.isolated_block_ids == []
    # The per-conversation lookup should likewise come back empty.
    copies = await conversation_manager.get_isolated_blocks_for_conversation(
        conversation_id=conv.id,
        actor=default_user,
    )
    assert copies == {}
@pytest.mark.asyncio
async def test_apply_no_isolated_blocks_preserves_state(conversation_manager, server: SyncServer, charles_agent, default_user):
    """Applying isolation for a conversation with no copies is a no-op on the state."""
    conv = await conversation_manager.create_conversation(
        agent_id=charles_agent.id,
        conversation_create=CreateConversation(summary="No isolated blocks"),
        actor=default_user,
    )
    state = await server.agent_manager.get_agent_by_id_async(charles_agent.id, default_user, include_relationships=["memory"])
    ids_before = [b.id for b in state.memory.blocks]
    result = await conversation_manager.apply_isolated_blocks_to_agent_state(
        agent_state=state,
        conversation_id=conv.id,
        actor=default_user,
    )
    # Every block keeps its original identity and ordering.
    assert ids_before == [b.id for b in result.memory.blocks]
@pytest.mark.asyncio
async def test_delete_conversation_cleans_up_isolated_blocks(conversation_manager, server: SyncServer, charles_agent, default_user):
    """Deleting a conversation hard-deletes its isolated block copies."""
    state = await server.agent_manager.get_agent_by_id_async(charles_agent.id, default_user, include_relationships=["memory"])
    label = state.memory.blocks[0].label
    conv = await conversation_manager.create_conversation(
        agent_id=charles_agent.id,
        conversation_create=CreateConversation(
            summary="Test delete cleanup",
            isolated_block_labels=[label],
        ),
        actor=default_user,
    )
    assert len(conv.isolated_block_ids) == 1
    block_id = conv.isolated_block_ids[0]
    # Sanity check: the copy exists before deletion...
    assert await server.block_manager.get_block_by_id_async(block_id, default_user) is not None
    await conversation_manager.delete_conversation(
        conversation_id=conv.id,
        actor=default_user,
    )
    # ...and is gone (hard-deleted) afterwards.
    assert await server.block_manager.get_block_by_id_async(block_id, default_user) is None
# ======================================================================================================================
# list_conversation_messages with order/reverse Tests
# ======================================================================================================================
@pytest.mark.asyncio
async def test_list_conversation_messages_ascending_order(conversation_manager, server: SyncServer, sarah_agent, default_user):
    """reverse=False lists messages oldest-first (seeded system message leads)."""
    from letta.schemas.letta_message_content import TextContent
    from letta.schemas.message import Message as PydanticMessage

    conv = await conversation_manager.create_conversation(
        agent_id=sarah_agent.id,
        conversation_create=CreateConversation(summary="Test"),
        actor=default_user,
    )
    # Persist three user messages with a known ordering.
    drafts = []
    for i in range(3):
        drafts.append(
            PydanticMessage(
                agent_id=sarah_agent.id,
                role="user",
                content=[TextContent(text=f"Message {i}")],
            )
        )
    stored = await server.message_manager.create_many_messages_async(
        drafts,
        actor=default_user,
    )
    await conversation_manager.add_messages_to_conversation(
        conversation_id=conv.id,
        agent_id=sarah_agent.id,
        message_ids=[m.id for m in stored],
        actor=default_user,
    )
    listing = await conversation_manager.list_conversation_messages(
        conversation_id=conv.id,
        actor=default_user,
        reverse=False,
    )
    # create_conversation seeds a system message, so 3 user messages -> 4 total,
    # with the system message first and "Message 0" right after it.
    assert len(listing) == 4
    assert listing[0].message_type == "system_message"
    assert "Message 0" in listing[1].content
@pytest.mark.asyncio
async def test_list_conversation_messages_descending_order(conversation_manager, server: SyncServer, sarah_agent, default_user):
    """reverse=True lists messages newest-first."""
    from letta.schemas.letta_message_content import TextContent
    from letta.schemas.message import Message as PydanticMessage

    conv = await conversation_manager.create_conversation(
        agent_id=sarah_agent.id,
        conversation_create=CreateConversation(summary="Test"),
        actor=default_user,
    )
    # Persist three user messages with a known ordering.
    stored = await server.message_manager.create_many_messages_async(
        [
            PydanticMessage(
                agent_id=sarah_agent.id,
                role="user",
                content=[TextContent(text=f"Message {i}")],
            )
            for i in range(3)
        ],
        actor=default_user,
    )
    await conversation_manager.add_messages_to_conversation(
        conversation_id=conv.id,
        agent_id=sarah_agent.id,
        message_ids=[m.id for m in stored],
        actor=default_user,
    )
    listing = await conversation_manager.list_conversation_messages(
        conversation_id=conv.id,
        actor=default_user,
        reverse=True,
    )
    # The auto-created system message brings the count to 4;
    # the newest user message ("Message 2") leads in descending order.
    assert len(listing) == 4
    assert "Message 2" in listing[0].content
@pytest.mark.asyncio
async def test_list_conversation_messages_with_group_id_filter(conversation_manager, server: SyncServer, sarah_agent, default_user):
    """group_id narrows the listing to messages tagged with that group."""
    from letta.schemas.letta_message_content import TextContent
    from letta.schemas.message import Message as PydanticMessage

    conv = await conversation_manager.create_conversation(
        agent_id=sarah_agent.id,
        conversation_create=CreateConversation(summary="Test"),
        actor=default_user,
    )
    group_a_id = "group-aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"
    group_b_id = "group-bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb"

    def _draft(text, gid):
        # Tiny factory to keep the fixture data readable.
        return PydanticMessage(
            agent_id=sarah_agent.id,
            role="user",
            content=[TextContent(text=text)],
            group_id=gid,
        )

    created_a = await server.message_manager.create_many_messages_async(
        [_draft("Group A message 1", group_a_id), _draft("Group A message 2", group_a_id)],
        actor=default_user,
    )
    created_b = await server.message_manager.create_many_messages_async(
        [_draft("Group B message 1", group_b_id)],
        actor=default_user,
    )
    await conversation_manager.add_messages_to_conversation(
        conversation_id=conv.id,
        agent_id=sarah_agent.id,
        message_ids=[m.id for m in created_a + created_b],
        actor=default_user,
    )
    # Group A filter -> exactly its two messages.
    only_a = await conversation_manager.list_conversation_messages(
        conversation_id=conv.id,
        actor=default_user,
        group_id=group_a_id,
    )
    assert len(only_a) == 2
    assert all("Group A" in m.content for m in only_a)
    # Group B filter -> its single message.
    only_b = await conversation_manager.list_conversation_messages(
        conversation_id=conv.id,
        actor=default_user,
        group_id=group_b_id,
    )
    assert len(only_b) == 1
    assert "Group B" in only_b[0].content
@pytest.mark.asyncio
async def test_list_conversation_messages_no_group_id_returns_all(conversation_manager, server: SyncServer, sarah_agent, default_user):
    """Without a group_id filter, every message (any group or none) is returned."""
    from letta.schemas.letta_message_content import TextContent
    from letta.schemas.message import Message as PydanticMessage

    conv = await conversation_manager.create_conversation(
        agent_id=sarah_agent.id,
        conversation_create=CreateConversation(summary="Test"),
        actor=default_user,
    )
    group_a_id = "group-aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"
    group_b_id = "group-bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb"
    # One message per grouping scenario: group A, group B, and ungrouped.
    specs = [
        ("Group A message", group_a_id),
        ("Group B message", group_b_id),
        ("No group message", None),
    ]
    stored = await server.message_manager.create_many_messages_async(
        [
            PydanticMessage(
                agent_id=sarah_agent.id,
                role="user",
                content=[TextContent(text=text)],
                group_id=gid,
            )
            for text, gid in specs
        ],
        actor=default_user,
    )
    await conversation_manager.add_messages_to_conversation(
        conversation_id=conv.id,
        agent_id=sarah_agent.id,
        message_ids=[m.id for m in stored],
        actor=default_user,
    )
    everything = await conversation_manager.list_conversation_messages(
        conversation_id=conv.id,
        actor=default_user,
    )
    # 3 user messages + the auto-created system message.
    assert len(everything) == 4
@pytest.mark.asyncio
async def test_list_conversation_messages_order_with_pagination(conversation_manager, server: SyncServer, sarah_agent, default_user):
    """Sort direction determines which messages land on the first page."""
    from letta.schemas.letta_message_content import TextContent
    from letta.schemas.message import Message as PydanticMessage

    conv = await conversation_manager.create_conversation(
        agent_id=sarah_agent.id,
        conversation_create=CreateConversation(summary="Test"),
        actor=default_user,
    )
    stored = await server.message_manager.create_many_messages_async(
        [
            PydanticMessage(
                agent_id=sarah_agent.id,
                role="user",
                content=[TextContent(text=f"Message {i}")],
            )
            for i in range(5)
        ],
        actor=default_user,
    )
    await conversation_manager.add_messages_to_conversation(
        conversation_id=conv.id,
        agent_id=sarah_agent.id,
        message_ids=[m.id for m in stored],
        actor=default_user,
    )
    # Grab the first page of two in each direction.
    oldest_first = await conversation_manager.list_conversation_messages(
        conversation_id=conv.id,
        actor=default_user,
        reverse=False,
        limit=2,
    )
    newest_first = await conversation_manager.list_conversation_messages(
        conversation_id=conv.id,
        actor=default_user,
        reverse=True,
        limit=2,
    )
    # Opposite directions must not start with the same message.
    assert oldest_first[0].content != newest_first[0].content
    # Ascending starts at the seeded system message; descending at "Message 4".
    assert oldest_first[0].message_type == "system_message"
    assert "Message 4" in newest_first[0].content
# ======================================================================================================================
# Model/Model Settings Override Tests
# ======================================================================================================================
@pytest.mark.asyncio
async def test_create_conversation_with_model(conversation_manager, server: SyncServer, sarah_agent, default_user):
    """A model handle supplied at creation is stored on the conversation."""
    conv = await conversation_manager.create_conversation(
        agent_id=sarah_agent.id,
        conversation_create=CreateConversation(summary="Test with model override", model="openai/gpt-4o"),
        actor=default_user,
    )
    assert conv.id is not None
    assert conv.model == "openai/gpt-4o"
    # No settings were given, so none should be persisted.
    assert conv.model_settings is None
@pytest.mark.asyncio
async def test_create_conversation_with_model_and_settings(conversation_manager, server: SyncServer, sarah_agent, default_user):
    """Both the model handle and its settings round-trip through creation."""
    from letta.schemas.model import OpenAIModelSettings

    conv = await conversation_manager.create_conversation(
        agent_id=sarah_agent.id,
        conversation_create=CreateConversation(
            summary="Test with settings",
            model="openai/gpt-4o",
            model_settings=OpenAIModelSettings(temperature=0.5),
        ),
        actor=default_user,
    )
    assert conv.model == "openai/gpt-4o"
    assert conv.model_settings is not None
    assert conv.model_settings.temperature == 0.5
@pytest.mark.asyncio
async def test_create_conversation_without_model_override(conversation_manager, server: SyncServer, sarah_agent, default_user):
    """With no override supplied, both model fields stay unset."""
    conv = await conversation_manager.create_conversation(
        agent_id=sarah_agent.id,
        conversation_create=CreateConversation(summary="No override"),
        actor=default_user,
    )
    assert conv.id is not None
    assert conv.model is None
    assert conv.model_settings is None
@pytest.mark.asyncio
async def test_update_conversation_set_model(conversation_manager, server: SyncServer, sarah_agent, default_user):
    """An update can introduce a model override on a conversation that had none."""
    conv = await conversation_manager.create_conversation(
        agent_id=sarah_agent.id,
        conversation_create=CreateConversation(summary="Original"),
        actor=default_user,
    )
    assert conv.model is None
    # Now set the override via update.
    after = await conversation_manager.update_conversation(
        conversation_id=conv.id,
        conversation_update=UpdateConversation(model="anthropic/claude-3-opus"),
        actor=default_user,
    )
    assert after.model == "anthropic/claude-3-opus"
@pytest.mark.asyncio
async def test_update_conversation_preserves_model(conversation_manager, server: SyncServer, sarah_agent, default_user):
    """Updating an unrelated field leaves the stored model override intact."""
    conv = await conversation_manager.create_conversation(
        agent_id=sarah_agent.id,
        conversation_create=CreateConversation(summary="With override", model="openai/gpt-4o"),
        actor=default_user,
    )
    assert conv.model == "openai/gpt-4o"
    # Touch only the summary.
    after = await conversation_manager.update_conversation(
        conversation_id=conv.id,
        conversation_update=UpdateConversation(summary="New summary"),
        actor=default_user,
    )
    assert after.summary == "New summary"
    assert after.model == "openai/gpt-4o"
@pytest.mark.asyncio
async def test_retrieve_conversation_includes_model(conversation_manager, server: SyncServer, sarah_agent, default_user):
    """get_conversation_by_id returns the stored model and settings."""
    from letta.schemas.model import OpenAIModelSettings

    made = await conversation_manager.create_conversation(
        agent_id=sarah_agent.id,
        conversation_create=CreateConversation(
            summary="Retrieve test",
            model="openai/gpt-4o",
            model_settings=OpenAIModelSettings(temperature=0.7),
        ),
        actor=default_user,
    )
    fetched = await conversation_manager.get_conversation_by_id(
        conversation_id=made.id,
        actor=default_user,
    )
    assert fetched.model == "openai/gpt-4o"
    assert fetched.model_settings is not None
    assert fetched.model_settings.temperature == 0.7
@pytest.mark.asyncio
async def test_list_conversations_includes_model(conversation_manager, server: SyncServer, sarah_agent, default_user):
    """list_conversations carries the model fields through."""
    await conversation_manager.create_conversation(
        agent_id=sarah_agent.id,
        conversation_create=CreateConversation(summary="List test", model="openai/gpt-4o"),
        actor=default_user,
    )
    listed = await conversation_manager.list_conversations(
        agent_id=sarah_agent.id,
        actor=default_user,
    )
    assert len(listed) >= 1
    # Find the conversation we just made by its summary.
    matches = [c for c in listed if c.summary == "List test"]
    assert len(matches) == 1
    assert matches[0].model == "openai/gpt-4o"
@pytest.mark.asyncio
async def test_create_conversation_schema_model_validation():
    """CreateConversation accepts provider/model handles and rejects bare names."""
    from letta.errors import LettaInvalidArgumentError

    # Well-formed "provider/model" handle passes validation.
    assert CreateConversation(model="openai/gpt-4o").model == "openai/gpt-4o"
    # A handle without the slash separator is rejected.
    with pytest.raises(LettaInvalidArgumentError):
        CreateConversation(model="invalid-no-slash")
@pytest.mark.asyncio
async def test_update_conversation_schema_model_validation():
    """UpdateConversation applies the same model-handle format validation."""
    from letta.errors import LettaInvalidArgumentError

    # Well-formed "provider/model" handle passes validation.
    assert UpdateConversation(model="anthropic/claude-3-opus").model == "anthropic/claude-3-opus"
    # A handle without the slash separator is rejected.
    with pytest.raises(LettaInvalidArgumentError):
        UpdateConversation(model="no-slash")
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/managers/test_conversation_manager.py",
"license": "Apache License 2.0",
"lines": 1100,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:alembic/versions/39577145c45d_add_project_constraint_on_tools.py | """add project constraint on tools
Revision ID: 39577145c45d
Revises: d0880aae6cee
Create Date: 2025-12-17 15:46:06.184858
"""
from typing import Sequence, Union
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "39577145c45d"
down_revision: Union[str, None] = "d0880aae6cee"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Enforce tool-name uniqueness per (organization_id, project_id).

    postgresql_nulls_not_distinct=True makes NULL project_id values compare
    equal for the constraint, so org-scoped tools (no project) are unique too.
    """
    op.create_unique_constraint(
        "uix_organization_project_name",
        "tools",
        ["organization_id", "project_id", "name"],
        postgresql_nulls_not_distinct=True,
    )
def downgrade() -> None:
    """Drop the (organization_id, project_id, name) uniqueness constraint from tools."""
    op.drop_constraint("uix_organization_project_name", "tools", type_="unique")
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/39577145c45d_add_project_constraint_on_tools.py",
"license": "Apache License 2.0",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:letta/llm_api/error_utils.py | """Shared helpers for provider error detection/mapping.
Keep these utilities free of heavy imports to avoid circular dependencies between
LLM clients (provider-specific) and streaming interfaces.
"""
def is_context_window_overflow_message(msg: str) -> bool:
    """Best-effort detection for context window overflow errors.

    Different providers (and even different API surfaces within the same provider)
    may phrase context-window errors differently. We centralize the heuristic so
    all layers (clients, streaming interfaces, agent loops) behave consistently.

    Matching is now case-insensitive, consistent with
    is_insufficient_credits_message below; the previous case-sensitive checks
    could miss re-cased provider messages, and the "This model's maximum context
    length is" phrase was redundant with the broader "maximum context length"
    substring. Every message matched before is still matched.
    """
    lowered = msg.lower()
    return (
        "exceeds the context window" in lowered
        or "maximum context length" in lowered
        or "context_length_exceeded" in lowered
        or "input tokens exceed the configured limit" in lowered
    )
def is_insufficient_credits_message(msg: str) -> bool:
    """Best-effort detection for insufficient credits/quota/billing errors.

    BYOK users on OpenRouter, OpenAI, etc. may exhaust their credits mid-stream
    or get rejected pre-flight. We detect these so they map to 402 instead of 400/500.
    """
    text = msg.lower()
    # Simple substring signals, checked case-insensitively.
    phrases = (
        "insufficient credits",
        "requires more credits",
        "add more credits",
        "exceeded your current quota",
        "you've exceeded your budget",
        "can only afford",
    )
    if any(phrase in text for phrase in phrases):
        return True
    # Billing hard-limit errors need both words present.
    return "billing" in text and "hard limit" in text
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/llm_api/error_utils.py",
"license": "Apache License 2.0",
"lines": 32,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
letta-ai/letta:alembic/versions/175dd10fb916_add_prompt_tokens_details_to_steps.py | """Add prompt_tokens_details to steps table
Revision ID: 175dd10fb916
Revises: b1c2d3e4f5a6
Create Date: 2025-11-28 12:00:00.000000
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "175dd10fb916"
down_revision: Union[str, None] = "b1c2d3e4f5a6"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Add steps.prompt_tokens_details, a nullable JSON column holding the
    detailed prompt-token breakdown (cached_tokens, cache_read_tokens,
    cache_creation_tokens)."""
    op.add_column("steps", sa.Column("prompt_tokens_details", sa.JSON(), nullable=True))
def downgrade() -> None:
    """Remove the prompt_tokens_details column from steps."""
    op.drop_column("steps", "prompt_tokens_details")
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/175dd10fb916_add_prompt_tokens_details_to_steps.py",
"license": "Apache License 2.0",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/2e5e90d3cdf8_add_project_id_to_tools.py | """add project_id to tools
Revision ID: 2e5e90d3cdf8
Revises: af842aa6f743
Create Date: 2025-12-03 11:55:57.355341
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "2e5e90d3cdf8"
down_revision: Union[str, None] = "af842aa6f743"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Add a nullable project_id string column to the tools table."""
    op.add_column("tools", sa.Column("project_id", sa.String(), nullable=True))
def downgrade() -> None:
    """Remove the project_id column from tools."""
    op.drop_column("tools", "project_id")
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/2e5e90d3cdf8_add_project_id_to_tools.py",
"license": "Apache License 2.0",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/af842aa6f743_add_tool_indexes_for_organization_id.py | """add tool indexes for organization_id
Revision ID: af842aa6f743
Revises: 175dd10fb916
Create Date: 2025-12-07 15:30:43.407495
"""
from typing import Sequence, Union
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "af842aa6f743"
down_revision: Union[str, None] = "175dd10fb916"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Create lookup indexes on tools(organization_id) and tools(organization_id, name)."""
    op.create_index("ix_tools_organization_id", "tools", ["organization_id"], unique=False)
    op.create_index("ix_tools_organization_id_name", "tools", ["organization_id", "name"], unique=False)
def downgrade() -> None:
    """Drop the two tool indexes created by upgrade().

    BUG FIX: the autogenerated body previously *created* unrelated indexes
    (step_metrics.run_id, messages.step_id) instead of dropping the indexes
    this revision introduced — leaving the tool indexes behind on downgrade
    and erroring (or duplicating) on the unrelated tables.
    """
    op.drop_index("ix_tools_organization_id_name", table_name="tools")
    op.drop_index("ix_tools_organization_id", table_name="tools")
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/af842aa6f743_add_tool_indexes_for_organization_id.py",
"license": "Apache License 2.0",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/d0880aae6cee_add_compaction_settings_to_agents_table.py | """add compaction_settings to agents table
Revision ID: d0880aae6cee
Revises: 2e5e90d3cdf8
Create Date: 2025-12-10 16:17:23.595775
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
from letta.orm.custom_columns import CompactionSettingsColumn
# revision identifiers, used by Alembic.
revision: str = "d0880aae6cee"
down_revision: Union[str, None] = "2e5e90d3cdf8"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Add a nullable compaction_settings column (custom serialized type) to agents."""
    op.add_column("agents", sa.Column("compaction_settings", CompactionSettingsColumn(), nullable=True))
def downgrade() -> None:
    """Remove the compaction_settings column added in upgrade()."""
    op.drop_column("agents", "compaction_settings")
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/d0880aae6cee_add_compaction_settings_to_agents_table.py",
"license": "Apache License 2.0",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
# Word budgets interpolated into the summarization prompts below.
ALL_WORD_LIMIT = 500
SLIDING_WORD_LIMIT = 300

# Prompt for summarizing the ENTIRE conversation so far (summarizer-driven,
# full-history variant); output is capped at ALL_WORD_LIMIT words.
ALL_PROMPT = f"""Your task is to create a detailed summary of the conversation so far, paying close attention to the user's explicit requests and your previous actions.
This summary should be thorough in capturing technical details, code patterns, and architectural decisions that would be essential for continuing development work without losing context. Your summary should include the following sections:
1.**High level goals**: What is the high level goal and ongoing task? Capture the user's explicit requests and intent in detail. If there is an existing summary in the transcript, make sure to take it into consideration to continue tracking the higher level goals and long-term progress.
2. **What happened**: The conversations, tasks, and exchanges that took place. What did the user ask for? What did you do? How did things progress? If there is a previous summary being evicted, please extract a concise version of the critical info from it.
3. **Important details**: Enumerate specific files and code sections examined, modified, or created with a summary of why this file read or edit is important. Include specific names, data, configurations, or facts that were discussed. Don't omit details that might be referenced later.
4. **Errors and fixes**: List all errors that you ran into, and how you fixed them. Pay special attention to specific user feedback that you received and record verbatim if useful.
5. **Current state**:Describe in detail precisely what is currently being worked on, paying special attention to the most recent messages from both user and assistant. Include file names and code snippets where applicable.
6.**Optional Next Step**: List the next step that you will take that is related to the most recent work you were doing. IMPORTANT: ensure that this step is DIRECTLY in line with the user's most recent explicit requests and the most current task. If your last task was concluded, then only list next steps if they are explicitly in line with the users request. If there is a next step, include direct quotes from the most recent conversation showing exactly what task you were working on and where you left off.
7. **Lookup hints**: For any detailed content (long lists, extensive data, specific conversations) that couldn't fit in the summary, note the topic and key terms that could be used to find it in message history later.
Write in first person as a factual record of what occurred. Be concise but thorough - the goal is to preserve enough context that the recent messages make sense and important information isn't lost to prevent duplicate work or repeated mistakes.
Keep your summary under {ALL_WORD_LIMIT} words. Only output the summary."""
# Prompt for summarizing only the EVICTED prefix of the context window
# (sliding-window variant, summarizer-driven); capped at SLIDING_WORD_LIMIT.
SLIDING_PROMPT = f"""The following messages are being evicted from the BEGINNING of your context window. Write a detailed summary that captures what happened in these messages to appear BEFORE the remaining recent messages in context, providing background for what comes after. Include the following sections:
1.**High level goals**: What is the high level goal and ongoing task? Capture the user's explicit requests and intent in detail. If there is an existing summary in the transcript, make sure to take it into consideration to continue tracking the higher level goals and long-term progress.
2. **What happened**: The conversations, tasks, and exchanges that took place. What did the user ask for? What did you do? How did things progress? If there is a previous summary being evicted, please extract a concise version of the critical info from it.
3. **Important details**: Enumerate specific files and code sections examined, modified, or created with a summary of why this file read or edit is important. Include specific names, data, configurations, or facts that were discussed. Don't omit details that might be referenced later.
4. **Errors and fixes**: List all errors that you ran into, and how you fixed them. Pay special attention to specific user feedback that you received and record verbatim if useful.
5. **Lookup hints**: For any detailed content (long lists, extensive data, specific conversations) that couldn't fit in the summary, note the topic and key terms that could be used to find it in message history later.
Write in first person as a factual record of what occurred. Be thorough and detailed - the goal is to preserve enough context that the recent messages make sense and important information isn't lost to prevent duplicate work or repeated mistakes.
Keep your summary under {SLIDING_WORD_LIMIT} words. Only output the summary."""

# Sliding-window variant used when the AGENT ITSELF produces the summary
# mid-conversation; adds explicit "do not continue / do not call tools" guards.
SELF_SLIDING_PROMPT = f"""The previous messages are being evicted from the BEGINNING of your context window. Write a detailed summary that captures what happened in these messages to appear BEFORE the remaining recent messages in context, providing background for what comes after. Do NOT continue the conversation. Do NOT respond to any questions in the messages. Do NOT call any tools. Pay close attention to the user's explicit requests and your previous actions.
You MUST include the following sections:
1.**High level goals**: What is the high level goal and ongoing task? Capture the user's explicit requests and intent in detail. If there is an existing summary in the transcript, make sure to take it into consideration to continue tracking the higher level goals and long-term progress.
2. **What happened**: The conversations, tasks, and exchanges that took place. What did the user ask for? What did you do? How did things progress? If there is a previous summary being evicted, please extract a concise version of the critical info from it.
3. **Important details**: Enumerate specific files and code sections examined, modified, or created with a summary of why this file read or edit is important. Include specific names, data, configurations, or facts that were discussed. Don't omit details that might be referenced later.
4. **Errors and fixes**: List all errors that you ran into, and how you fixed them. Pay special attention to specific user feedback that you received and record verbatim if useful.
5. **Lookup hints**: For any detailed content (long lists, extensive data, specific conversations) that couldn't fit in the summary, note the topic and key terms that could be used to find it in message history later.
Write in first person as a factual record of what occurred. Be thorough and detailed - the goal is to preserve enough context that the recent messages make sense and important information isn't lost to prevent duplicate work or repeated mistakes.
Keep your summary under {SLIDING_WORD_LIMIT} words. IMPORTANT: Do NOT use any tools. Do NOT continue the conversation. You MUST respond with ONLY the summary as text output. Generate the summary with each section as mentioned:
"""
# Full-history variant used when the AGENT ITSELF produces the summary;
# mirrors ALL_PROMPT with added "do not continue / do not call tools" guards.
SELF_ALL_PROMPT = f"""Your task is to create a detailed summary of the conversation so far. Do NOT continue the conversation. Do NOT respond to any questions in the messages. Do NOT call any tools. Pay close attention to the user's explicit requests and your previous actions. This summary should be thorough in capturing technical details, code patterns, and architectural decisions that would be essential for continuing development work without losing context.
You MUST include the following sections:
1.**High level goals**: What is the high level goal and ongoing task? Capture the user's explicit requests and intent in detail. If there is an existing summary in the transcript, make sure to take it into consideration to continue tracking the higher level goals and long-term progress.
2. **What happened**: The conversations, tasks, and exchanges that took place. What did the user ask for? What did you do? How did things progress? If there is a previous summary being evicted, please extract a concise version of the critical info from it.
3. **Important details**: Enumerate specific files and code sections examined, modified, or created with a summary of why this file read or edit is important. Include specific names, data, configurations, or facts that were discussed. Don't omit details that might be referenced later.
4. **Errors and fixes**: List all errors that you ran into, and how you fixed them. Pay special attention to specific user feedback that you received and record verbatim if useful.
5. **Current state**:Describe in detail precisely what is currently being worked on, paying special attention to the most recent messages from both user and assistant. Include file names and code snippets where applicable.
6.**Optional Next Step**: List the next step that you will take that is related to the most recent work you were doing. IMPORTANT: ensure that this step is DIRECTLY in line with the user's most recent explicit requests and the most current task. If your last task was concluded, then only list next steps if they are explicitly in line with the users request. If there is a next step, include direct quotes from the most recent conversation showing exactly what task you were working on and where you left off.
7. **Lookup hints**: For any detailed content (long lists, extensive data, specific conversations) that couldn't fit in the summary, note the topic and key terms that could be used to find it in message history later.
Write in first person as a factual record of what occurred. Be concise but thorough - the goal is to preserve enough context that the recent messages make sense and important information isn't lost to prevent duplicate work or repeated mistakes.
Keep your summary under {ALL_WORD_LIMIT} words.
IMPORTANT: Do NOT use any tools. Do NOT continue the conversation. You MUST respond with ONLY the summary as text output. Generate the summary with each section as mentioned:
"""

# Continuation-summary prompt in Anthropic's recommended style: a structured,
# actionable handoff for resuming an unfinished task in a fresh context window.
ANTHROPIC_SUMMARY_PROMPT = """You have been working on the task described above but have not yet completed it. Write a continuation summary that will allow you (or another instance of yourself) to resume work efficiently in a future context window where the conversation history will be replaced with this summary. Your summary should be structured, concise, and actionable. Include:
1. Task Overview
The user's core request and success criteria
Any clarifications or constraints they specified
2. Current State
What has been completed so far
Files created, modified, or analyzed (with paths if relevant)
Key outputs or artifacts produced
3. Important Discoveries
Technical constraints or requirements uncovered
Decisions made and their rationale
Errors encountered and how they were resolved
What approaches were tried that didn't work (and why)
4. Next Steps
Specific actions needed to complete the task
Any blockers or open questions to resolve
Priority order if multiple steps remain
5. Context to Preserve
User preferences or style requirements
Domain-specific details that aren't obvious
Any promises made to the user
Write the summary from the perspective of the AI (use the first person from the perspective of the AI). Be concise but complete—err on the side of including information that would prevent duplicate work or repeated mistakes. Write in a way that enables immediate resumption of the task.
Only output the summary, do NOT include anything else in your output.
"""
# Compact 4-section eviction summary (summarizer-driven); a shorter
# alternative to SLIDING_PROMPT with the same SLIDING_WORD_LIMIT cap.
SHORTER_SUMMARY_PROMPT = f"""The following messages are being evicted from your context window. Write a detailed summary that captures what happened in these messages.
This summary will appear BEFORE the remaining recent messages in context, providing background for what comes after. Include:
1. **What happened**: The conversations, tasks, and exchanges that took place. What did the user ask for? What did you do? How did things progress?
2. **High level goals**: If there is an existing summary in the transcript, make sure to take it into consideration to continue tracking the higher level goals and long-term progress. Make sure to not lose track of higher level goals or the ongoing task.
3. **Important details**: Specific names, data, configurations, or facts that were discussed. Don't omit details that might be referenced later.
4. **Lookup hints**: For any detailed content (long lists, extensive data, specific conversations) that couldn't fit in the summary, note the topic and key terms that could be used to find it in message history later.
Write in first person as a factual record of what occurred. Be thorough and detailed - the goal is to preserve enough context that the recent messages make sense and important information isn't lost.
Keep your summary under {SLIDING_WORD_LIMIT} words. Only output the summary."""
# Self-summarization prompt (Claude-Code style): the agent analyzes the
# transcript inside <analysis> tags, then emits a structured <summary> block.
# Bug fix: the section list numbered "6." twice (All user messages / Pending
# Tasks) and then ran 7, 8, while the example template in the same string
# numbers the sections 1-9. The list is renumbered to match the template.
SELF_SUMMARIZATION_PROMPT = """Your task is to create a detailed summary of the conversation so far, paying close attention to the user's explicit requests and your previous actions.
This summary should be thorough in capturing technical details, code patterns, and architectural decisions that would be essential for continuing development work without losing context.
Before providing your final summary, wrap your analysis in <analysis> tags to organize your thoughts and ensure you've covered all necessary points. In your analysis process:
1. Chronologically analyze each message and section of the conversation. For each section thoroughly identify:
- The user's explicit requests and intents
- Your approach to addressing the user's requests
- Key decisions, technical concepts and code patterns
- Specific details like:
- file names
- full code snippets
- function signatures
- file edits
- Errors that you ran into and how you fixed them
- Pay special attention to specific user feedback that you received, especially if the user told you to do something differently.
2. Double-check for technical accuracy and completeness, addressing each required element thoroughly.
Your summary should include the following sections:
1. Primary Request and Intent: Capture all of the user's explicit requests and intents in detail
2. Key Technical Concepts: List all important technical concepts, technologies, and frameworks discussed.
3. Files and Code Sections: Enumerate specific files and code sections examined, modified, or created. Pay special attention to the most recent messages and include full code snippets where applicable and include a summary of why this file read or edit is important.
4. Errors and fixes: List all errors that you ran into, and how you fixed them. Pay special attention to specific user feedback that you received, especially if the user told you to do something differently.
5. Problem Solving: Document problems solved and any ongoing troubleshooting efforts.
6. All user messages: List ALL user messages that are not tool results. These are critical for understanding the users' feedback and changing intent.
7. Pending Tasks: Outline any pending tasks that you have explicitly been asked to work on.
8. Current Work: Describe in detail precisely what was being worked on immediately before this summary request, paying special attention to the most recent messages from both user and assistant. Include file names and code snippets where applicable.
9. Optional Next Step: List the next step that you will take that is related to the most recent work you were doing. IMPORTANT: ensure that this step is DIRECTLY in line with the user's most recent explicit requests, and the task you were working on immediately before this summary request. If your last task was concluded, then only list next steps if they are explicitly in line with the users request. Do not start on tangential requests or really old requests that were already completed without confirming with the user first.
If there is a next step, include direct quotes from the most recent conversation showing exactly what task you were working on and where you left off. This should be verbatim to ensure there's no drift in task interpretation.
Here's an example of how your output should be structured:
<example>
<analysis>
[Your thought process, ensuring all points are covered thoroughly and accurately]
</analysis>
<summary>
1. Primary Request and Intent:
[Detailed description]
2. Key Technical Concepts:
- [Concept 1]
- [Concept 2]
- [...]
3. Files and Code Sections:
- [File Name 1]
- [Summary of why this file is important]
- [Summary of the changes made to this file, if any]
- [Important Code Snippet]
- [File Name 2]
- [Important Code Snippet]
- [...]
4. Errors and fixes:
- [Detailed description of error 1]:
- [How you fixed the error]
- [User feedback on the error if any]
- [...]
5. Problem Solving:
[Description of solved problems and ongoing troubleshooting]
6. All user messages:
- [Detailed non tool use user message]
- [...]
7. Pending Tasks:
- [Task 1]
- [Task 2]
- [...]
8. Current Work:
[Precise description of current work]
9. Optional Next Step:
[Optional Next step to take]
</summary>
</example>
Please provide your summary based on the conversation so far, following this structure and ensuring precision and thoroughness in your response.
There may be additional summarization instructions provided in the included context. If so, remember to follow these instructions when creating the above summary. Examples of instructions include:
<example>
## Compact Instructions
When summarizing the conversation focus on typescript code changes and also remember the mistakes you made and how you fixed them.
</example>
<example>
# Summary instructions
When you are using compact - please focus on test output and code changes. Include file reads verbatim.
</example>
IMPORTANT: Do NOT use any tools. You MUST respond with ONLY the <summary>...</summary> block as your text output.
"""
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/prompts/summarizer_prompt.py",
"license": "Apache License 2.0",
"lines": 154,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
letta-ai/letta:letta/server/rest_api/proxy_helpers.py | """
Shared helper functions for Anthropic-compatible proxy endpoints.
These helpers are used by both the Anthropic and Z.ai proxy routers to reduce code duplication.
"""
import json
from fastapi import Request
from letta.log import get_logger
from letta.server.rest_api.utils import capture_and_persist_messages
from letta.settings import model_settings
logger = get_logger(__name__)
def strip_policy_specs(text: str) -> str:
    """
    Remove Claude policy injection blocks from message text.

    Claude injects policy instructions in two forms:
    1. As the entire message: '<policy_spec>...'
    2. Appended with a prefix: 'user: <policy_spec>...'

    Everything from the policy marker onward is injected content, so it is
    truncated away; text without a marker passes through unchanged.
    """
    # Entire message is a policy spec: drop it completely.
    if text.startswith("<policy_spec>"):
        logger.info("[Proxy Helpers] Stripped policy injection (entire message)")
        return ""

    marker_pos = text.find("user: <policy_spec>")
    if marker_pos == -1:
        # No injected policy content present.
        return text

    logger.info(f"[Proxy Helpers] Stripped policy injection from position {marker_pos}")
    # Keep only the genuine user content before the injected block.
    return text[:marker_pos].strip()
def extract_user_messages(body: bytes) -> list[str]:
    """Extract user messages from request body."""
    messages = []
    try:
        payload = json.loads(body)
        messages = payload.get("messages", [])
        collected = []
        for message in messages:
            if message.get("role") != "user":
                continue
            content = message.get("content", "")
            if isinstance(content, str):
                # Strip policy specs; only keep non-empty remainders.
                cleaned = strip_policy_specs(content)
                if cleaned:
                    collected.append(cleaned)
            elif isinstance(content, list):
                for part in content:
                    if not isinstance(part, dict):
                        continue
                    part_type = part.get("type")
                    if part_type == "text":
                        cleaned = strip_policy_specs(part.get("text", ""))
                        if cleaned:
                            collected.append(cleaned)
                    elif part_type == "image":
                        # Images are represented by a placeholder marker.
                        collected.append("[IMAGE]")
        return collected
    except Exception as e:
        logger.warning(f"[Proxy Helpers] Failed to extract user messages from request {messages}: {e}")
        return []
def extract_assistant_message(response_data: dict) -> str:
    """Extract assistant message from response data."""
    content_blocks = []
    try:
        content_blocks = response_data.get("content", [])
        # Keep only text blocks; join them with newlines.
        pieces = [block.get("text", "") for block in content_blocks if isinstance(block, dict) and block.get("type") == "text"]
        return "\n".join(pieces)
    except Exception as e:
        logger.warning(f"[Proxy Helpers] Failed to extract assistant message from response {content_blocks}: {e}")
        return ""
def is_topic_detection_response(message: str) -> bool:
    """
    Check whether the assistant message is a topic detection response
    (a JSON object containing the isNewTopic key).

    These are Claude Code metadata responses that should not be persisted
    as conversation.
    """
    try:
        candidate = message.strip()
        # Cheap shape check before attempting a JSON parse.
        if candidate.startswith("{") and candidate.endswith("}"):
            return "isNewTopic" in json.loads(candidate)
    except (json.JSONDecodeError, AttributeError):
        # Not valid JSON / not a string: treat as a normal message.
        pass
    return False
def prepare_headers(request: Request, proxy_name: str, use_bearer_auth: bool = False) -> dict | None:
    """
    Prepare headers for forwarding to an Anthropic-compatible API.

    Args:
        request: The incoming FastAPI request
        proxy_name: Name of the proxy for logging (e.g., "Anthropic Proxy", "Z.ai Proxy")
        use_bearer_auth: If True, send the API key as a Bearer token in the
            Authorization header (Z.ai style) in addition to x-api-key

    Returns:
        Dictionary of headers to forward. NOTE(review): despite the
        ``dict | None`` annotation, this implementation always returns a
        dict — when no key is supplied it falls back to Letta's configured
        Anthropic key (which may itself be unset).
    """
    # Hop-by-hop / transport headers that must not be forwarded upstream;
    # "authorization" is also skipped so auth can be rebuilt explicitly below.
    skip_headers = {
        "host",
        "connection",
        "content-length",
        "transfer-encoding",
        "content-encoding",
        "te",
        "upgrade",
        "proxy-authenticate",
        "proxy-authorization",
        "authorization",
    }
    headers = {}
    for key, value in request.headers.items():
        if key.lower() not in skip_headers:
            headers[key] = value
    # Extract API key from headers or fallback to letta's key
    api_key = None
    if "x-api-key" in headers:
        api_key = headers["x-api-key"]
    elif "anthropic-api-key" in headers:
        api_key = headers["anthropic-api-key"]
    else:
        # Fallback to letta's anthropic api key if not provided
        api_key = model_settings.anthropic_api_key
        if api_key:
            logger.info(f"[{proxy_name}] Falling back to Letta's anthropic api key instead of user's key")
    # Handle authentication based on proxy type
    if use_bearer_auth:
        # Z.ai: use Bearer token in Authorization header
        if api_key:
            headers["authorization"] = f"Bearer {api_key}"
        # Keep x-api-key in headers too (doesn't hurt)
        if "x-api-key" not in headers and api_key:
            headers["x-api-key"] = api_key
    else:
        # Anthropic: use x-api-key header
        if api_key and "x-api-key" not in headers:
            headers["x-api-key"] = api_key
    # Ensure a content type is always present for the upstream request.
    if "content-type" not in headers:
        headers["content-type"] = "application/json"
    return headers
def format_memory_blocks(blocks, agent_id: str) -> str:
    """Format memory blocks for injection into system prompt.

    Args:
        blocks: Iterable of memory block objects exposing ``label``, ``value``,
            ``description`` and ``limit`` attributes.
        agent_id: Agent ID used to build the memory-management URL.

    Returns:
        The assembled ``<letta>...</letta>`` context string, or ``""`` when
        no block has a non-empty value.
    """
    blocks_with_content = [block for block in blocks if block.value]
    if not blocks_with_content:
        return ""
    # Collect fragments and join once at the end instead of repeated string
    # concatenation (same output, idiomatic and linear-time).
    parts = [
        "<letta>\n"
        "You have persistent memory powered by Letta that is maintained across conversations. "
        "A background agent updates these memory blocks based on conversation content.\n"
        "<memory_blocks>\n"
        "The following memory blocks are currently engaged in your core memory unit:\n\n"
    ]
    for idx, block in enumerate(blocks_with_content):
        label = block.label or "block"
        value = block.value or ""
        desc = block.description or ""
        chars_current = len(value)
        limit = block.limit if block.limit is not None else 0
        parts.append(f"<{label}>\n")
        if desc:
            parts.append(f"<description>\n{desc}\n</description>\n")
        parts.append(f"<metadata>\n- chars_current={chars_current}\n- chars_limit={limit}\n</metadata>\n")
        parts.append(f"<value>\n{value}\n</value>\n")
        parts.append(f"</{label}>\n")
        # Blank line between blocks, but not after the last one.
        if idx != len(blocks_with_content) - 1:
            parts.append("\n")
    parts.append("\n</memory_blocks>\n\n")
    parts.append(
        "<memory_management>\n"
        f"Users can view and edit their memory blocks at:\n"
        f"https://app.letta.com/agents/{agent_id}\n\n"
        "Share this link when users ask how to manage their memory, what you remember about them, or how to view, edit, or delete stored information.\n"
        "</memory_management>\n\n"
        "<documentation>\n"
        "- Memory blocks: https://docs.letta.com/guides/agents/memory-blocks/index.md\n"
        "- Full Letta documentation: https://docs.letta.com/llms.txt\n\n"
        "Reference these when users ask how Letta memory works or want to learn more about the platform.\n"
        "</documentation>\n"
        "</letta>"
    )
    return "".join(parts)
def build_response_from_chunks(chunks: list[bytes]) -> str:
    """Build complete response text from streaming chunks."""
    try:
        collected = []
        decoded = b"".join(chunks).decode("utf-8")
        for raw_line in decoded.split("\n"):
            if not raw_line.startswith("data: "):
                continue
            payload = raw_line[6:]  # strip the "data: " prefix
            if payload.strip() in ["[DONE]", ""]:
                continue
            try:
                event = json.loads(payload)
            except json.JSONDecodeError:
                # Ignore malformed SSE payloads.
                continue
            if event.get("type") == "content_block_delta":
                delta = event.get("delta", {})
                if delta.get("type") == "text_delta":
                    collected.append(delta.get("text", ""))
        return "".join(collected)
    except Exception as e:
        logger.warning(f"[Proxy Helpers] Failed to build response from chunks: {e}")
        return ""
async def inject_memory_context(
    server,
    agent,
    actor,
    request_data: dict,
    proxy_name: str,
) -> dict:
    """
    Inject memory context into the request system prompt.

    Args:
        server: SyncServer instance
        agent: Agent to get memory from
        actor: Actor performing the operation
        request_data: Request data dictionary to modify
        proxy_name: Name of the proxy for logging (e.g., "Anthropic Proxy", "Z.ai Proxy")

    Returns:
        A shallow copy of request_data with memory context injected into
        "system" (and "max_tokens" adjusted if needed), or the original
        request_data unchanged when there is nothing to inject or on error.
    """
    try:
        messages = request_data.get("messages", [])
        if not messages:
            # Nothing to augment — pass the request through untouched.
            return request_data
        memory_context = format_memory_blocks(agent.blocks, agent.id)
        if not memory_context:
            logger.debug(f"[{proxy_name}] No memory blocks found, skipping memory injection")
            return request_data
        block_count = len([b for b in agent.blocks if b.value])
        logger.info(f"[{proxy_name}] Injecting {block_count} memory block(s) into request")
        # Inject into system prompt (shallow copy: only top-level keys change)
        modified_data = request_data.copy()
        # Check if there's already a system prompt
        # Anthropic API accepts system as either a string or list of content blocks
        existing_system = modified_data.get("system", "")
        # Handle both string and list system prompts
        if isinstance(existing_system, list):
            # List case: the memory context block is APPENDED after the
            # existing blocks. NOTE(review): the string case below prepends
            # instead — confirm the ordering difference is intentional.
            modified_data["system"] = [*existing_system, {"type": "text", "text": memory_context.rstrip()}]
        elif existing_system:
            # If it's a non-empty string, prepend our context
            modified_data["system"] = memory_context + existing_system
        else:
            # No existing system prompt
            modified_data["system"] = memory_context.rstrip()
        # Fix max_tokens if using extended thinking
        # Anthropic requires max_tokens > thinking.budget_tokens
        if "thinking" in modified_data and isinstance(modified_data["thinking"], dict):
            budget_tokens = modified_data["thinking"].get("budget_tokens", 0)
            current_max_tokens = modified_data.get("max_tokens", 0)
            if budget_tokens > 0 and current_max_tokens <= budget_tokens:
                # Set max_tokens to budget_tokens + reasonable buffer for response
                # Claude Code typically uses budget_tokens around 10000-20000
                modified_data["max_tokens"] = budget_tokens + 4096
                logger.info(
                    f"[{proxy_name}] Adjusted max_tokens from {current_max_tokens} to {modified_data['max_tokens']} (thinking.budget_tokens={budget_tokens})"
                )
        return modified_data
    except Exception as e:
        # Injection is best-effort: on any failure, forward the request as-is.
        logger.exception(f"[{proxy_name}] Failed to inject memory context: {e}")
        return request_data
async def persist_messages_background(
    server,
    agent,
    actor,
    user_messages: list[str],
    assistant_message: str,
    model_name: str,
    proxy_name: str,
):
    """
    Background task to persist messages without blocking the response.

    This runs asynchronously after the response is returned to minimize latency.

    Args:
        server: SyncServer instance
        agent: Agent to persist messages for
        actor: Actor performing the operation
        user_messages: List of user messages to persist
        assistant_message: Assistant message to persist
        model_name: Model name for the messages
        proxy_name: Name of the proxy for logging (e.g., "Anthropic Proxy", "Z.ai Proxy")
    """
    try:
        result = await capture_and_persist_messages(
            server=server,
            agent=agent,
            actor=actor,
            user_messages=user_messages,
            assistant_message=assistant_message,
            model=model_name,
        )
        if result.get("success"):
            logger.info(f"[{proxy_name}] Persisted messages: {result['messages_created']} messages saved")
        else:
            # Skipping persistence is an expected outcome (e.g. nothing to save),
            # so it is logged at debug level rather than as an error.
            logger.debug(f"[{proxy_name}] Skipped persistence: {result.get('reason', 'unknown')}")
    except Exception as e:
        # Persistence is best-effort; never let a failure here propagate
        # into the background task runner.
        logger.error(f"[{proxy_name}] Failed to persist messages in background: {e}")
async def check_for_duplicate_message(server, agent, actor, user_messages: list[str], proxy_name: str) -> list[str]:
    """
    Check if the last user message is a duplicate of the most recent persisted message.

    Returns a filtered list with duplicates removed to prevent race conditions.
    On any lookup failure the original messages are returned unfiltered
    (best-effort dedup, never blocks persistence).

    Args:
        server: SyncServer instance
        agent: Agent to check messages for
        actor: Actor performing the operation
        user_messages: List of user messages to check
        proxy_name: Name of the proxy for logging

    Returns:
        Filtered list of user messages (empty if duplicate detected)
    """
    # Work on a copy so the caller's list is never mutated.
    user_messages_to_persist = user_messages.copy() if user_messages else []
    if user_messages_to_persist:
        try:
            from letta.schemas.enums import MessageRole

            # Fetch the most recent persisted user messages (newest first).
            recent_messages = await server.message_manager.list_messages(
                agent_id=agent.id,
                actor=actor,
                limit=5,
                roles=[MessageRole.user],
                ascending=False,
            )
            if recent_messages:
                last_user_msg = recent_messages[0]
                # Concatenate all text content blocks of the stored message.
                last_message_text = ""
                if last_user_msg.content:
                    for content_block in last_user_msg.content:
                        if hasattr(content_block, "text"):
                            last_message_text += content_block.text
                # Only the LAST incoming message is compared; an exact match
                # means the whole batch is dropped.
                incoming_msg = user_messages_to_persist[-1]
                if last_message_text and last_message_text == incoming_msg:
                    logger.info(f"[{proxy_name}] Skipping duplicate user message: {incoming_msg[:100]}...")
                    user_messages_to_persist = []
        except Exception as e:
            logger.warning(f"[{proxy_name}] Failed to check for duplicate messages: {e}")
    return user_messages_to_persist
async def backfill_agent_project_id(server, agent, actor, project_id: str):
    """
    Temporary helper to backfill project_id for legacy agents.

    TODO(@caren): Remove this function after all existing Claude Code agents have been backfilled.

    Args:
        server: SyncServer instance
        agent: Agent to update
        actor: Actor performing the operation
        project_id: Project ID to set

    Returns:
        Updated agent, or the original agent (with project_id set in memory
        only) if the persistent update fails
    """
    from letta.schemas.agent import UpdateAgent

    try:
        updated_agent = await server.update_agent_async(
            agent_id=agent.id,
            request=UpdateAgent(project_id=project_id),
            actor=actor,
        )
        logger.info(f"[Backfill] Successfully updated agent {agent.id} with project_id {project_id}")
        return updated_agent
    except Exception as e:
        logger.warning(f"[Backfill] Failed to update agent project_id: {e}. Continuing with in-memory update.")
        # Fallback: continue with in-memory update (not persisted to the DB).
        agent.project_id = project_id
        return agent
async def get_or_create_claude_code_agent(
    server,
    actor,
    project_id: str | None = None,
    agent_id: str | None = None,
):
    """
    Get or create a special agent for Claude Code sessions.

    Resolution order:
      1. If ``agent_id`` is given, fetch that agent directly (header override).
      2. Otherwise look up an existing agent named ``claude-code-<user_short_id>``.
      3. Otherwise create a fresh agent with default memory blocks.

    Args:
        server: SyncServer instance
        actor: Actor performing the operation (user ID)
        project_id: Optional project ID to associate the agent with
        agent_id: Optional specific agent ID to use (from X-LETTA-AGENT-ID header)

    Returns:
        Agent instance

    Raises:
        Exception: re-raised from ``create_agent_async`` if agent creation fails
            (lookup failures are swallowed and fall through to creation).
    """
    from letta.schemas.agent import CreateAgent
    # If a specific agent ID is provided, try to use it directly
    if agent_id:
        logger.debug(f"Attempting to fetch agent by ID: {agent_id}")
        try:
            agent = await server.agent_manager.get_agent_by_id_async(agent_id=agent_id, actor=actor)
            logger.info(f"Found agent via X-LETTA-AGENT-ID header: {agent.id} (name: {agent.name})")
            return agent
        except Exception as e:
            logger.warning(f"Could not find agent with ID {agent_id}: {e}. Falling back to default behavior.")
            # Fall through to default behavior below
    # Create short user identifier from UUID (first 8 chars)
    if actor:
        user_short_id = str(actor.id)[:8] if hasattr(actor, "id") else str(actor)[:8]
    else:
        user_short_id = "default"
    agent_name = f"claude-code-{user_short_id}"
    try:
        # Try to find existing agent by name (most reliable)
        # Note: Search by name only, not tags, since name is unique and more reliable
        logger.debug(f"Searching for agent with name: {agent_name}")
        agents = await server.agent_manager.list_agents_async(
            actor=actor,
            limit=10,  # Get a few in case of duplicates
            name=agent_name,
            include=["agent.blocks", "agent.managed_group", "agent.tags"],
        )
        # list_agents_async returns a list directly, not an object with .agents
        logger.debug(f"Agent search returned {len(agents) if agents else 0} results")
        if agents and len(agents) > 0:
            # Return the first matching agent
            logger.info(f"Found existing Claude Code agent: {agents[0].id} (name: {agent_name})")
            agent = agents[0]
            # Temporary patch: Fix project_id if it's missing (legacy bug)
            # TODO(@caren): Remove this after all existing Claude Code agents have been backfilled
            if not agent.project_id and project_id:
                logger.info(f"[Backfill] Agent {agent.id} missing project_id, backfilling with {project_id}")
                agent = await backfill_agent_project_id(server, agent, actor, project_id)
            return agent
        else:
            logger.debug(f"No existing agent found with name: {agent_name}")
    except Exception as e:
        # Lookup failures are non-fatal: fall through and attempt creation instead.
        logger.warning(f"Could not find existing agent: {e}", exc_info=True)
    # Create new agent
    try:
        logger.info(f"Creating new Claude Code agent: {agent_name} with project_id: {project_id}")
        # Create minimal agent config
        agent_config = CreateAgent(
            name=agent_name,
            description="Agent for capturing Claude Code conversations",
            memory_blocks=[
                {
                    "label": "human",
                    "value": "This is my section of core memory devoted to information about the human.\nI don't yet know anything about them.\nWhat's their name? Where are they from? What do they do? Who are they?\nI should update this memory over time as I interact with the human and learn more about them.",
                    "description": "A memory block for keeping track of the human (user) the agent is interacting with.",
                },
                {
                    "label": "persona",
                    "value": "This is my section of core memory devoted to information myself.\nThere's nothing here yet.\nI should update this memory over time as I develop my personality.",
                    "description": "A memory block for storing the agent's core personality details and behavior profile.",
                },
                {
                    "label": "project",
                    "value": "This is my section of core memory devoted to information about what the agent is working on.\nI don't yet know anything about it.\nI should update this memory over time with high level understanding and learnings.",
                    "description": "A memory block for storing the information about the project the agent is working on.",
                },
            ],
            tags=["claude-code"],
            enable_sleeptime=True,
            agent_type="letta_v1_agent",
            model="anthropic/claude-sonnet-4-5-20250929",
            embedding="openai/text-embedding-ada-002",
            project_id=project_id,
        )
        new_agent = await server.create_agent_async(
            request=agent_config,
            actor=actor,
        )
        logger.info(f"Created Claude Code agent {new_agent.name}: {new_agent.id}")
        return new_agent
    except Exception as e:
        # Creation failure is fatal for this request; surface it to the caller.
        logger.exception(f"Failed to create Claude Code agent: {e}")
        raise
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/server/rest_api/proxy_helpers.py",
"license": "Apache License 2.0",
"lines": 470,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/server/rest_api/routers/v1/zai.py | import asyncio
import httpx
from fastapi import APIRouter, Depends, Request
from fastapi.responses import Response, StreamingResponse
from letta.log import get_logger
from letta.server.rest_api.dependencies import HeaderParams, get_headers, get_letta_server
from letta.server.rest_api.proxy_helpers import (
build_response_from_chunks,
check_for_duplicate_message,
extract_assistant_message,
extract_user_messages,
get_or_create_claude_code_agent,
inject_memory_context,
is_topic_detection_response,
persist_messages_background,
prepare_headers,
)
from letta.server.server import SyncServer
logger = get_logger(__name__)
# Strong references to fire-and-forget persistence tasks so they are not
# garbage-collected before they finish (tasks remove themselves on completion).
_background_tasks: set[asyncio.Task] = set()
router = APIRouter(prefix="/zai", tags=["zai"])
# Upstream base URL for Z.ai's Anthropic-compatible API.
ZAI_API_BASE = "https://api.z.ai/api/anthropic"
# Prefix tag used in every log line emitted by this proxy module.
PROXY_NAME = "Z.ai Proxy"
@router.api_route("/v1/messages", methods=["POST"], operation_id="zai_messages_proxy", include_in_schema=False)
async def zai_messages_proxy(
    request: Request,
    server: SyncServer = Depends(get_letta_server),
    headers: HeaderParams = Depends(get_headers),
):
    """
    Proxy endpoint for Z.ai Messages API.

    This endpoint forwards requests to the Z.ai API, allowing Claude Code CLI
    to use Letta as a proxy by configuring anthropic_base_url.

    Usage in Claude Code CLI settings.json:
    {
        "env": {
            "ANTHROPIC_BASE_URL": "http://localhost:3000/v1/zai"
        }
    }

    Side effects: may create a Claude Code agent for the actor, injects memory
    context into the forwarded request, and schedules a background task that
    persists captured user/assistant messages after the response is returned.
    """
    import json

    # Get the request body
    body = await request.body()
    logger.info(f"[{PROXY_NAME}] Proxying request to Z.ai Messages API: {ZAI_API_BASE}/v1/messages")
    logger.debug(f"[{PROXY_NAME}] Request body preview: {body[:200]}...")
    actor = await server.user_manager.get_actor_or_default_async(headers.actor_id)
    # Extract all user messages from request
    all_user_messages = extract_user_messages(body)
    # Only capture the LAST user message (the new one the user just sent)
    # Claude Code sends full conversation history, but we only want to persist the new message
    user_messages = [all_user_messages[-1]] if all_user_messages else []
    # Filter out system/metadata requests and policy specs
    user_messages = [s for s in user_messages if not s.startswith("<system-reminder>") and not s.startswith("<policy_spec>")]
    if not user_messages:
        logger.debug(f"[{PROXY_NAME}] Skipping capture/memory for this turn")
    zai_headers = prepare_headers(request, PROXY_NAME, use_bearer_auth=True)
    if not zai_headers:
        logger.error(f"[{PROXY_NAME}] No Anthropic API key found in headers or settings")
        return Response(
            content='{"error": {"type": "authentication_error", "message": "Anthropic API key required. Pass via anthropic-api-key or x-api-key header."}}',
            status_code=401,
            media_type="application/json",
        )
    # Check if this is a streaming request
    try:
        request_data = json.loads(body)
        is_streaming = request_data.get("stream", False)
        model_name = request_data.get("model")
        # Extract and remove project_id (internal use only, not for Z.ai API)
        project_id = request_data.pop("project_id", None)
        logger.debug(f"[{PROXY_NAME}] Request is streaming: {is_streaming}")
        logger.debug(f"[{PROXY_NAME}] Model: {model_name}")
        logger.debug(f"[{PROXY_NAME}] Project ID: {project_id}")
    except Exception as e:
        logger.warning(f"[{PROXY_NAME}] Failed to parse request body: {e}")
        # BUGFIX: request_data was previously left undefined on parse failure,
        # which made the `if agent and request_data:` check below raise NameError.
        request_data = None
        is_streaming = False
        model_name = None
        project_id = None
    # Get or create agent for Claude Code session (skip for system requests)
    # Note: Agent lookup and memory search are blocking operations before forwarding.
    # Message persistence happens in the background after the response is returned.
    agent = None
    try:
        agent = await get_or_create_claude_code_agent(
            server=server,
            actor=actor,
            project_id=project_id,
        )
        logger.debug(f"[{PROXY_NAME}] Using agent ID: {agent.id}")
    except Exception as e:
        logger.error(f"[{PROXY_NAME}] Failed to get/create agent: {e}")
    # Inject memory context into request (skip for system requests)
    # TODO: Optimize - skip memory injection on subsequent messages in same session
    # TODO: Add caching layer to avoid duplicate memory searches
    modified_body = body
    if agent and request_data:
        modified_request_data = await inject_memory_context(
            server=server,
            agent=agent,
            actor=actor,
            request_data=request_data,
            proxy_name=PROXY_NAME,
        )
        # Re-encode the modified request
        modified_body = json.dumps(modified_request_data).encode("utf-8")
    # Forward the request to Z.ai API (preserve query params like ?beta=true)
    # Note: For streaming, we create the client outside the generator to keep it alive
    zai_url = f"{ZAI_API_BASE}/v1/messages"
    if request.url.query:
        zai_url = f"{zai_url}?{request.url.query}"
    if is_streaming:
        # Handle streaming response
        collected_chunks = []

        async def stream_response():
            # Create client inside the generator so it stays alive during streaming
            async with httpx.AsyncClient(timeout=300.0) as client:
                async with client.stream(
                    "POST",
                    zai_url,
                    headers=zai_headers,
                    content=modified_body,
                ) as response:
                    async for chunk in response.aiter_bytes():
                        collected_chunks.append(chunk)
                        yield chunk
            # After streaming is complete, extract and log assistant message
            assistant_message = build_response_from_chunks(collected_chunks)
            if user_messages and assistant_message:
                logger.info("=" * 70)
                logger.info("📨 CAPTURED USER MESSAGE:")
                for i, user_message in enumerate(user_messages):
                    logger.info(f"  {i}: {user_message[:200]}{'...' if len(user_message) > 200 else ''}")
                logger.info("=" * 70)
                logger.info("🤖 CAPTURED ASSISTANT RESPONSE (streaming):")
                logger.info(f"  {assistant_message[:200]}{'...' if len(assistant_message) > 200 else ''}")
                logger.info("=" * 70)
                # Skip persisting topic detection responses (metadata, not conversation)
                if is_topic_detection_response(assistant_message):
                    logger.debug(f"[{PROXY_NAME}] Skipping persistence - topic detection response")
                # Persist messages to database (non-blocking, skip for system requests)
                elif agent:
                    # Check for duplicate user messages before creating background task
                    # This prevents race conditions where multiple requests persist the same message
                    user_messages_to_persist = await check_for_duplicate_message(server, agent, actor, user_messages, PROXY_NAME)
                    task = asyncio.create_task(
                        persist_messages_background(
                            server=server,
                            agent=agent,
                            actor=actor,
                            user_messages=user_messages_to_persist,
                            assistant_message=assistant_message,
                            model_name=model_name,
                            proxy_name=PROXY_NAME,
                        )
                    )
                    # Keep a strong reference so the task is not GC'd mid-flight
                    _background_tasks.add(task)
                    task.add_done_callback(_background_tasks.discard)

        return StreamingResponse(
            stream_response(),
            media_type="text/event-stream",
            headers={
                "Cache-Control": "no-cache",
                "Connection": "keep-alive",
            },
        )
    # Non-streaming path
    async with httpx.AsyncClient(timeout=300.0) as client:
        try:
            # Handle non-streaming response
            response = await client.post(
                zai_url,
                headers=zai_headers,
                content=modified_body,
            )
            logger.info(f"Successfully proxied request, status: {response.status_code}")
            # Extract and log assistant message
            if response.status_code == 200:
                try:
                    response_data = json.loads(response.content)
                    assistant_message = extract_assistant_message(response_data)
                    if assistant_message:
                        logger.info("=" * 70)
                        logger.info("🤖 CAPTURED ASSISTANT RESPONSE:")
                        logger.info(f"  {assistant_message[:500]}{'...' if len(assistant_message) > 500 else ''}")
                        logger.info("=" * 70)
                        # Skip persisting topic detection responses (metadata, not conversation)
                        if is_topic_detection_response(assistant_message):
                            logger.debug(f"[{PROXY_NAME}] Skipping persistence - topic detection response")
                        # Persist messages to database (non-blocking)
                        elif agent:
                            # Check for duplicate user messages before creating background task
                            user_messages_to_persist = await check_for_duplicate_message(server, agent, actor, user_messages, PROXY_NAME)
                            task = asyncio.create_task(
                                persist_messages_background(
                                    server=server,
                                    agent=agent,
                                    actor=actor,
                                    user_messages=user_messages_to_persist,
                                    assistant_message=assistant_message,
                                    model_name=model_name,
                                    proxy_name=PROXY_NAME,
                                )
                            )
                            _background_tasks.add(task)
                            task.add_done_callback(_background_tasks.discard)
                except Exception as e:
                    logger.warning(f"[{PROXY_NAME}] Failed to extract assistant response for logging: {e}")
            return Response(
                content=response.content,
                status_code=response.status_code,
                media_type=response.headers.get("content-type", "application/json"),
                headers={
                    k: v
                    for k, v in response.headers.items()
                    if k.lower() not in ["content-encoding", "content-length", "transfer-encoding", "connection"]
                },
            )
        except httpx.HTTPError as e:
            logger.error(f"[{PROXY_NAME}] Error proxying request to Z.ai API: {e}")
            return Response(
                content=f'{{"error": {{"type": "api_error", "message": "Failed to proxy request to Z.ai API: {str(e)}"}}}}',
                status_code=500,
                media_type="application/json",
            )
@router.api_route(
    "/v1/{endpoint:path}",
    methods=["GET", "POST", "PUT", "DELETE", "PATCH"],
    operation_id="zai_catchall_proxy",
    include_in_schema=False,
)
async def zai_catchall_proxy(
    endpoint: str,
    request: Request,
    server: SyncServer = Depends(get_letta_server),
    headers: HeaderParams = Depends(get_headers),
):
    """
    Catch-all proxy for other Z.ai API endpoints.

    Forwards every request other than POST /v1/messages (e.g.
    /v1/messages/count_tokens) straight to Z.ai, with no message capture
    or memory injection.
    """
    # POST /v1/messages belongs to the dedicated route above; refuse it here.
    if endpoint == "messages" and request.method == "POST":
        return Response(
            content='{"error": {"type": "routing_error", "message": "Use specific /v1/messages endpoint"}}',
            status_code=500,
            media_type="application/json",
        )
    # Read the incoming body and rebuild the upstream path.
    body = await request.body()
    upstream_path = f"v1/{endpoint}"
    logger.info(f"[{PROXY_NAME}] Proxying catch-all request: {request.method} /{upstream_path}")
    forward_headers = prepare_headers(request, PROXY_NAME, use_bearer_auth=True)
    if not forward_headers:
        logger.error(f"[{PROXY_NAME}] No Anthropic API key found in headers or settings")
        return Response(
            content='{"error": {"type": "authentication_error", "message": "Anthropic API key required"}}',
            status_code=401,
            media_type="application/json",
        )
    # Headers that must not be echoed back to the client verbatim.
    blocked_headers = {"content-encoding", "content-length", "transfer-encoding", "connection"}
    async with httpx.AsyncClient(timeout=300.0) as client:
        try:
            upstream = await client.request(
                method=request.method,
                url=f"{ZAI_API_BASE}/{upstream_path}",
                headers=forward_headers,
                content=body if body else None,
            )
            passthrough = {k: v for k, v in upstream.headers.items() if k.lower() not in blocked_headers}
            return Response(
                content=upstream.content,
                status_code=upstream.status_code,
                media_type=upstream.headers.get("content-type", "application/json"),
                headers=passthrough,
            )
        except httpx.HTTPError as e:
            logger.error(f"[{PROXY_NAME}] Error proxying catch-all request to Z.ai API: {e}")
            return Response(
                content=f'{{"error": {{"type": "api_error", "message": "Failed to proxy request to Z.ai API: {str(e)}"}}}}',
                status_code=500,
                media_type="application/json",
            )
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/server/rest_api/routers/v1/zai.py",
"license": "Apache License 2.0",
"lines": 290,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/services/summarizer/summarizer_all.py | from typing import List, Optional
from letta.log import get_logger
from letta.otel.tracing import trace_method
from letta.schemas.llm_config import LLMConfig
from letta.schemas.message import Message, MessageRole
from letta.schemas.user import User
from letta.services.summarizer.summarizer import simple_summary
from letta.services.summarizer.summarizer_config import CompactionSettings
logger = get_logger(__name__)
@trace_method
async def summarize_all(
    # Required to tag LLM calls
    actor: User,
    # LLM config for the summarizer model
    llm_config: LLMConfig,
    # Actual summarization configuration
    summarizer_config: CompactionSettings,
    in_context_messages: List[Message],
    # Telemetry context
    agent_id: Optional[str] = None,
    agent_tags: Optional[List[str]] = None,
    run_id: Optional[str] = None,
    step_id: Optional[str] = None,
) -> tuple[str, List[Message]]:
    """
    Summarize the entire conversation history into a single summary.

    A trailing pending approval request (and, when it shares a step_id, the
    assistant message that produced it) is protected from eviction.

    Returns:
        Tuple of:
        - The summary string (clipped to ``summarizer_config.clip_chars`` if set)
        - The messages to keep in context: the system prompt plus any protected
          trailing messages
    """
    # NOTE: fixed return annotation — this function returns a tuple, not a bare str.
    logger.info(
        f"Summarizing all messages (index 1 to {len(in_context_messages) - 2}), keeping last message: {in_context_messages[-1].role}"
    )
    if in_context_messages[-1].role == MessageRole.approval:
        # cannot evict a pending approval request (will cause client-side errors)
        # Also protect the assistant message before it if they share the same step_id
        # (both are part of the same LLM response - assistant has thinking/tool_calls, approval has approval-required subset)
        protected_messages = [in_context_messages[-1]]
        # Check if the message before approval is also from the same step (has reasoning/tool_calls)
        if len(in_context_messages) >= 2:
            potential_assistant = in_context_messages[-2]
            approval_request = in_context_messages[-1]
            if potential_assistant.role == MessageRole.assistant and potential_assistant.step_id == approval_request.step_id:
                # They're part of the same LLM response - protect both
                protected_messages = [potential_assistant, approval_request]
                messages_to_summarize = in_context_messages[1:-2]
            else:
                messages_to_summarize = in_context_messages[1:-1]
        else:
            messages_to_summarize = in_context_messages[1:-1]
    else:
        messages_to_summarize = in_context_messages[1:]
        protected_messages = []
    # TODO: add fallback in case this has a context window error
    summary_message_str = await simple_summary(
        messages=messages_to_summarize,
        llm_config=llm_config,
        actor=actor,
        include_ack=bool(summarizer_config.prompt_acknowledgement),
        prompt=summarizer_config.prompt,
        agent_id=agent_id,
        agent_tags=agent_tags,
        run_id=run_id,
        step_id=step_id,
        compaction_settings={
            "mode": "summarize_all",
            "clip_chars": summarizer_config.clip_chars,
        },
    )
    logger.info(f"Summarized {len(messages_to_summarize)} messages")
    # Safety clip: guarantee the summary itself cannot blow out the context window.
    if summarizer_config.clip_chars is not None and len(summary_message_str) > summarizer_config.clip_chars:
        logger.warning(f"Summary length {len(summary_message_str)} exceeds clip length {summarizer_config.clip_chars}. Truncating.")
        summary_message_str = summary_message_str[: summarizer_config.clip_chars] + "... [summary truncated to fit]"
    return summary_message_str, [in_context_messages[0], *protected_messages]
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/summarizer/summarizer_all.py",
"license": "Apache License 2.0",
"lines": 73,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:letta/services/summarizer/summarizer_config.py | from typing import Literal
from pydantic import BaseModel, Field
from letta.prompts.summarizer_prompt import ALL_PROMPT, SELF_ALL_PROMPT, SELF_SLIDING_PROMPT, SLIDING_PROMPT
from letta.schemas.enums import ProviderType
from letta.schemas.model import ModelSettingsUnion
from letta.settings import summarizer_settings
def get_default_summarizer_model(provider_type: ProviderType) -> str | None:
    """Return the default summarization model handle for the given provider type, or None if the provider has no lightweight default."""
    if provider_type == ProviderType.anthropic:
        return "anthropic/claude-haiku-4-5"
    if provider_type == ProviderType.openai:
        return "openai/gpt-5-mini"
    if provider_type == ProviderType.google_ai:
        return "google_ai/gemini-2.5-flash"
    return None
def get_default_prompt_for_mode(mode: Literal["all", "sliding_window", "self_compact_all", "self_compact_sliding_window"]) -> str:
    """Return the default summarization prompt for a compaction mode.

    Also used in /summarize endpoint if mode is changed and prompt is not
    explicitly set. Unknown modes fall back to the "all" prompt.
    """
    mode_prompts = {
        "self_compact_sliding_window": SELF_SLIDING_PROMPT,
        "self_compact_all": SELF_ALL_PROMPT,
        "sliding_window": SLIDING_PROMPT,
    }
    return mode_prompts.get(mode, ALL_PROMPT)
class CompactionSettings(BaseModel):
    """Configuration for conversation compaction / summarization.

    Per-model settings (temperature,
    max tokens, etc.) are derived from the default configuration for that handle.
    """

    # Summarizer model handle (provider/model-name).
    # If None, uses lightweight provider-specific defaults (e.g., haiku for Anthropic, gpt-5-mini for OpenAI).
    model: str | None = Field(
        default=None,
        description="Model handle to use for sliding_window/all summarization (format: provider/model-name). If None, uses lightweight provider-specific defaults.",
    )
    # Optional provider-specific model settings for the summarizer model
    model_settings: ModelSettingsUnion | None = Field(
        default=None,
        description="Optional model settings used to override defaults for the summarizer model.",
    )
    # Summarization prompt; resolved via set_mode_specific_prompt() when left None.
    prompt: str | None = Field(default=None, description="The prompt to use for summarization. If None, uses mode-specific default.")
    # When True, an acknowledgement post-prompt is appended (see simple_summary's include_ack).
    prompt_acknowledgement: bool = Field(
        default=False, description="Whether to include an acknowledgement post-prompt (helps prevent non-summary outputs)."
    )
    # Hard character cap applied to the produced summary; None disables clipping.
    clip_chars: int | None = Field(
        default=50000, description="The maximum length of the summary in characters. If none, no clipping is performed."
    )
    # Compaction strategy; default prompt depends on this (see get_default_prompt_for_mode).
    mode: Literal["all", "sliding_window", "self_compact_all", "self_compact_sliding_window"] = Field(
        default="sliding_window", description="The type of summarization technique use."
    )
    # Fraction of the context window retained after summarization (sliding-window modes only).
    sliding_window_percentage: float = Field(
        default_factory=lambda: summarizer_settings.partial_evict_summarizer_percentage,
        description="The percentage of the context window to keep post-summarization (only used in sliding window modes).",
    )

    # Called upon agent creation and if mode is changed in summarize endpoint request
    def set_mode_specific_prompt(self):
        """Set mode-specific default prompt if none provided. Returns self for chaining."""
        if self.prompt is None:
            self.prompt = get_default_prompt_for_mode(self.mode)
        return self
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/summarizer/summarizer_config.py",
"license": "Apache License 2.0",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/services/summarizer/summarizer_sliding_window.py | from typing import TYPE_CHECKING, List, Optional, Tuple
if TYPE_CHECKING:
from letta.schemas.tool import Tool
from letta.log import get_logger
from letta.otel.tracing import trace_method
from letta.schemas.enums import MessageRole
from letta.schemas.llm_config import LLMConfig
from letta.schemas.message import Message
from letta.schemas.user import User
from letta.services.context_window_calculator.token_counter import create_token_counter
from letta.services.summarizer.summarizer import simple_summary
from letta.services.summarizer.summarizer_config import CompactionSettings
logger = get_logger(__name__)
# Safety margin for approximate token counting.
# The bytes/4 heuristic underestimates by ~25-35% for JSON-serialized messages
# due to structural overhead (brackets, quotes, colons) each becoming tokens.
# Applied in count_tokens / count_tokens_with_tokens whenever an ApproxTokenCounter is in use.
APPROX_TOKEN_SAFETY_MARGIN = 1.3
async def count_tokens(actor: User, llm_config: LLMConfig, messages: List[Message]) -> int:
    """Count tokens in *messages* using the counter matching the model configuration.

    When the counter is an ApproxTokenCounter, the raw count is inflated by
    APPROX_TOKEN_SAFETY_MARGIN so the heuristic never underestimates.
    """
    counter = create_token_counter(
        model_endpoint_type=llm_config.model_endpoint_type,
        model=llm_config.model,
        actor=actor,
    )
    raw_count = await counter.count_message_tokens(counter.convert_messages(messages))
    from letta.services.context_window_calculator.token_counter import ApproxTokenCounter

    if isinstance(counter, ApproxTokenCounter):
        # Pad approximate counts to avoid overflowing the context window.
        return int(raw_count * APPROX_TOKEN_SAFETY_MARGIN)
    return raw_count
async def count_tokens_with_tools(
    actor: User,
    llm_config: LLMConfig,
    messages: List[Message],
    tools: Optional[List["Tool"]] = None,
) -> int:
    """Count tokens in messages AND tool definitions.

    Tool definitions are sent to the LLM but not included in the messages
    list, so adding them yields a more accurate context token count.

    Args:
        actor: The user making the request.
        llm_config: The LLM configuration for selecting the appropriate tokenizer.
        messages: The in-context messages (including system message).
        tools: Optional list of Tool objects. If provided, their schemas are counted.

    Returns:
        Total token count for messages + tools.
    """
    # Messages are counted by the existing helper (safety margin already applied there).
    message_tokens = await count_tokens(actor, llm_config, messages)
    if not tools:
        return message_tokens
    from openai.types.beta.function_tool import FunctionTool as OpenAITool

    from letta.services.context_window_calculator.token_counter import ApproxTokenCounter

    counter = create_token_counter(
        model_endpoint_type=llm_config.model_endpoint_type,
        model=llm_config.model,
        actor=actor,
    )
    # Tools can be either Tool objects (with .json_schema) or dicts (json schemas directly)
    # For compatibility with how tools need to be passed in for self compaction
    tool_definitions = []
    for entry in tools:
        if (hasattr(entry, "json_schema") and entry.json_schema) or (isinstance(entry, dict) and entry):
            schema = entry.json_schema if hasattr(entry, "json_schema") else entry
            tool_definitions.append(OpenAITool(type="function", function=schema))
    tool_tokens = await counter.count_tool_tokens(tool_definitions) if tool_definitions else 0
    # Pad approximate counts the same way message counting does.
    if isinstance(counter, ApproxTokenCounter):
        tool_tokens = int(tool_tokens * APPROX_TOKEN_SAFETY_MARGIN)
    return message_tokens + tool_tokens
@trace_method
async def summarize_via_sliding_window(
    # Required to tag LLM calls
    actor: User,
    # LLM config for the summarizer model (used to generate the summary)
    llm_config: LLMConfig,
    # LLM config for the agent model (used to determine context window cutoff for eviction)
    agent_llm_config: LLMConfig,
    summarizer_config: CompactionSettings,
    in_context_messages: List[Message],
    # Telemetry context
    agent_id: Optional[str] = None,
    agent_tags: Optional[List[str]] = None,
    run_id: Optional[str] = None,
    step_id: Optional[str] = None,
) -> Tuple[str, List[Message]]:
    """
    If the total tokens is greater than the context window limit (or force=True),
    then summarize and rearrange the in-context messages (with the summary in front).

    Finding the summarization cutoff point (target of final post-summarize count is N% of agent's context window):
    1. Start at a message index cutoff (1-N%)
    2. Count tokens with system prompt, prior summary (if it exists), and messages past cutoff point (messages[0] + messages[cutoff:])
    3. Is count(post_sum_messages) <= N% of agent's context window?
    3a. Yes -> create new summary with [prior summary, cutoff:], and safety truncate summary with char count
    3b. No -> increment cutoff by 10%, and repeat

    Returns:
        - The summary string
        - The list of messages to keep in-context (system prompt + surviving tail)

    Raises:
        ValueError: if no valid cutoff message can be found, or the cutoff would
            evict the protected final message (caller should fall back to full
            summarization).
    """
    system_prompt = in_context_messages[0]
    total_message_count = len(in_context_messages)
    # cannot evict a pending approval request (will cause client-side errors)
    if in_context_messages[-1].role == MessageRole.approval:
        maximum_message_index = total_message_count - 2
    else:
        maximum_message_index = total_message_count - 1
    # simple version: summarize(in_context[1:round(summarizer_config.sliding_window_percentage * len(in_context_messages))])
    # this evicts 30% of the messages (via summarization) and keeps the remaining 70%
    # problem: we need the cutoff point to be an assistant message, so will grow the cutoff point until we find an assistant message
    # also need to grow the cutoff point until the token count is less than the target token count
    # NOTE(review): a previous revision computed max(1 - sliding_window_percentage, 0.10)
    # here but discarded the result (a pure no-op expression); removed. A 10% floor
    # on the keep fraction is therefore NOT currently enforced.
    # Starts at N% (eg 70%), and increments up until 100%
    eviction_percentage = summarizer_config.sliding_window_percentage
    assert summarizer_config.sliding_window_percentage <= 1.0, "Sliding window percentage must be less than or equal to 1.0"
    assistant_message_index = None
    goal_tokens = (1 - summarizer_config.sliding_window_percentage) * agent_llm_config.context_window
    # Seed with the full window so the search loop runs at least once.
    approx_token_count = agent_llm_config.context_window

    # allow approvals to be cutoffs (for headless agents) but ensure proper grouping with tool calls
    def is_valid_cutoff(message: Message):
        if message.role == MessageRole.assistant:
            return True
        if message.role == MessageRole.approval:
            return message.tool_calls is not None and len(message.tool_calls) > 0
        return False

    while approx_token_count >= goal_tokens and eviction_percentage < 1.0:
        # more eviction percentage
        eviction_percentage += 0.10
        # calculate message_cutoff_index
        message_cutoff_index = round(eviction_percentage * total_message_count)
        # get index of first assistant message after the cutoff point ()
        assistant_message_index = next(
            (
                i
                for i in reversed(range(1, message_cutoff_index + 1))
                if i < len(in_context_messages) and is_valid_cutoff(in_context_messages[i])
            ),
            None,
        )
        if assistant_message_index is None:
            logger.warning(
                f"No assistant/approval message found for evicting up to index {message_cutoff_index}, incrementing eviction percentage"
            )
            continue
        # update token count
        logger.info(f"Attempting to compact messages index 1:{assistant_message_index} messages")
        post_summarization_buffer = [system_prompt, *in_context_messages[assistant_message_index:]]
        approx_token_count = await count_tokens(actor, agent_llm_config, post_summarization_buffer)
        logger.info(
            f"Compacting messages index 1:{assistant_message_index} messages resulted in {approx_token_count} tokens, goal is {goal_tokens}"
        )
    if assistant_message_index is None or eviction_percentage >= 1.0:
        raise ValueError("No assistant message found for sliding window summarization")  # fall back to complete summarization
    if assistant_message_index >= maximum_message_index:
        # need to keep the last message (might contain an approval request)
        raise ValueError(f"Assistant message index {assistant_message_index} is at the end of the message buffer, skipping summarization")
    messages_to_summarize = in_context_messages[1:assistant_message_index]
    logger.info(
        f"Summarizing {len(messages_to_summarize)} messages, from index 1 to {assistant_message_index} (out of {total_message_count})"
    )
    summary_message_str = await simple_summary(
        messages=messages_to_summarize,
        llm_config=llm_config,
        actor=actor,
        include_ack=bool(summarizer_config.prompt_acknowledgement),
        prompt=summarizer_config.prompt,
        agent_id=agent_id,
        agent_tags=agent_tags,
        run_id=run_id,
        step_id=step_id,
        compaction_settings={
            "mode": "sliding_window",
            "messages_summarized": len(messages_to_summarize),
            "messages_kept": total_message_count - assistant_message_index,
            "sliding_window_percentage": summarizer_config.sliding_window_percentage,
            "clip_chars": summarizer_config.clip_chars,
        },
    )
    logger.info(f"\n==================\nSummary message string: {summary_message_str[:100]}...\n==================\n")
    # Safety clip: the summary itself must fit comfortably in the window.
    if summarizer_config.clip_chars is not None and len(summary_message_str) > summarizer_config.clip_chars:
        logger.warning(f"Summary length {len(summary_message_str)} exceeds clip length {summarizer_config.clip_chars}. Truncating.")
        summary_message_str = summary_message_str[: summarizer_config.clip_chars] + "... [summary truncated to fit]"
    updated_in_context_messages = in_context_messages[assistant_message_index:]
    return summary_message_str, [system_prompt, *updated_in_context_messages]
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/summarizer/summarizer_sliding_window.py",
"license": "Apache License 2.0",
"lines": 187,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:tests/integration_test_token_counters.py | """
Integration tests for token counting APIs.
These tests verify that the token counting implementations actually hit the real APIs
for Anthropic, Google Gemini, and OpenAI (tiktoken) by calling get_context_window
on an imported agent.
"""
import json
import os
import pytest
from letta.config import LettaConfig
from letta.orm import Base
from letta.schemas.agent import UpdateAgent
from letta.schemas.agent_file import AgentFileSchema
from letta.schemas.llm_config import LLMConfig
from letta.schemas.organization import Organization
from letta.schemas.user import User
from letta.server.server import SyncServer
# ============================================================================
# LLM Configs to test
# ============================================================================
def get_llm_config(filename: str, llm_config_dir: str = "tests/configs/llm_model_configs") -> LLMConfig:
    """Read an LLM configuration JSON file and build an LLMConfig from it.

    Args:
        filename: Name of the JSON config file inside *llm_config_dir*.
        llm_config_dir: Directory holding the per-model config files.
    """
    config_path = os.path.join(llm_config_dir, filename)
    with open(config_path, "r") as config_file:
        raw_config = json.load(config_file)
    return LLMConfig(**raw_config)
# One config file per provider so each token-counting backend in the module
# docstring (OpenAI/tiktoken, Anthropic, Google Gemini) is exercised.
LLM_CONFIG_FILES = [
    "openai-gpt-4o-mini.json",
    "claude-4-5-sonnet.json",
    "gemini-2.5-pro.json",
]
# Pytest params built eagerly at import time; the id drops the ".json" suffix
# for readable parametrized test names.
LLM_CONFIGS = [pytest.param(get_llm_config(f), id=f.replace(".json", "")) for f in LLM_CONFIG_FILES]
# ============================================================================
# Fixtures
# ============================================================================
async def _clear_tables():
    """Delete every row from all ORM-managed tables for test isolation."""
    from letta.server.db import db_registry

    async with db_registry.async_session() as session:
        # Reverse dependency order: child tables are emptied before parents
        # so foreign-key constraints are never violated. The session context
        # manager commits on exit.
        for table in reversed(Base.metadata.sorted_tables):
            await session.execute(table.delete())
@pytest.fixture(autouse=True)
async def clear_tables():
    # Autouse: wipe all database tables before every test in this module.
    await _clear_tables()
@pytest.fixture
async def server():
    """Start a SyncServer with a default org/user and base tools upserted."""
    config = LettaConfig.load()
    config.save()
    server = SyncServer(init_with_default_org_and_user=True)
    await server.init_async()
    # Base tools must exist before agents that reference them are created.
    await server.tool_manager.upsert_base_tools_async(actor=server.default_user)
    yield server
@pytest.fixture
async def default_organization(server: SyncServer):
    """Fixture to create and return the default organization."""
    # No explicit teardown: the autouse clear_tables fixture wipes state.
    org = await server.organization_manager.create_default_organization_async()
    yield org
@pytest.fixture
async def default_user(server: SyncServer, default_organization):
    """Fixture to create and return the default user within the default organization."""
    # Scoped to the default organization created by the fixture above.
    user = await server.user_manager.create_default_actor_async(org_id=default_organization.id)
    yield user
@pytest.fixture
async def other_organization(server: SyncServer):
    """Fixture to create and return another organization."""
    # A second org, used to verify behavior for a non-default actor.
    org = await server.organization_manager.create_organization_async(pydantic_org=Organization(name="test_org"))
    yield org
@pytest.fixture
async def other_user(server: SyncServer, other_organization):
    """Fixture to create and return another user within the other organization."""
    user = await server.user_manager.create_actor_async(pydantic_user=User(organization_id=other_organization.id, name="test_user"))
    yield user
@pytest.fixture
async def imported_agent_id(server: SyncServer, other_user):
    """Import the test agent from the .af file and return the agent ID."""
    af_path = os.path.join(os.path.dirname(__file__), "test_agent_files", "test_agent.af")
    with open(af_path, "r") as fh:
        agent_schema = AgentFileSchema.model_validate(json.load(fh))
    import_result = await server.agent_serialization_manager.import_file(
        schema=agent_schema,
        actor=other_user,
        append_copy_suffix=False,
        override_existing_tools=True,
    )
    assert import_result.success, f"Failed to import agent: {import_result.message}"
    # Resolve the DB id that the file-local "agent-..." id was mapped to.
    agent_id = next(db_id for file_id, db_id in import_result.id_mappings.items() if file_id.startswith("agent-"))
    yield agent_id
# ============================================================================
# Token Counter Integration Test
# ============================================================================
@pytest.mark.asyncio
@pytest.mark.parametrize("llm_config", LLM_CONFIGS)
async def test_get_context_window(server: SyncServer, imported_agent_id: str, other_user, llm_config: LLMConfig):
    """Test get_context_window with different LLM providers."""
    # Update the agent to use the specified LLM config
    await server.agent_manager.update_agent_async(
        agent_id=imported_agent_id,
        agent_update=UpdateAgent(llm_config=llm_config),
        actor=other_user,
    )
    # Call get_context_window which will use the appropriate token counting API
    # for the provider (real API calls per the module docstring).
    context_window = await server.agent_manager.get_context_window(agent_id=imported_agent_id, actor=other_user)
    # Verify we got valid token counts; only the total must be strictly positive.
    assert context_window.context_window_size_current > 0
    assert context_window.num_tokens_system >= 0
    assert context_window.num_tokens_messages >= 0
    assert context_window.num_tokens_functions_definitions >= 0
    print(f"{llm_config.model_endpoint_type} ({llm_config.model}) context window:")
    print(f"  Total tokens: {context_window.context_window_size_current}")
    print(f"  System tokens: {context_window.num_tokens_system}")
    print(f"  Message tokens: {context_window.num_tokens_messages}")
    print(f"  Function tokens: {context_window.num_tokens_functions_definitions}")
# ============================================================================
# Edge Case Tests
# ============================================================================
def _token_counter_for(llm_config: LLMConfig):
    """Build the provider-appropriate token counter for *llm_config*.

    Shared by the empty-input edge-case tests below so the provider dispatch
    logic lives in exactly one place (it was previously triplicated verbatim).

    Returns:
        AnthropicTokenCounter, GeminiTokenCounter, or ApproxTokenCounter
        depending on ``llm_config.model_endpoint_type``.
    """
    # Imports stay function-local, mirroring the original tests, so module
    # import does not require the provider SDKs.
    from letta.llm_api.anthropic_client import AnthropicClient
    from letta.llm_api.google_ai_client import GoogleAIClient
    from letta.llm_api.google_vertex_client import GoogleVertexClient
    from letta.services.context_window_calculator.token_counter import (
        AnthropicTokenCounter,
        ApproxTokenCounter,
        GeminiTokenCounter,
    )

    if llm_config.model_endpoint_type == "anthropic":
        return AnthropicTokenCounter(AnthropicClient(), llm_config.model)
    if llm_config.model_endpoint_type in ("google_vertex", "google_ai"):
        client = GoogleAIClient() if llm_config.model_endpoint_type == "google_ai" else GoogleVertexClient()
        return GeminiTokenCounter(client, llm_config.model)
    # Fallback (e.g. OpenAI): local approximate counting, no API call needed.
    return ApproxTokenCounter()


@pytest.mark.asyncio
@pytest.mark.parametrize("llm_config", LLM_CONFIGS)
async def test_count_empty_text_tokens(llm_config: LLMConfig):
    """Test that empty text returns 0 tokens for all providers."""
    token_counter = _token_counter_for(llm_config)
    assert await token_counter.count_text_tokens("") == 0


@pytest.mark.asyncio
@pytest.mark.parametrize("llm_config", LLM_CONFIGS)
async def test_count_empty_messages_tokens(llm_config: LLMConfig):
    """Test that empty message list returns 0 tokens for all providers."""
    token_counter = _token_counter_for(llm_config)
    assert await token_counter.count_message_tokens([]) == 0


@pytest.mark.asyncio
@pytest.mark.parametrize("llm_config", LLM_CONFIGS)
async def test_count_empty_tools_tokens(llm_config: LLMConfig):
    """Test that empty tools list returns 0 tokens for all providers."""
    token_counter = _token_counter_for(llm_config)
    assert await token_counter.count_tool_tokens([]) == 0
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/integration_test_token_counters.py",
"license": "Apache License 2.0",
"lines": 181,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/integration_test_usage_tracking.py | """
Integration tests for advanced usage tracking (cache tokens, reasoning tokens).
These tests verify that:
1. Cache token data (cached_input_tokens, cache_write_tokens) is captured from providers
2. Reasoning token data is captured from reasoning models
3. The data flows correctly through streaming and non-streaming paths
4. Step-level and run-level aggregation works correctly
Provider-specific cache field mappings:
- Anthropic: cache_read_input_tokens, cache_creation_input_tokens
- OpenAI: prompt_tokens_details.cached_tokens, completion_tokens_details.reasoning_tokens
- Gemini: cached_content_token_count
"""
import json
import logging
import os
import uuid
from typing import Any, List, Optional, Tuple
import pytest
from dotenv import load_dotenv
from letta_client import AsyncLetta
from letta_client.types import (
AgentState,
MessageCreateParam,
)
from letta_client.types.agents import Run
from letta_client.types.agents.letta_streaming_response import LettaUsageStatistics
logger = logging.getLogger(__name__)
# Load environment variables
load_dotenv()
# ------------------------------
# Test Configuration
# ------------------------------
# Model configs for testing - these models should support caching or reasoning
# (handle, model_settings) pairs for providers that report cache-token usage.
CACHE_TEST_CONFIGS = [
    # Anthropic Sonnet 4.5 with prompt caching
    ("anthropic/claude-sonnet-4-5-20250514", {"provider_type": "anthropic"}),
    # OpenAI gpt-4o with prompt caching (Chat Completions API)
    ("openai/gpt-4o", {"provider_type": "openai"}),
    # Gemini 3 Pro Preview with context caching
    ("google_ai/gemini-3.1-pro-preview", {"provider_type": "google_ai"}),
]
# (handle, model_settings) pairs with provider-specific "thinking" options.
REASONING_TEST_CONFIGS = [
    # Anthropic Sonnet 4.5 with thinking enabled
    (
        "anthropic/claude-sonnet-4-5-20250514",
        {"provider_type": "anthropic", "thinking": {"type": "enabled", "budget_tokens": 1024}},
    ),
    # OpenAI gpt-5.1 reasoning model (Responses API)
    ("openai/gpt-5.1", {"provider_type": "openai", "reasoning": {"reasoning_effort": "low"}}),
    # Gemini 3 Pro Preview with thinking enabled
    (
        "google_ai/gemini-3.1-pro-preview",
        {"provider_type": "google_ai", "thinking_config": {"include_thoughts": True, "thinking_budget": 1024}},
    ),
]
# Filter based on environment variable if set, e.g. USAGE_TEST_CONFIG=gpt-4o
# keeps only configs whose handle contains that substring.
requested = os.getenv("USAGE_TEST_CONFIG")
if requested:
    # Filter configs to only include the requested one
    CACHE_TEST_CONFIGS = [(h, s) for h, s in CACHE_TEST_CONFIGS if requested in h]
    REASONING_TEST_CONFIGS = [(h, s) for h, s in REASONING_TEST_CONFIGS if requested in h]
def get_model_config(filename: str, model_settings_dir: str = "tests/model_settings") -> Tuple[str, dict]:
    """Load a model_settings file and return the handle and settings dict.

    The settings default to an empty dict when the file has no
    "model_settings" key.
    """
    with open(os.path.join(model_settings_dir, filename), "r") as fh:
        parsed = json.load(fh)
    return parsed["handle"], parsed.get("model_settings", {})
# ------------------------------
# Fixtures
# ------------------------------
@pytest.fixture
def base_url() -> str:
    """Get the Letta server URL from environment or use default."""
    # LETTA_SERVER_URL overrides the default local server address.
    return os.getenv("LETTA_SERVER_URL", "http://localhost:8283")
@pytest.fixture
async def async_client(base_url: str) -> AsyncLetta:
    """Create an async Letta client."""
    # Token may be None for unauthenticated local servers.
    token = os.getenv("LETTA_SERVER_TOKEN")
    return AsyncLetta(base_url=base_url, token=token)
# ------------------------------
# Helper Functions
# ------------------------------
async def create_test_agent(
    client: AsyncLetta,
    model_handle: str,
    model_settings: dict,
    name_suffix: str = "",
) -> AgentState:
    """Create a test agent with the specified model configuration.

    The name embeds *name_suffix* plus a random hex tail so concurrent test
    runs never collide; base tools are omitted to keep usage runs minimal.
    """
    agent_name = f"usage-test-agent-{name_suffix}-{uuid.uuid4().hex[:8]}"
    return await client.agents.create(
        name=agent_name,
        model=model_handle,
        model_settings=model_settings,
        include_base_tools=False,  # Keep it simple for usage testing
    )
async def cleanup_agent(client: AsyncLetta, agent_id: str) -> None:
    """Delete a test agent, logging (not raising) on failure."""
    try:
        await client.agents.delete(agent_id)
    except Exception as e:
        # Best-effort cleanup: a failed delete must not fail the test itself.
        logger.warning(f"Failed to cleanup agent {agent_id}: {e}")
def extract_usage_from_stream(messages: List[Any]) -> Optional[LettaUsageStatistics]:
    """Extract LettaUsageStatistics from a stream response.

    Scans from the end of the stream and returns the first usage chunk
    found, or None when the stream carried no usage statistics.
    """
    return next((chunk for chunk in reversed(messages) if isinstance(chunk, LettaUsageStatistics)), None)
# ------------------------------
# Cache Token Tests
# ------------------------------
@pytest.mark.asyncio
@pytest.mark.parametrize("model_handle,model_settings", CACHE_TEST_CONFIGS)
async def test_cache_tokens_streaming(
    async_client: AsyncLetta,
    model_handle: str,
    model_settings: dict,
) -> None:
    """
    Test that cache token data is captured in streaming mode.

    Cache hits typically occur on the second+ request with the same context,
    so we send multiple messages to trigger caching.
    """
    agent = await create_test_agent(async_client, model_handle, model_settings, "cache-stream")
    try:
        # First message - likely cache write (cache_creation_tokens for Anthropic)
        messages1: List[Any] = []
        async for chunk in async_client.agents.messages.send_message_streaming(
            agent_id=agent.id,
            messages=[MessageCreateParam(role="user", content="Hello, this is a test message for caching.")],
        ):
            messages1.append(chunk)
        usage1 = extract_usage_from_stream(messages1)
        assert usage1 is not None, "Should receive usage statistics in stream"
        assert usage1.prompt_tokens > 0, "Should have prompt tokens"
        # Log first call usage for debugging
        logger.info(
            f"First call usage ({model_handle}): prompt={usage1.prompt_tokens}, "
            f"cached_input={usage1.cached_input_tokens}, cache_write={usage1.cache_write_tokens}"
        )
        # Second message - same agent/context should trigger cache hits
        messages2: List[Any] = []
        async for chunk in async_client.agents.messages.send_message_streaming(
            agent_id=agent.id,
            messages=[MessageCreateParam(role="user", content="This is a follow-up message.")],
        ):
            messages2.append(chunk)
        usage2 = extract_usage_from_stream(messages2)
        assert usage2 is not None, "Should receive usage statistics in stream"
        # Log second call usage
        logger.info(
            f"Second call usage ({model_handle}): prompt={usage2.prompt_tokens}, "
            f"cached_input={usage2.cached_input_tokens}, cache_write={usage2.cache_write_tokens}"
        )
        # Verify cache fields exist (values may be 0 if caching not available for this model/config)
        assert hasattr(usage2, "cached_input_tokens"), "Should have cached_input_tokens field"
        assert hasattr(usage2, "cache_write_tokens"), "Should have cache_write_tokens field"
        # For providers with caching enabled, we expect either:
        # - cache_write_tokens > 0 on first call (writing to cache)
        # - cached_input_tokens > 0 on second call (reading from cache)
        # Note: Not all providers always return cache data, so we just verify the fields exist
    finally:
        # Always delete the agent, even when assertions above fail.
        await cleanup_agent(async_client, agent.id)
@pytest.mark.asyncio
@pytest.mark.parametrize("model_handle,model_settings", CACHE_TEST_CONFIGS)
async def test_cache_tokens_non_streaming(
    async_client: AsyncLetta,
    model_handle: str,
    model_settings: dict,
) -> None:
    """
    Test that cache token data is captured in non-streaming (blocking) mode.
    """
    agent = await create_test_agent(async_client, model_handle, model_settings, "cache-blocking")
    try:
        # First message - may trigger a cache write on caching providers.
        response1: Run = await async_client.agents.messages.send_message(
            agent_id=agent.id,
            messages=[MessageCreateParam(role="user", content="Hello, this is a test message for caching.")],
        )
        assert response1.usage is not None, "Should have usage in response"
        logger.info(
            f"First call usage ({model_handle}): prompt={response1.usage.prompt_tokens}, "
            f"cached_input={response1.usage.cached_input_tokens}, cache_write={response1.usage.cache_write_tokens}"
        )
        # Second message - should trigger cache hit
        response2: Run = await async_client.agents.messages.send_message(
            agent_id=agent.id,
            messages=[MessageCreateParam(role="user", content="This is a follow-up message.")],
        )
        assert response2.usage is not None, "Should have usage in response"
        logger.info(
            f"Second call usage ({model_handle}): prompt={response2.usage.prompt_tokens}, "
            f"cached_input={response2.usage.cached_input_tokens}, cache_write={response2.usage.cache_write_tokens}"
        )
        # Verify cache fields exist (values may be 0 when the provider did not cache)
        assert hasattr(response2.usage, "cached_input_tokens"), "Should have cached_input_tokens field"
        assert hasattr(response2.usage, "cache_write_tokens"), "Should have cache_write_tokens field"
    finally:
        # Always delete the agent, even when assertions above fail.
        await cleanup_agent(async_client, agent.id)
# ------------------------------
# Reasoning Token Tests
# ------------------------------
@pytest.mark.asyncio
@pytest.mark.parametrize("model_handle,model_settings", REASONING_TEST_CONFIGS)
async def test_reasoning_tokens_streaming(
    async_client: AsyncLetta,
    model_handle: str,
    model_settings: dict,
) -> None:
    """
    Test that reasoning token data is captured from reasoning models in streaming mode.
    """
    agent = await create_test_agent(async_client, model_handle, model_settings, "reasoning-stream")
    try:
        messages: List[Any] = []
        async for chunk in async_client.agents.messages.send_message_streaming(
            agent_id=agent.id,
            messages=[MessageCreateParam(role="user", content="Think step by step: what is 2 + 2? Explain your reasoning.")],
        ):
            messages.append(chunk)
        usage = extract_usage_from_stream(messages)
        assert usage is not None, "Should receive usage statistics in stream"
        logger.info(
            f"Reasoning usage ({model_handle}): prompt={usage.prompt_tokens}, "
            f"completion={usage.completion_tokens}, reasoning={usage.reasoning_tokens}"
        )
        # Verify reasoning_tokens field exists
        assert hasattr(usage, "reasoning_tokens"), "Should have reasoning_tokens field"
        # For reasoning models, we expect reasoning_tokens > 0
        # Note: Some providers may not always return reasoning token counts
        if "gpt-5" in model_handle or "o3" in model_handle or "o1" in model_handle:
            # OpenAI reasoning models should always have reasoning tokens
            assert usage.reasoning_tokens > 0, f"OpenAI reasoning model {model_handle} should have reasoning_tokens > 0"
    finally:
        # Always delete the agent, even when assertions above fail.
        await cleanup_agent(async_client, agent.id)
@pytest.mark.asyncio
@pytest.mark.parametrize("model_handle,model_settings", REASONING_TEST_CONFIGS)
async def test_reasoning_tokens_non_streaming(
    async_client: AsyncLetta,
    model_handle: str,
    model_settings: dict,
) -> None:
    """
    Test that reasoning token data is captured from reasoning models in non-streaming mode.
    """
    agent = await create_test_agent(async_client, model_handle, model_settings, "reasoning-blocking")
    try:
        response: Run = await async_client.agents.messages.send_message(
            agent_id=agent.id,
            messages=[MessageCreateParam(role="user", content="Think step by step: what is 2 + 2? Explain your reasoning.")],
        )
        assert response.usage is not None, "Should have usage in response"
        logger.info(
            f"Reasoning usage ({model_handle}): prompt={response.usage.prompt_tokens}, "
            f"completion={response.usage.completion_tokens}, reasoning={response.usage.reasoning_tokens}"
        )
        # Verify reasoning_tokens field exists
        assert hasattr(response.usage, "reasoning_tokens"), "Should have reasoning_tokens field"
        # For OpenAI reasoning models, we expect reasoning_tokens > 0
        # (other providers may omit the count, so no hard assertion for them)
        if "gpt-5" in model_handle or "o3" in model_handle or "o1" in model_handle:
            assert response.usage.reasoning_tokens > 0, f"OpenAI reasoning model {model_handle} should have reasoning_tokens > 0"
    finally:
        # Always delete the agent, even when assertions above fail.
        await cleanup_agent(async_client, agent.id)
# ------------------------------
# Step-Level Usage Tests
# ------------------------------
@pytest.mark.asyncio
@pytest.mark.parametrize("model_handle,model_settings", CACHE_TEST_CONFIGS[:1])  # Test with one config
async def test_step_level_usage_details(
    async_client: AsyncLetta,
    model_handle: str,
    model_settings: dict,
) -> None:
    """
    Test that step-level usage details (prompt_tokens_details, completion_tokens_details)
    are properly persisted and retrievable.
    """
    agent = await create_test_agent(async_client, model_handle, model_settings, "step-details")
    try:
        # Send a message to create a step
        response: Run = await async_client.agents.messages.send_message(
            agent_id=agent.id,
            messages=[MessageCreateParam(role="user", content="Hello!")],
        )
        # Get the run's steps
        steps = await async_client.runs.list_steps(run_id=response.id)
        assert len(steps) > 0, "Should have at least one step"
        # Inspect the first step of the run.
        step = steps[0]
        logger.info(
            f"Step usage ({model_handle}): prompt_tokens={step.prompt_tokens}, "
            f"prompt_tokens_details={step.prompt_tokens_details}, "
            f"completion_tokens_details={step.completion_tokens_details}"
        )
        # Verify the step has the usage fields
        assert step.prompt_tokens > 0, "Step should have prompt_tokens"
        assert step.completion_tokens >= 0, "Step should have completion_tokens"
        assert step.total_tokens > 0, "Step should have total_tokens"
        # The details fields may be None if no cache/reasoning was involved,
        # but they should be present in the schema
        # Note: This test mainly verifies the field exists and can hold data
    finally:
        # Always delete the agent, even when assertions above fail.
        await cleanup_agent(async_client, agent.id)
# ------------------------------
# Run-Level Aggregation Tests
# ------------------------------
@pytest.mark.asyncio
@pytest.mark.parametrize("model_handle,model_settings", CACHE_TEST_CONFIGS[:1])  # Test with one config
async def test_run_level_usage_aggregation(
    async_client: AsyncLetta,
    model_handle: str,
    model_settings: dict,
) -> None:
    """
    Test that run-level usage correctly aggregates cache/reasoning tokens from steps.
    """
    agent = await create_test_agent(async_client, model_handle, model_settings, "run-aggregation")
    try:
        # Send multiple messages to create multiple steps
        await async_client.agents.messages.send_message(
            agent_id=agent.id,
            messages=[MessageCreateParam(role="user", content="Message 1")],
        )
        response2: Run = await async_client.agents.messages.send_message(
            agent_id=agent.id,
            messages=[MessageCreateParam(role="user", content="Message 2")],
        )
        # Get run usage for the second run (which should have accumulated context)
        run_usage = await async_client.runs.get_run_usage(run_id=response2.id)
        logger.info(
            f"Run usage ({model_handle}): prompt={run_usage.prompt_tokens}, "
            f"completion={run_usage.completion_tokens}, total={run_usage.total_tokens}, "
            f"cached_input={run_usage.cached_input_tokens}, cache_write={run_usage.cache_write_tokens}, "
            f"reasoning={run_usage.reasoning_tokens}"
        )
        # Verify the run usage has all the expected fields
        assert run_usage.prompt_tokens >= 0, "Run should have prompt_tokens"
        assert run_usage.completion_tokens >= 0, "Run should have completion_tokens"
        assert run_usage.total_tokens >= 0, "Run should have total_tokens"
        # Aggregated cache/reasoning fields must at least exist on the schema.
        assert hasattr(run_usage, "cached_input_tokens"), "Run should have cached_input_tokens"
        assert hasattr(run_usage, "cache_write_tokens"), "Run should have cache_write_tokens"
        assert hasattr(run_usage, "reasoning_tokens"), "Run should have reasoning_tokens"
    finally:
        # Always delete the agent, even when assertions above fail.
        await cleanup_agent(async_client, agent.id)
# ------------------------------
# Comprehensive End-to-End Test
# ------------------------------
@pytest.mark.asyncio
async def test_usage_tracking_end_to_end(async_client: AsyncLetta) -> None:
    """
    End-to-end test that verifies the complete usage tracking flow:
    1. Create agent with a model that supports caching
    2. Send messages to trigger cache writes and reads
    3. Verify step-level details are persisted
    4. Verify run-level aggregation is correct
    """
    # Use Anthropic Sonnet 4.5 for this test as it has the most comprehensive caching
    model_handle = "anthropic/claude-sonnet-4-5-20250514"
    model_settings = {"provider_type": "anthropic"}
    agent = await create_test_agent(async_client, model_handle, model_settings, "e2e")
    try:
        # Send first message (should trigger cache write); repeated text pads
        # the prompt so it is long enough for the provider to consider caching.
        response1: Run = await async_client.agents.messages.send_message(
            agent_id=agent.id,
            messages=[MessageCreateParam(role="user", content="This is a longer message to ensure there's enough content to cache. " * 5)],
        )
        logger.info(f"E2E Test - First message usage: {response1.usage}")
        # Send second message (should trigger cache read)
        response2: Run = await async_client.agents.messages.send_message(
            agent_id=agent.id,
            messages=[MessageCreateParam(role="user", content="Short follow-up")],
        )
        logger.info(f"E2E Test - Second message usage: {response2.usage}")
        # Verify basic usage is tracked
        assert response1.usage is not None
        assert response2.usage is not None
        assert response1.usage.prompt_tokens > 0
        assert response2.usage.prompt_tokens > 0
        # Get steps for the second run
        steps = await async_client.runs.list_steps(run_id=response2.id)
        assert len(steps) > 0, "Should have steps for the run"
        # Get run-level usage
        run_usage = await async_client.runs.get_run_usage(run_id=response2.id)
        assert run_usage.total_tokens > 0, "Run should have total tokens"
        logger.info(
            f"E2E Test - Run usage: prompt={run_usage.prompt_tokens}, "
            f"completion={run_usage.completion_tokens}, "
            f"cached_input={run_usage.cached_input_tokens}, "
            f"cache_write={run_usage.cache_write_tokens}"
        )
        # The test passes if we get here without errors - cache data may or may not be present
        # depending on whether the provider actually cached the content
    finally:
        await cleanup_agent(async_client, agent.id)
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/integration_test_usage_tracking.py",
"license": "Apache License 2.0",
"lines": 394,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/managers/test_cancellation.py | """
Tests for agent cancellation at different points in the execution loop.
These tests use mocking and deterministic control to test cancellation at specific
points in the agent execution flow, covering all the issues documented in CANCELLATION_ISSUES.md.
"""
import asyncio
import pytest
from letta.agents.agent_loop import AgentLoop
from letta.constants import TOOL_CALL_DENIAL_ON_CANCEL
from letta.schemas.agent import CreateAgent
from letta.schemas.embedding_config import EmbeddingConfig
from letta.schemas.enums import MessageRole, RunStatus
from letta.schemas.letta_request import LettaStreamingRequest
from letta.schemas.llm_config import LLMConfig
from letta.schemas.message import MessageCreate
from letta.schemas.run import Run as PydanticRun
from letta.server.server import SyncServer
from letta.services.streaming_service import StreamingService
@pytest.fixture
async def test_agent_with_tool(server: SyncServer, default_user, print_tool):
    """Create a test agent with letta_v1_agent type (uses LettaAgentV3)."""
    agent_state = await server.agent_manager.create_agent_async(
        agent_create=CreateAgent(
            name="test_cancellation_agent",
            agent_type="letta_v1_agent",
            memory_blocks=[],
            llm_config=LLMConfig.default_config("gpt-4o-mini"),
            embedding_config=EmbeddingConfig.default_config(provider="openai"),
            tool_ids=[print_tool.id],
            include_base_tools=False,  # only the print tool is attached
        ),
        actor=default_user,
    )
    yield agent_state
@pytest.fixture
async def test_run(server: SyncServer, default_user, test_agent_with_tool):
    """Create a test run for cancellation testing."""
    # A fresh Run in 'created' status, bound to the fixture agent.
    run = await server.run_manager.create_run(
        pydantic_run=PydanticRun(
            agent_id=test_agent_with_tool.id,
            status=RunStatus.created,
        ),
        actor=default_user,
    )
    yield run
class TestMessageStateDesyncIssues:
"""
Test Issue #2: Message State Desync Issues
Tests that message state stays consistent between client and server during cancellation.
"""
    @pytest.mark.asyncio
    async def test_message_state_consistency_after_cancellation(
        self,
        server: SyncServer,
        default_user,
        test_agent_with_tool,
        test_run,
    ):
        """
        Test that message state is consistent after cancellation.

        Verifies:
        - response_messages list matches persisted messages
        - response_messages_for_metadata list matches persisted messages
        - agent.message_ids includes all persisted messages
        """
        # Load agent loop
        agent_loop = AgentLoop.load(agent_state=test_agent_with_tool, actor=default_user)
        input_messages = [MessageCreate(role=MessageRole.user, content="Call print_tool with 'test'")]
        # Cancel after first step
        call_count = [0]

        async def mock_check_cancellation(run_id):
            # List cell keeps the counter mutable from inside this closure.
            call_count[0] += 1
            if call_count[0] > 1:
                # From the second probe onwards, actually cancel the run.
                await server.run_manager.cancel_run(
                    actor=default_user,
                    run_id=run_id,
                )
                return True
            return False

        # Monkey-patch the loop's cancellation probe with the counting stub.
        agent_loop._check_run_cancellation = mock_check_cancellation
        # Execute step
        result = await agent_loop.step(
            input_messages=input_messages,
            max_steps=5,
            run_id=test_run.id,
        )
        # Get messages from database
        db_messages = await server.message_manager.list_messages(
            actor=default_user,
            agent_id=test_agent_with_tool.id,
            run_id=test_run.id,
            limit=1000,
        )
        # Verify response_messages count matches result messages
        assert len(agent_loop.response_messages) == len(result.messages), (
            f"response_messages ({len(agent_loop.response_messages)}) should match result.messages ({len(result.messages)})"
        )
        # Verify persisted message count is reasonable
        assert len(db_messages) > 0, "Should have persisted messages from completed step"
        # CRITICAL CHECK: Verify agent state after cancellation
        agent_after_cancel = await server.agent_manager.get_agent_by_id_async(
            agent_id=test_agent_with_tool.id,
            actor=default_user,
        )
        # Verify last_stop_reason is set to cancelled
        assert agent_after_cancel.last_stop_reason == "cancelled", (
            f"Agent's last_stop_reason should be 'cancelled', got '{agent_after_cancel.last_stop_reason}'"
        )
        agent_message_ids = set(agent_after_cancel.message_ids or [])
        db_message_ids = {m.id for m in db_messages}
        # Check for desync: every message in DB must be in agent.message_ids
        messages_in_db_not_in_agent = db_message_ids - agent_message_ids
        assert len(messages_in_db_not_in_agent) == 0, (
            f"MESSAGE DESYNC: {len(messages_in_db_not_in_agent)} messages in DB but not in agent.message_ids\n"
            f"Missing message IDs: {messages_in_db_not_in_agent}\n"
            f"This indicates message_ids was not updated after cancellation."
        )
    @pytest.mark.asyncio
    async def test_agent_can_continue_after_cancellation(
        self,
        server: SyncServer,
        default_user,
        test_agent_with_tool,
        test_run,
    ):
        """
        Test that agent can continue execution after a cancelled run.

        Verifies:
        - Agent state is not corrupted after cancellation
        - Subsequent runs complete successfully
        - Message IDs are properly updated
        """
        # Load agent loop
        agent_loop = AgentLoop.load(agent_state=test_agent_with_tool, actor=default_user)
        # First run: cancel it
        input_messages_1 = [MessageCreate(role=MessageRole.user, content="First message")]
        # Cancel immediately, before step() starts, so the run is already cancelled.
        await server.run_manager.cancel_run(
            actor=default_user,
            run_id=test_run.id,
        )
        result_1 = await agent_loop.step(
            input_messages=input_messages_1,
            max_steps=5,
            run_id=test_run.id,
        )
        assert result_1.stop_reason.stop_reason == "cancelled"
        # Get agent state after cancellation
        agent_after_cancel = await server.agent_manager.get_agent_by_id_async(
            agent_id=test_agent_with_tool.id,
            actor=default_user,
        )
        # Verify last_stop_reason is set to cancelled
        assert agent_after_cancel.last_stop_reason == "cancelled", (
            f"Agent's last_stop_reason should be 'cancelled', got '{agent_after_cancel.last_stop_reason}'"
        )
        message_ids_after_cancel = len(agent_after_cancel.message_ids or [])
        # Second run: complete it successfully
        test_run_2 = await server.run_manager.create_run(
            pydantic_run=PydanticRun(
                agent_id=test_agent_with_tool.id,
                status=RunStatus.created,
            ),
            actor=default_user,
        )
        # Reload agent loop with fresh state
        agent_loop_2 = AgentLoop.load(
            agent_state=await server.agent_manager.get_agent_by_id_async(
                agent_id=test_agent_with_tool.id,
                actor=default_user,
                include_relationships=["memory", "tools", "sources"],
            ),
            actor=default_user,
        )
        input_messages_2 = [MessageCreate(role=MessageRole.user, content="Second message")]
        result_2 = await agent_loop_2.step(
            input_messages=input_messages_2,
            max_steps=5,
            run_id=test_run_2.id,
        )
        # Verify second run completed successfully
        assert result_2.stop_reason.stop_reason != "cancelled", f"Second run should complete, got {result_2.stop_reason.stop_reason}"
        # Get agent state after completion
        agent_after_complete = await server.agent_manager.get_agent_by_id_async(
            agent_id=test_agent_with_tool.id,
            actor=default_user,
        )
        message_ids_after_complete = len(agent_after_complete.message_ids or [])
        # Verify message count increased
        assert message_ids_after_complete >= message_ids_after_cancel, (
            f"Message IDs should increase or stay same: "
            f"after_cancel={message_ids_after_cancel}, after_complete={message_ids_after_complete}"
        )
        # CRITICAL CHECK: Verify agent.message_ids consistency with DB for BOTH runs
        # Check first run (cancelled)
        db_messages_run1 = await server.message_manager.list_messages(
            actor=default_user,
            agent_id=test_agent_with_tool.id,
            run_id=test_run.id,
            limit=1000,
        )
        # Check second run (completed)
        db_messages_run2 = await server.message_manager.list_messages(
            actor=default_user,
            agent_id=test_agent_with_tool.id,
            run_id=test_run_2.id,
            limit=1000,
        )
        agent_message_ids = set(agent_after_complete.message_ids or [])
        all_db_message_ids = {m.id for m in db_messages_run1} | {m.id for m in db_messages_run2}
        # Check for desync: every message in DB must be in agent.message_ids
        messages_in_db_not_in_agent = all_db_message_ids - agent_message_ids
        assert len(messages_in_db_not_in_agent) == 0, (
            f"MESSAGE DESYNC: {len(messages_in_db_not_in_agent)} messages in DB but not in agent.message_ids\n"
            f"Missing message IDs: {messages_in_db_not_in_agent}\n"
            f"Run 1 (cancelled) had {len(db_messages_run1)} messages\n"
            f"Run 2 (completed) had {len(db_messages_run2)} messages\n"
            f"Agent has {len(agent_message_ids)} message_ids total\n"
            f"This indicates message_ids was not updated properly after cancellation or continuation."
        )
    @pytest.mark.asyncio
    async def test_approval_request_message_ids_desync_with_background_token_streaming(
        self,
        server: SyncServer,
        default_user,
        test_agent_with_tool,
        bash_tool,
    ):
        """
        Test for the specific desync bug with BACKGROUND + TOKEN STREAMING.
        This is the EXACT scenario where the bug occurs in production:
        - background=True (background streaming)
        - stream_tokens=True (token streaming)
        - Agent calls HITL tool requiring approval
        - Run is cancelled during approval
        Bug Scenario:
        1. Agent calls HITL tool requiring approval
        2. Approval request message is persisted to DB
        3. Run is cancelled while processing in background with token streaming
        4. Approval request message ID is NOT in agent.message_ids
        5. Result: "Desync detected - cursor last: X, in-context last: Y"

        NOTE(review): this test depends on wall-clock sleeps to race the
        background task; it deliberately does not fail if the cancellation
        lands after completion (see final print), it only fails on desync.
        """
        # Add bash_tool to agent (requires approval)
        await server.agent_manager.attach_tool_async(
            agent_id=test_agent_with_tool.id,
            tool_id=bash_tool.id,
            actor=default_user,
        )
        # Get initial message count
        agent_before = await server.agent_manager.get_agent_by_id_async(
            agent_id=test_agent_with_tool.id,
            actor=default_user,
        )
        # Snapshot pre-existing IDs so later diffs only consider this run's messages.
        initial_message_ids = set(agent_before.message_ids or [])
        print(f"\nInitial message_ids count: {len(initial_message_ids)}")
        # Create streaming service
        streaming_service = StreamingService(server)
        # Create request with BACKGROUND + TOKEN STREAMING (the key conditions!)
        request = LettaStreamingRequest(
            messages=[MessageCreate(role=MessageRole.user, content="Please run the bash_tool with operation 'test'")],
            max_steps=5,
            stream_tokens=True,  # TOKEN STREAMING - KEY CONDITION
            background=True,  # BACKGROUND STREAMING - KEY CONDITION
        )
        print("\n🔥 Starting agent with BACKGROUND + TOKEN STREAMING...")
        print(f"   stream_tokens={request.stream_tokens}")
        print(f"   background={request.background}")
        # Start the background streaming agent
        run, _stream_response = await streaming_service.create_agent_stream(
            agent_id=test_agent_with_tool.id,
            actor=default_user,
            request=request,
            run_type="test_desync",
        )
        assert run is not None, "Run should be created for background streaming"
        print(f"\n✅ Run created: {run.id}")
        print(f"   Status: {run.status}")
        # Cancel almost immediately - we want to interrupt DURING processing, not after
        # The bug happens when cancellation interrupts the approval flow mid-execution
        print("\n⏳ Starting background task, will cancel quickly to catch mid-execution...")
        await asyncio.sleep(0.3)  # Just enough time for LLM to start, but not complete
        # NOW CANCEL THE RUN WHILE IT'S STILL PROCESSING - This is where the bug happens!
        print("\n❌ CANCELLING RUN while in background + token streaming mode (MID-EXECUTION)...")
        await server.run_manager.cancel_run(
            actor=default_user,
            run_id=run.id,
        )
        # Give cancellation time to propagate and background task to react
        print("⏳ Waiting for cancellation to propagate through background task...")
        await asyncio.sleep(2)  # Let the background task detect cancellation and clean up
        # Check run status after cancellation
        run_status = await server.run_manager.get_run_by_id(run.id, actor=default_user)
        print(f"\n📊 Run status after cancel: {run_status.status}")
        print(f"   Stop reason: {run_status.stop_reason}")
        # Get messages from DB AFTER cancellation
        db_messages_after_cancel = await server.message_manager.list_messages(
            actor=default_user,
            agent_id=test_agent_with_tool.id,
            run_id=run.id,
            limit=1000,
        )
        print(f"\n📨 Messages in DB after cancel: {len(db_messages_after_cancel)}")
        for msg in db_messages_after_cancel:
            print(f"   - {msg.id}: role={msg.role}")
        # Get agent state AFTER cancellation
        agent_after_cancel = await server.agent_manager.get_agent_by_id_async(
            agent_id=test_agent_with_tool.id,
            actor=default_user,
        )
        # Verify last_stop_reason is set to cancelled
        print(f"\n🔍 Agent last_stop_reason: {agent_after_cancel.last_stop_reason}")
        assert agent_after_cancel.last_stop_reason == "cancelled", (
            f"Agent's last_stop_reason should be 'cancelled', got '{agent_after_cancel.last_stop_reason}'"
        )
        agent_message_ids = set(agent_after_cancel.message_ids or [])
        new_message_ids = agent_message_ids - initial_message_ids
        print(f"\n📝 Agent message_ids after cancel: {len(agent_message_ids)}")
        print(f"   New message_ids in this run: {len(new_message_ids)}")
        db_message_ids = {m.id for m in db_messages_after_cancel}
        # CRITICAL CHECK: Every message in DB must be in agent.message_ids
        messages_in_db_not_in_agent = db_message_ids - agent_message_ids
        if messages_in_db_not_in_agent:
            # THIS IS THE DESYNC BUG!
            print("\n❌ DESYNC BUG DETECTED!")
            print(f"🐛 Found {len(messages_in_db_not_in_agent)} messages in DB but NOT in agent.message_ids")
            print("   This bug occurs specifically with: background=True + stream_tokens=True")
            missing_messages = [m for m in db_messages_after_cancel if m.id in messages_in_db_not_in_agent]
            print("\n🔍 Missing messages details:")
            for m in missing_messages:
                print(f"   - ID: {m.id}")
                print(f"     Role: {m.role}")
                print(f"     Created: {m.created_at}")
                if hasattr(m, "content"):
                    content_preview = str(m.content)[:100] if m.content else "None"
                    print(f"     Content: {content_preview}...")
            # Get the last message IDs for the exact error message format
            # NOTE(review): sets are unordered, so "last" here is arbitrary —
            # acceptable because it is only used to render a diagnostic string.
            cursor_last = list(db_message_ids)[-1] if db_message_ids else None
            in_context_last = list(agent_message_ids)[-1] if agent_message_ids else None
            print("\n💥 This causes the EXACT error reported:")
            print(f"   'Desync detected - cursor last: {cursor_last},")
            print(f"    in-context last: {in_context_last}'")
            assert False, (
                f"🐛 DESYNC DETECTED IN BACKGROUND + TOKEN STREAMING MODE\n\n"
                f"Found {len(messages_in_db_not_in_agent)} messages in DB but not in agent.message_ids\n\n"
                f"This reproduces the reported bug:\n"
                f"  'Desync detected - cursor last: {cursor_last},\n"
                f"   in-context last: {in_context_last}'\n\n"
                f"Missing message IDs: {messages_in_db_not_in_agent}\n\n"
                f"Root cause: With background=True + stream_tokens=True, approval request messages\n"
                f"are persisted to DB but NOT added to agent.message_ids when cancellation occurs\n"
                f"during HITL approval flow.\n\n"
                f"Fix location: Check approval flow in letta_agent_v3.py:442-486 and background\n"
                f"streaming wrapper in streaming_service.py:138-146"
            )
        # Also check reverse: agent.message_ids shouldn't have messages not in DB
        messages_in_agent_not_in_db = agent_message_ids - db_message_ids
        # Exclude messages that existed before this run; they legitimately
        # live in agent.message_ids without belonging to this run's rows.
        messages_in_agent_not_in_db = messages_in_agent_not_in_db - initial_message_ids
        if messages_in_agent_not_in_db:
            print("\n❌ REVERSE DESYNC DETECTED!")
            print(f"Found {len(messages_in_agent_not_in_db)} message IDs in agent.message_ids but NOT in DB")
            assert False, (
                f"REVERSE DESYNC: {len(messages_in_agent_not_in_db)} messages in agent.message_ids but not in DB\n"
                f"Message IDs: {messages_in_agent_not_in_db}"
            )
        # If we get here, message IDs are consistent!
        print("\n✅ No desync detected - message IDs are consistent between DB and agent state")
        print(f"   DB message count: {len(db_message_ids)}")
        print(f"   Agent message_ids count: {len(agent_message_ids)}")
        print("\n   Either the bug is fixed, or we need to adjust test timing/conditions.")
class TestStreamingCancellation:
    """
    Test cancellation during different streaming modes.

    Covers both token streaming (stream_tokens=True) and step streaming
    (stream_tokens=False) paths of AgentLoop.stream.
    """

    @pytest.mark.asyncio
    async def test_token_streaming_cancellation(
        self,
        server: SyncServer,
        default_user,
        test_agent_with_tool,
        test_run,
    ):
        """
        Test cancellation during token streaming mode.
        This tests Issue #3: Cancellation During LLM Streaming (token mode).
        Verifies:
        - Cancellation can be detected during token streaming
        - Partial messages are handled correctly
        - Stop reason is set to 'cancelled'
        """
        # Load agent loop
        agent_loop = AgentLoop.load(agent_state=test_agent_with_tool, actor=default_user)
        input_messages = [MessageCreate(role=MessageRole.user, content="Hello")]
        # Cancel after first chunk.
        # Single-element list so both closures below can mutate the flag.
        cancel_triggered = [False]

        async def mock_check_cancellation(run_id):
            # Report "cancelled" only once the wrapper generator has flipped
            # the flag (after the second chunk).
            if cancel_triggered[0]:
                return True
            return False

        agent_loop._check_run_cancellation = mock_check_cancellation

        # Mock streaming
        async def cancel_during_stream():
            """Generator that simulates streaming and cancels mid-stream."""
            chunks_yielded = 0
            stream = agent_loop.stream(
                input_messages=input_messages,
                max_steps=5,
                stream_tokens=True,
                run_id=test_run.id,
            )
            async for chunk in stream:
                chunks_yielded += 1
                yield chunk
                # Cancel after a few chunks
                if chunks_yielded == 2 and not cancel_triggered[0]:
                    cancel_triggered[0] = True
                    await server.run_manager.cancel_run(
                        actor=default_user,
                        run_id=test_run.id,
                    )

        # Consume the stream
        chunks = []
        try:
            async for chunk in cancel_during_stream():
                chunks.append(chunk)
        except Exception:
            # May raise exception on cancellation
            pass
        # Verify we got some chunks before cancellation
        assert len(chunks) > 0, "Should receive at least some chunks before cancellation"

    @pytest.mark.asyncio
    async def test_step_streaming_cancellation(
        self,
        server: SyncServer,
        default_user,
        test_agent_with_tool,
        test_run,
    ):
        """
        Test cancellation during step streaming mode (not token streaming).
        Verifies:
        - Cancellation detected between steps
        - Completed steps are streamed fully
        - Partial step is not streamed
        """
        # Load agent loop
        agent_loop = AgentLoop.load(agent_state=test_agent_with_tool, actor=default_user)
        input_messages = [MessageCreate(role=MessageRole.user, content="Call print_tool with 'message'")]
        # Cancel after first step
        call_count = [0]

        async def mock_check_cancellation(run_id):
            # First check passes; every subsequent check cancels the run and
            # reports cancellation, so the loop stops at a step boundary.
            call_count[0] += 1
            if call_count[0] > 1:
                await server.run_manager.cancel_run(
                    actor=default_user,
                    run_id=run_id,
                )
                return True
            return False

        agent_loop._check_run_cancellation = mock_check_cancellation
        # Stream with step streaming (not token streaming)
        chunks = []
        stream = agent_loop.stream(
            input_messages=input_messages,
            max_steps=5,
            stream_tokens=False,  # Step streaming
            run_id=test_run.id,
        )
        async for chunk in stream:
            chunks.append(chunk)
        # Verify we got chunks from the first step
        assert len(chunks) > 0, "Should receive chunks from first step before cancellation"
        # Verify cancellation was detected
        assert agent_loop.stop_reason.stop_reason == "cancelled"
class TestToolExecutionCancellation:
    """
    Test cancellation during tool execution.
    This tests Issue #2C: Token streaming tool return desync.
    """

    @pytest.mark.asyncio
    async def test_cancellation_during_tool_execution(
        self,
        server: SyncServer,
        default_user,
        test_agent_with_tool,
        test_run,
        print_tool,
    ):
        """
        Test cancellation while tool is executing.
        Verifies:
        - Tool execution completes or is interrupted cleanly
        - Tool return messages are consistent
        - Database state matches client state
        """
        loop = AgentLoop.load(agent_state=test_agent_with_tool, actor=default_user)
        user_input = [MessageCreate(role=MessageRole.user, content="Call print_tool with 'test message'")]

        # Track the tool-execution lifecycle so we can assert on it afterwards.
        lifecycle = {"started": False, "completed": False}
        real_execute = loop._execute_tool

        async def mock_execute_tool(target_tool, tool_args, agent_state, agent_step_span, step_id):
            # Flag entry, then cancel the run while the tool is mid-flight.
            lifecycle["started"] = True
            await server.run_manager.cancel_run(
                actor=default_user,
                run_id=test_run.id,
            )
            # Delegate to the real implementation; the tool itself still runs.
            outcome = await real_execute(target_tool, tool_args, agent_state, agent_step_span, step_id)
            lifecycle["completed"] = True
            return outcome

        loop._execute_tool = mock_execute_tool

        step_result = await loop.step(
            input_messages=user_input,
            max_steps=5,
            run_id=test_run.id,
        )

        assert lifecycle["started"], "Tool execution should have started"
        # Cancellation may only be noticed later, at the next step boundary.
        assert step_result.stop_reason.stop_reason == "cancelled"

        # If the tool ran to completion, its return message must be persisted.
        if lifecycle["completed"]:
            db_messages = await server.message_manager.list_messages(
                agent_id=test_agent_with_tool.id,
                actor=default_user,
            )
            run_messages = [m for m in db_messages if m.run_id == test_run.id]
            tool_returns = [m for m in run_messages if m.role == "tool"]
            assert len(tool_returns) > 0, "Should have persisted tool return message"
class TestResourceCleanupAfterCancellation:
    """
    Test Issue #6: Resource Cleanup Issues
    Tests that resources are properly cleaned up after cancellation.
    """

    @pytest.mark.asyncio
    async def test_stop_reason_set_correctly_on_cancellation(
        self,
        server: SyncServer,
        default_user,
        test_agent_with_tool,
        test_run,
    ):
        """
        Test that stop_reason is set to 'cancelled' not 'end_turn' or other.
        This tests Issue #6: Resource Cleanup Issues.
        The finally block should set stop_reason to 'cancelled' when appropriate.
        Verifies:
        - stop_reason is 'cancelled' when run is cancelled
        - stop_reason is not 'end_turn' or 'completed' for cancelled runs
        """
        loop = AgentLoop.load(agent_state=test_agent_with_tool, actor=default_user)

        # Cancel up front, before the agent ever takes a step.
        await server.run_manager.cancel_run(
            actor=default_user,
            run_id=test_run.id,
        )

        outcome = await loop.step(
            input_messages=[MessageCreate(role=MessageRole.user, content="Hello")],
            max_steps=5,
            run_id=test_run.id,
        )

        # The loop must report cancellation, never a normal end-of-turn.
        assert outcome.stop_reason.stop_reason == "cancelled", (
            f"Stop reason should be 'cancelled', got '{outcome.stop_reason.stop_reason}'"
        )

        # The persisted run record must agree with the in-memory stop reason.
        persisted_run = await server.run_manager.get_run_by_id(run_id=test_run.id, actor=default_user)
        assert persisted_run.status == RunStatus.cancelled, f"Run status should be cancelled, got {persisted_run.status}"

    @pytest.mark.asyncio
    async def test_response_messages_cleared_after_cancellation(
        self,
        server: SyncServer,
        default_user,
        test_agent_with_tool,
        test_run,
    ):
        """
        Test that internal message buffers are properly managed after cancellation.
        Verifies:
        - response_messages list is in expected state after cancellation
        - No memory leaks from accumulated messages
        """
        loop = AgentLoop.load(agent_state=test_agent_with_tool, actor=default_user)

        # Let the first cancellation check pass, then cancel on every later one.
        invocations = {"count": 0}

        async def mock_check_cancellation(run_id):
            invocations["count"] += 1
            if invocations["count"] <= 1:
                return False
            await server.run_manager.cancel_run(
                actor=default_user,
                run_id=run_id,
            )
            return True

        loop._check_run_cancellation = mock_check_cancellation

        await loop.step(
            input_messages=[MessageCreate(role=MessageRole.user, content="Call print_tool with 'test'")],
            max_steps=5,
            run_id=test_run.id,
        )

        # The buffer may legitimately hold messages from the completed step;
        # we only require a sane, bounded size (no runaway accumulation).
        buffered = len(loop.response_messages)
        assert buffered >= 0, "response_messages should be in valid state"
        assert buffered < 100, "response_messages should not have excessive accumulation"
class TestApprovalFlowCancellation:
    """
    Test Issue #5: Approval Flow + Cancellation
    Tests edge cases with HITL tool approvals and cancellation.
    """

    @pytest.mark.asyncio
    async def test_cancellation_while_waiting_for_approval(
        self,
        server: SyncServer,
        default_user,
        test_agent_with_tool,
        test_run,
        bash_tool,
    ):
        """
        Test cancellation while agent is waiting for tool approval.
        This tests the scenario where:
        1. Agent calls a tool requiring approval
        2. Run is cancelled while waiting for approval
        3. Agent should detect cancellation and not process approval
        Verifies:
        - Run status is cancelled
        - Agent does not process approval after cancellation
        - No tool execution happens
        """
        # Add bash_tool which requires approval to agent
        await server.agent_manager.attach_tool_async(
            agent_id=test_agent_with_tool.id,
            tool_id=bash_tool.id,
            actor=default_user,
        )
        # Reload agent with new tool
        test_agent_with_tool = await server.agent_manager.get_agent_by_id_async(
            agent_id=test_agent_with_tool.id,
            actor=default_user,
            include_relationships=["memory", "tools", "sources"],
        )
        # Load agent loop
        agent_loop = AgentLoop.load(agent_state=test_agent_with_tool, actor=default_user)
        input_messages = [MessageCreate(role=MessageRole.user, content="Call bash_tool with operation 'test'")]
        # Execute step - should stop at approval request
        result = await agent_loop.step(
            input_messages=input_messages,
            max_steps=5,
            run_id=test_run.id,
        )
        # Verify we got approval request
        assert result.stop_reason.stop_reason == "requires_approval", f"Should stop for approval, got {result.stop_reason.stop_reason}"
        # Now cancel the run while "waiting for approval"
        await server.run_manager.cancel_run(
            actor=default_user,
            run_id=test_run.id,
        )
        # Reload agent loop with fresh state
        agent_loop_2 = AgentLoop.load(
            agent_state=await server.agent_manager.get_agent_by_id_async(
                agent_id=test_agent_with_tool.id,
                actor=default_user,
                include_relationships=["memory", "tools", "sources"],
            ),
            actor=default_user,
        )
        # Try to continue - should detect cancellation
        result_2 = await agent_loop_2.step(
            input_messages=[MessageCreate(role=MessageRole.user, content="Hello")],  # No new input, just continuing
            max_steps=5,
            run_id=test_run.id,
        )
        # Should detect cancellation
        assert result_2.stop_reason.stop_reason == "cancelled", f"Should detect cancellation, got {result_2.stop_reason.stop_reason}"

    @pytest.mark.asyncio
    async def test_agent_state_after_cancelled_approval(
        self,
        server: SyncServer,
        default_user,
        test_agent_with_tool,
        test_run,
        bash_tool,
    ):
        """
        Test that agent state is consistent after approval request is cancelled.
        This addresses the issue where agents say they are "awaiting approval"
        even though the run is cancelled.
        Verifies:
        - Agent can continue after cancelled approval
        - No phantom "awaiting approval" state
        - Messages reflect actual state
        """
        # Add bash_tool which requires approval
        await server.agent_manager.attach_tool_async(
            agent_id=test_agent_with_tool.id,
            tool_id=bash_tool.id,
            actor=default_user,
        )
        # Reload agent with new tool
        test_agent_with_tool = await server.agent_manager.get_agent_by_id_async(
            agent_id=test_agent_with_tool.id,
            actor=default_user,
            include_relationships=["memory", "tools", "sources"],
        )
        agent_loop = AgentLoop.load(agent_state=test_agent_with_tool, actor=default_user)
        # First run: trigger approval request then cancel
        input_messages_1 = [MessageCreate(role=MessageRole.user, content="Call bash_tool with operation 'test'")]
        result_1 = await agent_loop.step(
            input_messages=input_messages_1,
            max_steps=5,
            run_id=test_run.id,
        )
        assert result_1.stop_reason.stop_reason == "requires_approval"
        # Cancel the run
        await server.run_manager.cancel_run(
            actor=default_user,
            run_id=test_run.id,
        )
        # Smoke-check that message listing still works after cancellation.
        # The contents are intentionally not asserted here; the real state
        # check is that the follow-up run below is not stuck in approval.
        await server.message_manager.list_messages(
            actor=default_user,
            agent_id=test_agent_with_tool.id,
            run_id=test_run.id,
            limit=1000,
        )
        # Second run: try to execute normally (should work, not stuck in approval)
        test_run_2 = await server.run_manager.create_run(
            pydantic_run=PydanticRun(
                agent_id=test_agent_with_tool.id,
                status=RunStatus.created,
            ),
            actor=default_user,
        )
        agent_loop_2 = AgentLoop.load(
            agent_state=await server.agent_manager.get_agent_by_id_async(
                agent_id=test_agent_with_tool.id,
                actor=default_user,
                include_relationships=["memory", "tools", "sources"],
            ),
            actor=default_user,
        )
        # Call a different tool that doesn't require approval
        input_messages_2 = [MessageCreate(role=MessageRole.user, content="Call print_tool with message 'hello'")]
        result_2 = await agent_loop_2.step(
            input_messages=input_messages_2,
            max_steps=5,
            run_id=test_run_2.id,
        )
        # Should complete normally, not be stuck in approval state
        assert result_2.stop_reason.stop_reason != "requires_approval", "Agent should not be stuck in approval state from cancelled run"

    @pytest.mark.asyncio
    async def test_approval_state_persisted_correctly_after_cancel(
        self,
        server: SyncServer,
        default_user,
        test_agent_with_tool,
        test_run,
        bash_tool,
    ):
        """
        Test that approval state is correctly persisted/cleaned after cancellation.
        This addresses the specific issue mentioned:
        "agents say they are awaiting approval despite the run not being shown as pending approval"
        Verifies:
        - Run status matches actual state
        - No phantom "pending approval" status
        - Messages accurately reflect cancellation
        """
        # Add bash_tool
        await server.agent_manager.attach_tool_async(
            agent_id=test_agent_with_tool.id,
            tool_id=bash_tool.id,
            actor=default_user,
        )
        test_agent_with_tool = await server.agent_manager.get_agent_by_id_async(
            agent_id=test_agent_with_tool.id,
            actor=default_user,
            include_relationships=["memory", "tools", "sources"],
        )
        agent_loop = AgentLoop.load(agent_state=test_agent_with_tool, actor=default_user)
        # Trigger approval
        result = await agent_loop.step(
            input_messages=[MessageCreate(role=MessageRole.user, content="Call bash_tool with 'test'")],
            max_steps=5,
            run_id=test_run.id,
        )
        assert result.stop_reason.stop_reason == "requires_approval"
        # Cancel the run
        await server.run_manager.cancel_run(
            actor=default_user,
            run_id=test_run.id,
        )
        # Verify run status is cancelled, NOT pending_approval
        run_after_cancel = await server.run_manager.get_run_by_id(run_id=test_run.id, actor=default_user)
        assert run_after_cancel.status == RunStatus.cancelled, f"Run status should be cancelled, got {run_after_cancel.status}"
        # Agent should be able to start fresh run
        test_run_3 = await server.run_manager.create_run(
            pydantic_run=PydanticRun(
                agent_id=test_agent_with_tool.id,
                status=RunStatus.created,
            ),
            actor=default_user,
        )
        agent_loop_3 = AgentLoop.load(
            agent_state=await server.agent_manager.get_agent_by_id_async(
                agent_id=test_agent_with_tool.id,
                actor=default_user,
                include_relationships=["memory", "tools", "sources"],
            ),
            actor=default_user,
        )
        # Should be able to make normal call
        result_3 = await agent_loop_3.step(
            input_messages=[MessageCreate(role=MessageRole.user, content="Call print_tool with 'test'")],
            max_steps=5,
            run_id=test_run_3.id,
        )
        # Should complete normally
        assert result_3.stop_reason.stop_reason != "requires_approval", "New run should not be stuck in approval state"

    @pytest.mark.asyncio
    async def test_approval_request_message_ids_desync(
        self,
        server: SyncServer,
        default_user,
        test_agent_with_tool,
        test_run,
        bash_tool,
    ):
        """
        Test for the specific desync bug reported:
        "Desync detected - cursor last: message-X, in-context last: message-Y"
        Bug Scenario:
        1. Agent calls HITL tool requiring approval
        2. Approval request message is persisted to DB
        3. Run is cancelled
        4. Approval request message ID is NOT in agent.message_ids
        5. Result: cursor desync between DB and agent state
        This is the root cause of the reported error:
        "Desync detected - cursor last: message-c07fa1ec..., in-context last: message-a2615dc3..."
        The bug happens because:
        - Database contains the approval_request message
        - Agent's message_ids list does NOT contain the approval_request message ID
        - Causes cursor/pagination to fail
        Verifies:
        - If approval request is in DB, it must be in agent.message_ids
        - Cancellation doesn't cause partial message persistence
        - Cursor consistency between DB and agent state
        """
        # Add bash_tool which requires approval
        await server.agent_manager.attach_tool_async(
            agent_id=test_agent_with_tool.id,
            tool_id=bash_tool.id,
            actor=default_user,
        )
        # Get initial message count
        agent_before = await server.agent_manager.get_agent_by_id_async(
            agent_id=test_agent_with_tool.id,
            actor=default_user,
        )
        # Snapshot so the reverse-desync check can ignore pre-existing messages.
        initial_message_ids = set(agent_before.message_ids or [])
        # Reload agent with new tool
        test_agent_with_tool = await server.agent_manager.get_agent_by_id_async(
            agent_id=test_agent_with_tool.id,
            actor=default_user,
            include_relationships=["memory", "tools", "sources"],
        )
        agent_loop = AgentLoop.load(agent_state=test_agent_with_tool, actor=default_user)
        # Trigger approval request
        result = await agent_loop.step(
            input_messages=[MessageCreate(role=MessageRole.user, content="Call bash_tool with 'test'")],
            max_steps=5,
            run_id=test_run.id,
        )
        assert result.stop_reason.stop_reason == "requires_approval", f"Expected requires_approval, got {result.stop_reason.stop_reason}"
        # Smoke-check listing before cancellation; the authoritative comparison
        # uses the post-cancellation listing below.
        await server.message_manager.list_messages(
            actor=default_user,
            agent_id=test_agent_with_tool.id,
            run_id=test_run.id,
            limit=1000,
        )
        # Cancel the run
        await server.run_manager.cancel_run(
            actor=default_user,
            run_id=test_run.id,
        )
        # Get agent state after cancellation
        agent_after_cancel = await server.agent_manager.get_agent_by_id_async(
            agent_id=test_agent_with_tool.id,
            actor=default_user,
        )
        agent_message_ids = set(agent_after_cancel.message_ids or [])
        # Get all messages from database again
        db_messages_after = await server.message_manager.list_messages(
            actor=default_user,
            agent_id=test_agent_with_tool.id,
            run_id=test_run.id,
            limit=1000,
        )
        db_message_ids = {m.id for m in db_messages_after}
        # CRITICAL CHECK: Every message in DB must be in agent.message_ids
        messages_in_db_not_in_agent = db_message_ids - agent_message_ids
        if messages_in_db_not_in_agent:
            # THIS IS THE DESYNC BUG!
            missing_messages = [m for m in db_messages_after if m.id in messages_in_db_not_in_agent]
            missing_details = [f"ID: {m.id}, Role: {m.role}, Created: {m.created_at}" for m in missing_messages]
            # Get the cursor values that would cause the error
            # (sets are unordered, so these are illustrative only)
            cursor_last = list(db_message_ids)[-1] if db_message_ids else None
            in_context_last = list(agent_message_ids)[-1] if agent_message_ids else None
            assert False, (
                f"DESYNC DETECTED: {len(messages_in_db_not_in_agent)} messages in DB but not in agent.message_ids\n\n"
                f"This is the reported bug:\n"
                f"  'Desync detected - cursor last: {cursor_last}, in-context last: {in_context_last}'\n\n"
                f"Missing messages:\n" + "\n".join(missing_details) + "\n\n"
                f"Agent message_ids count: {len(agent_message_ids)}\n"
                f"DB messages count: {len(db_message_ids)}\n\n"
                f"Root cause: Approval request message was persisted to DB but not added to agent.message_ids\n"
                f"when cancellation occurred during HITL approval flow."
            )
        # Also check the inverse: agent.message_ids shouldn't have messages not in DB
        messages_in_agent_not_in_db = agent_message_ids - db_message_ids
        messages_in_agent_not_in_db = messages_in_agent_not_in_db - initial_message_ids
        if messages_in_agent_not_in_db:
            assert False, (
                f"REVERSE DESYNC: {len(messages_in_agent_not_in_db)} messages in agent.message_ids but not in DB\n"
                f"Message IDs: {messages_in_agent_not_in_db}"
            )

    @pytest.mark.asyncio
    async def test_parallel_tool_calling_cancellation_with_denials(
        self,
        server: SyncServer,
        default_user,
        bash_tool,
    ):
        """
        Test that parallel tool calls receive proper denial messages on cancellation.
        This tests the scenario where:
        1. Agent has parallel tool calling enabled
        2. Agent calls a tool 3 times in parallel (requiring approval)
        3. Run is cancelled while waiting for approval
        4. All 3 tool calls receive denial messages with TOOL_CALL_DENIAL_ON_CANCEL
        5. Agent can still be messaged again (creating a new run)
        Verifies:
        - All parallel tool calls get proper denial messages
        - Denial messages contain TOOL_CALL_DENIAL_ON_CANCEL reason
        - Agent state is not corrupted
        - New runs can be created after cancellation
        """
        # Create agent with parallel tool calling enabled
        config = LLMConfig.default_config("gpt-4o-mini")
        config.parallel_tool_calls = True
        agent_state = await server.agent_manager.create_agent_async(
            agent_create=CreateAgent(
                name="test_parallel_tool_calling_agent",
                agent_type="letta_v1_agent",
                memory_blocks=[],
                # BUGFIX: previously passed a fresh default_config here, which
                # silently dropped parallel_tool_calls=True and left the test
                # exercising the wrong configuration.
                llm_config=config,
                embedding_config=EmbeddingConfig.default_config(provider="openai"),
                tool_ids=[bash_tool.id],
                include_base_tools=False,
            ),
            actor=default_user,
        )
        # Create a run
        test_run = await server.run_manager.create_run(
            pydantic_run=PydanticRun(
                agent_id=agent_state.id,
                status=RunStatus.created,
            ),
            actor=default_user,
        )
        # Load agent loop
        agent_loop = AgentLoop.load(agent_state=agent_state, actor=default_user)
        # Prompt the agent to call bash_tool 3 times
        # The agent should make parallel tool calls since parallel_tool_calls is enabled
        input_messages = [
            MessageCreate(
                role=MessageRole.user,
                content="Please call bash_tool three times with operations: 'ls', 'pwd', and 'echo test'",
            )
        ]
        # Execute step - should stop at approval request with multiple tool calls
        result = await agent_loop.step(
            input_messages=input_messages,
            max_steps=5,
            run_id=test_run.id,
        )
        # Verify we got approval request
        assert result.stop_reason.stop_reason == "requires_approval", f"Should stop for approval, got {result.stop_reason.stop_reason}"
        # Smoke-check message listing while pending approval; contents are not
        # asserted here (the approval request itself is inspected via `result`).
        await server.message_manager.list_messages(
            actor=default_user,
            agent_id=agent_state.id,
            run_id=test_run.id,
            limit=1000,
        )
        # should not be possible to message the agent (Pending approval)
        from letta.errors import PendingApprovalError

        with pytest.raises(PendingApprovalError):
            test_run2 = await server.run_manager.create_run(
                pydantic_run=PydanticRun(
                    agent_id=agent_state.id,
                    status=RunStatus.created,
                ),
                actor=default_user,
            )
            await agent_loop.step(
                input_messages=[MessageCreate(role=MessageRole.user, content="Hello, how are you?")],
                max_steps=5,
                run_id=test_run2.id,
            )
        from letta.schemas.letta_message import ApprovalRequestMessage

        approval_request_messages = [m for m in result.messages if isinstance(m, ApprovalRequestMessage)]
        assert len(approval_request_messages) > 0, "Should have at least one approval request message"
        # Get the last approval request message (should have the tool calls)
        approval_request = approval_request_messages[-1]
        tool_calls = approval_request.tool_calls if hasattr(approval_request, "tool_calls") else []
        num_tool_calls = len(tool_calls)
        print(f"\nFound {num_tool_calls} tool calls in approval request")
        # The agent might not always make exactly 3 parallel calls depending on the LLM,
        # but we should have at least 1 tool call. For the test to be meaningful,
        # we want multiple tool calls, but we'll verify whatever the LLM decides
        assert num_tool_calls >= 1, f"Should have at least 1 tool call, got {num_tool_calls}"
        # Now cancel the run while "waiting for approval"
        await server.run_manager.cancel_run(
            actor=default_user,
            run_id=test_run.id,
        )
        # Get messages after cancellation
        db_messages_after_cancel = await server.message_manager.list_messages(
            actor=default_user,
            agent_id=agent_state.id,
            run_id=test_run.id,
            limit=1000,
        )
        # Find tool return messages (these should be the denial messages)
        tool_return_messages = [m for m in db_messages_after_cancel if m.role == "tool"]
        print(f"Found {len(tool_return_messages)} tool return messages after cancellation")
        # Verify we got denial messages for all tool calls
        assert len(tool_return_messages) == num_tool_calls, (
            f"Should have {num_tool_calls} tool return messages (one per tool call), got {len(tool_return_messages)}"
        )
        # Verify each tool return message contains the denial reason
        for tool_return_msg in tool_return_messages:
            # Check if message has tool_returns (new format) or tool_return (old format)
            print("TOOL RETURN MESSAGE:\n\n", tool_return_msg)
            if hasattr(tool_return_msg, "tool_returns") and tool_return_msg.tool_returns:
                # New format: list of tool returns
                for tool_return in tool_return_msg.tool_returns:
                    # BUGFIX: the failure message previously formatted
                    # `tool_return.tool_return`, which does not exist on these
                    # items and raised AttributeError, masking the real failure.
                    assert TOOL_CALL_DENIAL_ON_CANCEL in tool_return.func_response, (
                        f"Tool return should contain denial message, got: {tool_return.func_response}"
                    )
            elif hasattr(tool_return_msg, "tool_return"):
                # Old format: single tool_return field (denial text lives in content)
                assert TOOL_CALL_DENIAL_ON_CANCEL in tool_return_msg.content, (
                    f"Tool return should contain denial message, got: {tool_return_msg.content}"
                )
            elif hasattr(tool_return_msg, "content"):
                # Check content field
                content_str = str(tool_return_msg.content)
                assert TOOL_CALL_DENIAL_ON_CANCEL in content_str, f"Tool return content should contain denial message, got: {content_str}"
        # Verify run status is cancelled
        run_after_cancel = await server.run_manager.get_run_by_id(run_id=test_run.id, actor=default_user)
        assert run_after_cancel.status == RunStatus.cancelled, f"Run status should be cancelled, got {run_after_cancel.status}"
        # Verify agent can be messaged again (create a new run)
        test_run_2 = await server.run_manager.create_run(
            pydantic_run=PydanticRun(
                agent_id=agent_state.id,
                status=RunStatus.created,
            ),
            actor=default_user,
        )
        # Reload agent loop with fresh state
        agent_loop_2 = AgentLoop.load(
            agent_state=await server.agent_manager.get_agent_by_id_async(
                agent_id=agent_state.id,
                actor=default_user,
                include_relationships=["memory", "tools", "sources"],
            ),
            actor=default_user,
        )
        # Send a simple message that doesn't require approval
        input_messages_2 = [MessageCreate(role=MessageRole.user, content="Hello, how are you?")]
        result_2 = await agent_loop_2.step(
            input_messages=input_messages_2,
            max_steps=5,
            run_id=test_run_2.id,
        )
        # Verify second run completed successfully (not cancelled, not stuck in approval)
        assert result_2.stop_reason.stop_reason != "cancelled", (
            f"Second run should not be cancelled, got {result_2.stop_reason.stop_reason}"
        )
        assert result_2.stop_reason.stop_reason != "requires_approval", (
            f"Second run should not require approval for simple message, got {result_2.stop_reason.stop_reason}"
        )
        # Verify the second run has messages
        db_messages_run2 = await server.message_manager.list_messages(
            actor=default_user,
            agent_id=agent_state.id,
            run_id=test_run_2.id,
            limit=1000,
        )
        assert len(db_messages_run2) > 0, "Second run should have messages"
class TestEdgeCases:
    """
    Edge cases and boundary conditions for run cancellation.
    """

    @pytest.mark.asyncio
    async def test_cancellation_with_max_steps_reached(
        self,
        server: SyncServer,
        default_user,
        test_agent_with_tool,
        test_run,
    ):
        """
        Exercise the race between max_steps and cancellation.

        With max_steps=2 and a cancellation triggered on the second
        cancellation check, either limit may fire first; both resulting
        stop reasons are acceptable.
        """
        loop = AgentLoop.load(agent_state=test_agent_with_tool, actor=default_user)

        # Count cancellation checks; cancel the run on the second one so the
        # cancellation lands around the same time the step limit is reached.
        checks_seen = 0

        async def fake_cancellation_check(run_id):
            nonlocal checks_seen
            checks_seen += 1
            if checks_seen < 2:
                return False
            await server.run_manager.cancel_run(actor=default_user, run_id=run_id)
            return True

        loop._check_run_cancellation = fake_cancellation_check

        result = await loop.step(
            input_messages=[MessageCreate(role=MessageRole.user, content="Call print_tool with 'test'")],
            max_steps=2,  # Will hit max_steps around the same time as cancellation
            run_id=test_run.id,
        )

        # Timing decides which limit wins; both outcomes are valid here.
        assert result.stop_reason.stop_reason in ["cancelled", "max_steps"], (
            f"Stop reason should be cancelled or max_steps, got {result.stop_reason.stop_reason}"
        )

    @pytest.mark.asyncio
    async def test_double_cancellation(
        self,
        server: SyncServer,
        default_user,
        test_agent_with_tool,
        test_run,
    ):
        """
        Cancelling an already-cancelled run must be handled gracefully:
        re-checking the cancelled run raises no errors and the stop reason
        stays 'cancelled' on every subsequent execution attempt.
        """
        # Cancel up front, before any stepping happens.
        await server.run_manager.cancel_run(actor=default_user, run_id=test_run.id)

        loop = AgentLoop.load(agent_state=test_agent_with_tool, actor=default_user)
        messages = [MessageCreate(role=MessageRole.user, content="Hello")]

        # First execution should observe the pre-existing cancellation.
        first = await loop.step(input_messages=messages, max_steps=5, run_id=test_run.id)
        assert first.stop_reason.stop_reason == "cancelled"

        # A fresh loop over the same cancelled run should also exit cleanly.
        refreshed_state = await server.agent_manager.get_agent_by_id_async(
            agent_id=test_agent_with_tool.id,
            actor=default_user,
            include_relationships=["memory", "tools", "sources"],
        )
        second_loop = AgentLoop.load(agent_state=refreshed_state, actor=default_user)
        second = await second_loop.step(input_messages=messages, max_steps=5, run_id=test_run.id)
        assert second.stop_reason.stop_reason == "cancelled"
class TestErrorDataPersistence:
    """
    Test that error data is properly stored in run metadata when runs fail.
    This ensures errors can be debugged by inspecting the run's metadata field.

    All five tests follow the same recipe: patch ``LettaAgentV3.stream`` with an
    async generator that raises before its first ``yield``, start a foreground
    stream via ``StreamingService``, drain the response iterator to trigger the
    error-handling path, then assert the run row was marked failed with the
    error recorded under ``metadata["error"]``.
    """

    @pytest.mark.asyncio
    async def test_error_data_stored_in_run_metadata_on_background_streaming_llm_error(
        self,
        server: SyncServer,
        default_user,
        test_agent_with_tool,
    ):
        """
        Test that when a background streaming run fails due to an LLM error,
        the error details are stored in the run's metadata field.
        This test validates the fix for the issue where failed runs showed
        empty metadata in the database, making it impossible to debug errors.
        The test patches LettaAgentV3.stream to raise an LLMError, simulating
        what happens when the LLM provider returns an error during streaming.
        Verifies:
        - Run status is set to 'failed'
        - Run metadata contains 'error' key with error details
        """
        from unittest.mock import patch

        from letta.agents.letta_agent_v3 import LettaAgentV3
        from letta.errors import LLMError
        from letta.services.streaming_service import StreamingService

        # Create streaming service
        streaming_service = StreamingService(server)
        # Create request with background streaming - NOT background for simplicity
        # Background streaming adds Redis complexity, so we test foreground streaming
        # which still exercises the same error handling in _create_error_aware_stream
        request = LettaStreamingRequest(
            messages=[MessageCreate(role=MessageRole.user, content="Hello, please respond")],
            max_steps=1,
            stream_tokens=True,
            background=False,
        )

        # Mock stream method that raises an error
        async def mock_stream_raises_llm_error(*args, **kwargs):
            raise LLMError("Simulated LLM error for testing")
            # Unreachable on purpose: the bare yield makes this an async
            # *generator*, so the raise surfaces on first iteration rather
            # than when the (patched) method is called.
            yield  # Make it a generator

        # Use patch to simulate the error during streaming
        with patch.object(LettaAgentV3, "stream", mock_stream_raises_llm_error):
            # Start the streaming request
            run, stream_response = await streaming_service.create_agent_stream(
                agent_id=test_agent_with_tool.id,
                actor=default_user,
                request=request,
                run_type="test_error_persistence",
            )
            assert run is not None, "Run should be created"
            # Consume the stream to trigger error handling
            collected_chunks = []
            async for chunk in stream_response.body_iterator:
                collected_chunks.append(chunk)
            # Give any async handling time to complete
            await asyncio.sleep(0.2)
            # Fetch the run from the database
            fetched_run = await server.run_manager.get_run_by_id(run.id, actor=default_user)
            # Verify the run status is failed
            assert fetched_run.status == RunStatus.failed, f"Expected status 'failed', got '{fetched_run.status}'"
            # Verify metadata contains error information
            assert fetched_run.metadata is not None, (
                f"Run metadata should not be None after error. "
                f"Run ID: {run.id}, Status: {fetched_run.status}, Stop reason: {fetched_run.stop_reason}"
            )
            assert "error" in fetched_run.metadata, f"Run metadata should contain 'error' key, got: {fetched_run.metadata}"
            error_info = fetched_run.metadata["error"]
            # The error is stored as a dict from LettaErrorMessage.model_dump()
            assert isinstance(error_info, dict), f"Error info should be a dict, got: {type(error_info)}"
            assert "error_type" in error_info, f"Error info should contain 'error_type', got: {error_info}"
            assert error_info["error_type"] == "llm_error", f"Expected error_type 'llm_error', got: {error_info['error_type']}"

    @pytest.mark.asyncio
    async def test_error_data_stored_on_streaming_timeout_error(
        self,
        server: SyncServer,
        default_user,
        test_agent_with_tool,
    ):
        """
        Test that timeout errors during streaming store error data.
        Verifies:
        - Timeout errors are properly captured in run metadata
        - Run can be queried from DB and error details are available
        """
        from unittest.mock import patch

        from letta.agents.letta_agent_v3 import LettaAgentV3
        from letta.errors import LLMTimeoutError
        from letta.services.streaming_service import StreamingService

        streaming_service = StreamingService(server)
        request = LettaStreamingRequest(
            messages=[MessageCreate(role=MessageRole.user, content="Hello")],
            max_steps=1,
            stream_tokens=True,
            background=False,
        )

        # Async-generator mock: raises before the first yield (see class docstring).
        async def mock_stream_raises_timeout(*args, **kwargs):
            raise LLMTimeoutError("Request timed out after 30 seconds")
            yield

        with patch.object(LettaAgentV3, "stream", mock_stream_raises_timeout):
            run, stream_response = await streaming_service.create_agent_stream(
                agent_id=test_agent_with_tool.id,
                actor=default_user,
                request=request,
                run_type="test_timeout_error",
            )
            # Consume the stream
            async for _ in stream_response.body_iterator:
                pass
            await asyncio.sleep(0.2)
            fetched_run = await server.run_manager.get_run_by_id(run.id, actor=default_user)
            assert fetched_run.status == RunStatus.failed
            assert fetched_run.metadata is not None, f"Run metadata should contain error info for run {run.id}"
            assert "error" in fetched_run.metadata
            assert fetched_run.metadata["error"]["error_type"] == "llm_timeout"

    @pytest.mark.asyncio
    async def test_error_data_stored_on_streaming_rate_limit_error(
        self,
        server: SyncServer,
        default_user,
        test_agent_with_tool,
    ):
        """
        Test that rate limit errors during streaming store error data.
        Verifies:
        - Rate limit errors are properly captured in run metadata
        """
        from unittest.mock import patch

        from letta.agents.letta_agent_v3 import LettaAgentV3
        from letta.errors import LLMRateLimitError
        from letta.services.streaming_service import StreamingService

        streaming_service = StreamingService(server)
        request = LettaStreamingRequest(
            messages=[MessageCreate(role=MessageRole.user, content="Hello")],
            max_steps=1,
            stream_tokens=True,
            background=False,
        )

        # Async-generator mock: raises before the first yield (see class docstring).
        async def mock_stream_raises_rate_limit(*args, **kwargs):
            raise LLMRateLimitError("Rate limit exceeded: 100 requests per minute")
            yield

        with patch.object(LettaAgentV3, "stream", mock_stream_raises_rate_limit):
            run, stream_response = await streaming_service.create_agent_stream(
                agent_id=test_agent_with_tool.id,
                actor=default_user,
                request=request,
                run_type="test_rate_limit_error",
            )
            async for _ in stream_response.body_iterator:
                pass
            await asyncio.sleep(0.2)
            fetched_run = await server.run_manager.get_run_by_id(run.id, actor=default_user)
            assert fetched_run.status == RunStatus.failed
            assert fetched_run.metadata is not None
            assert "error" in fetched_run.metadata
            assert fetched_run.metadata["error"]["error_type"] == "llm_rate_limit"

    @pytest.mark.asyncio
    async def test_error_data_stored_on_streaming_auth_error(
        self,
        server: SyncServer,
        default_user,
        test_agent_with_tool,
    ):
        """
        Test that authentication errors during streaming store error data.
        Verifies:
        - Auth errors are properly captured in run metadata
        """
        from unittest.mock import patch

        from letta.agents.letta_agent_v3 import LettaAgentV3
        from letta.errors import LLMAuthenticationError
        from letta.services.streaming_service import StreamingService

        streaming_service = StreamingService(server)
        request = LettaStreamingRequest(
            messages=[MessageCreate(role=MessageRole.user, content="Hello")],
            max_steps=1,
            stream_tokens=True,
            background=False,
        )

        # Async-generator mock: raises before the first yield (see class docstring).
        async def mock_stream_raises_auth_error(*args, **kwargs):
            raise LLMAuthenticationError("Invalid API key")
            yield

        with patch.object(LettaAgentV3, "stream", mock_stream_raises_auth_error):
            run, stream_response = await streaming_service.create_agent_stream(
                agent_id=test_agent_with_tool.id,
                actor=default_user,
                request=request,
                run_type="test_auth_error",
            )
            async for _ in stream_response.body_iterator:
                pass
            await asyncio.sleep(0.2)
            fetched_run = await server.run_manager.get_run_by_id(run.id, actor=default_user)
            assert fetched_run.status == RunStatus.failed
            assert fetched_run.metadata is not None
            assert "error" in fetched_run.metadata
            assert fetched_run.metadata["error"]["error_type"] == "llm_authentication"

    @pytest.mark.asyncio
    async def test_error_data_stored_on_generic_exception(
        self,
        server: SyncServer,
        default_user,
        test_agent_with_tool,
    ):
        """
        Test that generic exceptions during streaming store error data.
        Verifies:
        - Generic exceptions result in error data being stored
        - Error details are preserved in metadata with 'internal_error' type
        """
        from unittest.mock import patch

        from letta.agents.letta_agent_v3 import LettaAgentV3
        from letta.services.streaming_service import StreamingService

        streaming_service = StreamingService(server)
        request = LettaStreamingRequest(
            messages=[MessageCreate(role=MessageRole.user, content="Hello")],
            max_steps=1,
            stream_tokens=True,
            background=False,
        )

        # Async-generator mock: a non-Letta exception exercises the catch-all path.
        async def mock_stream_raises_generic_error(*args, **kwargs):
            raise RuntimeError("Unexpected internal error")
            yield

        with patch.object(LettaAgentV3, "stream", mock_stream_raises_generic_error):
            run, stream_response = await streaming_service.create_agent_stream(
                agent_id=test_agent_with_tool.id,
                actor=default_user,
                request=request,
                run_type="test_generic_error",
            )
            async for _ in stream_response.body_iterator:
                pass
            await asyncio.sleep(0.2)
            fetched_run = await server.run_manager.get_run_by_id(run.id, actor=default_user)
            assert fetched_run.status == RunStatus.failed
            assert fetched_run.metadata is not None, "Run metadata should contain error info for generic exception"
            assert "error" in fetched_run.metadata
            assert fetched_run.metadata["error"]["error_type"] == "internal_error"
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/managers/test_cancellation.py",
"license": "Apache License 2.0",
"lines": 1442,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/test_message_serialization.py | from letta.llm_api.openai_client import fill_image_content_in_responses_input
from letta.schemas.enums import MessageRole
from letta.schemas.letta_message_content import Base64Image, ImageContent, TextContent
from letta.schemas.message import Message
def _user_message_with_image_first(text: str) -> Message:
    """Build a user Message whose content places an image part before the text part."""
    parts = [
        ImageContent(source=Base64Image(media_type="image/png", data="dGVzdA==")),
        TextContent(text=text),
    ]
    return Message(role=MessageRole.user, content=parts)
def test_to_openai_responses_dicts_handles_image_first_content():
    """Image-before-text content must serialize to both an input_text and an input_image part."""
    serialized = Message.to_openai_responses_dicts_from_list([_user_message_with_image_first("hello world")])
    content_parts = serialized[0]["content"]
    has_text = any(part["type"] == "input_text" and part["text"] == "hello world" for part in content_parts)
    has_image = any(part["type"] == "input_image" for part in content_parts)
    assert has_text
    assert has_image
def test_fill_image_content_in_responses_input_includes_image_parts():
    """fill_image_content_in_responses_input leaves already-complete payloads unchanged."""
    message = _user_message_with_image_first("describe image")
    payload = Message.to_openai_responses_dicts_from_list([message])
    assert fill_image_content_in_responses_input(payload, [message]) == payload
def test_to_openai_responses_dicts_handles_image_only_content():
    """A message whose only content is an image serializes with an input_image part first."""
    only_image = Message(
        role=MessageRole.user,
        content=[ImageContent(source=Base64Image(media_type="image/png", data="dGVzdA=="))],
    )
    payload = Message.to_openai_responses_dicts_from_list([only_image])
    assert payload[0]["content"][0]["type"] == "input_image"
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/test_message_serialization.py",
"license": "Apache License 2.0",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/test_prompt_caching.py | """
Integration tests for prompt caching validation.
These tests verify that our LLM clients properly enable caching for each provider:
- OpenAI: Automatic caching (≥1024 tokens)
- Anthropic: Requires cache_control breakpoints (≥1024 tokens for Sonnet 4.5)
- Gemini: Implicit caching on 2.5 models (≥1024 tokens for 2.5 Flash)
Test strategy:
1. Create agent with large memory block (>5000 tokens to exceed all thresholds)
2. Send message 1 → assert cache WRITE occurred
3. Send message 2 → assert cache HIT occurred
If these tests fail, it means:
- For OpenAI: Something is broken (caching is automatic)
- For Anthropic: We're not adding cache_control breakpoints
- For Gemini: Implicit caching isn't working (or we're below threshold)
"""
import logging
import os
import uuid
import pytest
from letta_client import AsyncLetta
from letta_client.types import MessageCreateParam
logger = logging.getLogger(__name__)
# ------------------------------
# Test Configuration
# ------------------------------
# Large memory block to exceed all provider thresholds
# NOTE: The actual token count depends on the tokenizer each provider uses.
# We aim for a very large block to ensure we exceed:
# - OpenAI: 1,024 tokens
# - Anthropic Sonnet 4.5: 1,024 tokens (Opus/Haiku 4.5: 4,096)
# - Gemini 2.5 Flash: 1,024 tokens (2.5 Pro: 4,096, 3 Pro Preview: 2,048)
LARGE_MEMORY_BLOCK = (
    # Part 1: base persona text (markdown-style sections).
    """
You are an advanced AI assistant with extensive knowledge across multiple domains.
This memory block is intentionally very large to ensure prompt caching thresholds are exceeded
for testing purposes. The content provides rich context that should be cached by the LLM
provider on the first request and reused on subsequent requests to the same agent.
IMPORTANT: This block is designed to exceed 2,048 tokens to test all provider thresholds.
You are an advanced AI assistant with extensive knowledge across multiple domains.
# Core Capabilities
## Technical Knowledge
- Software Engineering: Expert in Python, JavaScript, TypeScript, Go, Rust, and many other languages
- System Design: Deep understanding of distributed systems, microservices, and cloud architecture
- DevOps: Proficient in Docker, Kubernetes, CI/CD pipelines, and infrastructure as code
- Databases: Experience with SQL (PostgreSQL, MySQL) and NoSQL (MongoDB, Redis, Cassandra) databases
- Machine Learning: Knowledge of neural networks, transformers, and modern ML frameworks
## Problem Solving Approach
When tackling problems, you follow a structured methodology:
1. Understand the requirements thoroughly
2. Break down complex problems into manageable components
3. Consider multiple solution approaches
4. Evaluate trade-offs between different options
5. Implement solutions with clean, maintainable code
6. Test thoroughly and iterate based on feedback
## Communication Style
- Clear and concise explanations
- Use examples and analogies when helpful
- Adapt technical depth to the audience
- Ask clarifying questions when requirements are ambiguous
- Provide context and rationale for recommendations
# Domain Expertise
## Web Development
You have deep knowledge of:
- Frontend: React, Vue, Angular, Next.js, modern CSS frameworks
- Backend: Node.js, Express, FastAPI, Django, Flask
- API Design: REST, GraphQL, gRPC
- Authentication: OAuth, JWT, session management
- Performance: Caching strategies, CDNs, lazy loading
## Data Engineering
You understand:
- ETL pipelines and data transformation
- Data warehousing concepts (Snowflake, BigQuery, Redshift)
- Stream processing (Kafka, Kinesis)
- Data modeling and schema design
- Data quality and validation
## Cloud Platforms
You're familiar with:
- AWS: EC2, S3, Lambda, RDS, DynamoDB, CloudFormation
- GCP: Compute Engine, Cloud Storage, Cloud Functions, BigQuery
- Azure: Virtual Machines, Blob Storage, Azure Functions
- Serverless architectures and best practices
- Cost optimization strategies
## Security
You consider:
- Common vulnerabilities (OWASP Top 10)
- Secure coding practices
- Encryption and key management
- Access control and authorization patterns
- Security audit and compliance requirements
# Interaction Principles
## Helpfulness
- Provide actionable guidance
- Share relevant resources and documentation
- Offer multiple approaches when appropriate
- Point out potential pitfalls and edge cases
## Accuracy
- Verify information before sharing
- Acknowledge uncertainty when appropriate
- Correct mistakes promptly
- Stay up-to-date with best practices
## Efficiency
- Get to the point quickly
- Avoid unnecessary verbosity
- Focus on what's most relevant
- Provide code examples when they clarify concepts
# Background Context
## Your Role
You serve as a technical advisor, collaborator, and problem solver. Your goal is to help users
achieve their objectives efficiently while teaching them along the way.
## Continuous Improvement
You learn from each interaction:
- Adapting to user preferences and communication styles
- Refining explanations based on feedback
- Expanding knowledge through conversations
- Improving recommendations based on outcomes
## Ethical Guidelines
- Prioritize user privacy and data security
- Recommend sustainable and maintainable solutions
- Consider accessibility and inclusivity
- Promote best practices and industry standards
This memory block is intentionally large to ensure prompt caching thresholds are exceeded
for testing purposes. The content provides rich context that should be cached by the LLM
provider on the first request and reused on subsequent requests to the same agent.
---
Additional Context (Repeated for Token Count):
"""
    # Part 2: six near-identical "Section N" blocks joined with blank lines,
    # purely to pad the token count past every provider's caching threshold.
    + "\n\n".join(
        [
            f"Section {i + 1}: "
            + """
You have deep expertise in software development, including but not limited to:
- Programming languages: Python, JavaScript, TypeScript, Java, C++, Rust, Go, Swift, Kotlin, Ruby, PHP, Scala
- Web frameworks: React, Vue, Angular, Django, Flask, FastAPI, Express, Next.js, Nuxt, SvelteKit, Remix, Astro
- Databases: PostgreSQL, MySQL, MongoDB, Redis, Cassandra, DynamoDB, ElasticSearch, Neo4j, InfluxDB, TimescaleDB
- Cloud platforms: AWS (EC2, S3, Lambda, ECS, EKS, RDS), GCP (Compute Engine, Cloud Run, GKE), Azure (VMs, Functions, AKS)
- DevOps tools: Docker, Kubernetes, Terraform, Ansible, Jenkins, GitHub Actions, GitLab CI, CircleCI, ArgoCD
- Testing frameworks: pytest, Jest, Mocha, JUnit, unittest, Cypress, Playwright, Selenium, TestNG, RSpec
- Architecture patterns: Microservices, Event-driven, Serverless, Monolithic, CQRS, Event Sourcing, Hexagonal
- API design: REST, GraphQL, gRPC, WebSockets, Server-Sent Events, tRPC, JSON-RPC
- Security: OAuth 2.0, JWT, SAML, encryption (AES, RSA), OWASP Top 10, secure coding practices, penetration testing
- Performance: Caching strategies (Redis, Memcached, CDN), load balancing (Nginx, HAProxy), database optimization (indexing, query tuning)
- Monitoring: Prometheus, Grafana, DataDog, New Relic, Sentry, Elastic APM, OpenTelemetry
- Message queues: RabbitMQ, Apache Kafka, AWS SQS, Google Pub/Sub, NATS, Redis Streams
- Search engines: Elasticsearch, Solr, Algolia, Meilisearch, Typesense
- Logging: ELK Stack, Loki, Fluentd, Logstash, CloudWatch Logs
- CI/CD: Jenkins, GitLab CI/CD, GitHub Actions, CircleCI, Travis CI, Bamboo
"""
            for i in range(6)
        ]
    )
    # Part 3: trailing note; .strip() applies only to this final literal.
    + """
This content is repeated to ensure we exceed the 2,048 token threshold for all providers.
""".strip()
)
# Model configurations for testing.
# Each pytest.param's positional values line up with the parametrize signature
# used by the tests below: (model, model_settings, min_tokens, read_field, write_field).
CACHING_TEST_CONFIGS = [
    # OpenAI: Automatic caching, ≥1024 tokens
    pytest.param(
        "openai/gpt-4o",
        {},
        1024,  # Min tokens for caching
        "cached_tokens",  # Field name in prompt_tokens_details
        None,  # No write field (caching is free)
        id="openai-gpt4o-auto",
    ),
    # Anthropic: Requires cache_control, ≥1024 tokens for Sonnet 4.5
    pytest.param(
        "anthropic/claude-sonnet-4-5-20250929",
        {},
        1024,  # Min tokens for Sonnet 4.5
        "cache_read_tokens",  # Field name for cache hits
        "cache_creation_tokens",  # Field name for cache writes
        id="anthropic-sonnet-4.5-explicit",
    ),
    # Gemini: Implicit caching on 2.5 models, ≥1024 tokens for 2.5 Flash
    pytest.param(
        "google_ai/gemini-2.5-flash",
        {},
        1024,  # Min tokens for 2.5 Flash
        "cached_tokens",  # Field name (normalized from cached_content_token_count)
        None,  # No separate write field
        id="gemini-2.5-flash-implicit",
    ),
    # Gemini 3 Pro Preview: NOTE - Implicit caching seems to NOT work for 3 Pro Preview
    # The docs say "Implicit caching is enabled by default for all Gemini 2.5 models"
    # This suggests 3 Pro Preview may require explicit caching instead
    pytest.param(
        "google_ai/gemini-3.1-pro-preview",
        {},
        2048,  # Min tokens for 3 Pro Preview
        "cached_tokens",  # Field name (normalized from cached_content_token_count)
        None,  # No separate write field
        id="gemini-3.1-pro-preview-implicit",
        marks=pytest.mark.xfail(reason="Gemini 3 Pro Preview doesn't have implicit caching (only 2.5 models do)"),
    ),
]
# Filter based on environment variable if set.
# NOTE: entries are pytest.param(...) ParameterSet namedtuples, so config[0] is
# the whole `values` tuple, not the model string. The previous `requested in
# config[0]` therefore did tuple membership (exact-equality only), so substring
# filters like PROMPT_CACHE_TEST_MODEL=gpt-4o silently matched nothing.
# The model handle is the first positional value: config.values[0].
requested = os.getenv("PROMPT_CACHE_TEST_MODEL")
if requested:
    CACHING_TEST_CONFIGS = [config for config in CACHING_TEST_CONFIGS if requested in config.values[0]]
# ------------------------------
# Fixtures
# ------------------------------
@pytest.fixture
def base_url() -> str:
    """Letta server URL; honors LETTA_SERVER_URL with a localhost fallback."""
    return os.environ.get("LETTA_SERVER_URL", "http://localhost:8283")
@pytest.fixture
async def async_client(base_url: str) -> AsyncLetta:
    """Async Letta client bound to the test server URL."""
    client = AsyncLetta(base_url=base_url)
    return client
# ------------------------------
# Helper Functions
# ------------------------------
async def create_agent_with_large_memory(client: AsyncLetta, model: str, model_settings: dict, suffix: str):
    """
    Create an agent whose persona block is large enough to cross every
    provider's prompt-caching threshold.

    Uses the DEFAULT agent configuration (thinking enabled, base tools
    included) so caching is exercised against real production settings —
    if these tests fail, that reveals actual caching issues.
    """
    from letta_client.types import CreateBlockParam

    # Model handles may contain "." or "/", which are invalid in agent names.
    safe_suffix = suffix.replace(".", "-").replace("/", "-")
    agent_name = f"cache-test-{safe_suffix}-{uuid.uuid4().hex[:8]}"

    agent = await client.agents.create(
        name=agent_name,
        model=model,
        embedding="openai/text-embedding-3-small",
        memory_blocks=[CreateBlockParam(label="persona", value=LARGE_MEMORY_BLOCK)],
        # Defaults are intentionally untouched: include_base_tools=True and
        # thinking enabled mirror production behavior, not a simplified setup.
    )
    logger.info(f"Created agent {agent.id} with model {model} using default config")
    return agent
async def cleanup_agent(client: AsyncLetta, agent_id: str):
    """Best-effort deletion of a test agent; failures are logged, never raised."""
    try:
        await client.agents.delete(agent_id)
    except Exception as e:
        logger.warning(f"Failed to cleanup agent {agent_id}: {e}")
    else:
        logger.info(f"Cleaned up agent {agent_id}")
def assert_usage_sanity(usage, context: str = ""):
    """
    Run cheap plausibility checks on a usage payload.

    Catches obviously wrong accounting such as output_tokens=1 (impossible
    for a real response), cumulative values being accumulated instead of
    assigned, or token counts far beyond model limits.

    Args:
        usage: Usage-like object exposing prompt_tokens / completion_tokens /
            cache_write_tokens / cached_input_tokens (any attribute may be None).
        context: Optional label prepended to every failure message.

    Raises:
        AssertionError: when a value falls outside its sanity bounds.
    """
    prefix = "" if not context else f"[{context}] "

    assert usage is not None, f"{prefix}Usage should not be None"

    # Prompt side: must be positive and within a generous upper bound.
    if usage.prompt_tokens is not None:
        assert 0 < usage.prompt_tokens, f"{prefix}prompt_tokens should be > 0, got {usage.prompt_tokens}"
        assert usage.prompt_tokens < 500000, f"{prefix}prompt_tokens unreasonably high: {usage.prompt_tokens}"

    # Completion side: a real response emits more than a single token.
    if usage.completion_tokens is not None:
        assert usage.completion_tokens > 1, (
            f"{prefix}completion_tokens={usage.completion_tokens} is suspiciously low. "
            "A real response should have > 1 output token. This may indicate a usage tracking bug."
        )
        assert usage.completion_tokens < 50000, (
            f"{prefix}completion_tokens={usage.completion_tokens} unreasonably high. "
            "This may indicate cumulative values being accumulated instead of assigned."
        )

    # Cache-write bound. NOTE(review): total_input includes cache_write_tokens
    # itself, so this can only fail if the other fields sum negative — kept
    # as-is to preserve existing behavior.
    if usage.cache_write_tokens is not None and usage.cache_write_tokens > 0:
        total_input = (usage.prompt_tokens or 0) + (usage.cache_write_tokens or 0) + (usage.cached_input_tokens or 0)
        assert usage.cache_write_tokens <= total_input, (
            f"{prefix}cache_write_tokens ({usage.cache_write_tokens}) > total input ({total_input})"
        )

    # Cache-read bound. NOTE(review): same caveat — the bound includes the
    # cached count itself, so it only rejects a negative prompt total.
    if usage.cached_input_tokens is not None and usage.cached_input_tokens > 0:
        total_input = (usage.prompt_tokens or 0) + (usage.cached_input_tokens or 0)
        assert usage.cached_input_tokens <= total_input, (
            f"{prefix}cached_input_tokens ({usage.cached_input_tokens}) exceeds reasonable bounds"
        )
# ------------------------------
# Prompt Caching Validation Tests
# ------------------------------
@pytest.mark.asyncio
@pytest.mark.parametrize("model,model_settings,min_tokens,read_field,write_field", CACHING_TEST_CONFIGS)
async def test_prompt_caching_cache_write_then_read(
    async_client: AsyncLetta,
    model: str,
    model_settings: dict,
    min_tokens: int,
    read_field: str,
    write_field: str,
):
    """
    Test that prompt caching properly creates cache on first message and hits cache on second message.
    This test validates that our LLM clients are correctly enabling caching:
    - OpenAI: Should automatically cache (no config needed)
    - Anthropic: Should add cache_control breakpoints
    - Gemini: Should benefit from implicit caching on 2.5 models
    Args:
        model: Model handle (e.g., "openai/gpt-4o")
        model_settings: Additional model settings
        min_tokens: Minimum token threshold for this provider
        read_field: Field name in prompt_tokens_details for cache reads
            (informational only — not referenced in the body; assertions use
            the normalized usage.cached_input_tokens instead)
        write_field: Field name in prompt_tokens_details for cache writes (None if no separate field)
    """
    agent = await create_agent_with_large_memory(
        async_client,
        model,
        model_settings,
        "write-read",
    )
    try:
        # Message 1: First interaction should trigger cache WRITE
        response1 = await async_client.agents.messages.create(
            agent_id=agent.id,
            messages=[MessageCreateParam(role="user", content="Hello! Please introduce yourself.")],
        )
        assert response1.usage is not None, "First message should have usage data"
        assert_usage_sanity(response1.usage, f"{model} msg1")
        logger.info(
            f"[{model}] Message 1 usage: "
            f"prompt={response1.usage.prompt_tokens}, "
            f"cached_input={response1.usage.cached_input_tokens}, "
            f"cache_write={response1.usage.cache_write_tokens}"
        )
        # Verify we exceeded the minimum token threshold
        # Note: For Anthropic, prompt_tokens only shows non-cached tokens, so we need to add cache_write_tokens
        total_input_tokens = (
            response1.usage.prompt_tokens + (response1.usage.cache_write_tokens or 0) + (response1.usage.cached_input_tokens or 0)
        )
        assert total_input_tokens >= min_tokens, f"Total input must be ≥{min_tokens} tokens for caching, got {total_input_tokens}"
        # For providers with separate write field (Anthropic), check cache creation on first message
        if write_field:
            write_tokens = response1.usage.cache_write_tokens
            logger.info(f"[{model}] Cache write tokens on message 1: {write_tokens}")
            # Anthropic should show cache creation on first message
            if "anthropic" in model:
                assert write_tokens is not None and write_tokens > 0, (
                    f"Anthropic should create cache on first message, got cache_write_tokens={write_tokens}"
                )
        # Message 2: Follow-up with same agent/context should trigger cache HIT
        response2 = await async_client.agents.messages.create(
            agent_id=agent.id,
            messages=[MessageCreateParam(role="user", content="What are your main areas of expertise?")],
        )
        assert response2.usage is not None, "Second message should have usage data"
        assert_usage_sanity(response2.usage, f"{model} msg2")
        logger.info(
            f"[{model}] Message 2 usage: "
            f"prompt={response2.usage.prompt_tokens}, "
            f"cached_input={response2.usage.cached_input_tokens}, "
            f"cache_write={response2.usage.cache_write_tokens}"
        )
        # CRITICAL ASSERTION: Cache hit should occur on second message
        read_tokens = response2.usage.cached_input_tokens
        logger.info(f"[{model}] Cache read tokens on message 2: {read_tokens}")
        assert read_tokens is not None and read_tokens > 0, (
            f"Provider {model} should have cache hit on message 2, got cached_input_tokens={read_tokens}. This means caching is NOT working!"
        )
        # The cached amount should be significant (most of the prompt), but only
        # a 15% floor is enforced below to allow for conversation history and
        # thinking-block churn.
        # Note: For Anthropic, prompt_tokens only shows non-cached tokens, so total = prompt + cached
        total_input_msg2 = (
            response2.usage.prompt_tokens + (response2.usage.cached_input_tokens or 0) + (response2.usage.cache_write_tokens or 0)
        )
        cache_hit_ratio = read_tokens / total_input_msg2 if total_input_msg2 > 0 else 0
        logger.info(f"[{model}] Cache hit ratio: {cache_hit_ratio:.2%}")
        # Note: With thinking mode enabled, Anthropic may have lower cache ratios due to
        # thinking blocks changing between messages. The key assertion is that SOME caching occurs.
        assert cache_hit_ratio >= 0.15, (
            f"Expected >15% cache hit ratio, got {cache_hit_ratio:.2%}. Some portion of prompt should be cached!"
        )
    finally:
        await cleanup_agent(async_client, agent.id)
@pytest.mark.asyncio
@pytest.mark.parametrize("model,model_settings,min_tokens,read_field,write_field", CACHING_TEST_CONFIGS)
async def test_prompt_caching_multiple_messages(
    async_client: AsyncLetta,
    model: str,
    model_settings: dict,
    min_tokens: int,
    read_field: str,
    write_field: str,
):
    """
    Test that prompt caching continues to work across multiple messages in a conversation.
    After the initial cache write, subsequent messages should continue to hit the cache
    as long as the context remains similar.

    Note: min_tokens, read_field, and write_field are accepted for parametrize
    signature compatibility but are not referenced in this test body.
    """
    agent = await create_agent_with_large_memory(
        async_client,
        model,
        model_settings,
        "multi-msg",
    )
    try:
        # Send 3 messages to ensure cache persists
        messages_to_send = [
            "Hello! What can you help me with?",
            "Tell me about your technical knowledge.",
            "What's your approach to solving problems?",
        ]
        responses = []
        for i, message in enumerate(messages_to_send):
            response = await async_client.agents.messages.create(
                agent_id=agent.id,
                messages=[MessageCreateParam(role="user", content=message)],
            )
            responses.append(response)
            if response.usage:
                read_tokens = response.usage.cached_input_tokens
                # NOTE(review): the logged ratio divides by prompt_tokens only;
                # for providers where prompt_tokens excludes cached tokens this
                # can exceed 100%. Log-only, so harmless.
                logger.info(
                    f"[{model}] Message {i + 1}: prompt={response.usage.prompt_tokens}, "
                    f"cached={read_tokens}, ratio={read_tokens / response.usage.prompt_tokens:.2%}"
                    if read_tokens and response.usage.prompt_tokens
                    else f"[{model}] Message {i + 1}: prompt={response.usage.prompt_tokens}, cached=N/A"
                )
        # After message 1, all subsequent messages should have cache hits
        for i in range(1, len(responses)):
            assert responses[i].usage is not None, f"Message {i + 1} should have usage"
            read_tokens = responses[i].usage.cached_input_tokens
            assert read_tokens is not None and read_tokens > 0, (
                f"Message {i + 1} should have cache hit, got cached_input_tokens={read_tokens}"
            )
    finally:
        await cleanup_agent(async_client, agent.id)
@pytest.mark.asyncio
@pytest.mark.parametrize("model,model_settings,min_tokens,read_field,write_field", CACHING_TEST_CONFIGS)
async def test_prompt_caching_cache_invalidation_on_memory_update(
    async_client: AsyncLetta,
    model: str,
    model_settings: dict,
    min_tokens: int,
    read_field: str,
    write_field: str,
):
    """
    Verify that editing a memory block invalidates the provider prompt cache.
    Changing memory alters the compiled prompt, so the request that follows the
    edit should see a much lower cache-hit ratio than the one before it.
    """
    agent = await create_agent_with_large_memory(
        async_client,
        model,
        model_settings,
        "cache-invalidation",
    )
    try:
        # Turn 1 warms the provider cache.
        await async_client.agents.messages.create(
            agent_id=agent.id,
            messages=[MessageCreateParam(role="user", content="Hello!")],
        )
        # Turn 2 should read from the warmed cache.
        second_reply = await async_client.agents.messages.create(
            agent_id=agent.id,
            messages=[MessageCreateParam(role="user", content="How are you?")],
        )
        if second_reply.usage:
            hits_before = second_reply.usage.cached_input_tokens
            prompt_before = second_reply.usage.prompt_tokens
        else:
            hits_before = None
            prompt_before = 0
        logger.info(f"[{model}] Cache hit before memory update: {hits_before}")
        assert hits_before is not None and hits_before > 0, "Should have cache hit before update"
        # Rewrite the persona block; this changes the prompt and should bust the cache.
        agent = await async_client.agents.get(agent_id=agent.id)
        persona_block = next((b for b in agent.memory_blocks if b.label == "persona"), None)
        assert persona_block is not None, "Should have persona block"
        await async_client.blocks.update(
            block_id=persona_block.id,
            label="persona",
            value=LARGE_MEMORY_BLOCK + "\n\nADDITIONAL NOTE: You are now extra helpful!",
        )
        # Turn 3 lands after the memory edit, so the old cache entry no longer matches.
        third_reply = await async_client.agents.messages.create(
            agent_id=agent.id,
            messages=[MessageCreateParam(role="user", content="What changed?")],
        )
        hits_after = third_reply.usage.cached_input_tokens if third_reply.usage else None
        prompt_after = third_reply.usage.prompt_tokens if third_reply.usage else 0
        logger.info(f"[{model}] Cache hit after memory update: {hits_after}")
        # Some providers may still match a small shared prefix, so compare
        # ratios rather than demanding a hard zero after the update.
        ratio_before = hits_before / prompt_before if prompt_before > 0 else 0
        ratio_after = hits_after / prompt_after if hits_after and prompt_after > 0 else 0
        logger.info(f"[{model}] Cache ratio before: {ratio_before:.2%}, after: {ratio_after:.2%}")
        assert ratio_after < ratio_before, "Cache hit ratio should drop after memory update"
    finally:
        await cleanup_agent(async_client, agent.id)
# ------------------------------
# Provider-Specific Cache Behavior Tests
# ------------------------------
@pytest.mark.asyncio
async def test_anthropic_system_prompt_stability(async_client: AsyncLetta):
    """
    Check if Anthropic system prompt is actually stable between REAL requests.
    Uses provider traces from actual messages sent to Anthropic to compare
    what was really sent, not what the preview endpoint generates.
    Diagnostic test: it logs its findings (including a unified diff on
    mismatch) rather than asserting; if traces are unavailable it returns
    early and passes vacuously.
    """
    import difflib
    import json
    model = "anthropic/claude-sonnet-4-5-20250929"
    agent = await create_agent_with_large_memory(async_client, model, {}, "anthropic-stability")
    try:
        # Send message 1
        response1 = await async_client.agents.messages.create(
            agent_id=agent.id,
            messages=[MessageCreateParam(role="user", content="Hello!")],
        )
        # Send message 2
        response2 = await async_client.agents.messages.create(
            agent_id=agent.id,
            messages=[MessageCreateParam(role="user", content="Follow up!")],
        )
        # Get provider traces from ACTUAL requests sent to Anthropic.
        # Any response message carrying a step_id is sufficient to look up the trace.
        step_id_1, step_id_2 = None, None
        if response1.messages:
            for msg in response1.messages:
                if hasattr(msg, "step_id") and msg.step_id:
                    step_id_1 = msg.step_id
                    break
        if response2.messages:
            for msg in response2.messages:
                if hasattr(msg, "step_id") and msg.step_id:
                    step_id_2 = msg.step_id
                    break
        if not step_id_1 or not step_id_2:
            # Diagnostic: bail out (pass vacuously) instead of failing when traces are missing.
            logger.error("Could not find step_ids from responses")
            return
        # Get the ACTUAL requests that were sent to Anthropic
        trace1 = await async_client.telemetry.retrieve_provider_trace(step_id=step_id_1)
        trace2 = await async_client.telemetry.retrieve_provider_trace(step_id=step_id_2)
        if not (trace1 and trace2 and trace1.request_json and trace2.request_json):
            logger.error("Could not retrieve provider traces")
            return
        # Compare the ACTUAL system prompts sent to Anthropic.
        # sort_keys makes serialization deterministic so only real content
        # differences (not dict ordering) show up in the comparison.
        system1 = trace1.request_json.get("system", [])
        system2 = trace2.request_json.get("system", [])
        system1_str = json.dumps(system1, sort_keys=True)
        system2_str = json.dumps(system2, sort_keys=True)
        if system1_str == system2_str:
            logger.info("✅ ACTUAL SYSTEM PROMPTS SENT TO ANTHROPIC ARE IDENTICAL")
            logger.info("   → Cache SHOULD work, but isn't. Issue is likely:")
            logger.info("   → 1. Thinking blocks breaking cache")
            logger.info("   → 2. Tool definitions changing")
            logger.info("   → 3. Something else in the request changing")
        else:
            logger.error("❌ ACTUAL SYSTEM PROMPTS SENT TO ANTHROPIC DIFFER!")
            logger.info("=" * 80)
            logger.info("SYSTEM PROMPT DIFF (actual requests):")
            diff = difflib.unified_diff(
                system1_str.splitlines(keepends=True),
                system2_str.splitlines(keepends=True),
                fromfile="message1_actual",
                tofile="message2_actual",
                lineterm="",
            )
            diff_output = "\n".join(diff)
            logger.info(diff_output[:2000])  # Truncate if too long
            logger.info("=" * 80)
            # A recompile timestamp inside the system prompt is a known cache-buster.
            if "System prompt last recompiled" in diff_output:
                logger.error("⚠️ TIMESTAMP IS CHANGING IN ACTUAL REQUESTS!")
                logger.error("   → This is the root cause of cache misses")
        # Log the observed cache usage for both turns regardless of the outcome above.
        logger.info(f"Message 1: cache_write={response1.usage.cache_write_tokens if response1.usage else 'N/A'}")
        logger.info(f"Message 2: cached_input={response2.usage.cached_input_tokens if response2.usage else 'N/A'}")
    finally:
        await cleanup_agent(async_client, agent.id)
@pytest.mark.asyncio
async def test_anthropic_inspect_raw_request(async_client: AsyncLetta):
    """
    Debug test to inspect the raw Anthropic request and see where cache_control is placed.
    Sends two messages, then logs — from the provider traces of the actual
    requests — whether the system block and the last message block carry
    cache_control markers, plus the observed cache write/read token counts.
    No assertions: this test exists purely to produce diagnostic logs.
    """
    model = "anthropic/claude-sonnet-4-5-20250929"
    agent = await create_agent_with_large_memory(async_client, model, {}, "anthropic-debug")
    try:
        # Message 1
        response1 = await async_client.agents.messages.create(
            agent_id=agent.id,
            messages=[MessageCreateParam(role="user", content="Hello!")],
        )
        # Get step_id from message 1
        step_id_1 = None
        if response1.messages:
            for msg in response1.messages:
                if hasattr(msg, "step_id") and msg.step_id:
                    step_id_1 = msg.step_id
                    break
        if step_id_1:
            provider_trace_1 = await async_client.telemetry.retrieve_provider_trace(step_id=step_id_1)
            if provider_trace_1 and provider_trace_1.request_json:
                logger.info("=" * 80)
                logger.info("MESSAGE 1 REQUEST:")
                # Anthropic caches up to a breakpoint; the last system block should carry cache_control.
                logger.info(f"System has cache_control: {'cache_control' in provider_trace_1.request_json.get('system', [{}])[-1]}")
                logger.info(f"Number of messages: {len(provider_trace_1.request_json.get('messages', []))}")
                last_msg_content = provider_trace_1.request_json.get("messages", [{}])[-1].get("content", [])
                if isinstance(last_msg_content, list) and len(last_msg_content) > 0:
                    logger.info(f"Last message block has cache_control: {'cache_control' in last_msg_content[-1]}")
                logger.info("=" * 80)
        # Message 2 - this should hit the cache
        response2 = await async_client.agents.messages.create(
            agent_id=agent.id,
            messages=[MessageCreateParam(role="user", content="Follow up!")],
        )
        # Get step_id from message 2
        step_id_2 = None
        if response2.messages:
            for msg in response2.messages:
                if hasattr(msg, "step_id") and msg.step_id:
                    step_id_2 = msg.step_id
                    break
        if step_id_2:
            provider_trace_2 = await async_client.telemetry.retrieve_provider_trace(step_id=step_id_2)
            if provider_trace_2 and provider_trace_2.request_json:
                logger.info("=" * 80)
                logger.info("MESSAGE 2 REQUEST:")
                logger.info(f"System has cache_control: {'cache_control' in provider_trace_2.request_json.get('system', [{}])[-1]}")
                logger.info(f"Number of messages: {len(provider_trace_2.request_json.get('messages', []))}")
                # Show all messages to understand the structure
                for i, msg in enumerate(provider_trace_2.request_json.get("messages", [])):
                    logger.info(f"  Message {i}: role={msg.get('role')}")
                    content = msg.get("content")
                    if isinstance(content, list):
                        for j, block in enumerate(content):
                            logger.info(f"    Block {j}: type={block.get('type')}, has_cache_control={'cache_control' in block}")
                last_msg_content = provider_trace_2.request_json.get("messages", [{}])[-1].get("content", [])
                if isinstance(last_msg_content, list) and len(last_msg_content) > 0:
                    logger.info(f"Last message block has cache_control: {'cache_control' in last_msg_content[-1]}")
                logger.info("=" * 80)
        # Summarize cache behavior across the two turns.
        logger.info(f"Message 1 cache_write_tokens: {response1.usage.cache_write_tokens if response1.usage else 'N/A'}")
        logger.info(f"Message 2 cached_input_tokens: {response2.usage.cached_input_tokens if response2.usage else 'N/A'}")
    finally:
        await cleanup_agent(async_client, agent.id)
@pytest.mark.asyncio
async def test_anthropic_cache_control_breakpoints(async_client: AsyncLetta):
    """
    Anthropic-specific test to verify we're adding cache_control breakpoints.
    If this test fails, it means cache_control isn't working properly - either:
    - Breakpoints aren't being added at all
    - Breakpoints are positioned incorrectly
    - Something in the prompt is changing between messages
    We send multiple messages to account for any timing/routing issues.
    """
    model = "anthropic/claude-sonnet-4-5-20250929"
    agent = await create_agent_with_large_memory(async_client, model, {}, "anthropic-breakpoints")
    try:
        # First message: a cache WRITE proves breakpoints were attached to the request.
        response1 = await async_client.agents.messages.create(
            agent_id=agent.id,
            messages=[MessageCreateParam(role="user", content="Hello!")],
        )
        assert response1.usage is not None, "Should have usage data"
        # Anthropic should show cache_write_tokens > 0 on first message if cache_control is set
        cache_creation = response1.usage.cache_write_tokens
        logger.info(f"[Anthropic] First message cache_write_tokens: {cache_creation}")
        assert cache_creation is not None and cache_creation >= 1024, (
            f"Anthropic should create cache ≥1024 tokens on first message. Got {cache_creation}. This means cache_control breakpoints are NOT being added!"
        )
        # Send multiple follow-up messages to increase chance of cache hit
        follow_up_messages = [
            "Follow up question",
            "Tell me more",
            "What else can you do?",
        ]
        cached_token_counts = []
        for i, msg in enumerate(follow_up_messages):
            response = await async_client.agents.messages.create(
                agent_id=agent.id,
                messages=[MessageCreateParam(role="user", content=msg)],
            )
            cache_read = response.usage.cached_input_tokens if response.usage else 0
            # Normalize None -> 0 before collecting: usage may report
            # cached_input_tokens as None, and max() over a list containing
            # None raises TypeError against the ints.
            cached_token_counts.append(cache_read or 0)
            logger.info(f"[Anthropic] Message {i + 2} cached_input_tokens: {cache_read}")
            # Early exit if we got a cache hit
            if cache_read and cache_read > 0:
                logger.info(f"[Anthropic] Cache hit detected on message {i + 2}, stopping early")
                break
        # Check if ANY of the follow-up messages had a cache hit
        max_cached = max(cached_token_counts) if cached_token_counts else 0
        logger.info(f"[Anthropic] Max cached tokens across {len(cached_token_counts)} messages: {max_cached}")
        assert max_cached > 0, (
            f"Anthropic should read from cache in at least one of {len(follow_up_messages)} follow-up messages. Got max={max_cached}. Cache reads are NOT working!"
        )
    finally:
        await cleanup_agent(async_client, agent.id)
@pytest.mark.asyncio
async def test_openai_automatic_caching(async_client: AsyncLetta):
    """
    OpenAI-specific test to verify automatic caching works.
    OpenAI caching requires no explicit markers; any prompt prefix of
    ≥1024 tokens is cached server-side, so the second turn should report reads.
    """
    model = "openai/gpt-4o"
    agent = await create_agent_with_large_memory(async_client, model, {}, "openai-auto")
    try:
        # Warm-up turn: OpenAI reports no cache reads on the very first request.
        first_reply = await async_client.agents.messages.create(
            agent_id=agent.id,
            messages=[MessageCreateParam(role="user", content="Hello!")],
        )
        first_cached = first_reply.usage.cached_input_tokens if first_reply.usage else None
        logger.info(f"[OpenAI] First message cached_input_tokens: {first_cached} (should be 0 or None)")
        # The second turn should hit the automatically-created cache.
        second_reply = await async_client.agents.messages.create(
            agent_id=agent.id,
            messages=[MessageCreateParam(role="user", content="What can you help with?")],
        )
        second_cached = second_reply.usage.cached_input_tokens if second_reply.usage else None
        logger.info(f"[OpenAI] Second message cached_input_tokens: {second_cached}")
        assert second_cached is not None and second_cached >= 1024, (
            f"OpenAI should cache ≥1024 tokens automatically on second message. Got {second_cached}. Automatic caching is NOT working!"
        )
        # OpenAI reports cached prefixes in 128-token chunks.
        assert second_cached % 128 == 0, f"OpenAI cached_input_tokens should be in 128-token increments, got {second_cached}"
    finally:
        await cleanup_agent(async_client, agent.id)
@pytest.mark.asyncio
async def test_gemini_2_5_flash_implicit_caching(async_client: AsyncLetta):
    """
    Gemini-specific test to verify implicit caching works on 2.5 Flash.
    Gemini 2.5 Flash has implicit caching (automatic) with ≥1024 token threshold.
    """
    model = "google_ai/gemini-2.5-flash"
    agent = await create_agent_with_large_memory(async_client, model, {}, "gemini-2.5-flash")
    try:
        # Warm-up turn establishes the cacheable prompt prefix.
        first_reply = await async_client.agents.messages.create(
            agent_id=agent.id,
            messages=[MessageCreateParam(role="user", content="Hello!")],
        )
        logger.info(f"[Gemini 2.5 Flash] First message prompt_tokens: {first_reply.usage.prompt_tokens if first_reply.usage else 'N/A'}")
        # The follow-up turn should report an implicit cache hit.
        second_reply = await async_client.agents.messages.create(
            agent_id=agent.id,
            messages=[MessageCreateParam(role="user", content="What are your capabilities?")],
        )
        # For Gemini, cached_input_tokens comes from cached_content_token_count (normalized in backend)
        hit_tokens = second_reply.usage.cached_input_tokens if second_reply.usage else None
        logger.info(f"[Gemini 2.5 Flash] Second message cached_input_tokens: {hit_tokens}")
        assert hit_tokens is not None and hit_tokens >= 1024, (
            f"Gemini 2.5 Flash should implicitly cache ≥1024 tokens on second message. Got {hit_tokens}. Implicit caching is NOT working!"
        )
    finally:
        await cleanup_agent(async_client, agent.id)
@pytest.mark.asyncio
async def test_gemini_3_pro_preview_implicit_caching(async_client: AsyncLetta):
    """
    Gemini-specific test to verify implicit caching works on 3 Pro Preview.
    Gemini 3 Pro Preview has implicit caching (automatic) with ≥2048 token threshold.
    Since implicit caching is stochastic (depends on routing, timing, etc.), we send
    multiple messages in quick succession and check if ANY of them hit the cache.
    """
    model = "google_ai/gemini-3.1-pro-preview"
    agent = await create_agent_with_large_memory(async_client, model, {}, "gemini-3-pro")
    try:
        # First message establishes the prompt
        response1 = await async_client.agents.messages.create(
            agent_id=agent.id,
            messages=[MessageCreateParam(role="user", content="Hello!")],
        )
        logger.info(f"[Gemini 3 Pro] First message prompt_tokens: {response1.usage.prompt_tokens if response1.usage else 'N/A'}")
        # Send multiple follow-up messages quickly to increase chance of implicit cache hit
        follow_up_messages = [
            "What are your capabilities?",
            "Tell me about your technical knowledge.",
            "What can you help me with?",
        ]
        cached_token_counts = []
        for i, msg in enumerate(follow_up_messages):
            response = await async_client.agents.messages.create(
                agent_id=agent.id,
                messages=[MessageCreateParam(role="user", content=msg)],
            )
            # Normalize None -> 0: usage can report cached_input_tokens as None,
            # which would make both the >= comparison below and max() over the
            # collected counts raise TypeError.
            cached_tokens = (response.usage.cached_input_tokens or 0) if response.usage else 0
            cached_token_counts.append(cached_tokens)
            logger.info(f"[Gemini 3 Pro] Message {i + 2} cached_input_tokens: {cached_tokens}")
            # Early exit if we got a cache hit
            if cached_tokens >= 2048:
                logger.info(f"[Gemini 3 Pro] Cache hit detected on message {i + 2}, stopping early")
                break
        # Check if ANY of the follow-up messages had a cache hit
        max_cached = max(cached_token_counts) if cached_token_counts else 0
        logger.info(f"[Gemini 3 Pro] Max cached tokens across {len(cached_token_counts)} messages: {max_cached}")
        assert max_cached >= 2048, (
            f"Gemini 3 Pro Preview should implicitly cache ≥2048 tokens in at least one of {len(follow_up_messages)} messages. Got max={max_cached}. Implicit caching is NOT working!"
        )
    finally:
        await cleanup_agent(async_client, agent.id)
@pytest.mark.asyncio
async def test_gemini_request_prefix_stability(async_client: AsyncLetta):
    """
    Check if Gemini requests have stable prefixes between REAL requests.
    Gemini implicit caching requires the PREFIX of the request to be identical.
    This test compares actual requests sent to Gemini to see what's changing.
    Key things to check:
    - System instruction (should be identical)
    - Tool definitions (order must be same)
    - Early contents (must be identical prefix)
    Diagnostic test: it only logs its findings (no assertions) and returns
    early, passing vacuously, when provider traces are unavailable.
    """
    import difflib
    import json
    model = "google_ai/gemini-2.5-flash"
    agent = await create_agent_with_large_memory(async_client, model, {}, "gemini-prefix-stability")
    try:
        # Send message 1
        response1 = await async_client.agents.messages.create(
            agent_id=agent.id,
            messages=[MessageCreateParam(role="user", content="Hello!")],
        )
        # Send message 2
        response2 = await async_client.agents.messages.create(
            agent_id=agent.id,
            messages=[MessageCreateParam(role="user", content="Follow up!")],
        )
        # Get provider traces from ACTUAL requests sent to Gemini.
        # Any response message carrying a step_id lets us look up the trace.
        step_id_1, step_id_2 = None, None
        if response1.messages:
            for msg in response1.messages:
                if hasattr(msg, "step_id") and msg.step_id:
                    step_id_1 = msg.step_id
                    break
        if response2.messages:
            for msg in response2.messages:
                if hasattr(msg, "step_id") and msg.step_id:
                    step_id_2 = msg.step_id
                    break
        if not step_id_1 or not step_id_2:
            logger.error("Could not find step_ids from responses")
            return
        # Get the ACTUAL requests that were sent to Gemini
        trace1 = await async_client.telemetry.retrieve_provider_trace(step_id=step_id_1)
        trace2 = await async_client.telemetry.retrieve_provider_trace(step_id=step_id_2)
        if not (trace1 and trace2 and trace1.request_json and trace2.request_json):
            logger.error("Could not retrieve provider traces")
            return
        # Compare key parts of the request that affect cache prefix
        req1 = trace1.request_json
        req2 = trace2.request_json
        # 1. Check system instruction (key name varies by serialization style)
        system_instruction_1 = req1.get("systemInstruction") or req1.get("system_instruction")
        system_instruction_2 = req2.get("systemInstruction") or req2.get("system_instruction")
        if system_instruction_1 != system_instruction_2:
            logger.error("❌ SYSTEM INSTRUCTIONS DIFFER!")
            logger.info("System Instruction 1:")
            logger.info(json.dumps(system_instruction_1, indent=2)[:500])
            logger.info("System Instruction 2:")
            logger.info(json.dumps(system_instruction_2, indent=2)[:500])
            # sort_keys makes the diff reflect content, not dict ordering.
            sys1_str = json.dumps(system_instruction_1, sort_keys=True)
            sys2_str = json.dumps(system_instruction_2, sort_keys=True)
            diff = difflib.unified_diff(
                sys1_str.splitlines(keepends=True),
                sys2_str.splitlines(keepends=True),
                fromfile="message1_system",
                tofile="message2_system",
                lineterm="",
            )
            diff_output = "\n".join(diff)
            # A recompile timestamp in the system prompt is a known cache-buster.
            if "System prompt last recompiled" in diff_output or "timestamp" in diff_output.lower():
                logger.error("⚠️ TIMESTAMP IN SYSTEM INSTRUCTION IS CHANGING!")
                logger.error("   → This breaks Gemini implicit caching (prefix must match)")
        else:
            logger.info("✅ System instructions are identical")
        # 2. Check tools (must be in same order for prefix matching)
        tools_1 = req1.get("tools") or []
        tools_2 = req2.get("tools") or []
        # For Gemini, tools are in format: [{"functionDeclarations": [...]}]
        # Extract just the function names/signatures for comparison
        def extract_tool_names(tools):
            """Flatten Gemini tool groups into an ordered list of declared function names."""
            names = []
            for tool_group in tools:
                if "functionDeclarations" in tool_group:
                    for func in tool_group["functionDeclarations"]:
                        names.append(func.get("name"))
            return names
        tool_names_1 = extract_tool_names(tools_1)
        tool_names_2 = extract_tool_names(tools_2)
        if tool_names_1 != tool_names_2:
            logger.error("❌ TOOL ORDER/NAMES DIFFER!")
            logger.info(f"Message 1 tools: {tool_names_1}")
            logger.info(f"Message 2 tools: {tool_names_2}")
            logger.error("   → Tool order must be identical for Gemini implicit caching")
        else:
            logger.info(f"✅ Tool order is identical ({len(tool_names_1)} tools)")
        # 3. Check if tool definitions (not just names) are identical
        tools_1_str = json.dumps(tools_1, sort_keys=True)
        tools_2_str = json.dumps(tools_2, sort_keys=True)
        if tools_1_str != tools_2_str:
            logger.warning("⚠️ Tool DEFINITIONS differ (not just order)")
            # Show a sample diff (only the first 1000 chars of each side)
            diff = difflib.unified_diff(
                tools_1_str[:1000].splitlines(keepends=True),
                tools_2_str[:1000].splitlines(keepends=True),
                fromfile="message1_tools",
                tofile="message2_tools",
                lineterm="",
            )
            logger.info("Sample tool definition diff:")
            logger.info("\n".join(diff))
        else:
            logger.info("✅ Tool definitions are identical")
        # 4. Check contents structure (just the first few items in the prefix)
        contents_1 = req1.get("contents") or []
        contents_2 = req2.get("contents") or []
        logger.info(f"Message 1: {len(contents_1)} content items")
        logger.info(f"Message 2: {len(contents_2)} content items")
        # Compare the overlapping prefix (message 2 should have message 1's contents + new message)
        min_len = min(len(contents_1), len(contents_2))
        prefix_identical = True
        for i in range(min_len - 1):  # Exclude last item (user's new message)
            if contents_1[i] != contents_2[i]:
                prefix_identical = False
                logger.error(f"❌ Content item {i} differs between requests!")
                logger.info(f"Message 1 item {i}: {json.dumps(contents_1[i], indent=2)[:200]}")
                logger.info(f"Message 2 item {i}: {json.dumps(contents_2[i], indent=2)[:200]}")
        if prefix_identical:
            logger.info("✅ Content prefix matches between requests")
        # Log cache results
        logger.info("=" * 80)
        logger.info(f"Message 1: prompt_tokens={response1.usage.prompt_tokens if response1.usage else 'N/A'}")
        logger.info(
            f"Message 2: prompt_tokens={response2.usage.prompt_tokens if response2.usage else 'N/A'}, cached={response2.usage.cached_input_tokens if response2.usage else 'N/A'}"
        )
        if response2.usage and response2.usage.cached_input_tokens and response2.usage.cached_input_tokens > 0:
            logger.info("✅ CACHE HIT DETECTED")
        else:
            logger.error("❌ NO CACHE HIT - This is the issue we're debugging")
    finally:
        await cleanup_agent(async_client, agent.id)
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/test_prompt_caching.py",
"license": "Apache License 2.0",
"lines": 932,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/test_internal_agents_count.py | from typing import List
import httpx
import pytest
from letta_client import Letta
from letta.schemas.agent import AgentState
@pytest.fixture(scope="function")
def test_agents(client: Letta) -> List[AgentState]:
    """
    Creates test agents - some hidden, some not hidden.
    Cleans them up after the test.

    Yields:
        A list of 5 agents: 3 visible agents first, then 2 hidden ones. The
        hidden entries are lightweight id-only stand-ins rather than full
        AgentState objects, since they are created via raw HTTP.
    """

    # Minimal stand-in for AgentState: cleanup only needs `.id`.
    # Defined once here instead of being re-declared on every loop iteration.
    class SimpleAgent:
        def __init__(self, id):
            self.id = id

    agents = []
    # Create 3 non-hidden agents
    for i in range(3):
        agent = client.agents.create(
            name=f"test_agent_visible_{i}",
            tags=["test", "visible"],
            model="openai/gpt-4o-mini",
            embedding="openai/text-embedding-3-small",
        )
        agents.append(agent)
    # Create 2 hidden agents
    for i in range(2):
        # Create agent as hidden using direct HTTP call (SDK might not support hidden parameter yet)
        response = httpx.post(
            f"{client._client._base_url}/v1/agents/",
            json={
                "name": f"test_agent_hidden_{i}",
                "tags": ["test", "hidden"],
                "model": "openai/gpt-4o-mini",
                "embedding": "openai/text-embedding-3-small",
                "hidden": True,
            },
            headers=client._client._headers,
            timeout=10.0,
        )
        response.raise_for_status()
        agent_data = response.json()
        agents.append(SimpleAgent(agent_data["id"]))
    yield agents
    # Cleanup runs regardless of test outcome; ignore agents already deleted.
    for agent in agents:
        try:
            client.agents.delete(agent.id)
        except Exception:
            pass
def test_internal_agents_count_exclude_hidden(client: Letta, test_agents: List[AgentState]):
    """
    Test that the internal agents count endpoint correctly excludes hidden agents
    when exclude_hidden=True (default).
    """
    base_url = client._client._base_url
    headers = client._client._headers
    # Raw HTTP call: the SDK may not expose this internal endpoint.
    visible_resp = httpx.get(
        f"{base_url}/v1/_internal_agents/count",
        params={"exclude_hidden": True},
        headers=headers,
        timeout=10.0,
    )
    assert visible_resp.status_code == 200
    visible_count = visible_resp.json()
    # At least the 3 visible fixture agents must be counted;
    # other agents may already exist in the system.
    assert isinstance(visible_count, int)
    assert visible_count >= 3
    # Same endpoint, this time counting hidden agents as well.
    full_resp = httpx.get(
        f"{base_url}/v1/_internal_agents/count",
        params={"exclude_hidden": False},
        headers=headers,
        timeout=10.0,
    )
    assert full_resp.status_code == 200
    full_count = full_resp.json()
    # Including hidden agents must add at least the 2 hidden fixture agents.
    assert full_count >= visible_count + 2
def test_internal_agents_count_include_all(client: Letta, test_agents: List[AgentState]):
    """
    Test that the internal agents count endpoint correctly includes all agents
    when exclude_hidden=False.
    """
    resp = httpx.get(
        f"{client._client._base_url}/v1/_internal_agents/count",
        params={"exclude_hidden": False},
        headers=client._client._headers,
        timeout=10.0,
    )
    assert resp.status_code == 200
    total = resp.json()
    # The fixture created 3 visible + 2 hidden agents; all must be counted.
    assert isinstance(total, int)
    assert total >= 5
def test_internal_agents_count_default_behavior(client: Letta, test_agents: List[AgentState]):
    """
    Test that the default behavior (exclude_hidden=True) works correctly.
    """
    # Call without specifying exclude_hidden (should default to True)
    response = httpx.get(
        f"{client._client._base_url}/v1/_internal_agents/count",
        headers=client._client._headers,
        timeout=10.0,
    )
    assert response.status_code == 200
    count = response.json()
    # Should count at least the 3 visible agents we created
    assert isinstance(count, int)
    assert count >= 3
    # This should be the same as explicitly setting exclude_hidden=True
    response_explicit = httpx.get(
        f"{client._client._base_url}/v1/_internal_agents/count",
        params={"exclude_hidden": True},
        headers=client._client._headers,
        timeout=10.0,
    )
    # Check the explicit request's status too (for parity with the sibling
    # tests); previously a non-200 here surfaced as a confusing count mismatch.
    assert response_explicit.status_code == 200
    count_explicit = response_explicit.json()
    # The two counts should be equal
    assert count == count_explicit
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/test_internal_agents_count.py",
"license": "Apache License 2.0",
"lines": 121,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:letta/monitoring/event_loop_watchdog.py | """
Lightweight thread-based watchdog to detect event loop hangs.
Runs independently and won't interfere with tests or normal operation.
"""
import asyncio
import threading
import time
import traceback
from collections import defaultdict
from typing import Optional
from letta.log import get_logger
logger = get_logger(__name__)
class EventLoopWatchdog:
"""
Minimal watchdog that monitors event loop health from a separate thread.
Detects complete event loop freezes that would cause health check failures.
"""
    def __init__(self, check_interval: float = 5.0, timeout_threshold: float = 15.0):
        """
        Args:
            check_interval: How often to check (seconds)
            timeout_threshold: Threshold for hang detection (seconds)
        """
        self.check_interval = check_interval
        self.timeout_threshold = timeout_threshold
        # Background monitoring thread; created lazily in start().
        self._thread: Optional[threading.Thread] = None
        self._stop_event = threading.Event()
        # Wall-clock time of the last heartbeat observed on the event loop.
        self._last_heartbeat = time.time()
        # When the next heartbeat callback is expected to fire; the delta
        # between this and "now" is the measured event loop lag.
        self._heartbeat_scheduled_at = time.time()
        # Guards the two heartbeat timestamps, which are written from the
        # event loop thread and read from the watchdog thread.
        self._heartbeat_lock = threading.Lock()
        self._loop: Optional[asyncio.AbstractEventLoop] = None
        self._monitoring = False
        self._last_dump_time = 0.0  # Cooldown between task dumps
        self._saturation_start: Optional[float] = None  # Track when saturation began
def start(self, loop: asyncio.AbstractEventLoop):
"""Start the watchdog thread."""
if self._monitoring:
return
self._loop = loop
self._monitoring = True
self._stop_event.clear()
now = time.time()
self._last_heartbeat = now
self._heartbeat_scheduled_at = now
self._thread = threading.Thread(target=self._watch_loop, daemon=True, name="EventLoopWatchdog")
self._thread.start()
# Schedule periodic heartbeats on the event loop
loop.call_soon(self._schedule_heartbeats)
logger.info(
f"Event loop watchdog started - monitoring thread running, heartbeat every 1s, "
f"checks every {self.check_interval}s, hang threshold: {self.timeout_threshold}s"
)
def stop(self):
"""Stop the watchdog thread."""
self._monitoring = False
self._stop_event.set()
if self._thread:
self._thread.join(timeout=2)
logger.info("Watchdog stopped")
    def _schedule_heartbeats(self):
        """Schedule periodic heartbeat updates on the event loop.

        Runs as an event-loop callback roughly once per second. Each run
        records the current time as the heartbeat and measures how late the
        callback fired relative to when it was scheduled — that delta is the
        event loop lag. Re-arms itself via call_later while monitoring is on.
        """
        if not self._monitoring:
            return
        now = time.time()
        with self._heartbeat_lock:
            # Calculate event loop lag: time between when we scheduled this callback and when it ran
            lag = now - self._heartbeat_scheduled_at
            self._last_heartbeat = now
            # Next run is expected one second from now (matches call_later below).
            self._heartbeat_scheduled_at = now + 1.0
        # Log if lag is significant (> 2 seconds means event loop is saturated)
        if lag > 2.0:
            logger.warning(f"Event loop lag in heartbeat: {lag:.2f}s (expected ~1.0s)")
        if self._loop and self._monitoring:
            self._loop.call_later(1.0, self._schedule_heartbeats)
    def _watch_loop(self):
        """Main watchdog loop running in separate thread.

        Every check_interval seconds it compares the heartbeat timestamps
        (written by _schedule_heartbeats on the event loop) against the wall
        clock to derive two signals:
        - current_lag: how far behind schedule the heartbeat is (saturation)
        - time_since_heartbeat: how long since any heartbeat ran (full hang)
        Exceptions are caught per-iteration so the watchdog never dies.
        """
        consecutive_hangs = 0
        max_lag_seen = 0.0
        while not self._stop_event.is_set():
            try:
                time.sleep(self.check_interval)
                # Snapshot both timestamps under the lock; compute outside it.
                with self._heartbeat_lock:
                    last_beat = self._last_heartbeat
                    scheduled_at = self._heartbeat_scheduled_at
                now = time.time()
                time_since_heartbeat = now - last_beat
                # Calculate current lag: how far behind schedule is the heartbeat?
                current_lag = now - scheduled_at
                max_lag_seen = max(max_lag_seen, current_lag)
                # Try to estimate event loop load (safe from separate thread)
                task_count = -1
                try:
                    if self._loop and not self._loop.is_closed():
                        # all_tasks returns only unfinished tasks
                        all_tasks = asyncio.all_tasks(self._loop)
                        task_count = len(all_tasks)
                except Exception:
                    # Accessing loop from thread can be fragile, don't fail
                    pass
                # ALWAYS log every check to prove watchdog is alive
                logger.debug(
                    f"WATCHDOG_CHECK: heartbeat_age={time_since_heartbeat:.1f}s, current_lag={current_lag:.2f}s, "
                    f"max_lag={max_lag_seen:.2f}s, consecutive_hangs={consecutive_hangs}, tasks={task_count}"
                )
                # Log at INFO if we see significant lag (> 2 seconds indicates saturation)
                if current_lag > 2.0:
                    # Track saturation duration
                    if self._saturation_start is None:
                        self._saturation_start = now
                    saturation_duration = now - self._saturation_start
                    logger.info(
                        f"Event loop saturation detected: lag={current_lag:.2f}s, duration={saturation_duration:.1f}s, "
                        f"tasks={task_count}, max_lag_seen={max_lag_seen:.2f}s"
                    )
                    # Only dump stack traces with 60s cooldown to avoid spam
                    if (now - self._last_dump_time) > 60.0:
                        self._dump_asyncio_tasks()  # Dump async tasks
                        self._dump_state()  # Dump thread stacks
                        self._last_dump_time = now
                else:
                    # Reset saturation tracking when recovered
                    if self._saturation_start is not None:
                        duration = now - self._saturation_start
                        logger.info(f"Event loop saturation ended after {duration:.1f}s")
                        self._saturation_start = None
                # Hang detection: no heartbeat at all for longer than the threshold.
                if time_since_heartbeat > self.timeout_threshold:
                    consecutive_hangs += 1
                    logger.error(
                        f"EVENT LOOP HANG DETECTED! No heartbeat for {time_since_heartbeat:.1f}s (threshold: {self.timeout_threshold}s), "
                        f"tasks={task_count}"
                    )
                    # Dump both thread state and asyncio tasks
                    self._dump_asyncio_tasks()
                    self._dump_state()
                    if consecutive_hangs >= 2:
                        logger.critical(f"Event loop appears frozen ({consecutive_hangs} consecutive hangs), tasks={task_count}")
                else:
                    if consecutive_hangs > 0:
                        logger.info(f"Event loop recovered (was {consecutive_hangs} hangs, tasks now: {task_count})")
                    consecutive_hangs = 0
            except Exception as e:
                logger.error(f"Watchdog error: {e}")
def _dump_state(self):
    """Dump all live thread names and their stack traces when a hang is detected.

    Output goes to the module logger at ERROR level so it surfaces in
    production logs alongside the hang alert. Never raises: diagnostics
    must not take down the watchdog thread itself.
    """
    try:
        import sys

        # Summarize live threads first so the trace dump has context.
        logger.error(f"Active threads: {threading.active_count()}")
        for thread in threading.enumerate():
            logger.error(f" {thread.name} (daemon={thread.daemon})")
        # Get stack traces from all threads
        logger.error("\nStack traces of all threads:")
        for thread_id, frame in sys._current_frames().items():
            # Map the raw thread id back to a human-readable name.
            thread_name = "unknown"
            for thread in threading.enumerate():
                if thread.ident == thread_id:
                    thread_name = thread.name
                    break
            logger.error(f"\nThread {thread_name} (ID: {thread_id}):")
            # Format stack trace. Bug fix: previously logged a literal
            # "(unknown)" placeholder instead of the unpacked filename,
            # which made the dumped traces useless for debugging.
            for filename, lineno, name, line in traceback.extract_stack(frame):
                logger.error(f" File: {filename}:{lineno}")
                logger.error(f" in {name}")
                if line:
                    logger.error(f" > {line.strip()}")
    except Exception as e:
        # Best-effort diagnostics: swallow everything.
        logger.error(f"Failed to dump state: {e}")
def _dump_asyncio_tasks(self):
    """Dump asyncio task stack traces to diagnose event loop saturation.

    Groups all unfinished tasks on the watched loop by the topmost stack
    frame inside the letta package, then logs the most common locations:
    full sampled stacks for the top 3 patterns, one-line summaries for
    positions 4-10, and a single aggregate line for the rest. Never raises.
    """
    try:
        # Nothing to inspect if the loop was never attached or has shut down.
        if not self._loop or self._loop.is_closed():
            return
        active_tasks = asyncio.all_tasks(self._loop)
        if not active_tasks:
            return
        logger.warning(f"Severe lag detected - dumping active tasks ({len(active_tasks)} total):")
        # Collect task data in single pass
        tasks_by_location = defaultdict(list)
        for task in active_tasks:
            try:
                if task.done():
                    continue
                stack = task.get_stack()
                if not stack:
                    continue
                # Find top letta frame for grouping (walk from the innermost
                # frame outward; the first letta/ frame wins).
                for frame in reversed(stack):
                    if "letta" in frame.f_code.co_filename:
                        # Shorten the path to everything after "letta/".
                        idx = frame.f_code.co_filename.find("letta/")
                        path = frame.f_code.co_filename[idx + 6 :] if idx != -1 else frame.f_code.co_filename
                        location = f"{path}:{frame.f_lineno}:{frame.f_code.co_name}"
                        # For bounded tasks, use wrapped coroutine location instead
                        # (the wrapper frame would otherwise collapse all bounded
                        # tasks into one indistinguishable bucket).
                        if frame.f_code.co_name == "bounded_coro":
                            task_name = task.get_name()
                            if task_name and task_name.startswith("bounded["):
                                location = task_name[8:-1]  # Extract "file:line:func" from "bounded[...]"
                        tasks_by_location[location].append((task, stack))
                        break
            except Exception:
                # A task may complete/cancel mid-inspection; skip it.
                continue
        if not tasks_by_location:
            return
        total_tasks = sum(len(tasks) for tasks in tasks_by_location.values())
        logger.warning(f" Letta tasks: {total_tasks} total")
        # Sort by task count (most blocked first) and show detailed stacks for top 3
        sorted_patterns = sorted(tasks_by_location.items(), key=lambda x: len(x[1]), reverse=True)
        num_patterns = len(sorted_patterns)
        logger.warning(f" Task patterns ({num_patterns} unique locations):")
        # Show detailed stacks for top 3, summary for rest
        for i, (location, tasks) in enumerate(sorted_patterns, 1):
            count = len(tasks)
            pct = (count / total_tasks) * 100 if total_tasks > 0 else 0
            if i <= 3:
                # Top 3: show detailed vertical stack trace from one sample task.
                logger.warning(f" [{i}] {count} tasks ({pct:.0f}%) at: {location}")
                _, sample_stack = tasks[0]
                # Show up to 8 frames vertically for better context
                for frame in sample_stack[-8:]:
                    filename = frame.f_code.co_filename
                    letta_idx = filename.find("letta/")
                    if letta_idx != -1:
                        short_path = filename[letta_idx + 6 :]
                        logger.warning(f" {short_path}:{frame.f_lineno} in {frame.f_code.co_name}")
                    else:
                        # Non-letta frames: show a shortened site-packages path.
                        pkg_idx = filename.find("site-packages/")
                        if pkg_idx != -1:
                            lib_path = filename[pkg_idx + 14 :]
                            logger.warning(f" [{lib_path}:{frame.f_lineno}] {frame.f_code.co_name}")
            elif i <= 10:
                # Positions 4-10: show location only
                logger.warning(f" [{i}] {count} tasks ({pct:.0f}%) at: {location}")
            else:
                # Beyond 10: just show count in summary (emitted once, at i == 11)
                if i == 11:
                    remaining = sum(len(t) for _, t in sorted_patterns[10:])
                    remaining_patterns = num_patterns - 10
                    logger.warning(f" ... and {remaining} more tasks across {remaining_patterns} other locations")
    except Exception as e:
        # Diagnostics must never crash the watchdog.
        logger.error(f"Failed to dump asyncio tasks: {e}")
# Process-wide singleton, managed by start_watchdog()/stop_watchdog().
_global_watchdog: Optional[EventLoopWatchdog] = None


def get_watchdog() -> Optional[EventLoopWatchdog]:
    """Get the global watchdog instance (None if start_watchdog was never called)."""
    return _global_watchdog
def start_watchdog(loop: asyncio.AbstractEventLoop, check_interval: float = 5.0, timeout_threshold: float = 15.0):
    """Start the process-wide watchdog singleton for ``loop``.

    Idempotent: if a watchdog already exists it is returned unchanged
    (the interval/threshold arguments are ignored on subsequent calls).
    """
    global _global_watchdog
    if _global_watchdog is not None:
        # Already created; never start a second instance.
        return _global_watchdog
    _global_watchdog = EventLoopWatchdog(check_interval=check_interval, timeout_threshold=timeout_threshold)
    _global_watchdog.start(loop)
    return _global_watchdog
def stop_watchdog():
    """Stop and discard the global watchdog, if one is running."""
    global _global_watchdog
    watchdog = _global_watchdog
    if not watchdog:
        return
    watchdog.stop()
    _global_watchdog = None
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/monitoring/event_loop_watchdog.py",
"license": "Apache License 2.0",
"lines": 257,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/server/rest_api/routers/v1/anthropic.py | import asyncio
import httpx
from fastapi import APIRouter, Depends, Request
from fastapi.responses import Response, StreamingResponse
from letta.log import get_logger
from letta.server.rest_api.dependencies import HeaderParams, get_headers, get_letta_server
from letta.server.rest_api.proxy_helpers import (
build_response_from_chunks,
check_for_duplicate_message,
extract_assistant_message,
extract_user_messages,
get_or_create_claude_code_agent,
inject_memory_context,
is_topic_detection_response,
persist_messages_background,
prepare_headers,
)
from letta.server.server import SyncServer
logger = get_logger(__name__)
_background_tasks: set[asyncio.Task] = set()
router = APIRouter(prefix="/anthropic", tags=["anthropic"])
ANTHROPIC_API_BASE = "https://api.anthropic.com"
PROXY_NAME = "Anthropic Proxy"
@router.api_route("/v1/messages", methods=["POST"], operation_id="anthropic_messages_proxy", include_in_schema=False)
async def anthropic_messages_proxy(
request: Request,
server: SyncServer = Depends(get_letta_server),
headers: HeaderParams = Depends(get_headers),
):
"""
Proxy endpoint for Anthropic Messages API.
This endpoint forwards requests to the Anthropic API, allowing Claude Code CLI
to use Letta as a proxy by configuring anthropic_base_url.
Usage in Claude Code CLI settings.json:
{
"env": {
"ANTHROPIC_BASE_URL": "http://localhost:3000/v1/anthropic"
}
}
"""
# Get the request body
body = await request.body()
logger.info(f"[{PROXY_NAME}] Proxying request to Anthropic Messages API: {ANTHROPIC_API_BASE}/v1/messages")
logger.debug(f"[{PROXY_NAME}] Request body preview: {body[:200]}...")
actor = await server.user_manager.get_actor_or_default_async(headers.actor_id)
# Extract all user messages from request
all_user_messages = extract_user_messages(body)
# Only capture the LAST user message (the new one the user just sent)
# Claude Code sends full conversation history, but we only want to persist the new message
user_messages = [all_user_messages[-1]] if all_user_messages else []
# Filter out system/metadata requests and policy specs
user_messages = [s for s in user_messages if not s.startswith("<system-reminder>") and not s.startswith("<policy_spec>")]
if not user_messages:
logger.debug(f"[{PROXY_NAME}] Skipping capture/memory for this turn")
anthropic_headers = prepare_headers(request, PROXY_NAME)
if not anthropic_headers:
logger.error(f"[{PROXY_NAME}] No Anthropic API key found in headers or settings")
return Response(
content='{"error": {"type": "authentication_error", "message": "Anthropic API key required. Pass via anthropic-api-key or x-api-key header."}}',
status_code=401,
media_type="application/json",
)
# Check if this is a streaming request
try:
import json
request_data = json.loads(body)
is_streaming = request_data.get("stream", False)
model_name = request_data.get("model")
# Extract and remove project_id (internal use only, not for Anthropic API)
project_id = request_data.pop("project_id", None)
logger.debug(f"[{PROXY_NAME}] Request is streaming: {is_streaming}")
logger.debug(f"[{PROXY_NAME}] Model: {model_name}")
logger.debug(f"[{PROXY_NAME}] Project ID: {project_id}")
except Exception as e:
logger.warning(f"[{PROXY_NAME}] Failed to parse request body: {e}")
is_streaming = False
model_name = None
project_id = None
# Get or create agent for Claude Code session (skip for system requests)
# Note: Agent lookup and memory search are blocking operations before forwarding.
# Message persistence happens in the background after the response is returned.
agent = None
try:
# Check if X-LETTA-AGENT-ID header is provided
custom_agent_id = request.headers.get("x-letta-agent-id")
agent = await get_or_create_claude_code_agent(
server=server,
actor=actor,
project_id=project_id,
agent_id=custom_agent_id,
)
logger.debug(f"[{PROXY_NAME}] Using agent ID: {agent.id}")
except Exception as e:
logger.error(f"[{PROXY_NAME}] Failed to get/create agent: {e}")
# Inject memory context into request (skip for system requests)
# TODO: Optimize - skip memory injection on subsequent messages in same session
# TODO: Add caching layer to avoid duplicate memory searches
modified_body = body
if agent and request_data:
modified_request_data = await inject_memory_context(
server=server,
agent=agent,
actor=actor,
request_data=request_data,
proxy_name=PROXY_NAME,
)
# Re-encode the modified request
import json
modified_body = json.dumps(modified_request_data).encode("utf-8")
# Forward the request to Anthropic API (preserve query params like ?beta=true)
# Note: For streaming, we create the client outside the generator to keep it alive
anthropic_url = f"{ANTHROPIC_API_BASE}/v1/messages"
if request.url.query:
anthropic_url = f"{anthropic_url}?{request.url.query}"
if is_streaming:
# Handle streaming response
collected_chunks = []
async def stream_response():
# Create client inside the generator so it stays alive during streaming
async with httpx.AsyncClient(timeout=300.0) as client:
async with client.stream(
"POST",
anthropic_url,
headers=anthropic_headers,
content=modified_body,
) as response:
async for chunk in response.aiter_bytes():
collected_chunks.append(chunk)
yield chunk
# After streaming is complete, extract and log assistant message
assistant_message = build_response_from_chunks(collected_chunks)
if user_messages and assistant_message:
logger.info("=" * 70)
logger.info("📨 CAPTURED USER MESSAGE:")
for i, user_message in enumerate(user_messages):
logger.info(f" {i}: {user_message[:200]}{'...' if len(user_message) > 200 else ''}")
logger.info("=" * 70)
logger.info("🤖 CAPTURED ASSISTANT RESPONSE (streaming):")
logger.info(f" {assistant_message[:200]}{'...' if len(assistant_message) > 200 else ''}")
logger.info("=" * 70)
# Skip persisting topic detection responses (metadata, not conversation)
if is_topic_detection_response(assistant_message):
logger.debug(f"[{PROXY_NAME}] Skipping persistence - topic detection response")
# Persist messages to database (non-blocking, skip for system requests)
elif agent:
# Check for duplicate user messages before creating background task
# This prevents race conditions where multiple requests persist the same message
user_messages_to_persist = await check_for_duplicate_message(server, agent, actor, user_messages, PROXY_NAME)
task = asyncio.create_task(
persist_messages_background(
server=server,
agent=agent,
actor=actor,
user_messages=user_messages_to_persist,
assistant_message=assistant_message,
model_name=model_name,
proxy_name=PROXY_NAME,
)
)
_background_tasks.add(task)
task.add_done_callback(_background_tasks.discard)
return StreamingResponse(
stream_response(),
media_type="text/event-stream",
headers={
"Cache-Control": "no-cache",
"Connection": "keep-alive",
},
)
# Non-streaming path
async with httpx.AsyncClient(timeout=300.0) as client:
try:
# Handle non-streaming response
response = await client.post(
anthropic_url,
headers=anthropic_headers,
content=modified_body,
)
logger.info(f"Successfully proxied request, status: {response.status_code}")
# Extract and log assistant message
if response.status_code == 200:
try:
import json
response_data = json.loads(response.content)
assistant_message = extract_assistant_message(response_data)
if assistant_message:
logger.info("=" * 70)
logger.info("🤖 CAPTURED ASSISTANT RESPONSE:")
logger.info(f" {assistant_message[:500]}{'...' if len(assistant_message) > 500 else ''}")
logger.info("=" * 70)
# Skip persisting topic detection responses (metadata, not conversation)
if is_topic_detection_response(assistant_message):
logger.debug(f"[{PROXY_NAME}] Skipping persistence - topic detection response")
# Persist messages to database (non-blocking)
elif agent:
# Check for duplicate user messages before creating background task
user_messages_to_persist = await check_for_duplicate_message(server, agent, actor, user_messages, PROXY_NAME)
task = asyncio.create_task(
persist_messages_background(
server=server,
agent=agent,
actor=actor,
user_messages=user_messages_to_persist,
assistant_message=assistant_message,
model_name=model_name,
proxy_name=PROXY_NAME,
)
)
_background_tasks.add(task)
task.add_done_callback(_background_tasks.discard)
except Exception as e:
logger.warning(f"[{PROXY_NAME}] Failed to extract assistant response for logging: {e}")
return Response(
content=response.content,
status_code=response.status_code,
media_type=response.headers.get("content-type", "application/json"),
headers={
k: v
for k, v in response.headers.items()
if k.lower() not in ["content-encoding", "content-length", "transfer-encoding", "connection"]
},
)
except httpx.HTTPError as e:
logger.error(f"[{PROXY_NAME}] Error proxying request to Anthropic API: {e}")
return Response(
content=f'{{"error": {{"type": "api_error", "message": "Failed to proxy request to Anthropic API: {str(e)}"}}}}',
status_code=500,
media_type="application/json",
)
@router.api_route(
    "/v1/{endpoint:path}",
    methods=["GET", "POST", "PUT", "DELETE", "PATCH"],
    operation_id="anthropic_catchall_proxy",
    include_in_schema=False,
)
async def anthropic_catchall_proxy(
    endpoint: str,
    request: Request,
    server: SyncServer = Depends(get_letta_server),
    headers: HeaderParams = Depends(get_headers),
):
    """
    Catch-all proxy for other Anthropic API endpoints.

    Forwards any request (e.g. /v1/messages/count_tokens) verbatim to the
    Anthropic API — no message capture and no memory injection happen here.
    """
    # POST /v1/messages is owned by the dedicated route above; refuse it here.
    if endpoint == "messages" and request.method == "POST":
        return Response(
            content='{"error": {"type": "routing_error", "message": "Use specific /v1/messages endpoint"}}',
            status_code=500,
            media_type="application/json",
        )
    body = await request.body()
    # Reconstruct the upstream path from the wildcard segment.
    path = f"v1/{endpoint}"
    logger.info(f"[{PROXY_NAME}] Proxying catch-all request: {request.method} /{path}")
    anthropic_headers = prepare_headers(request, PROXY_NAME)
    if not anthropic_headers:
        logger.error(f"[{PROXY_NAME}] No Anthropic API key found in headers or settings")
        return Response(
            content='{"error": {"type": "authentication_error", "message": "Anthropic API key required"}}',
            status_code=401,
            media_type="application/json",
        )
    # Forward to Anthropic and relay the response.
    async with httpx.AsyncClient(timeout=300.0) as client:
        try:
            upstream = await client.request(
                method=request.method,
                url=f"{ANTHROPIC_API_BASE}/{path}",
                headers=anthropic_headers,
                content=body if body else None,
            )
        except httpx.HTTPError as e:
            logger.error(f"[{PROXY_NAME}] Error proxying catch-all request to Anthropic API: {e}")
            return Response(
                content=f'{{"error": {{"type": "api_error", "message": "Failed to proxy request to Anthropic API: {str(e)}"}}}}',
                status_code=500,
                media_type="application/json",
            )
    # Strip hop-by-hop / recomputed headers before relaying.
    relay_headers = {
        k: v
        for k, v in upstream.headers.items()
        if k.lower() not in ["content-encoding", "content-length", "transfer-encoding", "connection"]
    }
    return Response(
        content=upstream.content,
        status_code=upstream.status_code,
        media_type=upstream.headers.get("content-type", "application/json"),
        headers=relay_headers,
    )
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/server/rest_api/routers/v1/anthropic.py",
"license": "Apache License 2.0",
"lines": 293,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/server/rest_api/routers/v1/passages.py | from datetime import datetime
from typing import List, Literal, Optional
from fastapi import APIRouter, Body, Depends
from pydantic import BaseModel, Field
from letta.schemas.embedding_config import EmbeddingConfig
from letta.schemas.enums import TagMatchMode
from letta.schemas.passage import Passage
from letta.schemas.user import User as PydanticUser
from letta.server.rest_api.dependencies import HeaderParams, get_headers, get_letta_server
from letta.server.server import SyncServer
router = APIRouter(prefix="/passages", tags=["passages"])
async def _get_embedding_config_for_search(
    server: SyncServer,
    actor: PydanticUser,
    agent_id: Optional[str],
    archive_id: Optional[str],
) -> Optional[EmbeddingConfig]:
    """Resolve which embedding config a passage search should use.

    Resolution order:
      1. the agent's config, when ``agent_id`` is given;
      2. the archive's config, when ``archive_id`` is given;
      3. the config of any existing agent in the organization;
      4. the server default.

    Args:
        server: The SyncServer instance.
        actor: The user making the request.
        agent_id: Optional agent to borrow the embedding config from.
        archive_id: Optional archive to borrow the embedding config from.

    Returns:
        The resolved embedding config, or None if not found.
    """
    if agent_id:
        agent_state = await server.agent_manager.get_agent_by_id_async(agent_id=agent_id, actor=actor)
        return agent_state.embedding_config
    if archive_id:
        archive = await server.archive_manager.get_archive_by_id_async(archive_id=archive_id, actor=actor)
        return archive.embedding_config
    # No explicit scope: borrow the config from an arbitrary existing agent.
    if await server.agent_manager.size_async(actor=actor) > 0:
        candidates = await server.agent_manager.list_agents_async(actor=actor, limit=1)
        if candidates:
            return candidates[0].embedding_config
    # No agents at all — fall back to the server default.
    return server.default_embedding_config
class PassageSearchRequest(BaseModel):
    """Request model for searching passages across archives.

    When neither ``agent_id`` nor ``archive_id`` is set, the search spans all
    passages in the organization; when both are set, ``agent_id`` takes
    precedence (see the /search endpoint's docstring).
    """

    # Free-text query; when omitted, no embedding config is resolved for the search.
    query: Optional[str] = Field(None, description="Text query for semantic search")
    # Optional scoping filters; agent_id wins over archive_id if both are set.
    agent_id: Optional[str] = Field(None, description="Filter passages by agent ID")
    archive_id: Optional[str] = Field(None, description="Filter passages by archive ID")
    tags: Optional[List[str]] = Field(None, description="Optional list of tags to filter search results")
    tag_match_mode: Literal["any", "all"] = Field(
        "any", description="How to match tags - 'any' to match passages with any of the tags, 'all' to match only passages with all tags"
    )
    # Result cap, bounded 1..100 by validation.
    limit: int = Field(50, description="Maximum number of results to return", ge=1, le=100)
    # Optional creation-time window for the results.
    start_date: Optional[datetime] = Field(None, description="Filter results to passages created after this datetime")
    end_date: Optional[datetime] = Field(None, description="Filter results to passages created before this datetime")
class PassageSearchResult(BaseModel):
    """Result from a passage search operation with scoring details."""

    # The matched passage itself.
    passage: Passage = Field(..., description="The passage object")
    # Relevance score as produced by the underlying search; scale is backend-defined.
    score: float = Field(..., description="Relevance score")
    # Extra per-hit details passed through from the search backend.
    metadata: dict = Field(default_factory=dict, description="Additional metadata about the search result")
@router.post("/search", response_model=List[PassageSearchResult], operation_id="search_passages")
async def search_passages(
request: PassageSearchRequest = Body(...),
server: SyncServer = Depends(get_letta_server),
headers: HeaderParams = Depends(get_headers),
):
"""
Search passages across the organization with optional agent and archive filtering.
Returns passages with relevance scores.
This endpoint supports semantic search through passages:
- If neither agent_id nor archive_id is provided, searches ALL passages in the organization
- If agent_id is provided, searches passages across all archives attached to that agent
- If archive_id is provided, searches passages within that specific archive
- If both are provided, agent_id takes precedence
"""
actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
# Convert tag_match_mode to enum
tag_mode = TagMatchMode.ANY if request.tag_match_mode == "any" else TagMatchMode.ALL
# Determine embedding config (only needed when query text is provided)
embed_query = bool(request.query)
embedding_config = None
if embed_query:
embedding_config = await _get_embedding_config_for_search(
server=server,
actor=actor,
agent_id=request.agent_id,
archive_id=request.archive_id,
)
# Search passages
passages_with_metadata = await server.agent_manager.query_agent_passages_async(
actor=actor,
agent_id=request.agent_id, # Can be None for organization-wide search
archive_id=request.archive_id, # Can be None if searching by agent or org-wide
query_text=request.query,
limit=request.limit,
embedding_config=embedding_config,
embed_query=embed_query,
tags=request.tags,
tag_match_mode=tag_mode,
start_date=request.start_date,
end_date=request.end_date,
)
# Convert to PassageSearchResult objects
results = [
PassageSearchResult(
passage=passage,
score=score,
metadata=metadata,
)
for passage, score, metadata in passages_with_metadata
]
return results
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/server/rest_api/routers/v1/passages.py",
"license": "Apache License 2.0",
"lines": 114,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/services/sandbox_credentials_service.py | import logging
import os
from typing import Any, Dict, Optional
import httpx
from letta.schemas.user import User
logger = logging.getLogger(__name__)
class SandboxCredentialsService:
    """Service for fetching sandbox credentials from a webhook."""

    def __init__(self):
        # Accept the documented SANDBOX_CREDENTIALS_* variables and fall back to
        # the legacy STEP_* names so existing deployments keep working. (The
        # docstrings, log messages, and this module's tests all reference
        # SANDBOX_CREDENTIALS_WEBHOOK/KEY, but previously only the STEP_*
        # variables were actually read.)
        self.credentials_webhook_url = os.getenv("SANDBOX_CREDENTIALS_WEBHOOK") or os.getenv("STEP_ORCHESTRATOR_ENDPOINT")
        self.credentials_webhook_key = os.getenv("SANDBOX_CREDENTIALS_KEY") or os.getenv("STEP_COMPLETE_KEY")

    async def fetch_credentials(
        self,
        actor: User,
        tool_name: Optional[str] = None,
        agent_id: Optional[str] = None,
    ) -> Dict[str, Any]:
        """
        Fetch sandbox credentials from the configured webhook.

        Args:
            actor: The user executing the tool
            tool_name: Optional name of the tool being executed
            agent_id: Optional ID of the agent executing the tool

        Returns:
            Dict[str, Any]: Environment variables to add to the sandbox.
            Returns an empty dict on any failure (missing config, timeout,
            HTTP error, or malformed response) — credential fetching is
            best-effort and must never block tool execution.
        """
        if not self.credentials_webhook_url:
            logger.debug("Sandbox credentials webhook not configured, skipping credentials fetch")
            return {}
        try:
            headers = {}
            if self.credentials_webhook_key:
                headers["Authorization"] = f"Bearer {self.credentials_webhook_key}"
            payload = {
                "user_id": actor.id,
                "organization_id": actor.organization_id,
            }
            # Only include optional context keys when they are provided.
            if tool_name:
                payload["tool_name"] = tool_name
            if agent_id:
                payload["agent_id"] = agent_id
            async with httpx.AsyncClient(timeout=10.0) as client:
                # NOTE(review): the fixed webhook path is appended to the
                # configured URL — confirm the env var is meant to hold a base
                # URL rather than the full endpoint.
                response = await client.post(
                    self.credentials_webhook_url + "/webhook/sandbox-credentials",
                    json=payload,
                    headers=headers,
                )
                response.raise_for_status()
                response_data = response.json()
                if not isinstance(response_data, dict):
                    logger.warning(f"Invalid response format from credentials webhook: expected dict, got {type(response_data)}")
                    return {}
                logger.info(f"Successfully fetched sandbox credentials for user {actor.id}")
                return response_data
        except httpx.TimeoutException:
            logger.warning(f"Timeout fetching sandbox credentials for user {actor.id}")
            return {}
        except httpx.HTTPStatusError as e:
            logger.warning(f"HTTP error fetching sandbox credentials for user {actor.id}: {e.response.status_code}")
            return {}
        except Exception as e:
            logger.error(f"Unexpected error fetching sandbox credentials for user {actor.id}: {e}")
            return {}
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/sandbox_credentials_service.py",
"license": "Apache License 2.0",
"lines": 63,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/services/sandbox_credentials_service_test.py | """
Tests for the sandbox credentials service.

To run these tests:
    python -m pytest letta/services/sandbox_credentials_service_test.py -v

To exercise a real webhook, set the environment variables the service reads
(see SandboxCredentialsService.__init__):
    export STEP_ORCHESTRATOR_ENDPOINT=https://your-webhook-url.com
    export STEP_COMPLETE_KEY=your-secret-key
    python -m pytest letta/services/sandbox_credentials_service_test.py -v
"""
import os
from unittest.mock import AsyncMock, MagicMock, patch

import httpx
import pytest

from letta.schemas.user import User
from letta.services.sandbox_credentials_service import SandboxCredentialsService
@pytest.mark.asyncio
async def test_credentials_not_configured():
    """fetch_credentials returns an empty dict when no webhook URL is configured."""
    # clear=True wipes any webhook variables inherited from the test environment.
    with patch.dict(os.environ, {}, clear=True):
        service = SandboxCredentialsService()
        actor = User(id="user_123", organization_id="org_456")
        assert await service.fetch_credentials(actor) == {}
@pytest.mark.asyncio
async def test_credentials_fetch_success():
    """Test a successful credentials fetch with auth header and full payload."""
    # The service reads STEP_ORCHESTRATOR_ENDPOINT / STEP_COMPLETE_KEY in
    # __init__; patching other variable names would make it skip the webhook
    # entirely and return {} before the mock is ever called.
    with patch.dict(
        os.environ,
        {"STEP_ORCHESTRATOR_ENDPOINT": "https://example.com/credentials", "STEP_COMPLETE_KEY": "test-key"},
    ):
        service = SandboxCredentialsService()
        mock_user = User(id="user_123", organization_id="org_456")
        with patch("httpx.AsyncClient") as mock_client:
            # httpx.Response.raise_for_status() and .json() are synchronous, so
            # the response must be a MagicMock; an AsyncMock would make .json()
            # return a coroutine, which the service rejects as a non-dict payload.
            mock_response = MagicMock()
            mock_response.status_code = 200
            mock_response.json.return_value = {"API_KEY": "secret_key_123", "OTHER_VAR": "value"}
            mock_post = AsyncMock(return_value=mock_response)
            mock_client.return_value.__aenter__.return_value.post = mock_post
            result = await service.fetch_credentials(mock_user, tool_name="my_tool", agent_id="agent_789")
            assert result == {"API_KEY": "secret_key_123", "OTHER_VAR": "value"}
            mock_post.assert_called_once()
            call_args = mock_post.call_args
            assert call_args.kwargs["json"] == {
                "user_id": "user_123",
                "organization_id": "org_456",
                "tool_name": "my_tool",
                "agent_id": "agent_789",
            }
            assert call_args.kwargs["headers"]["Authorization"] == "Bearer test-key"
@pytest.mark.asyncio
async def test_credentials_fetch_without_auth():
    """Test credentials fetch when no authentication key is configured."""
    # clear=True guarantees no STEP_COMPLETE_KEY leaks in from the environment.
    with patch.dict(os.environ, {"STEP_ORCHESTRATOR_ENDPOINT": "https://example.com/credentials"}, clear=True):
        service = SandboxCredentialsService()
        mock_user = User(id="user_123", organization_id="org_456")
        with patch("httpx.AsyncClient") as mock_client:
            # Response methods are synchronous on httpx.Response — use MagicMock,
            # not AsyncMock, so .json() returns the dict directly.
            mock_response = MagicMock()
            mock_response.status_code = 200
            mock_response.json.return_value = {"API_KEY": "secret_key_123"}
            mock_post = AsyncMock(return_value=mock_response)
            mock_client.return_value.__aenter__.return_value.post = mock_post
            result = await service.fetch_credentials(mock_user)
            assert result == {"API_KEY": "secret_key_123"}
            call_args = mock_post.call_args
            # No key configured, so no Authorization header should be sent.
            assert "Authorization" not in call_args.kwargs["headers"]
@pytest.mark.asyncio
async def test_credentials_fetch_timeout():
    """Test that a webhook timeout is swallowed and yields an empty dict."""
    # Must patch the env var the service actually reads, otherwise it returns
    # {} before the webhook (and this mock) is ever reached.
    with patch.dict(os.environ, {"STEP_ORCHESTRATOR_ENDPOINT": "https://example.com/credentials"}):
        service = SandboxCredentialsService()
        mock_user = User(id="user_123", organization_id="org_456")
        with patch("httpx.AsyncClient") as mock_client:
            # side_effect on an AsyncMock raises when the post() call is awaited.
            mock_post = AsyncMock(side_effect=httpx.TimeoutException("Request timed out"))
            mock_client.return_value.__aenter__.return_value.post = mock_post
            result = await service.fetch_credentials(mock_user)
            assert result == {}
            # Prove the failure path was actually exercised.
            mock_post.assert_called_once()
@pytest.mark.asyncio
async def test_credentials_fetch_http_error():
    """Test that a non-2xx webhook response yields an empty dict."""
    with patch.dict(os.environ, {"STEP_ORCHESTRATOR_ENDPOINT": "https://example.com/credentials"}):
        service = SandboxCredentialsService()
        mock_user = User(id="user_123", organization_id="org_456")
        with patch("httpx.AsyncClient") as mock_client:
            # raise_for_status() is synchronous, so the side_effect must live on
            # a MagicMock to raise at call time — on an AsyncMock it would only
            # raise when awaited, which the service never does.
            mock_response = MagicMock()
            mock_response.status_code = 500
            mock_response.raise_for_status.side_effect = httpx.HTTPStatusError(
                "Server error", request=None, response=mock_response
            )
            mock_post = AsyncMock(return_value=mock_response)
            mock_client.return_value.__aenter__.return_value.post = mock_post
            result = await service.fetch_credentials(mock_user)
            assert result == {}
@pytest.mark.asyncio
async def test_credentials_fetch_invalid_response():
    """Test that a non-dict webhook payload yields an empty dict."""
    with patch.dict(os.environ, {"STEP_ORCHESTRATOR_ENDPOINT": "https://example.com/credentials"}):
        service = SandboxCredentialsService()
        mock_user = User(id="user_123", organization_id="org_456")
        with patch("httpx.AsyncClient") as mock_client:
            # MagicMock so the synchronous .json() returns the bad payload
            # directly instead of a coroutine.
            mock_response = MagicMock()
            mock_response.status_code = 200
            mock_response.json.return_value = "not a dict"
            mock_post = AsyncMock(return_value=mock_response)
            mock_client.return_value.__aenter__.return_value.post = mock_post
            result = await service.fetch_credentials(mock_user)
            assert result == {}
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/sandbox_credentials_service_test.py",
"license": "Apache License 2.0",
"lines": 111,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:test_watchdog_hang.py | #!/usr/bin/env python3
"""
Test script to verify the watchdog detects event loop hangs.
Run this to validate the watchdog works before deploying.
"""
import asyncio
import logging
import sys
import time
from pathlib import Path
# Setup logging to see watchdog output
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
# Add letta to path
sys.path.insert(0, str(Path(__file__).parent))
from letta.monitoring.event_loop_watchdog import start_watchdog
def blocking_operation(seconds: float):
    """Hold the thread in a synchronous sleep to simulate an event-loop hang."""
    print(f"\n⚠️ BLOCKING event loop for {seconds}s (simulating hang)...")
    time.sleep(seconds)
    print("✓ Blocking operation completed\n")
async def test_watchdog_detection():
    """Test that watchdog detects event loop hangs.

    Runs three scenarios against a watchdog configured with a 2s check
    interval and a 5s hang threshold:
      1. normal awaiting (no alert expected),
      2. a 4s synchronous block (below threshold, no alert expected),
      3. an 8s synchronous block (above threshold, alert expected).
    Pass/fail is judged manually by reading the watchdog's log output.
    """
    print("\n" + "=" * 70)
    print("EVENT LOOP WATCHDOG TEST")
    print("=" * 70)
    # Start the watchdog with aggressive settings for testing
    loop = asyncio.get_running_loop()
    watchdog = start_watchdog(loop, check_interval=2.0, timeout_threshold=5.0)
    print("\n✓ Watchdog started (will alert if no heartbeat for >5s)")
    print(" Checking every 2 seconds...\n")
    # Test 1: Normal operation (should NOT trigger)
    print("TEST 1: Normal async operation (no hang expected)")
    print("-" * 70)
    for i in range(3):
        # Yield to the loop each second so heartbeats keep flowing.
        await asyncio.sleep(1)
        print(f" Heartbeat {i + 1}/3 - event loop running normally")
    print("✓ Test 1 passed: No false alarms\n")
    await asyncio.sleep(3)
    # Test 2: Short block (should NOT trigger - under 5s threshold)
    print("TEST 2: Short blocking operation (4s - should NOT trigger)")
    print("-" * 70)
    blocking_operation(4.0)
    await asyncio.sleep(3)
    print("✓ Test 2 passed: Short blocks don't trigger false alarms\n")
    await asyncio.sleep(2)
    # Test 3: Long block (SHOULD trigger - exceeds 5s threshold)
    print("TEST 3: Long blocking operation (8s - SHOULD trigger watchdog)")
    print("-" * 70)
    print("🔍 Watch for ERROR logs from the watchdog...")
    blocking_operation(8.0)
    # Give watchdog time to detect and log
    await asyncio.sleep(3)
    print("\n" + "=" * 70)
    print("TEST COMPLETE")
    print("=" * 70)
    print("\nExpected results:")
    print(" ✓ Test 1: No watchdog alerts (normal operation)")
    print(" ✓ Test 2: No watchdog alerts (4s < 5s threshold)")
    print(" ✓ Test 3: WATCHDOG ALERT logged (8s > 5s threshold)")
    print("\nIf you saw 'EVENT LOOP HANG DETECTED' in Test 3, watchdog works! ✓")
    print("\n")
    # Stop watchdog
    watchdog.stop()
async def main():
    """Execute the watchdog detection scenario end to end.

    On any unhandled exception the error is reported, a traceback is
    printed, and the process exits with status code 1.
    """
    try:
        await test_watchdog_detection()
    except Exception as err:
        import traceback

        print(f"\n❌ Test failed with error: {err}")
        traceback.print_exc()
        sys.exit(1)
if __name__ == "__main__":
    # Manual diagnostic entry point: run the async scenarios under asyncio.
    asyncio.run(main())
| {
"repo_id": "letta-ai/letta",
"file_path": "test_watchdog_hang.py",
"license": "Apache License 2.0",
"lines": 74,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:tests/sdk/search_test.py | """
End-to-end tests for passage and message search endpoints using the SDK client.
These tests verify that the /v1/passages/search and /v1/messages/search endpoints work correctly
with Turbopuffer integration, including vector search, FTS, hybrid search, filtering, and pagination.
"""
import time
import uuid
from datetime import datetime, timedelta, timezone
from typing import Any
import pytest
from letta_client import Letta
from letta_client.types import CreateBlockParam, MessageCreateParam
from letta.config import LettaConfig
from letta.schemas.tool import ToolSearchResult
from letta.server.rest_api.routers.v1.passages import PassageSearchResult
from letta.server.server import SyncServer
from letta.settings import model_settings, settings
DEFAULT_ORG_ID = "org-00000000-0000-4000-8000-000000000000"
def cleanup_agent_with_messages(client: Letta, agent_id: str):
    """Best-effort teardown of an agent and its Turbopuffer-indexed messages.

    When message indexing is enabled, the agent's messages are purged from
    Turbopuffer first so no orphaned entries remain; the agent itself is then
    deleted via the SDK (which removes the SQL-side messages by cascade).
    Every failure is swallowed and reported as a warning so test teardown
    never raises.

    Args:
        client: Letta SDK client
        agent_id: ID of the agent to clean up
    """
    try:
        try:
            import asyncio

            from letta.helpers.tpuf_client import TurbopufferClient, should_use_tpuf_for_messages

            if should_use_tpuf_for_messages():
                # Purge the agent's Turbopuffer messages before the SQL cascade runs.
                asyncio.run(TurbopufferClient().delete_all_messages(agent_id, DEFAULT_ORG_ID))
        except Exception as exc:
            print(f"Warning: Failed to clean up Turbopuffer messages for agent {agent_id}: {exc}")
        # Deleting the agent cascades to its SQL messages.
        client.agents.delete(agent_id=agent_id)
    except Exception as exc:
        print(f"Warning: Failed to clean up agent {agent_id}: {exc}")
def cleanup_tool(client: Letta, tool_id: str):
    """Best-effort removal of a tool from both Turbopuffer and the database.

    When tool embedding is enabled, the tool's Turbopuffer entry is deleted
    first; the tool row is then removed via the SDK. All failures are
    swallowed and reported as warnings so test teardown never raises.

    Args:
        client: Letta SDK client
        tool_id: ID of the tool to clean up
    """
    try:
        try:
            import asyncio

            from letta.helpers.tpuf_client import TurbopufferClient, should_use_tpuf_for_tools

            if should_use_tpuf_for_tools():
                # Drop the tool's Turbopuffer entry before deleting the row.
                asyncio.run(TurbopufferClient().delete_tools(DEFAULT_ORG_ID, [tool_id]))
        except Exception as exc:
            print(f"Warning: Failed to clean up Turbopuffer tool {tool_id}: {exc}")
        client.tools.delete(tool_id=tool_id)
    except Exception as exc:
        print(f"Warning: Failed to clean up tool {tool_id}: {exc}")
@pytest.fixture(scope="module")
def server():
    """Module-scoped SyncServer backed by a freshly persisted LettaConfig."""
    cfg = LettaConfig.load()
    cfg.save()
    return SyncServer(init_with_default_org_and_user=False)
@pytest.fixture
def enable_turbopuffer():
    """Temporarily enable Turbopuffer for testing.

    Saves the Turbopuffer-related settings, switches Turbopuffer on
    (supplying a placeholder API key when none is configured, matching
    enable_message_embedding / enable_tool_embedding), and restores the
    originals after the test.
    """
    original_use_tpuf = settings.use_tpuf
    original_api_key = settings.tpuf_api_key
    original_environment = settings.environment
    # Enable Turbopuffer with test key
    settings.use_tpuf = True
    # Bug fix: the previous code assigned the just-saved value back to itself
    # (a no-op). Fall back to a placeholder key when none is configured, for
    # consistency with the sibling embedding fixtures.
    settings.tpuf_api_key = settings.tpuf_api_key or "test-key"
    settings.environment = "DEV"
    yield
    # Restore original values
    settings.use_tpuf = original_use_tpuf
    settings.tpuf_api_key = original_api_key
    settings.environment = original_environment
@pytest.fixture
def enable_message_embedding():
    """Turn on Turbopuffer plus per-message embedding for the duration of a test."""
    saved = {
        "use_tpuf": settings.use_tpuf,
        "tpuf_api_key": settings.tpuf_api_key,
        "embed_all_messages": settings.embed_all_messages,
        "environment": settings.environment,
    }
    settings.use_tpuf = True
    settings.tpuf_api_key = settings.tpuf_api_key or "test-key"
    settings.embed_all_messages = True
    settings.environment = "DEV"
    yield
    # Put every captured setting back exactly as it was.
    for attr, value in saved.items():
        setattr(settings, attr, value)
@pytest.fixture
def disable_turbopuffer():
    """Force Turbopuffer and message embedding off for the duration of a test."""
    saved_use_tpuf, saved_embed = settings.use_tpuf, settings.embed_all_messages
    settings.use_tpuf = False
    settings.embed_all_messages = False
    yield
    settings.use_tpuf = saved_use_tpuf
    settings.embed_all_messages = saved_embed
@pytest.mark.skipif(not settings.tpuf_api_key, reason="Turbopuffer API key not configured")
def test_passage_search_basic(client: Letta, enable_turbopuffer):
    """Test basic passage search functionality through the SDK.

    End-to-end flow: create an agent, attach a fresh archive, insert three
    passages, then exercise /v1/passages/search scoped by agent_id and by
    archive_id, asserting result structure (passage/score/metadata) and
    relevance. Cleanup is best-effort in the finally blocks.
    """
    # Create an agent
    agent = client.agents.create(
        name=f"test_passage_search_{uuid.uuid4()}",
        memory_blocks=[CreateBlockParam(label="persona", value="test assistant")],
        model="openai/gpt-4o-mini",
        embedding="openai/text-embedding-3-small",
    )
    try:
        # Create an archive and attach to agent
        archive = client.archives.create(name=f"test_archive_{uuid.uuid4()}", embedding="openai/text-embedding-3-small")
        try:
            # Attach archive to agent
            client.agents.archives.attach(agent_id=agent.id, archive_id=archive.id)
            # Insert some passages
            test_passages = [
                "Python is a popular programming language for data science and machine learning.",
                "JavaScript is widely used for web development and frontend applications.",
                "Turbopuffer is a vector database optimized for performance and scalability.",
            ]
            for passage_text in test_passages:
                client.archives.passages.create(archive_id=archive.id, text=passage_text)
            # Wait for indexing (fixed sleep; assumes 2s suffices — TODO confirm)
            time.sleep(2)
            # Test search by agent_id
            results = client.post(
                "/v1/passages/search",
                cast_to=list[PassageSearchResult],
                body={
                    "query": "python programming",
                    "agent_id": agent.id,
                    "limit": 10,
                },
            )
            assert len(results) > 0, "Should find at least one passage"
            assert any("Python" in result["passage"]["text"] for result in results), "Should find Python-related passage"
            # Verify result structure
            for result in results:
                assert "passage" in result, "Result should have passage field"
                assert "score" in result, "Result should have score field"
                assert "metadata" in result, "Result should have metadata field"
                assert isinstance(result["score"], float), "Score should be a float"
            # Test search by archive_id
            archive_results = client.post(
                "/v1/passages/search",
                cast_to=list[PassageSearchResult],
                body={
                    "query": "vector database",
                    "archive_id": archive.id,
                    "limit": 10,
                },
            )
            assert len(archive_results) > 0, "Should find passages in archive"
            assert any("Turbopuffer" in result["passage"]["text"] or "vector" in result["passage"]["text"] for result in archive_results), (
                "Should find vector-related passage"
            )
        finally:
            # Clean up archive
            try:
                client.archives.delete(archive_id=archive.id)
            except Exception:
                pass
    finally:
        # Clean up agent
        cleanup_agent_with_messages(client, agent.id)
@pytest.mark.skipif(not settings.tpuf_api_key, reason="Turbopuffer API key not configured")
def test_passage_search_with_tags(client: Letta, enable_turbopuffer):
    """Test passage search with tag filtering.

    Currently only exercises the untagged search path: passages are created
    without tags and a plain query is asserted to return results. Actual tag
    filtering is left to the backend since SDK support for tag parameters
    may vary.
    """
    # Create an agent
    agent = client.agents.create(
        name=f"test_passage_tags_{uuid.uuid4()}",
        memory_blocks=[CreateBlockParam(label="persona", value="test assistant")],
        model="openai/gpt-4o-mini",
        embedding="openai/text-embedding-3-small",
    )
    try:
        # Create an archive
        archive = client.archives.create(name=f"test_archive_tags_{uuid.uuid4()}", embedding="openai/text-embedding-3-small")
        try:
            # Attach archive to agent
            client.agents.archives.attach(agent_id=agent.id, archive_id=archive.id)
            # Insert passages with tags (if supported)
            # Note: Tag support may depend on the SDK version
            test_passages = [
                "Python tutorial for beginners",
                "Advanced Python techniques",
                "JavaScript basics",
            ]
            for passage_text in test_passages:
                client.archives.passages.create(archive_id=archive.id, text=passage_text)
            # Wait for indexing
            time.sleep(2)
            # Test basic search without tags first
            results = client.post(
                "/v1/passages/search",
                cast_to=list[PassageSearchResult],
                body={
                    "query": "programming tutorial",
                    "agent_id": agent.id,
                    "limit": 10,
                },
            )
            assert len(results) > 0, "Should find passages"
            # Test with tag filtering if supported
            # Note: The SDK may not expose tag parameters directly, so this test verifies basic functionality
            # The backend will handle tag filtering when available
        finally:
            # Clean up archive
            try:
                client.archives.delete(archive_id=archive.id)
            except Exception:
                pass
    finally:
        # Clean up agent
        cleanup_agent_with_messages(client, agent.id)
@pytest.mark.skipif(not settings.tpuf_api_key, reason="Turbopuffer API key not configured")
def test_passage_search_with_date_filters(client: Letta, enable_turbopuffer):
    """Test passage search with date range filtering.

    Inserts two passages, searches with start_date set one hour in the past,
    and asserts every returned passage's created_at is >= start_date.
    """
    # Create an agent
    agent = client.agents.create(
        name=f"test_passage_dates_{uuid.uuid4()}",
        memory_blocks=[CreateBlockParam(label="persona", value="test assistant")],
        model="openai/gpt-4o-mini",
        embedding="openai/text-embedding-3-small",
    )
    try:
        # Create an archive
        archive = client.archives.create(name=f"test_archive_dates_{uuid.uuid4()}", embedding="openai/text-embedding-3-small")
        try:
            # Attach archive to agent
            client.agents.archives.attach(agent_id=agent.id, archive_id=archive.id)
            # Insert passages at different times
            client.archives.passages.create(archive_id=archive.id, text="Recent passage about AI trends")
            # Wait a bit before creating another
            time.sleep(1)
            client.archives.passages.create(archive_id=archive.id, text="Another passage about machine learning")
            # Wait for indexing
            time.sleep(2)
            # Test search with date range
            now = datetime.now(timezone.utc)
            start_date = now - timedelta(hours=1)
            results = client.post(
                "/v1/passages/search",
                cast_to=list[PassageSearchResult],
                body={
                    "query": "AI machine learning",
                    "agent_id": agent.id,
                    "limit": 10,
                    "start_date": start_date.isoformat(),
                },
            )
            assert len(results) > 0, "Should find recent passages"
            # Verify all results are within date range
            for result in results:
                passage_date = result["passage"]["created_at"]
                if passage_date:
                    # Convert to datetime if it's a string (normalize "Z" to "+00:00"
                    # so fromisoformat parses it as timezone-aware)
                    if isinstance(passage_date, str):
                        passage_date = datetime.fromisoformat(passage_date.replace("Z", "+00:00"))
                    assert passage_date >= start_date, "Passage should be after start_date"
        finally:
            # Clean up archive
            try:
                client.archives.delete(archive_id=archive.id)
            except Exception:
                pass
    finally:
        # Clean up agent
        cleanup_agent_with_messages(client, agent.id)
@pytest.mark.skipif(
    not (settings.use_tpuf and settings.tpuf_api_key and model_settings.openai_api_key and settings.embed_all_messages),
    reason="Message search requires Turbopuffer, OpenAI, and message embedding to be enabled",
)
def test_message_search_basic(client: Letta, enable_message_embedding):
    """Test basic message search functionality through the SDK.

    Sends one user message to a fresh agent, waits for async embedding and
    indexing to settle, then runs a full-text search via /v1/messages/search
    and verifies each result carries agent_id and a parseable created_at.
    """
    # Create an agent
    agent = client.agents.create(
        name=f"test_message_search_{uuid.uuid4()}",
        memory_blocks=[CreateBlockParam(label="persona", value="helpful assistant")],
        model="openai/gpt-4o-mini",
        embedding="openai/text-embedding-3-small",
    )
    try:
        # Send messages to the agent
        test_messages = [
            "What is the capital of Saudi Arabia?",
        ]
        for msg_text in test_messages:
            client.agents.messages.create(agent_id=agent.id, messages=[MessageCreateParam(role="user", content=msg_text)])
        # Wait for messages to be indexed and database transactions to complete
        # Extra time needed for async embedding and database commits
        time.sleep(10)
        # Test FTS search for messages
        # Note: The endpoint returns LettaMessageSearchResult (API schema)
        # and we treat the response as generic dicts here to avoid tight
        # coupling to internal server-side models.
        results = client.post(
            "/v1/messages/search",
            cast_to=list[dict[str, Any]],
            body={
                "query": "capital Saudi Arabia",
                "search_mode": "fts",
                "limit": 10,
            },
        )
        print(f"Search returned {len(results)} results")
        if len(results) > 0:
            print(f"First result type: {type(results[0])}")
            print(f"First result keys: {results[0].keys() if isinstance(results[0], dict) else 'N/A'}")
        for result in results:
            assert "agent_id" in result, "Result should have agent_id field"
            # created_at should always be present and parseable
            assert "created_at" in result, "Result should have created_at field"
            assert result["created_at"], "created_at should be set"
            created_at = result["created_at"]
            if isinstance(created_at, str):
                # Handle both "+00:00" and "Z" suffixes
                datetime.fromisoformat(created_at.replace("Z", "+00:00"))
        assert len(results) > 0, f"Should find at least one message. Got {len(results)} results."
    finally:
        # Clean up agent
        cleanup_agent_with_messages(client, agent.id)
@pytest.mark.skipif(not settings.tpuf_api_key, reason="Turbopuffer API key not configured")
def test_passage_search_pagination(client: Letta, enable_turbopuffer):
    """Test passage search pagination.

    Inserts 10 passages that all match the query, then verifies `limit` is
    honored exactly for limits below the match count (3 and 5) and that a
    large limit (20) returns all matches.
    """
    # Create an agent
    agent = client.agents.create(
        name=f"test_passage_pagination_{uuid.uuid4()}",
        memory_blocks=[CreateBlockParam(label="persona", value="test assistant")],
        model="openai/gpt-4o-mini",
        embedding="openai/text-embedding-3-small",
    )
    try:
        # Create an archive
        archive = client.archives.create(name=f"test_archive_pagination_{uuid.uuid4()}", embedding="openai/text-embedding-3-small")
        try:
            # Attach archive to agent
            client.agents.archives.attach(agent_id=agent.id, archive_id=archive.id)
            # Insert many passages
            for i in range(10):
                client.archives.passages.create(archive_id=archive.id, text=f"Test passage number {i} about programming")
            # Wait for indexing
            time.sleep(2)
            # Test with different limit values
            results_limit_3 = client.post(
                "/v1/passages/search",
                cast_to=list[PassageSearchResult],
                body={
                    "query": "programming",
                    "agent_id": agent.id,
                    "limit": 3,
                },
            )
            assert len(results_limit_3) == 3, "Should respect limit parameter"
            results_limit_5 = client.post(
                "/v1/passages/search",
                cast_to=list[PassageSearchResult],
                body={
                    "query": "programming",
                    "agent_id": agent.id,
                    "limit": 5,
                },
            )
            assert len(results_limit_5) == 5, "Should return 5 results"
            results_all = client.post(
                "/v1/passages/search",
                cast_to=list[PassageSearchResult],
                body={
                    "query": "programming",
                    "agent_id": agent.id,
                    "limit": 20,
                },
            )
            assert len(results_all) >= 10, "Should return all matching passages"
        finally:
            # Clean up archive
            try:
                client.archives.delete(archive_id=archive.id)
            except Exception:
                pass
    finally:
        # Clean up agent
        cleanup_agent_with_messages(client, agent.id)
@pytest.mark.skipif(not settings.tpuf_api_key, reason="Turbopuffer API key not configured")
def test_passage_search_org_wide(client: Letta, enable_turbopuffer):
    """Test organization-wide passage search (without agent_id or archive_id).

    Creates two agents, each with its own archive and a distinctive passage,
    then searches with neither agent_id nor archive_id in the body and
    asserts passages from both agents come back.
    """
    # Create multiple agents with archives
    agent1 = client.agents.create(
        name=f"test_org_search_agent1_{uuid.uuid4()}",
        memory_blocks=[CreateBlockParam(label="persona", value="test assistant 1")],
        model="openai/gpt-4o-mini",
        embedding="openai/text-embedding-3-small",
    )
    agent2 = client.agents.create(
        name=f"test_org_search_agent2_{uuid.uuid4()}",
        memory_blocks=[CreateBlockParam(label="persona", value="test assistant 2")],
        model="openai/gpt-4o-mini",
        embedding="openai/text-embedding-3-small",
    )
    try:
        # Create archives for both agents
        archive1 = client.archives.create(name=f"test_archive_org1_{uuid.uuid4()}", embedding="openai/text-embedding-3-small")
        archive2 = client.archives.create(name=f"test_archive_org2_{uuid.uuid4()}", embedding="openai/text-embedding-3-small")
        try:
            # Attach archives
            client.agents.archives.attach(agent_id=agent1.id, archive_id=archive1.id)
            client.agents.archives.attach(agent_id=agent2.id, archive_id=archive2.id)
            # Insert passages in both archives
            client.archives.passages.create(archive_id=archive1.id, text="Unique passage in agent1 about quantum computing")
            client.archives.passages.create(archive_id=archive2.id, text="Unique passage in agent2 about blockchain technology")
            # Wait for indexing
            time.sleep(2)
            # Test org-wide search (no agent_id or archive_id)
            results = client.post(
                "/v1/passages/search",
                cast_to=list[PassageSearchResult],
                body={
                    "query": "unique passage",
                    "limit": 20,
                },
            )
            # Should find passages from both agents
            assert len(results) >= 2, "Should find passages from multiple agents"
            found_texts = [result["passage"]["text"] for result in results]
            assert any("quantum computing" in text for text in found_texts), "Should find agent1 passage"
            assert any("blockchain" in text for text in found_texts), "Should find agent2 passage"
        finally:
            # Clean up archives
            try:
                client.archives.delete(archive_id=archive1.id)
            except Exception:
                pass
            try:
                client.archives.delete(archive_id=archive2.id)
            except Exception:
                pass
    finally:
        # Clean up agents
        cleanup_agent_with_messages(client, agent1.id)
        cleanup_agent_with_messages(client, agent2.id)
@pytest.fixture
def enable_tool_embedding():
    """Switch on Turbopuffer and tool embedding, restoring prior settings afterwards."""
    snapshot = (
        settings.use_tpuf,
        settings.tpuf_api_key,
        settings.embed_tools,
        settings.environment,
    )
    settings.use_tpuf = True
    settings.tpuf_api_key = settings.tpuf_api_key or "test-key"
    settings.embed_tools = True
    settings.environment = "DEV"
    yield
    (
        settings.use_tpuf,
        settings.tpuf_api_key,
        settings.embed_tools,
        settings.environment,
    ) = snapshot
@pytest.mark.skipif(
    not (settings.use_tpuf and settings.tpuf_api_key and model_settings.openai_api_key and settings.embed_tools),
    reason="Tool search requires Turbopuffer, OpenAI, and tool embedding to be enabled",
)
def test_tool_search_basic(client: Letta, enable_tool_embedding):
    """Test basic tool search functionality through the SDK.

    Creates three tools with semantically distinct descriptions (email,
    weather, finance), then exercises /v1/tools/search in hybrid mode:
    semantic relevance for two different queries, result structure
    (tool/combined_score), and a tag-filtered search. All created tools are
    cleaned up in the finally block.
    """
    tool_ids = []
    try:
        # Create test tools with distinct descriptions for semantic search
        test_tools = [
            {
                "source_code": '''
def send_email_to_user(recipient: str, subject: str, body: str) -> str:
    """Send an email message to a specified recipient.

    Args:
        recipient: Email address of the recipient
        subject: Subject line of the email
        body: Body content of the email message

    Returns:
        Confirmation message
    """
    return f"Email sent to {recipient}"
''',
                "description": "Send an email message to a specified recipient with subject and body.",
                "tags": ["communication", "email"],
            },
            {
                "source_code": '''
def fetch_weather_data(city: str, units: str = "celsius") -> str:
    """Fetch current weather information for a city.

    Args:
        city: Name of the city to get weather for
        units: Temperature units (celsius or fahrenheit)

    Returns:
        Weather information string
    """
    return f"Weather in {city}: sunny, 25 {units}"
''',
                "description": "Fetch current weather information for a specified city.",
                "tags": ["weather", "api"],
            },
            {
                "source_code": '''
def calculate_compound_interest(principal: float, rate: float, years: int) -> float:
    """Calculate compound interest on an investment.

    Args:
        principal: Initial investment amount
        rate: Annual interest rate as decimal
        years: Number of years

    Returns:
        Final amount after compound interest
    """
    return principal * (1 + rate) ** years
''',
                "description": "Calculate compound interest on a financial investment over time.",
                "tags": ["finance", "calculator"],
            },
        ]
        # Create tools via SDK
        for tool_data in test_tools:
            tool = client.tools.create(
                source_code=tool_data["source_code"],
                description=tool_data["description"],
                tags=tool_data["tags"],
            )
            tool_ids.append(tool.id)
        # Wait for embeddings to be indexed
        time.sleep(3)
        # Test semantic search - should find email-related tool
        results = client.post(
            "/v1/tools/search",
            cast_to=list[ToolSearchResult],
            body={
                "query": "send message to someone",
                "search_mode": "hybrid",
                "limit": 10,
            },
        )
        assert len(results) > 0, "Should find at least one tool"
        # The email tool should be ranked highly for this query
        tool_names = [result["tool"]["name"] for result in results]
        assert "send_email_to_user" in tool_names, "Should find email tool for messaging query"
        # Verify result structure
        for result in results:
            assert "tool" in result, "Result should have tool field"
            assert "combined_score" in result, "Result should have combined_score field"
            assert isinstance(result["combined_score"], float), "combined_score should be a float"
        # Test search with different query - should find weather tool
        weather_results = client.post(
            "/v1/tools/search",
            cast_to=list[ToolSearchResult],
            body={
                "query": "get temperature forecast",
                "search_mode": "hybrid",
                "limit": 10,
            },
        )
        assert len(weather_results) > 0, "Should find tools for weather query"
        weather_tool_names = [result["tool"]["name"] for result in weather_results]
        assert "fetch_weather_data" in weather_tool_names, "Should find weather tool"
        # Test search with tag filter
        finance_results = client.post(
            "/v1/tools/search",
            cast_to=list[ToolSearchResult],
            body={
                "query": "money calculation",
                "tags": ["finance"],
                "search_mode": "hybrid",
                "limit": 10,
            },
        )
        # Should find the finance tool when filtering by tag.
        # NOTE(review): assertion is conditional — tag-filtered results may be
        # empty here, and the test tolerates that rather than failing.
        if len(finance_results) > 0:
            finance_tool_names = [result["tool"]["name"] for result in finance_results]
            assert "calculate_compound_interest" in finance_tool_names, "Should find finance tool with tag filter"
    finally:
        # Clean up all created tools
        for tool_id in tool_ids:
            cleanup_tool(client, tool_id)
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/sdk/search_test.py",
"license": "Apache License 2.0",
"lines": 590,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:alembic/versions/2dbb2cf49e07_add_models_table.py | """add models table
Revision ID: 2dbb2cf49e07
Revises: a1b2c3d4e5f6
Create Date: 2025-11-06 14:49:10.902099
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "2dbb2cf49e07"
down_revision: Union[str, None] = "a1b2c3d4e5f6"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Create the provider_models table plus its lookup indexes.

    provider_models stores per-provider model metadata (handle, display
    name, type, endpoint, capability flags), with uniqueness enforced per
    (handle, organization, type) and per (name, provider, type). The
    migration also relaxes providers.organization_id to nullable —
    presumably to allow provider rows not scoped to one organization;
    confirm against the corresponding ORM change.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        "provider_models",
        # Model identity / display
        sa.Column("handle", sa.String(), nullable=False),
        sa.Column("display_name", sa.String(), nullable=False),
        sa.Column("name", sa.String(), nullable=False),
        # Ownership: provider is required, organization is optional
        sa.Column("provider_id", sa.String(), nullable=False),
        sa.Column("organization_id", sa.String(), nullable=True),
        sa.Column("model_type", sa.String(), nullable=False),
        sa.Column("enabled", sa.Boolean(), server_default="TRUE", nullable=False),
        sa.Column("model_endpoint_type", sa.String(), nullable=False),
        # Capability metadata (nullable: may be unknown for some models)
        sa.Column("max_context_window", sa.Integer(), nullable=True),
        sa.Column("supports_token_streaming", sa.Boolean(), nullable=True),
        sa.Column("supports_tool_calling", sa.Boolean(), nullable=True),
        sa.Column("embedding_dim", sa.Integer(), nullable=True),
        # Standard audit columns shared with other tables
        sa.Column("id", sa.String(), nullable=False),
        sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=True),
        sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=True),
        sa.Column("is_deleted", sa.Boolean(), server_default=sa.text("FALSE"), nullable=False),
        sa.Column("_created_by_id", sa.String(), nullable=True),
        sa.Column("_last_updated_by_id", sa.String(), nullable=True),
        sa.ForeignKeyConstraint(["organization_id"], ["organizations.id"], ondelete="CASCADE"),
        sa.ForeignKeyConstraint(["provider_id"], ["providers.id"], ondelete="CASCADE"),
        sa.PrimaryKeyConstraint("id"),
        sa.UniqueConstraint("handle", "organization_id", "model_type", name="unique_handle_per_org_and_type"),
        sa.UniqueConstraint("name", "provider_id", "model_type", name="unique_model_per_provider_and_type"),
    )
    op.create_index(op.f("ix_provider_models_handle"), "provider_models", ["handle"], unique=False)
    op.create_index(op.f("ix_provider_models_model_type"), "provider_models", ["model_type"], unique=False)
    op.create_index(op.f("ix_provider_models_organization_id"), "provider_models", ["organization_id"], unique=False)
    op.create_index(op.f("ix_provider_models_provider_id"), "provider_models", ["provider_id"], unique=False)
    op.alter_column("providers", "organization_id", existing_type=sa.VARCHAR(), nullable=True)
    # ### end Alembic commands ###
def downgrade() -> None:
    """Reverse the upgrade: re-tighten providers.organization_id to NOT NULL,
    then drop provider_models and its indexes (indexes first, table last)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column("providers", "organization_id", existing_type=sa.VARCHAR(), nullable=False)
    op.drop_index(op.f("ix_provider_models_provider_id"), table_name="provider_models")
    op.drop_index(op.f("ix_provider_models_organization_id"), table_name="provider_models")
    op.drop_index(op.f("ix_provider_models_model_type"), table_name="provider_models")
    op.drop_index(op.f("ix_provider_models_handle"), table_name="provider_models")
    op.drop_table("provider_models")
    # ### end Alembic commands ###
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/2dbb2cf49e07_add_models_table.py",
"license": "Apache License 2.0",
"lines": 56,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/89fd4648866b_add_last_stop_reason_to_agent_state.py | """add last_stop_reason to agent state
Revision ID: 89fd4648866b
Revises: f6cd5a1e519d
Create Date: 2025-10-27 16:55:54.383688
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "89fd4648866b"
down_revision: Union[str, None] = "f6cd5a1e519d"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Add the nullable agents.last_stop_reason string column."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column("agents", sa.Column("last_stop_reason", sa.String(), nullable=True))
    # ### end Alembic commands ###
def downgrade() -> None:
    """Drop agents.last_stop_reason, reversing the upgrade."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column("agents", "last_stop_reason")
    # ### end Alembic commands ###
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/89fd4648866b_add_last_stop_reason_to_agent_state.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/a1b2c3d4e5f6_add_index_to_step_metrics_run_id.py | """add index to step_metrics run_id
Revision ID: a1b2c3d4e5f6
Revises: d798609d65ff
Create Date: 2025-11-11 19:16:00.000000
"""
from typing import Sequence, Union
from alembic import op
from letta.settings import settings
# revision identifiers, used by Alembic.
revision: str = "a1b2c3d4e5f6"
down_revision: Union[str, None] = "d798609d65ff"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Create ix_step_metrics_run_id (Postgres only; no-op on SQLite)."""
    # Skip this migration for SQLite
    if not settings.letta_pg_uri_no_default:
        return
    # Add index to step_metrics.run_id for efficient foreign key cascade operations
    # This prevents full table scans when runs are deleted (ondelete="SET NULL")
    op.create_index("ix_step_metrics_run_id", "step_metrics", ["run_id"], unique=False, if_not_exists=True)
def downgrade() -> None:
    """Drop ix_step_metrics_run_id (Postgres only; no-op on SQLite)."""
    # Skip this migration for SQLite
    if not settings.letta_pg_uri_no_default:
        return
    op.drop_index("ix_step_metrics_run_id", table_name="step_metrics", if_exists=True)
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/a1b2c3d4e5f6_add_index_to_step_metrics_run_id.py",
"license": "Apache License 2.0",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/b1c2d3e4f5a6_drop_unused_and_redundant_indexes.py | """drop unused and redundant indexes
Revision ID: b1c2d3e4f5a6
Revises: 2dbb2cf49e07
Create Date: 2025-11-11 21:16:00.000000
"""
from typing import Sequence, Union
from alembic import op
from letta.settings import settings
# revision identifiers, used by Alembic.
revision: str = "b1c2d3e4f5a6"
down_revision: Union[str, None] = "2dbb2cf49e07"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Drop unused/redundant indexes and a leftover table (Postgres only).

    All drops use if_exists=True so the migration is idempotent across
    environments where some of these objects were never created. No-op on
    SQLite.
    """
    # Skip this migration for SQLite
    if not settings.letta_pg_uri_no_default:
        return
    # Drop unused indexes
    op.drop_index("ix_passage_tags_archive_tag", table_name="passage_tags", if_exists=True)
    op.drop_index("ix_jobs_created_at", table_name="jobs", if_exists=True)
    op.drop_index("ix_block_project_id", table_name="block", if_exists=True)
    op.drop_index("ix_block_label", table_name="block", if_exists=True)
    # Drop redundant indexes (covered by other composite indexes or FKs)
    op.drop_index("ix_messages_run_id", table_name="messages", if_exists=True)  # Redundant with ix_messages_run_sequence
    op.drop_index("ix_files_agents_agent_id", table_name="files_agents", if_exists=True)  # Redundant with FK index
    op.drop_index(
        "ix_agents_organization_id", table_name="agents", if_exists=True
    )  # Redundant with ix_agents_organization_id_deployment_id
    op.drop_index(
        "ix_passage_tags_archive_id", table_name="passage_tags", if_exists=True
    )  # Redundant with ix_passage_tags_archive_tag and ix_passage_tags_org_archive
    op.drop_index(
        "ix_blocks_block_label", table_name="blocks_agents", if_exists=True
    )  # Redundant with ix_blocks_agents_block_label_agent_id
    op.drop_index("ix_block_organization_id", table_name="block", if_exists=True)  # Redundant with ix_block_org_project_template
    op.drop_index(
        "archival_passages_org_idx", table_name="archival_passages", if_exists=True
    )  # Redundant with ix_archival_passages_org_archive
    # Drop unused table (leftover from PlanetScale migration)
    op.drop_table("_planetscale_import", if_exists=True)
def downgrade() -> None:
    """Re-create the dropped indexes in reverse order (Postgres only).

    The _planetscale_import table is intentionally NOT restored. No-op on
    SQLite.
    """
    # Skip this migration for SQLite
    if not settings.letta_pg_uri_no_default:
        return
    # Re-create indexes in reverse order
    op.create_index("archival_passages_org_idx", "archival_passages", ["organization_id"], unique=False, if_not_exists=True)
    op.create_index("ix_block_organization_id", "block", ["organization_id"], unique=False, if_not_exists=True)
    op.create_index("ix_blocks_block_label", "blocks_agents", ["block_label"], unique=False, if_not_exists=True)
    op.create_index("ix_passage_tags_archive_id", "passage_tags", ["archive_id"], unique=False, if_not_exists=True)
    op.create_index("ix_agents_organization_id", "agents", ["organization_id"], unique=False, if_not_exists=True)
    op.create_index("ix_files_agents_agent_id", "files_agents", ["agent_id"], unique=False, if_not_exists=True)
    op.create_index("ix_messages_run_id", "messages", ["run_id"], unique=False, if_not_exists=True)
    op.create_index("ix_block_label", "block", ["label"], unique=False, if_not_exists=True)
    op.create_index("ix_block_project_id", "block", ["project_id"], unique=False, if_not_exists=True)
    op.create_index("ix_jobs_created_at", "jobs", ["created_at", "id"], unique=False, if_not_exists=True)
    op.create_index("ix_passage_tags_archive_tag", "passage_tags", ["archive_id", "tag"], unique=False, if_not_exists=True)
    # Note: Not recreating _planetscale_import table in downgrade as it's application-specific
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/b1c2d3e4f5a6_drop_unused_and_redundant_indexes.py",
"license": "Apache License 2.0",
"lines": 57,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/d798609d65ff_add_index_on_messages_step_id.py | """add_index_on_messages_step_id
Revision ID: d798609d65ff
Revises: 89fd4648866b
Create Date: 2025-11-07 15:43:59.446292
"""
from typing import Sequence, Union
from alembic import op
from letta.settings import settings
# revision identifiers, used by Alembic.
revision: str = "d798609d65ff"
down_revision: Union[str, None] = "89fd4648866b"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Create idx_messages_step_id on messages.step_id (Postgres only)."""
    # SQLite deployments have no Postgres URI configured and skip this migration.
    if settings.letta_pg_uri_no_default:
        op.create_index("idx_messages_step_id", "messages", ["step_id"], if_not_exists=True)
def downgrade() -> None:
    """Drop idx_messages_step_id (Postgres only); mirror of upgrade()."""
    # Nothing was created on SQLite, so there is nothing to undo there.
    if settings.letta_pg_uri_no_default:
        op.drop_index("idx_messages_step_id", table_name="messages", if_exists=True)
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/d798609d65ff_add_index_on_messages_step_id.py",
"license": "Apache License 2.0",
"lines": 23,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:letta/exceptions/logging.py | """
Helper utilities for structured exception logging.
Use these when you need to add context to exceptions before raising them.
"""
from typing import Any, Dict, Optional
from letta.log import get_logger
logger = get_logger(__name__)
def log_and_raise(
    exception: Exception,
    message: str,
    context: Optional[Dict[str, Any]] = None,
    level: str = "error",
) -> None:
    """
    Log an exception with structured context and then raise it.

    This is useful when you want to ensure an exception is logged with
    full context before raising it.

    Args:
        exception: The exception to log and raise
        message: Human-readable message to log
        context: Additional context to include in logs (dict)
        level: Log level (default: "error")

    Raises:
        The passed-in exception, unchanged.

    Example:
        try:
            result = some_operation()
        except ValueError as e:
            log_and_raise(
                e,
                "Failed to process operation",
                context={
                    "user_id": user.id,
                    "operation": "some_operation",
                    "input": input_data,
                }
            )
    """
    # Delegate to log_exception so the structured-logging format (extra dict,
    # message layout, exc_info) lives in exactly one place instead of being
    # duplicated between the two helpers.
    log_exception(exception, message, context=context, level=level)
    raise exception
def log_exception(
    exception: Exception,
    message: str,
    context: Optional[Dict[str, Any]] = None,
    level: str = "error",
) -> None:
    """
    Log an exception with structured context without raising it.

    Use this when you want to record a failure but handle it gracefully.

    Args:
        exception: The exception to log
        message: Human-readable message to log
        context: Additional context to include in logs (dict)
        level: Log level (default: "error")

    Example:
        try:
            result = some_operation()
        except ValueError as e:
            log_exception(
                e,
                "Operation failed, using fallback",
                context={"user_id": user.id}
            )
            result = fallback_operation()
    """
    exc_cls = exception.__class__
    # Base structured fields, merged with any caller-supplied context.
    structured = {
        "exception_type": exc_cls.__name__,
        "exception_message": str(exception),
        "exception_module": exc_cls.__module__,
        **(context or {}),
    }
    # Resolve logger.error / logger.warning / ... dynamically from the level name.
    emit = getattr(logger, level.lower())
    emit(
        f"{message}: {exc_cls.__name__}: {str(exception)}",
        extra=structured,
        exc_info=exception,
    )
def add_exception_context(exception: Exception, **context) -> Exception:
    """
    Attach structured context to an exception for the global exception handler.

    Stores the key/value pairs on a ``__letta_context__`` dict attribute; the
    global exception handler automatically includes that dict in its logs.
    Repeated calls merge into the same dict. Returns the same exception
    instance so it can be raised inline.

    Args:
        exception: The exception to add context to
        **context: Key-value pairs to add as context

    Returns:
        The same exception with context attached

    Example:
        try:
            result = operation()
        except ValueError as e:
            raise add_exception_context(
                e,
                user_id=user.id,
                operation="do_thing",
                input_data=data,
            )
    """
    # EAFP: create the attribute only if it is missing, then merge in place.
    try:
        ctx = exception.__letta_context__
    except AttributeError:
        ctx = {}
        exception.__letta_context__ = ctx
    ctx.update(context)
    return exception
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/exceptions/logging.py",
"license": "Apache License 2.0",
"lines": 113,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
letta-ai/letta:letta/helpers/tool_helpers.py | import hashlib
from letta.constants import MODAL_VERSION_HASH_LENGTH
from letta.schemas.tool import Tool
def _serialize_dependencies(tool: Tool) -> str:
    """
    Serialize a tool's pip/npm dependencies into a stable string for hashing.

    TODO(LET-3770): sort the requirement lists so ordering differences do not
    change the resulting hash; for now we rely on the list's string repr.
    """
    sections = []
    for prefix, requirements in (("pip", tool.pip_requirements), ("npm", tool.npm_requirements)):
        if requirements:
            sections.append(f"{prefix}:{requirements}")
    return ";".join(sections)
def compute_tool_hash(tool: Tool):
    """
    Calculate a short hash identifying the current version of the tool.

    The hash changes when:
    - Tool source code changes
    - Tool source type (language/runtime) changes
    - Tool dependencies change (via _serialize_dependencies)

    The SHA-256 digest is truncated to MODAL_VERSION_HASH_LENGTH characters.
    """
    fingerprint = "|".join(
        [
            tool.source_code or "",
            tool.source_type or "",
            _serialize_dependencies(tool),
        ]
    )
    return hashlib.sha256(fingerprint.encode()).hexdigest()[:MODAL_VERSION_HASH_LENGTH]
def generate_modal_function_name(tool_name: str, organization_id: str, project_id: str = "default") -> str:
    """
    Generate a Modal function name from the tool name and organization ID.

    NOTE(review): the previous docstring claimed the *project ID* was
    shortened, but the code truncates ``organization_id`` and never reads
    ``project_id`` — presumably kept for future use; confirm before relying
    on it.

    Args:
        tool_name: Name of the tool (truncated to MAX_TOOL_NAME_LENGTH chars)
        organization_id: Full organization ID; truncated so the combined name
            fits the 64-character budget below
        project_id: Currently unused (default "default")

    Returns:
        Modal function name of the form "{tool_name}_{short_organization_id}"
    """
    from letta.constants import MAX_TOOL_NAME_LENGTH

    # Total budget for the generated name; the extra 1 accounts for the "_"
    # separator between the tool name and the shortened organization id.
    max_tool_name_length = 64
    # Truncate the organization_id to whatever budget remains after the tool name
    short_organization_id = organization_id[: (max_tool_name_length - MAX_TOOL_NAME_LENGTH - 1)]
    # make extra sure the tool name is not too long
    name = f"{tool_name[:MAX_TOOL_NAME_LENGTH]}_{short_organization_id}"
    # safe fallback
    return name
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/helpers/tool_helpers.py",
"license": "Apache License 2.0",
"lines": 52,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
letta-ai/letta:letta/log_context.py | from contextvars import ContextVar
from typing import Any, Optional
# Context-local storage for structured logging fields. The shared `{}` default
# is safe here because every mutator below copies before writing and never
# mutates the default dict in place.
_log_context: ContextVar[dict[str, Any]] = ContextVar("log_context", default={})


def set_log_context(key: str, value: Any) -> None:
    """Bind a single key/value pair into the current context's log fields."""
    new_ctx = dict(_log_context.get())
    new_ctx[key] = value
    _log_context.set(new_ctx)


def get_log_context(key: Optional[str] = None) -> Any:
    """Return the whole context dict, or a single value (None if absent) when *key* is given."""
    current = _log_context.get()
    return current if key is None else current.get(key)


def clear_log_context() -> None:
    """Reset the current context's log fields to an empty dict."""
    _log_context.set({})


def update_log_context(**kwargs: Any) -> None:
    """Merge several key/value pairs into the current context's log fields."""
    _log_context.set({**_log_context.get(), **kwargs})


def remove_log_context(key: str) -> None:
    """Drop *key* from the current context's log fields; no-op if absent."""
    remaining = {k: v for k, v in _log_context.get().items() if k != key}
    _log_context.set(remaining)
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/log_context.py",
"license": "Apache License 2.0",
"lines": 22,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:letta/orm/provider_model.py | from typing import TYPE_CHECKING, Optional
from sqlalchemy import Boolean, ForeignKey, String, UniqueConstraint
from sqlalchemy.orm import Mapped, mapped_column, relationship
from letta.orm.sqlalchemy_base import SqlalchemyBase
from letta.schemas.provider_model import ProviderModel as PydanticProviderModel
if TYPE_CHECKING:
from letta.orm.organization import Organization
from letta.orm.provider import Provider
class ProviderModel(SqlalchemyBase):
    """ProviderModel ORM class - represents individual models available from providers.

    Each row maps a user-facing handle to a provider's concrete model name,
    optionally scoped to a single organization (organization_id NULL = global).
    """

    __tablename__ = "provider_models"
    __pydantic_model__ = PydanticProviderModel
    # Two uniqueness guarantees:
    #  - a handle is unique per organization and model type (global rows use NULL org)
    #  - a provider cannot list the same model name twice for the same model type
    __table_args__ = (
        UniqueConstraint(
            "handle",
            "organization_id",
            "model_type",
            name="unique_handle_per_org_and_type",
        ),
        UniqueConstraint(
            "name",
            "provider_id",
            "model_type",
            name="unique_model_per_provider_and_type",
        ),
    )
    # The unique handle used in the API (e.g., "openai/gpt-4o-mini", "anthropic/claude-3-5-sonnet")
    # Format: {provider_name}/{display_name}
    handle: Mapped[str] = mapped_column(String, nullable=False, index=True, doc="Unique handle for API reference")
    # Display name shown in the UI for the model
    display_name: Mapped[str] = mapped_column(String, nullable=False, doc="Display name for the model")
    # The actual model name used by the provider (e.g., "gpt-4o-mini", "openai/gpt-4" for OpenRouter)
    name: Mapped[str] = mapped_column(String, nullable=False, doc="The actual model name used by the provider")
    # Foreign key to the provider; rows are removed when the provider is deleted (CASCADE)
    provider_id: Mapped[str] = mapped_column(
        String, ForeignKey("providers.id", ondelete="CASCADE"), nullable=False, index=True, doc="Provider ID reference"
    )
    # Optional organization ID - NULL for global models, set for org-scoped models
    organization_id: Mapped[Optional[str]] = mapped_column(
        String,
        ForeignKey("organizations.id", ondelete="CASCADE"),
        nullable=True,
        index=True,
        doc="Organization ID if org-scoped, NULL if global",
    )
    # Model type: llm or embedding
    model_type: Mapped[str] = mapped_column(String, nullable=False, index=True, doc="Type of model (llm or embedding)")
    # Whether the model is enabled (default True; server_default keeps DB-level inserts consistent)
    enabled: Mapped[bool] = mapped_column(Boolean, nullable=False, default=True, server_default="TRUE", doc="Whether the model is enabled")
    # Model endpoint type (e.g., "openai", "anthropic", etc.)
    model_endpoint_type: Mapped[str] = mapped_column(String, nullable=False, doc="The endpoint type for the model")
    # Additional metadata fields (nullable: not all providers report these capabilities)
    max_context_window: Mapped[int] = mapped_column(nullable=True, doc="Context window size for the model")
    supports_token_streaming: Mapped[bool] = mapped_column(Boolean, nullable=True, doc="Whether streaming is supported")
    supports_tool_calling: Mapped[bool] = mapped_column(Boolean, nullable=True, doc="Whether tool calling is supported")
    embedding_dim: Mapped[Optional[int]] = mapped_column(nullable=True, doc="Embedding dimension for embedding models")
    # relationships (back_populates counterparts live on Provider.models / Organization.provider_models)
    provider: Mapped["Provider"] = relationship("Provider", back_populates="models")
    organization: Mapped[Optional["Organization"]] = relationship("Organization", back_populates="provider_models")
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/orm/provider_model.py",
"license": "Apache License 2.0",
"lines": 59,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:letta/schemas/model.py | from typing import Annotated, Literal, Optional, Union
from pydantic import BaseModel, Field
from letta.schemas.embedding_config import EmbeddingConfig
from letta.schemas.enums import ProviderCategory, ProviderType
from letta.schemas.llm_config import LLMConfig
from letta.schemas.response_format import ResponseFormatUnion
class ModelBase(BaseModel):
    """Shared identity fields for LLM and embedding model listings."""

    handle: str = Field(..., description="Unique handle for API reference (format: provider_display_name/model_display_name)")
    name: str = Field(..., description="The actual model name used by the provider")
    display_name: str = Field(..., description="Display name for the model shown in UI")
    provider_type: ProviderType = Field(..., description="The type of the provider")
    provider_name: str = Field(..., description="The name of the provider")
    model_type: Literal["llm", "embedding"] = Field(..., description="Type of model (llm or embedding)")
class Model(LLMConfig, ModelBase):
    """User-facing LLM model schema.

    Combines the new handle-based identity fields (ModelBase) with the legacy
    LLMConfig fields, which are kept — marked deprecated — for backward
    compatibility with existing API clients.
    """

    model_type: Literal["llm"] = Field("llm", description="Type of model (llm or embedding)")
    max_context_window: int = Field(..., description="The maximum context window for the model")
    # supports_token_streaming: Optional[bool] = Field(None, description="Whether token streaming is supported")
    # supports_tool_calling: Optional[bool] = Field(None, description="Whether tool calling is supported")
    # Deprecated fields from LLMConfig - use new field names instead
    model: str = Field(..., description="Deprecated: Use 'name' field instead. LLM model name.", deprecated=True)
    model_endpoint_type: Literal[
        "openai",
        "anthropic",
        "google_ai",
        "google_vertex",
        "azure",
        "groq",
        "ollama",
        "webui",
        "webui-legacy",
        "lmstudio",
        "lmstudio-legacy",
        "lmstudio-chatcompletions",
        "llamacpp",
        "koboldcpp",
        "vllm",
        "hugging-face",
        "minimax",
        "mistral",
        "together",
        "bedrock",
        "deepseek",
        "xai",
        "zai",
        "openrouter",
        "chatgpt_oauth",
    ] = Field(..., description="Deprecated: Use 'provider_type' field instead. The endpoint type for the model.", deprecated=True)
    context_window: int = Field(
        ..., description="Deprecated: Use 'max_context_window' field instead. The context window size for the model.", deprecated=True
    )
    # Additional deprecated LLMConfig fields - kept for backward compatibility
    model_endpoint: Optional[str] = Field(None, description="Deprecated: The endpoint for the model.", deprecated=True)
    model_wrapper: Optional[str] = Field(None, description="Deprecated: The wrapper for the model.", deprecated=True)
    put_inner_thoughts_in_kwargs: Optional[bool] = Field(
        True, description="Deprecated: Puts 'inner_thoughts' as a kwarg in the function call.", deprecated=True
    )
    temperature: float = Field(0.7, description="Deprecated: The temperature to use when generating text with the model.", deprecated=True)
    max_tokens: Optional[int] = Field(None, description="Deprecated: The maximum number of tokens to generate.", deprecated=True)
    enable_reasoner: bool = Field(
        True,
        description="Deprecated: Whether or not the model should use extended thinking if it is a 'reasoning' style model.",
        deprecated=True,
    )
    reasoning_effort: Optional[Literal["none", "minimal", "low", "medium", "high", "xhigh"]] = Field(
        None, description="Deprecated: The reasoning effort to use when generating text reasoning models.", deprecated=True
    )
    max_reasoning_tokens: int = Field(0, description="Deprecated: Configurable thinking budget for extended thinking.", deprecated=True)
    frequency_penalty: Optional[float] = Field(
        None,
        description="Deprecated: Positive values penalize new tokens based on their existing frequency in the text so far.",
        deprecated=True,
    )
    compatibility_type: Optional[Literal["gguf", "mlx"]] = Field(
        None, description="Deprecated: The framework compatibility type for the model.", deprecated=True
    )
    verbosity: Optional[Literal["low", "medium", "high"]] = Field(
        None, description="Deprecated: Soft control for how verbose model output should be.", deprecated=True
    )
    tier: Optional[str] = Field(None, description="Deprecated: The cost tier for the model (cloud only).", deprecated=True)
    parallel_tool_calls: Optional[bool] = Field(
        False, description="Deprecated: If set to True, enables parallel tool calling.", deprecated=True
    )
    provider_category: Optional[ProviderCategory] = Field(
        None, description="Deprecated: The provider category for the model.", deprecated=True
    )

    @classmethod
    def from_llm_config(cls, llm_config: "LLMConfig") -> "Model":
        """Create a Model instance from an LLMConfig"""
        return cls(
            # New fields
            handle=llm_config.handle or f"{llm_config.provider_name}/{llm_config.model}",
            name=llm_config.model,
            display_name=llm_config.display_name or llm_config.model,
            provider_type=llm_config.model_endpoint_type,
            provider_name=llm_config.provider_name or llm_config.model_endpoint_type,
            model_type="llm",
            max_context_window=llm_config.context_window,
            # Deprecated fields (copy from LLMConfig for backward compatibility)
            model=llm_config.model,
            model_endpoint_type=llm_config.model_endpoint_type,
            model_endpoint=llm_config.model_endpoint,
            model_wrapper=llm_config.model_wrapper,
            context_window=llm_config.context_window,
            put_inner_thoughts_in_kwargs=llm_config.put_inner_thoughts_in_kwargs,
            temperature=llm_config.temperature,
            max_tokens=llm_config.max_tokens,
            enable_reasoner=llm_config.enable_reasoner,
            reasoning_effort=llm_config.reasoning_effort,
            max_reasoning_tokens=llm_config.max_reasoning_tokens,
            # NOTE(review): `effort` is not redeclared on Model above —
            # presumably inherited from LLMConfig; confirm.
            effort=llm_config.effort,
            frequency_penalty=llm_config.frequency_penalty,
            compatibility_type=llm_config.compatibility_type,
            verbosity=llm_config.verbosity,
            tier=llm_config.tier,
            parallel_tool_calls=llm_config.parallel_tool_calls,
            provider_category=llm_config.provider_category,
        )

    @property
    def model_settings_schema(self) -> Optional[dict]:
        """Returns the JSON schema for the ModelSettings class corresponding to this model's provider."""
        # The *ModelSettings classes are defined later in this module; names
        # resolve lazily at property-access time, so this works despite the
        # forward references.
        PROVIDER_SETTINGS_MAP = {
            ProviderType.openai: OpenAIModelSettings,
            ProviderType.anthropic: AnthropicModelSettings,
            ProviderType.google_ai: GoogleAIModelSettings,
            ProviderType.google_vertex: GoogleVertexModelSettings,
            ProviderType.azure: AzureModelSettings,
            ProviderType.xai: XAIModelSettings,
            ProviderType.zai: ZAIModelSettings,
            ProviderType.groq: GroqModelSettings,
            ProviderType.deepseek: DeepseekModelSettings,
            ProviderType.together: TogetherModelSettings,
            ProviderType.bedrock: BedrockModelSettings,
            ProviderType.openrouter: OpenRouterModelSettings,
        }
        # Providers without a settings class (e.g. ollama) yield None.
        settings_class = PROVIDER_SETTINGS_MAP.get(self.provider_type)
        return settings_class.model_json_schema() if settings_class else None
class EmbeddingModel(EmbeddingConfig, ModelBase):
    """User-facing embedding model schema.

    Combines the handle-based identity fields (ModelBase) with the legacy
    EmbeddingConfig fields, kept — marked deprecated — for backward
    compatibility with existing API clients.
    """

    model_type: Literal["embedding"] = Field("embedding", description="Type of model (llm or embedding)")
    embedding_dim: int = Field(..., description="The dimension of the embedding")
    # Deprecated fields from EmbeddingConfig - use new field names instead
    embedding_model: str = Field(..., description="Deprecated: Use 'name' field instead. Embedding model name.", deprecated=True)
    embedding_endpoint_type: Literal[
        "openai",
        "anthropic",
        "bedrock",
        "google_ai",
        "google_vertex",
        "azure",
        "groq",
        "ollama",
        "webui",
        "webui-legacy",
        "lmstudio",
        "lmstudio-legacy",
        "llamacpp",
        "koboldcpp",
        "vllm",
        "hugging-face",
        "mistral",
        "together",
        "pinecone",
    ] = Field(..., description="Deprecated: Use 'provider_type' field instead. The endpoint type for the embedding model.", deprecated=True)
    # Additional deprecated EmbeddingConfig fields - kept for backward compatibility
    embedding_endpoint: Optional[str] = Field(None, description="Deprecated: The endpoint for the model.", deprecated=True)
    embedding_chunk_size: Optional[int] = Field(300, description="Deprecated: The chunk size of the embedding.", deprecated=True)
    batch_size: int = Field(32, description="Deprecated: The maximum batch size for processing embeddings.", deprecated=True)
    azure_endpoint: Optional[str] = Field(None, description="Deprecated: The Azure endpoint for the model.", deprecated=True)
    azure_version: Optional[str] = Field(None, description="Deprecated: The Azure version for the model.", deprecated=True)
    azure_deployment: Optional[str] = Field(None, description="Deprecated: The Azure deployment for the model.", deprecated=True)

    @classmethod
    def from_embedding_config(cls, embedding_config: "EmbeddingConfig") -> "EmbeddingModel":
        """Create an EmbeddingModel instance from an EmbeddingConfig"""
        return cls(
            # New fields
            handle=embedding_config.handle or f"{embedding_config.embedding_endpoint_type}/{embedding_config.embedding_model}",
            name=embedding_config.embedding_model,
            display_name=embedding_config.embedding_model,
            provider_type=embedding_config.embedding_endpoint_type,
            provider_name=embedding_config.embedding_endpoint_type,
            model_type="embedding",
            embedding_dim=embedding_config.embedding_dim,
            # Deprecated fields (copy from EmbeddingConfig for backward compatibility)
            embedding_model=embedding_config.embedding_model,
            embedding_endpoint_type=embedding_config.embedding_endpoint_type,
            embedding_endpoint=embedding_config.embedding_endpoint,
            embedding_chunk_size=embedding_config.embedding_chunk_size,
            batch_size=embedding_config.batch_size,
            azure_endpoint=embedding_config.azure_endpoint,
            azure_version=embedding_config.azure_version,
            azure_deployment=embedding_config.azure_deployment,
        )
class ModelSettings(BaseModel):
    """Schema for defining settings for a model.

    Base class for the provider-specific *ModelSettings classes below;
    subclasses add a discriminating `provider_type` field.
    """

    # model: str = Field(..., description="The name of the model.")
    max_output_tokens: int = Field(4096, description="The maximum number of tokens the model can generate.")
    parallel_tool_calls: bool = Field(True, description="Whether to enable parallel tool calling.")
class OpenAIReasoning(BaseModel):
    """Reasoning configuration for OpenAI reasoning-capable models."""

    reasoning_effort: Literal["none", "minimal", "low", "medium", "high", "xhigh"] = Field(
        "minimal", description="The reasoning effort to use when generating text reasoning models"
    )
    # TODO: implement support for this
    # summary: Optional[Literal["auto", "detailed"]] = Field(
    #     None, description="The reasoning summary level to use when generating text reasoning models"
    # )
class OpenAIModelSettings(ModelSettings):
    """OpenAI model configuration."""

    provider_type: Literal[ProviderType.openai] = Field(ProviderType.openai, description="The type of the provider.")
    temperature: float = Field(0.7, description="The temperature of the model.")
    reasoning: OpenAIReasoning = Field(OpenAIReasoning(reasoning_effort="high"), description="The reasoning configuration for the model.")
    response_format: Optional[ResponseFormatUnion] = Field(None, description="The response format for the model.")
    # OpenAI supports strict mode for tool calling - defaults to True
    strict: bool = Field(
        True,
        description="Enable strict mode for tool calling. When true, tool outputs are guaranteed to match JSON schemas.",
    )
    # TODO: implement support for these
    # reasoning_summary: Optional[Literal["none", "short", "detailed"]] = Field(
    #     None, description="The reasoning summary level to use when generating text reasoning models"
    # )
    # max_tool_calls: int = Field(10, description="The maximum number of tool calls the model can make.")
    # parallel_tool_calls: bool = Field(False, description="Whether the model supports parallel tool calls.")
    # top_logprobs: int = Field(10, description="The number of top logprobs to return.")
    # top_p: float = Field(1.0, description="The top-p value to use when generating text.")

    def _to_legacy_config_params(self) -> dict:
        # Map the new-style settings onto legacy LLMConfig keyword arguments.
        return {
            "temperature": self.temperature,
            "max_tokens": self.max_output_tokens,
            "reasoning_effort": self.reasoning.reasoning_effort,
            "response_format": self.response_format,
            "parallel_tool_calls": self.parallel_tool_calls,
            "strict": self.strict,
        }
# "thinking": {
# "type": "enabled",
# "budget_tokens": 10000
# }
class AnthropicThinking(BaseModel):
    """Extended-thinking configuration for Anthropic models."""

    type: Literal["enabled", "disabled"] = Field("enabled", description="The type of thinking to use.")
    budget_tokens: int = Field(1024, description="The maximum number of tokens the model can use for extended thinking.")
class AnthropicModelSettings(ModelSettings):
    """Anthropic model configuration."""

    provider_type: Literal[ProviderType.anthropic] = Field(ProviderType.anthropic, description="The type of the provider.")
    temperature: float = Field(1.0, description="The temperature of the model.")
    thinking: AnthropicThinking = Field(
        AnthropicThinking(type="enabled", budget_tokens=1024), description="The thinking configuration for the model."
    )
    response_format: Optional[ResponseFormatUnion] = Field(None, description="The response format for the model.")
    # gpt-5 models only (NOTE(review): per the field description; unusual on an
    # Anthropic settings class — confirm intent)
    verbosity: Optional[Literal["low", "medium", "high"]] = Field(
        None,
        description="Soft control for how verbose model output should be, used for GPT-5 models.",
    )
    # Effort parameter for Opus 4.5, Opus 4.6, and Sonnet 4.6
    effort: Optional[Literal["low", "medium", "high", "max"]] = Field(
        None,
        description="Effort level for supported Anthropic models (controls token spending). 'max' is only available on Opus 4.6. Not setting this gives similar performance to 'high'.",
    )
    # Anthropic supports strict mode for tool calling - defaults to False
    strict: bool = Field(
        False,
        description="Enable strict mode for tool calling. When true, tool outputs are guaranteed to match JSON schemas.",
    )
    # TODO: implement support for these
    # top_k: Optional[int] = Field(None, description="The number of top tokens to return.")
    # top_p: Optional[float] = Field(None, description="The top-p value to use when generating text.")

    def _to_legacy_config_params(self) -> dict:
        # Map the new-style settings onto legacy LLMConfig keyword arguments;
        # the thinking sub-model flattens into extended_thinking + max_reasoning_tokens.
        return {
            "temperature": self.temperature,
            "max_tokens": self.max_output_tokens,
            "extended_thinking": self.thinking.type == "enabled",
            "max_reasoning_tokens": self.thinking.budget_tokens,
            "verbosity": self.verbosity,
            "parallel_tool_calls": self.parallel_tool_calls,
            "effort": self.effort,
            "response_format": self.response_format,
            "strict": self.strict,
        }
class GeminiThinkingConfig(BaseModel):
    """Thinking configuration for Google Gemini models."""

    include_thoughts: bool = Field(True, description="Whether to include thoughts in the model's response.")
    thinking_budget: int = Field(1024, description="The thinking budget for the model.")
class GoogleAIModelSettings(ModelSettings):
    """Google AI (Gemini) model configuration."""

    provider_type: Literal[ProviderType.google_ai] = Field(ProviderType.google_ai, description="The type of the provider.")
    temperature: float = Field(0.7, description="The temperature of the model.")
    thinking_config: GeminiThinkingConfig = Field(
        GeminiThinkingConfig(include_thoughts=True, thinking_budget=1024), description="The thinking configuration for the model."
    )
    response_schema: Optional[ResponseFormatUnion] = Field(None, description="The response schema for the model.")
    # Overrides the base-class default of 4096
    max_output_tokens: int = Field(65536, description="The maximum number of tokens the model can generate.")

    def _to_legacy_config_params(self) -> dict:
        # Map the new-style settings onto legacy LLMConfig keyword arguments;
        # a disabled thinking config translates to a zero reasoning budget.
        return {
            "temperature": self.temperature,
            "max_tokens": self.max_output_tokens,
            "max_reasoning_tokens": self.thinking_config.thinking_budget if self.thinking_config.include_thoughts else 0,
            "parallel_tool_calls": self.parallel_tool_calls,
            "strict": False,  # Google AI does not support strict mode
        }
class GoogleVertexModelSettings(GoogleAIModelSettings):
    """Google Vertex model configuration; inherits all Google AI settings."""

    provider_type: Literal[ProviderType.google_vertex] = Field(ProviderType.google_vertex, description="The type of the provider.")
class AzureModelSettings(ModelSettings):
    """Azure OpenAI model configuration (OpenAI-compatible)."""

    provider_type: Literal[ProviderType.azure] = Field(ProviderType.azure, description="The type of the provider.")
    temperature: float = Field(0.7, description="The temperature of the model.")
    response_format: Optional[ResponseFormatUnion] = Field(None, description="The response format for the model.")

    def _to_legacy_config_params(self) -> dict:
        # Map the new-style settings onto legacy LLMConfig keyword arguments.
        return {
            "temperature": self.temperature,
            "max_tokens": self.max_output_tokens,
            "response_format": self.response_format,
            "parallel_tool_calls": self.parallel_tool_calls,
            "strict": False,  # Azure does not support strict mode
        }
class XAIModelSettings(ModelSettings):
    """xAI model configuration (OpenAI-compatible)."""

    provider_type: Literal[ProviderType.xai] = Field(ProviderType.xai, description="The type of the provider.")
    temperature: float = Field(0.7, description="The temperature of the model.")
    response_format: Optional[ResponseFormatUnion] = Field(None, description="The response format for the model.")

    def _to_legacy_config_params(self) -> dict:
        # Map the new-style settings onto legacy LLMConfig keyword arguments.
        return {
            "temperature": self.temperature,
            "max_tokens": self.max_output_tokens,
            "response_format": self.response_format,
            "parallel_tool_calls": self.parallel_tool_calls,
            "strict": False,  # xAI does not support strict mode
        }
class ZAIThinking(BaseModel):
    """Thinking configuration for ZAI GLM-4.5+ models."""

    type: Literal["enabled", "disabled"] = Field("enabled", description="Whether thinking is enabled or disabled.")
    clear_thinking: bool = Field(False, description="If False, preserved thinking is used (recommended for agents).")
class ZAIModelSettings(ModelSettings):
    """Z.ai (ZhipuAI) model configuration (OpenAI-compatible)."""

    provider_type: Literal[ProviderType.zai] = Field(ProviderType.zai, description="The type of the provider.")
    temperature: float = Field(0.7, description="The temperature of the model.")
    response_format: Optional[ResponseFormatUnion] = Field(None, description="The response format for the model.")
    thinking: ZAIThinking = Field(
        ZAIThinking(type="enabled", clear_thinking=False), description="The thinking configuration for GLM-4.5+ models."
    )

    def _to_legacy_config_params(self) -> dict:
        # Map the new-style settings onto legacy LLMConfig keyword arguments;
        # only the thinking *type* maps to the legacy extended_thinking flag
        # (clear_thinking has no legacy equivalent).
        return {
            "temperature": self.temperature,
            "max_tokens": self.max_output_tokens,
            "response_format": self.response_format,
            "parallel_tool_calls": self.parallel_tool_calls,
            "strict": False,  # ZAI does not support strict mode
            "extended_thinking": self.thinking.type == "enabled",
        }
class GroqModelSettings(ModelSettings):
    """Groq model configuration (OpenAI-compatible)."""

    provider_type: Literal[ProviderType.groq] = Field(ProviderType.groq, description="The type of the provider.")
    temperature: float = Field(0.7, description="The temperature of the model.")
    response_format: Optional[ResponseFormatUnion] = Field(None, description="The response format for the model.")

    def _to_legacy_config_params(self) -> dict:
        # Map the new-style settings onto legacy LLMConfig keyword arguments.
        return {
            "temperature": self.temperature,
            "max_tokens": self.max_output_tokens,
            "response_format": self.response_format,
            "parallel_tool_calls": self.parallel_tool_calls,
            "strict": False,  # Groq does not support strict mode
        }
class DeepseekModelSettings(ModelSettings):
    """Deepseek model configuration (OpenAI-compatible)."""

    provider_type: Literal[ProviderType.deepseek] = Field(ProviderType.deepseek, description="The type of the provider.")
    temperature: float = Field(0.7, description="The temperature of the model.")
    response_format: Optional[ResponseFormatUnion] = Field(None, description="The response format for the model.")

    def _to_legacy_config_params(self) -> dict:
        # Map the new-style settings onto legacy LLMConfig keyword arguments.
        return {
            "temperature": self.temperature,
            "max_tokens": self.max_output_tokens,
            "response_format": self.response_format,
            "parallel_tool_calls": self.parallel_tool_calls,
            "strict": False,  # Deepseek does not support strict mode
        }
class TogetherModelSettings(ModelSettings):
    """Together AI model configuration (OpenAI-compatible)."""

    provider_type: Literal[ProviderType.together] = Field(ProviderType.together, description="The type of the provider.")
    temperature: float = Field(0.7, description="The temperature of the model.")
    response_format: Optional[ResponseFormatUnion] = Field(None, description="The response format for the model.")

    def _to_legacy_config_params(self) -> dict:
        # Map the new-style settings onto legacy LLMConfig keyword arguments.
        return {
            "temperature": self.temperature,
            "max_tokens": self.max_output_tokens,
            "response_format": self.response_format,
            "parallel_tool_calls": self.parallel_tool_calls,
            "strict": False,  # Together does not support strict mode
        }
class BedrockModelSettings(ModelSettings):
    """AWS Bedrock model configuration."""

    provider_type: Literal[ProviderType.bedrock] = Field(ProviderType.bedrock, description="The type of the provider.")
    temperature: float = Field(0.7, description="The temperature of the model.")
    response_format: Optional[ResponseFormatUnion] = Field(None, description="The response format for the model.")

    def _to_legacy_config_params(self) -> dict:
        # Map the new-style settings onto legacy LLMConfig keyword arguments.
        return {
            "temperature": self.temperature,
            "max_tokens": self.max_output_tokens,
            "response_format": self.response_format,
            "parallel_tool_calls": self.parallel_tool_calls,
            "strict": False,  # Bedrock does not support strict mode
        }
class OpenRouterModelSettings(ModelSettings):
    """OpenRouter model configuration (OpenAI-compatible)."""

    provider_type: Literal[ProviderType.openrouter] = Field(ProviderType.openrouter, description="The type of the provider.")
    temperature: float = Field(0.7, description="The temperature of the model.")
    response_format: Optional[ResponseFormatUnion] = Field(None, description="The response format for the model.")

    def _to_legacy_config_params(self) -> dict:
        # Map the new-style settings onto legacy LLMConfig keyword arguments.
        return {
            "temperature": self.temperature,
            "max_tokens": self.max_output_tokens,
            "response_format": self.response_format,
            "parallel_tool_calls": self.parallel_tool_calls,
            "strict": False,  # OpenRouter does not support strict mode
        }
class ChatGPTOAuthReasoning(BaseModel):
    """Reasoning configuration for ChatGPT OAuth models (GPT-5.x, o-series)."""

    reasoning_effort: Literal["none", "low", "medium", "high", "xhigh"] = Field(
        "medium", description="The reasoning effort level for GPT-5.x and o-series models."
    )
class ChatGPTOAuthModelSettings(ModelSettings):
    """ChatGPT OAuth model configuration (uses ChatGPT backend API)."""

    provider_type: Literal[ProviderType.chatgpt_oauth] = Field(ProviderType.chatgpt_oauth, description="The type of the provider.")
    temperature: float = Field(0.7, description="The temperature of the model.")
    reasoning: ChatGPTOAuthReasoning = Field(
        ChatGPTOAuthReasoning(reasoning_effort="medium"), description="The reasoning configuration for the model."
    )

    def _to_legacy_config_params(self) -> dict:
        """Translate these settings into the legacy LLMConfig keyword names."""
        legacy = {"temperature": self.temperature, "max_tokens": self.max_output_tokens}
        # Flatten the nested reasoning config into the single legacy key.
        legacy["reasoning_effort"] = self.reasoning.reasoning_effort
        legacy["parallel_tool_calls"] = self.parallel_tool_calls
        return legacy
# Discriminated union over every provider-specific settings class; pydantic
# picks the concrete class from the `provider_type` field of the payload.
ModelSettingsUnion = Annotated[
    Union[
        OpenAIModelSettings,
        AnthropicModelSettings,
        GoogleAIModelSettings,
        GoogleVertexModelSettings,
        AzureModelSettings,
        XAIModelSettings,
        ZAIModelSettings,
        GroqModelSettings,
        DeepseekModelSettings,
        TogetherModelSettings,
        BedrockModelSettings,
        OpenRouterModelSettings,
        ChatGPTOAuthModelSettings,
    ],
    Field(discriminator="provider_type"),
]
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/schemas/model.py",
"license": "Apache License 2.0",
"lines": 445,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/schemas/provider_model.py | from typing import Optional
from pydantic import Field
from letta.schemas.enums import PrimitiveType
from letta.schemas.letta_base import OrmMetadataBase
class ProviderModelBase(OrmMetadataBase):
    # All provider-model IDs are generated with this shared prefix.
    __id_prefix__ = PrimitiveType.PROVIDER_MODEL.value
class ProviderModel(ProviderModelBase):
    """
    Pydantic model for provider models.
    This represents individual models available from providers with a unique handle
    that decouples the user-facing API from provider-specific implementation details.
    """

    # Auto-generated ID using the __id_prefix__ from ProviderModelBase.
    id: str = ProviderModelBase.generate_id_field()
    # The unique handle used in the API (e.g., "openai/gpt-4o-mini", "anthropic/claude-3-5-sonnet")
    # Format: {provider_display_name}/{model_display_name}
    handle: str = Field(..., description="Unique handle for API reference (format: provider_display_name/model_display_name)")
    # Display name shown in the UI for the model
    name: str = Field(..., description="The actual model name used by the provider")
    display_name: str = Field(..., description="Display name for the model shown in UI")
    # Foreign key to the provider
    provider_id: str = Field(..., description="Provider ID reference")
    # Optional organization ID - NULL for global models, set for org-scoped models
    organization_id: Optional[str] = Field(None, description="Organization ID if org-scoped, NULL if global")
    # Model type: llm or embedding
    model_type: str = Field(..., description="Type of model (llm or embedding)")
    # Whether the model is enabled (default True)
    enabled: bool = Field(default=True, description="Whether the model is enabled")
    # Model endpoint type (e.g., "openai", "anthropic", etc.)
    model_endpoint_type: str = Field(..., description="The endpoint type for the model (e.g., 'openai', 'anthropic')")
    # Additional capability/metadata fields; None means "unknown/not recorded".
    max_context_window: Optional[int] = Field(None, description="Context window size for the model")
    supports_token_streaming: Optional[bool] = Field(None, description="Whether token streaming is supported")
    supports_tool_calling: Optional[bool] = Field(None, description="Whether tool calling is supported")
    embedding_dim: Optional[int] = Field(None, description="Embedding dimension for embedding models")
class ProviderModelCreate(ProviderModelBase):
    """Schema for creating a new provider model"""

    # Identity: handle plus the display-name parts it is composed from.
    handle: str = Field(..., description="Unique handle for API reference (format: provider_display_name/model_display_name)")
    display_name: str = Field(..., description="Display name for the model shown in UI")
    model_name: str = Field(..., description="The actual model name used by the provider")
    model_display_name: str = Field(..., description="Model display name used in the handle")
    provider_display_name: str = Field(..., description="Display name for the provider")
    provider_id: str = Field(..., description="Provider ID reference")
    model_type: str = Field(..., description="Type of model (llm or embedding)")
    enabled: bool = Field(default=True, description="Whether the model is enabled")
    # Optional capability metadata; None means unknown.
    context_window: Optional[int] = Field(None, description="Context window size for the model")
    supports_streaming: Optional[bool] = Field(None, description="Whether streaming is supported")
    supports_function_calling: Optional[bool] = Field(None, description="Whether function calling is supported")
class ProviderModelUpdate(ProviderModelBase):
    """Schema for updating a provider model"""

    # All fields are optional; only the fields provided are applied as updates.
    display_name: Optional[str] = Field(None, description="Display name for the model shown in UI")
    enabled: Optional[bool] = Field(None, description="Whether the model is enabled")
    context_window: Optional[int] = Field(None, description="Context window size for the model")
    supports_streaming: Optional[bool] = Field(None, description="Whether streaming is supported")
    supports_function_calling: Optional[bool] = Field(None, description="Whether function calling is supported")
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/schemas/provider_model.py",
"license": "Apache License 2.0",
"lines": 54,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/server/global_exception_handler.py | """
Global exception handlers for non-request exceptions (background tasks, startup, etc.)
"""
import sys
import threading
import traceback
from letta.log import get_logger
logger = get_logger(__name__)
def setup_global_exception_handlers():
    """
    Set up global exception handlers to catch exceptions that occur outside of request handling.
    This includes:
    - Uncaught exceptions in the main thread
    - Exceptions in background threads
    - Asyncio task exceptions
    """

    # 1. Handle uncaught exceptions in the main thread
    def _main_thread_hook(exc_type, exc_value, exc_traceback):
        """Log an uncaught main-thread exception that would otherwise crash the app."""
        # Let Ctrl+C (KeyboardInterrupt) flow to the default handler unlogged.
        if issubclass(exc_type, KeyboardInterrupt):
            sys.__excepthook__(exc_type, exc_value, exc_traceback)
            return
        formatted_tb = "".join(traceback.format_exception(exc_type, exc_value, exc_traceback))
        logger.critical(
            f"Uncaught exception in main thread: {exc_type.__name__}: {exc_value}",
            extra={
                "exception_type": exc_type.__name__,
                "exception_message": str(exc_value),
                "exception_module": exc_type.__module__,
                "traceback": formatted_tb,
            },
            exc_info=(exc_type, exc_value, exc_traceback),
        )

    sys.excepthook = _main_thread_hook

    # 2. Handle exceptions in threading
    def _background_thread_hook(args):
        """Log an uncaught exception raised inside a background thread."""
        formatted_tb = "".join(traceback.format_exception(args.exc_type, args.exc_value, args.exc_traceback))
        logger.error(
            f"Uncaught exception in thread {args.thread.name}: {args.exc_type.__name__}: {args.exc_value}",
            extra={
                "exception_type": args.exc_type.__name__,
                "exception_message": str(args.exc_value),
                "exception_module": args.exc_type.__module__,
                "thread_name": args.thread.name,
                "thread_id": args.thread.ident,
                "traceback": formatted_tb,
            },
            exc_info=(args.exc_type, args.exc_value, args.exc_traceback),
        )

    threading.excepthook = _background_thread_hook

    logger.info("Global exception handlers initialized")
def setup_asyncio_exception_handler(loop):
    """
    Set up exception handler for asyncio loop.
    Call this with your event loop.
    """

    def _on_loop_exception(loop, context):
        """Log unhandled exceptions surfaced by asyncio tasks and callbacks."""
        exc = context.get("exception")
        message = context.get("message", "Unhandled exception in asyncio")
        log_extra = {
            "asyncio_context": str(context),
            "task": str(context.get("task")),
        }
        # Guard clause: some callbacks report a message with no exception object.
        if exc is None:
            logger.error(
                f"Asyncio exception: {message}",
                extra=log_extra,
            )
            return
        log_extra["exception_type"] = exc.__class__.__name__
        log_extra["exception_message"] = str(exc)
        log_extra["exception_module"] = exc.__class__.__module__
        logger.error(
            f"Asyncio exception: {message}: {exc}",
            extra=log_extra,
            exc_info=exc,
        )

    loop.set_exception_handler(_on_loop_exception)
    logger.info("Asyncio exception handler initialized")
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/server/global_exception_handler.py",
"license": "Apache License 2.0",
"lines": 91,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:letta/server/rest_api/middleware/logging.py | """
Unified logging middleware that enriches log context and ensures exceptions are logged.
"""
import traceback
from typing import Callable
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.requests import Request
from letta.log import get_logger
from letta.log_context import clear_log_context, update_log_context
from letta.otel.tracing import tracer
from letta.schemas.enums import PrimitiveType
from letta.validators import PRIMITIVE_ID_PATTERNS
logger = get_logger(__name__)
class LoggingMiddleware(BaseHTTPMiddleware):
    """
    Middleware that enriches log context with request-specific attributes and logs exceptions.
    Automatically extracts and sets:
    - actor_id: From the 'user_id' header
    - org_id: From organization-related endpoints
    - Letta primitive IDs: agent_id, tool_id, block_id, etc. from URL paths
    Also catches all exceptions and logs them with structured context before re-raising.
    """

    @staticmethod
    def _primitive_context_key(value: str):
        """Return the log-context key for a Letta primitive ID string, or None.

        Matches `value` against the known primitive ID prefixes (agent-..., block-...,
        etc.). Organization and user IDs map to the shorter "org_id"/"user_id" keys;
        every other primitive maps to "<primitive_name>_id".
        """
        for primitive_type in PrimitiveType:
            pattern = PRIMITIVE_ID_PATTERNS.get(primitive_type.value)
            if pattern and pattern.match(value):
                if primitive_type == PrimitiveType.ORGANIZATION:
                    return "org_id"
                if primitive_type == PrimitiveType.USER:
                    return "user_id"
                return f"{primitive_type.name.lower()}_id"
        return None

    async def dispatch(self, request: Request, call_next: Callable):
        clear_log_context()
        try:
            with tracer.start_as_current_span("middleware.logging"):
                # Extract and set log context
                context = {}
                with tracer.start_as_current_span("middleware.logging.context"):
                    # Headers
                    actor_id = request.headers.get("user_id")
                    if actor_id:
                        context["actor_id"] = actor_id
                    project_id = request.headers.get("x-project-id")
                    if project_id:
                        context["project_id"] = project_id
                    org_id = request.headers.get("x-organization-id")
                    if org_id:
                        context["org_id"] = org_id
                    header_agent_id = request.headers.get("x-agent-id")
                    if header_agent_id:
                        context["agent_id"] = header_agent_id
                    run_id_header = request.headers.get("x-run-id") or request.headers.get("run-id")
                    if run_id_header:
                        context["run_id"] = run_id_header
                    path = request.url.path
                    path_parts = [p for p in path.split("/") if p]
                    # Path segments: a matching ID always wins, even over a
                    # header-derived value for the same key.
                    matched_parts = set()
                    for part in path_parts:
                        if part in matched_parts:
                            continue
                        context_key = self._primitive_context_key(part)
                        if context_key is not None:
                            context[context_key] = part
                            matched_parts.add(part)
                    # Query parameters: lowest precedence — only fill keys not
                    # already set from headers or the path.
                    for param_value in request.query_params.values():
                        if param_value in matched_parts:
                            continue
                        context_key = self._primitive_context_key(param_value)
                        if context_key is not None and context_key not in context:
                            context[context_key] = param_value
                            matched_parts.add(param_value)
                if context:
                    update_log_context(**context)
                logger.debug(
                    f"Incoming request: {request.method} {request.url.path}",
                    extra={
                        "method": request.method,
                        "url": str(request.url),
                        "path": request.url.path,
                        "query_params": dict(request.query_params),
                        "client_host": request.client.host if request.client else None,
                    },
                )
                response = await call_next(request)
                return response
        except Exception as exc:
            import anyio

            # Client-side disconnects are expected; log quietly and bail out.
            if isinstance(exc, (anyio.BrokenResourceError, anyio.ClosedResourceError)):
                logger.info(f"Client disconnected during request: {request.method} {request.url.path}")
                raise
            # Extract request context
            request_context = {
                "method": request.method,
                "url": str(request.url),
                "path": request.url.path,
                "query_params": dict(request.query_params),
                "client_host": request.client.host if request.client else None,
                "user_agent": request.headers.get("user-agent"),
            }
            # Extract user context if available
            user_context = {}
            if hasattr(request.state, "user_id"):
                user_context["user_id"] = request.state.user_id
            if hasattr(request.state, "org_id"):
                user_context["org_id"] = request.state.org_id
            # Check for custom context attached to the exception
            custom_context = {}
            if hasattr(exc, "__letta_context__"):
                custom_context = exc.__letta_context__
            # Log with structured data
            logger.error(
                f"Unhandled exception in request: {exc.__class__.__name__}: {str(exc)}",
                extra={
                    "exception_type": exc.__class__.__name__,
                    "exception_message": str(exc),
                    "exception_module": exc.__class__.__module__,
                    "request": request_context,
                    "user": user_context,
                    "custom_context": custom_context,
                    "traceback": traceback.format_exc(),
                },
                exc_info=True,
            )
            # Re-raise to let FastAPI's exception handlers deal with it
            raise
        finally:
            clear_log_context()
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/server/rest_api/middleware/logging.py",
"license": "Apache License 2.0",
"lines": 139,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/server/rest_api/routers/v1/internal_agents.py | from fastapi import APIRouter, Body, Depends, Query
from letta.schemas.block import Block, BlockUpdate
from letta.server.rest_api.dependencies import HeaderParams, get_headers, get_letta_server
from letta.server.server import SyncServer
from letta.validators import AgentId
router = APIRouter(prefix="/_internal_agents", tags=["_internal_agents"])
@router.get("/count", response_model=int, operation_id="count_internal_agents")
async def count_agents(
exclude_hidden: bool = Query(True, description="If True, excludes hidden agents from the count. If False, includes all agents."),
server: "SyncServer" = Depends(get_letta_server),
headers: HeaderParams = Depends(get_headers),
):
"""
Get the total number of agents for a user, with option to exclude hidden agents.
"""
actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
# When exclude_hidden=True, we want show_hidden_agents=False
# When exclude_hidden=False, we want show_hidden_agents=True
show_hidden_agents = not exclude_hidden
# Always use count_agents_async to ensure proper filtering
return await server.agent_manager.count_agents_async(
actor=actor,
show_hidden_agents=show_hidden_agents,
)
@router.patch("/{agent_id}/core-memory/blocks/{block_label}", response_model=Block, operation_id="modify_internal_core_memory_block")
async def modify_block_for_agent(
block_label: str,
agent_id: AgentId,
block_update: BlockUpdate = Body(...),
server: "SyncServer" = Depends(get_letta_server),
headers: HeaderParams = Depends(get_headers),
):
"""
Updates a core memory block of an agent.
"""
actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
block = await server.agent_manager.modify_block_by_label_async(
agent_id=agent_id, block_label=block_label, block_update=block_update, actor=actor
)
# This should also trigger a system prompt change in the agent
await server.agent_manager.rebuild_system_prompt_async(agent_id=agent_id, actor=actor, force=True, update_timestamp=False)
return block
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/server/rest_api/routers/v1/internal_agents.py",
"license": "Apache License 2.0",
"lines": 42,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:letta/server/rest_api/routers/v1/internal_blocks.py | from typing import TYPE_CHECKING, List, Literal, Optional
from fastapi import APIRouter, Body, Depends, Query
from letta.schemas.agent import AgentState
from letta.schemas.block import Block, CreateBlock
from letta.server.rest_api.dependencies import HeaderParams, get_headers, get_letta_server
from letta.server.server import SyncServer
from letta.utils import is_1_0_sdk_version
from letta.validators import (
BlockDescriptionSearchQuery,
BlockId,
BlockLabelQuery,
BlockLabelSearchQuery,
BlockNameQuery,
BlockValueSearchQuery,
IdentityIdQuery,
)
if TYPE_CHECKING:
pass
router = APIRouter(prefix="/_internal_blocks", tags=["_internal_blocks"])
@router.get("/", response_model=List[Block], operation_id="list_internal_blocks")
async def list_blocks(
# query parameters
label: BlockLabelQuery = None,
templates_only: bool = Query(False, description="Whether to include only templates"),
name: BlockNameQuery = None,
identity_id: IdentityIdQuery = None,
identifier_keys: Optional[List[str]] = Query(None, description="Search agents by identifier keys"),
project_id: Optional[str] = Query(None, description="Search blocks by project id"),
limit: Optional[int] = Query(50, description="Number of blocks to return"),
before: Optional[str] = Query(
None,
description="Block ID cursor for pagination. Returns blocks that come before this block ID in the specified sort order",
),
after: Optional[str] = Query(
None,
description="Block ID cursor for pagination. Returns blocks that come after this block ID in the specified sort order",
),
order: Literal["asc", "desc"] = Query(
"asc", description="Sort order for blocks by creation time. 'asc' for oldest first, 'desc' for newest first"
),
order_by: Literal["created_at"] = Query("created_at", description="Field to sort by"),
label_search: BlockLabelSearchQuery = None,
description_search: BlockDescriptionSearchQuery = None,
value_search: BlockValueSearchQuery = None,
connected_to_agents_count_gt: Optional[int] = Query(
None,
description=(
"Filter blocks by the number of connected agents. "
"If provided, returns blocks that have more than this number of connected agents."
),
),
connected_to_agents_count_lt: Optional[int] = Query(
None,
description=(
"Filter blocks by the number of connected agents. "
"If provided, returns blocks that have less than this number of connected agents."
),
),
connected_to_agents_count_eq: Optional[List[int]] = Query(
None,
description=(
"Filter blocks by the exact number of connected agents. "
"If provided, returns blocks that have exactly this number of connected agents."
),
),
show_hidden_blocks: bool | None = Query(
False,
include_in_schema=False,
description="If set to True, include blocks marked as hidden in the results.",
),
server: SyncServer = Depends(get_letta_server),
headers: HeaderParams = Depends(get_headers),
):
actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
return await server.block_manager.get_blocks_async(
actor=actor,
label=label,
is_template=templates_only,
value_search=value_search,
label_search=label_search,
description_search=description_search,
template_name=name,
identity_id=identity_id,
identifier_keys=identifier_keys,
project_id=project_id,
before=before,
connected_to_agents_count_gt=connected_to_agents_count_gt,
connected_to_agents_count_lt=connected_to_agents_count_lt,
connected_to_agents_count_eq=connected_to_agents_count_eq,
limit=limit,
after=after,
ascending=(order == "asc"),
show_hidden_blocks=show_hidden_blocks,
)
@router.post("/", response_model=Block, operation_id="create_internal_block")
async def create_block(
create_block: CreateBlock = Body(...),
server: SyncServer = Depends(get_letta_server),
headers: HeaderParams = Depends(get_headers),
):
actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
block = Block(**create_block.model_dump())
return await server.block_manager.create_or_update_block_async(actor=actor, block=block)
@router.delete("/{block_id}", operation_id="delete_internal_block")
async def delete_block(
block_id: BlockId,
server: SyncServer = Depends(get_letta_server),
headers: HeaderParams = Depends(get_headers),
):
actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
await server.block_manager.delete_block_async(block_id=block_id, actor=actor)
@router.get("/{block_id}/agents", response_model=List[AgentState], operation_id="list_agents_for_internal_block")
async def list_agents_for_block(
block_id: BlockId,
before: Optional[str] = Query(
None,
description="Agent ID cursor for pagination. Returns agents that come before this agent ID in the specified sort order",
),
after: Optional[str] = Query(
None,
description="Agent ID cursor for pagination. Returns agents that come after this agent ID in the specified sort order",
),
limit: Optional[int] = Query(50, description="Maximum number of agents to return"),
order: Literal["asc", "desc"] = Query(
"desc", description="Sort order for agents by creation time. 'asc' for oldest first, 'desc' for newest first"
),
order_by: Literal["created_at"] = Query("created_at", description="Field to sort by"),
include_relationships: list[str] | None = Query(
None,
description=(
"Specify which relational fields (e.g., 'tools', 'sources', 'memory') to include in the response. "
"If not provided, all relationships are loaded by default. "
"Using this can optimize performance by reducing unnecessary joins."
"This is a legacy parameter, and no longer supported after 1.0.0 SDK versions."
),
deprecated=True,
),
include: List[str] = Query(
[],
description=("Specify which relational fields to include in the response. No relationships are included by default."),
),
server: SyncServer = Depends(get_letta_server),
headers: HeaderParams = Depends(get_headers),
):
"""
Retrieves all agents associated with the specified block.
Raises a 404 if the block does not exist.
"""
actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
if include_relationships is None and is_1_0_sdk_version(headers):
include_relationships = [] # don't default include all if using new SDK version
agents = await server.block_manager.get_agents_for_block_async(
block_id=block_id,
before=before,
after=after,
limit=limit,
ascending=(order == "asc"),
include_relationships=include_relationships,
include=include,
actor=actor,
)
return agents
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/server/rest_api/routers/v1/internal_blocks.py",
"license": "Apache License 2.0",
"lines": 162,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/services/webhook_service.py | import logging
import os
import httpx
logger = logging.getLogger(__name__)
class WebhookService:
    """Service for sending webhook notifications when steps complete."""

    def __init__(self):
        # Both settings come from the environment; the webhook is optional.
        self.webhook_url = os.getenv("STEP_COMPLETE_WEBHOOK")
        self.webhook_key = os.getenv("STEP_COMPLETE_KEY")

    async def notify_step_complete(self, step_id: str) -> bool:
        """
        Send a POST request to the configured webhook URL when a step completes.

        Args:
            step_id: The ID of the completed step

        Returns:
            bool: True if notification was sent successfully, False otherwise
        """
        if not self.webhook_url:
            logger.debug("STEP_COMPLETE_WEBHOOK not configured, skipping webhook notification")
            return False
        # Bearer auth is attached only when a key is configured.
        request_headers = {"Authorization": f"Bearer {self.webhook_key}"} if self.webhook_key else {}
        try:
            async with httpx.AsyncClient(timeout=10.0) as client:
                response = await client.post(
                    self.webhook_url,
                    json={"step_id": step_id},
                    headers=request_headers,
                )
                response.raise_for_status()
        except httpx.TimeoutException:
            logger.warning(f"Timeout sending step completion webhook for step {step_id}")
            return False
        except httpx.HTTPStatusError as e:
            logger.warning(f"HTTP error sending step completion webhook for step {step_id}: {e.response.status_code}")
            return False
        except Exception as e:
            logger.error(f"Unexpected error sending step completion webhook for step {step_id}: {e}")
            return False
        logger.info(f"Successfully sent step completion webhook for step {step_id}")
        return True
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/webhook_service.py",
"license": "Apache License 2.0",
"lines": 43,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/services/webhook_service_test.py | """
Simple test to verify webhook service functionality.
To run this test:
python -m pytest letta/services/webhook_service_test.py -v
To test with actual webhook:
export STEP_COMPLETE_WEBHOOK=https://your-webhook-url.com/endpoint
export STEP_COMPLETE_KEY=your-secret-key
python -m pytest letta/services/webhook_service_test.py -v
These tests verify the webhook service works in both:
- Temporal mode (when webhooks are called as Temporal activities)
- Non-Temporal mode (when webhooks are called directly from StepManager)
"""
import os
from unittest.mock import AsyncMock, MagicMock, patch

import pytest

from letta.services.webhook_service import WebhookService
@pytest.mark.asyncio
async def test_webhook_not_configured():
    """Test that webhook does not send when URL is not configured."""
    # With a fully cleared environment there is no webhook URL to hit.
    with patch.dict(os.environ, {}, clear=True):
        svc = WebhookService()
        assert await svc.notify_step_complete("step_123") is False
@pytest.mark.asyncio
async def test_webhook_success():
    """Test successful webhook notification."""
    with patch.dict(
        os.environ,
        {"STEP_COMPLETE_WEBHOOK": "https://example.com/webhook", "STEP_COMPLETE_KEY": "test-key"},
    ):
        service = WebhookService()
        with patch("httpx.AsyncClient") as mock_client:
            # httpx's Response.raise_for_status is synchronous and the service
            # calls it without await, so it must be a MagicMock — an AsyncMock
            # would just hand back an un-awaited coroutine.
            mock_response = MagicMock()
            mock_response.status_code = 200
            mock_response.raise_for_status = MagicMock()
            mock_post = AsyncMock(return_value=mock_response)
            mock_client.return_value.__aenter__.return_value.post = mock_post
            result = await service.notify_step_complete("step_123")
            assert result is True
            mock_post.assert_called_once()
            call_args = mock_post.call_args
            assert call_args.kwargs["json"] == {"step_id": "step_123"}
            assert call_args.kwargs["headers"]["Authorization"] == "Bearer test-key"
@pytest.mark.asyncio
async def test_webhook_without_auth():
    """Test webhook notification without authentication key."""
    with patch.dict(os.environ, {"STEP_COMPLETE_WEBHOOK": "https://example.com/webhook"}, clear=True):
        service = WebhookService()
        with patch("httpx.AsyncClient") as mock_client:
            # raise_for_status is synchronous in httpx, so mock it with MagicMock
            # (an AsyncMock would return an un-awaited coroutine instead of running).
            mock_response = MagicMock()
            mock_response.status_code = 200
            mock_response.raise_for_status = MagicMock()
            mock_post = AsyncMock(return_value=mock_response)
            mock_client.return_value.__aenter__.return_value.post = mock_post
            result = await service.notify_step_complete("step_123")
            assert result is True
            call_args = mock_post.call_args
            # Should not have Authorization header
            assert "Authorization" not in call_args.kwargs["headers"]
@pytest.mark.asyncio
async def test_webhook_timeout():
    """Test webhook notification timeout handling."""
    with patch.dict(os.environ, {"STEP_COMPLETE_WEBHOOK": "https://example.com/webhook"}):
        service = WebhookService()
        with patch("httpx.AsyncClient") as mock_client:
            import httpx

            # The awaited post() call raises, which the service maps to False.
            mock_client.return_value.__aenter__.return_value.post = AsyncMock(
                side_effect=httpx.TimeoutException("Request timed out")
            )
            assert await service.notify_step_complete("step_123") is False
@pytest.mark.asyncio
async def test_webhook_http_error():
    """Test webhook notification HTTP error handling."""
    with patch.dict(os.environ, {"STEP_COMPLETE_WEBHOOK": "https://example.com/webhook"}):
        service = WebhookService()
        with patch("httpx.AsyncClient") as mock_client:
            import httpx

            mock_response = MagicMock()
            mock_response.status_code = 500
            # raise_for_status must be a synchronous MagicMock: with an AsyncMock,
            # side_effect only fires when the coroutine is awaited, and the service
            # never awaits raise_for_status() — the error would never be raised and
            # the service would incorrectly return True.
            mock_response.raise_for_status = MagicMock(
                side_effect=httpx.HTTPStatusError("Server error", request=None, response=mock_response)
            )
            mock_post = AsyncMock(return_value=mock_response)
            mock_client.return_value.__aenter__.return_value.post = mock_post
            result = await service.notify_step_complete("step_123")
            assert result is False
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/services/webhook_service_test.py",
"license": "Apache License 2.0",
"lines": 86,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/integration_test_cancellation.py | import asyncio
import json
import os
import threading
from typing import Any, List
import pytest
from dotenv import load_dotenv
from letta_client import AsyncLetta
from letta_client.types import MessageCreateParam
from letta.log import get_logger
from letta.schemas.agent import AgentState
from letta.schemas.enums import AgentType, JobStatus
from letta.schemas.llm_config import LLMConfig
logger = get_logger(__name__)
def get_llm_config(filename: str, llm_config_dir: str = "tests/configs/llm_model_configs") -> LLMConfig:
    """Load an LLMConfig from a JSON file in the config directory.

    Args:
        filename: Name of the JSON config file (e.g. "openai-gpt-4o-mini.json").
        llm_config_dir: Directory the file lives in, relative to the repo root.

    Returns:
        The parsed LLMConfig.
    """
    path = os.path.join(llm_config_dir, filename)
    # Explicit encoding: config files are UTF-8 regardless of the platform default.
    with open(path, "r", encoding="utf-8") as f:
        config_data = json.load(f)
    return LLMConfig(**config_data)
# Full matrix of model configs these tests exercise by default.
all_configs = [
    "openai-gpt-4o-mini.json",
    "openai-o3.json",
    "openai-gpt-5.json",
    "claude-4-5-sonnet.json",
    "claude-4-1-opus.json",
    "gemini-2.5-flash.json",
]
# LLM_CONFIG_FILE narrows the run to a single config (useful for CI sharding).
requested = os.getenv("LLM_CONFIG_FILE")
filenames = [requested] if requested else all_configs
# NOTE: configs are loaded at import time, so missing files fail collection early.
TESTED_LLM_CONFIGS: List[LLMConfig] = [get_llm_config(fn) for fn in filenames]
def roll_dice(num_sides: int) -> int:
    """
    Returns a random number between 1 and num_sides.
    Args:
        num_sides (int): The number of sides on the die.
    Returns:
        int: A random integer between 1 and num_sides, representing the die roll.
    """
    # Local import keeps the tool source self-contained when it is serialized
    # and executed in a sandbox.
    from random import randint

    return randint(1, num_sides)
# Canned user turn that instructs the agent to invoke the roll_dice tool.
USER_MESSAGE_ROLL_DICE: List[MessageCreateParam] = [
    MessageCreateParam(
        role="user",
        content="This is an automated test message. Call the roll_dice tool with 16 sides and reply back to me with the outcome.",
    )
]
async def accumulate_chunks(chunks: Any) -> List[Any]:
    """
    Accumulates chunks into a list of messages.

    Collapses each run of consecutive chunks that share a message_type down to
    the final chunk of that run.
    """
    collapsed: List[Any] = []
    sentinel = object()  # never equal to any real message_type, including None
    prev_type: Any = sentinel
    async for chunk in chunks:
        if prev_type != chunk.message_type:
            # Type changed: this chunk starts a new run.
            collapsed.append(chunk)
        else:
            # Same run: the newest chunk supersedes the previous one.
            collapsed[-1] = chunk
        prev_type = chunk.message_type
    return collapsed
async def cancel_run_after_delay(client: AsyncLetta, agent_id: str, delay: float = 0.5):
    """Wait `delay` seconds, then cancel the agent's in-flight message run."""
    await asyncio.sleep(delay)
    await client.agents.messages.cancel(agent_id=agent_id)
@pytest.fixture(scope="module")
def server_url() -> str:
"""
Provides the URL for the Letta server.
If LETTA_SERVER_URL is not set, starts the server in a background thread
and polls until it's accepting connections.
"""
def _run_server() -> None:
load_dotenv()
from letta.server.rest_api.app import start_server
start_server(debug=True)
url: str = os.getenv("LETTA_SERVER_URL", "http://localhost:8283")
if not os.getenv("LETTA_SERVER_URL"):
if os.getenv("LETTA_REDIS_HOST"):
print(f"Redis is configured at {os.getenv('LETTA_REDIS_HOST')}:{os.getenv('LETTA_REDIS_PORT', '6379')}")
thread = threading.Thread(target=_run_server, daemon=True)
thread.start()
timeout_seconds = 60
import time
import httpx
start_time = time.time()
while time.time() - start_time < timeout_seconds:
try:
response = httpx.get(url + "/v1/health", timeout=1.0, follow_redirects=True)
if response.status_code < 500:
break
except Exception:
pass
time.sleep(0.5)
else:
raise TimeoutError(f"Server at {url} did not become ready in {timeout_seconds}s")
return url
@pytest.fixture(scope="function")
async def client(server_url: str) -> AsyncLetta:
"""
Creates and returns an asynchronous Letta REST client for testing.
"""
client_instance = AsyncLetta(base_url=server_url)
yield client_instance
@pytest.fixture(scope="function")
async def agent_state(client: AsyncLetta) -> AgentState:
    """
    Yield a disposable v1 agent equipped only with the roll_dice tool;
    the agent is deleted during teardown.
    """
    dice_tool = await client.tools.upsert_from_function(func=roll_dice)
    agent = await client.agents.create(
        agent_type=AgentType.letta_v1_agent,
        name="test_agent",
        include_base_tools=False,
        tool_ids=[dice_tool.id],
        model="openai/gpt-4o",
        embedding="openai/text-embedding-3-small",
        tags=["test"],
    )
    yield agent
    await client.agents.delete(agent.id)
@pytest.mark.skipif(not os.getenv("LETTA_REDIS_HOST"), reason="Redis is required for background streaming (set LETTA_REDIS_HOST to enable)")
@pytest.mark.parametrize(
    "llm_config",
    TESTED_LLM_CONFIGS,
    ids=[c.model for c in TESTED_LLM_CONFIGS],
)
@pytest.mark.asyncio(loop_scope="function")
async def test_background_streaming_cancellation(
    disable_e2b_api_key: Any,
    client: AsyncLetta,
    agent_state: AgentState,
    llm_config: LLMConfig,
) -> None:
    """Cancel a background streaming run mid-flight, then verify the run is
    marked cancelled and the persisted stream replays a single
    stop_reason=cancelled message."""
    agent_state = await client.agents.update(agent_id=agent_state.id, llm_config=llm_config)
    # Schedule the cancel to land while the stream below is still producing chunks.
    delay = 1.5
    _cancellation_task = asyncio.create_task(cancel_run_after_delay(client, agent_state.id, delay=delay))
    response = await client.agents.messages.stream(
        agent_id=agent_state.id,
        messages=USER_MESSAGE_ROLL_DICE,
        stream_tokens=True,
        background=True,
    )
    messages = await accumulate_chunks(response)
    run_id = messages[0].run_id if hasattr(messages[0], "run_id") else None
    await _cancellation_task
    if run_id:
        run = await client.runs.retrieve(run_id=run_id)
        assert run.status == JobStatus.cancelled
    else:
        # No run_id on the first chunk: fall back to looking the run up by stop reason.
        runs = await client.runs.list(agent_id=agent_state.id, stop_reason="cancelled", limit=1)
        assert len(list(runs)) == 1
        # NOTE(review): `list(runs)` above vs `runs.items` here — presumably the
        # paginated result is both iterable and exposes `.items`; confirm against the SDK.
        run_id = runs.items[0].id
    # Replay the persisted stream from the beginning (starting_after=0).
    response = await client.runs.messages.stream(run_id=run_id, starting_after=0)
    messages_from_stream = await accumulate_chunks(response)
    assert len(messages_from_stream) > 0
    # Verify the stream contains stop_reason: cancelled (from our new cancellation logic)
    stop_reasons = [msg for msg in messages_from_stream if hasattr(msg, "message_type") and msg.message_type == "stop_reason"]
    assert len(stop_reasons) == 1, f"Expected exactly 1 stop_reason in stream, got {len(stop_reasons)}"
    assert stop_reasons[0].stop_reason == "cancelled", f"Expected stop_reason 'cancelled', got '{stop_reasons[0].stop_reason}'"
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/integration_test_cancellation.py",
"license": "Apache License 2.0",
"lines": 163,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/integration_test_modal.py | import os
import secrets
import string
import uuid
from pathlib import Path
from unittest.mock import patch
import pytest
from sqlalchemy import delete
from letta.config import LettaConfig
from letta.functions.function_sets.base import core_memory_append, core_memory_replace
from letta.orm.sandbox_config import SandboxConfig, SandboxEnvironmentVariable
from letta.schemas.agent import AgentState, CreateAgent
from letta.schemas.block import CreateBlock
from letta.schemas.environment_variables import AgentEnvironmentVariable, SandboxEnvironmentVariableCreate
from letta.schemas.organization import Organization
from letta.schemas.pip_requirement import PipRequirement
from letta.schemas.sandbox_config import LocalSandboxConfig, ModalSandboxConfig, SandboxConfigCreate
from letta.schemas.user import User
from letta.server.server import SyncServer
from letta.services.organization_manager import OrganizationManager
from letta.services.sandbox_config_manager import SandboxConfigManager
from letta.services.tool_manager import ToolManager
from letta.services.tool_sandbox.modal_sandbox import AsyncToolSandboxModal
from letta.services.user_manager import UserManager
from tests.helpers.utils import create_tool_from_func
# Constants
# Deterministic UUIDv5 names: repeated runs map to the same org/user names, so
# create-or-get style setup stays idempotent across test sessions.
namespace = uuid.NAMESPACE_DNS
org_name = str(uuid.uuid5(namespace, "test-tool-execution-sandbox-org"))
user_name = str(uuid.uuid5(namespace, "test-tool-execution-sandbox-user"))

# Set environment variable immediately to prevent pooling issues
# (must happen at import time, before any engine is created).
os.environ["LETTA_DISABLE_SQLALCHEMY_POOLING"] = "true"
# Disable SQLAlchemy connection pooling for tests to prevent event loop issues
@pytest.fixture(scope="session", autouse=True)
def disable_db_pooling_for_tests():
    """Disable database connection pooling for the entire test session."""
    # The environment variable is already set at import time above.
    yield
    # Teardown: remove the flag again (no-op if something else removed it).
    os.environ.pop("LETTA_DISABLE_SQLALCHEMY_POOLING", None)
# @pytest.fixture(autouse=True)
# async def cleanup_db_connections():
# """Cleanup database connections after each test."""
# yield
#
# # Dispose async engines in the current event loop
# try:
# await close_db()
# except Exception as e:
# # Log the error but don't fail the test
# print(f"Warning: Failed to cleanup database connections: {e}")
# Fixtures
@pytest.fixture(scope="module")
def server():
    """
    Create a SyncServer instance for testing.

    Loads and saves config first to ensure proper initialization, then yields
    a server bootstrapped with the default org and user.
    """
    cfg = LettaConfig.load()
    cfg.save()
    yield SyncServer(init_with_default_org_and_user=True)
@pytest.fixture(autouse=True)
async def clear_tables():
    """Wipe the sandbox env-var and sandbox config tables before each test."""
    from letta.server.db import db_registry

    async with db_registry.async_session() as session:
        # Env vars first (they reference sandbox configs); the session context
        # manager handles the commit.
        for table in (SandboxEnvironmentVariable, SandboxConfig):
            await session.execute(delete(table))
@pytest.fixture
async def test_organization():
    """Create and yield the default test organization."""
    yield await OrganizationManager().create_organization_async(Organization(name=org_name))
@pytest.fixture
async def test_user(test_organization):
    """Create and yield the default user inside the default test organization."""
    yield await UserManager().create_actor_async(User(name=user_name, organization_id=test_organization.id))
@pytest.fixture
async def add_integers_tool(test_user):
    """Register a simple synchronous addition tool owned by the test user."""

    def add(x: int, y: int) -> int:
        """
        Simple function that adds two integers.

        Parameters:
            x (int): The first integer to add.
            y (int): The second integer to add.

        Returns:
            int: The result of adding x and y.
        """
        return x + y

    draft = create_tool_from_func(add)
    yield await ToolManager().create_or_update_tool_async(draft, test_user)
@pytest.fixture
async def cowsay_tool(test_user):
    """Register a tool that needs the `cowsay` package, which letta itself does
    NOT ship — a passing run proves the tool executed in a separate Python
    environment with its pip requirements installed."""

    def cowsay() -> str:
        """
        Simple function that uses the cowsay package to print out the secret word env variable.

        Returns:
            str: The cowsay ASCII art.
        """
        import os

        import cowsay

        cowsay.cow(os.getenv("secret_word"))

    draft = create_tool_from_func(cowsay)
    # Add cowsay as a pip requirement for Modal
    draft.pip_requirements = [PipRequirement(name="cowsay")]
    yield await ToolManager().create_or_update_tool_async(draft, test_user)
@pytest.fixture
async def get_env_tool(test_user):
    """Register a tool that echoes the `secret_word` environment variable."""

    def get_env() -> str:
        """
        Simple function that returns the secret word env variable.

        Returns:
            str: The secret word
        """
        import os

        secret_word = os.getenv("secret_word")
        print(secret_word)
        return secret_word

    draft = create_tool_from_func(get_env)
    yield await ToolManager().create_or_update_tool_async(draft, test_user)
@pytest.fixture
async def get_warning_tool(test_user):
    """Register a tool that emits a Python warning (for stderr-capture checks)."""

    def warn_hello_world() -> str:
        """
        Simple function that warns hello world.

        Returns:
            str: hello world
        """
        import warnings

        msg = "Hello World"
        warnings.warn(msg)
        return msg

    draft = create_tool_from_func(warn_hello_world)
    yield await ToolManager().create_or_update_tool_async(draft, test_user)
@pytest.fixture
async def always_err_tool(test_user):
    """Register a tool that always raises, for error-propagation tests."""

    def error() -> str:
        """
        Simple function that errors

        Returns:
            str: not important
        """
        # Raise a unusual error so we know it's from this function
        print("Going to error now")
        raise ZeroDivisionError("This is an intentionally weird division!")

    draft = create_tool_from_func(error)
    yield await ToolManager().create_or_update_tool_async(draft, test_user)
@pytest.fixture
async def list_tool(test_user):
    """Register a tool whose return value is a plain list."""

    def create_list():
        """Simple function that returns a list"""
        return [1] * 5

    draft = create_tool_from_func(create_list)
    yield await ToolManager().create_or_update_tool_async(draft, test_user)
@pytest.fixture
async def clear_core_memory_tool(test_user):
    """Register a stateful tool that blanks the agent's core memory blocks."""

    def clear_memory(agent_state: "AgentState"):
        """Clear the core memory"""
        agent_state.memory.get_block("human").value = ""
        agent_state.memory.get_block("persona").value = ""

    draft = create_tool_from_func(clear_memory)
    yield await ToolManager().create_or_update_tool_async(draft, test_user)
@pytest.fixture
async def external_codebase_tool(test_user):
    """Register a tool imported from the external restaurant-management test codebase."""
    from tests.test_tool_sandbox.restaurant_management_system.adjust_menu_prices import adjust_menu_prices

    draft = create_tool_from_func(adjust_menu_prices)
    yield await ToolManager().create_or_update_tool_async(draft, test_user)
@pytest.fixture
async def agent_state(server: SyncServer):
    """Create a default-actor agent with base tools, memory blocks, and no tool rules."""
    await server.init_async(init_with_default_org_and_user=True)
    actor = await server.user_manager.create_default_actor_async()
    created = await server.create_agent_async(
        CreateAgent(
            memory_blocks=[
                CreateBlock(label="human", value="username: sarah"),
                CreateBlock(label="persona", value="This is the persona"),
            ],
            include_base_tools=True,
            model="openai/gpt-4o-mini",
            tags=["test_agents"],
            embedding="openai/text-embedding-3-small",
        ),
        actor=actor,
    )
    # Tool rules would constrain execution ordering; tests here want none.
    created.tool_rules = []
    yield created
@pytest.fixture
async def custom_test_sandbox_config(test_user):
    """
    Create a consistent local (venv-backed) sandbox configuration rooted in the
    restaurant-management test codebase.

    tqdm is used by that codebase but deliberately missing from its
    requirements.txt, so this also exercises pip-requirement installation.

    Returns:
        A tuple of (SandboxConfigManager, LocalSandboxConfig).
    """
    manager = SandboxConfigManager()
    codebase_dir = str(Path(__file__).parent / "test_tool_sandbox" / "restaurant_management_system")
    sandbox_cfg = LocalSandboxConfig(
        sandbox_dir=codebase_dir, use_venv=True, pip_requirements=[PipRequirement(name="tqdm")]
    )
    await manager.create_or_update_sandbox_config_async(
        sandbox_config_create=SandboxConfigCreate(config=sandbox_cfg.model_dump()), actor=test_user
    )
    return manager, sandbox_cfg
@pytest.fixture
async def core_memory_tools(test_user):
    """Register the core-memory base tools, mapped by function name."""
    registered = {}
    for source_func in (core_memory_replace, core_memory_append):
        created = await ToolManager().create_or_update_tool_async(create_tool_from_func(source_func), test_user)
        registered[source_func.__name__] = created
    yield registered
@pytest.fixture
async def async_add_integers_tool(test_user):
    """Register an async addition tool (exercises coroutine tool execution)."""

    async def async_add(x: int, y: int) -> int:
        """
        Async function that adds two integers.

        Parameters:
            x (int): The first integer to add.
            y (int): The second integer to add.

        Returns:
            int: The result of adding x and y.
        """
        import asyncio

        # Add a small delay to simulate async work
        await asyncio.sleep(0.1)
        return x + y

    draft = create_tool_from_func(async_add)
    yield await ToolManager().create_or_update_tool_async(draft, test_user)
@pytest.fixture
async def async_get_env_tool(test_user):
    """Register an async tool that echoes the `secret_word` environment variable."""

    async def async_get_env() -> str:
        """
        Async function that returns the secret word env variable.

        Returns:
            str: The secret word
        """
        import asyncio
        import os

        # Add a small delay to simulate async work
        await asyncio.sleep(0.1)
        secret_word = os.getenv("secret_word")
        print(secret_word)
        return secret_word

    draft = create_tool_from_func(async_get_env)
    yield await ToolManager().create_or_update_tool_async(draft, test_user)
@pytest.fixture
async def async_stateful_tool(test_user):
    """Register an async stateful tool that blanks the agent's core memory."""

    async def async_clear_memory(agent_state: "AgentState"):
        """Async function that clears the core memory"""
        import asyncio

        # Add a small delay to simulate async work
        await asyncio.sleep(0.1)
        agent_state.memory.get_block("human").value = ""
        agent_state.memory.get_block("persona").value = ""

    draft = create_tool_from_func(async_clear_memory)
    yield await ToolManager().create_or_update_tool_async(draft, test_user)
@pytest.fixture
async def async_error_tool(test_user):
    """Register an async tool that always raises, for error-propagation tests."""

    async def async_error() -> str:
        """
        Async function that errors

        Returns:
            str: not important
        """
        import asyncio

        # Add some async work before erroring
        await asyncio.sleep(0.1)
        print("Going to error now")
        raise ValueError("This is an intentional async error!")

    draft = create_tool_from_func(async_error)
    yield await ToolManager().create_or_update_tool_async(draft, test_user)
@pytest.fixture
async def async_list_tool(test_user):
    """Register an async tool whose return value is a plain list."""

    async def async_create_list() -> list:
        """Async function that returns a list"""
        import asyncio

        await asyncio.sleep(0.05)
        return [1, 2, 3, 4, 5]

    draft = create_tool_from_func(async_create_list)
    yield await ToolManager().create_or_update_tool_async(draft, test_user)
@pytest.fixture
async def tool_with_pip_requirements(test_user):
    """Register a tool that imports requests and numpy, declaring both as
    tool-level pip requirements (one pinned, one unpinned)."""

    def use_requests_and_numpy() -> str:
        """
        Function that uses requests and numpy packages to test tool-specific pip requirements.

        Returns:
            str: Success message if packages are available.
        """
        try:
            import numpy as np
            import requests

            # Simple usage to verify packages work
            response = requests.get("https://httpbin.org/json", timeout=30)
            arr = np.array([1, 2, 3])
            return f"Success! Status: {response.status_code}, Array sum: {np.sum(arr)}"
        except ImportError as e:
            return f"Import error: {e}"
        except Exception as e:
            return f"Other error: {e}"

    draft = create_tool_from_func(use_requests_and_numpy)
    # Declare the packages the sandbox must install before running the tool.
    draft.pip_requirements = [
        PipRequirement(name="requests", version="2.31.0"),
        PipRequirement(name="numpy"),
    ]
    yield await ToolManager().create_or_update_tool_async(draft, test_user)
@pytest.fixture
async def async_complex_tool(test_user):
    """Register an async tool doing multi-await work and returning a dict."""

    async def async_complex_computation(iterations: int = 3) -> dict:
        """
        Async function that performs complex computation with multiple awaits.

        Parameters:
            iterations (int): Number of iterations to perform.

        Returns:
            dict: Results of the computation.
        """
        import asyncio
        import time

        results = []
        start_time = time.time()
        for i in range(iterations):
            # Simulate async I/O
            await asyncio.sleep(0.1)
            results.append(i * 2)
        end_time = time.time()
        return {
            "results": results,
            "duration": end_time - start_time,
            "iterations": iterations,
            "average": sum(results) / len(results) if results else 0,
        }

    draft = create_tool_from_func(async_complex_computation)
    yield await ToolManager().create_or_update_tool_async(draft, test_user)
# Modal sandbox tests
@pytest.mark.asyncio
@pytest.mark.modal_sandbox
async def test_modal_sandbox_default(check_modal_key_is_set, add_integers_tool, test_user):
    """Sanity-check the Modal pathway: run() is invoked, and a real run adds the ints."""
    args = {"x": 10, "y": 5}

    # Mock and assert correct pathway was invoked
    with patch.object(AsyncToolSandboxModal, "run") as mock_run:
        await AsyncToolSandboxModal(add_integers_tool.name, args, user=test_user, tool_id=add_integers_tool.id).run()
        mock_run.assert_called_once()

    # Run again (unmocked) to get an actual response
    outcome = await AsyncToolSandboxModal(add_integers_tool.name, args, user=test_user, tool_id=add_integers_tool.id).run()
    assert int(outcome.func_return) == args["x"] + args["y"]
@pytest.mark.asyncio
@pytest.mark.modal_sandbox
async def test_modal_sandbox_pip_installs(check_modal_key_is_set, cowsay_tool, test_user):
    """Test that Modal sandbox installs tool-level pip requirements."""
    manager = SandboxConfigManager()
    config_create = SandboxConfigCreate(config=ModalSandboxConfig().model_dump())
    config = await manager.create_or_update_sandbox_config_async(config_create, test_user)
    # Store a random secret in the sandbox env so we can recognize it in the
    # cowsay output (the tool prints os.getenv("secret_word")).
    key = "secret_word"
    long_random_string = "".join(secrets.choice(string.ascii_letters + string.digits) for _ in range(20))
    await manager.create_sandbox_env_var_async(
        SandboxEnvironmentVariableCreate(key=key, value=long_random_string),
        sandbox_config_id=config.id,
        actor=test_user,
    )
    sandbox = AsyncToolSandboxModal(cowsay_tool.name, {}, user=test_user, tool_id=cowsay_tool.id)
    result = await sandbox.run()
    # The secret appearing on stdout proves both that cowsay was installed and
    # that the env var was injected into the sandbox.
    assert long_random_string in result.stdout[0]
@pytest.mark.asyncio
@pytest.mark.modal_sandbox
async def test_modal_sandbox_stateful_tool(check_modal_key_is_set, clear_core_memory_tool, test_user, agent_state):
    """A stateful tool can mutate agent core memory inside the Modal sandbox."""
    sandbox = AsyncToolSandboxModal(clear_core_memory_tool.name, {}, user=test_user, tool_id=clear_core_memory_tool.id)
    outcome = await sandbox.run(agent_state=agent_state)
    for label in ("human", "persona"):
        assert outcome.agent_state.memory.get_block(label).value == ""
    assert outcome.func_return is None
@pytest.mark.asyncio
@pytest.mark.modal_sandbox
async def test_modal_sandbox_inject_env_var_existing_sandbox(check_modal_key_is_set, get_env_tool, test_user):
    """Env vars added after a sandbox has already run are picked up on the next run."""
    manager = SandboxConfigManager()
    config_create = SandboxConfigCreate(config=ModalSandboxConfig().model_dump())
    config = await manager.create_or_update_sandbox_config_async(config_create, test_user)
    # First run: the variable does not exist yet, so the tool returns None.
    sandbox = AsyncToolSandboxModal(get_env_tool.name, {}, user=test_user, tool_id=get_env_tool.id)
    result = await sandbox.run()
    assert result.func_return is None
    key = "secret_word"
    long_random_string = "".join(secrets.choice(string.ascii_letters + string.digits) for _ in range(20))
    await manager.create_sandbox_env_var_async(
        SandboxEnvironmentVariableCreate(key=key, value=long_random_string),
        sandbox_config_id=config.id,
        actor=test_user,
    )
    # Second run against the same sandbox config now sees the injected value.
    sandbox = AsyncToolSandboxModal(get_env_tool.name, {}, user=test_user, tool_id=get_env_tool.id)
    result = await sandbox.run()
    assert long_random_string in result.func_return
@pytest.mark.asyncio
@pytest.mark.modal_sandbox
async def test_modal_sandbox_per_agent_env(check_modal_key_is_set, get_env_tool, agent_state, test_user):
    """Per-agent secrets take precedence over sandbox-level env vars with the same key."""
    manager = SandboxConfigManager()
    key = "secret_word"
    # Sandbox-level value that the per-agent secret must shadow.
    wrong_val = "".join(secrets.choice(string.ascii_letters + string.digits) for _ in range(20))
    correct_val = "".join(secrets.choice(string.ascii_letters + string.digits) for _ in range(20))
    config_create = SandboxConfigCreate(config=ModalSandboxConfig().model_dump())
    config = await manager.create_or_update_sandbox_config_async(config_create, test_user)
    await manager.create_sandbox_env_var_async(
        SandboxEnvironmentVariableCreate(key=key, value=wrong_val),
        sandbox_config_id=config.id,
        actor=test_user,
    )
    # Attach the same key as a per-agent secret with a different value.
    agent_state.secrets = [AgentEnvironmentVariable(key=key, value=correct_val, agent_id=agent_state.id)]
    sandbox = AsyncToolSandboxModal(get_env_tool.name, {}, user=test_user, tool_id=get_env_tool.id)
    result = await sandbox.run(agent_state=agent_state)
    assert wrong_val not in result.func_return
    assert correct_val in result.func_return
@pytest.mark.asyncio
@pytest.mark.modal_sandbox
async def test_modal_sandbox_with_list_rv(check_modal_key_is_set, list_tool, test_user):
    """A tool returning a list round-trips through the Modal sandbox."""
    outcome = await AsyncToolSandboxModal(list_tool.name, {}, user=test_user, tool_id=list_tool.id).run()
    assert len(outcome.func_return) == 5
@pytest.mark.asyncio
@pytest.mark.modal_sandbox
async def test_modal_sandbox_with_tool_pip_requirements(check_modal_key_is_set, tool_with_pip_requirements, test_user):
    """Test that Modal sandbox installs tool-specific pip requirements."""
    manager = SandboxConfigManager()
    await manager.create_or_update_sandbox_config_async(SandboxConfigCreate(config=ModalSandboxConfig().model_dump()), test_user)

    sandbox = AsyncToolSandboxModal(
        tool_with_pip_requirements.name, {}, user=test_user, tool_id=tool_with_pip_requirements.id, tool_object=tool_with_pip_requirements
    )
    outcome = await sandbox.run()

    # Should succeed since tool pip requirements were installed
    for fragment in ("Success!", "Status: 200", "Array sum: 6"):
        assert fragment in outcome.func_return
@pytest.mark.asyncio
@pytest.mark.modal_sandbox
async def test_modal_sandbox_with_mixed_pip_requirements(check_modal_key_is_set, tool_with_pip_requirements, test_user):
    """Test that Modal sandbox installs tool pip requirements.

    Note: Modal does not support sandbox-level pip requirements - all pip requirements
    must be specified at the tool level since the Modal app is deployed with a fixed image.
    """
    manager = SandboxConfigManager()
    await manager.create_or_update_sandbox_config_async(SandboxConfigCreate(config=ModalSandboxConfig().model_dump()), test_user)

    sandbox = AsyncToolSandboxModal(
        tool_with_pip_requirements.name, {}, user=test_user, tool_id=tool_with_pip_requirements.id, tool_object=tool_with_pip_requirements
    )
    outcome = await sandbox.run()

    # Should succeed since tool pip requirements were installed
    for fragment in ("Success!", "Status: 200", "Array sum: 6"):
        assert fragment in outcome.func_return
@pytest.mark.asyncio
@pytest.mark.modal_sandbox
async def test_modal_sandbox_with_broken_tool_pip_requirements_error_handling(check_modal_key_is_set, test_user):
    """Test that Modal sandbox provides informative error messages for broken tool pip requirements."""

    def use_broken_package() -> str:
        """
        Function that tries to use packages with broken version constraints.

        Returns:
            str: Success message if packages are available.
        """
        return "Should not reach here"

    tool = create_tool_from_func(use_broken_package)
    # Add broken pip requirements
    tool.pip_requirements = [
        PipRequirement(name="numpy", version="1.24.0"),  # Old version incompatible with newer Python
        PipRequirement(name="nonexistent-package-12345"),  # Non-existent package
    ]
    # Registration itself validates the requirements, so the failure surfaces
    # already at create/update time — expect a LettaInvalidArgumentError.
    from letta.errors import LettaInvalidArgumentError

    with pytest.raises(LettaInvalidArgumentError):
        tool = await ToolManager().create_or_update_tool_async(tool, test_user)
@pytest.mark.asyncio
async def test_async_function_detection(add_integers_tool, async_add_integers_tool, test_user):
    """Test that async function detection works correctly"""
    # (tool, expected is_async_function) pairs: sync tool first, then async tool.
    for tool, expect_async in ((add_integers_tool, False), (async_add_integers_tool, True)):
        sandbox = AsyncToolSandboxModal(tool.name, {}, test_user, tool_id=tool.id, tool_object=tool)
        await sandbox._init_async()
        assert bool(sandbox.is_async_function) is expect_async
@pytest.mark.asyncio
@pytest.mark.modal_sandbox
async def test_modal_sandbox_async_function_execution(check_modal_key_is_set, async_add_integers_tool, test_user):
    """Test that async functions execute correctly in Modal sandbox"""
    call_args = {"x": 20, "y": 30}
    outcome = await AsyncToolSandboxModal(
        async_add_integers_tool.name, call_args, user=test_user, tool_id=async_add_integers_tool.id
    ).run()
    assert int(outcome.func_return) == call_args["x"] + call_args["y"]
@pytest.mark.asyncio
@pytest.mark.modal_sandbox
async def test_modal_sandbox_async_complex_computation(check_modal_key_is_set, async_complex_tool, test_user):
    """Test complex async computation with multiple awaits in Modal sandbox"""
    outcome = await AsyncToolSandboxModal(
        async_complex_tool.name, {"iterations": 2}, user=test_user, tool_id=async_complex_tool.id
    ).run()
    payload = outcome.func_return
    assert isinstance(payload, dict)
    assert payload["results"] == [0, 2]
    assert payload["iterations"] == 2
    assert payload["average"] == 1.0
    # Two iterations sleep 0.1s each, so the measured duration must exceed 0.15s.
    assert payload["duration"] > 0.15
@pytest.mark.asyncio
@pytest.mark.modal_sandbox
async def test_modal_sandbox_async_list_return(check_modal_key_is_set, async_list_tool, test_user):
    """Test async function returning list in Modal sandbox"""
    outcome = await AsyncToolSandboxModal(async_list_tool.name, {}, user=test_user, tool_id=async_list_tool.id).run()
    assert outcome.func_return == [1, 2, 3, 4, 5]
@pytest.mark.asyncio
@pytest.mark.modal_sandbox
async def test_modal_sandbox_async_with_env_vars(check_modal_key_is_set, async_get_env_tool, test_user):
    """Test async function with environment variables in Modal sandbox"""
    manager = SandboxConfigManager()
    config_create = SandboxConfigCreate(config=ModalSandboxConfig().model_dump())
    config = await manager.create_or_update_sandbox_config_async(config_create, test_user)
    # Create environment variable
    key = "secret_word"
    test_value = "async_modal_test_value_456"
    await manager.create_sandbox_env_var_async(
        SandboxEnvironmentVariableCreate(key=key, value=test_value), sandbox_config_id=config.id, actor=test_user
    )
    sandbox = AsyncToolSandboxModal(async_get_env_tool.name, {}, user=test_user, tool_id=async_get_env_tool.id)
    result = await sandbox.run()
    # The tool returns os.getenv("secret_word"), so the injected value must appear.
    assert test_value in result.func_return
@pytest.mark.asyncio
@pytest.mark.modal_sandbox
async def test_modal_sandbox_async_with_agent_state(check_modal_key_is_set, async_stateful_tool, test_user, agent_state):
    """Test async function with agent state in Modal sandbox"""
    outcome = await AsyncToolSandboxModal(async_stateful_tool.name, {}, user=test_user, tool_id=async_stateful_tool.id).run(
        agent_state=agent_state
    )
    for label in ("human", "persona"):
        assert outcome.agent_state.memory.get_block(label).value == ""
    assert outcome.func_return is None
@pytest.mark.asyncio
@pytest.mark.modal_sandbox
async def test_modal_sandbox_async_error_handling(check_modal_key_is_set, async_error_tool, test_user):
    """Test async function error handling in Modal sandbox"""
    sandbox = AsyncToolSandboxModal(async_error_tool.name, {}, user=test_user, tool_id=async_error_tool.id)
    result = await sandbox.run()
    # Check that error was captured: the tool prints before raising, so stdout
    # carries the printed message and stderr the raised ValueError's text.
    assert len(result.stdout) != 0, "stdout not empty"
    assert "error" in result.stdout[0], "stdout contains printed string"
    assert len(result.stderr) != 0, "stderr not empty"
    assert "ValueError: This is an intentional async error!" in result.stderr[0], "stderr contains expected error"
@pytest.mark.asyncio
@pytest.mark.modal_sandbox
async def test_modal_sandbox_async_per_agent_env(check_modal_key_is_set, async_get_env_tool, agent_state, test_user):
    """Test async function with per-agent environment variables in Modal sandbox"""
    manager = SandboxConfigManager()
    key = "secret_word"
    # Sandbox-level value that the per-agent secret must shadow.
    wrong_val = "wrong_async_modal_value"
    correct_val = "correct_async_modal_value"
    config_create = SandboxConfigCreate(config=ModalSandboxConfig().model_dump())
    config = await manager.create_or_update_sandbox_config_async(config_create, test_user)
    await manager.create_sandbox_env_var_async(
        SandboxEnvironmentVariableCreate(key=key, value=wrong_val),
        sandbox_config_id=config.id,
        actor=test_user,
    )
    # Attach the same key as a per-agent secret with a different value.
    agent_state.secrets = [AgentEnvironmentVariable(key=key, value=correct_val, agent_id=agent_state.id)]
    sandbox = AsyncToolSandboxModal(async_get_env_tool.name, {}, user=test_user, tool_id=async_get_env_tool.id)
    result = await sandbox.run(agent_state=agent_state)
    assert wrong_val not in result.func_return
    assert correct_val in result.func_return
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/integration_test_modal.py",
"license": "Apache License 2.0",
"lines": 604,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/test_exception_logging.py | """
Tests for global exception logging system.
"""
import asyncio
from unittest.mock import patch
import pytest
from fastapi import FastAPI
from fastapi.testclient import TestClient
from letta.exceptions.logging import add_exception_context, log_and_raise, log_exception
from letta.server.rest_api.middleware.logging import LoggingMiddleware
@pytest.fixture
def app_with_exception_middleware():
    """Create a test FastAPI app with logging middleware and three probe routes."""
    app = FastAPI()
    app.add_middleware(LoggingMiddleware)

    @app.get("/test-error")
    def test_error():
        raise ValueError("Test error message")

    @app.get("/test-error-with-context")
    def test_error_with_context():
        # add_exception_context returns the same exception instance, so it can
        # be raised directly.
        raise add_exception_context(
            ValueError("Test error with context"),
            user_id="test-user-123",
            operation="test_operation",
        )

    @app.get("/test-success")
    def test_success():
        return {"status": "ok"}

    return app
def test_exception_middleware_logs_basic_exception(app_with_exception_middleware):
    """Test that the middleware logs exceptions with basic context."""
    # raise_server_exceptions=False lets the 500 response come back instead of
    # re-raising the handler's exception into the test.
    client = TestClient(app_with_exception_middleware, raise_server_exceptions=False)
    with patch("letta.server.rest_api.middleware.logging.logger") as mock_logger:
        response = client.get("/test-error")
        # Should return 500
        assert response.status_code == 500
        # Should log the error
        assert mock_logger.error.called
        call_args = mock_logger.error.call_args
        # Check the message (first positional arg of logger.error)
        assert "ValueError" in call_args[0][0]
        assert "Test error message" in call_args[0][0]
        # Check the extra context passed via logger.error(..., extra=...)
        extra = call_args[1]["extra"]
        assert extra["exception_type"] == "ValueError"
        assert extra["exception_message"] == "Test error message"
        assert "request" in extra
        assert extra["request"]["method"] == "GET"
        assert "/test-error" in extra["request"]["path"]
def test_exception_middleware_logs_custom_context(app_with_exception_middleware):
    """Test that the middleware logs custom context attached to exceptions."""
    client = TestClient(app_with_exception_middleware, raise_server_exceptions=False)
    with patch("letta.server.rest_api.middleware.logging.logger") as mock_logger:
        assert client.get("/test-error-with-context").status_code == 500

    # The mock retains recorded calls after the patch exits.
    assert mock_logger.error.called
    extra = mock_logger.error.call_args[1]["extra"]
    assert "custom_context" in extra
    custom = extra["custom_context"]
    assert custom["user_id"] == "test-user-123"
    assert custom["operation"] == "test_operation"
def test_exception_middleware_does_not_interfere_with_success(app_with_exception_middleware):
    """Test that the middleware doesn't interfere with successful requests."""
    resp = TestClient(app_with_exception_middleware).get("/test-success")
    assert resp.status_code == 200
    assert resp.json() == {"status": "ok"}
def test_add_exception_context():
    """add_exception_context attaches context in place and returns the same object."""
    original = ValueError("Test error")
    returned = add_exception_context(
        original,
        user_id="user-123",
        agent_id="agent-456",
        operation="test_op",
    )
    # Context is attached to the very same exception instance.
    assert returned is original
    assert hasattr(original, "__letta_context__")
    ctx = original.__letta_context__
    assert ctx["user_id"] == "user-123"
    assert ctx["agent_id"] == "agent-456"
    assert ctx["operation"] == "test_op"
def test_add_exception_context_multiple_times():
    """Context from successive add_exception_context calls accumulates."""
    exc = ValueError("Test error")
    for extra_ctx in ({"user_id": "user-123"}, {"agent_id": "agent-456"}):
        add_exception_context(exc, **extra_ctx)
    # Keys from both calls are present.
    assert exc.__letta_context__["user_id"] == "user-123"
    assert exc.__letta_context__["agent_id"] == "agent-456"
def test_log_and_raise():
    """Test that log_and_raise logs and then raises the exception."""
    exc = ValueError("Test error")
    with patch("letta.exceptions.logging.logger") as mock_logger:
        with pytest.raises(ValueError, match="Test error"):
            log_and_raise(
                exc,
                "Operation failed",
                context={"user_id": "user-123"},
            )
        # Should have logged before re-raising
        assert mock_logger.error.called
        call_args = mock_logger.error.call_args
        # Check message (first positional arg of logger.error)
        assert "Operation failed" in call_args[0][0]
        assert "ValueError" in call_args[0][0]
        assert "Test error" in call_args[0][0]
        # Check extra context flattened into the `extra` kwarg
        extra = call_args[1]["extra"]
        assert extra["exception_type"] == "ValueError"
        assert extra["user_id"] == "user-123"
def test_log_exception():
    """log_exception records the error but does not propagate it."""
    exc = ValueError("Test error")
    with patch("letta.exceptions.logging.logger") as mock_logger:
        # Must complete without raising — that's the whole point of the helper.
        log_exception(exc, "Operation failed, using fallback", context={"user_id": "user-123"})
        assert mock_logger.error.called
        args, kwargs = mock_logger.error.call_args
        # Message and exception class name appear in the formatted log line.
        assert "Operation failed, using fallback" in args[0]
        assert "ValueError" in args[0]
        # Structured context is forwarded through `extra`.
        extra = kwargs["extra"]
        assert extra["exception_type"] == "ValueError"
        assert extra["user_id"] == "user-123"
def test_log_exception_with_different_levels():
    """log_exception routes to the logger method matching the `level` arg."""
    exc = ValueError("Test error")
    with patch("letta.exceptions.logging.logger") as mock_logger:
        # warning level -> logger.warning
        log_exception(exc, "Warning message", level="warning")
        assert mock_logger.warning.called
        # info level -> logger.info
        log_exception(exc, "Info message", level="info")
        assert mock_logger.info.called
@pytest.mark.asyncio
async def test_global_exception_handler_setup():
    """Test that global exception handlers can be set up without errors.

    Fix: the original left the replaced ``sys.excepthook`` installed for the
    rest of the test session (global test pollution); we now restore it in a
    ``finally`` block so other tests see the hook they started with.
    """
    import sys

    from letta.server.global_exception_handler import setup_global_exception_handlers

    saved_hook = sys.excepthook
    try:
        # Should not raise
        setup_global_exception_handlers()
        # Verify sys.excepthook was modified away from the interpreter default
        assert sys.excepthook != sys.__excepthook__
    finally:
        # Undo the global side effect to keep tests isolated.
        sys.excepthook = saved_hook
@pytest.mark.asyncio
async def test_asyncio_exception_handler():
    """Test that asyncio exception handler can be set up.

    Fix: inside a coroutine a loop is guaranteed to be running, so use
    ``asyncio.get_running_loop()`` — ``get_event_loop()`` is deprecated for
    this use and emits a DeprecationWarning on modern Python.
    """
    from letta.server.global_exception_handler import setup_asyncio_exception_handler

    loop = asyncio.get_running_loop()
    # Should not raise
    setup_asyncio_exception_handler(loop)
def test_exception_middleware_preserves_traceback(app_with_exception_middleware):
    """The middleware logs failures with full traceback information."""
    client = TestClient(app_with_exception_middleware, raise_server_exceptions=False)
    with patch("letta.server.rest_api.middleware.logging.logger") as mock_logger:
        assert client.get("/test-error").status_code == 500
        _, kwargs = mock_logger.error.call_args
        # exc_info=True lets the logging framework attach the traceback.
        assert kwargs["exc_info"] is True
        # A pre-formatted traceback string also travels in `extra`.
        extra = kwargs["extra"]
        assert "traceback" in extra
        assert "ValueError" in extra["traceback"]
        assert "test_error" in extra["traceback"]
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/test_exception_logging.py",
"license": "Apache License 2.0",
"lines": 179,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/test_log_context.py | import json
import logging
from io import StringIO
from letta.log import JSONFormatter, LogContextFilter
from letta.log_context import clear_log_context, get_log_context, remove_log_context, set_log_context, update_log_context
class TestLogContext:
    """Unit tests for the module-level log-context get/set/remove/clear API."""

    def test_set_log_context(self):
        clear_log_context()
        set_log_context("agent_id", "agent-123")
        assert get_log_context("agent_id") == "agent-123"
        clear_log_context()

    def test_update_log_context(self):
        clear_log_context()
        update_log_context(agent_id="agent-123", actor_id="user-456")
        ctx = get_log_context()
        assert ctx["agent_id"] == "agent-123"
        assert ctx["actor_id"] == "user-456"
        clear_log_context()

    def test_remove_log_context(self):
        clear_log_context()
        update_log_context(agent_id="agent-123", actor_id="user-456")
        remove_log_context("agent_id")
        ctx = get_log_context()
        # Only the removed key disappears; remaining entries survive.
        assert "agent_id" not in ctx
        assert ctx["actor_id"] == "user-456"
        clear_log_context()

    def test_clear_log_context(self):
        update_log_context(agent_id="agent-123", actor_id="user-456")
        clear_log_context()
        # Clearing wipes every key at once.
        assert get_log_context() == {}

    def test_get_log_context_all(self):
        clear_log_context()
        update_log_context(agent_id="agent-123", actor_id="user-456")
        ctx = get_log_context()
        # Calling with no key returns the whole mapping.
        assert isinstance(ctx, dict)
        assert len(ctx) == 2
        clear_log_context()
class TestLogContextFilter:
    """Tests for LogContextFilter, which copies context vars onto log records."""

    @staticmethod
    def _make_record():
        # Minimal INFO-level record with no pre-set context attributes.
        return logging.LogRecord(
            name="test",
            level=logging.INFO,
            pathname="test.py",
            lineno=1,
            msg="Test message",
            args=(),
            exc_info=None,
        )

    def test_filter_adds_context_to_record(self):
        clear_log_context()
        update_log_context(agent_id="agent-123", actor_id="user-456")
        record = self._make_record()
        # The filter must admit the record and decorate it with context values.
        assert LogContextFilter().filter(record) is True
        assert hasattr(record, "agent_id")
        assert record.agent_id == "agent-123"
        assert hasattr(record, "actor_id")
        assert record.actor_id == "user-456"
        clear_log_context()

    def test_filter_does_not_override_existing_attributes(self):
        clear_log_context()
        update_log_context(agent_id="agent-123")
        record = self._make_record()
        record.agent_id = "agent-999"
        LogContextFilter().filter(record)
        # A value already present on the record wins over the context var.
        assert record.agent_id == "agent-999"
        clear_log_context()
class TestLogContextIntegration:
    """End-to-end tests: context vars flow through the filter into JSON output."""

    @staticmethod
    def _make_json_logger(name):
        # Build a logger that writes JSON lines (with context) into a StringIO.
        logger = logging.getLogger(name)
        logger.setLevel(logging.INFO)
        handler = logging.StreamHandler(StringIO())
        handler.setFormatter(JSONFormatter())
        handler.addFilter(LogContextFilter())
        logger.addHandler(handler)
        return logger, handler

    def test_json_formatter_includes_context(self):
        clear_log_context()
        update_log_context(agent_id="agent-123", actor_id="user-456")
        logger, handler = self._make_json_logger("test_logger")
        logger.info("Test message")
        handler.stream.seek(0)
        emitted = json.loads(handler.stream.read())
        # Message plus both context keys must appear in the JSON record.
        assert emitted["message"] == "Test message"
        assert emitted["agent_id"] == "agent-123"
        assert emitted["actor_id"] == "user-456"
        logger.removeHandler(handler)
        clear_log_context()

    def test_multiple_log_calls_with_changing_context(self):
        clear_log_context()
        logger, handler = self._make_json_logger("test_logger_2")
        update_log_context(agent_id="agent-123")
        logger.info("First message")
        update_log_context(actor_id="user-456")
        logger.info("Second message")
        handler.stream.seek(0)
        lines = handler.stream.readlines()
        assert len(lines) == 2
        first, second = (json.loads(line) for line in lines)
        # The first record predates actor_id being set.
        assert first["agent_id"] == "agent-123"
        assert "actor_id" not in first
        # The second record sees the accumulated context.
        assert second["agent_id"] == "agent-123"
        assert second["actor_id"] == "user-456"
        logger.removeHandler(handler)
        clear_log_context()
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/test_log_context.py",
"license": "Apache License 2.0",
"lines": 120,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/test_log_context_middleware.py | from unittest.mock import patch
import pytest
from fastapi import FastAPI
from fastapi.testclient import TestClient
import letta.server.rest_api.routers.v1.git_http as git_http_router
from letta.log_context import get_log_context
from letta.server.rest_api.middleware import LoggingMiddleware
@pytest.fixture
def app():
    """FastAPI app wrapped in LoggingMiddleware, with routes echoing log context."""
    application = FastAPI()
    application.add_middleware(LoggingMiddleware)

    @application.get("/v1/agents/{agent_id}")
    async def get_agent(agent_id: str):
        # Echo whatever context the middleware extracted for this request.
        return {"agent_id": agent_id, "context": get_log_context()}

    @application.get("/v1/agents/{agent_id}/tools/{tool_id}")
    async def get_agent_tool(agent_id: str, tool_id: str):
        return {"agent_id": agent_id, "tool_id": tool_id, "context": get_log_context()}

    @application.get("/v1/organizations/{org_id}/users/{user_id}")
    async def get_org_user(org_id: str, user_id: str):
        return {"org_id": org_id, "user_id": user_id, "context": get_log_context()}

    return application
@pytest.fixture
def client(app):
    """Synchronous test client bound to the middleware-wrapped app fixture."""
    return TestClient(app)
class TestLogContextMiddleware:
    """Tests that LoggingMiddleware extracts IDs from headers and URL paths.

    The middleware is expected to pull the actor id from the ``user_id``
    header and primitive ids (agent, tool, org, user) from path segments,
    publishing them via the log-context API for the duration of the request.
    """

    # NOTE(review): this first test exercises git push/sync behavior from
    # git_http_router, not the log-context middleware — it looks misplaced in
    # this class; consider moving it to a git-sync test module.
    @pytest.mark.asyncio
    async def test_sync_after_push_syncs_nested_block_labels_to_postgres(self, monkeypatch):
        """Regression test: nested labels (e.g., system/human) are synced from git files."""
        # Records the kwargs of every _sync_block_to_postgres call for assertions.
        synced_calls = []

        class DummyActor:
            id = "user-123"
            organization_id = "org-123"

        class DummyGit:
            # Stands in for the git backend; returns a fixed file tree at HEAD.
            async def get_files(self, agent_id, org_id, ref):
                assert ref == "HEAD"
                return {
                    "system/human.md": "---\ndescription: human\n---\nname: sarah",
                    "system/persona.md": "---\ndescription: persona\n---\nbe helpful",
                    "skills/research-helper/SKILL.md": (
                        "---\n"
                        "name: research-helper\n"
                        "description: Search the web and summarize findings.\n"
                        "---\n"
                        "# Research Helper\n\n"
                        "Use this skill to do deep web research and summarize results.\n"
                    ),
                    # Files nested below a skill's SKILL.md must NOT become blocks.
                    "skills/research-helper/references/details.md": "---\ndescription: nested\n---\nShould not be synced",
                }

        class DummyMemoryRepoManager:
            git = DummyGit()

        class DummyBlockManager:
            # Capture sync calls instead of touching a real database.
            async def _sync_block_to_postgres(self, **kwargs):
                synced_calls.append(kwargs)

        class DummyAgentManager:
            async def list_agent_blocks_async(self, **kwargs):
                return []

        class DummyUserManager:
            async def get_actor_by_id_async(self, actor_id):
                return DummyActor()

        class DummyServer:
            user_manager = DummyUserManager()
            memory_repo_manager = DummyMemoryRepoManager()
            block_manager = DummyBlockManager()
            agent_manager = DummyAgentManager()

        class DummyGitEnabledBlockManager(DummyBlockManager):
            pass

        dummy_server = DummyServer()
        # The router checks isinstance against GitEnabledBlockManager, so the
        # server's block_manager must be the patched subclass instance.
        dummy_server.block_manager = DummyGitEnabledBlockManager()
        monkeypatch.setattr(git_http_router, "_server_instance", dummy_server)
        from letta.settings import settings as core_settings

        monkeypatch.setattr(core_settings, "memfs_service_url", "http://memfs.test")
        with patch("letta.services.block_manager_git.GitEnabledBlockManager", DummyGitEnabledBlockManager):
            await git_http_router._sync_after_push(actor_id="user-123", agent_id="agent-123")
        labels = {call["label"] for call in synced_calls}
        # Nested labels under system/ and skills/ are synced; files below a
        # skill's own directory tree are not.
        assert "system/human" in labels
        assert "system/persona" in labels
        assert "skills/research-helper" in labels
        assert "skills/research-helper/references/details" not in labels
        by_label = {call["label"]: call for call in synced_calls}
        # The skill's frontmatter description and markdown body carry through.
        assert by_label["skills/research-helper"]["description"] == "Search the web and summarize findings."
        assert by_label["skills/research-helper"]["value"].startswith("# Research Helper")

    def test_extracts_actor_id_from_headers(self, client):
        """The `user_id` request header becomes `actor_id` in the log context."""
        response = client.get("/v1/agents/agent-123e4567-e89b-42d3-8456-426614174000", headers={"user_id": "user-abc123"})
        assert response.status_code == 200
        data = response.json()
        assert data["context"]["actor_id"] == "user-abc123"

    def test_extracts_agent_id_from_path(self, client):
        """An agent id path segment is published as `agent_id`."""
        agent_id = "agent-123e4567-e89b-42d3-8456-426614174000"
        response = client.get(f"/v1/agents/{agent_id}")
        assert response.status_code == 200
        data = response.json()
        assert data["context"]["agent_id"] == agent_id

    def test_extracts_multiple_primitive_ids_from_path(self, client):
        """Several typed ids in one path are all extracted."""
        agent_id = "agent-123e4567-e89b-42d3-8456-426614174000"
        tool_id = "tool-987e6543-e21c-42d3-9456-426614174000"
        response = client.get(f"/v1/agents/{agent_id}/tools/{tool_id}")
        assert response.status_code == 200
        data = response.json()
        assert data["context"]["agent_id"] == agent_id
        assert data["context"]["tool_id"] == tool_id

    def test_extracts_org_id_with_custom_mapping(self, client):
        """Org and user ids map to `org_id` / `user_id` context keys."""
        org_id = "org-123e4567-e89b-42d3-8456-426614174000"
        user_id = "user-987e6543-e21c-42d3-9456-426614174000"
        response = client.get(f"/v1/organizations/{org_id}/users/{user_id}")
        assert response.status_code == 200
        data = response.json()
        assert data["context"]["org_id"] == org_id
        assert data["context"]["user_id"] == user_id

    def test_extracts_both_header_and_path_context(self, client):
        """Header-derived and path-derived context coexist on one request."""
        agent_id = "agent-123e4567-e89b-42d3-8456-426614174000"
        response = client.get(f"/v1/agents/{agent_id}", headers={"user_id": "user-abc123"})
        assert response.status_code == 200
        data = response.json()
        assert data["context"]["actor_id"] == "user-abc123"
        assert data["context"]["agent_id"] == agent_id

    def test_handles_request_without_context(self, client):
        """A route with no extractable ids (and no handler) still returns cleanly."""
        response = client.get("/v1/health")
        assert response.status_code == 404

    def test_context_cleared_between_requests(self, client):
        """Context from one request must not bleed into the next."""
        agent_id_1 = "agent-111e4567-e89b-42d3-8456-426614174000"
        agent_id_2 = "agent-222e4567-e89b-42d3-8456-426614174000"
        response1 = client.get(f"/v1/agents/{agent_id_1}", headers={"user_id": "user-1"})
        assert response1.status_code == 200
        data1 = response1.json()
        assert data1["context"]["agent_id"] == agent_id_1
        assert data1["context"]["actor_id"] == "user-1"
        response2 = client.get(f"/v1/agents/{agent_id_2}", headers={"user_id": "user-2"})
        assert response2.status_code == 200
        data2 = response2.json()
        # Second request sees only its own values, not request 1's.
        assert data2["context"]["agent_id"] == agent_id_2
        assert data2["context"]["actor_id"] == "user-2"
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/test_log_context_middleware.py",
"license": "Apache License 2.0",
"lines": 134,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/test_server_providers.py | """Tests for provider initialization via ProviderManager.sync_base_providers and provider model persistence."""
import json
import uuid
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from letta.orm.errors import UniqueConstraintViolationError
from letta.schemas.agent import CreateAgent
from letta.schemas.embedding_config import EmbeddingConfig
from letta.schemas.enums import ProviderCategory, ProviderType
from letta.schemas.llm_config import LLMConfig
from letta.schemas.providers import LettaProvider, OpenAIProvider, ProviderCreate
from letta.server.server import SyncServer
from letta.services.organization_manager import OrganizationManager
from letta.services.provider_manager import ProviderManager
from letta.services.user_manager import UserManager
def unique_provider_name(base_name="test-provider"):
    """Return `base_name` suffixed with 8 random hex chars for test isolation."""
    suffix = uuid.uuid4().hex[:8]
    return f"{base_name}-{suffix}"
def generate_test_id():
    """Return an 8-char hex ID, unique per call, for handles and names."""
    # str(uuid4()) with dashes stripped equals uuid4().hex.
    return str(uuid.uuid4()).replace("-", "")[:8]
@pytest.fixture
async def default_organization():
    """Yield the default organization, creating it if necessary."""
    org = await OrganizationManager().create_default_organization_async()
    yield org
@pytest.fixture
async def default_user(default_organization):
    """Yield the default actor belonging to the default organization."""
    actor = await UserManager().create_default_actor_async(org_id=default_organization.id)
    yield actor
@pytest.fixture
async def provider_manager():
    """Provide a fresh ProviderManager for each test."""
    return ProviderManager()
@pytest.fixture
async def org_manager():
    """Provide a fresh OrganizationManager for each test."""
    return OrganizationManager()
@pytest.mark.asyncio
async def test_sync_base_providers_creates_new_providers(default_user, provider_manager):
    """sync_base_providers persists providers that are not yet in the DB."""
    # Simulate the base providers configured via environment.
    env_providers = [
        LettaProvider(name="letta"),
        OpenAIProvider(name="openai", api_key="sk-test-key"),
    ]
    await provider_manager.sync_base_providers(base_providers=env_providers, actor=default_user)
    # Each provider should now exist exactly once with the matching type.
    for name, expected_type in (("letta", ProviderType.letta), ("openai", ProviderType.openai)):
        found = await provider_manager.list_providers_async(name=name, actor=default_user)
        assert len(found) == 1
        assert found[0].name == name
        assert found[0].provider_type == expected_type
@pytest.mark.asyncio
async def test_sync_base_providers_skips_existing_providers(default_user, provider_manager):
    """Re-syncing the same base providers must not create duplicates."""
    env_providers = [LettaProvider(name="letta")]
    # Sync twice: the second pass should recognize and skip the existing row.
    for _ in range(2):
        await provider_manager.sync_base_providers(base_providers=env_providers, actor=default_user)
    persisted = await provider_manager.list_providers_async(name="letta", actor=default_user)
    assert len(persisted) == 1
@pytest.mark.asyncio
async def test_sync_base_providers_handles_race_condition(default_user, provider_manager):
    """Test that sync_base_providers handles race conditions gracefully.

    Simulates two pods syncing concurrently: this pod's existence check sees
    no provider, it attempts the insert, and the insert fails with a unique
    constraint violation because "another pod" won the race. The sync must
    swallow that specific failure rather than crash startup.
    """
    # Mock base providers from environment
    base_providers = [
        LettaProvider(name="letta"),
    ]
    # Mock a race condition: list returns empty, but create fails with UniqueConstraintViolation
    # Bind the real bound method BEFORE patching so mock_list can delegate to it.
    original_list = provider_manager.list_providers_async
    # Mutable counter (dict, not int) so the closure can update it.
    call_count = {"count": 0}

    async def mock_list(*args, **kwargs):
        # First call returns empty (simulating race condition window)
        if call_count["count"] == 0:
            call_count["count"] += 1
            return []
        # Subsequent calls use original behavior
        return await original_list(*args, **kwargs)

    async def mock_create(*args, **kwargs):
        # Simulate another pod creating the provider first
        raise UniqueConstraintViolationError("Provider already exists")

    with patch.object(provider_manager, "list_providers_async", side_effect=mock_list):
        with patch.object(provider_manager, "create_provider_async", side_effect=mock_create):
            # This should NOT raise an exception
            await provider_manager.sync_base_providers(base_providers=base_providers, actor=default_user)
@pytest.mark.asyncio
async def test_sync_base_providers_handles_none_api_key(default_user, provider_manager):
    """Providers with api_key=None (e.g. Letta's own) sync without error."""
    # Letta's provider needs no API key; sync is expected to coerce None safely.
    await provider_manager.sync_base_providers(
        base_providers=[LettaProvider(name="letta", api_key=None)],
        actor=default_user,
    )
    persisted = await provider_manager.list_providers_async(name="letta", actor=default_user)
    assert len(persisted) == 1
    assert persisted[0].name == "letta"
@pytest.mark.asyncio
async def test_sync_provider_models_async(default_user, provider_manager):
    """Test that sync_provider_models_async persists LLM and embedding models to database.

    Creates a provider, syncs two LLM configs and one embedding config as
    global models (organization_id=None), then reads each back by handle and
    verifies the persisted fields.

    Fix: `supports_token_streaming` was compared with `== True`; use an
    identity check (`is True`) per PEP 8 / flake8 E712.
    """
    # First create a provider in the database
    test_id = generate_test_id()
    provider_create = ProviderCreate(
        name=f"test-openai-{test_id}",
        provider_type=ProviderType.openai,
        api_key="sk-test-key",
    )
    provider = await provider_manager.create_provider_async(provider_create, actor=default_user, is_byok=False)
    # Mock LLM and embedding models with unique handles
    llm_models = [
        LLMConfig(
            model=f"gpt-4o-mini-{test_id}",
            model_endpoint_type="openai",
            model_endpoint="https://api.openai.com/v1",
            context_window=16384,
            handle=f"test-{test_id}/gpt-4o-mini",
            provider_name=provider.name,
            provider_category=ProviderCategory.base,
        ),
        LLMConfig(
            model=f"gpt-4o-{test_id}",
            model_endpoint_type="openai",
            model_endpoint="https://api.openai.com/v1",
            context_window=128000,
            handle=f"test-{test_id}/gpt-4o",
            provider_name=provider.name,
            provider_category=ProviderCategory.base,
        ),
    ]
    embedding_models = [
        EmbeddingConfig(
            embedding_model=f"text-embedding-3-small-{test_id}",
            embedding_endpoint_type="openai",
            embedding_endpoint="https://api.openai.com/v1",
            embedding_dim=1536,  # Add required embedding_dim
            embedding_chunk_size=300,
            handle=f"test-{test_id}/text-embedding-3-small",
        ),
    ]
    # Sync models to database
    await provider_manager.sync_provider_models_async(
        provider=provider,
        llm_models=llm_models,
        embedding_models=embedding_models,
        organization_id=None,  # Global models
    )
    # Verify models were persisted
    llm_model = await provider_manager.get_model_by_handle_async(
        handle=f"test-{test_id}/gpt-4o-mini",
        actor=default_user,
        model_type="llm",
    )
    assert llm_model is not None
    assert llm_model.handle == f"test-{test_id}/gpt-4o-mini"
    assert llm_model.name == f"gpt-4o-mini-{test_id}"
    assert llm_model.model_type == "llm"
    assert llm_model.provider_id == provider.id
    assert llm_model.organization_id is None  # Global model
    assert llm_model.max_context_window == 16384
    assert llm_model.supports_token_streaming is True
    embedding_model = await provider_manager.get_model_by_handle_async(
        handle=f"test-{test_id}/text-embedding-3-small",
        actor=default_user,
        model_type="embedding",
    )
    assert embedding_model is not None
    assert embedding_model.handle == f"test-{test_id}/text-embedding-3-small"
    assert embedding_model.name == f"text-embedding-3-small-{test_id}"
    assert embedding_model.model_type == "embedding"
@pytest.mark.asyncio
async def test_sync_provider_models_idempotent(default_user, provider_manager):
    """Test that sync_provider_models_async is idempotent and doesn't duplicate models.

    Consistency fix: use the module's shared `generate_test_id()` helper like
    every sibling test, instead of inlining `uuid.uuid4().hex[:8]`.
    """
    # First create a provider in the database
    test_id = generate_test_id()
    provider_create = ProviderCreate(
        name=f"test-openai-{test_id}",
        provider_type=ProviderType.openai,
        api_key="sk-test-key",
    )
    provider = await provider_manager.create_provider_async(provider_create, actor=default_user, is_byok=False)
    # Mock LLM models with unique handle
    llm_models = [
        LLMConfig(
            model=f"gpt-4o-mini-{test_id}",
            model_endpoint_type="openai",
            model_endpoint="https://api.openai.com/v1",
            context_window=16384,
            handle=f"test-{test_id}/gpt-4o-mini",
            provider_name=provider.name,
            provider_category=ProviderCategory.base,
        ),
    ]
    # Sync models to database twice; the second sync must not duplicate rows.
    await provider_manager.sync_provider_models_async(
        provider=provider,
        llm_models=llm_models,
        embedding_models=[],
        organization_id=None,
    )
    await provider_manager.sync_provider_models_async(
        provider=provider,
        llm_models=llm_models,
        embedding_models=[],
        organization_id=None,
    )
    # Verify only one model exists
    models = await provider_manager.list_models_async(
        actor=default_user,
        model_type="llm",
        provider_id=provider.id,
    )
    # Filter for our specific model
    test_handle = f"test-{test_id}/gpt-4o-mini"
    gpt_models = [m for m in models if m.handle == test_handle]
    assert len(gpt_models) == 1
@pytest.mark.asyncio
async def test_get_model_by_handle_async_org_scoped(default_user, provider_manager):
    """Test that get_model_by_handle_async returns both base and BYOK providers/models.

    Setup: one base provider with two GLOBAL models (organization_id=None) and
    one BYOK provider with two ORG-SCOPED models. Each of the four models uses
    a distinct handle. Verifies each is retrievable by handle with the correct
    scope/context-window/provider linkage, and that listing returns all four.
    """
    test_id = generate_test_id()
    # Create a base provider
    base_provider_create = ProviderCreate(
        name=f"test-base-openai-{test_id}",
        provider_type=ProviderType.openai,
        api_key="sk-test-key",
    )
    base_provider = await provider_manager.create_provider_async(base_provider_create, actor=default_user, is_byok=False)
    # Create a BYOK provider with same type
    byok_provider_create = ProviderCreate(
        name=f"test-byok-openai-{test_id}",
        provider_type=ProviderType.openai,
        api_key="sk-byok-key",
    )
    byok_provider = await provider_manager.create_provider_async(byok_provider_create, actor=default_user, is_byok=True)
    # Create global base models with unique handles
    global_base_model = LLMConfig(
        model=f"gpt-4o-{test_id}",
        model_endpoint_type="openai",
        model_endpoint="https://api.openai.com/v1",
        context_window=128000,
        handle=f"test-{test_id}/base-gpt-4o",  # Unique handle for base model
        provider_name=base_provider.name,
        provider_category=ProviderCategory.base,
    )
    global_base_model_2 = LLMConfig(
        model=f"gpt-3.5-turbo-{test_id}",
        model_endpoint_type="openai",
        model_endpoint="https://api.openai.com/v1",
        context_window=4096,
        handle=f"test-{test_id}/base-gpt-3.5-turbo",  # Unique handle
        provider_name=base_provider.name,
        provider_category=ProviderCategory.base,
    )
    await provider_manager.sync_provider_models_async(
        provider=base_provider,
        llm_models=[global_base_model, global_base_model_2],
        embedding_models=[],
        organization_id=None,  # Global
    )
    # Create org-scoped BYOK models with different unique handles
    org_byok_model = LLMConfig(
        model=f"gpt-4o-custom-{test_id}",
        model_endpoint_type="openai",
        model_endpoint="https://custom.openai.com/v1",
        context_window=64000,
        handle=f"test-{test_id}/byok-gpt-4o",  # Different unique handle for BYOK
        provider_name=byok_provider.name,
        provider_category=ProviderCategory.byok,
    )
    org_byok_model_2 = LLMConfig(
        model=f"gpt-4o-mini-{test_id}",
        model_endpoint_type="openai",
        model_endpoint="https://custom.openai.com/v1",
        context_window=16384,
        handle=f"test-{test_id}/byok-gpt-4o-mini",  # Unique handle
        provider_name=byok_provider.name,
        provider_category=ProviderCategory.byok,
    )
    # Sync all BYOK models at once
    await provider_manager.sync_provider_models_async(
        provider=byok_provider,
        llm_models=[org_byok_model, org_byok_model_2],
        embedding_models=[],
        organization_id=default_user.organization_id,  # Org-scoped
    )
    # Test 1: Get base model by its unique handle
    model = await provider_manager.get_model_by_handle_async(
        handle=f"test-{test_id}/base-gpt-4o",
        actor=default_user,
        model_type="llm",
    )
    assert model is not None
    assert model.organization_id is None  # Global base model
    assert model.max_context_window == 128000
    assert model.provider_id == base_provider.id
    # Test 2: Get BYOK model by its unique handle
    model_2 = await provider_manager.get_model_by_handle_async(
        handle=f"test-{test_id}/byok-gpt-4o",
        actor=default_user,
        model_type="llm",
    )
    assert model_2 is not None
    assert model_2.organization_id == default_user.organization_id  # Org-scoped BYOK
    assert model_2.max_context_window == 64000
    assert model_2.provider_id == byok_provider.id
    # Test 3: Get another BYOK model
    model_3 = await provider_manager.get_model_by_handle_async(
        handle=f"test-{test_id}/byok-gpt-4o-mini",
        actor=default_user,
        model_type="llm",
    )
    assert model_3 is not None
    assert model_3.organization_id == default_user.organization_id
    assert model_3.max_context_window == 16384
    assert model_3.provider_id == byok_provider.id
    # Test 4: Get base model
    model_4 = await provider_manager.get_model_by_handle_async(
        handle=f"test-{test_id}/base-gpt-3.5-turbo",
        actor=default_user,
        model_type="llm",
    )
    assert model_4 is not None
    assert model_4.organization_id is None  # Global model
    assert model_4.max_context_window == 4096
    assert model_4.provider_id == base_provider.id
    # Test 5: List all models to verify both base and BYOK are returned
    all_models = await provider_manager.list_models_async(
        actor=default_user,
        model_type="llm",
    )
    test_handles = {m.handle for m in all_models if test_id in m.handle}
    # Should have 4 unique models with unique handles
    assert f"test-{test_id}/base-gpt-4o" in test_handles
    assert f"test-{test_id}/base-gpt-3.5-turbo" in test_handles
    assert f"test-{test_id}/byok-gpt-4o" in test_handles
    assert f"test-{test_id}/byok-gpt-4o-mini" in test_handles
@pytest.mark.asyncio
async def test_get_model_by_handle_async_unique_handles(default_user, provider_manager):
    """Test that handles are unique within each organization scope.

    The same handle may exist once globally (organization_id=None) and once
    per organization; lookups resolve the org-scoped row in preference to the
    global one. Unknown handles resolve to None.
    """
    test_id = generate_test_id()
    # Create a base provider
    provider_create = ProviderCreate(
        name=f"test-openai-{test_id}",
        provider_type=ProviderType.openai,
        api_key="sk-test-key",
    )
    provider = await provider_manager.create_provider_async(provider_create, actor=default_user, is_byok=False)
    # Create a global model with a unique handle
    test_handle = f"test-{test_id}/gpt-4o"
    global_model = LLMConfig(
        model=f"gpt-4o-{test_id}",
        model_endpoint_type="openai",
        model_endpoint="https://api.openai.com/v1",
        context_window=128000,
        handle=test_handle,
        provider_name=provider.name,
        provider_category=ProviderCategory.base,
    )
    await provider_manager.sync_provider_models_async(
        provider=provider,
        llm_models=[global_model],
        embedding_models=[],
        organization_id=None,  # Global
    )
    # Test 1: Verify the global model was created
    model = await provider_manager.get_model_by_handle_async(
        handle=test_handle,
        actor=default_user,
        model_type="llm",
    )
    assert model is not None
    assert model.organization_id is None  # Global model
    assert model.max_context_window == 128000
    # Test 2: Create an org-scoped model with the SAME handle - should work now (different org scope)
    org_model_same_handle = LLMConfig(
        model=f"gpt-4o-custom-{test_id}",
        model_endpoint_type="openai",
        model_endpoint="https://custom.openai.com/v1",
        context_window=64000,
        handle=test_handle,  # Same handle - allowed since different org
        provider_name=provider.name,
        provider_category=ProviderCategory.byok,
    )
    # This should work now since handles are unique per org, not globally
    await provider_manager.sync_provider_models_async(
        provider=provider,
        llm_models=[org_model_same_handle],
        embedding_models=[],
        organization_id=default_user.organization_id,  # Org-scoped
    )
    # Verify we now get the org-specific model (prioritized over global)
    model_check = await provider_manager.get_model_by_handle_async(
        handle=test_handle,
        actor=default_user,
        model_type="llm",
    )
    # Should now return the org-specific model (prioritized over global);
    # the 64000 context window distinguishes it from the 128000 global row.
    assert model_check is not None
    assert model_check.organization_id == default_user.organization_id  # Org-specific
    assert model_check.max_context_window == 64000  # Org model's context window
    # Test 3: Create a model with a different unique handle - should succeed
    different_handle = f"test-{test_id}/gpt-4o-mini"
    org_model = LLMConfig(
        model=f"gpt-4o-mini-{test_id}",
        model_endpoint_type="openai",
        model_endpoint="https://custom.openai.com/v1",
        context_window=16384,
        handle=different_handle,  # Different handle
        provider_name=provider.name,
        provider_category=ProviderCategory.byok,
    )
    await provider_manager.sync_provider_models_async(
        provider=provider,
        llm_models=[org_model],
        embedding_models=[],
        organization_id=default_user.organization_id,  # Org-scoped
    )
    # Verify the org model was created
    org_model_result = await provider_manager.get_model_by_handle_async(
        handle=different_handle,
        actor=default_user,
        model_type="llm",
    )
    assert org_model_result is not None
    assert org_model_result.organization_id == default_user.organization_id
    assert org_model_result.max_context_window == 16384
    # Test 4: Get model with handle that doesn't exist - should return None
    nonexistent_model = await provider_manager.get_model_by_handle_async(
        handle=f"test-{test_id}/nonexistent",
        actor=default_user,
        model_type="llm",
    )
    assert nonexistent_model is None
@pytest.mark.asyncio
async def test_list_models_async_combines_global_and_org(default_user, provider_manager):
    """Test that list_models_async returns both global and org-scoped models with org-scoped taking precedence.

    Uses three DISTINCT handles (two global, one org-scoped), so this test
    checks that the listing merges both scopes; precedence on a shared handle
    is covered by test_get_model_by_handle_async_unique_handles.
    """
    # Create a provider in the database with unique test ID
    test_id = generate_test_id()
    provider_create = ProviderCreate(
        name=f"test-openai-{test_id}",
        provider_type=ProviderType.openai,
        api_key="sk-test-key",
    )
    provider = await provider_manager.create_provider_async(provider_create, actor=default_user, is_byok=False)
    # Create global models with unique handles
    global_models = [
        LLMConfig(
            model=f"gpt-4o-{test_id}",
            model_endpoint_type="openai",
            model_endpoint="https://api.openai.com/v1",
            context_window=128000,
            handle=f"test-{test_id}/gpt-4o",
            provider_name=provider.name,
            provider_category=ProviderCategory.base,
        ),
        LLMConfig(
            model=f"gpt-4o-mini-{test_id}",
            model_endpoint_type="openai",
            model_endpoint="https://api.openai.com/v1",
            context_window=16384,
            handle=f"test-{test_id}/gpt-4o-mini",
            provider_name=provider.name,
            provider_category=ProviderCategory.base,
        ),
    ]
    await provider_manager.sync_provider_models_async(
        provider=provider,
        llm_models=global_models,
        embedding_models=[],
        organization_id=None,  # Global
    )
    # Create org-scoped model with a different unique handle
    org_model = LLMConfig(
        model=f"gpt-4o-custom-{test_id}",
        model_endpoint_type="openai",
        model_endpoint="https://custom.openai.com/v1",
        context_window=64000,
        handle=f"test-{test_id}/gpt-4o-custom",  # Different unique handle
        provider_name=provider.name,
        provider_category=ProviderCategory.byok,
    )
    await provider_manager.sync_provider_models_async(
        provider=provider,
        llm_models=[org_model],
        embedding_models=[],
        organization_id=default_user.organization_id,  # Org-scoped
    )
    # List models
    models = await provider_manager.list_models_async(
        actor=default_user,
        model_type="llm",
        provider_id=provider.id,
    )
    # Should have 3 unique models
    handles = {m.handle for m in models}
    assert f"test-{test_id}/gpt-4o" in handles
    assert f"test-{test_id}/gpt-4o-mini" in handles
    assert f"test-{test_id}/gpt-4o-custom" in handles
    # gpt-4o should be the global version
    gpt4o = next(m for m in models if m.handle == f"test-{test_id}/gpt-4o")
    assert gpt4o.organization_id is None
    assert gpt4o.max_context_window == 128000
    # gpt-4o-mini should be the global version
    gpt4o_mini = next(m for m in models if m.handle == f"test-{test_id}/gpt-4o-mini")
    assert gpt4o_mini.organization_id is None
    assert gpt4o_mini.max_context_window == 16384
    # gpt-4o-custom should be the org-scoped version
    gpt4o_custom = next(m for m in models if m.handle == f"test-{test_id}/gpt-4o-custom")
    assert gpt4o_custom.organization_id == default_user.organization_id
    assert gpt4o_custom.max_context_window == 64000
@pytest.mark.asyncio
async def test_list_models_async_filters(default_user, provider_manager):
    """Verify that list_models_async honors the model_type and provider_id filters."""
    test_id = generate_test_id()
    # Register an OpenAI-backed provider.
    oai_provider = await provider_manager.create_provider_async(
        ProviderCreate(
            name=f"test-openai-{test_id}",
            provider_type=ProviderType.openai,
            api_key="sk-test-key",
        ),
        actor=default_user,
        is_byok=False,
    )
    # Register an Anthropic-backed provider (must be a valid provider type).
    anthropic_provider = await provider_manager.create_provider_async(
        ProviderCreate(
            name=f"test-anthropic-{test_id}",
            provider_type=ProviderType.anthropic,
            api_key="sk-test-key",
        ),
        actor=default_user,
        is_byok=False,
    )
    # Unique handles for the three models under test.
    gpt_handle = f"test-{test_id}/openai-gpt-4o"
    embed_handle = f"test-{test_id}/openai-text-embedding"
    claude_handle = f"test-{test_id}/anthropic-claude"
    gpt_config = LLMConfig(
        model=f"gpt-4o-{test_id}",
        model_endpoint_type="openai",
        model_endpoint="https://api.openai.com/v1",
        context_window=128000,
        handle=gpt_handle,
        provider_name=oai_provider.name,
        provider_category=ProviderCategory.base,
    )
    embed_config = EmbeddingConfig(
        embedding_model=f"text-embedding-3-small-{test_id}",
        embedding_endpoint_type="openai",
        embedding_endpoint="https://api.openai.com/v1",
        embedding_dim=1536,  # embedding_dim is required
        embedding_chunk_size=300,
        handle=embed_handle,
    )
    claude_config = LLMConfig(
        model=f"claude-3-5-sonnet-{test_id}",
        model_endpoint_type="anthropic",
        model_endpoint="https://api.anthropic.com",
        context_window=200000,
        handle=claude_handle,
        provider_name=anthropic_provider.name,
        provider_category=ProviderCategory.base,
    )
    await provider_manager.sync_provider_models_async(
        provider=oai_provider,
        llm_models=[gpt_config],
        embedding_models=[embed_config],
        organization_id=None,
    )
    await provider_manager.sync_provider_models_async(
        provider=anthropic_provider,
        llm_models=[claude_config],
        embedding_models=[],
        organization_id=None,
    )
    # Filtering by model_type="llm" returns only LLMs.
    llm_models = await provider_manager.list_models_async(
        actor=default_user,
        model_type="llm",
    )
    llm_handles = {m.handle for m in llm_models}
    assert {gpt_handle, claude_handle} <= llm_handles
    assert embed_handle not in llm_handles
    # Filtering by model_type="embedding" returns only embedding models.
    embedding_models = await provider_manager.list_models_async(
        actor=default_user,
        model_type="embedding",
    )
    embedding_handles = {m.handle for m in embedding_models}
    assert embed_handle in embedding_handles
    assert gpt_handle not in embedding_handles
    assert claude_handle not in embedding_handles
    # Filtering by provider_id returns both model types, but for that provider only.
    oai_models = await provider_manager.list_models_async(
        actor=default_user,
        provider_id=oai_provider.id,
    )
    oai_handles = {m.handle for m in oai_models}
    assert {gpt_handle, embed_handle} <= oai_handles
    assert claude_handle not in oai_handles
@pytest.mark.asyncio
async def test_model_metadata_persistence(default_user, provider_manager):
    """Test that model metadata like context window, streaming, and tool calling are properly persisted.

    Syncs a single model then re-reads it by handle, checking that the derived
    capability flags and the context window survive the round trip.
    """
    # Create a provider in the database
    test_id = generate_test_id()
    provider_create = ProviderCreate(
        name=f"test-openai-{test_id}",
        provider_type=ProviderType.openai,
        api_key="sk-test-key",
    )
    provider = await provider_manager.create_provider_async(provider_create, actor=default_user, is_byok=False)
    # Create model with specific metadata and unique handle
    llm_model = LLMConfig(
        model=f"gpt-4o-{test_id}",
        model_endpoint_type="openai",
        model_endpoint="https://api.openai.com/v1",
        context_window=128000,
        handle=f"test-{test_id}/gpt-4o",
        provider_name=provider.name,
        provider_category=ProviderCategory.base,
    )
    await provider_manager.sync_provider_models_async(
        provider=provider,
        llm_models=[llm_model],
        embedding_models=[],
        organization_id=None,
    )
    # Retrieve model and verify metadata
    model = await provider_manager.get_model_by_handle_async(
        handle=f"test-{test_id}/gpt-4o",
        actor=default_user,
        model_type="llm",
    )
    assert model is not None
    assert model.max_context_window == 128000
    # Use identity comparison for booleans (PEP 8); these flags must be real bools.
    assert model.supports_token_streaming is True  # OpenAI supports streaming
    assert model.supports_tool_calling is True  # Assumed true for LLMs
    assert model.model_endpoint_type == "openai"
    assert model.enabled is True
@pytest.mark.asyncio
async def test_model_enabled_filter(default_user, provider_manager):
    """Verify the enabled filter of list_models_async; enabled=True and enabled=None both include fresh models."""
    # Register a base provider to hang the models off of.
    provider = await provider_manager.create_provider_async(
        ProviderCreate(
            name=unique_provider_name("test-openai"),
            provider_type=ProviderType.openai,
            api_key="sk-test-key",
        ),
        actor=default_user,
        is_byok=False,
    )
    # Two OpenAI chat models, built from a small (name, context window) spec table.
    specs = [("gpt-4o", 128000), ("gpt-4o-mini", 16384)]
    llm_configs = [
        LLMConfig(
            model=name,
            model_endpoint_type="openai",
            model_endpoint="https://api.openai.com/v1",
            context_window=window,
            handle=f"openai/{name}",
            provider_name="openai",
            provider_category=ProviderCategory.base,
        )
        for name, window in specs
    ]
    await provider_manager.sync_provider_models_async(
        provider=provider,
        llm_models=llm_configs,
        embedding_models=[],
        organization_id=None,
    )
    expected_handles = {"openai/gpt-4o", "openai/gpt-4o-mini"}
    # Freshly synced models are enabled by default, so enabled=True must return them.
    enabled_models = await provider_manager.list_models_async(
        actor=default_user,
        enabled=True,
    )
    assert expected_handles <= {m.handle for m in enabled_models}
    # enabled=None disables the filter entirely, so they appear here as well.
    all_models = await provider_manager.list_models_async(
        actor=default_user,
        enabled=None,
    )
    assert expected_handles <= {m.handle for m in all_models}
@pytest.mark.asyncio
async def test_get_llm_config_from_handle_uses_cached_models(default_user):
    """Test that get_llm_config_from_handle_async uses cached models from database instead of querying provider.

    The provider manager is fully mocked, so the server must resolve the handle
    purely from the mocked cached config rather than from a live provider.
    """
    from letta.server.server import SyncServer
    server = SyncServer(init_with_default_org_and_user=False)
    server.default_user = default_user
    # Mock the provider manager methods.
    # (The original test also built an unused OpenAIProvider instance here; removed as dead code.)
    server.provider_manager = AsyncMock()
    # Mock get_llm_config_from_handle to return cached LLM config
    mock_llm_config = LLMConfig(
        model="gpt-4o",
        model_endpoint_type="openai",
        model_endpoint="https://custom.openai.com/v1",
        context_window=128000,
        handle="openai/gpt-4o",
        provider_name="openai",
        provider_category=ProviderCategory.base,
    )
    server.provider_manager.get_llm_config_from_handle.return_value = mock_llm_config
    # Get LLM config - should use cached data
    llm_config = await server.get_llm_config_from_handle_async(
        actor=default_user,
        handle="openai/gpt-4o",
        context_window_limit=100000,
    )
    # Verify it used the cached model data
    assert llm_config.model == "gpt-4o"
    assert llm_config.model_endpoint == "https://custom.openai.com/v1"
    assert llm_config.context_window == 100000  # Limited by context_window_limit
    assert llm_config.handle == "openai/gpt-4o"
    assert llm_config.provider_name == "openai"
    # Verify the manager (not a live provider) was consulted exactly once.
    server.provider_manager.get_llm_config_from_handle.assert_called_once_with(
        handle="openai/gpt-4o",
        actor=default_user,
    )
@pytest.mark.asyncio
async def test_get_embedding_config_from_handle_uses_cached_models(default_user):
    """Resolving an embedding handle should use the cached config held by the provider manager, not a live provider."""
    from letta.server.server import SyncServer
    server = SyncServer(init_with_default_org_and_user=False)
    server.default_user = default_user
    # Replace the provider manager wholesale with a mock.
    server.provider_manager = AsyncMock()
    # The "cached" embedding config the mock hands back.
    cached_config = EmbeddingConfig(
        embedding_model="text-embedding-3-small",
        embedding_endpoint_type="openai",
        embedding_endpoint="https://custom.openai.com/v1",
        embedding_dim=1536,
        embedding_chunk_size=500,
        handle="openai/text-embedding-3-small",
    )
    server.provider_manager.get_embedding_config_from_handle.return_value = cached_config
    # Resolve the handle; the cached data should flow through unchanged.
    resolved = await server.get_embedding_config_from_handle_async(
        actor=default_user,
        handle="openai/text-embedding-3-small",
        embedding_chunk_size=500,
    )
    assert resolved.embedding_model == "text-embedding-3-small"
    assert resolved.embedding_endpoint == "https://custom.openai.com/v1"
    assert resolved.embedding_chunk_size == 500
    assert resolved.handle == "openai/text-embedding-3-small"
    # (EmbeddingConfig has no provider_name field, unlike LLMConfig.)
    # Confirm the manager was consulted exactly once with the expected arguments.
    server.provider_manager.get_embedding_config_from_handle.assert_called_once_with(
        handle="openai/text-embedding-3-small",
        actor=default_user,
    )
@pytest.mark.asyncio
async def test_server_sync_provider_models_on_init(default_user):
    """The server should push each enabled provider's model lists into the database during initialization."""
    from letta.server.server import SyncServer
    server = SyncServer(init_with_default_org_and_user=False)
    server.default_user = default_user
    # Stub "letta" provider exposing a single LLM and no embeddings.
    letta_stub = AsyncMock()
    letta_stub.name = "letta"
    letta_stub.list_llm_models_async.return_value = [
        LLMConfig(
            model="letta-model",
            model_endpoint_type="openai",  # must be a valid endpoint type
            model_endpoint="https://api.letta.com",
            context_window=8192,
            handle="letta/letta-model",
            provider_name="letta",
            provider_category=ProviderCategory.base,
        )
    ]
    letta_stub.list_embedding_models_async.return_value = []
    # Stub "openai" provider exposing one LLM and one embedding model.
    openai_stub = AsyncMock()
    openai_stub.name = "openai"
    openai_stub.list_llm_models_async.return_value = [
        LLMConfig(
            model="gpt-4o",
            model_endpoint_type="openai",
            model_endpoint="https://api.openai.com/v1",
            context_window=128000,
            handle="openai/gpt-4o",
            provider_name="openai",
            provider_category=ProviderCategory.base,
        )
    ]
    openai_stub.list_embedding_models_async.return_value = [
        EmbeddingConfig(
            embedding_model="text-embedding-3-small",
            embedding_endpoint_type="openai",
            embedding_endpoint="https://api.openai.com/v1",
            embedding_dim=1536,  # embedding_dim is required
            embedding_chunk_size=300,
            handle="openai/text-embedding-3-small",
        )
    ]
    server._enabled_providers = [letta_stub, openai_stub]
    # Mock out the provider manager and have it report DB rows with known ids.
    server.provider_manager = AsyncMock()
    db_letta = MagicMock()
    db_letta.id = "letta_provider_id"
    db_letta.name = "letta"
    db_openai = MagicMock()
    db_openai.id = "openai_provider_id"
    db_openai.name = "openai"
    server.provider_manager.list_providers_async.return_value = [db_letta, db_openai]
    # Run the sync under test.
    await server._sync_provider_models_async()
    sync_mock = server.provider_manager.sync_provider_models_async
    # One sync call per enabled provider.
    assert sync_mock.call_count == 2
    first_call, second_call = sync_mock.call_args_list
    # letta: one LLM, no embeddings, synced globally (organization_id=None).
    assert first_call.kwargs["provider"].id == "letta_provider_id"
    assert len(first_call.kwargs["llm_models"]) == 1
    assert len(first_call.kwargs["embedding_models"]) == 0
    assert first_call.kwargs["organization_id"] is None
    # openai: one LLM, one embedding, synced globally.
    assert second_call.kwargs["provider"].id == "openai_provider_id"
    assert len(second_call.kwargs["llm_models"]) == 1
    assert len(second_call.kwargs["embedding_models"]) == 1
    assert second_call.kwargs["organization_id"] is None
@pytest.mark.asyncio
async def test_provider_model_unique_constraint_per_org(default_user, provider_manager, org_manager, default_organization):
    """Test that provider models have unique handles within each organization (not globally).

    Two different organizations may each own a model row with the same handle;
    handle lookups scoped by the acting user's org must return that org's row.
    """
    # Create a second organization
    from letta.schemas.organization import Organization
    org2 = Organization(name="Test Org 2")
    org2 = await org_manager.create_organization_async(org2)
    # Create a user for the second organization
    from letta.services.user_manager import UserManager
    user_manager = UserManager()
    # Note: create_default_actor_async has a bug where it ignores the org_id parameter
    # Create a user properly for org2
    from letta.schemas.user import User
    org2_user = User(name="Test User Org2", organization_id=org2.id)
    org2_user = await user_manager.create_actor_async(org2_user)
    # Create a global base provider
    provider_create = ProviderCreate(
        name=unique_provider_name("test-openai"),
        provider_type=ProviderType.openai,
        api_key="sk-test-key",
    )
    provider = await provider_manager.create_provider_async(provider_create, actor=default_user, is_byok=False)
    # Create model configuration with a unique handle for this test
    import uuid
    test_id = uuid.uuid4().hex[:8]
    test_handle = f"test-{test_id}/gpt-4o"
    model_org1 = LLMConfig(
        model=f"gpt-4o-org1-{test_id}",  # Unique model name per org
        model_endpoint_type="openai",
        model_endpoint="https://api.openai.com/v1",
        context_window=128000,
        handle=test_handle,
        provider_name=provider.name,
        provider_category=ProviderCategory.base,
    )
    # Sync for default organization
    await provider_manager.sync_provider_models_async(
        provider=provider,
        llm_models=[model_org1],
        embedding_models=[],
        organization_id=default_organization.id,
    )
    # Create model with same handle but different model name for org2
    model_org2 = LLMConfig(
        model=f"gpt-4o-org2-{test_id}",  # Different model name for org2
        model_endpoint_type="openai",
        model_endpoint="https://api.openai.com/v1",
        context_window=128000,
        handle=test_handle,  # Same handle - now allowed since handles are unique per org
        provider_name=provider.name,
        provider_category=ProviderCategory.base,
    )
    # Sync for organization 2 with same handle - now allowed since handles are unique per org
    await provider_manager.sync_provider_models_async(
        provider=provider,
        llm_models=[model_org2],
        embedding_models=[],
        organization_id=org2.id,
    )
    # Each organization should have its own model with the same handle;
    # the lookup is scoped by the actor's organization.
    org1_model = await provider_manager.get_model_by_handle_async(
        handle=test_handle,
        actor=default_user,
        model_type="llm",
    )
    org2_model = await provider_manager.get_model_by_handle_async(
        handle=test_handle,
        actor=org2_user,
        model_type="llm",
    )
    # Both organizations should have their own models with the same handle
    assert org1_model is not None, "Model should exist for org1"
    assert org2_model is not None, "Model should exist for org2"
    # Each model should belong to its respective organization
    assert org1_model.organization_id == default_organization.id
    assert org2_model.organization_id == org2.id
    # They should have the same handle but different IDs (two distinct rows)
    assert org1_model.handle == org2_model.handle == test_handle
    assert org1_model.id != org2_model.id
    # Now create a model with a different handle for org2
    test_handle_org2 = f"test-{test_id}/gpt-4o-org2"
    model_org2 = LLMConfig(
        model="gpt-4o",
        model_endpoint_type="openai",
        model_endpoint="https://api.openai.com/v1",
        context_window=128000,
        handle=test_handle_org2,  # Different handle
        provider_name=provider.name,
        provider_category=ProviderCategory.base,
    )
    # Sync for organization 2 with different handle
    await provider_manager.sync_provider_models_async(
        provider=provider,
        llm_models=[model_org2],
        embedding_models=[],
        organization_id=org2.id,
    )
    # Now org2 should see their model under the new handle as well
    org2_model_new = await provider_manager.get_model_by_handle_async(
        handle=test_handle_org2,
        actor=org2_user,
        model_type="llm",
    )
    assert org2_model_new is not None
    assert org2_model_new.handle == test_handle_org2
    assert org2_model_new.organization_id == org2.id
@pytest.mark.asyncio
async def test_sync_provider_models_add_remove_models(default_user, provider_manager):
    """
    Test that sync_provider_models_async correctly handles:
    1. Adding new models to an existing provider
    2. Removing models from an existing provider
    3. Not dropping non-base (BYOK) provider models during sync

    The flow is three sequential syncs against the same base provider, each
    followed by assertions over list_models_async, with a BYOK provider's
    model acting as a canary that must never be affected.
    """
    # Create a base provider
    test_id = generate_test_id()
    provider_create = ProviderCreate(
        name=f"test-openai-{test_id}",
        provider_type=ProviderType.openai,
        api_key="sk-test-key",
    )
    base_provider = await provider_manager.create_provider_async(provider_create, actor=default_user, is_byok=False)
    # Create a BYOK provider with same provider type
    byok_provider_create = ProviderCreate(
        name=f"test-openai-byok-{test_id}",
        provider_type=ProviderType.openai,
        api_key="sk-byok-key",
    )
    byok_provider = await provider_manager.create_provider_async(byok_provider_create, actor=default_user, is_byok=True)
    # Initial sync: Create initial base models
    initial_base_models = [
        LLMConfig(
            model=f"gpt-4o-{test_id}",
            model_endpoint_type="openai",
            model_endpoint="https://api.openai.com/v1",
            context_window=128000,
            handle=f"test-{test_id}/gpt-4o",
            provider_name=base_provider.name,
            provider_category=ProviderCategory.base,
        ),
        LLMConfig(
            model=f"gpt-4o-mini-{test_id}",
            model_endpoint_type="openai",
            model_endpoint="https://api.openai.com/v1",
            context_window=16384,
            handle=f"test-{test_id}/gpt-4o-mini",
            provider_name=base_provider.name,
            provider_category=ProviderCategory.base,
        ),
    ]
    await provider_manager.sync_provider_models_async(
        provider=base_provider,
        llm_models=initial_base_models,
        embedding_models=[],
        organization_id=None,  # Global base models
    )
    # Create BYOK models (should not be affected by base provider sync)
    byok_models = [
        LLMConfig(
            model=f"custom-gpt-{test_id}",
            model_endpoint_type="openai",
            model_endpoint="https://custom.api.com/v1",
            context_window=64000,
            handle=f"test-{test_id}/custom-gpt",
            provider_name=byok_provider.name,
            provider_category=ProviderCategory.byok,
        ),
    ]
    await provider_manager.sync_provider_models_async(
        provider=byok_provider,
        llm_models=byok_models,
        embedding_models=[],
        organization_id=default_user.organization_id,  # Org-scoped BYOK
    )
    # Verify initial state: all 3 models exist
    all_models = await provider_manager.list_models_async(
        actor=default_user,
        model_type="llm",
    )
    handles = {m.handle for m in all_models}
    assert f"test-{test_id}/gpt-4o" in handles
    assert f"test-{test_id}/gpt-4o-mini" in handles
    assert f"test-{test_id}/custom-gpt" in handles
    # Second sync: Add a new model and remove one existing model.
    # Sync semantics: the supplied list is the full desired state for this
    # provider, so omitting a model deletes it.
    updated_base_models = [
        # Keep gpt-4o
        LLMConfig(
            model=f"gpt-4o-{test_id}",
            model_endpoint_type="openai",
            model_endpoint="https://api.openai.com/v1",
            context_window=128000,
            handle=f"test-{test_id}/gpt-4o",
            provider_name=base_provider.name,
            provider_category=ProviderCategory.base,
        ),
        # Remove gpt-4o-mini (not in this list)
        # Add new model gpt-4-turbo
        LLMConfig(
            model=f"gpt-4-turbo-{test_id}",
            model_endpoint_type="openai",
            model_endpoint="https://api.openai.com/v1",
            context_window=128000,
            handle=f"test-{test_id}/gpt-4-turbo",
            provider_name=base_provider.name,
            provider_category=ProviderCategory.base,
        ),
    ]
    await provider_manager.sync_provider_models_async(
        provider=base_provider,
        llm_models=updated_base_models,
        embedding_models=[],
        organization_id=None,  # Global base models
    )
    # Verify updated state
    all_models_after = await provider_manager.list_models_async(
        actor=default_user,
        model_type="llm",
    )
    handles_after = {m.handle for m in all_models_after}
    # gpt-4o should still exist (kept)
    assert f"test-{test_id}/gpt-4o" in handles_after
    # gpt-4o-mini should be removed
    assert f"test-{test_id}/gpt-4o-mini" not in handles_after
    # gpt-4-turbo should be added
    assert f"test-{test_id}/gpt-4-turbo" in handles_after
    # BYOK model should NOT be affected by base provider sync
    assert f"test-{test_id}/custom-gpt" in handles_after
    # Verify the BYOK model still belongs to the correct provider
    byok_model = await provider_manager.get_model_by_handle_async(
        handle=f"test-{test_id}/custom-gpt",
        actor=default_user,
        model_type="llm",
    )
    assert byok_model is not None
    assert byok_model.provider_id == byok_provider.id
    assert byok_model.organization_id == default_user.organization_id
    # Third sync: Remove all base provider models
    await provider_manager.sync_provider_models_async(
        provider=base_provider,
        llm_models=[],  # Empty list - remove all models
        embedding_models=[],
        organization_id=None,
    )
    # Verify all base models are removed
    all_models_final = await provider_manager.list_models_async(
        actor=default_user,
        model_type="llm",
    )
    handles_final = {m.handle for m in all_models_final}
    # All base provider models should be gone
    assert f"test-{test_id}/gpt-4o" not in handles_final
    assert f"test-{test_id}/gpt-4-turbo" not in handles_final
    # But BYOK model should still exist (the canary survives)
    assert f"test-{test_id}/custom-gpt" in handles_final
@pytest.mark.asyncio
async def test_sync_provider_models_mixed_llm_and_embedding(default_user, provider_manager):
    """
    Test that sync_provider_models_async correctly handles adding/removing both LLM and embedding models,
    ensuring that changes to one model type don't affect the other.

    Two sequential syncs: the first establishes one LLM and one embedding
    model; the second grows the LLM list while clearing the embedding list.
    """
    test_id = generate_test_id()
    provider_create = ProviderCreate(
        name=f"test-openai-{test_id}",
        provider_type=ProviderType.openai,
        api_key="sk-test-key",
    )
    provider = await provider_manager.create_provider_async(provider_create, actor=default_user, is_byok=False)
    # Initial sync: LLM and embedding models
    initial_llm_models = [
        LLMConfig(
            model=f"gpt-4o-{test_id}",
            model_endpoint_type="openai",
            model_endpoint="https://api.openai.com/v1",
            context_window=128000,
            handle=f"test-{test_id}/gpt-4o",
            provider_name=provider.name,
            provider_category=ProviderCategory.base,
        ),
    ]
    initial_embedding_models = [
        EmbeddingConfig(
            embedding_model=f"text-embedding-3-small-{test_id}",
            embedding_endpoint_type="openai",
            embedding_endpoint="https://api.openai.com/v1",
            embedding_dim=1536,
            embedding_chunk_size=300,
            handle=f"test-{test_id}/text-embedding-3-small",
        ),
    ]
    await provider_manager.sync_provider_models_async(
        provider=provider,
        llm_models=initial_llm_models,
        embedding_models=initial_embedding_models,
        organization_id=None,
    )
    # Verify initial state: exactly one of each model type for this provider
    llm_models = await provider_manager.list_models_async(
        actor=default_user,
        model_type="llm",
        provider_id=provider.id,
    )
    embedding_models = await provider_manager.list_models_async(
        actor=default_user,
        model_type="embedding",
        provider_id=provider.id,
    )
    assert len([m for m in llm_models if m.handle == f"test-{test_id}/gpt-4o"]) == 1
    assert len([m for m in embedding_models if m.handle == f"test-{test_id}/text-embedding-3-small"]) == 1
    # Second sync: Add new LLM, remove embedding
    updated_llm_models = [
        # Keep existing
        LLMConfig(
            model=f"gpt-4o-{test_id}",
            model_endpoint_type="openai",
            model_endpoint="https://api.openai.com/v1",
            context_window=128000,
            handle=f"test-{test_id}/gpt-4o",
            provider_name=provider.name,
            provider_category=ProviderCategory.base,
        ),
        # Add new
        LLMConfig(
            model=f"gpt-4o-mini-{test_id}",
            model_endpoint_type="openai",
            model_endpoint="https://api.openai.com/v1",
            context_window=16384,
            handle=f"test-{test_id}/gpt-4o-mini",
            provider_name=provider.name,
            provider_category=ProviderCategory.base,
        ),
    ]
    await provider_manager.sync_provider_models_async(
        provider=provider,
        llm_models=updated_llm_models,
        embedding_models=[],  # Remove all embeddings
        organization_id=None,
    )
    # Verify updated state
    llm_models_after = await provider_manager.list_models_async(
        actor=default_user,
        model_type="llm",
        provider_id=provider.id,
    )
    embedding_models_after = await provider_manager.list_models_async(
        actor=default_user,
        model_type="embedding",
        provider_id=provider.id,
    )
    llm_handles = {m.handle for m in llm_models_after}
    embedding_handles = {m.handle for m in embedding_models_after}
    # Both LLM models should exist (embedding removal must not touch LLMs)
    assert f"test-{test_id}/gpt-4o" in llm_handles
    assert f"test-{test_id}/gpt-4o-mini" in llm_handles
    # Embedding should be removed
    assert f"test-{test_id}/text-embedding-3-small" not in embedding_handles
@pytest.mark.asyncio
async def test_provider_name_uniqueness_within_org(default_user, provider_manager):
    """Test that provider names must be unique within an organization, including conflicts with base provider names."""
    test_id = generate_test_id()
    # Create a base provider with a specific name
    base_provider_name = f"test-provider-{test_id}"
    base_provider_create = ProviderCreate(
        name=base_provider_name,
        provider_type=ProviderType.openai,
        api_key="sk-test-key",
    )
    await provider_manager.create_provider_async(base_provider_create, actor=default_user, is_byok=False)
    # Test 1: Attempt to create another base provider with the same name - should fail with ValueError.
    # Build the request object OUTSIDE the raises block so that only the manager
    # call may raise; a validation error from the schema constructor must not be
    # mistaken for the uniqueness violation under test.
    duplicate_provider_create = ProviderCreate(
        name=base_provider_name,  # Same name
        provider_type=ProviderType.anthropic,  # Different type
        api_key="sk-different-key",
    )
    with pytest.raises(ValueError, match="already exists"):
        await provider_manager.create_provider_async(duplicate_provider_create, actor=default_user, is_byok=False)
    # Test 2: Create a BYOK provider with the same name as a base provider - should fail with ValueError
    byok_duplicate_create = ProviderCreate(
        name=base_provider_name,  # Same name as base provider
        provider_type=ProviderType.openai,
        api_key="sk-byok-key",
    )
    with pytest.raises(ValueError, match="conflicts with an existing base provider"):
        await provider_manager.create_provider_async(byok_duplicate_create, actor=default_user, is_byok=True)
    # Test 3: Create a provider with a different name - should succeed
    different_provider_create = ProviderCreate(
        name=f"different-provider-{test_id}",
        provider_type=ProviderType.openai,
        api_key="sk-another-key",
    )
    different_provider = await provider_manager.create_provider_async(different_provider_create, actor=default_user, is_byok=False)
    assert different_provider is not None
    assert different_provider.name == f"different-provider-{test_id}"
@pytest.mark.asyncio
async def test_model_name_uniqueness_within_provider(default_user, provider_manager):
    """Test that model names must be unique within a provider.

    Duplicate-name handling in sync_provider_models_async is implementation
    defined (dedupe or raise), so this test accepts either outcome; it then
    verifies that the same model name is allowed under a different provider.
    """
    test_id = generate_test_id()
    # Create a provider
    provider_create = ProviderCreate(
        name=f"test-provider-{test_id}",
        provider_type=ProviderType.openai,
        api_key="sk-test-key",
    )
    provider = await provider_manager.create_provider_async(provider_create, actor=default_user, is_byok=False)
    # Create initial models with unique names
    initial_models = [
        LLMConfig(
            model=f"model-1-{test_id}",
            model_endpoint_type="openai",
            model_endpoint="https://api.openai.com/v1",
            context_window=4096,
            handle=f"test-{test_id}/model-1",
            provider_name=provider.name,
            provider_category=ProviderCategory.base,
        ),
        LLMConfig(
            model=f"model-2-{test_id}",
            model_endpoint_type="openai",
            model_endpoint="https://api.openai.com/v1",
            context_window=8192,
            handle=f"test-{test_id}/model-2",
            provider_name=provider.name,
            provider_category=ProviderCategory.base,
        ),
    ]
    await provider_manager.sync_provider_models_async(
        provider=provider,
        llm_models=initial_models,
        embedding_models=[],
        organization_id=None,
    )
    # Test 1: Try to sync models with duplicate names within the same provider - should be idempotent
    duplicate_models = [
        LLMConfig(
            model=f"model-1-{test_id}",  # Same model name
            model_endpoint_type="openai",
            model_endpoint="https://api.openai.com/v1",
            context_window=4096,
            handle=f"test-{test_id}/model-1",  # Same handle
            provider_name=provider.name,
            provider_category=ProviderCategory.base,
        ),
        LLMConfig(
            model=f"model-1-{test_id}",  # Duplicate model name in same sync
            model_endpoint_type="openai",
            model_endpoint="https://api.openai.com/v1",
            context_window=16384,  # Different settings
            handle=f"test-{test_id}/model-1-duplicate",  # Different handle
            provider_name=provider.name,
            provider_category=ProviderCategory.base,
        ),
    ]
    # This should raise an error or handle the duplication appropriately
    # The behavior depends on the implementation - it might dedupe or raise an error
    try:
        await provider_manager.sync_provider_models_async(
            provider=provider,
            llm_models=duplicate_models,
            embedding_models=[],
            organization_id=None,
        )
        # If it doesn't raise an error, verify that we don't have duplicate models
        all_models = await provider_manager.list_models_async(
            actor=default_user,
            model_type="llm",
            provider_id=provider.id,
        )
        # Count how many times each model name appears
        model_names = [m.name for m in all_models if test_id in m.name]
        model_1_count = model_names.count(f"model-1-{test_id}")
        # At most the two distinct-handle entries may survive dedupe; never more
        assert model_1_count <= 2, f"Found {model_1_count} models with name 'model-1-{test_id}', expected at most 2"
    except (UniqueConstraintViolationError, ValueError):
        # This is also acceptable behavior - raising an error for duplicate model names
        pass
    # Test 2: Different providers can have models with the same name
    provider_2_create = ProviderCreate(
        name=f"test-provider-2-{test_id}",
        provider_type=ProviderType.openai,
        api_key="sk-test-key-2",
    )
    provider_2 = await provider_manager.create_provider_async(provider_2_create, actor=default_user, is_byok=False)
    # Create a model with the same name but in a different provider - should succeed
    same_name_different_provider = [
        LLMConfig(
            model=f"model-1-{test_id}",  # Same model name as in provider 1
            model_endpoint_type="openai",
            model_endpoint="https://api.openai.com/v1",
            context_window=4096,
            handle=f"test-{test_id}/provider2-model-1",  # Different handle
            provider_name=provider_2.name,
            provider_category=ProviderCategory.base,
        ),
    ]
    await provider_manager.sync_provider_models_async(
        provider=provider_2,
        llm_models=same_name_different_provider,
        embedding_models=[],
        organization_id=None,
    )
    # Verify the model was created under the second provider
    provider_2_models = await provider_manager.list_models_async(
        actor=default_user,
        model_type="llm",
        provider_id=provider_2.id,
    )
    assert any(m.name == f"model-1-{test_id}" for m in provider_2_models)
@pytest.mark.asyncio
async def test_handle_uniqueness_per_org(default_user, provider_manager):
    """Exercise handle-uniqueness semantics for global and org-scoped models.

    What the assertions below actually establish:
    1. Two *global* models from different providers may share a handle, as long
       as their model names differ (the per-provider model-name constraint).
    2. An org-scoped model may reuse a global model's handle; lookups by a user
       of that org resolve to the org-scoped model (org models are prioritized).
    3. A model with a brand-new handle syncs and resolves normally.
    4. Re-syncing an already-used handle from a different provider is
       idempotent: the originally synced model is left unchanged.
    """
    test_id = generate_test_id()
    # Two global (non-BYOK) providers with different provider types.
    provider_1_create = ProviderCreate(
        name=f"test-provider-1-{test_id}",
        provider_type=ProviderType.openai,
        api_key="sk-test-key",
    )
    provider_1 = await provider_manager.create_provider_async(provider_1_create, actor=default_user, is_byok=False)
    provider_2_create = ProviderCreate(
        name=f"test-provider-2-{test_id}",
        provider_type=ProviderType.anthropic,
        api_key="sk-test-key-2",
    )
    provider_2 = await provider_manager.create_provider_async(provider_2_create, actor=default_user, is_byok=False)
    # A global base model that claims `base_handle` first.
    base_handle = f"test-{test_id}/unique-handle"
    base_model = LLMConfig(
        model=f"base-model-{test_id}",
        model_endpoint_type="openai",
        model_endpoint="https://api.openai.com/v1",
        context_window=4096,
        handle=base_handle,
        provider_name=provider_1.name,
        provider_category=ProviderCategory.base,
    )
    await provider_manager.sync_provider_models_async(
        provider=provider_1,
        llm_models=[base_model],
        embedding_models=[],
        organization_id=None,  # Global (no org scope)
    )
    # Test 1: another *global* model with the same handle from a different
    # provider. Allowed, because the model name differs (the uniqueness
    # constraint is on provider + model name, not on the handle alone).
    duplicate_handle_model = LLMConfig(
        model=f"different-model-{test_id}",  # Different model name (required for provider uniqueness)
        model_endpoint_type="anthropic",
        model_endpoint="https://api.anthropic.com",
        context_window=8192,
        handle=base_handle,  # Same handle - allowed since different model name
        provider_name=provider_2.name,
        provider_category=ProviderCategory.base,
    )
    # This will create another global model with same handle but different provider/model name.
    await provider_manager.sync_provider_models_async(
        provider=provider_2,
        llm_models=[duplicate_handle_model],
        embedding_models=[],
        organization_id=None,  # Global
    )
    # Handle resolution now has two global candidates; it returns one of them
    # (which one is unspecified here, so only global scope is asserted).
    model = await provider_manager.get_model_by_handle_async(
        handle=base_handle,
        actor=default_user,
        model_type="llm",
    )
    assert model is not None
    assert model.organization_id is None  # Global model
    # Test 2: an *org-scoped* model may also reuse the global handle.
    org_model_same_handle = LLMConfig(
        model=f"org-model-{test_id}",
        model_endpoint_type="openai",
        model_endpoint="https://custom.openai.com/v1",
        context_window=16384,
        handle=base_handle,  # Same handle as global model - allowed for an org-scoped model
        provider_name=provider_1.name,
        provider_category=ProviderCategory.byok,
    )
    await provider_manager.sync_provider_models_async(
        provider=provider_1,
        llm_models=[org_model_same_handle],
        embedding_models=[],
        organization_id=default_user.organization_id,  # Org-scoped
    )
    # A user in that org now resolves to the org-specific model, not the global
    # ones - the distinct context_window (16384) proves which model was chosen.
    model = await provider_manager.get_model_by_handle_async(
        handle=base_handle,
        actor=default_user,
        model_type="llm",
    )
    assert model is not None
    assert model.organization_id == default_user.organization_id  # Org-specific model (prioritized)
    assert model.max_context_window == 16384  # Org model's context window
    # Test 3: a model with a completely fresh handle syncs and resolves normally.
    unique_org_handle = f"test-{test_id}/org-unique-handle"
    org_model_1 = LLMConfig(
        model=f"org-model-1-{test_id}",
        model_endpoint_type="openai",
        model_endpoint="https://api.openai.com/v1",
        context_window=8192,
        handle=unique_org_handle,
        provider_name=provider_1.name,
        provider_category=ProviderCategory.byok,
    )
    await provider_manager.sync_provider_models_async(
        provider=provider_1,
        llm_models=[org_model_1],
        embedding_models=[],
        organization_id=default_user.organization_id,
    )
    # Verify the model was created and resolves with its own attributes.
    model = await provider_manager.get_model_by_handle_async(
        handle=unique_org_handle,
        actor=default_user,
        model_type="llm",
    )
    assert model is not None
    assert model.organization_id == default_user.organization_id
    assert model.max_context_window == 8192
    # Test 4: re-syncing the same handle from a *different* provider must not
    # replace the existing model - the sync is idempotent.
    org_model_2 = LLMConfig(
        model=f"org-model-2-{test_id}",
        model_endpoint_type="anthropic",
        model_endpoint="https://api.anthropic.com",
        context_window=16384,
        handle=unique_org_handle,  # Same handle as org_model_1
        provider_name=provider_2.name,
        provider_category=ProviderCategory.byok,
    )
    # This should be idempotent (no error, no overwrite).
    await provider_manager.sync_provider_models_async(
        provider=provider_2,
        llm_models=[org_model_2],
        embedding_models=[],
        organization_id=default_user.organization_id,  # Same or different org doesn't matter
    )
    # The handle still resolves to the originally synced model.
    model = await provider_manager.get_model_by_handle_async(
        handle=unique_org_handle,
        actor=default_user,
        model_type="llm",
    )
    assert model is not None
    assert model.provider_id == provider_1.id  # Still original provider
    assert model.max_context_window == 8192  # Still original
@pytest.mark.asyncio
async def test_delete_provider_cascades_to_models(default_user, provider_manager, monkeypatch):
    """Deleting a provider must also soft-delete every model synced to it."""
    test_id = generate_test_id()

    # Replace the default-model sync with a no-op so no external API is hit;
    # the models under test are created explicitly below.
    async def _noop_sync(provider, actor):
        pass

    monkeypatch.setattr(provider_manager, "_sync_default_models_for_provider", _noop_sync)

    # Step 1: a BYOK (org-scoped) provider, which the actor is allowed to delete.
    byok_provider = await provider_manager.create_provider_async(
        ProviderCreate(
            name=f"test-cascade-{test_id}",
            provider_type=ProviderType.openai,
            api_key="sk-test-key",
        ),
        actor=default_user,
        is_byok=True,
    )

    # Step 2: attach two LLM models and one embedding model to the provider.
    llm_configs = [
        LLMConfig(
            model=f"gpt-4o-{test_id}",
            model_endpoint_type="openai",
            model_endpoint="https://api.openai.com/v1",
            context_window=128000,
            handle=f"test-{test_id}/gpt-4o",
            provider_name=byok_provider.name,
            provider_category=ProviderCategory.byok,
        ),
        LLMConfig(
            model=f"gpt-4o-mini-{test_id}",
            model_endpoint_type="openai",
            model_endpoint="https://api.openai.com/v1",
            context_window=16384,
            handle=f"test-{test_id}/gpt-4o-mini",
            provider_name=byok_provider.name,
            provider_category=ProviderCategory.byok,
        ),
    ]
    embedding_configs = [
        EmbeddingConfig(
            embedding_model=f"text-embedding-3-small-{test_id}",
            embedding_endpoint_type="openai",
            embedding_endpoint="https://api.openai.com/v1",
            embedding_dim=1536,
            embedding_chunk_size=300,
            handle=f"test-{test_id}/text-embedding-3-small",
        ),
    ]
    await provider_manager.sync_provider_models_async(
        provider=byok_provider,
        llm_models=llm_configs,
        embedding_models=embedding_configs,
        organization_id=default_user.organization_id,  # BYOK providers are org-scoped
    )

    # Step 3: sanity-check that all three models are visible before deletion.
    llm_handles_before = {
        m.handle
        for m in await provider_manager.list_models_async(
            actor=default_user, model_type="llm", provider_id=byok_provider.id
        )
    }
    embedding_handles_before = {
        m.handle
        for m in await provider_manager.list_models_async(
            actor=default_user, model_type="embedding", provider_id=byok_provider.id
        )
    }
    assert f"test-{test_id}/gpt-4o" in llm_handles_before
    assert f"test-{test_id}/gpt-4o-mini" in llm_handles_before
    assert f"test-{test_id}/text-embedding-3-small" in embedding_handles_before

    # Step 4: delete the provider.
    await provider_manager.delete_provider_by_id_async(byok_provider.id, actor=default_user)

    # Step 5: none of its models may be listed any more (soft-deleted).
    llm_handles_after = {
        m.handle for m in await provider_manager.list_models_async(actor=default_user, model_type="llm")
    }
    embedding_handles_after = {
        m.handle for m in await provider_manager.list_models_async(actor=default_user, model_type="embedding")
    }
    assert f"test-{test_id}/gpt-4o" not in llm_handles_after
    assert f"test-{test_id}/gpt-4o-mini" not in llm_handles_after
    assert f"test-{test_id}/text-embedding-3-small" not in embedding_handles_after

    # Step 6: the provider itself is gone as well.
    remaining = await provider_manager.list_providers_async(actor=default_user, name=f"test-cascade-{test_id}")
    assert len(remaining) == 0
@pytest.mark.asyncio
async def test_get_llm_config_from_handle_includes_max_tokens(default_user, provider_manager):
    """Test that get_llm_config_from_handle includes max_tokens from provider's get_default_max_output_tokens.

    Checks that:
    1. max_tokens is populated on the LLMConfig resolved from a handle
    2. the value comes from the provider's get_default_max_output_tokens method
    3. OpenAI's default (16384) applies to both gpt-4o- and gpt-5-style models
    """
    test_id = generate_test_id()

    # An OpenAI provider to own the test models.
    provider = await provider_manager.create_provider_async(
        ProviderCreate(
            name=f"test-openai-{test_id}",
            provider_type=ProviderType.openai,
            api_key="sk-test-key",
            base_url="https://api.openai.com/v1",
        ),
        actor=default_user,
        is_byok=False,
    )

    async def _sync_one(model_name, handle, context_window):
        # Sync a single global LLM model under the test provider.
        await provider_manager.sync_provider_models_async(
            provider=provider,
            llm_models=[
                LLMConfig(
                    model=model_name,
                    model_endpoint_type="openai",
                    model_endpoint="https://api.openai.com/v1",
                    context_window=context_window,
                    handle=handle,
                    provider_name=provider.name,
                    provider_category=ProviderCategory.base,
                )
            ],
            embedding_models=[],
            organization_id=None,  # Global model
        )

    # gpt-4o-style model: OpenAI's default for non-o1/o3 models is 16384.
    await _sync_one(f"gpt-4o-{test_id}", f"test-{test_id}/gpt-4o", 128000)
    llm_config = await provider_manager.get_llm_config_from_handle(
        handle=f"test-{test_id}/gpt-4o",
        actor=default_user,
    )
    assert llm_config.max_tokens is not None, "max_tokens should be set"
    assert llm_config.max_tokens == 16384, f"Expected max_tokens=16384 for OpenAI gpt-4o, got {llm_config.max_tokens}"

    # gpt-5-style model shares the same 16384 default.
    await _sync_one(f"gpt-5-{test_id}", f"test-{test_id}/gpt-5", 200000)
    llm_config_gpt5 = await provider_manager.get_llm_config_from_handle(
        handle=f"test-{test_id}/gpt-5",
        actor=default_user,
    )
    assert llm_config_gpt5.max_tokens == 16384, f"Expected max_tokens=16384 for gpt-5, got {llm_config_gpt5.max_tokens}"
@pytest.mark.asyncio
async def test_server_list_llm_models_async_reads_from_database(default_user, provider_manager):
    """Test that the server's list_llm_models_async reads models from database, not in-memory.

    This test verifies that:
    1. Models synced to the database are returned by list_llm_models_async
    2. The LLMConfig objects are correctly constructed from database-cached models
    3. Provider filtering works correctly when reading from database
    """
    from letta.server.server import SyncServer

    test_id = generate_test_id()
    # Create a provider in the database (global/base, not BYOK).
    provider_create = ProviderCreate(
        name=f"test-db-provider-{test_id}",
        provider_type=ProviderType.openai,
        api_key="sk-test-key",
        base_url="https://custom.openai.com/v1",
    )
    provider = await provider_manager.create_provider_async(provider_create, actor=default_user, is_byok=False)
    # Sync two models to the database with distinct context windows so each
    # can be identified unambiguously in the listing below.
    llm_models = [
        LLMConfig(
            model=f"custom-model-1-{test_id}",
            model_endpoint_type="openai",
            model_endpoint="https://custom.openai.com/v1",
            context_window=32000,
            handle=f"test-{test_id}/custom-model-1",
            provider_name=provider.name,
            provider_category=ProviderCategory.base,
        ),
        LLMConfig(
            model=f"custom-model-2-{test_id}",
            model_endpoint_type="openai",
            model_endpoint="https://custom.openai.com/v1",
            context_window=64000,
            handle=f"test-{test_id}/custom-model-2",
            provider_name=provider.name,
            provider_category=ProviderCategory.base,
        ),
    ]
    await provider_manager.sync_provider_models_async(
        provider=provider,
        llm_models=llm_models,
        embedding_models=[],
        organization_id=None,  # Global scope
    )
    # Server instance wired to the test fixtures; skip default org/user setup.
    server = SyncServer(init_with_default_org_and_user=False)
    server.default_user = default_user
    server.provider_manager = provider_manager
    # List LLM models via the server, filtered to the provider created above.
    models = await server.list_llm_models_async(
        actor=default_user,
        provider_name=f"test-db-provider-{test_id}",
    )
    # Both synced models must come back from the database.
    handles = {m.handle for m in models}
    assert f"test-{test_id}/custom-model-1" in handles, "custom-model-1 should be in database"
    assert f"test-{test_id}/custom-model-2" in handles, "custom-model-2 should be in database"
    # Verify LLMConfig fields are faithfully reconstructed from the DB rows.
    model_1 = next(m for m in models if m.handle == f"test-{test_id}/custom-model-1")
    assert model_1.model == f"custom-model-1-{test_id}"
    assert model_1.context_window == 32000
    assert model_1.model_endpoint == "https://custom.openai.com/v1"
    assert model_1.provider_name == f"test-db-provider-{test_id}"
    model_2 = next(m for m in models if m.handle == f"test-{test_id}/custom-model-2")
    assert model_2.model == f"custom-model-2-{test_id}"
    assert model_2.context_window == 64000
@pytest.mark.asyncio
async def test_get_enabled_providers_async_queries_database(default_user, provider_manager):
    """Test that get_enabled_providers_async queries providers from database, not in-memory list.

    Verifies that:
    1. providers created in the database are returned by get_enabled_providers_async
    2. the method queries the database rather than an in-memory _enabled_providers list
    3. filtering by category / name / type works correctly against the database
    """
    from letta.server.server import SyncServer

    test_id = generate_test_id()

    # Persist one base and one BYOK provider.
    await provider_manager.create_provider_async(
        ProviderCreate(
            name=f"test-base-provider-{test_id}",
            provider_type=ProviderType.openai,
            api_key="sk-test-key",
            base_url="https://api.openai.com/v1",
        ),
        actor=default_user,
        is_byok=False,
    )
    await provider_manager.create_provider_async(
        ProviderCreate(
            name=f"test-byok-provider-{test_id}",
            provider_type=ProviderType.anthropic,
            api_key="sk-test-byok-key",
        ),
        actor=default_user,
        is_byok=True,
    )

    # Server with its in-memory provider list emptied: any provider returned
    # below must have come from the database.
    server = SyncServer(init_with_default_org_and_user=False)
    server.default_user = default_user
    server.provider_manager = provider_manager
    server._enabled_providers = []

    # No filter: both providers come back.
    names = [p.name for p in await server.get_enabled_providers_async(actor=default_user)]
    assert f"test-base-provider-{test_id}" in names, "Base provider should be in database"
    assert f"test-byok-provider-{test_id}" in names, "BYOK provider should be in database"

    # Category filter: base only.
    base_only_names = [
        p.name
        for p in await server.get_enabled_providers_async(
            actor=default_user,
            provider_category=[ProviderCategory.base],
        )
    ]
    assert f"test-base-provider-{test_id}" in base_only_names, "Base provider should be in base-only list"
    assert f"test-byok-provider-{test_id}" not in base_only_names, "BYOK provider should NOT be in base-only list"

    # Category filter: byok only.
    byok_only_names = [
        p.name
        for p in await server.get_enabled_providers_async(
            actor=default_user,
            provider_category=[ProviderCategory.byok],
        )
    ]
    assert f"test-byok-provider-{test_id}" in byok_only_names, "BYOK provider should be in byok-only list"
    assert f"test-base-provider-{test_id}" not in byok_only_names, "Base provider should NOT be in byok-only list"

    # Name filter returns exactly the one match.
    by_name = await server.get_enabled_providers_async(
        actor=default_user,
        provider_name=f"test-base-provider-{test_id}",
    )
    assert len(by_name) == 1
    assert by_name[0].name == f"test-base-provider-{test_id}"
    assert by_name[0].provider_type == ProviderType.openai

    # Type filter: only the openai-typed provider matches.
    openai_names = [
        p.name
        for p in await server.get_enabled_providers_async(
            actor=default_user,
            provider_type=ProviderType.openai,
        )
    ]
    assert f"test-base-provider-{test_id}" in openai_names
    assert f"test-byok-provider-{test_id}" not in openai_names  # the BYOK provider is anthropic-typed
# =============================================================================
# BYOK Provider and Model Listing Integration Tests
# =============================================================================
@pytest.mark.asyncio
async def test_list_providers_filters_by_category(default_user, provider_manager):
    """Test that list_providers_async correctly filters by provider_category."""
    test_id = generate_test_id()

    # One provider per category.
    base_provider = await provider_manager.create_provider_async(
        ProviderCreate(
            name=f"test-base-{test_id}",
            provider_type=ProviderType.openai,
            api_key="sk-base-key",
        ),
        actor=default_user,
        is_byok=False,
    )
    byok_provider = await provider_manager.create_provider_async(
        ProviderCreate(
            name=f"test-byok-{test_id}",
            provider_type=ProviderType.openai,
            api_key="sk-byok-key",
        ),
        actor=default_user,
        is_byok=True,
    )

    # Each provider is tagged with the expected category at creation time.
    assert base_provider.provider_category == ProviderCategory.base
    assert byok_provider.provider_category == ProviderCategory.byok

    # BYOK-only listing must exclude the base provider.
    byok_names = [
        p.name
        for p in await provider_manager.list_providers_async(
            actor=default_user,
            provider_category=[ProviderCategory.byok],
        )
    ]
    assert f"test-byok-{test_id}" in byok_names
    assert f"test-base-{test_id}" not in byok_names

    # Base-only listing must exclude the BYOK provider.
    base_names = [
        p.name
        for p in await provider_manager.list_providers_async(
            actor=default_user,
            provider_category=[ProviderCategory.base],
        )
    ]
    assert f"test-base-{test_id}" in base_names
    assert f"test-byok-{test_id}" not in base_names
@pytest.mark.asyncio
async def test_base_provider_api_key_not_stored_in_db(default_user, provider_manager):
    """Test that sync_base_providers does NOT store API keys for base providers.

    Uses a per-run unique provider name (like every sibling test in this file)
    so reruns against a persistent database cannot collide with a previously
    synced provider and break the ``len(providers) == 1`` check.
    """
    test_id = generate_test_id()
    provider_name = f"test-openai-no-key-{test_id}"

    # Sync a base provider that carries an API key in memory only.
    base_providers = [
        OpenAIProvider(name=provider_name, api_key="sk-should-not-be-stored"),
    ]
    await provider_manager.sync_base_providers(base_providers=base_providers, actor=default_user)

    # Read the provider back from the database.
    providers = await provider_manager.list_providers_async(name=provider_name, actor=default_user)
    assert len(providers) == 1
    provider = providers[0]
    assert provider.provider_category == ProviderCategory.base

    # Base providers must not persist their key; an absent or empty secret is acceptable.
    if provider.api_key_enc:
        api_key = await provider.api_key_enc.get_plaintext_async()
        assert api_key == "" or api_key is None, "Base provider API key should not be stored in database"
@pytest.mark.asyncio
async def test_byok_provider_api_key_stored_in_db(default_user, provider_manager):
    """Test that BYOK providers DO have their API keys stored in the database."""
    test_id = generate_test_id()

    # Persist a BYOK provider together with its key.
    await provider_manager.create_provider_async(
        ProviderCreate(
            name=f"test-byok-with-key-{test_id}",
            provider_type=ProviderType.openai,
            api_key="sk-byok-should-be-stored",
        ),
        actor=default_user,
        is_byok=True,
    )

    # Read it back and confirm the key round-trips through the database.
    matches = await provider_manager.list_providers_async(name=f"test-byok-with-key-{test_id}", actor=default_user)
    assert len(matches) == 1
    stored = matches[0]
    assert stored.provider_category == ProviderCategory.byok
    assert stored.api_key_enc is not None
    plaintext = await stored.api_key_enc.get_plaintext_async()
    assert plaintext == "sk-byok-should-be-stored", "BYOK provider API key should be stored in database"
@pytest.mark.asyncio
async def test_server_list_llm_models_base_from_db(default_user, provider_manager):
    """Test that server.list_llm_models_async fetches base models from database."""
    from letta.server.server import SyncServer

    test_id = generate_test_id()
    # Create a base provider and a model for it; base models ARE stored in the DB.
    base_provider_create = ProviderCreate(
        name=f"test-base-llm-{test_id}",
        provider_type=ProviderType.openai,
        api_key="sk-base-key",
    )
    base_provider = await provider_manager.create_provider_async(base_provider_create, actor=default_user, is_byok=False)
    base_llm_model = LLMConfig(
        model=f"base-gpt-4o-{test_id}",
        model_endpoint_type="openai",
        model_endpoint="https://api.openai.com/v1",
        context_window=128000,
        handle=f"test-base-llm-{test_id}/gpt-4o",
        provider_name=base_provider.name,
        provider_category=ProviderCategory.base,
    )
    await provider_manager.sync_provider_models_async(
        provider=base_provider,
        llm_models=[base_llm_model],
        embedding_models=[],
        organization_id=None,  # Global scope
    )
    # Server with an emptied in-memory provider list: listings must come from the DB.
    server = SyncServer(init_with_default_org_and_user=False)
    server.default_user = default_user
    server.provider_manager = provider_manager
    server._enabled_providers = []  # Clear to test database-backed listing
    # Unfiltered listing includes the base model.
    all_models = await server.list_llm_models_async(actor=default_user)
    all_handles = [m.handle for m in all_models]
    assert f"test-base-llm-{test_id}/gpt-4o" in all_handles, "Base model should be in list"
    # Category-filtered listing (base only) also includes it.
    base_models = await server.list_llm_models_async(
        actor=default_user,
        provider_category=[ProviderCategory.base],
    )
    base_handles = [m.handle for m in base_models]
    assert f"test-base-llm-{test_id}/gpt-4o" in base_handles
@pytest.mark.asyncio
async def test_server_list_llm_models_byok_from_provider_api(default_user, provider_manager):
    """Test that server.list_llm_models_async fetches BYOK models from provider API, not DB.

    Note: BYOK models are fetched by calling the provider's list_llm_models_async() method,
    which hits the actual provider API. This test uses mocking to verify that flow.
    """
    from letta.schemas.providers import Provider
    from letta.server.server import SyncServer

    test_id = generate_test_id()
    # Create a BYOK provider but deliberately do NOT sync models to the DB -
    # BYOK models are expected to come from the provider API.
    byok_provider_create = ProviderCreate(
        name=f"test-byok-llm-{test_id}",
        provider_type=ProviderType.openai,
        api_key="sk-byok-key",
    )
    byok_provider = await provider_manager.create_provider_async(byok_provider_create, actor=default_user, is_byok=True)
    # Server with an emptied in-memory provider list.
    server = SyncServer(init_with_default_org_and_user=False)
    server.default_user = default_user
    server.provider_manager = provider_manager
    server._enabled_providers = []
    # The models the mocked provider API will return.
    mock_byok_models = [
        LLMConfig(
            model=f"byok-gpt-4o-{test_id}",
            model_endpoint_type="openai",
            model_endpoint="https://custom.openai.com/v1",
            context_window=64000,
            handle=f"test-byok-llm-{test_id}/gpt-4o",
            provider_name=byok_provider.name,
            provider_category=ProviderCategory.byok,
        )
    ]
    # A fake typed-provider object standing in for the real OpenAI subtype.
    mock_typed_provider = MagicMock()
    mock_typed_provider.list_llm_models_async = AsyncMock(return_value=mock_byok_models)
    mock_typed_provider.list_embedding_models_async = AsyncMock(return_value=[])
    # Patch cast_to_subtype on the Provider class so any provider resolution
    # during the listing returns the mock instead of a real API client.
    with patch.object(Provider, "cast_to_subtype", return_value=mock_typed_provider):
        # Listing BYOK models should call provider API via cast_to_subtype().list_llm_models_async().
        byok_models = await server.list_llm_models_async(
            actor=default_user,
            provider_category=[ProviderCategory.byok],
        )
    # The mock being called proves the provider API path was taken, not the DB.
    mock_typed_provider.list_llm_models_async.assert_called()
    # And the mocked models surface in the result.
    byok_handles = [m.handle for m in byok_models]
    assert f"test-byok-llm-{test_id}/gpt-4o" in byok_handles
@pytest.mark.asyncio
async def test_server_list_embedding_models_base_from_db(default_user, provider_manager):
    """Test that server.list_embedding_models_async fetches base models from database.

    Note: Similar to LLM models, base embedding models are stored in DB while BYOK
    embedding models would be fetched from provider API.
    """
    from letta.server.server import SyncServer

    test_id = generate_test_id()

    # Base provider plus one embedding model persisted to the database.
    base_provider = await provider_manager.create_provider_async(
        ProviderCreate(
            name=f"test-base-embed-{test_id}",
            provider_type=ProviderType.openai,
            api_key="sk-base-key",
        ),
        actor=default_user,
        is_byok=False,
    )
    await provider_manager.sync_provider_models_async(
        provider=base_provider,
        llm_models=[],
        embedding_models=[
            EmbeddingConfig(
                embedding_model=f"base-text-embedding-{test_id}",
                embedding_endpoint_type="openai",
                embedding_endpoint="https://api.openai.com/v1",
                embedding_dim=1536,
                embedding_chunk_size=300,
                handle=f"test-base-embed-{test_id}/text-embedding-3-small",
            ),
        ],
        organization_id=None,
    )

    # Server with an emptied in-memory provider list: results must come from the DB.
    server = SyncServer(init_with_default_org_and_user=False)
    server.default_user = default_user
    server.provider_manager = provider_manager
    server._enabled_providers = []

    listed_handles = [m.handle for m in await server.list_embedding_models_async(actor=default_user)]
    assert f"test-base-embed-{test_id}/text-embedding-3-small" in listed_handles
@pytest.mark.asyncio
async def test_provider_ordering_matches_constants(default_user, provider_manager):
    """Test that provider ordering in model listing matches PROVIDER_ORDER in constants.

    Creates providers named after real PROVIDER_ORDER keys (zai/openai/anthropic)
    together with one test model each, then checks that the server lists those
    models sorted by PROVIDER_ORDER priority.

    Note: if a provider already exists (shared DB), its model is not created,
    so the ordering assertion only runs when at least two test models exist.
    """
    from letta.constants import PROVIDER_ORDER
    from letta.server.server import SyncServer

    test_id = generate_test_id()
    # Names deliberately match PROVIDER_ORDER keys, listed in non-priority
    # order so the sort is actually exercised. (The previously-recorded
    # expected-order values and the created_providers accumulator were never
    # read, so they have been dropped.)
    providers_to_create = [
        ("zai", ProviderType.zai),  # Lower priority (14)
        ("openai", ProviderType.openai),  # Higher priority (1)
        ("anthropic", ProviderType.anthropic),  # Medium priority (2)
    ]
    for name_suffix, provider_type in providers_to_create:
        # Only create the provider (and its model) if it doesn't already exist.
        existing = await provider_manager.list_providers_async(name=name_suffix, actor=default_user)
        if not existing:
            provider = await provider_manager.create_provider_async(
                ProviderCreate(
                    name=f"{name_suffix}",  # Use actual provider name for ordering
                    provider_type=provider_type,
                    api_key=f"sk-{name_suffix}-key",
                ),
                actor=default_user,
                is_byok=False,
            )
            # One model per provider so it shows up in the listing.
            await provider_manager.sync_provider_models_async(
                provider=provider,
                llm_models=[
                    LLMConfig(
                        model=f"test-model-{test_id}",
                        model_endpoint_type="openai",
                        model_endpoint="https://api.example.com/v1",
                        context_window=8192,
                        handle=f"{name_suffix}/test-model-{test_id}",
                        provider_name=provider.name,
                        provider_category=ProviderCategory.base,
                    )
                ],
                embedding_models=[],
                organization_id=None,
            )
    # Sanity-check the constants this test depends on.
    assert PROVIDER_ORDER.get("openai") == 1
    assert PROVIDER_ORDER.get("anthropic") == 2
    assert PROVIDER_ORDER.get("zai") == 14
    # List via the server (DB-backed) and verify the relative ordering.
    server = SyncServer(init_with_default_org_and_user=False)
    server.default_user = default_user
    server.provider_manager = provider_manager
    server._enabled_providers = []
    all_models = await server.list_llm_models_async(actor=default_user)
    # Restrict to the models created by this test run.
    test_models = [m for m in all_models if f"test-model-{test_id}" in m.handle]
    if len(test_models) >= 2:
        # Unknown providers sort last (999); the sequence of priorities in the
        # listing must be non-decreasing.
        provider_names_in_order = [m.provider_name for m in test_models]
        indices = [PROVIDER_ORDER.get(name, 999) for name in provider_names_in_order]
        assert indices == sorted(indices), f"Models should be sorted by PROVIDER_ORDER, got: {provider_names_in_order}"
@pytest.mark.asyncio
async def test_create_agent_with_byok_handle_dynamic_fetch(default_user, provider_manager):
    """Test that creating an agent with a BYOK model handle works via dynamic fetch.

    This tests the case where BYOK models are NOT synced to the database, but are
    instead fetched dynamically from the provider when resolving the handle.
    This is the expected behavior after the provider models persistence refactor.
    """
    test_id = generate_test_id()
    byok_provider_name = f"my-openai-byok-{test_id}"
    model_name = "gpt-4o"
    byok_handle = f"{byok_provider_name}/{model_name}"  # handle format: "{provider_name}/{model_name}"
    # Create a BYOK OpenAI provider (do NOT sync models to DB).
    provider_create = ProviderCreate(
        name=byok_provider_name,
        provider_type=ProviderType.openai,
        api_key="sk-test-byok-key",
    )
    byok_provider = await provider_manager.create_provider_async(provider_create, actor=default_user, is_byok=True)
    assert byok_provider.provider_category == ProviderCategory.byok
    assert byok_provider.name == byok_provider_name
    # Precondition: the model must NOT be in the database, otherwise the test
    # would exercise the DB path instead of the dynamic-fetch path.
    model_in_db = await provider_manager.get_model_by_handle_async(
        handle=byok_handle,
        actor=default_user,
        model_type="llm",
    )
    assert model_in_db is None, "Model should NOT be in DB for this test (testing dynamic fetch)"
    # The LLM config the mocked provider API will report for the handle.
    mock_llm_config = LLMConfig(
        model=model_name,
        model_endpoint_type="openai",
        model_endpoint="https://api.openai.com/v1",
        context_window=128000,
        handle=byok_handle,
        max_tokens=16384,
        provider_name=byok_provider_name,
        provider_category=ProviderCategory.byok,
    )
    # Matching embedding config, required by agent creation.
    mock_embedding_config = EmbeddingConfig(
        embedding_model="text-embedding-3-small",
        embedding_endpoint_type="openai",
        embedding_endpoint="https://api.openai.com/v1",
        embedding_dim=1536,
        embedding_chunk_size=300,
        handle=f"{byok_provider_name}/text-embedding-3-small",
    )
    # Full server init (async) so agent creation has all managers wired up.
    server = SyncServer(init_with_default_org_and_user=False)
    await server.init_async(init_with_default_org_and_user=False)
    server.provider_manager = provider_manager
    # Patch the OpenAI provider class so handle resolution "fetches" the mocked
    # models instead of calling the real API.
    with patch.object(
        OpenAIProvider,
        "list_llm_models_async",
        new_callable=AsyncMock,
        return_value=[mock_llm_config],
    ):
        with patch.object(
            OpenAIProvider,
            "list_embedding_models_async",
            new_callable=AsyncMock,
            return_value=[mock_embedding_config],
        ):
            # Create agent using BYOK handle - this should dynamically fetch from provider.
            agent = await server.create_agent_async(
                request=CreateAgent(
                    name=f"test-agent-byok-{test_id}",
                    model=byok_handle,  # BYOK handle format: "{provider_name}/{model_name}"
                    embedding=f"{byok_provider_name}/text-embedding-3-small",
                ),
                actor=default_user,
            )
    # The agent's resolved LLM config must reflect the dynamically fetched model.
    assert agent is not None
    assert agent.llm_config is not None
    assert agent.llm_config.model == model_name
    assert agent.llm_config.handle == byok_handle
    assert agent.llm_config.provider_name == byok_provider_name
    assert agent.llm_config.provider_category == ProviderCategory.byok
    # Note: context_window comes from the actual provider's list_llm_models_async
    # which may differ from mock if mocking doesn't take effect on instance method.
    # Cleanup so the agent doesn't leak into other tests.
    await server.agent_manager.delete_agent_async(agent_id=agent.id, actor=default_user)
@pytest.mark.asyncio
async def test_byok_provider_last_synced_triggers_sync_when_null(default_user, provider_manager):
    """A BYOK provider whose last_synced is null should be synced on the first model listing."""
    from letta.schemas.providers import Provider
    from letta.server.server import SyncServer

    test_id = generate_test_id()

    # A freshly created BYOK provider has never been synced (last_synced starts null).
    provider = await provider_manager.create_provider_async(
        ProviderCreate(
            name=f"test-byok-sync-{test_id}",
            provider_type=ProviderType.openai,
            api_key="sk-byok-key",
        ),
        actor=default_user,
        is_byok=True,
    )
    assert provider.last_synced is None

    # Stand up a server wired to this test's provider manager.
    server = SyncServer(init_with_default_org_and_user=False)
    server.default_user = default_user
    server.provider_manager = provider_manager
    server._enabled_providers = []

    # The provider API is mocked to return a single test model.
    fake_models = [
        LLMConfig(
            model=f"byok-gpt-4o-{test_id}",
            model_endpoint_type="openai",
            model_endpoint="https://api.openai.com/v1",
            context_window=64000,
            handle=f"test-byok-sync-{test_id}/gpt-4o",
            provider_name=provider.name,
            provider_category=ProviderCategory.byok,
        )
    ]
    typed_provider = MagicMock()
    typed_provider.list_llm_models_async = AsyncMock(return_value=fake_models)
    typed_provider.list_embedding_models_async = AsyncMock(return_value=[])

    with patch.object(Provider, "cast_to_subtype", return_value=typed_provider):
        # Listing BYOK models should notice last_synced is null and trigger a sync.
        await server.list_llm_models_async(
            actor=default_user,
            provider_category=[ProviderCategory.byok],
        )

    # The sync path must have hit the (mocked) provider API; other BYOK
    # providers already in the DB may cause additional calls, so only assert
    # that it was called at all.
    typed_provider.list_llm_models_async.assert_called()

    # last_synced should now be populated for our provider.
    refreshed = await provider_manager.list_providers_async(name=provider.name, actor=default_user)
    assert len(refreshed) == 1
    assert refreshed[0].last_synced is not None

    # The mocked model should have been persisted to the database.
    persisted = await provider_manager.list_models_async(
        actor=default_user,
        model_type="llm",
        provider_id=provider.id,
    )
    assert len(persisted) == 1
    assert persisted[0].name == f"byok-gpt-4o-{test_id}"
@pytest.mark.asyncio
async def test_byok_provider_last_synced_skips_sync_when_set(default_user, provider_manager):
    """A BYOK provider whose last_synced is already set should be served from the DB without re-syncing."""
    from letta.schemas.providers import Provider
    from letta.server.server import SyncServer

    test_id = generate_test_id()

    # Create the BYOK provider under test.
    provider = await provider_manager.create_provider_async(
        ProviderCreate(
            name=f"test-byok-cached-{test_id}",
            provider_type=ProviderType.openai,
            api_key="sk-byok-key",
        ),
        actor=default_user,
        is_byok=True,
    )

    # Seed the DB with one model and mark the provider as already synced.
    seeded_model = LLMConfig(
        model=f"cached-gpt-4o-{test_id}",
        model_endpoint_type="openai",
        model_endpoint="https://api.openai.com/v1",
        context_window=64000,
        handle=f"test-byok-cached-{test_id}/gpt-4o",
        provider_name=provider.name,
        provider_category=ProviderCategory.byok,
    )
    await provider_manager.sync_provider_models_async(
        provider=provider,
        llm_models=[seeded_model],
        embedding_models=[],
        organization_id=default_user.organization_id,
    )
    await provider_manager.update_provider_last_synced_async(provider.id, actor=default_user)

    # Minimal server wired to this test's provider manager.
    server = SyncServer(init_with_default_org_and_user=False)
    server.default_user = default_user
    server.provider_manager = provider_manager
    server._enabled_providers = []

    # If a sync were (incorrectly) triggered, these mocks would be hit.
    typed_provider = MagicMock()
    typed_provider.list_llm_models_async = AsyncMock(return_value=[])
    typed_provider.list_embedding_models_async = AsyncMock(return_value=[])

    with patch.object(Provider, "cast_to_subtype", return_value=typed_provider):
        # Listing should read from the DB cache, not call the provider API.
        listed_models = await server.list_llm_models_async(
            actor=default_user,
            provider_category=[ProviderCategory.byok],
        )

    # No API call was made: last_synced short-circuited the sync.
    typed_provider.list_llm_models_async.assert_not_called()

    # The cached model seeded above comes back from the DB.
    listed_handles = [m.handle for m in listed_models]
    assert f"test-byok-cached-{test_id}/gpt-4o" in listed_handles
@pytest.mark.asyncio
async def test_chatgpt_oauth_byok_resyncs_when_allowlist_expands(default_user, provider_manager):
    """ChatGPT OAuth providers should backfill newly added hardcoded models."""
    test_id = generate_test_id()
    provider_name = f"test-chatgpt-oauth-{test_id}"
    # chatgpt_oauth providers store their whole OAuth credential blob in the api_key field.
    oauth_credentials = json.dumps(
        {
            "access_token": "test-access-token",
            "refresh_token": "test-refresh-token",
            "account_id": "test-account-id",
            "expires_at": 4_102_444_800,  # year 2100 (seconds)
        }
    )
    byok_provider = await provider_manager.create_provider_async(
        ProviderCreate(
            name=provider_name,
            provider_type=ProviderType.chatgpt_oauth,
            api_key=oauth_credentials,
        ),
        actor=default_user,
        is_byok=True,
    )
    # Simulate a stale provider model cache that predates gpt-5.3-codex.
    stale_models = [
        LLMConfig(
            model="gpt-5.2-codex",
            model_endpoint_type="chatgpt_oauth",
            model_endpoint="https://chatgpt.com/backend-api/codex/responses",
            context_window=272000,
            handle=f"{provider_name}/gpt-5.2-codex",
            provider_name=provider_name,
            provider_category=ProviderCategory.byok,
        )
    ]
    await provider_manager.sync_provider_models_async(
        provider=byok_provider,
        llm_models=stale_models,
        embedding_models=[],
        organization_id=default_user.organization_id,
    )
    # Mark the provider as already synced so the listing would normally be served
    # from the (stale) DB cache rather than re-fetching from the provider.
    await provider_manager.update_provider_last_synced_async(byok_provider.id, actor=default_user)
    server = SyncServer(init_with_default_org_and_user=False)
    server.default_user = default_user
    server.provider_manager = provider_manager
    server._enabled_providers = []
    byok_models = await server.list_llm_models_async(
        actor=default_user,
        provider_category=[ProviderCategory.byok],
        provider_name=provider_name,
    )
    byok_handles = {model.handle for model in byok_models}
    # Even though the cache predates it, the newly allowlisted hardcoded model
    # should have been backfilled into the listing.
    assert f"{provider_name}/gpt-5.3-codex" in byok_handles
@pytest.mark.asyncio
async def test_base_provider_updates_last_synced_on_sync(default_user, provider_manager):
    """Syncing models for a base provider should stamp its last_synced timestamp."""
    test_id = generate_test_id()

    # Base providers carry no API key of their own.
    provider = await provider_manager.create_provider_async(
        ProviderCreate(
            name=f"test-base-sync-{test_id}",
            provider_type=ProviderType.openai,
            api_key="",
        ),
        actor=default_user,
        is_byok=False,
    )
    # A brand-new provider has never been synced.
    assert provider.last_synced is None

    # Sync one global model (organization_id=None) and stamp the provider.
    await provider_manager.sync_provider_models_async(
        provider=provider,
        llm_models=[
            LLMConfig(
                model=f"base-gpt-4o-{test_id}",
                model_endpoint_type="openai",
                model_endpoint="https://api.openai.com/v1",
                context_window=64000,
                handle=f"test-base-sync-{test_id}/gpt-4o",
            )
        ],
        embedding_models=[],
        organization_id=None,
    )
    await provider_manager.update_provider_last_synced_async(provider.id, actor=default_user)

    # Re-read the provider and confirm the timestamp landed.
    refreshed = await provider_manager.list_providers_async(name=provider.name, actor=default_user)
    assert len(refreshed) == 1
    assert refreshed[0].last_synced is not None
@pytest.mark.asyncio
async def test_byok_provider_models_synced_on_creation(default_user, provider_manager):
    """Test that models are automatically synced when a BYOK provider is created.

    When create_provider_async is called with is_byok=True, it should:
    1. Create the provider in the database
    2. Call _sync_default_models_for_provider to fetch and persist models from the provider API
    3. Update last_synced timestamp
    """
    from letta.schemas.providers import Provider

    test_id = generate_test_id()
    # Mock models that the provider API would return
    mock_llm_models = [
        LLMConfig(
            model="gpt-4o",
            model_endpoint_type="openai",
            model_endpoint="https://api.openai.com/v1",
            context_window=128000,
            handle=f"test-byok-creation-{test_id}/gpt-4o",
            provider_name=f"test-byok-creation-{test_id}",
            provider_category=ProviderCategory.byok,
        ),
        LLMConfig(
            model="gpt-4o-mini",
            model_endpoint_type="openai",
            model_endpoint="https://api.openai.com/v1",
            context_window=128000,
            handle=f"test-byok-creation-{test_id}/gpt-4o-mini",
            provider_name=f"test-byok-creation-{test_id}",
            provider_category=ProviderCategory.byok,
        ),
    ]
    mock_embedding_models = [
        EmbeddingConfig(
            embedding_model="text-embedding-3-small",
            embedding_endpoint_type="openai",
            embedding_endpoint="https://api.openai.com/v1",
            embedding_dim=1536,
            embedding_chunk_size=300,
            handle=f"test-byok-creation-{test_id}/text-embedding-3-small",
        ),
    ]
    # Create a mock typed provider that returns our test models
    mock_typed_provider = MagicMock()
    mock_typed_provider.list_llm_models_async = AsyncMock(return_value=mock_llm_models)
    mock_typed_provider.list_embedding_models_async = AsyncMock(return_value=mock_embedding_models)
    # Patch cast_to_subtype to return our mock when _sync_default_models_for_provider is called
    with patch.object(Provider, "cast_to_subtype", return_value=mock_typed_provider):
        # Create the BYOK provider - this should automatically sync models
        byok_provider_create = ProviderCreate(
            name=f"test-byok-creation-{test_id}",
            provider_type=ProviderType.openai,
            api_key="sk-test-key",
        )
        byok_provider = await provider_manager.create_provider_async(byok_provider_create, actor=default_user, is_byok=True)
        # Verify the provider API was called during creation
        mock_typed_provider.list_llm_models_async.assert_called_once()
        mock_typed_provider.list_embedding_models_async.assert_called_once()
        # Re-fetch the provider to get the updated last_synced value
        # (the returned object from create_provider_async is stale since last_synced is set after)
        byok_provider = await provider_manager.get_provider_async(byok_provider.id, default_user)
        # Verify last_synced was set (indicating sync completed)
        assert byok_provider.last_synced is not None
        # Verify LLM models were persisted to the database
        synced_llm_models = await provider_manager.list_models_async(
            actor=default_user,
            model_type="llm",
            provider_id=byok_provider.id,
        )
        assert len(synced_llm_models) == 2
        synced_llm_names = {m.name for m in synced_llm_models}
        assert "gpt-4o" in synced_llm_names
        assert "gpt-4o-mini" in synced_llm_names
        # Verify embedding models were persisted to the database
        synced_embedding_models = await provider_manager.list_models_async(
            actor=default_user,
            model_type="embedding",
            provider_id=byok_provider.id,
        )
        assert len(synced_embedding_models) == 1
        assert synced_embedding_models[0].name == "text-embedding-3-small"
@pytest.mark.asyncio
async def test_refresh_byok_provider_adds_new_models(default_user, provider_manager):
    """Test that refreshing a BYOK provider adds new models from the provider API.

    When _sync_default_models_for_provider is called (via refresh endpoint):
    1. It should fetch current models from the provider API
    2. Add any new models that weren't previously synced
    3. Update the last_synced timestamp
    """
    from letta.schemas.providers import Provider

    test_id = generate_test_id()
    # Initial models when provider is created
    initial_models = [
        LLMConfig(
            model="gpt-4o",
            model_endpoint_type="openai",
            model_endpoint="https://api.openai.com/v1",
            context_window=128000,
            handle=f"test-refresh-add-{test_id}/gpt-4o",
            provider_name=f"test-refresh-add-{test_id}",
            provider_category=ProviderCategory.byok,
        ),
    ]
    # Updated models after refresh (includes a new model)
    updated_models = [
        LLMConfig(
            model="gpt-4o",
            model_endpoint_type="openai",
            model_endpoint="https://api.openai.com/v1",
            context_window=128000,
            handle=f"test-refresh-add-{test_id}/gpt-4o",
            provider_name=f"test-refresh-add-{test_id}",
            provider_category=ProviderCategory.byok,
        ),
        LLMConfig(
            model="gpt-4.1",  # New model added by provider
            model_endpoint_type="openai",
            model_endpoint="https://api.openai.com/v1",
            context_window=256000,
            handle=f"test-refresh-add-{test_id}/gpt-4.1",
            provider_name=f"test-refresh-add-{test_id}",
            provider_category=ProviderCategory.byok,
        ),
    ]
    # Create mock for initial sync during provider creation
    mock_typed_provider_initial = MagicMock()
    mock_typed_provider_initial.list_llm_models_async = AsyncMock(return_value=initial_models)
    mock_typed_provider_initial.list_embedding_models_async = AsyncMock(return_value=[])
    # Create the provider with initial models
    with patch.object(Provider, "cast_to_subtype", return_value=mock_typed_provider_initial):
        byok_provider_create = ProviderCreate(
            name=f"test-refresh-add-{test_id}",
            provider_type=ProviderType.openai,
            api_key="sk-test-key",
        )
        byok_provider = await provider_manager.create_provider_async(byok_provider_create, actor=default_user, is_byok=True)
        # Re-fetch the provider to get the updated last_synced value
        byok_provider = await provider_manager.get_provider_async(byok_provider.id, default_user)
        # Verify initial sync - should have 1 model
        initial_synced_models = await provider_manager.list_models_async(
            actor=default_user,
            model_type="llm",
            provider_id=byok_provider.id,
        )
        assert len(initial_synced_models) == 1
        assert initial_synced_models[0].name == "gpt-4o"
        initial_last_synced = byok_provider.last_synced
        assert initial_last_synced is not None  # Verify sync happened during creation
    # Create mock for refresh with updated models
    mock_typed_provider_refresh = MagicMock()
    mock_typed_provider_refresh.list_llm_models_async = AsyncMock(return_value=updated_models)
    mock_typed_provider_refresh.list_embedding_models_async = AsyncMock(return_value=[])
    # Refresh the provider (simulating what the endpoint does)
    with patch.object(Provider, "cast_to_subtype", return_value=mock_typed_provider_refresh):
        await provider_manager._sync_default_models_for_provider(byok_provider, default_user)
        # Verify the API was called during refresh
        mock_typed_provider_refresh.list_llm_models_async.assert_called_once()
        # Verify new model was added
        refreshed_models = await provider_manager.list_models_async(
            actor=default_user,
            model_type="llm",
            provider_id=byok_provider.id,
        )
        assert len(refreshed_models) == 2
        refreshed_names = {m.name for m in refreshed_models}
        assert "gpt-4o" in refreshed_names
        assert "gpt-4.1" in refreshed_names
        # Verify last_synced was updated
        updated_provider = await provider_manager.get_provider_async(byok_provider.id, default_user)
        assert updated_provider.last_synced is not None
        # Refresh must move the timestamp forward (or keep it equal at clock resolution).
        assert updated_provider.last_synced >= initial_last_synced
@pytest.mark.asyncio
async def test_refresh_byok_provider_removes_old_models(default_user, provider_manager):
    """Test that refreshing a BYOK provider removes models no longer available from the provider API.

    When _sync_default_models_for_provider is called (via refresh endpoint):
    1. It should fetch current models from the provider API
    2. Remove any models that are no longer available (soft delete)
    3. Keep models that are still available
    """
    from letta.schemas.providers import Provider

    test_id = generate_test_id()
    # Initial models when provider is created (includes a model that will be removed)
    initial_models = [
        LLMConfig(
            model="gpt-4o",
            model_endpoint_type="openai",
            model_endpoint="https://api.openai.com/v1",
            context_window=128000,
            handle=f"test-refresh-remove-{test_id}/gpt-4o",
            provider_name=f"test-refresh-remove-{test_id}",
            provider_category=ProviderCategory.byok,
        ),
        LLMConfig(
            model="gpt-4-turbo",  # This model will be deprecated/removed
            model_endpoint_type="openai",
            model_endpoint="https://api.openai.com/v1",
            context_window=128000,
            handle=f"test-refresh-remove-{test_id}/gpt-4-turbo",
            provider_name=f"test-refresh-remove-{test_id}",
            provider_category=ProviderCategory.byok,
        ),
    ]
    # Updated models after refresh (gpt-4-turbo is no longer available)
    updated_models = [
        LLMConfig(
            model="gpt-4o",
            model_endpoint_type="openai",
            model_endpoint="https://api.openai.com/v1",
            context_window=128000,
            handle=f"test-refresh-remove-{test_id}/gpt-4o",
            provider_name=f"test-refresh-remove-{test_id}",
            provider_category=ProviderCategory.byok,
        ),
    ]
    # Create mock for initial sync during provider creation
    mock_typed_provider_initial = MagicMock()
    mock_typed_provider_initial.list_llm_models_async = AsyncMock(return_value=initial_models)
    mock_typed_provider_initial.list_embedding_models_async = AsyncMock(return_value=[])
    # Create the provider with initial models
    with patch.object(Provider, "cast_to_subtype", return_value=mock_typed_provider_initial):
        byok_provider_create = ProviderCreate(
            name=f"test-refresh-remove-{test_id}",
            provider_type=ProviderType.openai,
            api_key="sk-test-key",
        )
        byok_provider = await provider_manager.create_provider_async(byok_provider_create, actor=default_user, is_byok=True)
        # Verify initial sync - should have 2 models
        initial_synced_models = await provider_manager.list_models_async(
            actor=default_user,
            model_type="llm",
            provider_id=byok_provider.id,
        )
        assert len(initial_synced_models) == 2
        initial_names = {m.name for m in initial_synced_models}
        assert "gpt-4o" in initial_names
        assert "gpt-4-turbo" in initial_names
    # Create mock for refresh with fewer models
    mock_typed_provider_refresh = MagicMock()
    mock_typed_provider_refresh.list_llm_models_async = AsyncMock(return_value=updated_models)
    mock_typed_provider_refresh.list_embedding_models_async = AsyncMock(return_value=[])
    # Refresh the provider (simulating what the endpoint does)
    with patch.object(Provider, "cast_to_subtype", return_value=mock_typed_provider_refresh):
        await provider_manager._sync_default_models_for_provider(byok_provider, default_user)
        # Verify the removed model is no longer in the list
        refreshed_models = await provider_manager.list_models_async(
            actor=default_user,
            model_type="llm",
            provider_id=byok_provider.id,
        )
        assert len(refreshed_models) == 1
        assert refreshed_models[0].name == "gpt-4o"
        # Verify gpt-4-turbo was removed (soft deleted)
        refreshed_names = {m.name for m in refreshed_models}
        assert "gpt-4-turbo" not in refreshed_names
@pytest.mark.asyncio
async def test_refresh_base_provider_fails(default_user, provider_manager):
    """Refreshing a base provider must be rejected with a 400.

    Only BYOK providers may be refreshed; base providers are configured via
    environment variables and have nothing to refresh.
    """
    from fastapi import HTTPException

    from letta.server.rest_api.routers.v1.providers import refresh_provider_models
    from letta.server.server import SyncServer

    test_id = generate_test_id()

    # Base providers carry no API key of their own.
    provider = await provider_manager.create_provider_async(
        ProviderCreate(
            name=f"test-base-refresh-{test_id}",
            provider_type=ProviderType.openai,
            api_key="",
        ),
        actor=default_user,
        is_byok=False,
    )
    assert provider.provider_category == ProviderCategory.base

    # Minimal server: wire up only the pieces the endpoint touches.
    server = SyncServer(init_with_default_org_and_user=False)
    server.provider_manager = provider_manager
    server.user_manager = MagicMock()
    server.user_manager.get_actor_or_default_async = AsyncMock(return_value=default_user)

    # Fake request headers carrying the actor's identity.
    headers = MagicMock()
    headers.actor_id = default_user.id

    # The endpoint must refuse to refresh a non-BYOK provider.
    with pytest.raises(HTTPException) as exc_info:
        await refresh_provider_models(
            provider_id=provider.id,
            headers=headers,
            server=server,
        )
    assert exc_info.value.status_code == 400
    assert "BYOK" in exc_info.value.detail
@pytest.mark.asyncio
async def test_get_model_by_handle_prioritizes_byok_over_base(default_user, provider_manager):
    """Test that get_model_by_handle_async returns the BYOK model when both BYOK and base providers have the same handle.

    This tests the legacy scenario where a user has both a BYOK provider and a base provider
    with the same name (and thus models with the same handle). The BYOK model should be
    returned because it's organization-specific, while base models are global.
    """
    test_id = generate_test_id()
    provider_name = f"test-duplicate-{test_id}"
    model_handle = f"{provider_name}/gpt-4o"
    # Step 1: Create a base provider and sync a model for it (global, organization_id=None)
    base_provider_create = ProviderCreate(
        name=provider_name,
        provider_type=ProviderType.openai,
        api_key="",  # Base providers don't store API keys
    )
    base_provider = await provider_manager.create_provider_async(base_provider_create, actor=default_user, is_byok=False)
    assert base_provider.provider_category == ProviderCategory.base
    # Sync a model for the base provider (global model with organization_id=None)
    base_llm_model = LLMConfig(
        model="gpt-4o",
        model_endpoint_type="openai",
        model_endpoint="https://api.openai.com/v1",
        context_window=128000,
        handle=model_handle,
        provider_name=provider_name,
    )
    await provider_manager.sync_provider_models_async(
        provider=base_provider,
        llm_models=[base_llm_model],
        embedding_models=[],
        organization_id=None,  # Global model
    )
    # Verify base model was created
    base_model = await provider_manager.get_model_by_handle_async(
        handle=model_handle,
        actor=default_user,
        model_type="llm",
    )
    assert base_model is not None
    assert base_model.handle == model_handle
    assert base_model.organization_id is None  # Global model
    # Step 2: Create a BYOK provider with the same name (simulating legacy duplicate)
    # Note: In production, this is now prevented, but legacy data could have this
    # We need to bypass the name conflict check for this test (simulating legacy data)
    # Create the BYOK provider directly by manipulating the database
    from letta.orm.provider import Provider as ProviderORM
    from letta.schemas.providers import Provider as PydanticProvider
    from letta.server.db import db_registry

    # Create a pydantic provider first to generate an ID
    byok_pydantic_provider = PydanticProvider(
        name=provider_name,  # Same name as base provider
        provider_type=ProviderType.openai,
        provider_category=ProviderCategory.byok,
        organization_id=default_user.organization_id,
    )
    byok_pydantic_provider.resolve_identifier()
    # Insert the ORM row directly, bypassing the manager's name-conflict check.
    async with db_registry.async_session() as session:
        byok_provider_orm = ProviderORM(**byok_pydantic_provider.model_dump(to_orm=True))
        await byok_provider_orm.create_async(session, actor=default_user)
        byok_provider = byok_provider_orm.to_pydantic()
    assert byok_provider.provider_category == ProviderCategory.byok
    # Sync a model for the BYOK provider (org-specific model)
    byok_llm_model = LLMConfig(
        model="gpt-4o",
        model_endpoint_type="openai",
        model_endpoint="https://api.openai.com/v1",
        context_window=128000,
        handle=model_handle,  # Same handle as base model
        provider_name=provider_name,
        provider_category=ProviderCategory.byok,
    )
    await provider_manager.sync_provider_models_async(
        provider=byok_provider,
        llm_models=[byok_llm_model],
        embedding_models=[],
        organization_id=default_user.organization_id,  # Org-specific model
    )
    # Step 3: Verify that get_model_by_handle_async returns the BYOK model (org-specific)
    retrieved_model = await provider_manager.get_model_by_handle_async(
        handle=model_handle,
        actor=default_user,
        model_type="llm",
    )
    assert retrieved_model is not None
    assert retrieved_model.handle == model_handle
    # The key assertion: org-specific (BYOK) model should be returned, not the global (base) model
    assert retrieved_model.organization_id == default_user.organization_id
    assert retrieved_model.provider_id == byok_provider.id
@pytest.mark.asyncio
async def test_byok_provider_uses_schema_default_base_url(default_user, provider_manager):
    """Test that BYOK providers with schema-default base_url get correct model_endpoint.

    This tests a bug where providers like ZAI have a schema-default base_url
    (e.g., "https://api.z.ai/api/paas/v4/") that isn't stored in the database.
    When list_llm_models_async reads from DB, the base_url is NULL, and if the code
    uses provider.base_url directly instead of typed_provider.base_url, the
    model_endpoint would be None/wrong, causing requests to go to the wrong endpoint.

    The fix uses cast_to_subtype() to get the typed provider with schema defaults.
    """
    from letta.orm.provider import Provider as ProviderORM
    from letta.schemas.providers import Provider as PydanticProvider
    from letta.server.db import db_registry

    test_id = generate_test_id()
    provider_name = f"test-zai-{test_id}"
    # Create a ZAI BYOK provider WITHOUT explicitly setting base_url
    # This simulates what happens when a user creates a ZAI provider via the API
    # The schema default "https://api.z.ai/api/paas/v4/" applies in memory but
    # may not be stored in the database (base_url column is NULL)
    byok_pydantic_provider = PydanticProvider(
        name=provider_name,
        provider_type=ProviderType.zai,
        provider_category=ProviderCategory.byok,
        organization_id=default_user.organization_id,
        # NOTE: base_url is intentionally NOT set - this is the bug scenario
        # The DB will have base_url=NULL
    )
    byok_pydantic_provider.resolve_identifier()
    # Insert the ORM row directly so the base_url column really is NULL.
    async with db_registry.async_session() as session:
        byok_provider_orm = ProviderORM(**byok_pydantic_provider.model_dump(to_orm=True))
        await byok_provider_orm.create_async(session, actor=default_user)
        byok_provider = byok_provider_orm.to_pydantic()
    # Verify base_url is None in the provider loaded from DB
    assert byok_provider.base_url is None, "base_url should be NULL in DB for this test"
    assert byok_provider.provider_type == ProviderType.zai
    # Sync a model for the provider (simulating what happens after provider creation)
    # Set last_synced so the server reads from DB instead of calling provider API
    from datetime import datetime, timezone

    async with db_registry.async_session() as session:
        provider_orm = await ProviderORM.read_async(session, identifier=byok_provider.id, actor=None)
        provider_orm.last_synced = datetime.now(timezone.utc)
        await session.commit()
    model_handle = f"{provider_name}/glm-4-flash"
    byok_llm_model = LLMConfig(
        model="glm-4-flash",
        model_endpoint_type="zai",
        model_endpoint="https://api.z.ai/api/paas/v4/",  # The correct endpoint
        context_window=128000,
        handle=model_handle,
        provider_name=provider_name,
        provider_category=ProviderCategory.byok,
    )
    await provider_manager.sync_provider_models_async(
        provider=byok_provider,
        llm_models=[byok_llm_model],
        embedding_models=[],
        organization_id=default_user.organization_id,
    )
    # Create server and list LLM models
    server = SyncServer(init_with_default_org_and_user=False)
    server.default_user = default_user
    server.provider_manager = provider_manager
    # List LLM models - this should use typed_provider.base_url (schema default)
    # NOT provider.base_url (which is NULL in DB)
    models = await server.list_llm_models_async(
        actor=default_user,
        provider_category=[ProviderCategory.byok],  # Only BYOK providers
    )
    # Find our ZAI model
    zai_models = [m for m in models if m.handle == model_handle]
    assert len(zai_models) == 1, f"Expected 1 ZAI model, got {len(zai_models)}"
    zai_model = zai_models[0]
    # THE KEY ASSERTION: model_endpoint should be the ZAI schema default,
    # NOT None (which would cause requests to go to OpenAI's endpoint)
    expected_endpoint = "https://api.z.ai/api/paas/v4/"
    assert zai_model.model_endpoint == expected_endpoint, (
        f"model_endpoint should be '{expected_endpoint}' from ZAI schema default, "
        f"but got '{zai_model.model_endpoint}'. This indicates the bug where "
        f"provider.base_url (NULL from DB) was used instead of typed_provider.base_url."
    )
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/test_server_providers.py",
"license": "Apache License 2.0",
"lines": 2785,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:tests/test_temporal_metrics_local.py | """
Local test for temporal metrics.
Run with: uv run pytest tests/test_temporal_metrics_local.py -v -s
"""
import os
from unittest.mock import patch
import pytest
from letta.agents.temporal.metrics import (
ActivityMetrics,
TemporalMetrics,
WorkflowMetrics,
)
@pytest.fixture(autouse=True)
def setup_metrics():
    """Reset metric state and point metrics at a local Datadog agent for every test."""
    # Drop any prior initialization so each test starts from a clean slate.
    TemporalMetrics._initialized = False

    # Enable metrics and target a local DogStatsD agent.
    os.environ.update(
        {
            "DD_METRICS_ENABLED": "true",
            "DD_AGENT_HOST": "localhost",
            "DD_DOGSTATSD_PORT": "8125",
            "DD_ENV": "local-test",
            "DD_SERVICE": "letta-temporal-test",
        }
    )

    yield

    # Tear down: force re-initialization for whatever runs next.
    TemporalMetrics._initialized = False
@pytest.mark.asyncio
async def test_metrics_initialization():
    """Initialization should flip the module-level _initialized flag."""
    TemporalMetrics.initialize()

    assert TemporalMetrics._initialized is True

    enabled = TemporalMetrics.is_enabled()
    print(f"\n✓ Metrics initialized: enabled={enabled}")
@pytest.mark.asyncio
async def test_workflow_metrics():
    """Workflow metrics should exercise increment, histogram, and gauge."""
    with patch("letta.agents.temporal.metrics.statsd") as mock_statsd:
        # Force the metrics singleton into an "enabled and initialized" state
        # without touching a real DogStatsD client.
        TemporalMetrics._initialized = False
        TemporalMetrics._enabled = True
        TemporalMetrics._initialized = True

        workflow_type = "TemporalAgentWorkflow"
        workflow_id = "test-workflow-123"

        # Emit a start, a success, and a usage record.
        WorkflowMetrics.record_workflow_start(workflow_type=workflow_type, workflow_id=workflow_id)
        WorkflowMetrics.record_workflow_success(
            workflow_type=workflow_type,
            workflow_id=workflow_id,
            duration_ns=1_000_000_000,  # one second
        )
        WorkflowMetrics.record_workflow_usage(
            workflow_type=workflow_type,
            step_count=5,
            completion_tokens=100,
            prompt_tokens=50,
            total_tokens=150,
        )

        # Every statsd primitive should have been hit at least once.
        assert mock_statsd.increment.called
        assert mock_statsd.histogram.called
        assert mock_statsd.gauge.called

        print("\n✓ Workflow metrics recorded successfully")
        print(f" - increment called {mock_statsd.increment.call_count} times")
        print(f" - histogram called {mock_statsd.histogram.call_count} times")
        print(f" - gauge called {mock_statsd.gauge.call_count} times")
@pytest.mark.asyncio
async def test_activity_metrics():
    """Activity metrics should exercise increment and histogram."""
    with patch("letta.agents.temporal.metrics.statsd") as mock_statsd:
        # Put the metrics singleton into an enabled, initialized state by hand.
        TemporalMetrics._initialized = False
        TemporalMetrics._enabled = True
        TemporalMetrics._initialized = True

        # Emit one start and one success record for an activity.
        activity_name = "llm_request"
        ActivityMetrics.record_activity_start(activity_name)
        ActivityMetrics.record_activity_success(activity_name, duration_ms=500.0)

        # Both statsd primitives used by activities should have fired.
        assert mock_statsd.increment.called
        assert mock_statsd.histogram.called

        print("\n✓ Activity metrics recorded successfully")
        print(f" - increment called {mock_statsd.increment.call_count} times")
@pytest.mark.asyncio
async def test_metrics_with_real_dogstatsd():
    """
    Test metrics with real DogStatsD connection (requires Datadog agent running).
    This test will skip if the agent is not available.
    """
    import socket

    # Probe for a local DogStatsD listener. connect() on a SOCK_DGRAM socket
    # never contacts the peer (it only fixes the default destination), so by
    # itself it cannot detect a missing listener. Sending a datagram and then
    # sending again lets the kernel surface an ICMP "port unreachable" from
    # the first send as ConnectionRefusedError (Linux behavior; on other
    # platforms this probe may still report the agent as available).
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        sock.settimeout(0.5)
        sock.connect(("localhost", 8125))
        sock.send(b"letta.probe:1|c")
        sock.send(b"letta.probe:1|c")  # second send surfaces ICMP errors from the first
        dogstatsd_available = True
    except OSError:
        dogstatsd_available = False
    finally:
        # Always release the socket, even when the probe fails (the original
        # code leaked it on the exception path).
        sock.close()

    if not dogstatsd_available:
        pytest.skip("DogStatsD not available on localhost:8125")

    # Force re-initialization with real connection
    TemporalMetrics._initialized = False
    TemporalMetrics.initialize()

    # Send one metric of each type so they show up in the Datadog UI.
    TemporalMetrics.increment("temporal.test.counter", value=1, tags=["test:true"])
    TemporalMetrics.gauge("temporal.test.gauge", value=42.0, tags=["test:true"])
    TemporalMetrics.histogram("temporal.test.histogram", value=100.0, tags=["test:true"])

    print("\n✓ Real metrics sent to DogStatsD at localhost:8125")
    print(" Check your Datadog UI for metrics with prefix 'temporal.test.*'")
| {
"repo_id": "letta-ai/letta",
"file_path": "tests/test_temporal_metrics_local.py",
"license": "Apache License 2.0",
"lines": 102,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
letta-ai/letta:alembic/versions/066857381578_add_approvals_field_to_messages.py | """add approvals field to messages
Revision ID: 066857381578
Revises: c734cfc0d595
Create Date: 2025-10-09 17:56:07.333221
"""
from typing import Sequence, Union
import sqlalchemy as sa
import letta.orm
from alembic import op
from letta.settings import settings
# revision identifiers, used by Alembic.
revision: str = "066857381578"
down_revision: Union[str, None] = "c734cfc0d595"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Add the nullable ``approvals`` column to the ``messages`` table (Postgres only)."""
    # This migration only applies to Postgres deployments; SQLite is skipped.
    if settings.letta_pg_uri_no_default:
        op.add_column("messages", sa.Column("approvals", letta.orm.custom_columns.ApprovalsColumn(), nullable=True))
def downgrade() -> None:
    """Drop the ``approvals`` column from the ``messages`` table (Postgres only)."""
    # Mirror the upgrade's guard: SQLite deployments never got the column.
    if settings.letta_pg_uri_no_default:
        op.drop_column("messages", "approvals")
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/066857381578_add_approvals_field_to_messages.py",
"license": "Apache License 2.0",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/6756d04c3ddb_add_tools_used_field_to_run_metrics_.py | """Add tools_used field to run_metrics table
Revision ID: 6756d04c3ddb
Revises: e67961ed7c32
Create Date: 2025-10-17 14:52:53.601368
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "6756d04c3ddb"
down_revision: Union[str, None] = "e67961ed7c32"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Add the nullable ``tools_used`` JSON column to the ``run_metrics`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column("run_metrics", sa.Column("tools_used", sa.JSON(), nullable=True))
    # ### end Alembic commands ###
def downgrade() -> None:
    """Drop the ``tools_used`` column from ``run_metrics``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column("run_metrics", "tools_used")
    # ### end Alembic commands ###
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/6756d04c3ddb_add_tools_used_field_to_run_metrics_.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/8149a781ac1b_backfill_encrypted_columns_for_.py | """backfill encrypted columns for providers, mcp, sandbox
Revision ID: 8149a781ac1b
Revises: 066857381578
Create Date: 2025-10-13 13:35:55.929562
"""
import os
from typing import Sequence, Union
import sqlalchemy as sa
from sqlalchemy import String, Text
from sqlalchemy.sql import column, table
from alembic import op
from letta.helpers.crypto_utils import CryptoUtils
# revision identifiers, used by Alembic.
revision: str = "8149a781ac1b"
down_revision: Union[str, None] = "066857381578"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
# Batch processing configuration shared by every backfill pass below.
BATCH_SIZE = 1000  # Process 1000 rows at a time


def _backfill_encrypted_columns(connection, tbl, field_pairs, encryption_key) -> None:
    """Encrypt plaintext column values into their ``*_enc`` counterparts, in batches.

    Args:
        connection: Active SQLAlchemy connection from ``op.get_bind()``.
        tbl: Lightweight ``sqlalchemy.sql.table`` describing the target table.
        field_pairs: List of ``(plaintext_column, encrypted_column)`` name pairs.
        encryption_key: Key passed through to ``CryptoUtils.encrypt``.

    Only rows that still have a plaintext value with an empty encrypted
    counterpart are touched; plaintext columns are left in place so the
    migration can be rolled back safely.
    """
    # A row needs work when at least one pair has plaintext but no ciphertext.
    needs_encryption = sa.or_(
        *[sa.and_(tbl.c[plain].isnot(None), tbl.c[enc].is_(None)) for plain, enc in field_pairs]
    )

    total = connection.execute(sa.select(sa.func.count()).select_from(tbl).where(needs_encryption)).scalar()
    if not total:
        print(f"{tbl.name}: No records need encryption")
        return
    print(f"Found {total} {tbl.name} records that need encryption")

    encrypted_count = 0
    skipped_count = 0

    # BUGFIX: the previous implementation paged with OFFSET while filtering on
    # ``*_enc IS NULL``. Each update removes rows from that filtered set, so
    # advancing the offset skipped over rows that had not been processed yet.
    # Keyset pagination on ``id > last_seen_id`` is stable as the result set
    # shrinks, and also cannot loop forever on rows whose encryption fails
    # (we simply advance past them).
    last_seen_id = ""
    select_cols = [tbl.c.id] + [tbl.c[name] for pair in field_pairs for name in pair]
    while True:
        rows = connection.execute(
            sa.select(*select_cols)
            .where(sa.and_(needs_encryption, tbl.c.id > last_seen_id))
            .order_by(tbl.c.id)  # Consistent ordering is required for keyset paging
            .limit(BATCH_SIZE)
        ).fetchall()
        if not rows:
            break  # No more rows to process
        last_seen_id = rows[-1].id

        for row in rows:
            updates = {}
            for plain, enc in field_pairs:
                plain_value = getattr(row, plain)
                # Encrypt only when plaintext is present and not already encrypted.
                if plain_value and not getattr(row, enc):
                    try:
                        updates[enc] = CryptoUtils.encrypt(plain_value, encryption_key)
                    except Exception as e:
                        print(f"Warning: Failed to encrypt {plain} for {tbl.name} id={row.id}: {e}")
                elif getattr(row, enc):
                    skipped_count += 1
            if updates:
                connection.execute(tbl.update().where(tbl.c.id == row.id).values(**updates))
                encrypted_count += 1
                # Progress indicator for large datasets
                if encrypted_count % 10000 == 0:
                    print(f"  Progress: Encrypted {encrypted_count} {tbl.name} records...")
                # For very large datasets, commit periodically to avoid long transactions
                if encrypted_count % 50000 == 0:
                    connection.commit()

    print(f"{tbl.name}: Encrypted {encrypted_count} records, skipped {skipped_count} already encrypted fields")


def upgrade() -> None:
    """Backfill ``*_enc`` columns for providers, sandbox env vars, and MCP OAuth.

    Requires ``LETTA_ENCRYPTION_KEY`` in the environment; when absent the
    migration is a deliberate no-op so deployments without encryption still
    migrate cleanly. Plaintext columns are retained for rollback safety.
    """
    # Check if encryption key is available
    encryption_key = os.environ.get("LETTA_ENCRYPTION_KEY")
    if not encryption_key:
        print("WARNING: LETTA_ENCRYPTION_KEY not set. Skipping data encryption migration.")
        print("You can run a separate migration script later to encrypt existing data.")
        return

    # Get database connection
    connection = op.get_bind()

    # Migrate providers data (two independent secret columns).
    print("Migrating providers encrypted fields...")
    providers = table(
        "providers",
        column("id", String),
        column("api_key", String),
        column("api_key_enc", Text),
        column("access_key", String),
        column("access_key_enc", Text),
    )
    _backfill_encrypted_columns(
        connection,
        providers,
        [("api_key", "api_key_enc"), ("access_key", "access_key_enc")],
        encryption_key,
    )

    # Migrate sandbox_environment_variables data
    print("Migrating sandbox_environment_variables encrypted fields...")
    sandbox_environment_variables = table(
        "sandbox_environment_variables",
        column("id", String),
        column("value", String),
        column("value_enc", Text),
    )
    _backfill_encrypted_columns(
        connection,
        sandbox_environment_variables,
        [("value", "value_enc")],
        encryption_key,
    )

    # Migrate mcp_oauth data (only the authorization_code field is encrypted)
    print("Migrating mcp_oauth encrypted fields...")
    mcp_oauth = table(
        "mcp_oauth",
        column("id", String),
        column("authorization_code", Text),
        column("authorization_code_enc", Text),
    )
    _backfill_encrypted_columns(
        connection,
        mcp_oauth,
        [("authorization_code", "authorization_code_enc")],
        encryption_key,
    )

    print("Migration complete. Plaintext columns are retained for rollback safety.")
def downgrade() -> None:
    # Intentional no-op: the upgrade only copies data into the ``*_enc``
    # columns and keeps the plaintext originals, so there is nothing to undo.
    pass
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/8149a781ac1b_backfill_encrypted_columns_for_.py",
"license": "Apache License 2.0",
"lines": 291,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:alembic/versions/c6c43222e2de_add_mcp_tools_table.py | """Add mcp_tools table
Revision ID: c6c43222e2de
Revises: 6756d04c3ddb
Create Date: 2025-10-20 17:25:54.334037
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "c6c43222e2de"
down_revision: Union[str, None] = "6756d04c3ddb"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Create the ``mcp_tools`` join table linking MCP servers to tools."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        "mcp_tools",
        sa.Column("mcp_server_id", sa.String(), nullable=False),
        sa.Column("tool_id", sa.String(), nullable=False),
        sa.Column("id", sa.String(), nullable=False),
        sa.Column("created_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=True),
        sa.Column("updated_at", sa.DateTime(timezone=True), server_default=sa.text("now()"), nullable=True),
        sa.Column("is_deleted", sa.Boolean(), server_default=sa.text("FALSE"), nullable=False),
        sa.Column("_created_by_id", sa.String(), nullable=True),
        sa.Column("_last_updated_by_id", sa.String(), nullable=True),
        sa.Column("organization_id", sa.String(), nullable=False),
        # NOTE(review): mcp_server_id / tool_id carry no FK constraints here —
        # presumably enforced at the application layer; confirm this is intended.
        sa.ForeignKeyConstraint(
            ["organization_id"],
            ["organizations.id"],
        ),
        sa.PrimaryKeyConstraint("id"),
    )
    # ### end Alembic commands ###
def downgrade() -> None:
    """Drop the ``mcp_tools`` table created by this migration."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table("mcp_tools")
    # ### end Alembic commands ###
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/c6c43222e2de_add_mcp_tools_table.py",
"license": "Apache License 2.0",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/e67961ed7c32_add_enable_parallel_execution_to_tools_table.py | """Add enable_parallel_execution to Tools table
Revision ID: e67961ed7c32
Revises: 8149a781ac1b
Create Date: 2025-10-17 15:47:00.447066
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "e67961ed7c32"
down_revision: Union[str, None] = "8149a781ac1b"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Add the nullable ``enable_parallel_execution`` flag to the ``tools`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column("tools", sa.Column("enable_parallel_execution", sa.Boolean(), nullable=True))
    # ### end Alembic commands ###
def downgrade() -> None:
    """Drop the ``enable_parallel_execution`` column from ``tools``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column("tools", "enable_parallel_execution")
    # ### end Alembic commands ###
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/e67961ed7c32_add_enable_parallel_execution_to_tools_table.py",
"license": "Apache License 2.0",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:alembic/versions/f6cd5a1e519d_add_embedding_config_field_to_archives_.py | """Add embedding config field to Archives table
Revision ID: f6cd5a1e519d
Revises: c6c43222e2de
Create Date: 2025-10-23 16:33:53.661122
"""
import json
from typing import Sequence, Union
import sqlalchemy as sa
from sqlalchemy import text
import letta.orm
from alembic import op
from letta.schemas.embedding_config import EmbeddingConfig
# revision identifiers, used by Alembic.
revision: str = "f6cd5a1e519d"
down_revision: Union[str, None] = "c6c43222e2de"
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Add a non-nullable ``embedding_config`` column to ``archives``.

    The column is added as nullable first, backfilled in batches (copying the
    config from each archive's first live passage when one exists, falling
    back to the default "letta" config otherwise), and only then made
    non-nullable.
    """
    # Step 1: add the column as nullable so existing rows remain valid.
    op.add_column("archives", sa.Column("embedding_config", letta.orm.custom_columns.EmbeddingConfigColumn(), nullable=True))

    # Step 2: backfill existing archives with embedding configs in batches.
    connection = op.get_bind()

    # Fallback config for archives that have no passages to copy from.
    fallback_config = EmbeddingConfig.default_config(model_name="letta").model_dump()

    batch_size = 100
    processed = 0
    # Each UPDATE clears the NULL that selected the row, so re-querying with a
    # plain LIMIT naturally walks the remaining work until none is left.
    while True:
        batch = connection.execute(
            text("SELECT id FROM archives WHERE embedding_config IS NULL LIMIT :batch_size"), {"batch_size": batch_size}
        ).fetchall()
        if not batch:
            break

        for (archive_id,) in batch:
            # Prefer the config already stored on one of the archive's passages.
            passage_row = connection.execute(
                text("SELECT embedding_config FROM archival_passages WHERE archive_id = :archive_id AND is_deleted = FALSE LIMIT 1"),
                {"archive_id": archive_id},
            ).fetchone()
            chosen_config = passage_row[0] if passage_row and passage_row[0] else fallback_config

            # Serialize to a JSON string since this is raw SQL, not the ORM column type.
            connection.execute(
                text("UPDATE archives SET embedding_config = :config WHERE id = :archive_id"),
                {"config": json.dumps(chosen_config), "archive_id": archive_id},
            )

        processed += len(batch)
        print(f"Backfilled {processed} archives so far...")
        # Commit after every batch so progress survives interruption.
        connection.execute(text("COMMIT"))

    print(f"Backfill complete. Total archives processed: {processed}")

    # Step 3: now that every row has a value, enforce NOT NULL.
    op.alter_column("archives", "embedding_config", nullable=False)
def downgrade() -> None:
    """Drop the ``embedding_config`` column added by this migration."""
    op.drop_column("archives", "embedding_config")
| {
"repo_id": "letta-ai/letta",
"file_path": "alembic/versions/f6cd5a1e519d_add_embedding_config_field_to_archives_.py",
"license": "Apache License 2.0",
"lines": 59,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
letta-ai/letta:letta/groups/sleeptime_multi_agent_v4.py | from collections.abc import AsyncGenerator
from datetime import datetime, timezone
from letta.agents.letta_agent_v3 import LettaAgentV3
from letta.constants import DEFAULT_MAX_STEPS
from letta.groups.helpers import stringify_message
from letta.otel.tracing import trace_method
from letta.schemas.agent import AgentState
from letta.schemas.enums import RunStatus
from letta.schemas.group import Group, ManagerType
from letta.schemas.letta_message import MessageType
from letta.schemas.letta_message_content import TextContent
from letta.schemas.letta_request import ClientToolSchema
from letta.schemas.letta_response import LettaResponse
from letta.schemas.letta_stop_reason import StopReasonType
from letta.schemas.message import Message, MessageCreate
from letta.schemas.provider_trace import BillingContext
from letta.schemas.run import Run, RunUpdate
from letta.schemas.user import User
from letta.services.group_manager import GroupManager
from letta.utils import safe_create_task
class SleeptimeMultiAgentV4(LettaAgentV3):
    """Foreground agent for a sleeptime group.

    Runs the foreground agent normally (``step``/``stream``), then fans the
    resulting conversation out to the group's background "sleeptime" agents
    for memory management, honoring ``group.sleeptime_agent_frequency``.
    """

    def __init__(
        self,
        agent_state: AgentState,
        actor: User,
        group: Group,
    ):
        super().__init__(agent_state, actor)
        assert group.manager_type == ManagerType.sleeptime, f"Expected group type to be 'sleeptime', got {group.manager_type}"
        self.group = group
        # Run ids of the background sleeptime runs issued by the latest step/stream.
        self.run_ids = []
        # Additional manager classes
        self.group_manager = GroupManager()

    @trace_method
    async def step(
        self,
        input_messages: list[MessageCreate],
        max_steps: int = DEFAULT_MAX_STEPS,
        run_id: str | None = None,
        use_assistant_message: bool = True,
        include_return_message_types: list[MessageType] | None = None,
        request_start_timestamp_ns: int | None = None,
        conversation_id: str | None = None,
        client_tools: list[ClientToolSchema] | None = None,
        include_compaction_messages: bool = False,
        billing_context: "BillingContext | None" = None,
    ) -> LettaResponse:
        """Run one foreground step, then dispatch sleeptime agents.

        Returns the foreground response with the sleeptime run ids attached
        to ``response.usage.run_ids``.
        """
        self.run_ids = []
        # Tag every input message with the group so it can be attributed later.
        for i in range(len(input_messages)):
            input_messages[i].group_id = self.group.id
        response = await super().step(
            input_messages=input_messages,
            max_steps=max_steps,
            run_id=run_id,
            use_assistant_message=use_assistant_message,
            include_return_message_types=include_return_message_types,
            request_start_timestamp_ns=request_start_timestamp_ns,
            conversation_id=conversation_id,
            client_tools=client_tools,
            include_compaction_messages=include_compaction_messages,
            billing_context=billing_context,
        )
        run_ids = await self.run_sleeptime_agents()
        response.usage.run_ids = run_ids
        return response

    @trace_method
    async def stream(
        self,
        input_messages: list[MessageCreate],
        max_steps: int = DEFAULT_MAX_STEPS,
        stream_tokens: bool = True,
        run_id: str | None = None,
        use_assistant_message: bool = True,
        request_start_timestamp_ns: int | None = None,
        include_return_message_types: list[MessageType] | None = None,
        conversation_id: str | None = None,
        client_tools: list[ClientToolSchema] | None = None,
        include_compaction_messages: bool = False,
        billing_context: "BillingContext | None" = None,
    ) -> AsyncGenerator[str, None]:
        """Stream the foreground step, then dispatch sleeptime agents."""
        self.run_ids = []
        for i in range(len(input_messages)):
            input_messages[i].group_id = self.group.id
        # Perform foreground agent step
        try:
            async for chunk in super().stream(
                input_messages=input_messages,
                max_steps=max_steps,
                stream_tokens=stream_tokens,
                run_id=run_id,
                use_assistant_message=use_assistant_message,
                include_return_message_types=include_return_message_types,
                request_start_timestamp_ns=request_start_timestamp_ns,
                conversation_id=conversation_id,
                client_tools=client_tools,
                include_compaction_messages=include_compaction_messages,
                billing_context=billing_context,
            ):
                yield chunk
        finally:
            # For some reason, stream is throwing a GeneratorExit even though it appears the that client
            # is getting the whole stream. This pattern should work to ensure sleeptime agents run despite this.
            await self.run_sleeptime_agents()

    @trace_method
    async def run_sleeptime_agents(self) -> list[str]:
        """Dispatch background sleeptime runs if the frequency policy allows.

        Returns the list of run ids issued for this step (possibly empty).
        """
        # Get response messages
        last_response_messages = self.response_messages
        # Update turns counter
        turns_counter = None
        if self.group.sleeptime_agent_frequency is not None and self.group.sleeptime_agent_frequency > 0:
            turns_counter = await self.group_manager.bump_turns_counter_async(group_id=self.group.id, actor=self.actor)
        # Perform participant steps when no frequency is set, or every Nth turn.
        if self.group.sleeptime_agent_frequency is None or (
            turns_counter is not None and turns_counter % self.group.sleeptime_agent_frequency == 0
        ):
            # Skip sleeptime processing if no response messages were generated
            if not last_response_messages:
                self.logger.warning("No response messages generated, skipping sleeptime agent processing")
                return self.run_ids
            last_processed_message_id = await self.group_manager.get_last_processed_message_id_and_update_async(
                group_id=self.group.id, last_processed_message_id=last_response_messages[-1].id, actor=self.actor
            )
            for sleeptime_agent_id in self.group.agent_ids:
                try:
                    sleeptime_run_id = await self._issue_background_task(
                        sleeptime_agent_id,
                        last_response_messages,
                        last_processed_message_id,
                    )
                    self.run_ids.append(sleeptime_run_id)
                except Exception as e:
                    # Individual task failures: log through the agent logger
                    # (previously a bare print) for consistency with the
                    # warning above, then re-raise.
                    self.logger.error(f"Sleeptime agent processing failed: {e!s}")
                    raise  # bare raise preserves the original traceback
        return self.run_ids

    @trace_method
    async def _issue_background_task(
        self,
        sleeptime_agent_id: str,
        response_messages: list[Message],
        last_processed_message_id: str,
    ) -> str:
        """Create a Run for one sleeptime agent and schedule its step in the background."""
        run = Run(
            agent_id=sleeptime_agent_id,
            status=RunStatus.created,
            metadata={
                "run_type": "sleeptime_agent_send_message_async",  # TODO(review): confirm this run_type label
                "agent_id": sleeptime_agent_id,
            },
        )
        run = await self.run_manager.create_run(pydantic_run=run, actor=self.actor)
        safe_create_task(
            self._participant_agent_step(
                foreground_agent_id=self.agent_state.id,
                sleeptime_agent_id=sleeptime_agent_id,
                response_messages=response_messages,
                last_processed_message_id=last_processed_message_id,
                run_id=run.id,
            ),
            label=f"participant_agent_step_{sleeptime_agent_id}",
        )
        return run.id

    @trace_method
    async def _participant_agent_step(
        self,
        foreground_agent_id: str,
        sleeptime_agent_id: str,
        response_messages: list[Message],
        last_processed_message_id: str,
        run_id: str,
    ) -> LettaResponse:
        """Execute one sleeptime agent pass over the latest conversation.

        Builds a transcript of the messages since the last processed id, sends
        it to the sleeptime agent as a single user message, and records the
        run outcome (completed/failed) on the Run.
        """
        try:
            # Update run status
            run_update = RunUpdate(status=RunStatus.running)
            await self.run_manager.update_run_by_id_async(run_id=run_id, update=run_update, actor=self.actor)
            # Create conversation transcript
            prior_messages = []
            if self.group.sleeptime_agent_frequency:
                try:
                    prior_messages = await self.message_manager.list_messages(
                        agent_id=foreground_agent_id,
                        actor=self.actor,
                        after=last_processed_message_id,
                        before=response_messages[0].id,
                    )
                except Exception:
                    pass  # continue with just latest messages
            message_strings = [stringify_message(message) for message in prior_messages + response_messages]
            message_strings = [s for s in message_strings if s is not None]
            messages_text = "\n".join(message_strings)
            message_text = (
                "<system-reminder>\n"
                "You are a sleeptime agent - a background agent that asynchronously processes conversations after they occur.\n\n"
                "IMPORTANT: You are NOT the primary agent. You are reviewing a conversation that already happened between a primary agent and its user:\n"
                '- Messages labeled "assistant" are from the primary agent (not you)\n'
                '- Messages labeled "user" are from the primary agent\'s user\n\n'
                "Your primary role is memory management. Review the conversation and use your memory tools to update any relevant memory blocks with information worth preserving. "
                "Check your memory_persona block for any additional instructions or policies.\n"
                "</system-reminder>\n\n"
                f"Messages:\n{messages_text}"
            )
            sleeptime_agent_messages = [
                MessageCreate(
                    role="user",
                    content=[TextContent(text=message_text)],
                    id=Message.generate_id(),
                    agent_id=sleeptime_agent_id,
                    group_id=self.group.id,
                )
            ]
            # Load sleeptime agent
            sleeptime_agent_state = await self.agent_manager.get_agent_by_id_async(agent_id=sleeptime_agent_id, actor=self.actor)
            sleeptime_agent = LettaAgentV3(
                agent_state=sleeptime_agent_state,
                actor=self.actor,
            )
            # Perform sleeptime agent step
            result = await sleeptime_agent.step(
                input_messages=sleeptime_agent_messages,
                run_id=run_id,
            )
            # Update run status
            run_update = RunUpdate(
                status=RunStatus.completed,
                completed_at=datetime.now(timezone.utc).replace(tzinfo=None),
                stop_reason=result.stop_reason.stop_reason if result.stop_reason else StopReasonType.end_turn,
                metadata={
                    "result": result.model_dump(mode="json"),
                    "agent_id": sleeptime_agent_state.id,
                },
            )
            await self.run_manager.update_run_by_id_async(run_id=run_id, update=run_update, actor=self.actor)
            return result
        except Exception as e:
            run_update = RunUpdate(
                status=RunStatus.failed,
                completed_at=datetime.now(timezone.utc).replace(tzinfo=None),
                stop_reason=StopReasonType.error,
                metadata={"error": str(e)},
            )
            await self.run_manager.update_run_by_id_async(run_id=run_id, update=run_update, actor=self.actor)
            raise
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/groups/sleeptime_multi_agent_v4.py",
"license": "Apache License 2.0",
"lines": 242,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/schemas/mcp_server.py | from datetime import datetime
from typing import Annotated, Any, Dict, List, Literal, Optional, Union
from urllib.parse import urlparse
from pydantic import Field, field_validator
from letta.functions.mcp_client.types import (
MCP_AUTH_TOKEN_BEARER_PREFIX,
MCPServerType,
)
from letta.orm.mcp_oauth import OAuthSessionStatus
from letta.schemas.enums import PrimitiveType
from letta.schemas.letta_base import LettaBase
from letta.schemas.secret import Secret
class BaseMCPServer(LettaBase):
    # Shared base so every MCP server schema generates ids with the same prefix.
    __id_prefix__ = PrimitiveType.MCP_SERVER.value
# Create Schemas (for POST requests)
class CreateStdioMCPServer(LettaBase):
    """Create a new Stdio MCP server"""

    # Literal discriminator distinguishing this member of CreateMCPServerUnion.
    mcp_server_type: Literal[MCPServerType.STDIO] = MCPServerType.STDIO
    command: str = Field(..., description="The command to run (MCP 'local' client will run this command)")
    args: List[str] = Field(..., description="The arguments to pass to the command")
    env: Optional[dict[str, str]] = Field(None, description="Environment variables to set")
class CreateSSEMCPServer(LettaBase):
    """Create a new SSE MCP server"""

    # Literal discriminator distinguishing this member of CreateMCPServerUnion.
    mcp_server_type: Literal[MCPServerType.SSE] = MCPServerType.SSE
    server_url: str = Field(..., description="The URL of the server")
    auth_header: Optional[str] = Field(None, description="The name of the authentication header (e.g., 'Authorization')")
    auth_token: Optional[str] = Field(None, description="The authentication token or API key value")
    custom_headers: Optional[dict[str, str]] = Field(None, description="Custom HTTP headers to include with requests")

    @field_validator("server_url")
    @classmethod
    def validate_server_url(cls, v: str) -> str:
        """Validate that server_url is a valid HTTP(S) URL."""
        if not v:
            raise ValueError("server_url cannot be empty")
        # Require an explicit http/https scheme and a host component.
        parsed = urlparse(v)
        if parsed.scheme not in ("http", "https"):
            raise ValueError(f"server_url must start with 'http://' or 'https://', got: '{v}'")
        if not parsed.netloc:
            raise ValueError(f"server_url must have a valid host, got: '{v}'")
        return v
class CreateStreamableHTTPMCPServer(LettaBase):
    """Create a new Streamable HTTP MCP server"""

    # Literal discriminator distinguishing this member of CreateMCPServerUnion.
    mcp_server_type: Literal[MCPServerType.STREAMABLE_HTTP] = MCPServerType.STREAMABLE_HTTP
    server_url: str = Field(..., description="The URL of the server")
    auth_header: Optional[str] = Field(None, description="The name of the authentication header (e.g., 'Authorization')")
    auth_token: Optional[str] = Field(None, description="The authentication token or API key value")
    custom_headers: Optional[dict[str, str]] = Field(None, description="Custom HTTP headers to include with requests")

    @field_validator("server_url")
    @classmethod
    def validate_server_url(cls, v: str) -> str:
        """Validate that server_url is a valid HTTP(S) URL."""
        if not v:
            raise ValueError("server_url cannot be empty")
        # Require an explicit http/https scheme and a host component.
        parsed = urlparse(v)
        if parsed.scheme not in ("http", "https"):
            raise ValueError(f"server_url must start with 'http://' or 'https://', got: '{v}'")
        if not parsed.netloc:
            raise ValueError(f"server_url must have a valid host, got: '{v}'")
        return v
CreateMCPServerUnion = Union[CreateStdioMCPServer, CreateSSEMCPServer, CreateStreamableHTTPMCPServer]
class StdioMCPServer(CreateStdioMCPServer):
    """A Stdio MCP server"""

    # Persisted form of the create schema: adds the generated id and the name.
    id: str = BaseMCPServer.generate_id_field()
    server_name: str = Field(..., description="The name of the MCP server")
class SSEMCPServer(CreateSSEMCPServer):
    """An SSE MCP server"""

    # Persisted form of the create schema: adds the generated id and the name.
    id: str = BaseMCPServer.generate_id_field()
    server_name: str = Field(..., description="The name of the MCP server")
class StreamableHTTPMCPServer(CreateStreamableHTTPMCPServer):
    """A Streamable HTTP MCP server"""

    # Persisted form of the create schema: adds the generated id and the name.
    id: str = BaseMCPServer.generate_id_field()
    server_name: str = Field(..., description="The name of the MCP server")
MCPServerUnion = Union[StdioMCPServer, SSEMCPServer, StreamableHTTPMCPServer]
# Update Schemas (for PATCH requests) - same shape as Create/Config, but all fields optional.
# We exclude fields that aren't persisted on the server model to avoid invalid ORM assignments.
class UpdateStdioMCPServer(LettaBase):
    """Update schema for Stdio MCP server - all fields optional"""

    mcp_server_type: Literal[MCPServerType.STDIO] = MCPServerType.STDIO
    # BUGFIX: these previously used Field(..., ...), which marks a field as
    # *required* in pydantic — contradicting the "all fields optional" contract
    # of this PATCH schema. Defaulting to None lets callers omit any subset.
    command: Optional[str] = Field(None, description="The command to run (MCP 'local' client will run this command)")
    args: Optional[List[str]] = Field(None, description="The arguments to pass to the command")
    env: Optional[dict[str, str]] = Field(None, description="Environment variables to set")
class UpdateSSEMCPServer(LettaBase):
    """Update schema for SSE MCP server - all fields optional"""

    mcp_server_type: Literal[MCPServerType.SSE] = MCPServerType.SSE
    # BUGFIX: Field(..., ...) marked server_url as required, contradicting the
    # "all fields optional" contract of this PATCH schema. The validator below
    # already short-circuits on None, confirming the optional intent.
    server_url: Optional[str] = Field(None, description="The URL of the server")
    auth_header: Optional[str] = Field(None, description="The name of the authentication header (e.g., 'Authorization')")
    auth_token: Optional[str] = Field(None, description="The authentication token or API key value")
    custom_headers: Optional[dict[str, str]] = Field(None, description="Custom HTTP headers to include with requests")

    @field_validator("server_url")
    @classmethod
    def validate_server_url(cls, v: Optional[str]) -> Optional[str]:
        """Validate that server_url is a valid HTTP(S) URL if provided."""
        if v is None:
            return v
        if not v:
            raise ValueError("server_url cannot be empty")
        parsed = urlparse(v)
        if parsed.scheme not in ("http", "https"):
            raise ValueError(f"server_url must start with 'http://' or 'https://', got: '{v}'")
        if not parsed.netloc:
            raise ValueError(f"server_url must have a valid host, got: '{v}'")
        return v
class UpdateStreamableHTTPMCPServer(LettaBase):
    """Update schema for Streamable HTTP MCP server - all fields optional"""

    # Discriminator for UpdateMCPServerUnion; pinned to STREAMABLE_HTTP.
    mcp_server_type: Literal[MCPServerType.STREAMABLE_HTTP] = MCPServerType.STREAMABLE_HTTP
    # Default to None so PATCH callers may omit it. Previously Field(...) made this
    # required despite the Optional annotation and the "all fields optional" contract;
    # the validator below already passes None through untouched.
    server_url: Optional[str] = Field(None, description="The URL of the server")
    auth_header: Optional[str] = Field(None, description="The name of the authentication header (e.g., 'Authorization')")
    auth_token: Optional[str] = Field(None, description="The authentication token or API key value")
    custom_headers: Optional[dict[str, str]] = Field(None, description="Custom HTTP headers to include with requests")

    @field_validator("server_url")
    @classmethod
    def validate_server_url(cls, v: Optional[str]) -> Optional[str]:
        """Validate that server_url is a valid HTTP(S) URL if provided."""
        if v is None:
            # Omitted field: nothing to validate on a PATCH.
            return v
        if not v:
            raise ValueError("server_url cannot be empty")
        parsed = urlparse(v)
        if parsed.scheme not in ("http", "https"):
            raise ValueError(f"server_url must start with 'http://' or 'https://', got: '{v}'")
        if not parsed.netloc:
            raise ValueError(f"server_url must have a valid host, got: '{v}'")
        return v
# Union of the per-transport PATCH schemas; discriminated by mcp_server_type.
UpdateMCPServerUnion = Union[UpdateStdioMCPServer, UpdateSSEMCPServer, UpdateStreamableHTTPMCPServer]
# OAuth-related schemas
class BaseMCPOAuth(LettaBase):
    """Base class for OAuth session schemas; supplies the shared ID prefix."""

    # Prefix consumed by generate_id_field() when minting OAuth-session IDs.
    __id_prefix__ = PrimitiveType.MCP_OAUTH.value
class MCPOAuthSession(BaseMCPOAuth):
    """Tracks one OAuth handshake with an MCP server, from authorization request through issued tokens."""

    id: str = BaseMCPOAuth.generate_id_field()
    state: str = Field(..., description="OAuth state parameter")
    server_id: str | None = Field(None, description="MCP server ID")
    server_url: str = Field(..., description="MCP server URL")
    server_name: str = Field(..., description="MCP server display name")

    # Ownership: which user/organization initiated the flow.
    user_id: str | None = Field(None, description="User ID associated with the session")
    organization_id: str = Field(..., description="Organization ID associated with the session")

    # Authorization leg of the flow (plaintext plus encrypted internal form).
    authorization_url: str | None = Field(None, description="OAuth authorization URL")
    authorization_code: str | None = Field(None, description="OAuth authorization code")
    authorization_code_enc: Secret | None = Field(None, description="Encrypted OAuth authorization code as Secret object")

    # Token leg of the flow (plaintext plus encrypted internal form).
    access_token: str | None = Field(None, description="OAuth access token")
    refresh_token: str | None = Field(None, description="OAuth refresh token")
    token_type: str = Field(default="Bearer", description="Token type")
    expires_at: datetime | None = Field(None, description="Token expiry time")
    scope: str | None = Field(None, description="OAuth scope")
    access_token_enc: Secret | None = Field(None, description="Encrypted OAuth access token as Secret object")
    refresh_token_enc: Secret | None = Field(None, description="Encrypted OAuth refresh token as Secret object")

    # OAuth client registration data.
    client_id: str | None = Field(None, description="OAuth client ID")
    client_secret: str | None = Field(None, description="OAuth client secret")
    redirect_uri: str | None = Field(None, description="OAuth redirect URI")
    client_secret_enc: Secret | None = Field(None, description="Encrypted OAuth client secret as Secret object")

    # Lifecycle bookkeeping.
    # NOTE(review): datetime.now() is timezone-naive — confirm UTC expectations upstream.
    status: OAuthSessionStatus = Field(default=OAuthSessionStatus.PENDING, description="Session status")
    created_at: datetime = Field(default_factory=datetime.now, description="Session creation time")
    updated_at: datetime = Field(default_factory=datetime.now, description="Last update time")

    def get_access_token_secret(self) -> Secret:
        """Return the stored access-token Secret, or an empty Secret when unset."""
        if self.access_token_enc is None:
            return Secret.from_plaintext(None)
        return self.access_token_enc

    def get_refresh_token_secret(self) -> Secret:
        """Return the stored refresh-token Secret, or an empty Secret when unset."""
        if self.refresh_token_enc is None:
            return Secret.from_plaintext(None)
        return self.refresh_token_enc

    def get_client_secret_secret(self) -> Secret:
        """Return the stored client-secret Secret, or an empty Secret when unset."""
        if self.client_secret_enc is None:
            return Secret.from_plaintext(None)
        return self.client_secret_enc

    def get_authorization_code_secret(self) -> Secret:
        """Return the stored authorization-code Secret, or an empty Secret when unset."""
        if self.authorization_code_enc is None:
            return Secret.from_plaintext(None)
        return self.authorization_code_enc

    def set_access_token_secret(self, secret: Secret) -> None:
        """Store *secret* as the encrypted access token."""
        self.access_token_enc = secret

    def set_refresh_token_secret(self, secret: Secret) -> None:
        """Store *secret* as the encrypted refresh token."""
        self.refresh_token_enc = secret

    def set_client_secret_secret(self, secret: Secret) -> None:
        """Store *secret* as the encrypted client secret."""
        self.client_secret_enc = secret

    def set_authorization_code_secret(self, secret: Secret) -> None:
        """Store *secret* as the encrypted authorization code."""
        self.authorization_code_enc = secret
class MCPOAuthSessionCreate(BaseMCPOAuth):
    """Input payload for opening a fresh OAuth session."""

    server_url: str = Field(..., description="MCP server URL")
    server_name: str = Field(..., description="MCP server display name")
    user_id: str | None = Field(None, description="User ID associated with the session")
    organization_id: str = Field(..., description="Organization ID associated with the session")
    state: str | None = Field(None, description="OAuth state parameter")
class MCPOAuthSessionUpdate(BaseMCPOAuth):
    """Partial-update payload for an OAuth session; unset fields are left untouched."""

    authorization_url: str | None = Field(None, description="OAuth authorization URL")
    authorization_code: str | None = Field(None, description="OAuth authorization code")
    access_token: str | None = Field(None, description="OAuth access token")
    refresh_token: str | None = Field(None, description="OAuth refresh token")
    token_type: str | None = Field(None, description="Token type")
    expires_at: datetime | None = Field(None, description="Token expiry time")
    scope: str | None = Field(None, description="OAuth scope")
    client_id: str | None = Field(None, description="OAuth client ID")
    client_secret: str | None = Field(None, description="OAuth client secret")
    redirect_uri: str | None = Field(None, description="OAuth redirect URI")
    status: OAuthSessionStatus | None = Field(None, description="Session status")
class MCPServerResyncResult(LettaBase):
    """Summary of a tool-resync pass against an MCP server."""

    # Tool names removed because they no longer exist on the server.
    deleted: list[str] = Field(default_factory=list, description="List of deleted tool names")
    # Tool names whose schemas were refreshed in place.
    updated: list[str] = Field(default_factory=list, description="List of updated tool names")
    # Tool names newly discovered on the server.
    added: list[str] = Field(default_factory=list, description="List of added tool names")
class ToolExecuteRequest(LettaBase):
    """Payload carrying the arguments for a single tool invocation."""

    # Keyword arguments forwarded to the tool; defaults to no arguments.
    args: dict[str, Any] = Field(default_factory=dict, description="Arguments to pass to the tool")
# Wrapper models for API requests with discriminated unions
class CreateMCPServerRequest(LettaBase):
    """Request to create a new MCP server with configuration."""

    server_name: str = Field(..., description="The name of the MCP server")
    # Pydantic selects the concrete config model via the 'mcp_server_type' discriminator.
    config: Annotated[
        CreateMCPServerUnion,
        Field(..., discriminator="mcp_server_type", description="The MCP server configuration (Stdio, SSE, or Streamable HTTP)"),
    ]
class UpdateMCPServerRequest(LettaBase):
    """Request to update an existing MCP server configuration."""

    server_name: Optional[str] = Field(None, description="The name of the MCP server")
    # Pydantic selects the concrete update model via the 'mcp_server_type' discriminator.
    config: Annotated[
        UpdateMCPServerUnion,
        Field(..., discriminator="mcp_server_type", description="The MCP server configuration updates (Stdio, SSE, or Streamable HTTP)"),
    ]
async def convert_generic_to_union(server) -> MCPServerUnion:
    """
    Convert a generic MCPServer (from letta.schemas.mcp) to the appropriate MCPServerUnion type
    based on the server_type field.

    Args:
        server: A GenericMCPServer instance from letta.schemas.mcp

    Returns:
        The appropriate MCPServerUnion type (StdioMCPServer, SSEMCPServer, or StreamableHTTPMCPServer)
    """
    # Import here to avoid circular dependency
    from letta.schemas.mcp import MCPServer as GenericMCPServer

    if not isinstance(server, GenericMCPServer):
        raise TypeError(f"Expected GenericMCPServer, got {type(server)}")

    server_type = server.server_type

    if server_type == MCPServerType.STDIO:
        stdio = server.stdio_config
        return StdioMCPServer(
            id=server.id,
            server_name=server.server_name,
            mcp_server_type=MCPServerType.STDIO,
            command=stdio.command if stdio else None,
            args=stdio.args if stdio else None,
            env=stdio.env if stdio else None,
        )

    if server_type in (MCPServerType.SSE, MCPServerType.STREAMABLE_HTTP):
        # SSE and Streamable HTTP share the same shape; decrypt token/header data once.
        plaintext = await server.token_enc.get_plaintext_async() if server.token_enc else None
        shared = dict(
            id=server.id,
            server_name=server.server_name,
            server_url=server.server_url,
            auth_header="Authorization" if plaintext else None,
            auth_token=f"Bearer {plaintext}" if plaintext else None,
            custom_headers=await server.get_custom_headers_dict_async(),
        )
        if server_type == MCPServerType.SSE:
            return SSEMCPServer(mcp_server_type=MCPServerType.SSE, **shared)
        return StreamableHTTPMCPServer(mcp_server_type=MCPServerType.STREAMABLE_HTTP, **shared)

    raise ValueError(f"Unknown server type: {server_type}")
def _strip_bearer_prefix(token: str) -> str:
    """Drop a leading '<MCP_AUTH_TOKEN_BEARER_PREFIX> ' from *token* if present (internal storage holds the bare token)."""
    return token.removeprefix(f"{MCP_AUTH_TOKEN_BEARER_PREFIX} ")


def convert_update_to_internal(request: UpdateMCPServerRequest):
    """Convert external UpdateMCPServerRequest to internal UpdateMCPServer union used by the manager.

    External API Request Structure (UpdateMCPServerRequest):
    - server_name: Optional[str] (at top level)
    - config: UpdateMCPServerUnion
      - UpdateStdioMCPServer: command, args, env (flat fields)
      - UpdateSSEMCPServer: server_url, auth_header, auth_token, custom_headers
      - UpdateStreamableHTTPMCPServer: server_url, auth_header, auth_token, custom_headers

    Internal Layer (schemas/mcp.py):
    - UpdateStdioMCPServer: server_name, stdio_config (wrapped in StdioServerConfig)
    - UpdateSSEMCPServer: server_name, server_url, token (not auth_token!), custom_headers
    - UpdateStreamableHTTPMCPServer: server_name, server_url, auth_header, auth_token, custom_headers

    This function:
    1. Extracts server_name from request (top level)
    2. Wraps stdio fields into StdioServerConfig
    3. Maps auth_token -> token for SSE (internal uses 'token')
    4. Passes through auth_header + auth_token for StreamableHTTP
    5. Strips 'Bearer ' prefix from tokens if present
    """
    # Local import to avoid circulars
    from letta.functions.mcp_client.types import MCPServerType as MCPType, StdioServerConfig as StdioCfg
    from letta.schemas.mcp import (
        UpdateSSEMCPServer as InternalUpdateSSE,
        UpdateStdioMCPServer as InternalUpdateStdio,
        UpdateStreamableHTTPMCPServer as InternalUpdateHTTP,
    )

    config = request.config
    server_name = request.server_name

    if isinstance(config, UpdateStdioMCPServer):
        # Only include fields that were explicitly provided in the PATCH.
        kwargs: dict = {}
        if server_name is not None:
            kwargs["server_name"] = server_name
        # Only build stdio_config when command and args are both explicitly provided.
        if config.command is not None and config.args is not None:
            kwargs["stdio_config"] = StdioCfg(
                server_name=server_name or "",  # placeholder; manager overwrites if needed
                type=MCPType.STDIO,
                command=config.command,
                args=config.args,
                env=config.env,
            )
        return InternalUpdateStdio(**kwargs)

    if isinstance(config, UpdateSSEMCPServer):
        kwargs = {}
        if server_name is not None:
            kwargs["server_name"] = server_name
        if config.server_url is not None:
            kwargs["server_url"] = config.server_url
        if config.auth_token is not None:
            # Internal SSE schema stores the bare token (no 'Bearer ' prefix) under 'token'.
            kwargs["token"] = _strip_bearer_prefix(config.auth_token)
        if config.custom_headers is not None:
            kwargs["custom_headers"] = config.custom_headers
        return InternalUpdateSSE(**kwargs)

    if isinstance(config, UpdateStreamableHTTPMCPServer):
        kwargs = {}
        if server_name is not None:
            kwargs["server_name"] = server_name
        if config.server_url is not None:
            kwargs["server_url"] = config.server_url
        if config.auth_header is not None:
            kwargs["auth_header"] = config.auth_header
        if config.auth_token is not None:
            # StreamableHTTP keeps the auth_header/auth_token pair, bare token only.
            kwargs["auth_token"] = _strip_bearer_prefix(config.auth_token)
        if config.custom_headers is not None:
            kwargs["custom_headers"] = config.custom_headers
        return InternalUpdateHTTP(**kwargs)

    raise TypeError(f"Unsupported update config type: {type(config)}")
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/schemas/mcp_server.py",
"license": "Apache License 2.0",
"lines": 371,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/server/rest_api/routers/v1/chat_completions.py | from typing import Optional, Union
from fastapi import APIRouter, Body, Depends
from fastapi.responses import StreamingResponse
from openai.types.chat import ChatCompletion
from openai.types.chat.chat_completion_message_param import ChatCompletionMessageParam
from pydantic import BaseModel, Field
from letta.errors import LettaInvalidArgumentError
from letta.log import get_logger
from letta.schemas.enums import MessageRole
from letta.schemas.letta_request import LettaStreamingRequest
from letta.schemas.message import MessageCreate
from letta.server.rest_api.dependencies import HeaderParams, get_headers, get_letta_server
from letta.server.server import SyncServer
from letta.services.streaming_service import StreamingService
logger = get_logger(__name__)
router = APIRouter(tags=["chat"])
class ChatCompletionRequest(BaseModel):
    """OpenAI-compatible chat completion request - exactly matching OpenAI's schema."""

    # NOTE(review): on this endpoint 'model' carries a Letta agent ID ('agent-...'),
    # not an OpenAI model name — enforced in _handle_chat_completion.
    model: str = Field(..., description="ID of the model to use")
    messages: list[ChatCompletionMessageParam] = Field(..., description="Messages comprising the conversation so far")
    # optional parameters
    temperature: Optional[float] = Field(None, ge=0, le=2, description="Sampling temperature")
    top_p: Optional[float] = Field(None, ge=0, le=1, description="Nucleus sampling parameter")
    n: Optional[int] = Field(1, ge=1, description="Number of chat completion choices to generate")
    # Only stream=true is currently supported; non-streaming requests are rejected downstream.
    stream: Optional[bool] = Field(False, description="Whether to stream back partial progress")
    stop: Optional[Union[str, list[str]]] = Field(None, description="Sequences where the API will stop generating")
    max_tokens: Optional[int] = Field(None, description="Maximum number of tokens to generate")
    presence_penalty: Optional[float] = Field(None, ge=-2, le=2, description="Presence penalty")
    frequency_penalty: Optional[float] = Field(None, ge=-2, le=2, description="Frequency penalty")
    # When set, this overrides the header-derived actor during user resolution.
    user: Optional[str] = Field(None, description="A unique identifier representing your end-user")
async def _handle_chat_completion(
    request: ChatCompletionRequest,
    server: SyncServer,
    headers: HeaderParams,
) -> Union[ChatCompletion, StreamingResponse]:
    """
    Internal handler for chat completion logic.

    Args:
        request: OpenAI-compatible chat completion request
        server: Letta server instance
        headers: Request headers with user info

    Returns:
        Streaming or non-streaming chat completion response
    """
    # Resolve the acting user: the OpenAI 'user' field wins over header identity.
    actor_id = request.user if request.user else headers.actor_id
    actor = await server.user_manager.get_actor_or_default_async(actor_id=actor_id)

    # The OpenAI 'model' slot carries a Letta agent ID on this endpoint.
    resolved_agent_id = request.model
    if not resolved_agent_id.startswith("agent-"):
        raise LettaInvalidArgumentError(
            f"For this endpoint, the 'model' field should contain an agent ID (format: 'agent-...'). Received: '{resolved_agent_id}'",
            argument_name="model",
        )
    await server.agent_manager.validate_agent_exists_async(resolved_agent_id, actor)

    # Letta keeps conversation state server-side, so only the final user message matters.
    if len(request.messages) > 1:
        logger.warning(
            f"Chat completions endpoint received {len(request.messages)} messages. "
            "Letta maintains conversation state internally, so only the last user message will be processed. "
            "Previous messages are already stored in the agent's memory."
        )

    last_user_message = next(
        (msg for msg in reversed(request.messages) if msg.get("role", "user") == "user"),
        None,
    )
    if not last_user_message:
        raise LettaInvalidArgumentError(
            "No user message found in the request. Please include at least one message with role='user'.",
            argument_name="messages",
        )

    letta_request = LettaStreamingRequest(
        messages=[
            MessageCreate(
                role=MessageRole.user,
                content=last_user_message.get("content", ""),
            )
        ],
        stream_tokens=True,
    )

    if not request.stream:
        raise LettaInvalidArgumentError(
            "Non-streaming chat completions not yet implemented. Please set stream=true.",
            argument_name="stream",
        )

    streaming_service = StreamingService(server)
    return await streaming_service.create_agent_stream_openai_chat_completions(
        agent_id=resolved_agent_id,
        actor=actor,
        request=letta_request,
    )
@router.post(
    "/chat/completions",
    response_model=ChatCompletion,
    responses={
        200: {
            "description": "Successful response",
            "content": {
                "application/json": {"schema": {"$ref": "#/components/schemas/ChatCompletion"}},
                "text/event-stream": {"description": "Server-Sent Events stream (when stream=true)"},
            },
        }
    },
    operation_id="create_chat_completion",
)
async def create_chat_completion(
    request: ChatCompletionRequest = Body(...),
    server: SyncServer = Depends(get_letta_server),
    headers: HeaderParams = Depends(get_headers),
) -> Union[ChatCompletion, StreamingResponse]:
    """
    Create a chat completion using a Letta agent (OpenAI-compatible).
    This endpoint provides full OpenAI API compatibility. The agent is selected based on:
    - The 'model' parameter in the request (should contain an agent ID in format 'agent-...')
    When streaming is enabled (stream=true), the response will be Server-Sent Events
    with ChatCompletionChunk objects.
    """
    # All validation, actor resolution, and agent dispatch live in the internal handler.
    return await _handle_chat_completion(request, server, headers)
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/server/rest_api/routers/v1/chat_completions.py",
"license": "Apache License 2.0",
"lines": 123,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/server/rest_api/routers/v1/internal_runs.py | from datetime import datetime
from typing import List, Literal, Optional
from fastapi import APIRouter, Depends, Query
from letta.schemas.enums import ComparisonOperator, RunStatus
from letta.schemas.letta_stop_reason import StopReasonType
from letta.schemas.run import Run
from letta.server.rest_api.dependencies import HeaderParams, get_headers, get_letta_server
from letta.server.server import SyncServer
router = APIRouter(prefix="/_internal_runs", tags=["_internal_runs"])
def convert_statuses_to_enum(statuses: Optional[List[str]]) -> Optional[List[RunStatus]]:
    """Map raw status strings onto RunStatus members, passing None through untouched.

    Args:
        statuses: List of status strings or None

    Returns:
        List of RunStatus enum values or None if input is None
    """
    return None if statuses is None else [RunStatus(value) for value in statuses]
@router.get("/", response_model=List[Run], operation_id="list_internal_runs")
async def list_runs(
    server: "SyncServer" = Depends(get_letta_server),
    run_id: Optional[str] = Query(None, description="Filter by a specific run ID."),
    agent_id: Optional[str] = Query(None, description="The unique identifier of the agent associated with the run."),
    agent_ids: Optional[List[str]] = Query(
        None,
        description="The unique identifiers of the agents associated with the run. Deprecated in favor of agent_id field.",
        deprecated=True,
    ),
    statuses: Optional[List[str]] = Query(None, description="Filter runs by status. Can specify multiple statuses."),
    background: Optional[bool] = Query(None, description="If True, filters for runs that were created in background mode."),
    stop_reason: Optional[StopReasonType] = Query(None, description="Filter runs by stop reason."),
    template_family: Optional[str] = Query(None, description="Filter runs by template family (base_template_id)."),
    step_count: Optional[int] = Query(None, description="Filter runs by step count. Must be provided with step_count_operator."),
    step_count_operator: ComparisonOperator = Query(
        ComparisonOperator.EQ,
        description="Operator for step_count filter: 'eq' for equals, 'gte' for greater than or equal, 'lte' for less than or equal.",
    ),
    tools_used: Optional[List[str]] = Query(None, description="Filter runs that used any of the specified tools."),
    before: Optional[str] = Query(
        None, description="Run ID cursor for pagination. Returns runs that come before this run ID in the specified sort order"
    ),
    after: Optional[str] = Query(
        None, description="Run ID cursor for pagination. Returns runs that come after this run ID in the specified sort order"
    ),
    limit: Optional[int] = Query(100, description="Maximum number of runs to return", ge=1, le=1000),
    order: Literal["asc", "desc"] = Query(
        "desc", description="Sort order for runs by creation time. 'asc' for oldest first, 'desc' for newest first"
    ),
    order_by: Literal["created_at", "duration"] = Query("created_at", description="Field to sort by"),
    active: bool = Query(False, description="Filter for active runs."),
    ascending: bool = Query(
        False,
        description="Whether to sort agents oldest to newest (True) or newest to oldest (False, default). Deprecated in favor of order field.",
        deprecated=True,
    ),
    project_id: Optional[str] = Query(None, description="Filter runs by project ID."),
    conversation_id: Optional[str] = Query(None, description="Filter runs by conversation ID."),
    duration_percentile: Optional[int] = Query(
        None, description="Filter runs by duration percentile (1-100). Returns runs slower than this percentile."
    ),
    duration_value: Optional[int] = Query(
        None, description="Duration value in nanoseconds for filtering. Must be used with duration_operator."
    ),
    duration_operator: Optional[Literal["gt", "lt", "eq"]] = Query(
        None, description="Comparison operator for duration filter: 'gt' (greater than), 'lt' (less than), 'eq' (equals)."
    ),
    start_date: Optional[datetime] = Query(None, description="Filter runs created on or after this date (ISO 8601 format)."),
    end_date: Optional[datetime] = Query(None, description="Filter runs created on or before this date (ISO 8601 format)."),
    headers: HeaderParams = Depends(get_headers),
):
    """
    List all runs.
    """
    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
    runs_manager = server.run_manager

    # Back-compat: 'active' selects the non-terminal statuses when none were given explicitly.
    if statuses is None and active:
        statuses = [RunStatus.created, RunStatus.running]

    # 'agent_id' supersedes the deprecated 'agent_ids' list when provided.
    if agent_id:
        agent_ids = [agent_id]

    # Deprecated 'ascending=True' takes precedence; otherwise derive direction from 'order'.
    sort_ascending = ascending or (order == "asc")

    parsed_statuses = convert_statuses_to_enum(statuses)

    # Both halves of the duration filter must be present for it to apply.
    duration_filter = (
        {"value": duration_value, "operator": duration_operator}
        if duration_value is not None and duration_operator is not None
        else None
    )

    return await runs_manager.list_runs(
        actor=actor,
        run_id=run_id,
        agent_ids=agent_ids,
        statuses=parsed_statuses,
        limit=limit,
        before=before,
        after=after,
        ascending=sort_ascending,
        stop_reason=stop_reason,
        background=background,
        template_family=template_family,
        step_count=step_count,
        step_count_operator=step_count_operator,
        tools_used=tools_used,
        project_id=project_id,
        conversation_id=conversation_id,
        order_by=order_by,
        duration_percentile=duration_percentile,
        duration_filter=duration_filter,
        start_date=start_date,
        end_date=end_date,
    )
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/server/rest_api/routers/v1/internal_runs.py",
"license": "Apache License 2.0",
"lines": 119,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
letta-ai/letta:letta/server/rest_api/routers/v1/mcp_servers.py | from typing import AsyncGenerator, List, Optional, Union
from fastapi import APIRouter, Body, Depends, Request
from httpx import HTTPStatusError
from starlette.responses import StreamingResponse
from letta.errors import LettaMCPConnectionError
from letta.functions.mcp_client.types import SSEServerConfig, StdioServerConfig, StreamableHTTPServerConfig
from letta.log import get_logger
from letta.schemas.mcp_server import (
CreateMCPServerRequest,
MCPServerUnion,
ToolExecuteRequest,
UpdateMCPServerRequest,
convert_generic_to_union,
convert_update_to_internal,
)
from letta.schemas.tool import Tool
from letta.schemas.tool_execution_result import ToolExecutionResult
from letta.server.rest_api.dependencies import (
HeaderParams,
get_headers,
get_letta_server,
)
from letta.server.rest_api.streaming_response import StreamingResponseWithStatusCode
from letta.server.server import SyncServer
from letta.services.mcp.oauth_utils import drill_down_exception, oauth_stream_event
from letta.services.mcp.stdio_client import AsyncStdioMCPClient
from letta.services.mcp.types import OauthStreamEvent
router = APIRouter(prefix="/mcp-servers", tags=["mcp-servers"])
logger = get_logger(__name__)
@router.post(
    "/",
    response_model=MCPServerUnion,
    operation_id="mcp_create_mcp_server",
)
async def create_mcp_server(
    request: CreateMCPServerRequest = Body(...),
    server: SyncServer = Depends(get_letta_server),
    headers: HeaderParams = Depends(get_headers),
):
    """
    Add a new MCP server to the Letta MCP server config
    """
    # TODO: add the tools to the MCP server table we made.
    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
    created = await server.mcp_server_manager.create_mcp_server_from_request(request, actor=actor)
    # Map the internal record onto its typed API representation.
    return await convert_generic_to_union(created)
@router.get(
    "/",
    response_model=List[MCPServerUnion],
    operation_id="mcp_list_mcp_servers",
)
async def list_mcp_servers(
    server: SyncServer = Depends(get_letta_server),
    headers: HeaderParams = Depends(get_headers),
):
    """
    Get a list of all configured MCP servers
    """
    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
    mcp_servers = await server.mcp_server_manager.list_mcp_servers(actor=actor)
    # Convert each internal record to its typed API union form (async comprehension
    # replaces the manual append loop).
    return [await convert_generic_to_union(mcp_server) for mcp_server in mcp_servers]
@router.get(
    "/{mcp_server_id}",
    response_model=MCPServerUnion,
    operation_id="mcp_retrieve_mcp_server",
)
async def retrieve_mcp_server(
    mcp_server_id: str,
    server: SyncServer = Depends(get_letta_server),
    headers: HeaderParams = Depends(get_headers),
):
    """
    Get a specific MCP server
    """
    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
    record = await server.mcp_server_manager.get_mcp_server_by_id_async(mcp_server_id=mcp_server_id, actor=actor)
    # Map the internal record onto its typed API representation.
    return await convert_generic_to_union(record)
@router.delete(
    "/{mcp_server_id}",
    status_code=204,
    operation_id="mcp_delete_mcp_server",
)
async def delete_mcp_server(
    mcp_server_id: str,
    server: SyncServer = Depends(get_letta_server),
    headers: HeaderParams = Depends(get_headers),
):
    """
    Delete an MCP server by its ID
    """
    # Resolve the acting user from request headers (or the default actor).
    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
    # Manager performs the lookup and removal; route responds 204 with no body.
    await server.mcp_server_manager.delete_mcp_server_by_id(mcp_server_id, actor=actor)
@router.patch(
    "/{mcp_server_id}",
    response_model=MCPServerUnion,
    operation_id="mcp_update_mcp_server",
)
async def update_mcp_server(
    mcp_server_id: str,
    request: UpdateMCPServerRequest = Body(...),
    server: SyncServer = Depends(get_letta_server),
    headers: HeaderParams = Depends(get_headers),
):
    """
    Update an existing MCP server configuration
    """
    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
    # Translate the external PATCH payload into the manager's internal update union,
    # apply it, then map the result back onto the typed API representation.
    updated = await server.mcp_server_manager.update_mcp_server_by_id(
        mcp_server_id=mcp_server_id, mcp_server_update=convert_update_to_internal(request), actor=actor
    )
    return await convert_generic_to_union(updated)
@router.get("/{mcp_server_id}/tools", response_model=List[Tool], operation_id="mcp_list_tools_for_mcp_server")
async def list_tools_for_mcp_server(
    mcp_server_id: str,
    server: SyncServer = Depends(get_letta_server),
    headers: HeaderParams = Depends(get_headers),
):
    """
    Get a list of all tools for a specific MCP server
    """
    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
    # Served from the database via the MCPTools mapping rather than a live server round-trip.
    return await server.mcp_server_manager.list_tools_by_mcp_server_from_db(mcp_server_id, actor=actor)
@router.get("/{mcp_server_id}/tools/{tool_id}", response_model=Tool, operation_id="mcp_retrieve_mcp_tool")
async def retrieve_mcp_tool(
    mcp_server_id: str,
    tool_id: str,
    server: SyncServer = Depends(get_letta_server),
    headers: HeaderParams = Depends(get_headers),
):
    """
    Get a specific MCP tool by its ID
    """
    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
    return await server.mcp_server_manager.get_tool_by_mcp_server(mcp_server_id, tool_id, actor=actor)
@router.post("/{mcp_server_id}/tools/{tool_id}/run", response_model=ToolExecutionResult, operation_id="mcp_run_tool")
async def run_mcp_tool(
    mcp_server_id: str,
    tool_id: str,
    server: SyncServer = Depends(get_letta_server),
    headers: HeaderParams = Depends(get_headers),
    request: ToolExecuteRequest = Body(default=ToolExecuteRequest()),
):
    """
    Execute a specific MCP tool
    The request body should contain the tool arguments in the ToolExecuteRequest format.
    """
    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
    # Run the tool through the manager; it reports (result, success) as a pair.
    func_return, succeeded = await server.mcp_server_manager.execute_mcp_server_tool(
        mcp_server_id=mcp_server_id,
        tool_id=tool_id,
        tool_args=request.args,
        environment_variables={},  # TODO: Get environment variables from somewhere if needed
        actor=actor,
    )
    # Collapse the pair into the standard execution-result schema.
    return ToolExecutionResult(
        status="success" if succeeded else "error",
        func_return=func_return,
    )
@router.patch("/{mcp_server_id}/refresh", operation_id="mcp_refresh_mcp_server_tools")
async def refresh_mcp_server_tools(
    mcp_server_id: str,
    server: SyncServer = Depends(get_letta_server),
    headers: HeaderParams = Depends(get_headers),
    agent_id: Optional[str] = None,
):
    """
    Refresh tools for an MCP server by:
    1. Fetching current tools from the MCP server
    2. Deleting tools that no longer exist on the server
    3. Updating schemas for existing tools
    4. Adding new tools from the server
    Returns a summary of changes made.
    """
    # Resolve the acting user, then delegate the full resync to the manager,
    # which returns the change summary that we pass straight through.
    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
    return await server.mcp_server_manager.resync_mcp_server_tools(mcp_server_id, actor=actor, agent_id=agent_id)
@router.get(
    "/connect/{mcp_server_id}",
    response_model=None,
    # TODO: make this into a model?
    responses={
        200: {
            "description": "Successful response",
            "content": {
                "text/event-stream": {"description": "Server-Sent Events stream"},
            },
        }
    },
    operation_id="mcp_connect_mcp_server",
)
async def connect_mcp_server(
    mcp_server_id: str,
    request: Request,
    server: SyncServer = Depends(get_letta_server),
    headers: HeaderParams = Depends(get_headers),
) -> StreamingResponse:
    """
    Connect to an MCP server with support for OAuth via SSE.
    Returns a stream of events handling authorization state and exchange if OAuth is required.

    Event flow (Server-Sent Events): CONNECTION_ATTEMPT, then either SUCCESS
    (with the server's tool list), ERROR, or the OAuth authorization events
    emitted by `handle_oauth_flow` when the server requires OAuth.
    """
    # Resolve the acting user and load the stored MCP server record.
    actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
    mcp_server = await server.mcp_server_manager.get_mcp_server_by_id_async(mcp_server_id=mcp_server_id, actor=actor)
    # Convert the MCP server to the appropriate config type
    # (stdio / SSE / streamable HTTP); variables are resolved later, just
    # before the client is created.
    config = await mcp_server.to_config_async(resolve_variables=False)
    async def oauth_stream_generator(
        mcp_config: Union[StdioServerConfig, SSEServerConfig, StreamableHTTPServerConfig],
        http_request: Request,
    ) -> AsyncGenerator[str, None]:
        # Inner SSE generator. Control flow is deliberate and order-sensitive:
        # a plain connection is attempted first; only when it fails with a
        # connection-type error (or an HTTP 401 surfaced during cleanup) does
        # the OAuth flow run. `oauth_flow_attempted` guards against running
        # the OAuth flow twice.
        client = None
        oauth_flow_attempted = False
        try:
            # Acknowledge connection attempt
            yield oauth_stream_event(OauthStreamEvent.CONNECTION_ATTEMPT, server_name=mcp_config.server_name)
            # Create MCP client with respective transport type
            try:
                mcp_config.resolve_environment_variables()
                client = await server.mcp_server_manager.get_mcp_client(mcp_config, actor)
            except ValueError as e:
                # Bad/unsupported config — report and end the stream.
                yield oauth_stream_event(OauthStreamEvent.ERROR, message=str(e))
                return
            # Try normal connection first for flows that don't require OAuth
            try:
                await client.connect_to_server()
                tools = await client.list_tools(serialize=True)
                yield oauth_stream_event(OauthStreamEvent.SUCCESS, tools=tools)
                return
            except (ConnectionError, LettaMCPConnectionError):
                # Connection refused/unauthorized. stdio transports cannot do
                # OAuth, so that case is a terminal error.
                if isinstance(client, AsyncStdioMCPClient):
                    logger.warning("OAuth not supported for stdio")
                    yield oauth_stream_event(OauthStreamEvent.ERROR, message="OAuth not supported for stdio")
                    return
                # Continue to OAuth flow
                logger.info(f"Attempting OAuth flow for {mcp_config}...")
            except Exception as e:
                # Any other failure is terminal — report and end the stream.
                yield oauth_stream_event(OauthStreamEvent.ERROR, message=f"Connection failed: {str(e)}")
                return
            finally:
                # Cleanup runs on every path out of the try above, including
                # the early `return`s. Note: yields inside this finally only
                # execute on paths where the generator keeps running.
                if client:
                    try:
                        await client.cleanup()
                    # This is a workaround to catch the expected 401 Unauthorized from the official MCP SDK, see their streamable_http.py
                    # For SSE transport types, we catch the ConnectionError above, but Streamable HTTP doesn't bubble up the exception
                    except HTTPStatusError:
                        oauth_flow_attempted = True
                        async for event in server.mcp_server_manager.handle_oauth_flow(
                            request=mcp_config, actor=actor, http_request=http_request
                        ):
                            yield event
            # Failsafe to make sure we don't try to handle OAuth flow twice
            if not oauth_flow_attempted:
                async for event in server.mcp_server_manager.handle_oauth_flow(request=mcp_config, actor=actor, http_request=http_request):
                    yield event
            return
        except Exception as e:
            # Catch-all for unexpected errors anywhere in the stream; surface
            # a detailed diagnostic to both the log and the SSE client.
            detailed_error = drill_down_exception(e)
            logger.error(f"Error in OAuth stream:\n{detailed_error}")
            yield oauth_stream_event(OauthStreamEvent.ERROR, message=f"Internal error: {detailed_error}")
        finally:
            # Final cleanup; cleanup() may already have run in the inner
            # finally, so failures here are logged and swallowed.
            if client:
                try:
                    await client.cleanup()
                except Exception as cleanup_error:
                    logger.warning(f"Error during temp MCP client cleanup: {cleanup_error}")
    return StreamingResponseWithStatusCode(oauth_stream_generator(config, request), media_type="text/event-stream")
| {
"repo_id": "letta-ai/letta",
"file_path": "letta/server/rest_api/routers/v1/mcp_servers.py",
"license": "Apache License 2.0",
"lines": 274,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.