sample_id stringlengths 21 196 | text stringlengths 105 936k | metadata dict | category stringclasses 6
values |
|---|---|---|---|
langchain-ai/langchain:libs/partners/openrouter/tests/unit_tests/test_chat_models.py | """Unit tests for `ChatOpenRouter` chat model."""
from __future__ import annotations
import warnings
from typing import Any, Literal
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from langchain_core.load import dumpd, dumps, load
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
ChatMessage,
ChatMessageChunk,
HumanMessage,
HumanMessageChunk,
SystemMessage,
SystemMessageChunk,
ToolMessage,
)
from langchain_core.runnables import RunnableBinding
from pydantic import BaseModel, Field, SecretStr
from langchain_openrouter.chat_models import (
ChatOpenRouter,
_convert_chunk_to_message_chunk,
_convert_dict_to_message,
_convert_file_block_to_openrouter,
_convert_message_to_dict,
_convert_video_block_to_openrouter,
_create_usage_metadata,
_format_message_content,
_has_file_content_blocks,
_wrap_messages_for_sdk,
)
MODEL_NAME = "openai/gpt-4o-mini"
def _make_model(**kwargs: Any) -> ChatOpenRouter:
    """Build a `ChatOpenRouter` preconfigured for unit tests.

    Defaults to ``MODEL_NAME`` and a dummy API key; any keyword argument
    passed by the caller overrides the corresponding default.
    """
    params: dict[str, Any] = {
        "model": MODEL_NAME,
        "api_key": SecretStr("test-key"),
        **kwargs,
    }
    return ChatOpenRouter(**params)
# ---------------------------------------------------------------------------
# Pydantic schemas used across multiple test classes
# ---------------------------------------------------------------------------
class GetWeather(BaseModel):
    """Get the current weather in a given location."""

    # Used as a tool schema in the bind_tools / request-payload tests below;
    # field metadata must stay stable because tests assert on the converted
    # OpenAI-style function definition.
    location: str = Field(description="The city and state")
class GenerateUsername(BaseModel):
    """Generate a username from a full name."""

    # Used as the schema in the with_structured_output tests below; the class
    # name appears in asserted `json_schema` payloads, so do not rename.
    name: str = Field(description="The full name")
    hair_color: str = Field(description="The hair color")
# ---------------------------------------------------------------------------
# Mock helpers for SDK responses
# ---------------------------------------------------------------------------
# Canonical non-streaming completion payload: a single choice with plain text
# content plus a usage block (10 prompt + 5 completion = 15 total tokens).
_SIMPLE_RESPONSE_DICT: dict[str, Any] = {
    "id": "gen-abc123",
    "choices": [
        {
            "message": {"role": "assistant", "content": "Hello!"},
            "finish_reason": "stop",
            "index": 0,
        }
    ],
    "usage": {
        "prompt_tokens": 10,
        "completion_tokens": 5,
        "total_tokens": 15,
    },
    "model": MODEL_NAME,
    "object": "chat.completion",
    "created": 1700000000.0,
}
# Completion payload for a tool-calling turn: `content` is None and the single
# choice carries an OpenAI-style `tool_calls` entry instead.
_TOOL_RESPONSE_DICT: dict[str, Any] = {
    "id": "gen-tool123",
    "choices": [
        {
            "message": {
                "role": "assistant",
                "content": None,
                "tool_calls": [
                    {
                        "id": "call_1",
                        "type": "function",
                        "function": {
                            "name": "GetWeather",
                            "arguments": '{"location": "San Francisco"}',
                        },
                    }
                ],
            },
            "finish_reason": "tool_calls",
            "index": 0,
        }
    ],
    "usage": {"prompt_tokens": 20, "completion_tokens": 10, "total_tokens": 30},
    "model": MODEL_NAME,
    "object": "chat.completion",
    "created": 1700000000.0,
}
# Streaming fixture: a role-only opener, two content deltas ("Hello" and
# " world"), then a final chunk carrying finish_reason plus usage.
_STREAM_CHUNKS: list[dict[str, Any]] = [
    {
        "choices": [{"delta": {"role": "assistant", "content": ""}, "index": 0}],
        "model": MODEL_NAME,
        "object": "chat.completion.chunk",
        "created": 1700000000.0,
        "id": "gen-stream1",
    },
    {
        "choices": [{"delta": {"content": "Hello"}, "index": 0}],
        "model": MODEL_NAME,
        "object": "chat.completion.chunk",
        "created": 1700000000.0,
        "id": "gen-stream1",
    },
    {
        "choices": [{"delta": {"content": " world"}, "index": 0}],
        "model": MODEL_NAME,
        "object": "chat.completion.chunk",
        "created": 1700000000.0,
        "id": "gen-stream1",
    },
    {
        "choices": [{"delta": {}, "finish_reason": "stop", "index": 0}],
        "usage": {"prompt_tokens": 5, "completion_tokens": 2, "total_tokens": 7},
        "model": MODEL_NAME,
        "object": "chat.completion.chunk",
        "created": 1700000000.0,
        "id": "gen-stream1",
    },
]
def _make_sdk_response(response_dict: dict[str, Any]) -> MagicMock:
"""Build a MagicMock that behaves like an SDK ChatResponse."""
mock = MagicMock()
mock.model_dump.return_value = response_dict
return mock
class _MockSyncStream:
"""Synchronous iterator that mimics the SDK EventStream."""
def __init__(self, chunks: list[dict[str, Any]]) -> None:
self._chunks = chunks
def __iter__(self) -> _MockSyncStream:
return self
def __next__(self) -> MagicMock:
if not self._chunks:
raise StopIteration
chunk = self._chunks.pop(0)
mock = MagicMock()
mock.model_dump.return_value = chunk
return mock
class _MockAsyncStream:
"""Async iterator that mimics the SDK EventStreamAsync."""
def __init__(self, chunks: list[dict[str, Any]]) -> None:
self._chunks = list(chunks)
def __aiter__(self) -> _MockAsyncStream:
return self
async def __anext__(self) -> MagicMock:
if not self._chunks:
raise StopAsyncIteration
chunk = self._chunks.pop(0)
mock = MagicMock()
mock.model_dump.return_value = chunk
return mock
# ===========================================================================
# Instantiation tests
# ===========================================================================
class TestChatOpenRouterInstantiation:
    """Tests for `ChatOpenRouter` instantiation.

    Covers API-key resolution (field vs env var), secret masking, LangSmith
    parameter mapping, SDK client construction, and `_default_params` content.
    """

    def test_basic_instantiation(self) -> None:
        """Test basic model instantiation with required params."""
        model = _make_model()
        assert model.model_name == MODEL_NAME
        assert model.openrouter_api_base is None

    def test_api_key_from_field(self) -> None:
        """Test that API key is properly set."""
        model = _make_model()
        assert model.openrouter_api_key is not None
        assert model.openrouter_api_key.get_secret_value() == "test-key"

    def test_api_key_from_env(self, monkeypatch: pytest.MonkeyPatch) -> None:
        """Test that API key is read from OPENROUTER_API_KEY env var."""
        monkeypatch.setenv("OPENROUTER_API_KEY", "env-key-123")
        # No explicit api_key here: validation must fall back to the env var.
        model = ChatOpenRouter(model=MODEL_NAME)
        assert model.openrouter_api_key is not None
        assert model.openrouter_api_key.get_secret_value() == "env-key-123"

    def test_missing_api_key_raises(self, monkeypatch: pytest.MonkeyPatch) -> None:
        """Test that missing API key raises ValueError."""
        # Guard against a real key leaking in from the test runner's env.
        monkeypatch.delenv("OPENROUTER_API_KEY", raising=False)
        with pytest.raises(ValueError, match="OPENROUTER_API_KEY must be set"):
            ChatOpenRouter(model=MODEL_NAME)

    def test_model_required(self) -> None:
        """Test that model name is required."""
        with pytest.raises((ValueError, TypeError)):
            ChatOpenRouter(api_key=SecretStr("test-key"))  # type: ignore[call-arg]

    def test_secret_masking(self) -> None:
        """Test that API key is not exposed in string representation."""
        model = _make_model(api_key=SecretStr("super-secret"))
        model_str = str(model)
        assert "super-secret" not in model_str

    def test_secret_masking_repr(self) -> None:
        """Test that API key is masked in repr too."""
        model = _make_model(api_key=SecretStr("super-secret"))
        assert "super-secret" not in repr(model)

    def test_api_key_is_secret_str(self) -> None:
        """Test that openrouter_api_key is a SecretStr instance."""
        model = _make_model()
        assert isinstance(model.openrouter_api_key, SecretStr)

    def test_llm_type(self) -> None:
        """Test _llm_type property."""
        model = _make_model()
        assert model._llm_type == "openrouter-chat"

    def test_ls_params(self) -> None:
        """Test LangSmith params include openrouter provider."""
        model = _make_model()
        ls_params = model._get_ls_params()
        assert ls_params["ls_provider"] == "openrouter"

    def test_ls_params_includes_max_tokens(self) -> None:
        """Test that ls_max_tokens is set when max_tokens is configured."""
        model = _make_model(max_tokens=512)
        ls_params = model._get_ls_params()
        assert ls_params["ls_max_tokens"] == 512

    def test_ls_params_stop_string_wrapped_in_list(self) -> None:
        """Test that a string stop value is wrapped in a list for ls_stop."""
        model = _make_model(stop_sequences="END")
        ls_params = model._get_ls_params()
        assert ls_params["ls_stop"] == ["END"]

    def test_ls_params_stop_list_passthrough(self) -> None:
        """Test that a list stop value is passed through directly."""
        model = _make_model(stop_sequences=["END", "STOP"])
        ls_params = model._get_ls_params()
        assert ls_params["ls_stop"] == ["END", "STOP"]

    def test_client_created(self) -> None:
        """Test that OpenRouter SDK client is created."""
        model = _make_model()
        assert model.client is not None

    def test_client_reused_for_same_params(self) -> None:
        """Test that the SDK client is reused when model is re-validated."""
        model = _make_model()
        client_1 = model.client
        # Re-validate does not replace the existing client
        model.validate_environment()  # type: ignore[operator]
        assert model.client is client_1

    def test_app_url_passed_to_client(self) -> None:
        """Test that app_url is passed as http_referer to the SDK client."""
        with patch("openrouter.OpenRouter") as mock_cls:
            mock_cls.return_value = MagicMock()
            ChatOpenRouter(
                model=MODEL_NAME,
                api_key=SecretStr("test-key"),
                app_url="https://myapp.com",
            )
            # call_args[1] is the kwargs dict of the constructor call.
            call_kwargs = mock_cls.call_args[1]
            assert call_kwargs["http_referer"] == "https://myapp.com"

    def test_app_title_passed_to_client(self) -> None:
        """Test that app_title is passed as x_title to the SDK client."""
        with patch("openrouter.OpenRouter") as mock_cls:
            mock_cls.return_value = MagicMock()
            ChatOpenRouter(
                model=MODEL_NAME,
                api_key=SecretStr("test-key"),
                app_title="My App",
            )
            call_kwargs = mock_cls.call_args[1]
            assert call_kwargs["x_title"] == "My App"

    def test_default_attribution_headers(self) -> None:
        """Test that default attribution headers are sent when not overridden."""
        with patch("openrouter.OpenRouter") as mock_cls:
            mock_cls.return_value = MagicMock()
            ChatOpenRouter(
                model=MODEL_NAME,
                api_key=SecretStr("test-key"),
            )
            call_kwargs = mock_cls.call_args[1]
            assert call_kwargs["http_referer"] == ("https://docs.langchain.com/oss")
            assert call_kwargs["x_title"] == "langchain"

    def test_user_attribution_overrides_defaults(self) -> None:
        """Test that user-supplied attribution overrides the defaults."""
        with patch("openrouter.OpenRouter") as mock_cls:
            mock_cls.return_value = MagicMock()
            ChatOpenRouter(
                model=MODEL_NAME,
                api_key=SecretStr("test-key"),
                app_url="https://my-custom-app.com",
                app_title="My Custom App",
            )
            call_kwargs = mock_cls.call_args[1]
            assert call_kwargs["http_referer"] == "https://my-custom-app.com"
            assert call_kwargs["x_title"] == "My Custom App"

    def test_reasoning_in_params(self) -> None:
        """Test that `reasoning` is included in default params."""
        model = _make_model(reasoning={"effort": "high"})
        params = model._default_params
        assert params["reasoning"] == {"effort": "high"}

    def test_openrouter_provider_in_params(self) -> None:
        """Test that `openrouter_provider` is included in default params."""
        model = _make_model(openrouter_provider={"order": ["Anthropic"]})
        params = model._default_params
        # Note: the field surfaces under the API's `provider` key.
        assert params["provider"] == {"order": ["Anthropic"]}

    def test_route_in_params(self) -> None:
        """Test that `route` is included in default params."""
        model = _make_model(route="fallback")
        params = model._default_params
        assert params["route"] == "fallback"

    def test_optional_params_excluded_when_none(self) -> None:
        """Test that None optional params are not in default params."""
        model = _make_model()
        params = model._default_params
        assert "temperature" not in params
        assert "max_tokens" not in params
        assert "top_p" not in params
        assert "reasoning" not in params

    def test_temperature_included_when_set(self) -> None:
        """Test that temperature is included when explicitly set."""
        model = _make_model(temperature=0.5)
        params = model._default_params
        assert params["temperature"] == 0.5
# ===========================================================================
# Serialization tests
# ===========================================================================
class TestSerialization:
    """Round-trip serialization tests for `ChatOpenRouter`."""

    def test_is_lc_serializable(self) -> None:
        """The class must advertise LangChain serializability."""
        assert ChatOpenRouter.is_lc_serializable() is True

    def test_dumpd_load_roundtrip(self) -> None:
        """A dumpd/load round-trip must preserve the model configuration."""
        original = _make_model(temperature=0.7, max_tokens=100)
        restored = load(
            dumpd(original),
            valid_namespaces=["langchain_openrouter"],
            allowed_objects="all",
            secrets_from_env=False,
            secrets_map={"OPENROUTER_API_KEY": "test-key"},
        )
        assert isinstance(restored, ChatOpenRouter)
        assert restored.model_name == MODEL_NAME
        assert restored.temperature == 0.7
        assert restored.max_tokens == 100

    def test_dumps_does_not_leak_secrets(self) -> None:
        """The serialized JSON must never contain the raw API key."""
        model = _make_model(api_key=SecretStr("super-secret-key"))
        assert "super-secret-key" not in dumps(model)
# ===========================================================================
# Mocked generate / stream tests
# ===========================================================================
class TestMockedGenerate:
    """Tests for _generate / _agenerate with a mocked SDK client.

    The SDK client is replaced wholesale with a MagicMock; sync calls go
    through `client.chat.send` and async calls through `client.chat.send_async`.
    """

    def test_invoke_basic(self) -> None:
        """Test basic invoke returns an AIMessage via mocked SDK."""
        model = _make_model()
        model.client = MagicMock()
        model.client.chat.send.return_value = _make_sdk_response(_SIMPLE_RESPONSE_DICT)
        result = model.invoke("Hello")
        assert isinstance(result, AIMessage)
        assert result.content == "Hello!"
        model.client.chat.send.assert_called_once()

    def test_invoke_with_tool_response(self) -> None:
        """Test invoke that returns tool calls."""
        model = _make_model()
        model.client = MagicMock()
        model.client.chat.send.return_value = _make_sdk_response(_TOOL_RESPONSE_DICT)
        result = model.invoke("What's the weather?")
        assert isinstance(result, AIMessage)
        assert len(result.tool_calls) == 1
        assert result.tool_calls[0]["name"] == "GetWeather"

    def test_invoke_passes_correct_messages(self) -> None:
        """Test that invoke converts messages and passes them to the SDK."""
        model = _make_model()
        model.client = MagicMock()
        model.client.chat.send.return_value = _make_sdk_response(_SIMPLE_RESPONSE_DICT)
        model.invoke([HumanMessage(content="Hi")])
        call_kwargs = model.client.chat.send.call_args[1]
        assert call_kwargs["messages"] == [{"role": "user", "content": "Hi"}]

    def test_invoke_strips_internal_kwargs(self) -> None:
        """Test that LangChain-internal kwargs are stripped before SDK call."""
        model = _make_model()
        model.client = MagicMock()
        model.client.chat.send.return_value = _make_sdk_response(_SIMPLE_RESPONSE_DICT)
        # Call _generate directly so the internal kwarg reaches the model.
        model._generate(
            [HumanMessage(content="Hi")],
            ls_structured_output_format={"kwargs": {"method": "function_calling"}},
        )
        call_kwargs = model.client.chat.send.call_args[1]
        assert "ls_structured_output_format" not in call_kwargs

    def test_invoke_usage_metadata(self) -> None:
        """Test that usage metadata is populated on the response."""
        model = _make_model()
        model.client = MagicMock()
        model.client.chat.send.return_value = _make_sdk_response(_SIMPLE_RESPONSE_DICT)
        result = model.invoke("Hello")
        assert isinstance(result, AIMessage)
        assert result.usage_metadata is not None
        # Values mirror the usage block of _SIMPLE_RESPONSE_DICT.
        assert result.usage_metadata["input_tokens"] == 10
        assert result.usage_metadata["output_tokens"] == 5
        assert result.usage_metadata["total_tokens"] == 15

    def test_stream_basic(self) -> None:
        """Test streaming returns AIMessageChunks via mocked SDK."""
        model = _make_model()
        model.client = MagicMock()
        # Copy each chunk dict: the sync stream consumes entries as it runs.
        model.client.chat.send.return_value = _MockSyncStream(
            [dict(c) for c in _STREAM_CHUNKS]
        )
        chunks = list(model.stream("Hello"))
        assert len(chunks) > 0
        assert all(isinstance(c, AIMessageChunk) for c in chunks)
        # Concatenated content should be "Hello world"
        full_content = "".join(c.content for c in chunks if isinstance(c.content, str))
        assert "Hello" in full_content
        assert "world" in full_content

    def test_stream_passes_stream_true(self) -> None:
        """Test that stream sends stream=True to the SDK."""
        model = _make_model()
        model.client = MagicMock()
        model.client.chat.send.return_value = _MockSyncStream(
            [dict(c) for c in _STREAM_CHUNKS]
        )
        list(model.stream("Hello"))
        call_kwargs = model.client.chat.send.call_args[1]
        assert call_kwargs["stream"] is True

    def test_invoke_with_streaming_flag(self) -> None:
        """Test that invoke delegates to stream when streaming=True."""
        model = _make_model(streaming=True)
        model.client = MagicMock()
        model.client.chat.send.return_value = _MockSyncStream(
            [dict(c) for c in _STREAM_CHUNKS]
        )
        result = model.invoke("Hello")
        assert isinstance(result, AIMessage)
        # Even though invoke() was called, the SDK must see a streaming call.
        call_kwargs = model.client.chat.send.call_args[1]
        assert call_kwargs["stream"] is True

    async def test_ainvoke_basic(self) -> None:
        """Test async invoke returns an AIMessage via mocked SDK."""
        model = _make_model()
        model.client = MagicMock()
        model.client.chat.send_async = AsyncMock(
            return_value=_make_sdk_response(_SIMPLE_RESPONSE_DICT)
        )
        result = await model.ainvoke("Hello")
        assert isinstance(result, AIMessage)
        assert result.content == "Hello!"
        model.client.chat.send_async.assert_awaited_once()

    async def test_astream_basic(self) -> None:
        """Test async streaming returns AIMessageChunks via mocked SDK."""
        model = _make_model()
        model.client = MagicMock()
        # _MockAsyncStream copies the list, so the shared fixture is safe.
        model.client.chat.send_async = AsyncMock(
            return_value=_MockAsyncStream(_STREAM_CHUNKS)
        )
        chunks = [c async for c in model.astream("Hello")]
        assert len(chunks) > 0
        assert all(isinstance(c, AIMessageChunk) for c in chunks)

    def test_stream_response_metadata_fields(self) -> None:
        """Test response-level metadata in streaming response_metadata."""
        model = _make_model()
        model.client = MagicMock()
        stream_chunks: list[dict[str, Any]] = [
            {
                "choices": [
                    {"delta": {"role": "assistant", "content": "Hi"}, "index": 0}
                ],
                "model": "anthropic/claude-sonnet-4-5",
                "system_fingerprint": "fp_stream123",
                "object": "chat.completion.chunk",
                "created": 1700000000.0,
                "id": "gen-stream-meta",
            },
            {
                "choices": [
                    {
                        "delta": {},
                        "finish_reason": "stop",
                        "native_finish_reason": "end_turn",
                        "index": 0,
                    }
                ],
                "model": "anthropic/claude-sonnet-4-5",
                "system_fingerprint": "fp_stream123",
                "object": "chat.completion.chunk",
                "created": 1700000000.0,
                "id": "gen-stream-meta",
            },
        ]
        model.client.chat.send.return_value = _MockSyncStream(stream_chunks)
        chunks = list(model.stream("Hello"))
        assert len(chunks) >= 2
        # Find the chunk with finish_reason (final metadata chunk)
        final = [
            c for c in chunks if c.response_metadata.get("finish_reason") == "stop"
        ]
        assert len(final) == 1
        meta = final[0].response_metadata
        assert meta["model_name"] == "anthropic/claude-sonnet-4-5"
        assert meta["system_fingerprint"] == "fp_stream123"
        assert meta["native_finish_reason"] == "end_turn"
        assert meta["finish_reason"] == "stop"
        assert meta["id"] == "gen-stream-meta"
        # The float `created` timestamp is normalized to an int.
        assert meta["created"] == 1700000000
        assert meta["object"] == "chat.completion.chunk"

    async def test_astream_response_metadata_fields(self) -> None:
        """Test response-level metadata in async streaming response_metadata."""
        model = _make_model()
        model.client = MagicMock()
        stream_chunks: list[dict[str, Any]] = [
            {
                "choices": [
                    {"delta": {"role": "assistant", "content": "Hi"}, "index": 0}
                ],
                "model": "anthropic/claude-sonnet-4-5",
                "system_fingerprint": "fp_async123",
                "object": "chat.completion.chunk",
                "created": 1700000000.0,
                "id": "gen-astream-meta",
            },
            {
                "choices": [
                    {
                        "delta": {},
                        "finish_reason": "stop",
                        "native_finish_reason": "end_turn",
                        "index": 0,
                    }
                ],
                "model": "anthropic/claude-sonnet-4-5",
                "system_fingerprint": "fp_async123",
                "object": "chat.completion.chunk",
                "created": 1700000000.0,
                "id": "gen-astream-meta",
            },
        ]
        model.client.chat.send_async = AsyncMock(
            return_value=_MockAsyncStream(stream_chunks)
        )
        chunks = [c async for c in model.astream("Hello")]
        assert len(chunks) >= 2
        # Find the chunk with finish_reason (final metadata chunk)
        final = [
            c for c in chunks if c.response_metadata.get("finish_reason") == "stop"
        ]
        assert len(final) == 1
        meta = final[0].response_metadata
        assert meta["model_name"] == "anthropic/claude-sonnet-4-5"
        assert meta["system_fingerprint"] == "fp_async123"
        assert meta["native_finish_reason"] == "end_turn"
        assert meta["id"] == "gen-astream-meta"
        assert meta["created"] == 1700000000
        assert meta["object"] == "chat.completion.chunk"
# ===========================================================================
# Request payload verification
# ===========================================================================
class TestRequestPayload:
    """Tests verifying the exact dict sent to the SDK."""

    def test_message_format_in_payload(self) -> None:
        """Messages must be converted to OpenAI-style role/content dicts."""
        llm = _make_model(temperature=0)
        llm.client = MagicMock()
        llm.client.chat.send.return_value = _make_sdk_response(_SIMPLE_RESPONSE_DICT)
        prompt = [
            SystemMessage(content="You are helpful."),
            HumanMessage(content="Hi"),
        ]
        llm.invoke(prompt)
        sent = llm.client.chat.send.call_args[1]
        expected = [
            {"role": "system", "content": "You are helpful."},
            {"role": "user", "content": "Hi"},
        ]
        assert sent["messages"] == expected

    def test_model_kwargs_forwarded(self) -> None:
        """Extra model_kwargs must be flattened into the SDK call."""
        llm = _make_model(model_kwargs={"top_k": 50})
        llm.client = MagicMock()
        llm.client.chat.send.return_value = _make_sdk_response(_SIMPLE_RESPONSE_DICT)
        llm.invoke("Hi")
        sent = llm.client.chat.send.call_args[1]
        assert sent["top_k"] == 50

    def test_stop_sequences_in_payload(self) -> None:
        """A per-call stop list must be forwarded to the SDK."""
        llm = _make_model()
        llm.client = MagicMock()
        llm.client.chat.send.return_value = _make_sdk_response(_SIMPLE_RESPONSE_DICT)
        llm.invoke("Hi", stop=["END"])
        sent = llm.client.chat.send.call_args[1]
        assert sent["stop"] == ["END"]

    def test_tool_format_in_payload(self) -> None:
        """Bound tools must reach the SDK in OpenAI function format."""
        llm = _make_model()
        llm.client = MagicMock()
        llm.client.chat.send.return_value = _make_sdk_response(_TOOL_RESPONSE_DICT)
        llm.bind_tools([GetWeather]).invoke("What's the weather?")
        sent = llm.client.chat.send.call_args[1]
        tool_defs = sent["tools"]
        assert len(tool_defs) == 1
        assert tool_defs[0]["type"] == "function"
        assert tool_defs[0]["function"]["name"] == "GetWeather"
        assert "parameters" in tool_defs[0]["function"]

    def test_openrouter_params_in_payload(self) -> None:
        """OpenRouter-specific options must all appear in the SDK call."""
        llm = _make_model(
            reasoning={"effort": "high"},
            openrouter_provider={"order": ["Anthropic"]},
            route="fallback",
        )
        llm.client = MagicMock()
        llm.client.chat.send.return_value = _make_sdk_response(_SIMPLE_RESPONSE_DICT)
        llm.invoke("Hi")
        sent = llm.client.chat.send.call_args[1]
        assert sent["reasoning"] == {"effort": "high"}
        assert sent["provider"] == {"order": ["Anthropic"]}
        assert sent["route"] == "fallback"
# ===========================================================================
# bind_tools tests
# ===========================================================================
class TestBindTools:
    """Tests for the bind_tools public method."""

    @pytest.mark.parametrize(
        "tool_choice",
        [
            "auto",
            "none",
            "required",
            "GetWeather",
            {"type": "function", "function": {"name": "GetWeather"}},
            None,
        ],
    )
    def test_bind_tools_tool_choice(self, tool_choice: Any) -> None:
        """bind_tools must accept every supported tool_choice form."""
        runnable = _make_model().bind_tools(
            [GetWeather, GenerateUsername], tool_choice=tool_choice
        )
        assert isinstance(runnable, RunnableBinding)

    def test_bind_tools_bool_true_single_tool(self) -> None:
        """tool_choice=True with exactly one tool forces that tool by name."""
        runnable = _make_model().bind_tools([GetWeather], tool_choice=True)
        assert isinstance(runnable, RunnableBinding)
        expected = {"type": "function", "function": {"name": "GetWeather"}}
        assert runnable.kwargs["tool_choice"] == expected

    def test_bind_tools_bool_true_multiple_tools_raises(self) -> None:
        """tool_choice=True is ambiguous with several tools and must raise."""
        with pytest.raises(ValueError, match="tool_choice can only be True"):
            _make_model().bind_tools(
                [GetWeather, GenerateUsername], tool_choice=True
            )

    def test_bind_tools_any_maps_to_required(self) -> None:
        """'any' is an alias that must be normalized to 'required'."""
        runnable = _make_model().bind_tools([GetWeather], tool_choice="any")
        assert isinstance(runnable, RunnableBinding)
        assert runnable.kwargs["tool_choice"] == "required"

    def test_bind_tools_string_name_becomes_dict(self) -> None:
        """A bare tool-name string must expand to the function-choice dict."""
        runnable = _make_model().bind_tools([GetWeather], tool_choice="GetWeather")
        assert isinstance(runnable, RunnableBinding)
        expected = {"type": "function", "function": {"name": "GetWeather"}}
        assert runnable.kwargs["tool_choice"] == expected

    def test_bind_tools_formats_tools_correctly(self) -> None:
        """Tools must be converted to the OpenAI function format."""
        runnable = _make_model().bind_tools([GetWeather])
        assert isinstance(runnable, RunnableBinding)
        tool_defs = runnable.kwargs["tools"]
        assert len(tool_defs) == 1
        assert tool_defs[0]["type"] == "function"
        assert tool_defs[0]["function"]["name"] == "GetWeather"

    def test_bind_tools_no_choice_omits_key(self) -> None:
        """tool_choice=None must leave tool_choice out of the bound kwargs."""
        runnable = _make_model().bind_tools([GetWeather], tool_choice=None)
        assert isinstance(runnable, RunnableBinding)
        assert "tool_choice" not in runnable.kwargs

    def test_bind_tools_strict_forwarded(self) -> None:
        """strict=True must be forwarded into each tool definition."""
        runnable = _make_model().bind_tools([GetWeather], strict=True)
        assert isinstance(runnable, RunnableBinding)
        tool_defs = runnable.kwargs["tools"]
        assert tool_defs[0]["function"]["strict"] is True

    def test_bind_tools_strict_none_by_default(self) -> None:
        """Without strict, no strict key is added to the tool definition."""
        runnable = _make_model().bind_tools([GetWeather])
        assert isinstance(runnable, RunnableBinding)
        tool_defs = runnable.kwargs["tools"]
        assert "strict" not in tool_defs[0]["function"]
# ===========================================================================
# with_structured_output tests
# ===========================================================================
class TestWithStructuredOutput:
    """Tests for the with_structured_output public method.

    Covers Pydantic and dict schemas, the `function_calling` / `json_schema`
    methods, the deprecated `json_mode` fallback, and `strict` forwarding.
    """

    @pytest.mark.parametrize("method", ["function_calling", "json_schema"])
    @pytest.mark.parametrize("include_raw", ["yes", "no"])
    def test_with_structured_output_pydantic(
        self,
        method: Literal["function_calling", "json_schema"],
        include_raw: str,
    ) -> None:
        """Test with_structured_output using a Pydantic schema."""
        model = _make_model()
        structured = model.with_structured_output(
            GenerateUsername, method=method, include_raw=(include_raw == "yes")
        )
        assert structured is not None

    @pytest.mark.parametrize("method", ["function_calling", "json_schema"])
    def test_with_structured_output_dict_schema(
        self,
        method: Literal["function_calling", "json_schema"],
    ) -> None:
        """Test with_structured_output using a JSON schema dict."""
        schema = GenerateUsername.model_json_schema()
        model = _make_model()
        structured = model.with_structured_output(schema, method=method)
        assert structured is not None

    def test_with_structured_output_none_schema_function_calling_raises(self) -> None:
        """Test that schema=None with function_calling raises ValueError."""
        model = _make_model()
        with pytest.raises(ValueError, match="schema must be specified"):
            model.with_structured_output(None, method="function_calling")

    def test_with_structured_output_none_schema_json_schema_raises(self) -> None:
        """Test that schema=None with json_schema raises ValueError."""
        model = _make_model()
        with pytest.raises(ValueError, match="schema must be specified"):
            model.with_structured_output(None, method="json_schema")

    def test_with_structured_output_invalid_method_raises(self) -> None:
        """Test that an unrecognized method raises ValueError."""
        model = _make_model()
        with pytest.raises(ValueError, match="Unrecognized method"):
            model.with_structured_output(
                GenerateUsername,
                method="invalid",  # type: ignore[arg-type]
            )

    def test_with_structured_output_json_schema_sets_response_format(self) -> None:
        """Test that json_schema method sets response_format correctly."""
        model = _make_model()
        structured = model.with_structured_output(
            GenerateUsername, method="json_schema"
        )
        # The first step in the chain should be the bound model
        bound = structured.first  # type: ignore[attr-defined]
        assert isinstance(bound, RunnableBinding)
        rf = bound.kwargs["response_format"]
        assert rf["type"] == "json_schema"
        # The schema name is derived from the Pydantic class name.
        assert rf["json_schema"]["name"] == "GenerateUsername"

    def test_with_structured_output_json_mode_warns_and_falls_back(self) -> None:
        """Test that json_mode warns and falls back to json_schema."""
        model = _make_model()
        with pytest.warns(match="Defaulting to 'json_schema'"):
            structured = model.with_structured_output(
                GenerateUsername,
                method="json_mode",  # type: ignore[arg-type]
            )
        bound = structured.first  # type: ignore[attr-defined]
        assert isinstance(bound, RunnableBinding)
        rf = bound.kwargs["response_format"]
        assert rf["type"] == "json_schema"

    def test_with_structured_output_strict_function_calling(self) -> None:
        """Test that strict is forwarded for function_calling method."""
        model = _make_model()
        structured = model.with_structured_output(
            GenerateUsername, method="function_calling", strict=True
        )
        bound = structured.first  # type: ignore[attr-defined]
        assert isinstance(bound, RunnableBinding)
        tools = bound.kwargs["tools"]
        assert tools[0]["function"]["strict"] is True

    def test_with_structured_output_strict_json_schema(self) -> None:
        """Test that strict is forwarded for json_schema method."""
        model = _make_model()
        structured = model.with_structured_output(
            GenerateUsername, method="json_schema", strict=True
        )
        bound = structured.first  # type: ignore[attr-defined]
        assert isinstance(bound, RunnableBinding)
        rf = bound.kwargs["response_format"]
        assert rf["json_schema"]["strict"] is True

    def test_with_structured_output_json_mode_with_strict_warns_and_forwards(
        self,
    ) -> None:
        """Test json_mode with strict warns and falls back to json_schema."""
        model = _make_model()
        with pytest.warns(match="Defaulting to 'json_schema'"):
            structured = model.with_structured_output(
                GenerateUsername,
                method="json_mode",  # type: ignore[arg-type]
                strict=True,
            )
        bound = structured.first  # type: ignore[attr-defined]
        assert isinstance(bound, RunnableBinding)
        rf = bound.kwargs["response_format"]
        assert rf["type"] == "json_schema"
        assert rf["json_schema"]["strict"] is True
# ===========================================================================
# Message conversion tests
# ===========================================================================
class TestMessageConversion:
    """Tests for message conversion functions.

    Exercises `_convert_message_to_dict` / `_convert_dict_to_message` for
    every LangChain message type, including OpenRouter reasoning fields.
    """

    def test_human_message_to_dict(self) -> None:
        """Test converting HumanMessage to dict."""
        msg = HumanMessage(content="Hello")
        result = _convert_message_to_dict(msg)
        assert result == {"role": "user", "content": "Hello"}

    def test_system_message_to_dict(self) -> None:
        """Test converting SystemMessage to dict."""
        msg = SystemMessage(content="You are helpful.")
        result = _convert_message_to_dict(msg)
        assert result == {"role": "system", "content": "You are helpful."}

    def test_ai_message_to_dict(self) -> None:
        """Test converting AIMessage to dict."""
        msg = AIMessage(content="Hi there!")
        result = _convert_message_to_dict(msg)
        assert result == {"role": "assistant", "content": "Hi there!"}

    def test_ai_message_with_reasoning_content_to_dict(self) -> None:
        """Test that reasoning_content is preserved when converting back to dict."""
        msg = AIMessage(
            content="The answer is 42.",
            additional_kwargs={"reasoning_content": "Let me think about this..."},
        )
        result = _convert_message_to_dict(msg)
        assert result["role"] == "assistant"
        assert result["content"] == "The answer is 42."
        # additional_kwargs["reasoning_content"] maps to the API's `reasoning`.
        assert result["reasoning"] == "Let me think about this..."

    def test_ai_message_with_reasoning_details_to_dict(self) -> None:
        """Test that reasoning_details is preserved when converting back to dict."""
        details = [
            {"type": "reasoning.text", "text": "Step 1: analyze"},
            {"type": "reasoning.text", "text": "Step 2: solve"},
        ]
        msg = AIMessage(
            content="Answer",
            additional_kwargs={"reasoning_details": details},
        )
        result = _convert_message_to_dict(msg)
        assert result["reasoning_details"] == details
        # Without reasoning_content, no `reasoning` key should be emitted.
        assert "reasoning" not in result

    def test_ai_message_with_both_reasoning_fields_to_dict(self) -> None:
        """Test that both reasoning_content and reasoning_details are preserved."""
        details = [{"type": "reasoning.text", "text": "detailed thinking"}]
        msg = AIMessage(
            content="Answer",
            additional_kwargs={
                "reasoning_content": "I thought about it",
                "reasoning_details": details,
            },
        )
        result = _convert_message_to_dict(msg)
        assert result["reasoning"] == "I thought about it"
        assert result["reasoning_details"] == details

    def test_reasoning_roundtrip_through_dict(self) -> None:
        """Test that reasoning survives dict -> message -> dict roundtrip."""
        original_dict = {
            "role": "assistant",
            "content": "The answer",
            "reasoning": "My thinking process",
            "reasoning_details": [{"type": "reasoning.text", "text": "step-by-step"}],
        }
        msg = _convert_dict_to_message(original_dict)
        result = _convert_message_to_dict(msg)
        assert result["reasoning"] == "My thinking process"
        assert result["reasoning_details"] == original_dict["reasoning_details"]

    def test_tool_message_to_dict(self) -> None:
        """Test converting ToolMessage to dict."""
        msg = ToolMessage(content="result", tool_call_id="call_123")
        result = _convert_message_to_dict(msg)
        assert result == {
            "role": "tool",
            "content": "result",
            "tool_call_id": "call_123",
        }

    def test_chat_message_to_dict(self) -> None:
        """Test converting ChatMessage to dict."""
        # ChatMessage carries an arbitrary role, passed through verbatim.
        msg = ChatMessage(content="Hello", role="developer")
        result = _convert_message_to_dict(msg)
        assert result == {"role": "developer", "content": "Hello"}

    def test_ai_message_with_tool_calls_to_dict(self) -> None:
        """Test converting AIMessage with tool calls to dict."""
        msg = AIMessage(
            content="",
            tool_calls=[
                {
                    "name": "get_weather",
                    "args": {"location": "SF"},
                    "id": "call_1",
                    "type": "tool_call",
                }
            ],
        )
        result = _convert_message_to_dict(msg)
        assert result["role"] == "assistant"
        # Empty content is normalized to None for tool-call messages.
        assert result["content"] is None
        assert len(result["tool_calls"]) == 1
        assert result["tool_calls"][0]["function"]["name"] == "get_weather"
def test_dict_to_ai_message(self) -> None:
"""Test converting dict to AIMessage."""
d = {"role": "assistant", "content": "Hello!"}
msg = _convert_dict_to_message(d)
assert isinstance(msg, AIMessage)
assert msg.content == "Hello!"
def test_dict_to_ai_message_with_reasoning(self) -> None:
"""Test that reasoning is extracted from response dict."""
d = {
"role": "assistant",
"content": "Answer",
"reasoning": "Let me think...",
}
msg = _convert_dict_to_message(d)
assert isinstance(msg, AIMessage)
assert msg.additional_kwargs["reasoning_content"] == "Let me think..."
def test_dict_to_ai_message_with_tool_calls(self) -> None:
"""Test converting dict with tool calls to AIMessage."""
d = {
"role": "assistant",
"content": "",
"tool_calls": [
{
"id": "call_1",
"type": "function",
"function": {
"name": "get_weather",
"arguments": '{"location": "SF"}',
},
}
],
}
msg = _convert_dict_to_message(d)
assert isinstance(msg, AIMessage)
assert len(msg.tool_calls) == 1
assert msg.tool_calls[0]["name"] == "get_weather"
def test_dict_to_ai_message_with_invalid_tool_calls(self) -> None:
"""Test that malformed tool calls produce invalid_tool_calls."""
d = {
"role": "assistant",
"content": "",
"tool_calls": [
{
"id": "call_bad",
"type": "function",
"function": {
"name": "get_weather",
"arguments": "not-valid-json{{{",
},
}
],
}
msg = _convert_dict_to_message(d)
assert isinstance(msg, AIMessage)
assert len(msg.invalid_tool_calls) == 1
assert len(msg.tool_calls) == 0
assert msg.invalid_tool_calls[0]["name"] == "get_weather"
def test_dict_to_human_message(self) -> None:
"""Test converting dict to HumanMessage."""
d = {"role": "user", "content": "Hi"}
msg = _convert_dict_to_message(d)
assert isinstance(msg, HumanMessage)
def test_dict_to_system_message(self) -> None:
"""Test converting dict to SystemMessage."""
d = {"role": "system", "content": "Be helpful"}
msg = _convert_dict_to_message(d)
assert isinstance(msg, SystemMessage)
def test_dict_to_tool_message(self) -> None:
"""Test converting dict with role=tool to ToolMessage."""
d = {
"role": "tool",
"content": "result data",
"tool_call_id": "call_42",
"name": "get_weather",
}
msg = _convert_dict_to_message(d)
assert isinstance(msg, ToolMessage)
assert msg.content == "result data"
assert msg.tool_call_id == "call_42"
assert msg.additional_kwargs["name"] == "get_weather"
def test_dict_to_chat_message_unknown_role(self) -> None:
"""Test that unrecognized roles fall back to ChatMessage."""
d = {"role": "developer", "content": "Some content"}
with pytest.warns(UserWarning, match="Unrecognized message role"):
msg = _convert_dict_to_message(d)
assert isinstance(msg, ChatMessage)
assert msg.role == "developer"
assert msg.content == "Some content"
def test_ai_message_with_list_content_filters_non_text(self) -> None:
"""Test that non-text blocks are filtered from AIMessage list content."""
msg = AIMessage(
content=[
{"type": "text", "text": "Hello"},
{"type": "image_url", "image_url": {"url": "http://example.com"}},
]
)
result = _convert_message_to_dict(msg)
assert result["content"] == [{"type": "text", "text": "Hello"}]
# ===========================================================================
# _create_chat_result tests
# ===========================================================================
class TestCreateChatResult:
    """Tests for _create_chat_result.

    Validates how a raw OpenRouter response dict is turned into a
    ChatResult: response/usage metadata, reasoning extraction, tool calls,
    cost fields, and normalization of float token counts. Streaming
    variants verify the same metadata survives on the final chunk.
    """

    def test_model_provider_in_response_metadata(self) -> None:
        """Test that model_provider is set in response metadata."""
        model = _make_model()
        result = model._create_chat_result(_SIMPLE_RESPONSE_DICT)
        assert (
            result.generations[0].message.response_metadata.get("model_provider")
            == "openrouter"
        )

    def test_reasoning_from_response(self) -> None:
        """Test that reasoning content is extracted from response."""
        model = _make_model()
        response_dict: dict[str, Any] = {
            "choices": [
                {
                    "message": {
                        "role": "assistant",
                        "content": "Answer",
                        "reasoning": "Let me think...",
                    },
                    "finish_reason": "stop",
                }
            ],
        }
        result = model._create_chat_result(response_dict)
        assert (
            result.generations[0].message.additional_kwargs.get("reasoning_content")
            == "Let me think..."
        )

    def test_usage_metadata_created(self) -> None:
        """Test that usage metadata is created from token usage."""
        model = _make_model()
        result = model._create_chat_result(_SIMPLE_RESPONSE_DICT)
        msg = result.generations[0].message
        assert isinstance(msg, AIMessage)
        usage = msg.usage_metadata
        assert usage is not None
        # Values come from the usage block of _SIMPLE_RESPONSE_DICT.
        assert usage["input_tokens"] == 10
        assert usage["output_tokens"] == 5
        assert usage["total_tokens"] == 15

    def test_tool_calls_in_response(self) -> None:
        """Test that tool calls are extracted from response."""
        model = _make_model()
        result = model._create_chat_result(_TOOL_RESPONSE_DICT)
        msg = result.generations[0].message
        assert isinstance(msg, AIMessage)
        assert len(msg.tool_calls) == 1
        assert msg.tool_calls[0]["name"] == "GetWeather"

    def test_response_model_in_llm_output(self) -> None:
        """Test that the response model is included in llm_output."""
        model = _make_model()
        result = model._create_chat_result(_SIMPLE_RESPONSE_DICT)
        assert result.llm_output is not None
        assert result.llm_output["model_name"] == MODEL_NAME

    def test_response_model_propagated_to_llm_output(self) -> None:
        """Test that llm_output uses response model when available."""
        model = _make_model()
        response = {
            **_SIMPLE_RESPONSE_DICT,
            # Response-reported model overrides the configured one.
            "model": "openai/gpt-4o",
        }
        result = model._create_chat_result(response)
        assert result.llm_output is not None
        assert result.llm_output["model_name"] == "openai/gpt-4o"

    def test_system_fingerprint_in_metadata(self) -> None:
        """Test that system_fingerprint is included in response_metadata."""
        model = _make_model()
        response = {
            **_SIMPLE_RESPONSE_DICT,
            "system_fingerprint": "fp_abc123",
        }
        result = model._create_chat_result(response)
        msg = result.generations[0].message
        assert isinstance(msg, AIMessage)
        assert msg.response_metadata["system_fingerprint"] == "fp_abc123"

    def test_native_finish_reason_in_metadata(self) -> None:
        """Test that native_finish_reason is included in response_metadata."""
        model = _make_model()
        response: dict[str, Any] = {
            **_SIMPLE_RESPONSE_DICT,
            "choices": [
                {
                    "message": {"role": "assistant", "content": "Hello!"},
                    "finish_reason": "stop",
                    "native_finish_reason": "end_turn",
                    "index": 0,
                }
            ],
        }
        result = model._create_chat_result(response)
        msg = result.generations[0].message
        assert isinstance(msg, AIMessage)
        assert msg.response_metadata["native_finish_reason"] == "end_turn"

    def test_cost_in_response_metadata(self) -> None:
        """Test that OpenRouter cost data is surfaced in response_metadata."""
        model = _make_model()
        response: dict[str, Any] = {
            **_SIMPLE_RESPONSE_DICT,
            "usage": {
                **_SIMPLE_RESPONSE_DICT["usage"],
                "cost": 7.5e-05,
                "cost_details": {
                    "upstream_inference_cost": 7.745e-05,
                    "upstream_inference_prompt_cost": 8.95e-06,
                    "upstream_inference_completions_cost": 6.85e-05,
                },
            },
        }
        result = model._create_chat_result(response)
        msg = result.generations[0].message
        assert isinstance(msg, AIMessage)
        assert msg.response_metadata["cost"] == 7.5e-05
        assert msg.response_metadata["cost_details"] == {
            "upstream_inference_cost": 7.745e-05,
            "upstream_inference_prompt_cost": 8.95e-06,
            "upstream_inference_completions_cost": 6.85e-05,
        }

    def test_cost_absent_when_not_in_usage(self) -> None:
        """Test that cost fields are not added when not present in usage."""
        model = _make_model()
        result = model._create_chat_result(_SIMPLE_RESPONSE_DICT)
        msg = result.generations[0].message
        assert isinstance(msg, AIMessage)
        assert "cost" not in msg.response_metadata
        assert "cost_details" not in msg.response_metadata

    def test_stream_cost_survives_final_chunk(self) -> None:
        """Test that cost fields are preserved on the final streaming chunk.
        The final chunk carries both finish_reason metadata and usage/cost data.
        Regression test: generation_info must merge into response_metadata, not
        replace it, so cost fields set by _convert_chunk_to_message_chunk are
        not lost.
        """
        model = _make_model()
        model.client = MagicMock()
        cost_details = {
            "upstream_inference_cost": 7.745e-05,
            "upstream_inference_prompt_cost": 8.95e-06,
            "upstream_inference_completions_cost": 6.85e-05,
        }
        stream_chunks: list[dict[str, Any]] = [
            {
                "choices": [
                    {"delta": {"role": "assistant", "content": "Hi"}, "index": 0}
                ],
            },
            {
                "choices": [
                    {
                        "delta": {},
                        "finish_reason": "stop",
                        "index": 0,
                    }
                ],
                "model": "openai/gpt-4o-mini",
                "id": "gen-cost-stream",
                "usage": {
                    "prompt_tokens": 10,
                    "completion_tokens": 5,
                    "total_tokens": 15,
                    "cost": 7.5e-05,
                    "cost_details": cost_details,
                },
            },
        ]
        model.client.chat.send.return_value = _MockSyncStream(stream_chunks)
        chunks = list(model.stream("Hello"))
        # Exactly one chunk should carry the finish_reason metadata.
        final = [
            c for c in chunks if c.response_metadata.get("finish_reason") == "stop"
        ]
        assert len(final) == 1
        meta = final[0].response_metadata
        assert meta["cost"] == 7.5e-05
        assert meta["cost_details"] == cost_details
        assert meta["finish_reason"] == "stop"

    async def test_astream_cost_survives_final_chunk(self) -> None:
        """Test that cost fields are preserved on the final async streaming chunk.
        Same regression coverage as the sync test above, for the _astream path.
        """
        model = _make_model()
        model.client = MagicMock()
        cost_details = {
            "upstream_inference_cost": 7.745e-05,
            "upstream_inference_prompt_cost": 8.95e-06,
            "upstream_inference_completions_cost": 6.85e-05,
        }
        stream_chunks: list[dict[str, Any]] = [
            {
                "choices": [
                    {"delta": {"role": "assistant", "content": "Hi"}, "index": 0}
                ],
            },
            {
                "choices": [
                    {
                        "delta": {},
                        "finish_reason": "stop",
                        "index": 0,
                    }
                ],
                "model": "openai/gpt-4o-mini",
                "id": "gen-cost-astream",
                "usage": {
                    "prompt_tokens": 10,
                    "completion_tokens": 5,
                    "total_tokens": 15,
                    "cost": 7.5e-05,
                    "cost_details": cost_details,
                },
            },
        ]
        model.client.chat.send_async = AsyncMock(
            return_value=_MockAsyncStream(stream_chunks)
        )
        chunks = [c async for c in model.astream("Hello")]
        final = [
            c for c in chunks if c.response_metadata.get("finish_reason") == "stop"
        ]
        assert len(final) == 1
        meta = final[0].response_metadata
        assert meta["cost"] == 7.5e-05
        assert meta["cost_details"] == cost_details
        assert meta["finish_reason"] == "stop"

    def test_missing_optional_metadata_excluded(self) -> None:
        """Test that absent optional fields are not added to response_metadata."""
        model = _make_model()
        response: dict[str, Any] = {
            "choices": [
                {
                    "message": {"role": "assistant", "content": "Hello!"},
                    "finish_reason": "stop",
                }
            ],
        }
        result = model._create_chat_result(response)
        msg = result.generations[0].message
        assert isinstance(msg, AIMessage)
        assert "system_fingerprint" not in msg.response_metadata
        assert "native_finish_reason" not in msg.response_metadata
        assert "model" not in msg.response_metadata
        assert result.llm_output is not None
        assert "id" not in result.llm_output
        assert "created" not in result.llm_output
        assert "object" not in result.llm_output

    def test_id_created_object_in_llm_output(self) -> None:
        """Test that id, created, and object are included in llm_output."""
        model = _make_model()
        result = model._create_chat_result(_SIMPLE_RESPONSE_DICT)
        assert result.llm_output is not None
        assert result.llm_output["id"] == "gen-abc123"
        assert result.llm_output["created"] == 1700000000
        assert result.llm_output["object"] == "chat.completion"

    def test_float_token_usage_normalized_to_int_in_usage_metadata(self) -> None:
        """Test that float token counts are cast to int in usage_metadata."""
        model = _make_model()
        response: dict[str, Any] = {
            "choices": [
                {
                    "message": {"role": "assistant", "content": "Hello!"},
                    "finish_reason": "stop",
                }
            ],
            # Some providers report token counts as floats; they must be
            # normalized to ints, including the nested detail blocks.
            "usage": {
                "prompt_tokens": 585.0,
                "completion_tokens": 56.0,
                "total_tokens": 641.0,
                "completion_tokens_details": {"reasoning_tokens": 10.0},
                "prompt_tokens_details": {"cached_tokens": 20.0},
            },
            "model": MODEL_NAME,
        }
        result = model._create_chat_result(response)
        msg = result.generations[0].message
        assert isinstance(msg, AIMessage)
        usage = msg.usage_metadata
        assert usage is not None
        assert usage["input_tokens"] == 585
        assert isinstance(usage["input_tokens"], int)
        assert usage["output_tokens"] == 56
        assert isinstance(usage["output_tokens"], int)
        assert usage["total_tokens"] == 641
        assert isinstance(usage["total_tokens"], int)
        assert usage["input_token_details"]["cache_read"] == 20
        assert isinstance(usage["input_token_details"]["cache_read"], int)
        assert usage["output_token_details"]["reasoning"] == 10
        assert isinstance(usage["output_token_details"]["reasoning"], int)
# ===========================================================================
# Streaming chunk tests
# ===========================================================================
class TestStreamingChunks:
    """Tests for streaming chunk conversion.

    Exercises `_convert_chunk_to_message_chunk`: reasoning extraction from
    deltas, role dispatch to the matching chunk class, tool-call chunk
    handling (including malformed entries), and usage metadata on chunks.
    """

    def test_reasoning_in_streaming_chunk(self) -> None:
        """Test that reasoning is extracted from streaming delta."""
        chunk: dict[str, Any] = {
            "choices": [
                {
                    "delta": {
                        "content": "Main content",
                        "reasoning": "Streaming reasoning",
                    },
                },
            ],
        }
        message_chunk = _convert_chunk_to_message_chunk(chunk, AIMessageChunk)
        assert isinstance(message_chunk, AIMessageChunk)
        assert (
            message_chunk.additional_kwargs.get("reasoning_content")
            == "Streaming reasoning"
        )

    def test_model_provider_in_streaming_chunk(self) -> None:
        """Test that model_provider is set in streaming chunk metadata."""
        chunk: dict[str, Any] = {
            "choices": [
                {
                    "delta": {"content": "Hello"},
                },
            ],
        }
        message_chunk = _convert_chunk_to_message_chunk(chunk, AIMessageChunk)
        assert isinstance(message_chunk, AIMessageChunk)
        assert message_chunk.response_metadata.get("model_provider") == "openrouter"

    def test_chunk_without_reasoning(self) -> None:
        """Test that chunk without reasoning fields works correctly."""
        chunk: dict[str, Any] = {"choices": [{"delta": {"content": "Hello"}}]}
        message_chunk = _convert_chunk_to_message_chunk(chunk, AIMessageChunk)
        assert isinstance(message_chunk, AIMessageChunk)
        assert message_chunk.additional_kwargs.get("reasoning_content") is None

    def test_chunk_with_empty_delta(self) -> None:
        """Test that chunk with empty delta works correctly."""
        chunk: dict[str, Any] = {"choices": [{"delta": {}}]}
        message_chunk = _convert_chunk_to_message_chunk(chunk, AIMessageChunk)
        assert isinstance(message_chunk, AIMessageChunk)
        assert message_chunk.additional_kwargs.get("reasoning_content") is None

    def test_chunk_with_tool_calls(self) -> None:
        """Test that tool calls are extracted from streaming delta."""
        chunk: dict[str, Any] = {
            "choices": [
                {
                    "delta": {
                        "tool_calls": [
                            {
                                "index": 0,
                                "id": "call_1",
                                "type": "function",
                                "function": {
                                    "name": "get_weather",
                                    # Partial JSON is expected mid-stream.
                                    "arguments": '{"loc',
                                },
                            }
                        ],
                    },
                },
            ],
        }
        message_chunk = _convert_chunk_to_message_chunk(chunk, AIMessageChunk)
        assert isinstance(message_chunk, AIMessageChunk)
        assert len(message_chunk.tool_call_chunks) == 1
        assert message_chunk.tool_call_chunks[0]["name"] == "get_weather"
        assert message_chunk.tool_call_chunks[0]["args"] == '{"loc'
        assert message_chunk.tool_call_chunks[0]["id"] == "call_1"
        assert message_chunk.tool_call_chunks[0]["index"] == 0

    def test_chunk_with_malformed_tool_call_skips_bad_keeps_good(self) -> None:
        """Test that a malformed tool call chunk is skipped; valid ones kept."""
        chunk: dict[str, Any] = {
            "choices": [
                {
                    "delta": {
                        "tool_calls": [
                            {
                                "index": 0,
                                "id": "call_good",
                                "type": "function",
                                "function": {
                                    "name": "get_weather",
                                    "arguments": "{}",
                                },
                            },
                            {
                                "index": 1,
                                "id": "call_bad",
                                "type": "function",
                                # missing "function" key
                            },
                        ],
                    },
                },
            ],
        }
        # `warnings` is imported at module level; no local re-import needed.
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            message_chunk = _convert_chunk_to_message_chunk(chunk, AIMessageChunk)
        assert isinstance(message_chunk, AIMessageChunk)
        # The valid tool call is preserved; only the bad one is skipped
        assert len(message_chunk.tool_call_chunks) == 1
        assert message_chunk.tool_call_chunks[0]["name"] == "get_weather"
        # A warning was emitted for the malformed chunk
        assert any("malformed tool call chunk" in str(warning.message) for warning in w)

    def test_chunk_with_user_role(self) -> None:
        """Test that a chunk with role=user produces HumanMessageChunk."""
        chunk: dict[str, Any] = {
            "choices": [{"delta": {"role": "user", "content": "test"}}]
        }
        msg = _convert_chunk_to_message_chunk(chunk, AIMessageChunk)
        assert isinstance(msg, HumanMessageChunk)

    def test_chunk_with_system_role(self) -> None:
        """Test that a chunk with role=system produces SystemMessageChunk."""
        chunk: dict[str, Any] = {
            "choices": [{"delta": {"role": "system", "content": "test"}}]
        }
        # Use ChatMessageChunk default so role dispatch isn't short-circuited
        msg = _convert_chunk_to_message_chunk(chunk, ChatMessageChunk)
        assert isinstance(msg, SystemMessageChunk)

    def test_chunk_with_unknown_role(self) -> None:
        """Test that an unknown role falls back to ChatMessageChunk."""
        chunk: dict[str, Any] = {
            "choices": [{"delta": {"role": "developer", "content": "test"}}]
        }
        with pytest.warns(UserWarning, match="Unrecognized streaming chunk role"):
            msg = _convert_chunk_to_message_chunk(chunk, ChatMessageChunk)
        assert isinstance(msg, ChatMessageChunk)

    def test_chunk_with_usage(self) -> None:
        """Test that usage metadata is extracted from streaming chunk."""
        chunk: dict[str, Any] = {
            "choices": [{"delta": {"content": ""}}],
            "usage": {
                "prompt_tokens": 10,
                "completion_tokens": 5,
                "total_tokens": 15,
            },
        }
        message_chunk = _convert_chunk_to_message_chunk(chunk, AIMessageChunk)
        assert isinstance(message_chunk, AIMessageChunk)
        assert message_chunk.usage_metadata is not None
        assert message_chunk.usage_metadata["input_tokens"] == 10
# ===========================================================================
# Usage metadata tests
# ===========================================================================
class TestUsageMetadata:
    """Tests for `_create_usage_metadata`.

    Verifies normalization of raw OpenRouter usage dicts into LangChain
    usage metadata: key renaming, float-to-int casting, defaults, computed
    totals, and nested cache/reasoning token details.
    """

    def test_basic_usage(self) -> None:
        """Basic usage metadata creation maps the three token counters."""
        raw = {"prompt_tokens": 10, "completion_tokens": 5, "total_tokens": 15}
        metadata = _create_usage_metadata(raw)
        expected = {"input_tokens": 10, "output_tokens": 5, "total_tokens": 15}
        for key, value in expected.items():
            assert metadata[key] == value

    def test_float_tokens_cast_to_int(self) -> None:
        """Float token counts are cast to int."""
        raw = {"prompt_tokens": 10.0, "completion_tokens": 5.0, "total_tokens": 15.0}
        metadata = _create_usage_metadata(raw)
        assert metadata["input_tokens"] == 10
        assert isinstance(metadata["input_tokens"], int)

    def test_missing_tokens_default_to_zero(self) -> None:
        """An empty usage dict yields zeroed counters."""
        metadata = _create_usage_metadata({})
        for key in ("input_tokens", "output_tokens", "total_tokens"):
            assert metadata[key] == 0

    def test_total_tokens_computed_if_missing(self) -> None:
        """total_tokens is derived as input + output when absent."""
        metadata = _create_usage_metadata(
            {"prompt_tokens": 10, "completion_tokens": 5}
        )
        assert metadata["total_tokens"] == 15

    def test_token_details(self) -> None:
        """Cache-read and reasoning token details are extracted."""
        raw = {
            "prompt_tokens": 100,
            "completion_tokens": 50,
            "total_tokens": 150,
            "prompt_tokens_details": {"cached_tokens": 20},
            "completion_tokens_details": {"reasoning_tokens": 10},
        }
        metadata = _create_usage_metadata(raw)
        assert "input_token_details" in metadata
        assert metadata["input_token_details"]["cache_read"] == 20
        assert "output_token_details" in metadata
        assert metadata["output_token_details"]["reasoning"] == 10

    def test_cache_creation_details(self) -> None:
        """cache_write_tokens maps onto the cache_creation detail."""
        raw = {
            "prompt_tokens": 100,
            "completion_tokens": 50,
            "total_tokens": 150,
            "prompt_tokens_details": {
                "cached_tokens": 0,
                "cache_write_tokens": 80,
            },
        }
        metadata = _create_usage_metadata(raw)
        assert "input_token_details" in metadata
        assert metadata["input_token_details"]["cache_creation"] == 80

    def test_zero_token_details_preserved(self) -> None:
        """Zero-valued token details are kept rather than dropped."""
        raw = {
            "prompt_tokens": 100,
            "completion_tokens": 50,
            "total_tokens": 150,
            "prompt_tokens_details": {"cached_tokens": 0},
            "completion_tokens_details": {"reasoning_tokens": 0},
        }
        metadata = _create_usage_metadata(raw)
        assert "input_token_details" in metadata
        assert metadata["input_token_details"]["cache_read"] == 0
        assert "output_token_details" in metadata
        assert metadata["output_token_details"]["reasoning"] == 0

    def test_alternative_token_key_names(self) -> None:
        """Fallback key names input_tokens/output_tokens are honored."""
        raw = {
            "input_tokens": 10,
            "output_tokens": 5,
            "total_tokens": 15,
        }
        metadata = _create_usage_metadata(raw)
        expected = {"input_tokens": 10, "output_tokens": 5, "total_tokens": 15}
        for key, value in expected.items():
            assert metadata[key] == value
# ===========================================================================
# Error-path tests
# ===========================================================================
class TestErrorPaths:
    """Tests for error handling in various code paths.

    Covers constructor validation (`n`, model_kwargs collisions, retry
    configuration) and error responses from `_create_chat_result` /
    message converters.
    """

    def test_n_less_than_1_raises(self) -> None:
        """Test that n < 1 raises ValueError."""
        with pytest.raises(ValueError, match="greater than or equal to 1"):
            _make_model(n=0)

    def test_n_greater_than_1_with_streaming_raises(self) -> None:
        """Test that n > 1 with streaming raises ValueError."""
        with pytest.raises(ValueError, match="n must be 1 when streaming"):
            _make_model(n=2, streaming=True)

    def test_n_forwarded_in_params(self) -> None:
        """Test that n > 1 is included in _default_params."""
        model = _make_model(n=3)
        assert model._default_params["n"] == 3

    def test_n_default_excluded_from_params(self) -> None:
        """Test that n=1 (default) is not in _default_params."""
        model = _make_model()
        assert "n" not in model._default_params

    def test_error_response_raises(self) -> None:
        """Test that an error response from the API raises ValueError."""
        model = _make_model()
        error_response: dict[str, Any] = {
            "error": {
                "code": 429,
                "message": "Rate limit exceeded",
            },
        }
        # The API error message should surface in the raised exception.
        with pytest.raises(ValueError, match="Rate limit exceeded"):
            model._create_chat_result(error_response)

    def test_error_response_without_message(self) -> None:
        """Test that an error response without a message still raises."""
        model = _make_model()
        error_response: dict[str, Any] = {
            "error": {"code": 500},
        }
        with pytest.raises(ValueError, match="OpenRouter API returned an error"):
            model._create_chat_result(error_response)

    def test_empty_choices_raises(self) -> None:
        """Test that a response with no choices raises ValueError."""
        model = _make_model()
        response: dict[str, Any] = {
            "choices": [],
            "usage": {"prompt_tokens": 10, "completion_tokens": 0, "total_tokens": 10},
        }
        with pytest.raises(ValueError, match="no choices"):
            model._create_chat_result(response)

    def test_missing_role_raises(self) -> None:
        """Test that a response message missing 'role' raises ValueError."""
        d: dict[str, Any] = {"content": "Hello"}
        with pytest.raises(ValueError, match="missing the 'role' field"):
            _convert_dict_to_message(d)

    def test_unknown_message_type_raises(self) -> None:
        """Test that unknown message types raise TypeError."""
        # FunctionMessage is intentionally unsupported; imported locally
        # since it is not needed anywhere else in this module.
        from langchain_core.messages import FunctionMessage  # noqa: PLC0415
        msg = FunctionMessage(content="result", name="fn")
        with pytest.raises(TypeError, match="Got unknown type"):
            _convert_message_to_dict(msg)

    def test_duplicate_model_kwargs_raises(self) -> None:
        """Test that passing a param in both field and model_kwargs raises."""
        with pytest.raises(ValueError, match="supplied twice"):
            _make_model(temperature=0.5, model_kwargs={"temperature": 0.7})

    def test_known_field_in_model_kwargs_raises(self) -> None:
        """Test that a known field passed in model_kwargs raises."""
        with pytest.raises(ValueError, match="should be specified explicitly"):
            _make_model(model_kwargs={"model_name": "some-model"})

    def test_max_retries_zero_disables_retries(self) -> None:
        """Test that max_retries=0 does not configure retry."""
        with patch("openrouter.OpenRouter") as mock_cls:
            mock_cls.return_value = MagicMock()
            ChatOpenRouter(
                model=MODEL_NAME,
                api_key=SecretStr("test-key"),
                max_retries=0,
            )
        call_kwargs = mock_cls.call_args[1]
        assert "retry_config" not in call_kwargs

    def test_max_retries_scales_elapsed_time(self) -> None:
        """Test that max_retries value scales max_elapsed_time."""
        with patch("openrouter.OpenRouter") as mock_cls:
            mock_cls.return_value = MagicMock()
            ChatOpenRouter(
                model=MODEL_NAME,
                api_key=SecretStr("test-key"),
                max_retries=4,
            )
        call_kwargs = mock_cls.call_args[1]
        retry_config = call_kwargs["retry_config"]
        # max_elapsed_time scales linearly with max_retries (150s per retry,
        # in milliseconds).
        assert retry_config.backoff.max_elapsed_time == 4 * 150_000
# ===========================================================================
# Reasoning details tests
# ===========================================================================
class TestReasoningDetails:
    """Tests for reasoning_details extraction.
    OpenRouter returns reasoning metadata via `reasoning_details` for models
    like OpenAI o-series and Gemini (thought signatures). This verifies the
    field is preserved in both streaming and non-streaming paths.
    """

    def test_reasoning_details_in_non_streaming_response(self) -> None:
        """Test that reasoning_details are extracted from a non-streaming response."""
        details = [
            {"type": "reasoning.text", "text": "Step 1: analyze the problem"},
            {"type": "reasoning.text", "text": "Step 2: solve it"},
        ]
        d = {
            "role": "assistant",
            "content": "The answer is 42.",
            "reasoning_details": details,
        }
        msg = _convert_dict_to_message(d)
        assert isinstance(msg, AIMessage)
        assert msg.additional_kwargs["reasoning_details"] == details

    def test_reasoning_details_in_streaming_chunk(self) -> None:
        """Test that reasoning_details are extracted from a streaming chunk."""
        details = [{"type": "reasoning.text", "text": "thinking..."}]
        chunk: dict[str, Any] = {
            "choices": [
                {
                    "delta": {
                        "content": "Answer",
                        "reasoning_details": details,
                    },
                }
            ],
        }
        message_chunk = _convert_chunk_to_message_chunk(chunk, AIMessageChunk)
        assert isinstance(message_chunk, AIMessageChunk)
        assert message_chunk.additional_kwargs["reasoning_details"] == details

    def test_reasoning_and_reasoning_details_coexist(self) -> None:
        """Test that both reasoning and reasoning_details can be present."""
        d = {
            "role": "assistant",
            "content": "Answer",
            "reasoning": "I thought about it",
            "reasoning_details": [
                {"type": "reasoning.text", "text": "detailed thinking"},
            ],
        }
        msg = _convert_dict_to_message(d)
        assert isinstance(msg, AIMessage)
        assert msg.additional_kwargs["reasoning_content"] == "I thought about it"
        assert len(msg.additional_kwargs["reasoning_details"]) == 1

    def test_reasoning_in_full_invoke_flow(self) -> None:
        """Test reasoning extraction through the full invoke path."""
        model = _make_model()
        # Replace the SDK client so invoke() hits the mocked send() below.
        model.client = MagicMock()
        response_dict: dict[str, Any] = {
            "choices": [
                {
                    "message": {
                        "role": "assistant",
                        "content": "9.9 is larger than 9.11",
                        "reasoning": "Comparing decimals: 9.9 = 9.90 > 9.11",
                        "reasoning_details": [
                            {
                                "type": "reasoning.text",
                                "text": "Let me compare these numbers...",
                            },
                        ],
                    },
                    "finish_reason": "stop",
                }
            ],
            "usage": {"prompt_tokens": 10, "completion_tokens": 20, "total_tokens": 30},
        }
        model.client.chat.send.return_value = _make_sdk_response(response_dict)
        result = model.invoke("Which is larger: 9.11 or 9.9?")
        assert isinstance(result, AIMessage)
        assert result.content == "9.9 is larger than 9.11"
        assert result.additional_kwargs["reasoning_content"] == (
            "Comparing decimals: 9.9 = 9.90 > 9.11"
        )
        assert len(result.additional_kwargs["reasoning_details"]) == 1

    def test_reasoning_in_streaming_flow(self) -> None:
        """Test reasoning extraction through the full streaming path."""
        model = _make_model()
        model.client = MagicMock()
        # Chunk sequence: role header, reasoning delta, content delta, finish.
        reasoning_chunks = [
            {
                "choices": [
                    {"delta": {"role": "assistant", "content": ""}, "index": 0}
                ],
                "model": MODEL_NAME,
                "object": "chat.completion.chunk",
                "created": 1700000000.0,
                "id": "gen-reason",
            },
            {
                "choices": [
                    {
                        "delta": {
                            "reasoning": "Thinking step 1...",
                        },
                        "index": 0,
                    }
                ],
                "model": MODEL_NAME,
                "object": "chat.completion.chunk",
                "created": 1700000000.0,
                "id": "gen-reason",
            },
            {
                "choices": [
                    {
                        "delta": {"content": "The answer"},
                        "index": 0,
                    }
                ],
                "model": MODEL_NAME,
                "object": "chat.completion.chunk",
                "created": 1700000000.0,
                "id": "gen-reason",
            },
            {
                "choices": [{"delta": {}, "finish_reason": "stop", "index": 0}],
                "model": MODEL_NAME,
                "object": "chat.completion.chunk",
                "created": 1700000000.0,
                "id": "gen-reason",
            },
        ]
        model.client.chat.send.return_value = _MockSyncStream(
            [dict(c) for c in reasoning_chunks]
        )
        chunks = list(model.stream("Think about this"))
        reasoning_found = any(
            c.additional_kwargs.get("reasoning_content") for c in chunks
        )
        assert reasoning_found, "Expected reasoning_content in at least one chunk"
# ===========================================================================
# OpenRouter-specific params tests (issues #34797, #34962)
# ===========================================================================
class TestOpenRouterSpecificParams:
    """Tests for OpenRouter-specific parameter handling.

    Checks that provider-specific options (plugins, max_completion_tokens,
    reasoning, provider routing) flow from the model configuration into
    `_default_params`, the SDK payload, and the SDK client constructor.
    """

    def test_plugins_in_params(self) -> None:
        """`plugins` set on the model appears in default params."""
        plugin_config = [{"id": "web", "max_results": 3}]
        llm = _make_model(plugins=plugin_config)
        assert llm._default_params["plugins"] == plugin_config

    def test_plugins_excluded_when_none(self) -> None:
        """`plugins` key is absent from default params when unset."""
        llm = _make_model()
        assert "plugins" not in llm._default_params

    def test_plugins_in_payload(self) -> None:
        """`plugins` are forwarded on the actual SDK call."""
        plugin_config = [{"id": "web", "max_results": 5}]
        llm = _make_model(plugins=plugin_config)
        llm.client = MagicMock()
        llm.client.chat.send.return_value = _make_sdk_response(_SIMPLE_RESPONSE_DICT)
        llm.invoke("Search the web for LangChain")
        sent = llm.client.chat.send.call_args[1]
        assert sent["plugins"] == plugin_config

    def test_max_completion_tokens_in_params(self) -> None:
        """max_completion_tokens appears in default params when set."""
        llm = _make_model(max_completion_tokens=1024)
        assert llm._default_params["max_completion_tokens"] == 1024

    def test_max_completion_tokens_excluded_when_none(self) -> None:
        """max_completion_tokens is absent from default params when unset."""
        llm = _make_model()
        assert "max_completion_tokens" not in llm._default_params

    def test_base_url_passed_to_client(self) -> None:
        """base_url is forwarded to the SDK client as server_url."""
        with patch("openrouter.OpenRouter") as mock_cls:
            mock_cls.return_value = MagicMock()
            ChatOpenRouter(
                model=MODEL_NAME,
                api_key=SecretStr("test-key"),
                base_url="https://custom.openrouter.ai/api/v1",
            )
        sent = mock_cls.call_args[1]
        assert sent["server_url"] == "https://custom.openrouter.ai/api/v1"

    def test_timeout_passed_to_client(self) -> None:
        """timeout is forwarded to the SDK client as timeout_ms."""
        with patch("openrouter.OpenRouter") as mock_cls:
            mock_cls.return_value = MagicMock()
            ChatOpenRouter(
                model=MODEL_NAME,
                api_key=SecretStr("test-key"),
                timeout=30000,
            )
        sent = mock_cls.call_args[1]
        assert sent["timeout_ms"] == 30000

    def test_all_openrouter_params_in_single_payload(self) -> None:
        """All OpenRouter-specific params coexist in one SDK payload."""
        routing = {"order": ["Anthropic"], "allow_fallbacks": True}
        llm = _make_model(
            reasoning={"effort": "high"},
            openrouter_provider=routing,
            route="fallback",
            plugins=[{"id": "web"}],
        )
        llm.client = MagicMock()
        llm.client.chat.send.return_value = _make_sdk_response(_SIMPLE_RESPONSE_DICT)
        llm.invoke("Hi")
        sent = llm.client.chat.send.call_args[1]
        assert sent["reasoning"] == {"effort": "high"}
        # `openrouter_provider` is sent under the wire key "provider".
        assert sent["provider"] == {
            "order": ["Anthropic"],
            "allow_fallbacks": True,
        }
        assert sent["route"] == "fallback"
        assert sent["plugins"] == [{"id": "web"}]
# ===========================================================================
# Multimodal content formatting tests
# ===========================================================================
class TestFormatMessageContent:
    """Tests for `_format_message_content` handling of data blocks.

    Exercises pass-through of string/text/image_url content, conversion of
    base64 image/audio/video blocks, and file-block conversion via
    `_convert_video_block_to_openrouter` / `_convert_file_block_to_openrouter`.
    """
    def test_string_content_passthrough(self) -> None:
        """Test that plain string content passes through unchanged."""
        assert _format_message_content("Hello") == "Hello"
    def test_empty_string_passthrough(self) -> None:
        """Test that empty string passes through unchanged."""
        assert _format_message_content("") == ""
    def test_none_passthrough(self) -> None:
        """Test that None passes through unchanged."""
        assert _format_message_content(None) is None
    def test_text_block_passthrough(self) -> None:
        """Test that standard text content blocks pass through."""
        content = [{"type": "text", "text": "Hello"}]
        result = _format_message_content(content)
        assert result == [{"type": "text", "text": "Hello"}]
    def test_image_url_block_passthrough(self) -> None:
        """Test that image_url content blocks pass through."""
        content = [
            {"type": "text", "text": "What is in this image?"},
            {
                "type": "image_url",
                "image_url": {"url": "https://example.com/img.png"},
            },
        ]
        result = _format_message_content(content)
        assert len(result) == 2
        assert result[0]["type"] == "text"
        assert result[1]["type"] == "image_url"
    def test_image_base64_block(self) -> None:
        """Test that base64 image blocks are converted to image_url format."""
        content = [
            {
                "type": "image",
                "base64": "iVBORw0KGgo=",
                "mime_type": "image/png",
            },
        ]
        result = _format_message_content(content)
        assert len(result) == 1
        assert result[0]["type"] == "image_url"
        # Base64 payloads become data URIs of the form data:<mime>;base64,<data>.
        assert result[0]["image_url"]["url"].startswith("data:image/png;base64,")
    def test_audio_base64_block(self) -> None:
        """Test that base64 audio blocks are converted to input_audio format."""
        content = [
            {"type": "text", "text": "Transcribe this audio."},
            {
                "type": "audio",
                "base64": "UklGR...",
                "mime_type": "audio/wav",
            },
        ]
        result = _format_message_content(content)
        assert len(result) == 2
        assert result[0]["type"] == "text"
        assert result[1]["type"] == "input_audio"
        assert result[1]["input_audio"]["data"] == "UklGR..."
        # Audio format is derived from the mime_type subtype ("audio/wav" -> "wav").
        assert result[1]["input_audio"]["format"] == "wav"
    def test_video_url_block(self) -> None:
        """Test that video URL blocks are converted to video_url format."""
        content = [
            {"type": "text", "text": "Describe this video."},
            {
                "type": "video",
                "url": "https://example.com/video.mp4",
            },
        ]
        result = _format_message_content(content)
        assert len(result) == 2
        assert result[0]["type"] == "text"
        assert result[1] == {
            "type": "video_url",
            "video_url": {"url": "https://example.com/video.mp4"},
        }
    def test_video_base64_block(self) -> None:
        """Test that base64 video blocks are converted to video_url data URI."""
        content = [
            {
                "type": "video",
                "base64": "AAAAIGZ0...",
                "mime_type": "video/mp4",
            },
        ]
        result = _format_message_content(content)
        assert len(result) == 1
        assert result[0]["type"] == "video_url"
        assert result[0]["video_url"]["url"] == ("data:video/mp4;base64,AAAAIGZ0...")
    def test_video_base64_default_mime_type(self) -> None:
        """Test that video base64 defaults to video/mp4 when mime_type is missing."""
        content = [
            {
                "type": "video",
                "base64": "AAAAIGZ0...",
            },
        ]
        result = _format_message_content(content)
        assert result[0]["video_url"]["url"].startswith("data:video/mp4;base64,")
    def test_video_base64_source_type_format(self) -> None:
        """Test video block using ``source_type`` + ``data`` keys."""
        # Legacy block shape: source_type/data instead of base64.
        block: dict[str, Any] = {
            "type": "video",
            "source_type": "base64",
            "data": "AAAAIGZ0...",
            "mime_type": "video/webm",
        }
        result = _convert_video_block_to_openrouter(block)
        assert result["type"] == "video_url"
        assert result["video_url"]["url"] == "data:video/webm;base64,AAAAIGZ0..."
    def test_video_block_missing_source_raises(self) -> None:
        """Test that video blocks without url or base64 raise ValueError."""
        block: dict[str, Any] = {"type": "video", "mime_type": "video/mp4"}
        with pytest.raises(ValueError, match=r"url.*base64"):
            _convert_video_block_to_openrouter(block)
    # --- file block tests ---
    def test_file_url_block(self) -> None:
        """Test that file URL blocks are converted to OpenRouter file format."""
        content = [
            {"type": "text", "text": "Summarize this document."},
            {
                "type": "file",
                "url": "https://example.com/document.pdf",
                "mime_type": "application/pdf",
            },
        ]
        result = _format_message_content(content)
        assert len(result) == 2
        assert result[0]["type"] == "text"
        # File URLs are placed verbatim into file.file_data.
        assert result[1] == {
            "type": "file",
            "file": {"file_data": "https://example.com/document.pdf"},
        }
    def test_file_url_block_with_filename(self) -> None:
        """Test that filename is included when present."""
        block: dict[str, Any] = {
            "type": "file",
            "url": "https://example.com/report.pdf",
            "mime_type": "application/pdf",
            "filename": "report.pdf",
        }
        result = _convert_file_block_to_openrouter(block)
        assert result == {
            "type": "file",
            "file": {
                "file_data": "https://example.com/report.pdf",
                "filename": "report.pdf",
            },
        }
    def test_file_base64_block(self) -> None:
        """Test that base64 file blocks are converted to data URI format."""
        content = [
            {
                "type": "file",
                "base64": "JVBERi0xLjQ=",
                "mime_type": "application/pdf",
                "filename": "doc.pdf",
            },
        ]
        result = _format_message_content(content)
        assert len(result) == 1
        assert result[0] == {
            "type": "file",
            "file": {
                "file_data": "data:application/pdf;base64,JVBERi0xLjQ=",
                "filename": "doc.pdf",
            },
        }
    def test_file_base64_source_type_format(self) -> None:
        """Test file block using ``source_type`` + ``data`` keys."""
        block: dict[str, Any] = {
            "type": "file",
            "source_type": "base64",
            "data": "JVBERi0xLjQ=",
            "mime_type": "application/pdf",
        }
        result = _convert_file_block_to_openrouter(block)
        assert result == {
            "type": "file",
            "file": {
                "file_data": "data:application/pdf;base64,JVBERi0xLjQ=",
            },
        }
    def test_file_filename_from_extras(self) -> None:
        """Test filename extraction from extras dict."""
        block: dict[str, Any] = {
            "type": "file",
            "url": "https://example.com/doc.pdf",
            "extras": {"filename": "my-doc.pdf"},
        }
        result = _convert_file_block_to_openrouter(block)
        assert result["file"]["filename"] == "my-doc.pdf"
    def test_file_filename_from_metadata(self) -> None:
        """Test filename extraction from metadata dict (backward compat)."""
        block: dict[str, Any] = {
            "type": "file",
            "url": "https://example.com/doc.pdf",
            "metadata": {"filename": "legacy.pdf"},
        }
        result = _convert_file_block_to_openrouter(block)
        assert result["file"]["filename"] == "legacy.pdf"
    def test_file_id_block_raises(self) -> None:
        """Test that file ID blocks raise ValueError (unsupported by OpenRouter)."""
        block: dict[str, Any] = {"type": "file", "file_id": "file-abc123"}
        with pytest.raises(ValueError, match="file IDs"):
            _convert_file_block_to_openrouter(block)
    def test_file_block_missing_source_raises(self) -> None:
        """Test that file blocks without url or base64 raise ValueError."""
        block: dict[str, Any] = {"type": "file", "mime_type": "application/pdf"}
        with pytest.raises(ValueError, match=r"url.*base64"):
            _convert_file_block_to_openrouter(block)
    def test_mixed_multimodal_content(self) -> None:
        """Test formatting a message with text, image, audio, video, and file."""
        content = [
            {"type": "text", "text": "Analyze these inputs."},
            {"type": "image", "url": "https://example.com/img.png"},
            {"type": "audio", "base64": "audio_data", "mime_type": "audio/mp3"},
            {"type": "video", "url": "https://example.com/clip.mp4"},
            {"type": "file", "url": "https://example.com/doc.pdf"},
        ]
        result = _format_message_content(content)
        # Block order must be preserved; each modality maps to its wire type.
        assert len(result) == 5
        assert result[0]["type"] == "text"
        assert result[1]["type"] == "image_url"
        assert result[2]["type"] == "input_audio"
        assert result[3]["type"] == "video_url"
        assert result[4] == {
            "type": "file",
            "file": {"file_data": "https://example.com/doc.pdf"},
        }
class TestWrapMessagesForSdk:
    """Tests for ``_wrap_messages_for_sdk`` SDK validation bypass.

    File content blocks are not accepted as plain dicts by the SDK, so
    messages containing them are wrapped into SDK Pydantic models; messages
    without file blocks must pass through untouched.
    """
    def test_no_file_blocks_returns_dicts(self) -> None:
        """Messages without file blocks should be returned as plain dicts."""
        msgs: list[dict[str, Any]] = [
            {"role": "user", "content": "Hello"},
            {"role": "assistant", "content": "Hi there"},
        ]
        result = _wrap_messages_for_sdk(msgs)
        # Should be the exact same list object (no wrapping needed)
        assert result is msgs
    def test_has_file_content_blocks_detection(self) -> None:
        """Test ``_has_file_content_blocks`` detects file blocks correctly."""
        # Plain string content: no file blocks.
        assert not _has_file_content_blocks([{"role": "user", "content": "plain text"}])
        # Structured content without a "file" block: still negative.
        assert not _has_file_content_blocks(
            [
                {
                    "role": "user",
                    "content": [{"type": "text", "text": "hi"}],
                }
            ]
        )
        # A single "file" block anywhere in the content triggers detection.
        assert _has_file_content_blocks(
            [
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": "hi"},
                        {
                            "type": "file",
                            "file": {"file_data": "https://example.com/a.pdf"},
                        },
                    ],
                }
            ]
        )
    def test_wraps_as_pydantic_models(self) -> None:
        """File-containing messages should be wrapped as SDK Pydantic models."""
        from openrouter import components  # noqa: PLC0415
        msgs: list[dict[str, Any]] = [
            {"role": "system", "content": "You are helpful."},
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": "Summarize this."},
                    {
                        "type": "file",
                        "file": {
                            "file_data": "https://example.com/doc.pdf",
                            "filename": "doc.pdf",
                        },
                    },
                ],
            },
        ]
        result = _wrap_messages_for_sdk(msgs)
        assert len(result) == 2
        # Once any message carries a file block, every message is wrapped.
        assert isinstance(result[0], components.SystemMessage)
        assert isinstance(result[1], components.UserMessage)
    def test_wrapped_serializes_correctly(self) -> None:
        """Wrapped models should serialize to the correct JSON payload."""
        import warnings  # noqa: PLC0415
        msgs: list[dict[str, Any]] = [
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": "Read this."},
                    {
                        "type": "file",
                        "file": {"file_data": "data:application/pdf;base64,abc"},
                    },
                ],
            },
        ]
        result = _wrap_messages_for_sdk(msgs)
        wrapped_msg = result[0]
        assert hasattr(wrapped_msg, "model_dump")
        # Silence Pydantic serialization warnings; only the payload matters here.
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            dumped = wrapped_msg.model_dump(by_alias=True, exclude_none=True)
        assert dumped["role"] == "user"
        assert dumped["content"][0] == {"type": "text", "text": "Read this."}
        assert dumped["content"][1] == {
            "type": "file",
            "file": {"file_data": "data:application/pdf;base64,abc"},
        }
    def test_all_roles_wrapped(self) -> None:
        """All standard roles should be wrapped correctly."""
        from openrouter import components  # noqa: PLC0415
        msgs: list[dict[str, Any]] = [
            {"role": "system", "content": "System prompt."},
            {
                "role": "user",
                "content": [
                    {"type": "file", "file": {"file_data": "https://x.com/f.pdf"}},
                ],
            },
            {
                "role": "assistant",
                "content": "Summary here.",
                "tool_calls": [
                    {
                        "id": "c1",
                        "type": "function",
                        "function": {"name": "fn", "arguments": "{}"},
                    }
                ],
            },
            {"role": "tool", "content": "result", "tool_call_id": "c1"},
        ]
        result = _wrap_messages_for_sdk(msgs)
        # Each role maps to its corresponding SDK component model.
        assert isinstance(result[0], components.SystemMessage)
        assert isinstance(result[1], components.UserMessage)
        assert isinstance(result[2], components.AssistantMessage)
        assert isinstance(result[3], components.ToolResponseMessage)
# ===========================================================================
# Structured output tests
# ===========================================================================
class TestStructuredOutputIntegration:
    """Tests for structured output covering issue-specific scenarios.

    Exercises the `function_calling` and `json_schema` methods of
    `with_structured_output`, the `json_mode` fallback, and the
    `include_raw=True` output shape on both success and parse failure.
    """
    def test_structured_output_function_calling_invokes_with_tools(self) -> None:
        """Test that `function_calling` structured output sends tools in payload."""
        model = _make_model()
        model.client = MagicMock()
        model.client.chat.send.return_value = _make_sdk_response(_TOOL_RESPONSE_DICT)
        structured = model.with_structured_output(GetWeather, method="function_calling")
        # The first step in the chain is the bound model
        bound = structured.first  # type: ignore[attr-defined]
        assert isinstance(bound, RunnableBinding)
        assert "tools" in bound.kwargs
        # tool_choice must force the single schema-derived tool.
        assert bound.kwargs["tool_choice"] == {
            "type": "function",
            "function": {"name": "GetWeather"},
        }
    def test_structured_output_json_schema_no_beta_parse(self) -> None:
        """Test that `json_schema` method uses `response_format`, not `beta.parse`."""
        model = _make_model()
        structured = model.with_structured_output(GetWeather, method="json_schema")
        bound = structured.first  # type: ignore[attr-defined]
        assert isinstance(bound, RunnableBinding)
        rf = bound.kwargs["response_format"]
        assert rf["type"] == "json_schema"
        assert "schema" in rf["json_schema"]
    def test_response_format_json_schema_reaches_sdk(self) -> None:
        """Test that `response_format` from json_schema method is sent to the SDK."""
        model = _make_model()
        model.client = MagicMock()
        # Mock a plain-JSON assistant reply so the output parser succeeds.
        model.client.chat.send.return_value = _make_sdk_response(
            {
                **_SIMPLE_RESPONSE_DICT,
                "choices": [
                    {
                        "message": {
                            "role": "assistant",
                            "content": '{"location": "SF"}',
                        },
                        "finish_reason": "stop",
                        "index": 0,
                    }
                ],
            }
        )
        structured = model.with_structured_output(GetWeather, method="json_schema")
        structured.invoke("weather in SF")
        call_kwargs = model.client.chat.send.call_args[1]
        assert "response_format" in call_kwargs
        assert call_kwargs["response_format"]["type"] == "json_schema"
    def test_response_format_json_mode_falls_back_to_json_schema_in_sdk(self) -> None:
        """Test that json_mode warns, falls back to json_schema, and reaches SDK."""
        model = _make_model()
        model.client = MagicMock()
        model.client.chat.send.return_value = _make_sdk_response(
            {
                **_SIMPLE_RESPONSE_DICT,
                "choices": [
                    {
                        "message": {
                            "role": "assistant",
                            "content": '{"location": "SF"}',
                        },
                        "finish_reason": "stop",
                        "index": 0,
                    }
                ],
            }
        )
        # Unsupported "json_mode" should emit a warning and degrade gracefully.
        with pytest.warns(match="Defaulting to 'json_schema'"):
            structured = model.with_structured_output(
                GetWeather,
                method="json_mode",  # type: ignore[arg-type]
            )
        structured.invoke("weather in SF")
        call_kwargs = model.client.chat.send.call_args[1]
        assert "response_format" in call_kwargs
        assert call_kwargs["response_format"]["type"] == "json_schema"
    def test_include_raw_returns_raw_and_parsed_on_success(self) -> None:
        """Test that `include_raw=True` returns raw message, parsed output, no error."""
        model = _make_model()
        model.client = MagicMock()
        model.client.chat.send.return_value = _make_sdk_response(_TOOL_RESPONSE_DICT)
        structured = model.with_structured_output(
            GetWeather, method="function_calling", include_raw=True
        )
        result = structured.invoke("weather in SF")
        # include_raw=True yields a dict with exactly these three keys.
        assert isinstance(result, dict)
        assert "raw" in result
        assert "parsed" in result
        assert "parsing_error" in result
        assert isinstance(result["raw"], AIMessage)
        assert result["parsing_error"] is None
        # PydanticToolsParser returns a Pydantic instance, not a dict
        assert isinstance(result["parsed"], GetWeather)
        assert result["parsed"].location == "San Francisco"
    def test_include_raw_preserves_raw_on_parse_failure(self) -> None:
        """Test that `include_raw=True` still returns the raw message on parse error."""
        model = _make_model()
        model.client = MagicMock()
        # Return a tool call whose arguments fail Pydantic validation
        # (missing required field "location")
        bad_tool_response: dict[str, Any] = {
            **_SIMPLE_RESPONSE_DICT,
            "choices": [
                {
                    "message": {
                        "role": "assistant",
                        "content": None,
                        "tool_calls": [
                            {
                                "id": "call_bad",
                                "type": "function",
                                "function": {
                                    "name": "GetWeather",
                                    "arguments": '{"wrong_field": "oops"}',
                                },
                            }
                        ],
                    },
                    "finish_reason": "tool_calls",
                    "index": 0,
                }
            ],
        }
        model.client.chat.send.return_value = _make_sdk_response(bad_tool_response)
        structured = model.with_structured_output(
            GetWeather, method="function_calling", include_raw=True
        )
        result = structured.invoke("weather in SF")
        assert isinstance(result, dict)
        assert "raw" in result
        assert isinstance(result["raw"], AIMessage)
        # Raw response should have the tool call even though parsing failed
        assert len(result["raw"].tool_calls) == 1
        # Parsed should be None since Pydantic validation failed
        assert result["parsed"] is None
        # parsing_error should capture the validation exception
        assert result["parsing_error"] is not None
# ===========================================================================
# Multiple choices (n > 1) response tests
# ===========================================================================
class TestMultipleChoices:
    """Tests for handling responses with `n > 1`."""
    def test_multiple_choices_in_response(self) -> None:
        """Test that multiple choices in a response produce multiple generations."""
        expected_answers = ["Answer A", "Answer B"]
        # Build a two-choice API response programmatically.
        payload: dict[str, Any] = {
            "choices": [
                {
                    "message": {"role": "assistant", "content": text},
                    "finish_reason": "stop",
                    "index": position,
                }
                for position, text in enumerate(expected_answers)
            ],
            "usage": {"prompt_tokens": 10, "completion_tokens": 10, "total_tokens": 20},
        }
        chat = _make_model(n=2)
        outcome = chat._create_chat_result(payload)
        # One generation per choice, in order.
        assert len(outcome.generations) == len(expected_answers)
        for generation, text in zip(outcome.generations, expected_answers):
            assert generation.message.content == text
# ===========================================================================
# Environment variable configuration tests
# ===========================================================================
class TestEnvironmentConfiguration:
    """Tests for environment variable based configuration."""
    @staticmethod
    def _model_with_env(
        monkeypatch: pytest.MonkeyPatch, var: str, value: str
    ) -> ChatOpenRouter:
        # Always supply the API key, plus the one variable under test.
        monkeypatch.setenv("OPENROUTER_API_KEY", "env-key")
        monkeypatch.setenv(var, value)
        return ChatOpenRouter(model=MODEL_NAME)
    def test_base_url_from_env(self, monkeypatch: pytest.MonkeyPatch) -> None:
        """Test that OPENROUTER_API_BASE env var sets the base URL."""
        chat = self._model_with_env(
            monkeypatch, "OPENROUTER_API_BASE", "https://custom.example.com"
        )
        assert chat.openrouter_api_base == "https://custom.example.com"
    def test_app_url_from_env(self, monkeypatch: pytest.MonkeyPatch) -> None:
        """Test that OPENROUTER_APP_URL env var sets the app URL."""
        chat = self._model_with_env(monkeypatch, "OPENROUTER_APP_URL", "https://myapp.com")
        assert chat.app_url == "https://myapp.com"
    def test_app_title_from_env(self, monkeypatch: pytest.MonkeyPatch) -> None:
        """Test that OPENROUTER_APP_TITLE env var sets the app title."""
        chat = self._model_with_env(
            monkeypatch, "OPENROUTER_APP_TITLE", "My LangChain App"
        )
        assert chat.app_title == "My LangChain App"
# ===========================================================================
# Streaming error handling tests
# ===========================================================================
class TestStreamingErrors:
    """Tests for error handling during streaming.

    Covers error chunks (with and without messages), heartbeat-chunk
    skipping, the `streaming=True` ainvoke path, logprobs propagation,
    and a malformed tool-call chunk — for both sync and async streams.
    """
    def test_stream_error_chunk_raises(self) -> None:
        """Test that a streaming error chunk raises ValueError."""
        model = _make_model()
        model.client = MagicMock()
        error_chunks: list[dict[str, Any]] = [
            {
                "error": {"code": 429, "message": "Rate limit exceeded"},
            },
        ]
        model.client.chat.send.return_value = _MockSyncStream(error_chunks)
        # The API error message should surface in the raised exception.
        with pytest.raises(ValueError, match="Rate limit exceeded"):
            list(model.stream("Hello"))
    def test_stream_error_chunk_without_message(self) -> None:
        """Test that a streaming error chunk without a message still raises."""
        model = _make_model()
        model.client = MagicMock()
        error_chunks: list[dict[str, Any]] = [
            {
                "error": {"code": 500},
            },
        ]
        model.client.chat.send.return_value = _MockSyncStream(error_chunks)
        # A generic message is used when the error payload has none.
        with pytest.raises(ValueError, match="OpenRouter API returned an error"):
            list(model.stream("Hello"))
    def test_stream_heartbeat_chunk_skipped(self) -> None:
        """Test that empty heartbeat chunks are silently skipped."""
        model = _make_model()
        model.client = MagicMock()
        chunks_with_heartbeat: list[dict[str, Any]] = [
            # Heartbeat -- no choices, no error
            {"id": "heartbeat", "object": "chat.completion.chunk", "created": 0},
            *[dict(c) for c in _STREAM_CHUNKS],
        ]
        model.client.chat.send.return_value = _MockSyncStream(chunks_with_heartbeat)
        chunks = list(model.stream("Hello"))
        # Should still produce content from the real chunks
        full_content = "".join(c.content for c in chunks if isinstance(c.content, str))
        assert "Hello" in full_content
    async def test_astream_error_chunk_raises(self) -> None:
        """Test that an async streaming error chunk raises ValueError."""
        model = _make_model()
        model.client = MagicMock()
        error_chunks: list[dict[str, Any]] = [
            {
                "error": {"code": 429, "message": "Rate limit exceeded"},
            },
        ]
        model.client.chat.send_async = AsyncMock(
            return_value=_MockAsyncStream(error_chunks)
        )
        with pytest.raises(ValueError, match="Rate limit exceeded"):
            chunks = [c async for c in model.astream("Hello")]  # noqa: F841
    async def test_astream_heartbeat_chunk_skipped(self) -> None:
        """Test that empty heartbeat chunks are skipped in async streaming."""
        model = _make_model()
        model.client = MagicMock()
        chunks_with_heartbeat: list[dict[str, Any]] = [
            {"id": "heartbeat", "object": "chat.completion.chunk", "created": 0},
            *[dict(c) for c in _STREAM_CHUNKS],
        ]
        model.client.chat.send_async = AsyncMock(
            return_value=_MockAsyncStream(chunks_with_heartbeat)
        )
        chunks = [c async for c in model.astream("Hello")]
        full_content = "".join(c.content for c in chunks if isinstance(c.content, str))
        assert "Hello" in full_content
    async def test_ainvoke_with_streaming_flag(self) -> None:
        """Test that ainvoke delegates to _astream when streaming=True."""
        model = _make_model(streaming=True)
        model.client = MagicMock()
        model.client.chat.send_async = AsyncMock(
            return_value=_MockAsyncStream([dict(c) for c in _STREAM_CHUNKS])
        )
        result = await model.ainvoke("Hello")
        assert isinstance(result, AIMessage)
        model.client.chat.send_async.assert_awaited_once()
        call_kwargs = model.client.chat.send_async.call_args[1]
        # The SDK must be called with stream=True when streaming is enabled.
        assert call_kwargs["stream"] is True
    def test_stream_logprobs_in_response_metadata(self) -> None:
        """Test that logprobs are propagated in streaming response_metadata."""
        model = _make_model()
        model.client = MagicMock()
        logprobs_data = {
            "content": [{"token": "Hello", "logprob": -0.5, "top_logprobs": []}]
        }
        stream_chunks: list[dict[str, Any]] = [
            {
                "choices": [
                    {
                        "delta": {"role": "assistant", "content": "Hello"},
                        "index": 0,
                        "logprobs": logprobs_data,
                    }
                ],
                "model": MODEL_NAME,
                "object": "chat.completion.chunk",
                "created": 1700000000.0,
                "id": "gen-logprobs",
            },
            {
                "choices": [{"delta": {}, "finish_reason": "stop", "index": 0}],
                "model": MODEL_NAME,
                "object": "chat.completion.chunk",
                "created": 1700000000.0,
                "id": "gen-logprobs",
            },
        ]
        model.client.chat.send.return_value = _MockSyncStream(stream_chunks)
        chunks = list(model.stream("Hello"))
        # First chunk should carry logprobs in response_metadata
        assert chunks[0].response_metadata.get("logprobs") == logprobs_data
    def test_stream_malformed_tool_call_with_null_function(self) -> None:
        """Test that a tool call chunk with function=None is handled gracefully."""
        chunk_data: dict[str, Any] = {
            "choices": [
                {
                    "delta": {
                        "role": "assistant",
                        "content": "",
                        "tool_calls": [
                            {"function": None, "index": 0},
                        ],
                    },
                    "index": 0,
                }
            ],
            "model": MODEL_NAME,
        }
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            result = _convert_chunk_to_message_chunk(chunk_data, AIMessageChunk)
        # Conversion must not raise; the malformed entry is dropped.
        assert isinstance(result, AIMessageChunk)
        # Should have warned about the malformed tool call
        assert any(
            "malformed tool call chunk" in str(warning.message) for warning in w
        )
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/partners/openrouter/tests/unit_tests/test_chat_models.py",
"license": "MIT License",
"lines": 2502,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/partners/openrouter/tests/unit_tests/test_imports.py | """Test `langchain_openrouter` public API surface."""
from langchain_openrouter import __all__
# The package intentionally exposes a single public symbol; update this
# list whenever the public API surface changes.
EXPECTED_ALL = [
    "ChatOpenRouter",
]
def test_all_imports() -> None:
    """Verify that __all__ exports match the expected public API."""
    exported = sorted(__all__)
    expected = sorted(EXPECTED_ALL)
    # Sorted-list comparison (not sets) also catches accidental duplicates.
    assert expected == exported
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/partners/openrouter/tests/unit_tests/test_imports.py",
"license": "MIT License",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/partners/openrouter/tests/unit_tests/test_standard.py | """Standard unit tests for `ChatOpenRouter`."""
from langchain_tests.unit_tests import ChatModelUnitTests
from langchain_openrouter.chat_models import ChatOpenRouter
MODEL_NAME = "openai/gpt-4o-mini"
class TestChatOpenRouterUnit(ChatModelUnitTests):
    """Standard unit tests for `ChatOpenRouter` chat model."""
    @property
    def chat_model_class(self) -> type[ChatOpenRouter]:
        """Chat model class being tested."""
        return ChatOpenRouter
    @property
    def init_from_env_params(self) -> tuple[dict, dict, dict]:
        """Parameters to initialize from environment variables.

        Returns:
            A 3-tuple of (env vars to set, constructor kwargs, expected
            attribute values after initialization).
        """
        return (
            {
                "OPENROUTER_API_KEY": "api_key",
            },
            {
                "model": MODEL_NAME,
            },
            {
                "openrouter_api_key": "api_key",
            },
        )
    @property
    def chat_model_params(self) -> dict:
        """Parameters to create chat model instance for testing."""
        return {
            "model": MODEL_NAME,
            "api_key": "test-api-key",
        }
    @property
    def supports_image_inputs(self) -> bool:
        """Opt in to the standard image-input tests."""
        return True
    @property
    def supports_image_urls(self) -> bool:
        """Opt in to the standard image-URL tests."""
        return True
    @property
    def supports_audio_inputs(self) -> bool:
        """Opt in to the standard audio-input tests."""
        return True
    @property
    def supports_video_inputs(self) -> bool:
        """Opt in to the standard video-input tests."""
        return True
    @property
    def supports_pdf_inputs(self) -> bool:
        """Opt in to the standard PDF-input tests."""
        return True
    @property
    def model_override_value(self) -> str:
        """Alternate model name used by the model-override test."""
        return "openai/gpt-4o"
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/partners/openrouter/tests/unit_tests/test_standard.py",
"license": "MIT License",
"lines": 49,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/core/langchain_core/_security/_ssrf_protection.py | """SSRF Protection for validating URLs against Server-Side Request Forgery attacks.
This module provides utilities to validate user-provided URLs and prevent SSRF attacks
by blocking requests to:
- Private IP ranges (RFC 1918, loopback, link-local)
- Cloud metadata endpoints (AWS, GCP, Azure, etc.)
- Localhost addresses
- Invalid URL schemes
Usage:
from lc_security.ssrf_protection import validate_safe_url, is_safe_url
# Validate a URL (raises ValueError if unsafe)
safe_url = validate_safe_url("https://example.com/webhook")
# Check if URL is safe (returns bool)
if is_safe_url("http://192.168.1.1"):
# URL is safe
pass
# Allow private IPs for development/testing (still blocks cloud metadata)
safe_url = validate_safe_url("http://localhost:8080", allow_private=True)
"""
import ipaddress
import os
import socket
from typing import Annotated, Any
from urllib.parse import urlparse
from pydantic import (
AnyHttpUrl,
BeforeValidator,
HttpUrl,
)
# Private IP ranges (RFC 1918, RFC 4193, RFC 3927, loopback)
# URLs resolving to any of these networks are rejected by default.
PRIVATE_IP_RANGES = [
    ipaddress.ip_network("10.0.0.0/8"),  # Private Class A
    ipaddress.ip_network("172.16.0.0/12"),  # Private Class B
    ipaddress.ip_network("192.168.0.0/16"),  # Private Class C
    ipaddress.ip_network("127.0.0.0/8"),  # Loopback
    ipaddress.ip_network("169.254.0.0/16"),  # Link-local (includes cloud metadata)
    ipaddress.ip_network("0.0.0.0/8"),  # Current network
    ipaddress.ip_network("::1/128"),  # IPv6 loopback
    ipaddress.ip_network("fc00::/7"),  # IPv6 unique local
    ipaddress.ip_network("fe80::/10"),  # IPv6 link-local
    ipaddress.ip_network("ff00::/8"),  # IPv6 multicast
]
# Cloud provider metadata endpoints
# These are always blocked, even when private IPs are otherwise allowed.
CLOUD_METADATA_IPS = [
    "169.254.169.254",  # AWS, GCP, Azure, DigitalOcean, Oracle Cloud
    "169.254.170.2",  # AWS ECS task metadata
    "100.100.100.200",  # Alibaba Cloud metadata
]
CLOUD_METADATA_HOSTNAMES = [
    "metadata.google.internal",  # GCP
    "metadata",  # Generic
    "instance-data",  # AWS EC2
]
# Localhost variations
# Hostname spellings treated as localhost (compared case-insensitively).
LOCALHOST_NAMES = [
    "localhost",
    "localhost.localdomain",
]
def is_private_ip(ip_str: str) -> bool:
    """Check if an IP address is in a private range.

    Args:
        ip_str: IP address as a string (e.g., "192.168.1.1")

    Returns:
        True if IP is in a private range, False otherwise
    """
    try:
        parsed = ipaddress.ip_address(ip_str)
    except ValueError:
        # Not a literal IP address at all -> cannot be a private IP.
        return False
    # Membership against a mismatched IP version simply evaluates False.
    for network in PRIVATE_IP_RANGES:
        if parsed in network:
            return True
    return False
def is_cloud_metadata(hostname: str, ip_str: str | None = None) -> bool:
    """Check if hostname or IP is a cloud metadata endpoint.

    Args:
        hostname: Hostname to check
        ip_str: Optional IP address to check

    Returns:
        True if hostname or IP is a known cloud metadata endpoint
    """
    # Hostname match is case-insensitive; IP match only applies when given.
    if hostname.lower() in CLOUD_METADATA_HOSTNAMES:
        return True
    return bool(ip_str and ip_str in CLOUD_METADATA_IPS)
def is_localhost(hostname: str, ip_str: str | None = None) -> bool:
    """Check if hostname or IP is localhost.

    Args:
        hostname: Hostname to check
        ip_str: Optional IP address to check

    Returns:
        True if hostname or IP is localhost
    """
    # Hostname spelling check (case-insensitive).
    if hostname.lower() in LOCALHOST_NAMES:
        return True
    if not ip_str:
        return False
    try:
        addr = ipaddress.ip_address(ip_str)
    except ValueError:
        # Unparseable IP strings are not treated as localhost.
        return False
    # Loopback covers 127.0.0.0/8 and ::1; the tuple additionally
    # treats the unspecified address 0.0.0.0 as local.
    return addr.is_loopback or ip_str in ("127.0.0.1", "::1", "0.0.0.0")  # noqa: S104
def validate_safe_url(
    url: str | AnyHttpUrl,
    *,
    allow_private: bool = False,
    allow_http: bool = True,
) -> str:
    """Validate a URL for SSRF protection.

    This function validates URLs to prevent Server-Side Request Forgery (SSRF) attacks
    by blocking requests to private networks and cloud metadata endpoints. Checks are
    applied to both the literal hostname and every IP it resolves to.

    Args:
        url: The URL to validate (string or Pydantic HttpUrl)
        allow_private: If True, allows private IPs and localhost (for development).
            Cloud metadata endpoints are ALWAYS blocked.
        allow_http: If True, allows both HTTP and HTTPS. If False, only HTTPS.

    Returns:
        The validated URL as a string

    Raises:
        ValueError: If URL is invalid or potentially dangerous

    Examples:
        >>> validate_safe_url("https://hooks.slack.com/services/xxx")
        'https://hooks.slack.com/services/xxx'
        >>> validate_safe_url("http://127.0.0.1:8080")
        ValueError: Localhost URLs are not allowed
        >>> validate_safe_url("http://192.168.1.1")
        ValueError: URL resolves to private IP: 192.168.1.1
        >>> validate_safe_url("http://169.254.169.254/latest/meta-data/")
        ValueError: URL resolves to cloud metadata IP: 169.254.169.254
        >>> validate_safe_url("http://localhost:8080", allow_private=True)
        'http://localhost:8080'
    """
    url_str = str(url)
    parsed = urlparse(url_str)
    # Validate URL scheme: optionally require HTTPS, and reject anything that
    # is not HTTP(S) at all (ftp:, file:, javascript:, ...).
    if not allow_http and parsed.scheme != "https":
        msg = "Only HTTPS URLs are allowed"
        raise ValueError(msg)
    if parsed.scheme not in ("http", "https"):
        msg = f"Only HTTP/HTTPS URLs are allowed, got scheme: {parsed.scheme}"
        raise ValueError(msg)
    # Extract hostname
    hostname = parsed.hostname
    if not hostname:
        msg = "URL must have a valid hostname"
        raise ValueError(msg)
    # Special handling for test environments - allow test server hostnames
    # testserver is used by FastAPI/Starlette test clients and doesn't resolve via DNS
    # Only enabled when LANGCHAIN_ENV=local_test (set in conftest.py)
    if (
        os.environ.get("LANGCHAIN_ENV") == "local_test"
        and hostname.startswith("test")
        and "server" in hostname
    ):
        return url_str
    # ALWAYS block cloud metadata endpoints (even with allow_private=True)
    if is_cloud_metadata(hostname):
        msg = f"Cloud metadata endpoints are not allowed: {hostname}"
        raise ValueError(msg)
    # Check for localhost
    if is_localhost(hostname) and not allow_private:
        msg = f"Localhost URLs are not allowed: {hostname}"
        raise ValueError(msg)
    # Resolve hostname to IP addresses and validate each one.
    # Note: DNS resolution results are cached by the OS, so repeated calls are fast.
    # NOTE(review): the eventual HTTP request resolves DNS again, so a
    # DNS-rebinding attacker could serve a different answer between this check
    # and the actual request — confirm whether callers pin the resolved IP.
    try:
        # Get all IP addresses for this hostname
        addr_info = socket.getaddrinfo(
            hostname,
            parsed.port or (443 if parsed.scheme == "https" else 80),
            socket.AF_UNSPEC,  # Allow both IPv4 and IPv6
            socket.SOCK_STREAM,
        )
        # Every resolved address must pass; a single bad A/AAAA record fails
        # the whole URL.
        for result in addr_info:
            ip_str: str = result[4][0]  # type: ignore[assignment]
            # ALWAYS block cloud metadata IPs. The ValueErrors raised below
            # are NOT caught by the except clauses (gaierror/OSError) and
            # propagate to the caller as intended.
            if is_cloud_metadata(hostname, ip_str):
                msg = f"URL resolves to cloud metadata IP: {ip_str}"
                raise ValueError(msg)
            # Check for localhost IPs
            if is_localhost(hostname, ip_str) and not allow_private:
                msg = f"URL resolves to localhost IP: {ip_str}"
                raise ValueError(msg)
            # Check for private IPs
            if not allow_private and is_private_ip(ip_str):
                msg = f"URL resolves to private IP address: {ip_str}"
                raise ValueError(msg)
    except socket.gaierror as e:
        # DNS resolution failed - fail closed for security
        msg = f"Failed to resolve hostname '{hostname}': {e}"
        raise ValueError(msg) from e
    except OSError as e:
        # Other network errors - fail closed
        msg = f"Network error while validating URL: {e}"
        raise ValueError(msg) from e
    return url_str
def is_safe_url(
    url: str | AnyHttpUrl,
    *,
    allow_private: bool = False,
    allow_http: bool = True,
) -> bool:
    """Check if a URL is safe (non-throwing version of validate_safe_url).

    Args:
        url: The URL to check
        allow_private: If True, allows private IPs and localhost
        allow_http: If True, allows both HTTP and HTTPS

    Returns:
        True if URL is safe, False otherwise

    Examples:
        >>> is_safe_url("https://example.com")
        True
        >>> is_safe_url("http://127.0.0.1:8080")
        False
        >>> is_safe_url("http://localhost:8080", allow_private=True)
        True
    """
    try:
        validate_safe_url(url, allow_private=allow_private, allow_http=allow_http)
    except ValueError:
        # Any validation failure means the URL is unsafe.
        return False
    return True
def _validate_url_ssrf_strict(v: Any) -> Any:
    """Pydantic ``BeforeValidator``: reject private IPs, localhost, metadata."""
    # Non-string inputs pass through untouched; HttpUrl parsing handles them.
    if not isinstance(v, str):
        return v
    validate_safe_url(v, allow_http=True, allow_private=False)
    return v
def _validate_url_ssrf_https_only(v: Any) -> Any:
    """Pydantic ``BeforeValidator``: strict SSRF check, HTTPS-only."""
    # Non-string inputs pass through untouched; HttpUrl parsing handles them.
    if not isinstance(v, str):
        return v
    validate_safe_url(v, allow_http=False, allow_private=False)
    return v
def _validate_url_ssrf_relaxed(v: Any) -> Any:
    """Pydantic ``BeforeValidator``: SSRF check allowing private IPs/localhost."""
    # Non-string inputs pass through untouched; HttpUrl parsing handles them.
    if not isinstance(v, str):
        return v
    validate_safe_url(v, allow_http=True, allow_private=True)
    return v
# Annotated types with SSRF protection
SSRFProtectedUrl = Annotated[HttpUrl, BeforeValidator(_validate_url_ssrf_strict)]
"""A Pydantic HttpUrl type with built-in SSRF protection.
This blocks private IPs, localhost, and cloud metadata endpoints.
Example:
class WebhookSchema(BaseModel):
url: SSRFProtectedUrl # Automatically validated for SSRF
headers: dict[str, str] | None = None
"""
SSRFProtectedUrlRelaxed = Annotated[
HttpUrl, BeforeValidator(_validate_url_ssrf_relaxed)
]
"""A Pydantic HttpUrl with relaxed SSRF protection (allows private IPs).
Use this for development/testing webhooks where localhost/private IPs are needed.
Cloud metadata endpoints are still blocked.
Example:
class DevWebhookSchema(BaseModel):
url: SSRFProtectedUrlRelaxed # Allows localhost, blocks cloud metadata
"""
SSRFProtectedHttpsUrl = Annotated[
HttpUrl, BeforeValidator(_validate_url_ssrf_https_only)
]
"""A Pydantic HttpUrl with SSRF protection that only allows HTTPS.
This blocks private IPs, localhost, cloud metadata endpoints, and HTTP URLs.
Example:
class SecureWebhookSchema(BaseModel):
url: SSRFProtectedHttpsUrl # Only HTTPS, blocks private IPs
"""
SSRFProtectedHttpsUrlStr = Annotated[
str, BeforeValidator(_validate_url_ssrf_https_only)
]
"""A string type with SSRF protection that only allows HTTPS URLs.
Same as SSRFProtectedHttpsUrl but returns a string instead of HttpUrl.
Useful for FastAPI query parameters where you need a string URL.
Example:
@router.get("/proxy")
async def proxy_get(url: SSRFProtectedHttpsUrlStr):
async with httpx.AsyncClient() as client:
resp = await client.get(url)
"""
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/core/langchain_core/_security/_ssrf_protection.py",
"license": "MIT License",
"lines": 284,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/core/tests/unit_tests/test_ssrf_protection.py | """Tests for SSRF protection utilities."""
from typing import Any
import pytest
from pydantic import BaseModel, ValidationError
from langchain_core._security._ssrf_protection import (
SSRFProtectedUrl,
SSRFProtectedUrlRelaxed,
is_cloud_metadata,
is_localhost,
is_private_ip,
is_safe_url,
validate_safe_url,
)
class TestIPValidation:
    """Tests for IP address validation functions."""

    def test_is_private_ip_ipv4(self) -> None:
        """Test private IPv4 address detection."""
        for addr in (
            "10.0.0.1",
            "172.16.0.1",
            "192.168.1.1",
            "127.0.0.1",
            "169.254.169.254",
            "0.0.0.1",
        ):
            assert is_private_ip(addr) is True

    def test_is_private_ip_ipv6(self) -> None:
        """Test private IPv6 address detection."""
        # Loopback, unique-local, link-local, multicast.
        for addr in ("::1", "fc00::1", "fe80::1", "ff00::1"):
            assert is_private_ip(addr) is True

    def test_is_private_ip_public(self) -> None:
        """Test that public IPs are not flagged as private."""
        for addr in ("8.8.8.8", "1.1.1.1", "151.101.1.140"):
            assert is_private_ip(addr) is False

    def test_is_private_ip_invalid(self) -> None:
        """Test handling of invalid IP addresses."""
        for bogus in ("not-an-ip", "999.999.999.999"):
            assert is_private_ip(bogus) is False

    def test_is_cloud_metadata_ips(self) -> None:
        """Test cloud metadata IP detection."""
        for metadata_ip in ("169.254.169.254", "169.254.170.2", "100.100.100.200"):
            assert is_cloud_metadata("example.com", metadata_ip) is True

    def test_is_cloud_metadata_hostnames(self) -> None:
        """Test cloud metadata hostname detection."""
        for host in (
            "metadata.google.internal",
            "metadata",
            "instance-data",
            "METADATA.GOOGLE.INTERNAL",  # Case insensitive
        ):
            assert is_cloud_metadata(host) is True

    def test_is_cloud_metadata_safe(self) -> None:
        """Test that normal URLs are not flagged as cloud metadata."""
        assert is_cloud_metadata("example.com", "8.8.8.8") is False
        assert is_cloud_metadata("google.com") is False

    def test_is_localhost_hostnames(self) -> None:
        """Test localhost hostname detection."""
        for host in ("localhost", "LOCALHOST", "localhost.localdomain"):
            assert is_localhost(host) is True

    def test_is_localhost_ips(self) -> None:
        """Test localhost IP detection."""
        for loop_ip in ("127.0.0.1", "::1", "0.0.0.0"):
            assert is_localhost("example.com", loop_ip) is True

    def test_is_localhost_safe(self) -> None:
        """Test that normal hosts are not flagged as localhost."""
        assert is_localhost("example.com", "8.8.8.8") is False
        assert is_localhost("google.com") is False
class TestValidateSafeUrl:
    """Tests for validate_safe_url function."""

    def test_valid_public_https_url(self) -> None:
        """Test that valid public HTTPS URLs are accepted."""
        url = "https://hooks.slack.com/services/xxx"
        result = validate_safe_url(url)
        assert result == url

    def test_valid_public_http_url(self) -> None:
        """Test that valid public HTTP URLs are accepted."""
        url = "http://example.com/webhook"
        result = validate_safe_url(url)
        assert result == url

    def test_localhost_blocked_by_default(self) -> None:
        """Test that localhost URLs are blocked by default."""
        with pytest.raises(ValueError, match="Localhost"):
            validate_safe_url("http://localhost:8080/webhook")
        with pytest.raises(ValueError, match="localhost"):
            validate_safe_url("http://127.0.0.1:8080/webhook")

    def test_localhost_allowed_with_flag(self) -> None:
        """Test that localhost is allowed with allow_private=True."""
        url = "http://localhost:8080/webhook"
        result = validate_safe_url(url, allow_private=True)
        assert result == url
        url = "http://127.0.0.1:8080/webhook"
        result = validate_safe_url(url, allow_private=True)
        assert result == url

    def test_private_ip_blocked_by_default(self) -> None:
        """Test that private IPs are blocked by default."""
        with pytest.raises(ValueError, match="private IP"):
            validate_safe_url("http://192.168.1.1/webhook")
        with pytest.raises(ValueError, match="private IP"):
            validate_safe_url("http://10.0.0.1/webhook")
        with pytest.raises(ValueError, match="private IP"):
            validate_safe_url("http://172.16.0.1/webhook")

    def test_private_ip_allowed_with_flag(self) -> None:
        """Test that private IPs are allowed with allow_private=True."""
        # The original body was empty, so this test silently "passed" while
        # verifying nothing: private IPs cannot be DNS-resolved in CI, so the
        # positive path cannot be exercised here. Skip explicitly to keep the
        # test report honest.
        pytest.skip("private-IP URLs cannot be resolved in the test environment")

    def test_cloud_metadata_always_blocked(self) -> None:
        """Test that cloud metadata endpoints are always blocked."""
        with pytest.raises(ValueError, match="metadata"):
            validate_safe_url("http://169.254.169.254/latest/meta-data/")
        # Even with allow_private=True
        with pytest.raises(ValueError, match="metadata"):
            validate_safe_url(
                "http://169.254.169.254/latest/meta-data/",
                allow_private=True,
            )

    def test_invalid_scheme_blocked(self) -> None:
        """Test that non-HTTP(S) schemes are blocked."""
        with pytest.raises(ValueError, match="scheme"):
            validate_safe_url("ftp://example.com/file")
        with pytest.raises(ValueError, match="scheme"):
            validate_safe_url("file:///etc/passwd")
        with pytest.raises(ValueError, match="scheme"):
            validate_safe_url("javascript:alert(1)")

    def test_https_only_mode(self) -> None:
        """Test that HTTP is blocked when allow_http=False."""
        with pytest.raises(ValueError, match="HTTPS"):
            validate_safe_url("http://example.com/webhook", allow_http=False)
        # HTTPS should still work
        url = "https://example.com/webhook"
        result = validate_safe_url(url, allow_http=False)
        assert result == url

    def test_url_without_hostname(self) -> None:
        """Test that URLs without hostname are rejected."""
        with pytest.raises(ValueError, match="hostname"):
            validate_safe_url("http:///path")

    def test_dns_resolution_failure(self) -> None:
        """Test handling of DNS resolution failures."""
        with pytest.raises(ValueError, match="resolve"):
            validate_safe_url("http://this-domain-definitely-does-not-exist-12345.com")

    def test_testserver_allowed(self, monkeypatch: Any) -> None:
        """Test that testserver hostname is allowed for test environments."""
        # testserver is used by FastAPI/Starlette test clients
        monkeypatch.setenv("LANGCHAIN_ENV", "local_test")
        url = "http://testserver/webhook"
        result = validate_safe_url(url)
        assert result == url
class TestIsSafeUrl:
    """Tests for is_safe_url function (non-throwing version)."""

    def test_safe_url_returns_true(self) -> None:
        """Test that safe URLs return True."""
        for url in (
            "https://example.com/webhook",
            "http://hooks.slack.com/services/xxx",
        ):
            assert is_safe_url(url) is True

    def test_unsafe_url_returns_false(self) -> None:
        """Test that unsafe URLs return False."""
        for url in (
            "http://localhost:8080",
            "http://127.0.0.1:8080",
            "http://192.168.1.1",
            "http://169.254.169.254",
        ):
            assert is_safe_url(url) is False

    def test_unsafe_url_safe_with_allow_private(self) -> None:
        """Test that private URLs are safe with allow_private=True."""
        for url in ("http://localhost:8080", "http://127.0.0.1:8080"):
            assert is_safe_url(url, allow_private=True) is True

    def test_cloud_metadata_always_unsafe(self) -> None:
        """Test that cloud metadata is always unsafe."""
        metadata_url = "http://169.254.169.254"
        assert is_safe_url(metadata_url) is False
        assert is_safe_url(metadata_url, allow_private=True) is False
class TestSSRFProtectedUrlType:
    """Tests for SSRFProtectedUrl Pydantic type."""

    def test_valid_url_accepted(self) -> None:
        """Test that valid URLs are accepted by Pydantic schema."""

        class Hook(BaseModel):
            url: SSRFProtectedUrl

        hook = Hook(url="https://hooks.slack.com/services/xxx")
        assert str(hook.url).startswith("https://hooks.slack.com/")

    def test_localhost_rejected(self) -> None:
        """Test that localhost URLs are rejected by Pydantic schema."""

        class Hook(BaseModel):
            url: SSRFProtectedUrl

        with pytest.raises(ValidationError):
            Hook(url="http://localhost:8080")

    def test_private_ip_rejected(self) -> None:
        """Test that private IPs are rejected by Pydantic schema."""

        class Hook(BaseModel):
            url: SSRFProtectedUrl

        with pytest.raises(ValidationError):
            Hook(url="http://192.168.1.1")

    def test_cloud_metadata_rejected(self) -> None:
        """Test that cloud metadata is rejected by Pydantic schema."""

        class Hook(BaseModel):
            url: SSRFProtectedUrl

        with pytest.raises(ValidationError):
            Hook(url="http://169.254.169.254/latest/meta-data/")
class TestSSRFProtectedUrlRelaxedType:
    """Tests for SSRFProtectedUrlRelaxed Pydantic type."""

    def test_localhost_accepted(self) -> None:
        """Test that localhost URLs are accepted by relaxed schema."""

        class DevHook(BaseModel):
            url: SSRFProtectedUrlRelaxed

        hook = DevHook(url="http://localhost:8080")
        assert str(hook.url).startswith("http://localhost")

    def test_cloud_metadata_still_rejected(self) -> None:
        """Test that cloud metadata is still rejected by relaxed schema."""

        class DevHook(BaseModel):
            url: SSRFProtectedUrlRelaxed

        with pytest.raises(ValidationError):
            DevHook(url="http://169.254.169.254/latest/meta-data/")
class TestRealWorldURLs:
    """Tests with real-world webhook URLs."""

    def test_slack_webhook(self) -> None:
        """Test Slack webhook URL."""
        slack_url = (
            "https://hooks.slack.com/services/T00000000/B00000000/XXXXXXXXXXXXXXXXXXXX"
        )
        assert is_safe_url(slack_url) is True

    def test_discord_webhook(self) -> None:
        """Test Discord webhook URL."""
        discord_url = "https://discord.com/api/webhooks/123456789012345678/abcdefghijklmnopqrstuvwxyz"
        assert is_safe_url(discord_url) is True

    def test_webhook_site(self) -> None:
        """Test webhook.site URL."""
        assert is_safe_url("https://webhook.site/unique-id") is True

    def test_ngrok_url(self) -> None:
        """Test ngrok URL (should be safe as it's public)."""
        assert is_safe_url("https://abc123.ngrok.io/webhook") is True
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/core/tests/unit_tests/test_ssrf_protection.py",
"license": "MIT License",
"lines": 226,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/langchain_v1/tests/unit_tests/agents/middleware/core/test_wrap_model_call_state_update.py | """Unit tests for ExtendedModelResponse command support in wrap_model_call.
Tests that wrap_model_call middleware can return ExtendedModelResponse to provide
a Command alongside the model response. Commands are applied as separate state
updates through graph reducers (e.g. add_messages for messages).
"""
from collections.abc import Awaitable, Callable
import pytest
from langchain_core.language_models.fake_chat_models import GenericFakeChatModel
from langchain_core.messages import AIMessage, HumanMessage
from langgraph.errors import InvalidUpdateError
from langgraph.types import Command
from langchain.agents import AgentState, create_agent
from langchain.agents.middleware.types import (
AgentMiddleware,
ExtendedModelResponse,
ModelRequest,
ModelResponse,
wrap_model_call,
)
class TestBasicCommand:
    """Test basic ExtendedModelResponse functionality with Command."""

    def test_command_messages_added_alongside_model_messages(self) -> None:
        """Command messages are added alongside model response messages (additive)."""

        class AddMessagesMiddleware(AgentMiddleware):
            def wrap_model_call(
                self,
                request: ModelRequest,
                handler: Callable[[ModelRequest], ModelResponse],
            ) -> ExtendedModelResponse:
                response = handler(request)
                custom_msg = HumanMessage(content="Custom message", id="custom")
                return ExtendedModelResponse(
                    model_response=response,
                    command=Command(update={"messages": [custom_msg]}),
                )

        model = GenericFakeChatModel(messages=iter([AIMessage(content="Hello!")]))
        agent = create_agent(model=model, middleware=[AddMessagesMiddleware()])
        result = agent.invoke({"messages": [HumanMessage(content="Hi")]})
        # Both model response AND command messages appear (additive via add_messages)
        messages = result["messages"]
        assert len(messages) == 3
        assert messages[0].content == "Hi"
        assert messages[1].content == "Hello!"
        assert messages[2].content == "Custom message"

    def test_command_with_extra_messages_and_model_response(self) -> None:
        """Middleware can add extra messages via command alongside model messages."""

        class ExtraMessagesMiddleware(AgentMiddleware):
            def wrap_model_call(
                self,
                request: ModelRequest,
                handler: Callable[[ModelRequest], ModelResponse],
            ) -> ExtendedModelResponse:
                response = handler(request)
                # Explicit id keeps add_messages from treating this as a dupe.
                summary = HumanMessage(content="Summary", id="summary")
                return ExtendedModelResponse(
                    model_response=response,
                    command=Command(update={"messages": [summary]}),
                )

        model = GenericFakeChatModel(messages=iter([AIMessage(content="Hello!")]))
        agent = create_agent(model=model, middleware=[ExtraMessagesMiddleware()])
        result = agent.invoke({"messages": [HumanMessage(content="Hi")]})
        messages = result["messages"]
        assert len(messages) == 3
        assert messages[0].content == "Hi"
        assert messages[1].content == "Hello!"
        assert messages[2].content == "Summary"

    def test_command_structured_response_conflicts_with_model_response(self) -> None:
        """Command and model response both setting structured_response raises."""

        class OverrideMiddleware(AgentMiddleware):
            def wrap_model_call(
                self,
                request: ModelRequest,
                handler: Callable[[ModelRequest], ModelResponse],
            ) -> ExtendedModelResponse:
                response = handler(request)
                # Model-side structured response...
                response_with_structured = ModelResponse(
                    result=response.result,
                    structured_response={"from": "model"},
                )
                # ...and a Command that also writes structured_response.
                return ExtendedModelResponse(
                    model_response=response_with_structured,
                    command=Command(
                        update={
                            "structured_response": {"from": "command"},
                        }
                    ),
                )

        model = GenericFakeChatModel(messages=iter([AIMessage(content="Model msg")]))
        agent = create_agent(model=model, middleware=[OverrideMiddleware()])
        # Two Commands both setting structured_response (a LastValue channel)
        # in the same step raises InvalidUpdateError
        with pytest.raises(InvalidUpdateError):
            agent.invoke({"messages": [HumanMessage("Hi")]})

    def test_command_with_custom_state_field(self) -> None:
        """When command updates a custom field, model response messages are preserved."""

        class CustomFieldMiddleware(AgentMiddleware):
            def wrap_model_call(
                self,
                request: ModelRequest,
                handler: Callable[[ModelRequest], ModelResponse],
            ) -> ExtendedModelResponse:
                response = handler(request)
                return ExtendedModelResponse(
                    model_response=response,
                    command=Command(update={"custom_key": "custom_value"}),
                )

        class CustomState(AgentState):
            custom_key: str

        model = GenericFakeChatModel(messages=iter([AIMessage(content="Hello")]))
        agent = create_agent(
            model=model,
            middleware=[CustomFieldMiddleware()],
            state_schema=CustomState,
        )
        result = agent.invoke({"messages": [HumanMessage("Hi")]})
        # Only the message stream is asserted here; the custom_key value
        # itself is not checked by this test.
        assert result["messages"][-1].content == "Hello"
class TestCustomStateField:
    """Test ExtendedModelResponse with custom state fields defined via state_schema."""

    def test_custom_field_via_state_schema(self) -> None:
        """Middleware updates a custom state field via ExtendedModelResponse."""

        class MyState(AgentState):
            summary: str

        class SummaryMiddleware(AgentMiddleware):
            # Registering the schema on the middleware exposes `summary`
            # as a graph state channel.
            state_schema = MyState  # type: ignore[assignment]

            def wrap_model_call(
                self,
                request: ModelRequest,
                handler: Callable[[ModelRequest], ModelResponse],
            ) -> ExtendedModelResponse:
                response = handler(request)
                return ExtendedModelResponse(
                    model_response=response,
                    command=Command(update={"summary": "conversation summarized"}),
                )

        model = GenericFakeChatModel(messages=iter([AIMessage(content="Hello")]))
        agent = create_agent(model=model, middleware=[SummaryMiddleware()])
        result = agent.invoke({"messages": [HumanMessage("Hi")]})
        # NOTE(review): only messages are asserted; the `summary` value written
        # by the Command is not checked here — presumably covered elsewhere.
        assert result["messages"][-1].content == "Hello"

    def test_no_command(self) -> None:
        """ExtendedModelResponse with no command works like ModelResponse."""

        class NoCommandMiddleware(AgentMiddleware):
            def wrap_model_call(
                self,
                request: ModelRequest,
                handler: Callable[[ModelRequest], ModelResponse],
            ) -> ExtendedModelResponse:
                response = handler(request)
                # command omitted: behaves exactly like returning the response.
                return ExtendedModelResponse(
                    model_response=response,
                )

        model = GenericFakeChatModel(messages=iter([AIMessage(content="Hello")]))
        agent = create_agent(model=model, middleware=[NoCommandMiddleware()])
        result = agent.invoke({"messages": [HumanMessage("Hi")]})
        assert len(result["messages"]) == 2
        assert result["messages"][1].content == "Hello"
class TestBackwardsCompatibility:
    """Test that existing ModelResponse and AIMessage returns still work."""

    def test_model_response_return_unchanged(self) -> None:
        """Existing middleware returning ModelResponse works identically."""

        class IdentityMiddleware(AgentMiddleware):
            def wrap_model_call(
                self,
                request: ModelRequest,
                handler: Callable[[ModelRequest], ModelResponse],
            ) -> ModelResponse:
                # Pure pass-through: delegate straight to the handler.
                return handler(request)

        fake_model = GenericFakeChatModel(messages=iter([AIMessage(content="Hello")]))
        agent = create_agent(model=fake_model, middleware=[IdentityMiddleware()])
        final_state = agent.invoke({"messages": [HumanMessage("Hi")]})
        history = final_state["messages"]
        assert len(history) == 2
        assert history[1].content == "Hello"

    def test_ai_message_return_unchanged(self) -> None:
        """Existing middleware returning AIMessage works identically."""

        class InterceptMiddleware(AgentMiddleware):
            def wrap_model_call(
                self,
                request: ModelRequest,
                handler: Callable[[ModelRequest], ModelResponse],
            ) -> AIMessage:
                # Never invokes the handler; replaces model output entirely.
                return AIMessage(content="Short-circuited")

        fake_model = GenericFakeChatModel(
            messages=iter([AIMessage(content="Should not appear")])
        )
        agent = create_agent(model=fake_model, middleware=[InterceptMiddleware()])
        final_state = agent.invoke({"messages": [HumanMessage("Hi")]})
        history = final_state["messages"]
        assert len(history) == 2
        assert history[1].content == "Short-circuited"

    def test_no_middleware_unchanged(self) -> None:
        """Agent without middleware works identically."""
        fake_model = GenericFakeChatModel(messages=iter([AIMessage(content="Hello")]))
        agent = create_agent(model=fake_model)
        final_state = agent.invoke({"messages": [HumanMessage("Hi")]})
        history = final_state["messages"]
        assert len(history) == 2
        assert history[1].content == "Hello"
class TestAsyncExtendedModelResponse:
    """Test async variant of ExtendedModelResponse."""

    async def test_async_command_adds_messages(self) -> None:
        """awrap_model_call command adds messages alongside model response."""

        class AsyncAddMiddleware(AgentMiddleware):
            async def awrap_model_call(
                self,
                request: ModelRequest,
                handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
            ) -> ExtendedModelResponse:
                response = await handler(request)
                custom = HumanMessage(content="Async custom", id="async-custom")
                return ExtendedModelResponse(
                    model_response=response,
                    command=Command(update={"messages": [custom]}),
                )

        model = GenericFakeChatModel(messages=iter([AIMessage(content="Async hello!")]))
        agent = create_agent(model=model, middleware=[AsyncAddMiddleware()])
        result = await agent.ainvoke({"messages": [HumanMessage(content="Hi")]})
        # Both model response and command messages are present (additive)
        messages = result["messages"]
        assert len(messages) == 3
        assert messages[0].content == "Hi"
        assert messages[1].content == "Async hello!"
        assert messages[2].content == "Async custom"

    async def test_async_decorator_command(self) -> None:
        """@wrap_model_call async decorator returns ExtendedModelResponse with command."""

        # Decorator form: the function body plays the role of awrap_model_call.
        @wrap_model_call
        async def command_middleware(
            request: ModelRequest,
            handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
        ) -> ExtendedModelResponse:
            response = await handler(request)
            return ExtendedModelResponse(
                model_response=response,
                command=Command(
                    update={
                        "messages": [
                            HumanMessage(content="Decorator msg", id="dec"),
                        ]
                    }
                ),
            )

        model = GenericFakeChatModel(messages=iter([AIMessage(content="Async response")]))
        agent = create_agent(model=model, middleware=[command_middleware])
        result = await agent.ainvoke({"messages": [HumanMessage(content="Hi")]})
        messages = result["messages"]
        assert len(messages) == 3
        assert messages[1].content == "Async response"
        assert messages[2].content == "Decorator msg"
class TestComposition:
"""Test ExtendedModelResponse with composed middleware.
Key semantics: Commands are collected inner-first, then outer.
For non-reducer fields, later Commands overwrite (outer wins).
For reducer fields (messages), all Commands are additive.
"""
def test_outer_command_messages_added_alongside_model(self) -> None:
"""Outer middleware's command messages are added alongside model messages."""
execution_order: list[str] = []
class OuterMiddleware(AgentMiddleware):
def wrap_model_call(
self,
request: ModelRequest,
handler: Callable[[ModelRequest], ModelResponse],
) -> ExtendedModelResponse:
execution_order.append("outer-before")
response = handler(request)
execution_order.append("outer-after")
return ExtendedModelResponse(
model_response=response,
command=Command(
update={"messages": [HumanMessage(content="Outer msg", id="outer-msg")]}
),
)
class InnerMiddleware(AgentMiddleware):
def wrap_model_call(
self,
request: ModelRequest,
handler: Callable[[ModelRequest], ModelResponse],
) -> ModelResponse:
execution_order.append("inner-before")
response = handler(request)
execution_order.append("inner-after")
return response
model = GenericFakeChatModel(messages=iter([AIMessage(content="Composed")]))
agent = create_agent(
model=model,
middleware=[OuterMiddleware(), InnerMiddleware()],
)
result = agent.invoke({"messages": [HumanMessage("Hi")]})
# Execution order: outer wraps inner
assert execution_order == [
"outer-before",
"inner-before",
"inner-after",
"outer-after",
]
# Model messages + outer command messages (additive)
messages = result["messages"]
assert len(messages) == 3
assert messages[0].content == "Hi"
assert messages[1].content == "Composed"
assert messages[2].content == "Outer msg"
def test_inner_command_propagated_through_composition(self) -> None:
"""Inner middleware's ExtendedModelResponse command is propagated.
When inner middleware returns ExtendedModelResponse, its command is
captured before normalizing to ModelResponse at the composition boundary
and collected into the final result.
"""
class OuterMiddleware(AgentMiddleware):
def wrap_model_call(
self,
request: ModelRequest,
handler: Callable[[ModelRequest], ModelResponse],
) -> ModelResponse:
# Outer sees a ModelResponse from handler (inner's ExtendedModelResponse
# was normalized at the composition boundary)
response = handler(request)
assert isinstance(response, ModelResponse)
return response
class InnerMiddleware(AgentMiddleware):
def wrap_model_call(
self,
request: ModelRequest,
handler: Callable[[ModelRequest], ModelResponse],
) -> ExtendedModelResponse:
response = handler(request)
return ExtendedModelResponse(
model_response=response,
command=Command(
update={
"messages": [
HumanMessage(content="Inner msg", id="inner"),
]
}
),
)
model = GenericFakeChatModel(messages=iter([AIMessage(content="Hello")]))
agent = create_agent(
model=model,
middleware=[OuterMiddleware(), InnerMiddleware()],
)
result = agent.invoke({"messages": [HumanMessage("Hi")]})
# Model messages + inner command messages (additive)
messages = result["messages"]
assert len(messages) == 3
assert messages[0].content == "Hi"
assert messages[1].content == "Hello"
assert messages[2].content == "Inner msg"
def test_non_reducer_key_conflict_raises(self) -> None:
"""Multiple Commands setting the same non-reducer key raises.
LastValue channels (like custom_key) can only receive one value per
step. Inner and outer both setting the same key is an error.
"""
class MyState(AgentState):
custom_key: str
class OuterMiddleware(AgentMiddleware):
state_schema = MyState # type: ignore[assignment]
def wrap_model_call(
self,
request: ModelRequest,
handler: Callable[[ModelRequest], ModelResponse],
) -> ExtendedModelResponse:
response = handler(request)
return ExtendedModelResponse(
model_response=response,
command=Command(
update={
"messages": [HumanMessage(content="Outer msg", id="outer")],
"custom_key": "outer_value",
}
),
)
class InnerMiddleware(AgentMiddleware):
state_schema = MyState # type: ignore[assignment]
def wrap_model_call(
self,
request: ModelRequest,
handler: Callable[[ModelRequest], ModelResponse],
) -> ExtendedModelResponse:
response = handler(request)
return ExtendedModelResponse(
model_response=response,
command=Command(
update={
"messages": [HumanMessage(content="Inner msg", id="inner")],
"custom_key": "inner_value",
}
),
)
model = GenericFakeChatModel(messages=iter([AIMessage(content="Hello")]))
agent = create_agent(
model=model,
middleware=[OuterMiddleware(), InnerMiddleware()],
)
# Two Commands both setting custom_key (a LastValue channel)
# in the same step raises InvalidUpdateError
with pytest.raises(InvalidUpdateError):
agent.invoke({"messages": [HumanMessage("Hi")]})
def test_inner_state_preserved_when_outer_has_no_conflict(self) -> None:
"""Inner's command keys are preserved when outer doesn't conflict."""
class MyState(AgentState):
inner_key: str
outer_key: str
class OuterMiddleware(AgentMiddleware):
state_schema = MyState # type: ignore[assignment]
def wrap_model_call(
self,
request: ModelRequest,
handler: Callable[[ModelRequest], ModelResponse],
) -> ExtendedModelResponse:
response = handler(request)
return ExtendedModelResponse(
model_response=response,
command=Command(update={"outer_key": "from_outer"}),
)
class InnerMiddleware(AgentMiddleware):
state_schema = MyState # type: ignore[assignment]
def wrap_model_call(
self,
request: ModelRequest,
handler: Callable[[ModelRequest], ModelResponse],
) -> ExtendedModelResponse:
response = handler(request)
return ExtendedModelResponse(
model_response=response,
command=Command(update={"inner_key": "from_inner"}),
)
model = GenericFakeChatModel(messages=iter([AIMessage(content="Hello")]))
agent = create_agent(
model=model,
middleware=[OuterMiddleware(), InnerMiddleware()],
)
result = agent.invoke({"messages": [HumanMessage("Hi")]})
# Both keys survive since there's no conflict
messages = result["messages"]
assert messages[-1].content == "Hello"
def test_inner_command_retry_safe(self) -> None:
"""When outer retries, only the last inner command is used."""
call_count = 0
class MyState(AgentState):
attempt: str
class OuterMiddleware(AgentMiddleware):
def wrap_model_call(
self,
request: ModelRequest,
handler: Callable[[ModelRequest], ModelResponse],
) -> ModelResponse:
# Call handler twice (simulating retry)
handler(request)
return handler(request)
class InnerMiddleware(AgentMiddleware):
state_schema = MyState # type: ignore[assignment]
def wrap_model_call(
self,
request: ModelRequest,
handler: Callable[[ModelRequest], ModelResponse],
) -> ExtendedModelResponse:
nonlocal call_count
call_count += 1
response = handler(request)
return ExtendedModelResponse(
model_response=response,
command=Command(update={"attempt": f"attempt_{call_count}"}),
)
model = GenericFakeChatModel(
messages=iter([AIMessage(content="First"), AIMessage(content="Second")])
)
agent = create_agent(
model=model,
middleware=[OuterMiddleware(), InnerMiddleware()],
)
result = agent.invoke({"messages": [HumanMessage("Hi")]})
# Only the last retry's inner state should survive
messages = result["messages"]
assert messages[-1].content == "Second"
def test_decorator_returns_wrap_result(self) -> None:
"""@wrap_model_call decorator can return ExtendedModelResponse with command."""
@wrap_model_call
def command_middleware(
request: ModelRequest,
handler: Callable[[ModelRequest], ModelResponse],
) -> ExtendedModelResponse:
response = handler(request)
return ExtendedModelResponse(
model_response=response,
command=Command(
update={
"messages": [
HumanMessage(content="From decorator", id="dec"),
]
}
),
)
model = GenericFakeChatModel(messages=iter([AIMessage(content="Model response")]))
agent = create_agent(model=model, middleware=[command_middleware])
result = agent.invoke({"messages": [HumanMessage("Hi")]})
messages = result["messages"]
assert len(messages) == 3
assert messages[1].content == "Model response"
assert messages[2].content == "From decorator"
def test_structured_response_preserved(self) -> None:
"""ExtendedModelResponse preserves structured_response from ModelResponse."""
class StructuredMiddleware(AgentMiddleware):
def wrap_model_call(
self,
request: ModelRequest,
handler: Callable[[ModelRequest], ModelResponse],
) -> ExtendedModelResponse:
response = handler(request)
response_with_structured = ModelResponse(
result=response.result,
structured_response={"key": "value"},
)
return ExtendedModelResponse(
model_response=response_with_structured,
)
model = GenericFakeChatModel(messages=iter([AIMessage(content="Hello")]))
agent = create_agent(model=model, middleware=[StructuredMiddleware()])
result = agent.invoke({"messages": [HumanMessage("Hi")]})
assert result.get("structured_response") == {"key": "value"}
messages = result["messages"]
assert len(messages) == 2
assert messages[1].content == "Hello"
class TestAsyncComposition:
    """Test async ExtendedModelResponse propagation through composed middleware."""

    async def test_async_inner_command_propagated(self) -> None:
        """Async: inner middleware's ExtendedModelResponse command is propagated."""

        class PassThrough(AgentMiddleware):
            async def awrap_model_call(
                self,
                request: ModelRequest,
                handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
            ) -> ModelResponse:
                # The outer layer observes a plain ModelResponse: the inner
                # layer's Command must not leak through the handler.
                outcome = await handler(request)
                assert isinstance(outcome, ModelResponse)
                return outcome

        class CommandEmitter(AgentMiddleware):
            async def awrap_model_call(
                self,
                request: ModelRequest,
                handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
            ) -> ExtendedModelResponse:
                return ExtendedModelResponse(
                    model_response=await handler(request),
                    command=Command(
                        update={
                            "messages": [
                                HumanMessage(content="Inner msg", id="inner"),
                            ]
                        }
                    ),
                )

        fake = GenericFakeChatModel(messages=iter([AIMessage(content="Hello")]))
        agent = create_agent(
            model=fake,
            middleware=[PassThrough(), CommandEmitter()],
        )
        result = await agent.ainvoke({"messages": [HumanMessage("Hi")]})

        # Model messages + inner command messages (additive).
        history = result["messages"]
        assert len(history) == 3
        assert [m.content for m in history] == ["Hi", "Hello", "Inner msg"]

    async def test_async_both_commands_additive_messages(self) -> None:
        """Async: both inner and outer command messages are added alongside model."""

        class OuterEmitter(AgentMiddleware):
            async def awrap_model_call(
                self,
                request: ModelRequest,
                handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
            ) -> ExtendedModelResponse:
                return ExtendedModelResponse(
                    model_response=await handler(request),
                    command=Command(
                        update={
                            "messages": [HumanMessage(content="Outer msg", id="outer")]
                        }
                    ),
                )

        class InnerEmitter(AgentMiddleware):
            async def awrap_model_call(
                self,
                request: ModelRequest,
                handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
            ) -> ExtendedModelResponse:
                return ExtendedModelResponse(
                    model_response=await handler(request),
                    command=Command(
                        update={
                            "messages": [HumanMessage(content="Inner msg", id="inner")]
                        }
                    ),
                )

        fake = GenericFakeChatModel(messages=iter([AIMessage(content="Hello")]))
        agent = create_agent(
            model=fake,
            middleware=[OuterEmitter(), InnerEmitter()],
        )
        result = await agent.ainvoke({"messages": [HumanMessage("Hi")]})

        # All messages additive: model reply first, then inner, then outer.
        history = result["messages"]
        assert len(history) == 4
        assert [m.content for m in history] == [
            "Hi",
            "Hello",
            "Inner msg",
            "Outer msg",
        ]

    async def test_async_inner_command_retry_safe(self) -> None:
        """Async: when outer retries, only last inner command is used."""
        invocations = 0

        class RetryState(AgentState):
            attempt: str

        class Retrier(AgentMiddleware):
            async def awrap_model_call(
                self,
                request: ModelRequest,
                handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
            ) -> ModelResponse:
                # Call the handler twice (simulating retry); keep the second.
                await handler(request)
                return await handler(request)

        class Recorder(AgentMiddleware):
            state_schema = RetryState  # type: ignore[assignment]

            async def awrap_model_call(
                self,
                request: ModelRequest,
                handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
            ) -> ExtendedModelResponse:
                nonlocal invocations
                invocations += 1
                return ExtendedModelResponse(
                    model_response=await handler(request),
                    command=Command(update={"attempt": f"attempt_{invocations}"}),
                )

        fake = GenericFakeChatModel(
            messages=iter([AIMessage(content="First"), AIMessage(content="Second")])
        )
        agent = create_agent(model=fake, middleware=[Retrier(), Recorder()])
        result = await agent.ainvoke({"messages": [HumanMessage("Hi")]})
        assert any(m.content == "Second" for m in result["messages"])
class TestCommandGotoDisallowed:
    """Test that Command goto raises NotImplementedError in wrap_model_call."""

    def test_command_goto_raises_not_implemented(self) -> None:
        """Command with goto in wrap_model_call raises NotImplementedError."""

        class GotoMiddleware(AgentMiddleware):
            def wrap_model_call(
                self,
                request: ModelRequest,
                handler: Callable[[ModelRequest], ModelResponse],
            ) -> ExtendedModelResponse:
                # goto is rejected when issued from wrap_model_call.
                return ExtendedModelResponse(
                    model_response=handler(request),
                    command=Command(goto="__end__"),
                )

        fake = GenericFakeChatModel(messages=iter([AIMessage(content="Hello!")]))
        agent = create_agent(model=fake, middleware=[GotoMiddleware()])
        with pytest.raises(
            NotImplementedError, match="Command goto is not yet supported"
        ):
            agent.invoke({"messages": [HumanMessage(content="Hi")]})

    async def test_async_command_goto_raises_not_implemented(self) -> None:
        """Async: Command with goto in wrap_model_call raises NotImplementedError."""

        class AsyncGotoMiddleware(AgentMiddleware):
            async def awrap_model_call(
                self,
                request: ModelRequest,
                handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
            ) -> ExtendedModelResponse:
                return ExtendedModelResponse(
                    model_response=await handler(request),
                    command=Command(goto="tools"),
                )

        fake = GenericFakeChatModel(messages=iter([AIMessage(content="Hello!")]))
        agent = create_agent(model=fake, middleware=[AsyncGotoMiddleware()])
        with pytest.raises(
            NotImplementedError, match="Command goto is not yet supported"
        ):
            await agent.ainvoke({"messages": [HumanMessage(content="Hi")]})
class TestCommandResumeDisallowed:
    """Test that Command resume raises NotImplementedError in wrap_model_call."""

    def test_command_resume_raises_not_implemented(self) -> None:
        """Command with resume in wrap_model_call raises NotImplementedError."""

        class ResumeMiddleware(AgentMiddleware):
            def wrap_model_call(
                self,
                request: ModelRequest,
                handler: Callable[[ModelRequest], ModelResponse],
            ) -> ExtendedModelResponse:
                # resume is rejected when issued from wrap_model_call.
                return ExtendedModelResponse(
                    model_response=handler(request),
                    command=Command(resume="some_value"),
                )

        fake = GenericFakeChatModel(messages=iter([AIMessage(content="Hello!")]))
        agent = create_agent(model=fake, middleware=[ResumeMiddleware()])
        with pytest.raises(
            NotImplementedError, match="Command resume is not yet supported"
        ):
            agent.invoke({"messages": [HumanMessage(content="Hi")]})

    async def test_async_command_resume_raises_not_implemented(self) -> None:
        """Async: Command with resume in wrap_model_call raises NotImplementedError."""

        class AsyncResumeMiddleware(AgentMiddleware):
            async def awrap_model_call(
                self,
                request: ModelRequest,
                handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
            ) -> ExtendedModelResponse:
                return ExtendedModelResponse(
                    model_response=await handler(request),
                    command=Command(resume="some_value"),
                )

        fake = GenericFakeChatModel(messages=iter([AIMessage(content="Hello!")]))
        agent = create_agent(model=fake, middleware=[AsyncResumeMiddleware()])
        with pytest.raises(
            NotImplementedError, match="Command resume is not yet supported"
        ):
            await agent.ainvoke({"messages": [HumanMessage(content="Hi")]})
class TestCommandGraphDisallowed:
    """Test that Command graph raises NotImplementedError in wrap_model_call."""

    def test_command_graph_raises_not_implemented(self) -> None:
        """Command with graph in wrap_model_call raises NotImplementedError."""

        class GraphMiddleware(AgentMiddleware):
            def wrap_model_call(
                self,
                request: ModelRequest,
                handler: Callable[[ModelRequest], ModelResponse],
            ) -> ExtendedModelResponse:
                # Targeting the parent graph is rejected from wrap_model_call.
                return ExtendedModelResponse(
                    model_response=handler(request),
                    command=Command(graph=Command.PARENT, update={"messages": []}),
                )

        fake = GenericFakeChatModel(messages=iter([AIMessage(content="Hello!")]))
        agent = create_agent(model=fake, middleware=[GraphMiddleware()])
        with pytest.raises(
            NotImplementedError, match="Command graph is not yet supported"
        ):
            agent.invoke({"messages": [HumanMessage(content="Hi")]})

    async def test_async_command_graph_raises_not_implemented(self) -> None:
        """Async: Command with graph in wrap_model_call raises NotImplementedError."""

        class AsyncGraphMiddleware(AgentMiddleware):
            async def awrap_model_call(
                self,
                request: ModelRequest,
                handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
            ) -> ExtendedModelResponse:
                return ExtendedModelResponse(
                    model_response=await handler(request),
                    command=Command(graph=Command.PARENT, update={"messages": []}),
                )

        fake = GenericFakeChatModel(messages=iter([AIMessage(content="Hello!")]))
        agent = create_agent(model=fake, middleware=[AsyncGraphMiddleware()])
        with pytest.raises(
            NotImplementedError, match="Command graph is not yet supported"
        ):
            await agent.ainvoke({"messages": [HumanMessage(content="Hi")]})
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/tests/unit_tests/agents/middleware/core/test_wrap_model_call_state_update.py",
"license": "MIT License",
"lines": 745,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/standard-tests/langchain_tests/integration_tests/sandboxes.py | """Integration tests for the deepagents sandbox backend abstraction.
Implementers should subclass this test suite and provide a fixture that returns a
clean `SandboxBackendProtocol` instance.
Example:
```python
from __future__ import annotations
from collections.abc import Iterator
import pytest
from deepagents.backends.protocol import SandboxBackendProtocol
from langchain_tests.integration_tests import SandboxIntegrationTests
from my_pkg import make_sandbox
class TestMySandboxStandard(SandboxIntegrationTests):
@pytest.fixture(scope="class")
def sandbox(self) -> Iterator[SandboxBackendProtocol]:
backend = make_sandbox()
try:
yield backend
finally:
backend.delete()
```
"""
# ruff: noqa: E402, S108
from __future__ import annotations
from abc import abstractmethod
from typing import TYPE_CHECKING
import pytest
deepagents = pytest.importorskip("deepagents")
from deepagents.backends.protocol import (
FileDownloadResponse,
FileUploadResponse,
SandboxBackendProtocol,
)
from langchain_tests.base import BaseStandardTests
if TYPE_CHECKING:
from collections.abc import Iterator
class SandboxIntegrationTests(BaseStandardTests):
    """Standard integration tests for a `SandboxBackendProtocol` implementation."""

    @pytest.fixture(scope="class")
    def sandbox_backend(
        self, sandbox: SandboxBackendProtocol
    ) -> SandboxBackendProtocol:
        """Provide the sandbox backend under test.

        Resets the shared test directory before yielding.
        """
        sandbox.execute(
            "rm -rf /tmp/test_sandbox_ops && mkdir -p /tmp/test_sandbox_ops"
        )
        return sandbox

    @abstractmethod
    @pytest.fixture(scope="class")
    def sandbox(self) -> Iterator[SandboxBackendProtocol]:
        """Yield a clean sandbox backend and tear it down after the class."""

    @property
    def has_sync(self) -> bool:
        """Whether the sandbox supports sync methods."""
        return True

    @property
    def has_async(self) -> bool:
        """Whether the sandbox supports async methods."""
        return True

    @pytest.fixture(autouse=True)
    def _setup_test_dir(self, sandbox_backend: SandboxBackendProtocol) -> None:
        """Skip sync-only suites and reset the shared directory before each test."""
        # autouse: runs before every test in the class so each test starts
        # from an empty /tmp/test_sandbox_ops.
        if not self.has_sync:
            pytest.skip("Sync tests not supported.")
        sandbox_backend.execute(
            "rm -rf /tmp/test_sandbox_ops && mkdir -p /tmp/test_sandbox_ops"
        )

    def test_write_new_file(self, sandbox_backend: SandboxBackendProtocol) -> None:
        """Write a new file and verify it can be read back via command execution."""
        if not self.has_sync:
            pytest.skip("Sync tests not supported.")
        test_path = "/tmp/test_sandbox_ops/new_file.txt"
        content = "Hello, sandbox!\nLine 2\nLine 3"
        result = sandbox_backend.write(test_path, content)
        assert result.error is None
        assert result.path == test_path
        # Read back through the shell rather than `read()` so the write is
        # verified independently of the backend's own read implementation.
        exec_result = sandbox_backend.execute(f"cat {test_path}")
        assert exec_result.output.strip() == content

    def test_read_basic_file(self, sandbox_backend: SandboxBackendProtocol) -> None:
        """Write a file and verify `read()` returns expected contents."""
        if not self.has_sync:
            pytest.skip("Sync tests not supported.")
        test_path = "/tmp/test_sandbox_ops/read_test.txt"
        content = "Line 1\nLine 2\nLine 3"
        sandbox_backend.write(test_path, content)
        result = sandbox_backend.read(test_path)
        # `read()` may decorate output (e.g. line numbers), so check per-line
        # containment rather than exact equality.
        assert "Error:" not in result
        assert all(line in result for line in ("Line 1", "Line 2", "Line 3"))

    def test_edit_single_occurrence(
        self, sandbox_backend: SandboxBackendProtocol
    ) -> None:
        """Edit a file and assert exactly one occurrence was replaced."""
        if not self.has_sync:
            pytest.skip("Sync tests not supported.")
        test_path = "/tmp/test_sandbox_ops/edit_single.txt"
        content = "Hello world\nGoodbye world\nHello again"
        sandbox_backend.write(test_path, content)
        result = sandbox_backend.edit(test_path, "Goodbye", "Farewell")
        assert result.error is None
        assert result.occurrences == 1
        file_content = sandbox_backend.read(test_path)
        assert "Farewell world" in file_content
        assert "Goodbye" not in file_content

    def test_ls_info_lists_files(self, sandbox_backend: SandboxBackendProtocol) -> None:
        """Create files and verify `ls_info()` lists them."""
        if not self.has_sync:
            pytest.skip("Sync tests not supported.")
        sandbox_backend.write("/tmp/test_sandbox_ops/a.txt", "a")
        sandbox_backend.write("/tmp/test_sandbox_ops/b.txt", "b")
        info = sandbox_backend.ls_info("/tmp/test_sandbox_ops")
        # Membership check only: the listing may include other entries.
        paths = sorted([i["path"] for i in info])
        assert "/tmp/test_sandbox_ops/a.txt" in paths
        assert "/tmp/test_sandbox_ops/b.txt" in paths

    def test_glob_info(self, sandbox_backend: SandboxBackendProtocol) -> None:
        """Create files and verify `glob_info()` returns expected matches."""
        if not self.has_sync:
            pytest.skip("Sync tests not supported.")
        sandbox_backend.write("/tmp/test_sandbox_ops/x.py", "print('x')")
        sandbox_backend.write("/tmp/test_sandbox_ops/y.txt", "y")
        matches = sandbox_backend.glob_info("*.py", path="/tmp/test_sandbox_ops")
        # Glob matches are expected relative to the given `path`.
        assert [m["path"] for m in matches] == ["x.py"]

    def test_grep_raw_literal(self, sandbox_backend: SandboxBackendProtocol) -> None:
        """Verify `grep_raw()` performs literal matching on special characters."""
        if not self.has_sync:
            pytest.skip("Sync tests not supported.")
        # "str | int" contains regex metacharacters; a literal search must
        # still find it.
        sandbox_backend.write("/tmp/test_sandbox_ops/grep.txt", "a (b)\nstr | int\n")
        matches = sandbox_backend.grep_raw("str | int", path="/tmp/test_sandbox_ops")
        assert isinstance(matches, list)
        assert matches[0]["path"].endswith("/grep.txt")
        assert matches[0]["text"].strip() == "str | int"

    def test_upload_single_file(self, sandbox_backend: SandboxBackendProtocol) -> None:
        """Upload one file and verify its contents on the sandbox."""
        if not self.has_sync:
            pytest.skip("Sync tests not supported.")
        test_path = "/tmp/test_upload_single.txt"
        test_content = b"Hello, Sandbox!"
        upload_responses = sandbox_backend.upload_files([(test_path, test_content)])
        assert len(upload_responses) == 1
        assert upload_responses[0].path == test_path
        assert upload_responses[0].error is None
        result = sandbox_backend.execute(f"cat {test_path}")
        assert result.output.strip() == test_content.decode()

    def test_download_single_file(
        self, sandbox_backend: SandboxBackendProtocol
    ) -> None:
        """Upload then download a file and verify bytes match."""
        if not self.has_sync:
            pytest.skip("Sync tests not supported.")
        test_path = "/tmp/test_download_single.txt"
        test_content = b"Download test content"
        sandbox_backend.upload_files([(test_path, test_content)])
        download_responses = sandbox_backend.download_files([test_path])
        assert len(download_responses) == 1
        assert download_responses[0].path == test_path
        assert download_responses[0].content == test_content
        assert download_responses[0].error is None

    def test_upload_download_roundtrip(
        self, sandbox_backend: SandboxBackendProtocol
    ) -> None:
        """Upload then download and verify bytes survive a roundtrip."""
        if not self.has_sync:
            pytest.skip("Sync tests not supported.")
        test_path = "/tmp/test_roundtrip.txt"
        # Includes a NUL byte and control characters to catch text-mode
        # transports that would mangle binary-ish content.
        test_content = b"Roundtrip test: special chars \n\t\r\x00"
        upload_responses = sandbox_backend.upload_files([(test_path, test_content)])
        assert upload_responses == [FileUploadResponse(path=test_path, error=None)]
        download_responses = sandbox_backend.download_files([test_path])
        assert download_responses == [
            FileDownloadResponse(path=test_path, content=test_content, error=None)
        ]

    def test_upload_multiple_files_order_preserved(
        self,
        sandbox_backend: SandboxBackendProtocol,
    ) -> None:
        """Uploading multiple files should preserve input order in responses."""
        if not self.has_sync:
            pytest.skip("Sync tests not supported.")
        files = [
            ("/tmp/test_multi_1.txt", b"Content 1"),
            ("/tmp/test_multi_2.txt", b"Content 2"),
            ("/tmp/test_multi_3.txt", b"Content 3"),
        ]
        upload_responses = sandbox_backend.upload_files(files)
        assert upload_responses == [
            FileUploadResponse(path=files[0][0], error=None),
            FileUploadResponse(path=files[1][0], error=None),
            FileUploadResponse(path=files[2][0], error=None),
        ]

    def test_download_multiple_files_order_preserved(
        self,
        sandbox_backend: SandboxBackendProtocol,
    ) -> None:
        """Downloading multiple files should preserve input order in responses."""
        if not self.has_sync:
            pytest.skip("Sync tests not supported.")
        files = [
            ("/tmp/test_batch_1.txt", b"Batch 1"),
            ("/tmp/test_batch_2.txt", b"Batch 2"),
            ("/tmp/test_batch_3.txt", b"Batch 3"),
        ]
        sandbox_backend.upload_files(files)
        paths = [p for p, _ in files]
        download_responses = sandbox_backend.download_files(paths)
        assert download_responses == [
            FileDownloadResponse(path=files[0][0], content=files[0][1], error=None),
            FileDownloadResponse(path=files[1][0], content=files[1][1], error=None),
            FileDownloadResponse(path=files[2][0], content=files[2][1], error=None),
        ]

    def test_upload_binary_content_roundtrip(
        self, sandbox_backend: SandboxBackendProtocol
    ) -> None:
        """Upload and download binary bytes (0..255) without corruption."""
        if not self.has_sync:
            pytest.skip("Sync tests not supported.")
        test_path = "/tmp/binary_file.bin"
        # Every possible byte value, to catch any encoding/escaping layer.
        test_content = bytes(range(256))
        upload_responses = sandbox_backend.upload_files([(test_path, test_content)])
        assert upload_responses == [FileUploadResponse(path=test_path, error=None)]
        download_responses = sandbox_backend.download_files([test_path])
        assert download_responses == [
            FileDownloadResponse(path=test_path, content=test_content, error=None)
        ]

    def test_download_error_file_not_found(
        self, sandbox_backend: SandboxBackendProtocol
    ) -> None:
        """Downloading a missing file should return `error="file_not_found"`."""
        if not self.has_sync:
            pytest.skip("Sync tests not supported.")
        missing_path = "/tmp/nonexistent_test_file.txt"
        responses = sandbox_backend.download_files([missing_path])
        assert responses == [
            FileDownloadResponse(
                path=missing_path, content=None, error="file_not_found"
            )
        ]

    def test_download_error_is_directory(
        self, sandbox_backend: SandboxBackendProtocol
    ) -> None:
        """Downloading a directory should fail with a reasonable error code."""
        if not self.has_sync:
            pytest.skip("Sync tests not supported.")
        dir_path = "/tmp/test_directory"
        sandbox_backend.execute(f"rm -rf {dir_path} && mkdir -p {dir_path}")
        responses = sandbox_backend.download_files([dir_path])
        assert len(responses) == 1
        assert responses[0].path == dir_path
        assert responses[0].content is None
        # Backends differ in how they classify this failure; accept any of
        # the sanctioned codes.
        assert responses[0].error in {"is_directory", "file_not_found", "invalid_path"}

    def test_download_error_permission_denied(
        self, sandbox_backend: SandboxBackendProtocol
    ) -> None:
        """Downloading a chmod 000 file should fail with a reasonable error code."""
        if not self.has_sync:
            pytest.skip("Sync tests not supported.")
        test_path = "/tmp/test_no_read.txt"
        sandbox_backend.execute(
            f"rm -f {test_path} && echo secret > {test_path} && chmod 000 {test_path}"
        )
        try:
            responses = sandbox_backend.download_files([test_path])
        finally:
            # Restore permissions so later cleanup can remove the file;
            # `|| true` keeps the teardown from masking the real assertion.
            sandbox_backend.execute(f"chmod 644 {test_path} || true")
        assert len(responses) == 1
        assert responses[0].path == test_path
        assert responses[0].content is None
        # NOTE: sandboxes running as root may not fail at all on chmod 000;
        # accepted codes cover the observed backend variations.
        assert responses[0].error in {
            "permission_denied",
            "file_not_found",
            "invalid_path",
        }

    def test_download_error_invalid_path_relative(
        self,
        sandbox_backend: SandboxBackendProtocol,
    ) -> None:
        """Downloading a relative path should fail with `error="invalid_path"`."""
        if not self.has_sync:
            pytest.skip("Sync tests not supported.")
        responses = sandbox_backend.download_files(["relative/path.txt"])
        assert responses == [
            FileDownloadResponse(
                path="relative/path.txt",
                content=None,
                error="invalid_path",
            )
        ]

    def test_upload_missing_parent_dir_or_roundtrip(
        self,
        sandbox_backend: SandboxBackendProtocol,
    ) -> None:
        """Uploading into a missing parent dir should error or roundtrip.

        Some sandboxes auto-create parent directories; others return an error.
        """
        if not self.has_sync:
            pytest.skip("Sync tests not supported.")
        dir_path = "/tmp/test_upload_missing_parent_dir"
        path = f"{dir_path}/deepagents_test_upload.txt"
        content = b"nope"
        sandbox_backend.execute(f"rm -rf {dir_path}")
        responses = sandbox_backend.upload_files([(path, content)])
        assert len(responses) == 1
        assert responses[0].path == path
        if responses[0].error is not None:
            # Error branch: any sanctioned code is acceptable.
            assert responses[0].error in {
                "invalid_path",
                "permission_denied",
                "file_not_found",
            }
            return
        # Auto-create branch: the upload must then roundtrip cleanly.
        download = sandbox_backend.download_files([path])
        assert download == [
            FileDownloadResponse(path=path, content=content, error=None)
        ]

    def test_upload_relative_path_returns_invalid_path(
        self,
        sandbox_backend: SandboxBackendProtocol,
    ) -> None:
        """Uploading to a relative path should fail with `error="invalid_path"`."""
        if not self.has_sync:
            pytest.skip("Sync tests not supported.")
        path = "relative_upload.txt"
        content = b"nope"
        responses = sandbox_backend.upload_files([(path, content)])
        assert responses == [FileUploadResponse(path=path, error="invalid_path")]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/standard-tests/langchain_tests/integration_tests/sandboxes.py",
"license": "MIT License",
"lines": 323,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/langchain_v1/tests/unit_tests/agents/middleware_typing/test_middleware_backwards_compat.py | """Test backwards compatibility for middleware type parameters.
This file verifies that middlewares written BEFORE the ResponseT change still work.
All patterns that were valid before should remain valid.
Run type check: uv run --group typing mypy <this file>
Run tests: uv run --group test pytest <this file> -v
"""
from __future__ import annotations
from typing import TYPE_CHECKING, Any
import pytest
from langchain_core.language_models.fake_chat_models import GenericFakeChatModel
from langchain_core.messages import AIMessage, HumanMessage
from typing_extensions import TypedDict
from langchain.agents import create_agent
from langchain.agents.middleware.types import (
AgentMiddleware,
AgentState,
ContextT,
ModelRequest,
ModelResponse,
before_model,
)
if TYPE_CHECKING:
from collections.abc import Awaitable, Callable
from langgraph.runtime import Runtime
# =============================================================================
# OLD PATTERN 1: Completely unparameterized AgentMiddleware
# This was the most common pattern for simple middlewares
# =============================================================================
class OldStyleMiddleware1(AgentMiddleware):
    """Middleware with no type parameters at all - most common old pattern."""

    def before_model(self, state: AgentState[Any], runtime: Runtime[None]) -> dict[str, Any] | None:
        # Simple middleware that just logs or does something; returning None
        # means "no state update".
        return None

    def wrap_model_call(
        self,
        request: ModelRequest,  # No type param
        handler: Callable[[ModelRequest], ModelResponse],  # No type params
    ) -> ModelResponse:  # No type param
        # Pure pass-through: delegates to the next handler unchanged.
        return handler(request)
# =============================================================================
# OLD PATTERN 2: AgentMiddleware with only 2 type parameters (StateT, ContextT)
# This was the pattern before ResponseT was added
# =============================================================================
class OldStyleMiddleware2(AgentMiddleware[AgentState[Any], ContextT]):
    """Middleware with 2 type params - the old signature before ResponseT.

    The annotations themselves are the subject under test: this must keep
    type-checking after ResponseT was added as a third parameter.
    """

    def wrap_model_call(
        self,
        request: ModelRequest[ContextT],
        handler: Callable[[ModelRequest[ContextT]], ModelResponse],
    ) -> ModelResponse:
        # Pure pass-through: delegates to the next handler unchanged.
        return handler(request)
# =============================================================================
# OLD PATTERN 3: Middleware with explicit None context
# =============================================================================
class OldStyleMiddleware3(AgentMiddleware[AgentState[Any], None]):
    """Middleware explicitly typed for no context."""

    def wrap_model_call(
        self,
        request: ModelRequest[None],
        handler: Callable[[ModelRequest[None]], ModelResponse],
    ) -> ModelResponse:
        # Pure pass-through: delegates to the next handler unchanged.
        return handler(request)
# =============================================================================
# OLD PATTERN 4: Middleware with specific context type (2 params)
# =============================================================================
class MyContext(TypedDict):
    """Runtime context schema used by the context-typed middleware below."""

    user_id: str  # supplied via `context={"user_id": ...}` at invoke time
class OldStyleMiddleware4(AgentMiddleware[AgentState[Any], MyContext]):
    """Middleware with specific context - old 2-param pattern."""

    def wrap_model_call(
        self,
        request: ModelRequest[MyContext],
        handler: Callable[[ModelRequest[MyContext]], ModelResponse],
    ) -> ModelResponse:
        # Access context fields: proves typed context access still checks.
        _user_id: str = request.runtime.context["user_id"]
        return handler(request)
# =============================================================================
# OLD PATTERN 5: Decorator-based middleware
# =============================================================================
@before_model
def old_style_decorator(state: AgentState[Any], runtime: Runtime[None]) -> dict[str, Any] | None:
    """Decorator middleware - old pattern; returns None (no state update)."""
    return None
# =============================================================================
# OLD PATTERN 6: Async middleware (2 params)
# =============================================================================
class OldStyleAsyncMiddleware(AgentMiddleware[AgentState[Any], ContextT]):
    """Async middleware with old 2-param pattern."""

    async def awrap_model_call(
        self,
        request: ModelRequest[ContextT],
        handler: Callable[[ModelRequest[ContextT]], Awaitable[ModelResponse]],
    ) -> ModelResponse:
        # Pure pass-through: awaits the next handler unchanged.
        return await handler(request)
# =============================================================================
# OLD PATTERN 7: ModelResponse without type parameter
# =============================================================================
class OldStyleModelResponseMiddleware(AgentMiddleware):
    """Middleware using ModelResponse without type param."""

    def wrap_model_call(
        self,
        request: ModelRequest,
        handler: Callable[[ModelRequest], ModelResponse],
    ) -> ModelResponse:
        response = handler(request)
        # Access result - this always worked
        _ = response.result
        # structured_response was Any before, still works
        _ = response.structured_response
        return response
# =============================================================================
# TESTS: Verify all old patterns still work at runtime
# =============================================================================
@pytest.fixture
def fake_model() -> GenericFakeChatModel:
    """Create a fake model for testing."""
    # One scripted reply is enough: each test invokes the agent once.
    scripted_replies = iter([AIMessage(content="Hello")])
    return GenericFakeChatModel(messages=scripted_replies)
def test_old_pattern_1_unparameterized(fake_model: GenericFakeChatModel) -> None:
    """Old pattern 1: Completely unparameterized middleware."""
    graph = create_agent(model=fake_model, middleware=[OldStyleMiddleware1()])
    output = graph.invoke({"messages": [HumanMessage(content="hi")]})
    # The agent must run end-to-end and produce at least one message.
    assert "messages" in output
    assert output["messages"]
def test_old_pattern_2_two_params(fake_model: GenericFakeChatModel) -> None:
    """Old pattern 2: AgentMiddleware[StateT, ContextT] - 2 params."""
    graph = create_agent(model=fake_model, middleware=[OldStyleMiddleware2()])
    output = graph.invoke({"messages": [HumanMessage(content="hi")]})
    # The agent must run end-to-end and produce at least one message.
    assert "messages" in output
    assert output["messages"]
def test_old_pattern_3_explicit_none(fake_model: GenericFakeChatModel) -> None:
    """Old pattern 3: Explicit None context."""
    graph = create_agent(model=fake_model, middleware=[OldStyleMiddleware3()])
    output = graph.invoke({"messages": [HumanMessage(content="hi")]})
    # The agent must run end-to-end and produce at least one message.
    assert "messages" in output
    assert output["messages"]
def test_old_pattern_4_specific_context(fake_model: GenericFakeChatModel) -> None:
"""Old pattern 4: Specific context type with 2 params."""
agent = create_agent(
model=fake_model,
middleware=[OldStyleMiddleware4()],
context_schema=MyContext,
)
result = agent.invoke(
{"messages": [HumanMessage(content="hi")]},
context={"user_id": "test-user"},
)
assert "messages" in result
assert len(result["messages"]) >= 1
def test_old_pattern_5_decorator(fake_model: GenericFakeChatModel) -> None:
"""Old pattern 5: Decorator-based middleware."""
agent = create_agent(
model=fake_model,
middleware=[old_style_decorator],
)
result = agent.invoke({"messages": [HumanMessage(content="hi")]})
assert "messages" in result
assert len(result["messages"]) >= 1
async def test_old_pattern_6_async(fake_model: GenericFakeChatModel) -> None:
"""Old pattern 6: Async middleware with 2 params."""
agent = create_agent(
model=fake_model,
middleware=[OldStyleAsyncMiddleware()],
)
result = await agent.ainvoke({"messages": [HumanMessage(content="hi")]})
assert "messages" in result
assert len(result["messages"]) >= 1
def test_old_pattern_7_model_response_unparameterized(
fake_model: GenericFakeChatModel,
) -> None:
"""Old pattern 7: ModelResponse without type parameter."""
agent = create_agent(
model=fake_model,
middleware=[OldStyleModelResponseMiddleware()],
)
result = agent.invoke({"messages": [HumanMessage(content="hi")]})
assert "messages" in result
assert len(result["messages"]) >= 1
def test_multiple_old_style_middlewares(fake_model: GenericFakeChatModel) -> None:
"""Multiple old-style middlewares can be combined."""
agent = create_agent(
model=fake_model,
middleware=[
OldStyleMiddleware1(),
OldStyleMiddleware2(),
OldStyleMiddleware3(),
old_style_decorator,
OldStyleModelResponseMiddleware(),
],
)
result = agent.invoke({"messages": [HumanMessage(content="hi")]})
assert "messages" in result
assert len(result["messages"]) >= 1
def test_model_response_backwards_compat() -> None:
"""ModelResponse can be instantiated without type params."""
# Old way - no type param
response = ModelResponse(result=[AIMessage(content="test")])
assert response.structured_response is None
# Old way - accessing fields
response2 = ModelResponse(
result=[AIMessage(content="test")],
structured_response={"key": "value"},
)
assert response2.structured_response == {"key": "value"}
def test_model_request_backwards_compat() -> None:
"""ModelRequest can be instantiated without type params."""
# Old way - no type param
request = ModelRequest(
model=None, # type: ignore[arg-type]
messages=[HumanMessage(content="test")],
)
assert len(request.messages) == 1
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/tests/unit_tests/agents/middleware_typing/test_middleware_backwards_compat.py",
"license": "MIT License",
"lines": 221,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/langchain_v1/tests/unit_tests/agents/middleware_typing/test_middleware_type_errors.py | """Demonstrate type errors that mypy catches for ContextT and ResponseT mismatches.
This file contains intentional type errors to demonstrate that mypy catches them.
Run: uv run --group typing mypy <this file>
Expected errors:
1. TypedDict "UserContext" has no key "session_id" - accessing wrong context field
2. Argument incompatible with supertype - mismatched ModelRequest type
3. Cannot infer value of type parameter - middleware/context_schema mismatch
4. "AnalysisResult" has no attribute "summary" - accessing wrong response field
5. Handler returns wrong ResponseT type
"""
from __future__ import annotations
from typing import TYPE_CHECKING, Any
from pydantic import BaseModel
from typing_extensions import TypedDict
from langchain.agents import create_agent
from langchain.agents.middleware.types import (
AgentMiddleware,
AgentState,
ContextT,
ModelRequest,
ModelResponse,
)
from tests.unit_tests.agents.model import FakeToolCallingModel
if TYPE_CHECKING:
from collections.abc import Callable
# =============================================================================
# Context and Response schemas
# =============================================================================
class UserContext(TypedDict):
user_id: str
user_name: str
class SessionContext(TypedDict):
session_id: str
expires_at: int
class AnalysisResult(BaseModel):
sentiment: str
confidence: float
class SummaryResult(BaseModel):
summary: str
key_points: list[str]
# =============================================================================
# ERROR 1: Using wrong context fields
# =============================================================================
class WrongContextFieldsMiddleware(AgentMiddleware[AgentState[Any], UserContext, Any]):
def wrap_model_call(
self,
request: ModelRequest[UserContext],
handler: Callable[[ModelRequest[UserContext]], ModelResponse[Any]],
) -> ModelResponse[Any]:
# TYPE ERROR: 'session_id' doesn't exist on UserContext
session_id: str = request.runtime.context["session_id"] # type: ignore[typeddict-item]
_ = session_id
return handler(request)
# =============================================================================
# ERROR 2: Mismatched ModelRequest type parameter in method signature
# =============================================================================
class MismatchedRequestMiddleware(AgentMiddleware[AgentState[Any], UserContext, Any]):
def wrap_model_call( # type: ignore[override]
self,
# TYPE ERROR: Should be ModelRequest[UserContext], not SessionContext
request: ModelRequest[SessionContext],
handler: Callable[[ModelRequest[SessionContext]], ModelResponse[Any]],
) -> ModelResponse[Any]:
return handler(request)
# =============================================================================
# ERROR 3: Middleware ContextT doesn't match context_schema
# =============================================================================
class SessionContextMiddleware(AgentMiddleware[AgentState[Any], SessionContext, Any]):
def wrap_model_call(
self,
request: ModelRequest[SessionContext],
handler: Callable[[ModelRequest[SessionContext]], ModelResponse[Any]],
) -> ModelResponse[Any]:
return handler(request)
def test_mismatched_context_schema() -> None:
# TYPE ERROR: SessionContextMiddleware expects SessionContext,
# but context_schema is UserContext
fake_model = FakeToolCallingModel()
_agent = create_agent( # type: ignore[misc]
model=fake_model,
middleware=[SessionContextMiddleware()],
context_schema=UserContext,
)
# =============================================================================
# ERROR 4: Backwards compatible middleware with typed context_schema
# =============================================================================
class BackwardsCompatibleMiddleware(AgentMiddleware):
def wrap_model_call(
self,
request: ModelRequest,
handler: Callable[[ModelRequest], ModelResponse],
) -> ModelResponse:
return handler(request)
def test_backwards_compat_with_context_schema() -> None:
# TYPE ERROR: BackwardsCompatibleMiddleware is AgentMiddleware[..., None]
# but context_schema=UserContext expects AgentMiddleware[..., UserContext]
fake_model = FakeToolCallingModel()
_agent = create_agent( # type: ignore[misc]
model=fake_model,
middleware=[BackwardsCompatibleMiddleware()],
context_schema=UserContext,
)
# =============================================================================
# ERROR 5: Using wrong response fields
# =============================================================================
class WrongResponseFieldsMiddleware(
AgentMiddleware[AgentState[AnalysisResult], ContextT, AnalysisResult]
):
def wrap_model_call(
self,
request: ModelRequest[ContextT],
handler: Callable[[ModelRequest[ContextT]], ModelResponse[AnalysisResult]],
) -> ModelResponse[AnalysisResult]:
response = handler(request)
if response.structured_response is not None:
# TYPE ERROR: 'summary' doesn't exist on AnalysisResult
summary: str = response.structured_response.summary # type: ignore[attr-defined]
_ = summary
return response
# =============================================================================
# ERROR 6: Mismatched ResponseT in method signature
# =============================================================================
class MismatchedResponseMiddleware(
AgentMiddleware[AgentState[AnalysisResult], ContextT, AnalysisResult]
):
def wrap_model_call( # type: ignore[override]
self,
request: ModelRequest[ContextT],
# TYPE ERROR: Handler should return ModelResponse[AnalysisResult], not SummaryResult
handler: Callable[[ModelRequest[ContextT]], ModelResponse[SummaryResult]],
) -> ModelResponse[AnalysisResult]:
# This would fail at runtime - types don't match
return handler(request) # type: ignore[return-value]
# =============================================================================
# ERROR 7: Middleware ResponseT doesn't match response_format
# =============================================================================
class AnalysisMiddleware(AgentMiddleware[AgentState[AnalysisResult], ContextT, AnalysisResult]):
def wrap_model_call(
self,
request: ModelRequest[ContextT],
handler: Callable[[ModelRequest[ContextT]], ModelResponse[AnalysisResult]],
) -> ModelResponse[AnalysisResult]:
return handler(request)
def test_mismatched_response_format() -> None:
# TODO: TYPE ERROR not yet detected by mypy - AnalysisMiddleware expects AnalysisResult,
# but response_format is SummaryResult. This requires more sophisticated typing.
fake_model = FakeToolCallingModel()
_agent = create_agent(
model=fake_model,
middleware=[AnalysisMiddleware()],
response_format=SummaryResult,
)
# =============================================================================
# ERROR 8: Wrong return type from wrap_model_call
# =============================================================================
class WrongReturnTypeMiddleware(
AgentMiddleware[AgentState[AnalysisResult], ContextT, AnalysisResult]
):
def wrap_model_call( # type: ignore[override]
self,
request: ModelRequest[ContextT],
handler: Callable[[ModelRequest[ContextT]], ModelResponse[AnalysisResult]],
) -> ModelResponse[SummaryResult]: # TYPE ERROR: Should return ModelResponse[AnalysisResult]
return handler(request) # type: ignore[return-value]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/tests/unit_tests/agents/middleware_typing/test_middleware_type_errors.py",
"license": "MIT License",
"lines": 164,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/langchain_v1/tests/unit_tests/agents/middleware_typing/test_middleware_typing.py | """Test file to verify type safety in middleware (ContextT and ResponseT).
This file demonstrates:
1. Backwards compatible middlewares (no type params specified) - works with defaults
2. Correctly typed middlewares (ContextT/ResponseT match) - full type safety
3. Type errors that are caught when types don't match
Run type check: uv run --group typing mypy <this file>
Run tests: uv run --group test pytest <this file> -v
To see type errors being caught, run:
uv run --group typing mypy .../test_middleware_type_errors.py
"""
from __future__ import annotations
from typing import TYPE_CHECKING, Any
import pytest
from langchain_core.language_models.fake_chat_models import GenericFakeChatModel
from langchain_core.messages import AIMessage, HumanMessage
from pydantic import BaseModel
from typing_extensions import TypedDict
from langchain.agents import create_agent
from langchain.agents.middleware.types import (
AgentMiddleware,
AgentState,
ContextT,
ModelRequest,
ModelResponse,
ResponseT,
before_model,
)
if TYPE_CHECKING:
from collections.abc import Awaitable, Callable
from langgraph.graph.state import CompiledStateGraph
from langgraph.runtime import Runtime
# =============================================================================
# Context and Response schemas for testing
# =============================================================================
class UserContext(TypedDict):
"""Context with user information."""
user_id: str
user_name: str
class SessionContext(TypedDict):
"""Different context schema."""
session_id: str
expires_at: int
class AnalysisResult(BaseModel):
"""Structured response schema."""
sentiment: str
confidence: float
class SummaryResult(BaseModel):
"""Different structured response schema."""
summary: str
key_points: list[str]
# =============================================================================
# 1. BACKWARDS COMPATIBLE: Middlewares without type parameters
# These work when create_agent has NO context_schema or response_format
# =============================================================================
class BackwardsCompatibleMiddleware(AgentMiddleware):
"""Middleware that doesn't specify type parameters - backwards compatible."""
def before_model(self, state: AgentState[Any], runtime: Runtime[None]) -> dict[str, Any] | None:
return None
def wrap_model_call(
self,
request: ModelRequest, # No type param - backwards compatible!
handler: Callable[[ModelRequest], ModelResponse],
) -> ModelResponse:
return handler(request)
class BackwardsCompatibleMiddleware2(AgentMiddleware):
"""Another backwards compatible middleware using ModelRequest without params."""
def wrap_model_call(
self,
request: ModelRequest, # Unparameterized - defaults to ModelRequest[None]
handler: Callable[[ModelRequest], ModelResponse],
) -> ModelResponse:
_ = request.runtime
return handler(request)
@before_model
def backwards_compatible_decorator(
state: AgentState[Any], runtime: Runtime[None]
) -> dict[str, Any] | None:
"""Decorator middleware without explicit type parameters."""
return None
# =============================================================================
# 2. CORRECTLY TYPED: Middlewares with explicit ContextT
# These work when create_agent has MATCHING context_schema
# =============================================================================
class UserContextMiddleware(AgentMiddleware[AgentState[Any], UserContext, Any]):
"""Middleware with correctly specified UserContext."""
def before_model(
self, state: AgentState[Any], runtime: Runtime[UserContext]
) -> dict[str, Any] | None:
# Full type safety - IDE knows these fields exist
_user_id: str = runtime.context["user_id"]
_user_name: str = runtime.context["user_name"]
return None
def wrap_model_call(
self,
request: ModelRequest[UserContext], # Correctly parameterized!
handler: Callable[[ModelRequest[UserContext]], ModelResponse[Any]],
) -> ModelResponse[Any]:
# request.runtime.context is UserContext - fully typed!
_user_id: str = request.runtime.context["user_id"]
return handler(request)
class SessionContextMiddleware(AgentMiddleware[AgentState[Any], SessionContext, Any]):
"""Middleware with correctly specified SessionContext."""
def wrap_model_call(
self,
request: ModelRequest[SessionContext],
handler: Callable[[ModelRequest[SessionContext]], ModelResponse[Any]],
) -> ModelResponse[Any]:
_session_id: str = request.runtime.context["session_id"]
_expires: int = request.runtime.context["expires_at"]
return handler(request)
# =============================================================================
# 3. CORRECTLY TYPED: Middlewares with explicit ResponseT
# These work when create_agent has MATCHING response_format
# =============================================================================
class AnalysisResponseMiddleware(
AgentMiddleware[AgentState[AnalysisResult], ContextT, AnalysisResult]
):
"""Middleware with correctly specified AnalysisResult response type."""
def wrap_model_call(
self,
request: ModelRequest[ContextT],
handler: Callable[[ModelRequest[ContextT]], ModelResponse[AnalysisResult]],
) -> ModelResponse[AnalysisResult]:
response = handler(request)
# Full type safety on structured_response
if response.structured_response is not None:
_sentiment: str = response.structured_response.sentiment
_confidence: float = response.structured_response.confidence
return response
class SummaryResponseMiddleware(
AgentMiddleware[AgentState[SummaryResult], ContextT, SummaryResult]
):
"""Middleware with correctly specified SummaryResult response type."""
def wrap_model_call(
self,
request: ModelRequest[ContextT],
handler: Callable[[ModelRequest[ContextT]], ModelResponse[SummaryResult]],
) -> ModelResponse[SummaryResult]:
response = handler(request)
if response.structured_response is not None:
_summary: str = response.structured_response.summary
_points: list[str] = response.structured_response.key_points
return response
# =============================================================================
# 4. FULLY TYPED: Middlewares with both ContextT and ResponseT
# =============================================================================
class FullyTypedMiddleware(
AgentMiddleware[AgentState[AnalysisResult], UserContext, AnalysisResult]
):
"""Middleware with both ContextT and ResponseT fully specified."""
def wrap_model_call(
self,
request: ModelRequest[UserContext],
handler: Callable[[ModelRequest[UserContext]], ModelResponse[AnalysisResult]],
) -> ModelResponse[AnalysisResult]:
# Access context with full type safety
_user_id: str = request.runtime.context["user_id"]
response = handler(request)
# Access structured response with full type safety
if response.structured_response is not None:
_sentiment: str = response.structured_response.sentiment
return response
# =============================================================================
# 5. FLEXIBLE MIDDLEWARE: Works with any ContextT/ResponseT using Generic
# =============================================================================
class FlexibleMiddleware(AgentMiddleware[AgentState[ResponseT], ContextT, ResponseT]):
"""Middleware that works with any ContextT and ResponseT."""
def wrap_model_call(
self,
request: ModelRequest[ContextT],
handler: Callable[[ModelRequest[ContextT]], ModelResponse[ResponseT]],
) -> ModelResponse[ResponseT]:
# Can't access specific fields, but works with any schemas
_ = request.runtime
return handler(request)
# =============================================================================
# 6. CREATE_AGENT INTEGRATION TESTS
# =============================================================================
@pytest.fixture
def fake_model() -> GenericFakeChatModel:
"""Create a fake model for testing."""
return GenericFakeChatModel(messages=iter([AIMessage(content="Hello")]))
def test_create_agent_no_context_schema(fake_model: GenericFakeChatModel) -> None:
"""Backwards compatible: No context_schema means ContextT=None."""
agent: CompiledStateGraph[Any, None, Any, Any] = create_agent(
model=fake_model,
middleware=[
BackwardsCompatibleMiddleware(),
BackwardsCompatibleMiddleware2(),
backwards_compatible_decorator,
],
# No context_schema - backwards compatible
)
assert agent is not None
def test_create_agent_with_user_context(fake_model: GenericFakeChatModel) -> None:
"""Typed: context_schema=UserContext requires matching middleware."""
agent: CompiledStateGraph[Any, UserContext, Any, Any] = create_agent(
model=fake_model,
middleware=[UserContextMiddleware()], # Matches UserContext
context_schema=UserContext,
)
assert agent is not None
def test_create_agent_with_session_context(fake_model: GenericFakeChatModel) -> None:
"""Typed: context_schema=SessionContext requires matching middleware."""
agent: CompiledStateGraph[Any, SessionContext, Any, Any] = create_agent(
model=fake_model,
middleware=[SessionContextMiddleware()], # Matches SessionContext
context_schema=SessionContext,
)
assert agent is not None
def test_create_agent_with_flexible_middleware(fake_model: GenericFakeChatModel) -> None:
"""Flexible middleware works with any context_schema."""
# With UserContext
agent1: CompiledStateGraph[Any, UserContext, Any, Any] = create_agent(
model=fake_model,
middleware=[FlexibleMiddleware[UserContext, Any]()],
context_schema=UserContext,
)
assert agent1 is not None
# With SessionContext
agent2: CompiledStateGraph[Any, SessionContext, Any, Any] = create_agent(
model=fake_model,
middleware=[FlexibleMiddleware[SessionContext, Any]()],
context_schema=SessionContext,
)
assert agent2 is not None
def test_create_agent_with_response_middleware(fake_model: GenericFakeChatModel) -> None:
"""Middleware with ResponseT works with response_format."""
agent = create_agent(
model=fake_model,
middleware=[AnalysisResponseMiddleware()],
response_format=AnalysisResult,
)
assert agent is not None
def test_create_agent_fully_typed(fake_model: GenericFakeChatModel) -> None:
"""Fully typed middleware with both ContextT and ResponseT."""
agent = create_agent(
model=fake_model,
middleware=[FullyTypedMiddleware()],
context_schema=UserContext,
response_format=AnalysisResult,
)
assert agent is not None
# =============================================================================
# 7. ASYNC VARIANTS
# =============================================================================
class AsyncUserContextMiddleware(AgentMiddleware[AgentState[Any], UserContext, Any]):
"""Async middleware with correctly typed ContextT."""
async def abefore_model(
self, state: AgentState[Any], runtime: Runtime[UserContext]
) -> dict[str, Any] | None:
_user_name: str = runtime.context["user_name"]
return None
async def awrap_model_call(
self,
request: ModelRequest[UserContext],
handler: Callable[[ModelRequest[UserContext]], Awaitable[ModelResponse[Any]]],
) -> ModelResponse[Any]:
_user_id: str = request.runtime.context["user_id"]
return await handler(request)
class AsyncResponseMiddleware(
AgentMiddleware[AgentState[AnalysisResult], ContextT, AnalysisResult]
):
"""Async middleware with correctly typed ResponseT."""
async def awrap_model_call(
self,
request: ModelRequest[ContextT],
handler: Callable[[ModelRequest[ContextT]], Awaitable[ModelResponse[AnalysisResult]]],
) -> ModelResponse[AnalysisResult]:
response = await handler(request)
if response.structured_response is not None:
_sentiment: str = response.structured_response.sentiment
return response
def test_async_middleware_with_context(fake_model: GenericFakeChatModel) -> None:
"""Async middleware with typed context."""
agent: CompiledStateGraph[Any, UserContext, Any, Any] = create_agent(
model=fake_model,
middleware=[AsyncUserContextMiddleware()],
context_schema=UserContext,
)
assert agent is not None
def test_async_middleware_with_response(fake_model: GenericFakeChatModel) -> None:
"""Async middleware with typed response."""
agent = create_agent(
model=fake_model,
middleware=[AsyncResponseMiddleware()],
response_format=AnalysisResult,
)
assert agent is not None
# =============================================================================
# 8. MODEL_REQUEST AND MODEL_RESPONSE TESTS
# =============================================================================
def test_model_request_preserves_context_type() -> None:
"""Test that ModelRequest.override() preserves ContextT."""
request: ModelRequest[UserContext] = ModelRequest(
model=None, # type: ignore[arg-type]
messages=[HumanMessage(content="test")],
runtime=None,
)
# Override should preserve the type parameter
new_request: ModelRequest[UserContext] = request.override(
messages=[HumanMessage(content="updated")]
)
assert type(request) is type(new_request)
def test_model_request_backwards_compatible() -> None:
"""Test that ModelRequest can be instantiated without type params."""
request = ModelRequest(
model=None, # type: ignore[arg-type]
messages=[HumanMessage(content="test")],
)
assert request.messages[0].content == "test"
def test_model_request_explicit_none() -> None:
"""Test ModelRequest[None] is same as unparameterized ModelRequest."""
request1: ModelRequest[None] = ModelRequest(
model=None, # type: ignore[arg-type]
messages=[HumanMessage(content="test")],
)
request2: ModelRequest = ModelRequest(
model=None, # type: ignore[arg-type]
messages=[HumanMessage(content="test")],
)
assert type(request1) is type(request2)
def test_model_response_with_response_type() -> None:
"""Test that ModelResponse preserves ResponseT."""
response: ModelResponse[AnalysisResult] = ModelResponse(
result=[AIMessage(content="test")],
structured_response=AnalysisResult(sentiment="positive", confidence=0.9),
)
# Type checker knows structured_response is AnalysisResult | None
if response.structured_response is not None:
_sentiment: str = response.structured_response.sentiment
_confidence: float = response.structured_response.confidence
def test_model_response_without_structured() -> None:
"""Test ModelResponse without structured response."""
response: ModelResponse[Any] = ModelResponse(
result=[AIMessage(content="test")],
structured_response=None,
)
assert response.structured_response is None
def test_model_response_backwards_compatible() -> None:
"""Test that ModelResponse can be instantiated without type params."""
response = ModelResponse(
result=[AIMessage(content="test")],
)
assert response.structured_response is None
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/tests/unit_tests/agents/middleware_typing/test_middleware_typing.py",
"license": "MIT License",
"lines": 345,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/langchain_v1/tests/unit_tests/agents/test_fetch_last_ai_and_tool_messages.py | """Unit tests for _fetch_last_ai_and_tool_messages helper function.
These tests verify that the helper function correctly handles edge cases,
including the scenario where no AIMessage exists in the message list
(fixes issue #34792).
"""
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage, ToolMessage
from langchain.agents.factory import _fetch_last_ai_and_tool_messages
def test_fetch_last_ai_and_tool_messages_normal() -> None:
"""Test normal case with AIMessage and subsequent ToolMessages."""
messages = [
HumanMessage(content="Hello"),
AIMessage(content="Hi there!", tool_calls=[{"name": "test", "id": "1", "args": {}}]),
ToolMessage(content="Tool result", tool_call_id="1"),
]
ai_msg, tool_msgs = _fetch_last_ai_and_tool_messages(messages)
assert ai_msg is not None
assert isinstance(ai_msg, AIMessage)
assert ai_msg.content == "Hi there!"
assert len(tool_msgs) == 1
assert tool_msgs[0].content == "Tool result"
def test_fetch_last_ai_and_tool_messages_multiple_ai() -> None:
"""Test that the last AIMessage is returned when multiple exist."""
messages = [
HumanMessage(content="First question"),
AIMessage(content="First answer", id="ai1"),
HumanMessage(content="Second question"),
AIMessage(content="Second answer", id="ai2"),
]
ai_msg, tool_msgs = _fetch_last_ai_and_tool_messages(messages)
assert ai_msg is not None
assert isinstance(ai_msg, AIMessage)
assert ai_msg.content == "Second answer"
assert ai_msg.id == "ai2"
assert len(tool_msgs) == 0
def test_fetch_last_ai_and_tool_messages_no_ai_message() -> None:
"""Test handling when no AIMessage exists in messages.
This is the edge case that caused issue #34792 - UnboundLocalError
when using RemoveMessage(id=REMOVE_ALL_MESSAGES) to clear thread messages.
The function now returns None for the AIMessage, allowing callers to
handle this edge case explicitly.
"""
messages = [
HumanMessage(content="Hello"),
SystemMessage(content="You are a helpful assistant"),
]
ai_msg, tool_msgs = _fetch_last_ai_and_tool_messages(messages)
# Should return None when no AIMessage is found
assert ai_msg is None
assert len(tool_msgs) == 0
def test_fetch_last_ai_and_tool_messages_empty_list() -> None:
"""Test handling of empty messages list.
This can occur after RemoveMessage(id=REMOVE_ALL_MESSAGES) clears all messages.
"""
messages: list = []
ai_msg, tool_msgs = _fetch_last_ai_and_tool_messages(messages)
# Should return None when no AIMessage is found
assert ai_msg is None
assert len(tool_msgs) == 0
def test_fetch_last_ai_and_tool_messages_only_human_messages() -> None:
"""Test handling when only HumanMessages exist."""
messages = [
HumanMessage(content="Hello"),
HumanMessage(content="Are you there?"),
]
ai_msg, tool_msgs = _fetch_last_ai_and_tool_messages(messages)
assert ai_msg is None
assert len(tool_msgs) == 0
def test_fetch_last_ai_and_tool_messages_ai_without_tool_calls() -> None:
"""Test AIMessage without tool_calls returns empty tool messages list."""
messages = [
HumanMessage(content="Hello"),
AIMessage(content="Hi! How can I help you today?"),
]
ai_msg, tool_msgs = _fetch_last_ai_and_tool_messages(messages)
assert ai_msg is not None
assert isinstance(ai_msg, AIMessage)
assert ai_msg.content == "Hi! How can I help you today?"
assert len(tool_msgs) == 0
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/tests/unit_tests/agents/test_fetch_last_ai_and_tool_messages.py",
"license": "MIT License",
"lines": 78,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/langchain_v1/tests/unit_tests/agents/middleware/core/test_dynamic_tools.py | """Tests for dynamic tool registration via middleware.
These tests verify that middleware can dynamically register and handle tools
that are not declared upfront when creating the agent.
"""
import asyncio
from collections.abc import Awaitable, Callable
from typing import Any
import pytest
from langchain_core.messages import HumanMessage, ToolCall, ToolMessage
from langchain_core.tools import tool
from langgraph.checkpoint.memory import InMemorySaver
from langgraph.types import Command
from langchain.agents.factory import create_agent
from langchain.agents.middleware.types import (
AgentMiddleware,
ModelCallResult,
ModelRequest,
ModelResponse,
ToolCallRequest,
)
from tests.unit_tests.agents.model import FakeToolCallingModel
@tool
def static_tool(value: str) -> str:
"""A static tool that is always available."""
return f"Static result: {value}"
@tool
def dynamic_tool(value: str) -> str:
"""A dynamically registered tool."""
return f"Dynamic result: {value}"
@tool
def another_dynamic_tool(x: int, y: int) -> str:
"""Another dynamically registered tool for calculations."""
return f"Sum: {x + y}"
# -----------------------------------------------------------------------------
# Middleware classes
# -----------------------------------------------------------------------------
class DynamicToolMiddleware(AgentMiddleware):
"""Middleware that dynamically adds and handles a tool (sync and async)."""
def wrap_model_call(
self,
request: ModelRequest,
handler: Callable[[ModelRequest], ModelResponse],
) -> ModelCallResult:
updated = request.override(tools=[*request.tools, dynamic_tool])
return handler(updated)
async def awrap_model_call(
self,
request: ModelRequest,
handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
) -> ModelCallResult:
updated = request.override(tools=[*request.tools, dynamic_tool])
return await handler(updated)
def wrap_tool_call(
self,
request: ToolCallRequest,
handler: Callable[[ToolCallRequest], ToolMessage | Command[Any]],
) -> ToolMessage | Command[Any]:
if request.tool_call["name"] == "dynamic_tool":
return handler(request.override(tool=dynamic_tool))
return handler(request)
async def awrap_tool_call(
self,
request: ToolCallRequest,
handler: Callable[[ToolCallRequest], Awaitable[ToolMessage | Command[Any]]],
) -> ToolMessage | Command[Any]:
if request.tool_call["name"] == "dynamic_tool":
return await handler(request.override(tool=dynamic_tool))
return await handler(request)
class MultipleDynamicToolsMiddleware(AgentMiddleware):
"""Middleware that dynamically adds multiple tools (sync and async)."""
def wrap_model_call(
self,
request: ModelRequest,
handler: Callable[[ModelRequest], ModelResponse],
) -> ModelCallResult:
updated = request.override(tools=[*request.tools, dynamic_tool, another_dynamic_tool])
return handler(updated)
async def awrap_model_call(
self,
request: ModelRequest,
handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
) -> ModelCallResult:
updated = request.override(tools=[*request.tools, dynamic_tool, another_dynamic_tool])
return await handler(updated)
def _handle_tool(self, request: ToolCallRequest) -> ToolCallRequest | None:
"""Return updated request if this is a dynamic tool, else None."""
tool_name = request.tool_call["name"]
if tool_name == "dynamic_tool":
return request.override(tool=dynamic_tool)
if tool_name == "another_dynamic_tool":
return request.override(tool=another_dynamic_tool)
return None
def wrap_tool_call(
self,
request: ToolCallRequest,
handler: Callable[[ToolCallRequest], ToolMessage | Command[Any]],
) -> ToolMessage | Command[Any]:
updated = self._handle_tool(request)
return handler(updated or request)
async def awrap_tool_call(
self,
request: ToolCallRequest,
handler: Callable[[ToolCallRequest], Awaitable[ToolMessage | Command[Any]]],
) -> ToolMessage | Command[Any]:
updated = self._handle_tool(request)
return await handler(updated or request)
class DynamicToolMiddlewareWithoutHandler(AgentMiddleware):
"""Middleware that adds a dynamic tool but doesn't handle it."""
def wrap_model_call(
self,
request: ModelRequest,
handler: Callable[[ModelRequest], ModelResponse],
) -> ModelCallResult:
updated = request.override(tools=[*request.tools, dynamic_tool])
return handler(updated)
async def awrap_model_call(
self,
request: ModelRequest,
handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
) -> ModelCallResult:
updated = request.override(tools=[*request.tools, dynamic_tool])
return await handler(updated)
class ConditionalDynamicToolMiddleware(AgentMiddleware):
"""Middleware that conditionally adds a tool based on state (sync and async)."""
def _should_add_tool(self, request: ModelRequest) -> bool:
messages = request.state.get("messages", [])
return messages and "calculator" in str(messages[-1].content).lower()
def wrap_model_call(
self,
request: ModelRequest,
handler: Callable[[ModelRequest], ModelResponse],
) -> ModelCallResult:
if self._should_add_tool(request):
request = request.override(tools=[*request.tools, another_dynamic_tool])
return handler(request)
async def awrap_model_call(
self,
request: ModelRequest,
handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
) -> ModelCallResult:
if self._should_add_tool(request):
request = request.override(tools=[*request.tools, another_dynamic_tool])
return await handler(request)
def wrap_tool_call(
self,
request: ToolCallRequest,
handler: Callable[[ToolCallRequest], ToolMessage | Command[Any]],
) -> ToolMessage | Command[Any]:
if request.tool_call["name"] == "another_dynamic_tool":
return handler(request.override(tool=another_dynamic_tool))
return handler(request)
async def awrap_tool_call(
self,
request: ToolCallRequest,
handler: Callable[[ToolCallRequest], Awaitable[ToolMessage | Command[Any]]],
) -> ToolMessage | Command[Any]:
if request.tool_call["name"] == "another_dynamic_tool":
return await handler(request.override(tool=another_dynamic_tool))
return await handler(request)
# -----------------------------------------------------------------------------
# Helper functions
# -----------------------------------------------------------------------------
def get_tool_messages(result: dict[str, Any]) -> list[ToolMessage]:
"""Extract ToolMessage objects from agent result."""
return [m for m in result["messages"] if isinstance(m, ToolMessage)]
async def invoke_agent(agent: Any, message: str, *, use_async: bool) -> dict[str, Any]:
"""Invoke agent synchronously or asynchronously based on flag."""
input_data = {"messages": [HumanMessage(message)]}
config = {"configurable": {"thread_id": "test"}}
if use_async:
return await agent.ainvoke(input_data, config)
# Run sync invoke in thread pool to avoid blocking the event loop
return await asyncio.to_thread(agent.invoke, input_data, config)
# -----------------------------------------------------------------------------
# Tests
# -----------------------------------------------------------------------------
@pytest.mark.parametrize("use_async", [False, True])
@pytest.mark.parametrize(
"tools",
[
pytest.param([static_tool], id="with_static_tools"),
pytest.param([], id="without_static_tools"),
pytest.param(None, id="with_none_tools"),
],
)
async def test_dynamic_tool_basic(*, use_async: bool, tools: list[Any] | None) -> None:
"""Test dynamic tool registration with various static tool configurations."""
model = FakeToolCallingModel(
tool_calls=[
[ToolCall(name="dynamic_tool", args={"value": "test"}, id="1")],
[],
]
)
agent = create_agent(
model=model,
tools=tools, # type: ignore[arg-type]
middleware=[DynamicToolMiddleware()],
checkpointer=InMemorySaver(),
)
result = await invoke_agent(agent, "Use the dynamic tool", use_async=use_async)
tool_messages = get_tool_messages(result)
assert len(tool_messages) == 1
assert tool_messages[0].name == "dynamic_tool"
assert "Dynamic result: test" in tool_messages[0].content
@pytest.mark.parametrize("use_async", [False, True])
async def test_multiple_dynamic_tools_with_static(*, use_async: bool) -> None:
"""Test multiple dynamic tools and mixing with static tool calls."""
model = FakeToolCallingModel(
tool_calls=[
[
ToolCall(name="static_tool", args={"value": "static-call"}, id="1"),
ToolCall(name="dynamic_tool", args={"value": "first"}, id="2"),
ToolCall(name="another_dynamic_tool", args={"x": 5, "y": 3}, id="3"),
],
[],
]
)
agent = create_agent(
model=model,
tools=[static_tool],
middleware=[MultipleDynamicToolsMiddleware()],
checkpointer=InMemorySaver(),
)
result = await invoke_agent(agent, "Use all tools", use_async=use_async)
tool_messages = get_tool_messages(result)
assert len(tool_messages) == 3
tool_results = {m.name: m.content for m in tool_messages}
assert "Static result: static-call" in tool_results["static_tool"]
assert "Dynamic result: first" in tool_results["dynamic_tool"]
assert "Sum: 8" in tool_results["another_dynamic_tool"]
@pytest.mark.parametrize("use_async", [False, True])
@pytest.mark.parametrize(
"tools",
[
pytest.param([static_tool], id="with_static_tools"),
pytest.param([], id="without_static_tools"),
],
)
async def test_dynamic_tool_without_handler_raises_error(
*, use_async: bool, tools: list[Any]
) -> None:
"""Test that a helpful error is raised when dynamic tool is not handled."""
model = FakeToolCallingModel(
tool_calls=[
[ToolCall(name="dynamic_tool", args={"value": "test"}, id="1")],
[],
]
)
agent = create_agent(
model=model,
tools=tools,
middleware=[DynamicToolMiddlewareWithoutHandler()],
checkpointer=InMemorySaver(),
)
with pytest.raises(
ValueError,
match=r"(?s)Middleware added tools.*Unknown tools:.*dynamic_tool",
):
await invoke_agent(agent, "Use the dynamic tool", use_async=use_async)
@pytest.mark.parametrize("use_async", [False, True])
async def test_conditional_dynamic_tool(*, use_async: bool) -> None:
"""Test that dynamic tools can be conditionally added based on state."""
model = FakeToolCallingModel(
tool_calls=[
[ToolCall(name="another_dynamic_tool", args={"x": 10, "y": 20}, id="1")],
[],
]
)
agent = create_agent(
model=model,
tools=[static_tool],
middleware=[ConditionalDynamicToolMiddleware()],
checkpointer=InMemorySaver(),
)
result = await invoke_agent(agent, "I need a calculator to add numbers", use_async=use_async)
tool_messages = get_tool_messages(result)
assert len(tool_messages) == 1
assert tool_messages[0].name == "another_dynamic_tool"
assert "Sum: 30" in tool_messages[0].content
@pytest.mark.parametrize("use_async", [False, True])
async def test_dynamic_tool_chained_middleware(*, use_async: bool) -> None:
"""Test dynamic tools work with multiple middleware in chain."""
call_log: list[str] = []
class LoggingMiddleware(AgentMiddleware):
def __init__(self, label: str) -> None:
self._label = label
def wrap_model_call(
self,
request: ModelRequest,
handler: Callable[[ModelRequest], ModelResponse],
) -> ModelCallResult:
call_log.append(f"{self._label}_model")
return handler(request)
async def awrap_model_call(
self,
request: ModelRequest,
handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
) -> ModelCallResult:
call_log.append(f"{self._label}_model")
return await handler(request)
def wrap_tool_call(
self,
request: ToolCallRequest,
handler: Callable[[ToolCallRequest], ToolMessage | Command[Any]],
) -> ToolMessage | Command[Any]:
call_log.append(f"{self._label}_tool")
return handler(request)
async def awrap_tool_call(
self,
request: ToolCallRequest,
handler: Callable[[ToolCallRequest], Awaitable[ToolMessage | Command[Any]]],
) -> ToolMessage | Command[Any]:
call_log.append(f"{self._label}_tool")
return await handler(request)
model = FakeToolCallingModel(
tool_calls=[
[ToolCall(name="dynamic_tool", args={"value": "chained"}, id="1")],
[],
]
)
agent = create_agent(
model=model,
tools=[static_tool],
middleware=[LoggingMiddleware("first"), DynamicToolMiddleware()],
checkpointer=InMemorySaver(),
)
result = await invoke_agent(agent, "Use the dynamic tool", use_async=use_async)
tool_messages = get_tool_messages(result)
assert len(tool_messages) == 1
assert tool_messages[0].name == "dynamic_tool"
# Verify middleware chain was called
assert "first_model" in call_log
assert "first_tool" in call_log
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/tests/unit_tests/agents/middleware/core/test_dynamic_tools.py",
"license": "MIT License",
"lines": 333,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/core/scripts/check_version.py | """Check version consistency between `pyproject.toml` and `version.py`.
This script validates that the version defined in pyproject.toml matches the `VERSION`
variable in `langchain_core/version.py`. Intended for use as a pre-commit hook to
prevent version mismatches.
"""
import re
import sys
from pathlib import Path
def get_pyproject_version(pyproject_path: Path) -> str | None:
"""Extract version from `pyproject.toml`."""
content = pyproject_path.read_text(encoding="utf-8")
match = re.search(r'^version\s*=\s*"([^"]+)"', content, re.MULTILINE)
return match.group(1) if match else None
def get_version_py_version(version_path: Path) -> str | None:
"""Extract `VERSION` from `version.py`."""
content = version_path.read_text(encoding="utf-8")
match = re.search(r'^VERSION\s*=\s*"([^"]+)"', content, re.MULTILINE)
return match.group(1) if match else None
def main() -> int:
"""Validate version consistency."""
script_dir = Path(__file__).parent
package_dir = script_dir.parent
pyproject_path = package_dir / "pyproject.toml"
version_path = package_dir / "langchain_core" / "version.py"
if not pyproject_path.exists():
print(f"Error: {pyproject_path} not found")
return 1
if not version_path.exists():
print(f"Error: {version_path} not found")
return 1
pyproject_version = get_pyproject_version(pyproject_path)
version_py_version = get_version_py_version(version_path)
if pyproject_version is None:
print("Error: Could not find version in pyproject.toml")
return 1
if version_py_version is None:
print("Error: Could not find VERSION in langchain_core/version.py")
return 1
if pyproject_version != version_py_version:
print("Error: Version mismatch detected!")
print(f" pyproject.toml: {pyproject_version}")
print(f" langchain_core/version.py: {version_py_version}")
return 1
print(f"Version check passed: {pyproject_version}")
return 0
if __name__ == "__main__":
sys.exit(main())
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/core/scripts/check_version.py",
"license": "MIT License",
"lines": 47,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain_v1/scripts/check_version.py | """Check version consistency between pyproject.toml and __init__.py.
This script validates that the version defined in pyproject.toml matches
the __version__ variable in langchain/__init__.py. Intended for use as
a pre-commit hook to prevent version mismatches.
"""
import re
import sys
from pathlib import Path
def get_pyproject_version(pyproject_path: Path) -> str | None:
"""Extract version from pyproject.toml."""
content = pyproject_path.read_text(encoding="utf-8")
match = re.search(r'^version\s*=\s*"([^"]+)"', content, re.MULTILINE)
return match.group(1) if match else None
def get_init_version(init_path: Path) -> str | None:
"""Extract __version__ from __init__.py."""
content = init_path.read_text(encoding="utf-8")
match = re.search(r'^__version__\s*=\s*"([^"]+)"', content, re.MULTILINE)
return match.group(1) if match else None
def main() -> int:
"""Validate version consistency."""
script_dir = Path(__file__).parent
package_dir = script_dir.parent
pyproject_path = package_dir / "pyproject.toml"
init_path = package_dir / "langchain" / "__init__.py"
if not pyproject_path.exists():
print(f"Error: {pyproject_path} not found")
return 1
if not init_path.exists():
print(f"Error: {init_path} not found")
return 1
pyproject_version = get_pyproject_version(pyproject_path)
init_version = get_init_version(init_path)
if pyproject_version is None:
print("Error: Could not find version in pyproject.toml")
return 1
if init_version is None:
print("Error: Could not find __version__ in langchain/__init__.py")
return 1
if pyproject_version != init_version:
print("Error: Version mismatch detected!")
print(f" pyproject.toml: {pyproject_version}")
print(f" langchain/__init__.py: {init_version}")
return 1
print(f"Version check passed: {pyproject_version}")
return 0
if __name__ == "__main__":
sys.exit(main())
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/scripts/check_version.py",
"license": "MIT License",
"lines": 47,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain_v1/tests/unit_tests/agents/test_kwargs_tool_runtime_injection.py | """Test that config/runtime in args_schema aren't injected to **kwargs functions."""
from __future__ import annotations
from typing import Any
from langchain_core.messages import HumanMessage, ToolMessage
from langchain_core.tools import StructuredTool
from pydantic import BaseModel, Field
from langchain.agents import create_agent
from tests.unit_tests.agents.model import FakeToolCallingModel
class ArgsSchema(BaseModel):
"""Args schema with config and runtime fields."""
query: str = Field(description="The query")
config: dict | None = Field(default=None)
runtime: dict | None = Field(default=None)
def test_config_and_runtime_not_injected_to_kwargs() -> None:
"""Config/runtime in args_schema are NOT injected when not in function signature."""
captured: dict[str, Any] = {}
def tool_func(**kwargs: Any) -> str:
"""Tool with only **kwargs."""
captured["keys"] = list(kwargs.keys())
captured["config"] = kwargs.get("config")
captured["runtime"] = kwargs.get("runtime")
captured["query"] = kwargs.get("query")
return f"query={kwargs.get('query')}"
tool = StructuredTool.from_function(
func=tool_func,
name="test_tool",
description="Test tool",
args_schema=ArgsSchema.model_json_schema(),
)
agent = create_agent(
model=FakeToolCallingModel(
tool_calls=[[{"name": "test_tool", "args": {"query": "test"}, "id": "c1"}], []]
),
tools=[tool],
system_prompt="",
)
result = agent.invoke({"messages": [HumanMessage("hi")]})
tool_msgs = [m for m in result["messages"] if isinstance(m, ToolMessage)]
assert len(tool_msgs) == 1
assert tool_msgs[0].content == "query=test"
# Only query passed - config/runtime NOT injected since not in function signature
assert captured["keys"] == ["query"]
assert captured["query"] == "test"
assert captured["config"] is None
assert captured["runtime"] is None
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/tests/unit_tests/agents/test_kwargs_tool_runtime_injection.py",
"license": "MIT License",
"lines": 45,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/langchain_v1/tests/unit_tests/test_version.py | """Test that package version is consistent across configuration files."""
from pathlib import Path
import toml
import langchain
def test_version_matches_pyproject() -> None:
"""Verify that __version__ in __init__.py matches version in pyproject.toml."""
# Get the version from the package __init__.py
init_version = langchain.__version__
# Read the version from pyproject.toml
pyproject_path = Path(__file__).parent.parent.parent / "pyproject.toml"
with pyproject_path.open() as f:
pyproject_data = toml.load(f)
pyproject_version = pyproject_data["project"]["version"]
# Assert they match
assert init_version == pyproject_version, (
f"Version mismatch: __init__.py has '{init_version}' but "
f"pyproject.toml has '{pyproject_version}'. "
f"Please update langchain/__init__.py to match pyproject.toml."
)
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/tests/unit_tests/test_version.py",
"license": "MIT License",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/core/tests/unit_tests/utils/test_formatting.py | """Tests for langchain_core.utils.formatting."""
import pytest
from langchain_core.utils.formatting import StrictFormatter, formatter
class TestStrictFormatter:
"""Tests for the `StrictFormatter` class."""
def test_vformat_with_keyword_args(self) -> None:
"""Test that `vformat` works with keyword arguments."""
fmt = StrictFormatter()
result = fmt.vformat("Hello, {name}!", [], {"name": "World"})
assert result == "Hello, World!"
def test_vformat_with_multiple_keyword_args(self) -> None:
"""Test `vformat` with multiple keyword arguments."""
fmt = StrictFormatter()
result = fmt.vformat(
"{greeting}, {name}! You have {count} messages.",
[],
{"greeting": "Hello", "name": "Alice", "count": 5},
)
assert result == "Hello, Alice! You have 5 messages."
def test_vformat_with_empty_string(self) -> None:
"""Test `vformat` with empty format string."""
fmt = StrictFormatter()
result = fmt.vformat("", [], {})
assert result == ""
def test_vformat_with_no_placeholders(self) -> None:
"""Test `vformat` with no placeholders in format string."""
fmt = StrictFormatter()
result = fmt.vformat("Hello, World!", [], {})
assert result == "Hello, World!"
def test_vformat_raises_on_positional_args(self) -> None:
"""Test that `vformat` raises `ValueError` when positional args are provided."""
fmt = StrictFormatter()
with pytest.raises(
ValueError,
match=r"No arguments should be provided, "
r"everything should be passed as keyword arguments\.",
):
fmt.vformat("{}", ["arg"], {})
def test_vformat_raises_on_multiple_positional_args(self) -> None:
"""Test that `vformat` raises `ValueError` with multiple positional args."""
fmt = StrictFormatter()
with pytest.raises(ValueError, match=r"No arguments should be provided"):
fmt.vformat("{} {}", ["arg1", "arg2"], {})
def test_vformat_with_special_characters(self) -> None:
"""Test `vformat` with special characters in values."""
fmt = StrictFormatter()
result = fmt.vformat("{text}", [], {"text": "Hello\nWorld\t!"})
assert result == "Hello\nWorld\t!"
def test_vformat_with_unicode(self) -> None:
"""Test `vformat` with unicode characters."""
fmt = StrictFormatter()
result = fmt.vformat(
"{emoji} {text}", [], {"emoji": "🎉", "text": "こんにちは"}
)
assert result == "🎉 こんにちは"
def test_vformat_with_format_spec(self) -> None:
"""Test `vformat` with format specifications."""
fmt = StrictFormatter()
result = fmt.vformat("{num:.2f}", [], {"num": 3.14159})
assert result == "3.14"
def test_vformat_with_nested_braces(self) -> None:
"""Test `vformat` with escaped braces."""
fmt = StrictFormatter()
result = fmt.vformat("{{literal}} {var}", [], {"var": "value"})
assert result == "{literal} value"
def test_validate_input_variables_success(self) -> None:
"""Test that `validate_input_variables` succeeds with valid input."""
fmt = StrictFormatter()
# Should not raise
fmt.validate_input_variables("{name} {age}", ["name", "age"])
def test_validate_input_variables_with_extra_variables(self) -> None:
"""Test `validate_input_variables` with extra variables (should succeed)."""
fmt = StrictFormatter()
# Extra variables are allowed
fmt.validate_input_variables("{name}", ["name", "extra"])
def test_validate_input_variables_with_missing_variable(self) -> None:
"""Test `validate_input_variables` raises with missing variable."""
fmt = StrictFormatter()
with pytest.raises(KeyError):
fmt.validate_input_variables("{name} {missing}", ["name"])
def test_validate_input_variables_empty_format(self) -> None:
"""Test `validate_input_variables` with empty format string."""
fmt = StrictFormatter()
# Should not raise
fmt.validate_input_variables("", [])
def test_validate_input_variables_no_placeholders(self) -> None:
"""Test `validate_input_variables` with no placeholders."""
fmt = StrictFormatter()
# Should not raise
fmt.validate_input_variables("Hello, World!", [])
class TestFormatterSingleton:
"""Tests for the formatter singleton instance."""
def test_formatter_is_strict_formatter(self) -> None:
"""Test that the formatter singleton is a `StrictFormatter` instance."""
assert isinstance(formatter, StrictFormatter)
def test_formatter_format_works(self) -> None:
"""Test that the formatter singleton can format strings."""
result = formatter.format("{greeting}, {name}!", greeting="Hello", name="World")
assert result == "Hello, World!"
def test_formatter_rejects_positional_args(self) -> None:
"""Test that the formatter singleton rejects positional arguments."""
with pytest.raises(ValueError, match=r"No arguments should be provided"):
formatter.format("{}", "arg")
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/core/tests/unit_tests/utils/test_formatting.py",
"license": "MIT License",
"lines": 103,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/core/langchain_core/tracers/_compat.py | """Compatibility helpers for Pydantic v1/v2 with langsmith `Run` objects.
!!! note
The generic helpers (`pydantic_to_dict`, `pydantic_copy`) detect Pydanti version
based on the langsmith `Run` model. They're intended for langsmith objects (`Run`,
`Example`) which migrate together.
For general Pydantic v1/v2 handling, see `langchain_core.utils.pydantic`.
"""
from __future__ import annotations
from typing import Any, TypeVar
from langchain_core.tracers.schemas import Run
# Detect Pydantic version once at import time based on Run model
_RUN_IS_PYDANTIC_V2 = hasattr(Run, "model_dump")
T = TypeVar("T")
def run_to_dict(run: Run, **kwargs: Any) -> dict[str, Any]:
"""Convert run to dict, compatible with both Pydantic v1 and v2.
Args:
run: The run to convert.
**kwargs: Additional arguments passed to `model_dump`/`dict`.
Returns:
Dictionary representation of the run.
"""
if _RUN_IS_PYDANTIC_V2:
return run.model_dump(**kwargs)
return run.dict(**kwargs) # type: ignore[deprecated]
def run_copy(run: Run, **kwargs: Any) -> Run:
"""Copy run, compatible with both Pydantic v1 and v2.
Args:
run: The run to copy.
**kwargs: Additional arguments passed to `model_copy`/`copy`.
Returns:
A copy of the run.
"""
if _RUN_IS_PYDANTIC_V2:
return run.model_copy(**kwargs)
return run.copy(**kwargs) # type: ignore[deprecated]
def run_construct(**kwargs: Any) -> Run:
"""Construct run without validation, compatible with both Pydantic v1 and v2.
Args:
**kwargs: Fields to set on the run.
Returns:
A new `Run` instance constructed without validation.
"""
if _RUN_IS_PYDANTIC_V2:
return Run.model_construct(**kwargs)
return Run.construct(**kwargs) # type: ignore[deprecated]
def pydantic_to_dict(obj: Any, **kwargs: Any) -> dict[str, Any]:
"""Convert any Pydantic model to dict, compatible with both v1 and v2.
Args:
obj: The Pydantic model to convert.
**kwargs: Additional arguments passed to `model_dump`/`dict`.
Returns:
Dictionary representation of the model.
"""
if _RUN_IS_PYDANTIC_V2:
return obj.model_dump(**kwargs) # type: ignore[no-any-return]
return obj.dict(**kwargs) # type: ignore[no-any-return]
def pydantic_copy(obj: T, **kwargs: Any) -> T:
"""Copy any Pydantic model, compatible with both v1 and v2.
Args:
obj: The Pydantic model to copy.
**kwargs: Additional arguments passed to `model_copy`/`copy`.
Returns:
A copy of the model.
"""
if _RUN_IS_PYDANTIC_V2:
return obj.model_copy(**kwargs) # type: ignore[attr-defined,no-any-return]
return obj.copy(**kwargs) # type: ignore[attr-defined,no-any-return]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/core/langchain_core/tracers/_compat.py",
"license": "MIT License",
"lines": 67,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
langchain-ai/langchain:libs/partners/fireworks/tests/unit_tests/test_chat_models.py | """Unit tests for ChatFireworks."""
from __future__ import annotations
from langchain_core.messages import AIMessage
from langchain_fireworks.chat_models import _convert_dict_to_message
def test_convert_dict_to_message_with_reasoning_content() -> None:
"""Test that reasoning_content is correctly extracted from API response."""
response_dict = {
"role": "assistant",
"content": "The answer is 42.",
"reasoning_content": "Let me think about this step by step...",
}
message = _convert_dict_to_message(response_dict)
assert isinstance(message, AIMessage)
assert message.content == "The answer is 42."
assert "reasoning_content" in message.additional_kwargs
expected_reasoning = "Let me think about this step by step..."
assert message.additional_kwargs["reasoning_content"] == expected_reasoning
def test_convert_dict_to_message_without_reasoning_content() -> None:
"""Test that messages without reasoning_content work correctly."""
response_dict = {
"role": "assistant",
"content": "The answer is 42.",
}
message = _convert_dict_to_message(response_dict)
assert isinstance(message, AIMessage)
assert message.content == "The answer is 42."
assert "reasoning_content" not in message.additional_kwargs
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/partners/fireworks/tests/unit_tests/test_chat_models.py",
"license": "MIT License",
"lines": 27,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/core/langchain_core/load/_validation.py | """Validation utilities for LangChain serialization.
Provides escape-based protection against injection attacks in serialized objects. The
approach uses an allowlist design: only dicts explicitly produced by
`Serializable.to_json()` are treated as LC objects during deserialization.
## How escaping works
During serialization, plain dicts (user data) that contain an `'lc'` key are wrapped:
```python
{"lc": 1, ...} # user data that looks like LC object
# becomes:
{"__lc_escaped__": {"lc": 1, ...}}
```
During deserialization, escaped dicts are unwrapped and returned as plain dicts,
NOT instantiated as LC objects.
"""
from typing import Any
from langchain_core.load.serializable import (
Serializable,
to_json_not_implemented,
)
_LC_ESCAPED_KEY = "__lc_escaped__"
"""Sentinel key used to mark escaped user dicts during serialization.
When a plain dict contains 'lc' key (which could be confused with LC objects),
we wrap it as {"__lc_escaped__": {...original...}}.
"""
def _needs_escaping(obj: dict[str, Any]) -> bool:
"""Check if a dict needs escaping to prevent confusion with LC objects.
A dict needs escaping if:
1. It has an `'lc'` key (could be confused with LC serialization format)
2. It has only the escape key (would be mistaken for an escaped dict)
"""
return "lc" in obj or (len(obj) == 1 and _LC_ESCAPED_KEY in obj)
def _escape_dict(obj: dict[str, Any]) -> dict[str, Any]:
"""Wrap a dict in the escape marker.
Example:
```python
{"key": "value"} # becomes {"__lc_escaped__": {"key": "value"}}
```
"""
return {_LC_ESCAPED_KEY: obj}
def _is_escaped_dict(obj: dict[str, Any]) -> bool:
"""Check if a dict is an escaped user dict.
Example:
```python
{"__lc_escaped__": {...}} # is an escaped dict
```
"""
return len(obj) == 1 and _LC_ESCAPED_KEY in obj
def _serialize_value(obj: Any) -> Any:
"""Serialize a value with escaping of user dicts.
Called recursively on kwarg values to escape any plain dicts that could be confused
with LC objects.
Args:
obj: The value to serialize.
Returns:
The serialized value with user dicts escaped as needed.
"""
if isinstance(obj, Serializable):
# This is an LC object - serialize it properly (not escaped)
return _serialize_lc_object(obj)
if isinstance(obj, dict):
if not all(isinstance(k, (str, int, float, bool, type(None))) for k in obj):
# if keys are not json serializable
return to_json_not_implemented(obj)
# Check if dict needs escaping BEFORE recursing into values.
# If it needs escaping, wrap it as-is - the contents are user data that
# will be returned as-is during deserialization (no instantiation).
# This prevents re-escaping of already-escaped nested content.
if _needs_escaping(obj):
return _escape_dict(obj)
# Safe dict (no 'lc' key) - recurse into values
return {k: _serialize_value(v) for k, v in obj.items()}
if isinstance(obj, (list, tuple)):
return [_serialize_value(item) for item in obj]
if isinstance(obj, (str, int, float, bool, type(None))):
return obj
# Non-JSON-serializable object (datetime, custom objects, etc.)
return to_json_not_implemented(obj)
def _is_lc_secret(obj: Any) -> bool:
"""Check if an object is a LangChain secret marker."""
expected_num_keys = 3
return (
isinstance(obj, dict)
and obj.get("lc") == 1
and obj.get("type") == "secret"
and "id" in obj
and len(obj) == expected_num_keys
)
def _serialize_lc_object(obj: Any) -> dict[str, Any]:
"""Serialize a `Serializable` object with escaping of user data in kwargs.
Args:
obj: The `Serializable` object to serialize.
Returns:
The serialized dict with user data in kwargs escaped as needed.
Note:
Kwargs values are processed with `_serialize_value` to escape user data (like
metadata) that contains `'lc'` keys. Secret fields (from `lc_secrets`) are
skipped because `to_json()` replaces their values with secret markers.
"""
if not isinstance(obj, Serializable):
msg = f"Expected Serializable, got {type(obj)}"
raise TypeError(msg)
serialized: dict[str, Any] = dict(obj.to_json())
# Process kwargs to escape user data that could be confused with LC objects
# Skip secret fields - to_json() already converted them to secret markers
if serialized.get("type") == "constructor" and "kwargs" in serialized:
serialized["kwargs"] = {
k: v if _is_lc_secret(v) else _serialize_value(v)
for k, v in serialized["kwargs"].items()
}
return serialized
def _unescape_value(obj: Any) -> Any:
"""Unescape a value, processing escape markers in dict values and lists.
When an escaped dict is encountered (`{"__lc_escaped__": ...}`), it's
unwrapped and the contents are returned AS-IS (no further processing).
The contents represent user data that should not be modified.
For regular dicts and lists, we recurse to find any nested escape markers.
Args:
obj: The value to unescape.
Returns:
The unescaped value.
"""
if isinstance(obj, dict):
if _is_escaped_dict(obj):
# Unwrap and return the user data as-is (no further unescaping).
# The contents are user data that may contain more escape keys,
# but those are part of the user's actual data.
return obj[_LC_ESCAPED_KEY]
# Regular dict - recurse into values to find nested escape markers
return {k: _unescape_value(v) for k, v in obj.items()}
if isinstance(obj, list):
return [_unescape_value(item) for item in obj]
return obj
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/core/langchain_core/load/_validation.py",
"license": "MIT License",
"lines": 132,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
langchain-ai/langchain:libs/core/tests/unit_tests/load/test_secret_injection.py | """Tests for secret injection prevention in serialization.
Verify that user-provided data containing secret-like structures cannot be used to
extract environment variables during deserialization.
"""
import json
import os
import re
from typing import Any
from unittest import mock
import pytest
from pydantic import BaseModel
from langchain_core.documents import Document
from langchain_core.load import dumpd, dumps, load
from langchain_core.messages import AIMessage, HumanMessage
from langchain_core.outputs import ChatGeneration
SENTINEL_ENV_VAR = "TEST_SECRET_INJECTION_VAR"
"""Sentinel value that should NEVER appear in serialized output."""
SENTINEL_VALUE = "LEAKED_SECRET_MEOW_12345"
"""Sentinel value that should NEVER appear in serialized output."""
MALICIOUS_SECRET_DICT: dict[str, Any] = {
"lc": 1,
"type": "secret",
"id": [SENTINEL_ENV_VAR],
}
"""The malicious secret-like dict that tries to read the env var"""
@pytest.fixture(autouse=True)
def _set_sentinel_env_var() -> Any:
    """Set the sentinel env var for all tests in this module.

    Uses `mock.patch.dict` so the variable is removed again after each test,
    leaving the process environment untouched.
    """
    with mock.patch.dict(os.environ, {SENTINEL_ENV_VAR: SENTINEL_VALUE}):
        yield
def _assert_no_secret_leak(payload: Any) -> None:
    """Assert that serializing/deserializing payload doesn't leak the secret."""
    # Serialize, then deserialize with secrets_from_env=True — the dangerous
    # setting an attacker would rely on — and re-serialize the result.
    blob = dumps(payload)
    restored = load(blob, secrets_from_env=True)
    round_tripped = dumps(blob and restored)

    json_leaked = SENTINEL_VALUE in round_tripped
    assert not json_leaked, (
        f"Secret was leaked! Found '{SENTINEL_VALUE}' in output.\n"
        f"Original payload type: {type(payload)}\n"
        f"Reserialized output: {round_tripped[:500]}..."
    )

    repr_leaked = SENTINEL_VALUE in repr(restored)
    assert not repr_leaked, (
        f"Secret was leaked in deserialized object! Found '{SENTINEL_VALUE}'.\n"
        f"Deserialized: {restored!r}"
    )
class TestSerializableTopLevel:
    """Tests with `Serializable` objects at the top level."""

    def test_human_message_with_secret_in_content(self) -> None:
        """`HumanMessage` with secret-like dict in `content`."""
        msg = HumanMessage(
            content=[
                {"type": "text", "text": "Hello"},
                # The "text" value is a dict masquerading as an LC secret.
                {"type": "text", "text": MALICIOUS_SECRET_DICT},
            ]
        )
        _assert_no_secret_leak(msg)

    def test_human_message_with_secret_in_additional_kwargs(self) -> None:
        """`HumanMessage` with secret-like dict in `additional_kwargs`."""
        msg = HumanMessage(
            content="Hello",
            additional_kwargs={"data": MALICIOUS_SECRET_DICT},
        )
        _assert_no_secret_leak(msg)

    def test_human_message_with_secret_in_nested_additional_kwargs(self) -> None:
        """`HumanMessage` with secret-like dict nested in `additional_kwargs`."""
        msg = HumanMessage(
            content="Hello",
            additional_kwargs={"nested": {"deep": MALICIOUS_SECRET_DICT}},
        )
        _assert_no_secret_leak(msg)

    def test_human_message_with_secret_in_list_in_additional_kwargs(self) -> None:
        """`HumanMessage` with secret-like dict in a list in `additional_kwargs`."""
        msg = HumanMessage(
            content="Hello",
            additional_kwargs={"items": [MALICIOUS_SECRET_DICT]},
        )
        _assert_no_secret_leak(msg)

    def test_ai_message_with_secret_in_response_metadata(self) -> None:
        """`AIMessage` with secret-like dict in `response_metadata`."""
        msg = AIMessage(
            content="Hello",
            response_metadata={"data": MALICIOUS_SECRET_DICT},
        )
        _assert_no_secret_leak(msg)

    def test_document_with_secret_in_metadata(self) -> None:
        """Document with secret-like dict in `metadata`."""
        doc = Document(
            page_content="Hello",
            metadata={"data": MALICIOUS_SECRET_DICT},
        )
        _assert_no_secret_leak(doc)

    def test_nested_serializable_with_secret(self) -> None:
        """`AIMessage` containing `dumpd(HumanMessage)` with secret in kwargs."""
        inner = HumanMessage(
            content="Hello",
            additional_kwargs={"secret": MALICIOUS_SECRET_DICT},
        )
        # The already-serialized inner message is embedded as plain data.
        outer = AIMessage(
            content="Outer",
            additional_kwargs={"nested": [dumpd(inner)]},
        )
        _assert_no_secret_leak(outer)
class TestDictTopLevel:
    """Tests with plain dicts at the top level."""

    def test_dict_with_serializable_containing_secret(self) -> None:
        """Dict containing a `Serializable` with secret-like dict."""
        msg = HumanMessage(
            content="Hello",
            additional_kwargs={"data": MALICIOUS_SECRET_DICT},
        )
        payload = {"message": msg}
        _assert_no_secret_leak(payload)

    def test_dict_with_secret_no_serializable(self) -> None:
        """Dict with secret-like dict, no `Serializable` objects."""
        payload = {"data": MALICIOUS_SECRET_DICT}
        _assert_no_secret_leak(payload)

    def test_dict_with_nested_secret_no_serializable(self) -> None:
        """Dict with nested secret-like dict, no `Serializable` objects."""
        payload = {"outer": {"inner": MALICIOUS_SECRET_DICT}}
        _assert_no_secret_leak(payload)

    def test_dict_with_secret_in_list(self) -> None:
        """Dict with secret-like dict in a list."""
        payload = {"items": [MALICIOUS_SECRET_DICT]}
        _assert_no_secret_leak(payload)

    def test_dict_mimicking_lc_constructor_with_secret(self) -> None:
        """Dict that looks like an LC constructor containing a secret."""
        # Hand-built constructor-shaped dict: deserialization must treat the
        # nested secret marker as data, not as an env-var reference.
        payload = {
            "lc": 1,
            "type": "constructor",
            "id": ["langchain_core", "messages", "ai", "AIMessage"],
            "kwargs": {
                "content": "Hello",
                "additional_kwargs": {"secret": MALICIOUS_SECRET_DICT},
            },
        }
        _assert_no_secret_leak(payload)
class TestPydanticModelTopLevel:
    """Tests with Pydantic models (non-`Serializable`) at the top level."""

    def test_pydantic_model_with_serializable_containing_secret(self) -> None:
        """Pydantic model containing a `Serializable` with secret-like dict."""

        class MyModel(BaseModel):
            message: Any

        msg = HumanMessage(
            content="Hello",
            additional_kwargs={"data": MALICIOUS_SECRET_DICT},
        )
        payload = MyModel(message=msg)
        _assert_no_secret_leak(payload)

    def test_pydantic_model_with_secret_dict(self) -> None:
        """Pydantic model containing a secret-like dict directly."""

        class MyModel(BaseModel):
            data: dict[str, Any]

        payload = MyModel(data=MALICIOUS_SECRET_DICT)
        _assert_no_secret_leak(payload)
        # Test treatment of "parsed" in additional_kwargs
        msg = AIMessage(content=[], additional_kwargs={"parsed": payload})
        gen = ChatGeneration(message=msg)
        _assert_no_secret_leak(gen)
        # The "parsed" model must round-trip as a plain dict that can rebuild
        # an equal model instance.
        round_trip = load(dumpd(gen))
        assert MyModel(**(round_trip.message.additional_kwargs["parsed"])) == payload

    def test_pydantic_model_with_nested_secret(self) -> None:
        """Pydantic model with nested secret-like dict."""

        class MyModel(BaseModel):
            nested: dict[str, Any]

        payload = MyModel(nested={"inner": MALICIOUS_SECRET_DICT})
        _assert_no_secret_leak(payload)
class TestNonSerializableClassTopLevel:
    """Tests with classes at the top level."""

    def test_custom_class_with_serializable_containing_secret(self) -> None:
        """Custom class containing a `Serializable` with secret-like dict."""

        class MyClass:
            def __init__(self, message: Any) -> None:
                self.message = message

        msg = HumanMessage(
            content="Hello",
            additional_kwargs={"data": MALICIOUS_SECRET_DICT},
        )
        payload = MyClass(message=msg)
        # This will serialize as not_implemented, but let's verify no leak
        _assert_no_secret_leak(payload)

    def test_custom_class_with_secret_dict(self) -> None:
        """Custom class containing a secret-like dict directly."""

        class MyClass:
            def __init__(self, data: dict[str, Any]) -> None:
                self.data = data

        payload = MyClass(data=MALICIOUS_SECRET_DICT)
        _assert_no_secret_leak(payload)
class TestDumpdInKwargs:
    """Tests for the specific pattern of `dumpd()` result stored in kwargs."""

    def test_dumpd_human_message_in_ai_message_kwargs(self) -> None:
        """`AIMessage` with `dumpd(HumanMessage)` in `additional_kwargs`."""
        h = HumanMessage("Hello")
        a = AIMessage("foo", additional_kwargs={"bar": [dumpd(h)]})
        _assert_no_secret_leak(a)

    def test_dumpd_human_message_with_secret_in_ai_message_kwargs(self) -> None:
        """`AIMessage` with `dumpd(HumanMessage w/ secret)` in `additional_kwargs`."""
        h = HumanMessage(
            "Hello",
            additional_kwargs={"secret": MALICIOUS_SECRET_DICT},
        )
        a = AIMessage("foo", additional_kwargs={"bar": [dumpd(h)]})
        _assert_no_secret_leak(a)

    def test_double_dumpd_nesting(self) -> None:
        """Double nesting: `dumpd(AIMessage(dumpd(HumanMessage)))`."""
        # Each dumpd layer embeds the previous serialized form as plain data.
        h = HumanMessage(
            "Hello",
            additional_kwargs={"secret": MALICIOUS_SECRET_DICT},
        )
        a = AIMessage("foo", additional_kwargs={"bar": [dumpd(h)]})
        outer = AIMessage("outer", additional_kwargs={"nested": [dumpd(a)]})
        _assert_no_secret_leak(outer)
class TestRoundTrip:
    """Tests that verify round-trip serialization preserves data structure."""

    def test_human_message_with_secret_round_trip(self) -> None:
        """Verify secret-like dict is preserved as dict after round-trip."""
        msg = HumanMessage(
            content="Hello",
            additional_kwargs={"data": MALICIOUS_SECRET_DICT},
        )
        serialized = dumpd(msg)
        deserialized = load(serialized, secrets_from_env=True)
        # The secret-like dict should be preserved as a plain dict
        assert deserialized.additional_kwargs["data"] == MALICIOUS_SECRET_DICT
        assert isinstance(deserialized.additional_kwargs["data"], dict)

    def test_document_with_secret_round_trip(self) -> None:
        """Verify secret-like dict in `Document` metadata is preserved."""
        doc = Document(
            page_content="Hello",
            metadata={"data": MALICIOUS_SECRET_DICT},
        )
        serialized = dumpd(doc)
        # Document is not on the default allow-list, so it is passed explicitly.
        deserialized = load(
            serialized, secrets_from_env=True, allowed_objects=[Document]
        )
        # The secret-like dict should be preserved as a plain dict
        assert deserialized.metadata["data"] == MALICIOUS_SECRET_DICT
        assert isinstance(deserialized.metadata["data"], dict)

    def test_plain_dict_with_secret_round_trip(self) -> None:
        """Verify secret-like dict in plain dict is preserved."""
        payload = {"data": MALICIOUS_SECRET_DICT}
        serialized = dumpd(payload)
        deserialized = load(serialized, secrets_from_env=True)
        # The secret-like dict should be preserved as a plain dict
        assert deserialized["data"] == MALICIOUS_SECRET_DICT
        assert isinstance(deserialized["data"], dict)
class TestEscapingEfficiency:
    """Tests that escaping doesn't cause excessive nesting.

    The escape-marker counts below are upper bounds derived from the exact
    payload construction in each test; re-escaping already-escaped content
    would roughly double them.
    """

    def test_no_triple_escaping(self) -> None:
        """Verify dumpd doesn't cause triple/multiple escaping."""
        h = HumanMessage(
            "Hello",
            additional_kwargs={"bar": [MALICIOUS_SECRET_DICT]},
        )
        a = AIMessage("foo", additional_kwargs={"bar": [dumpd(h)]})
        d = dumpd(a)
        serialized = json.dumps(d)
        # Count nested escape markers -
        # should be max 2 (one for HumanMessage, one for secret)
        # Not 3+ which would indicate re-escaping of already-escaped content
        escape_count = len(re.findall(r"__lc_escaped__", serialized))
        # The HumanMessage dict gets escaped (1), the secret inside gets escaped (1)
        # Total should be 2, not 4 (which would mean triple nesting)
        assert escape_count <= 2, (
            f"Found {escape_count} escape markers, expected <= 2. "
            f"This indicates unnecessary re-escaping.\n{serialized}"
        )

    def test_double_nesting_no_quadruple_escape(self) -> None:
        """Verify double dumpd nesting doesn't explode escape markers."""
        h = HumanMessage(
            "Hello",
            additional_kwargs={"secret": MALICIOUS_SECRET_DICT},
        )
        a = AIMessage("middle", additional_kwargs={"nested": [dumpd(h)]})
        outer = AIMessage("outer", additional_kwargs={"deep": [dumpd(a)]})
        d = dumpd(outer)
        serialized = json.dumps(d)
        escape_count = len(re.findall(r"__lc_escaped__", serialized))
        # Should be:
        # outer escapes middle (1),
        # middle escapes h (1),
        # h escapes secret (1) = 3
        # Not 6+ which would indicate re-escaping
        assert escape_count <= 3, (
            f"Found {escape_count} escape markers, expected <= 3. "
            f"This indicates unnecessary re-escaping."
        )
class TestConstructorInjection:
    """Tests for constructor-type injection (not just secrets)."""

    def test_constructor_in_metadata_not_instantiated(self) -> None:
        """Verify constructor-like dict in metadata is not instantiated."""
        malicious_constructor = {
            "lc": 1,
            "type": "constructor",
            "id": ["langchain_core", "messages", "ai", "AIMessage"],
            "kwargs": {"content": "injected"},
        }
        doc = Document(
            page_content="Hello",
            metadata={"data": malicious_constructor},
        )
        serialized = dumpd(doc)
        # AIMessage is deliberately allowed: even then the embedded dict must
        # stay data rather than being instantiated.
        deserialized = load(
            serialized,
            secrets_from_env=True,
            allowed_objects=[Document, AIMessage],
        )
        # The constructor-like dict should be a plain dict, NOT an AIMessage
        assert isinstance(deserialized.metadata["data"], dict)
        assert deserialized.metadata["data"] == malicious_constructor

    def test_constructor_in_content_not_instantiated(self) -> None:
        """Verify constructor-like dict in message content is not instantiated."""
        malicious_constructor = {
            "lc": 1,
            "type": "constructor",
            "id": ["langchain_core", "messages", "human", "HumanMessage"],
            "kwargs": {"content": "injected"},
        }
        msg = AIMessage(
            content="Hello",
            additional_kwargs={"nested": malicious_constructor},
        )
        serialized = dumpd(msg)
        deserialized = load(
            serialized,
            secrets_from_env=True,
            allowed_objects=[AIMessage, HumanMessage],
        )
        # The constructor-like dict should be a plain dict, NOT a HumanMessage
        assert isinstance(deserialized.additional_kwargs["nested"], dict)
        assert deserialized.additional_kwargs["nested"] == malicious_constructor
def test_allowed_objects() -> None:
    """`load` honours the `allowed_objects` allow-list for constructors."""
    # Core object
    msg = AIMessage(content="foo")
    serialized = dumpd(msg)
    assert load(serialized) == msg
    assert load(serialized, allowed_objects=[AIMessage]) == msg
    assert load(serialized, allowed_objects="core") == msg
    # Anything outside the allow-list must be rejected, not silently skipped.
    with pytest.raises(ValueError, match="not allowed"):
        load(serialized, allowed_objects=[])
    with pytest.raises(ValueError, match="not allowed"):
        load(serialized, allowed_objects=[Document])
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/core/tests/unit_tests/load/test_secret_injection.py",
"license": "MIT License",
"lines": 343,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/core/tests/unit_tests/tracers/test_automatic_metadata.py | """Test automatic tool call count storage in tracers."""
from __future__ import annotations
from unittest.mock import MagicMock
from langchain_core.messages import AIMessage
from langchain_core.messages.tool import ToolCall
from langchain_core.outputs import ChatGeneration, LLMResult
from langchain_core.tracers.core import _TracerCore
from langchain_core.tracers.schemas import Run
class MockTracerCore(_TracerCore):
    """Minimal concrete `_TracerCore` for exercising LLM-run completion.

    Provides a no-op `_persist_run` so the abstract tracer core can be
    instantiated in tests. The redundant `__init__` that only forwarded to
    `super().__init__()` has been removed; inheriting it is equivalent.
    """

    def _persist_run(self, run: Run) -> None:
        """No-op: unit tests inspect the run object directly."""
def test_complete_llm_run_automatically_stores_tool_call_count() -> None:
    """Test that `_complete_llm_run` automatically stores tool call count."""
    tracer = MockTracerCore()
    # Minimal Run stand-in: only attributes touched during completion are set.
    run = MagicMock(spec=Run)
    run.id = "test-llm-run-id"
    run.run_type = "llm"
    run.extra = {}
    run.outputs = {}
    run.events = []
    run.end_time = None
    run.inputs = {}
    tracer.run_map[str(run.id)] = run
    # Two tool calls on a single generation -> expected count of 2.
    tool_calls = [
        ToolCall(name="search", args={"query": "test"}, id="call_1"),
        ToolCall(name="calculator", args={"expression": "2+2"}, id="call_2"),
    ]
    message = AIMessage(content="Test", tool_calls=tool_calls)
    generation = ChatGeneration(message=message)
    response = LLMResult(generations=[[generation]])
    # Complete the LLM run (this should trigger automatic metadata storage)
    completed_run = tracer._complete_llm_run(response=response, run_id=run.id)
    assert "tool_call_count" in completed_run.extra
    assert completed_run.extra["tool_call_count"] == 2
def test_complete_llm_run_handles_no_tool_calls() -> None:
    """Test that `_complete_llm_run` handles runs with no tool calls gracefully."""
    tracer = MockTracerCore()
    # Minimal Run stand-in: only attributes touched during completion are set.
    run = MagicMock(spec=Run)
    run.id = "test-llm-run-id-no-tools"
    run.run_type = "llm"
    run.extra = {}
    run.outputs = {}
    run.events = []
    run.end_time = None
    run.inputs = {}
    tracer.run_map[str(run.id)] = run
    message = AIMessage(content="No tools here")
    generation = ChatGeneration(message=message)
    response = LLMResult(generations=[[generation]])
    completed_run = tracer._complete_llm_run(response=response, run_id=run.id)
    # Verify tool call count is not stored when there are no tool calls
    assert "tool_call_count" not in completed_run.extra
def test_complete_llm_run_handles_empty_generations() -> None:
    """Test that `_complete_llm_run` handles empty generations gracefully."""
    tracer = MockTracerCore()
    # Minimal Run stand-in with just the state completion touches.
    run = MagicMock(spec=Run)
    run.configure_mock(
        id="test-llm-run-id-empty",
        run_type="llm",
        extra={},
        outputs={},
        events=[],
        end_time=None,
        inputs={},
    )
    tracer.run_map[str(run.id)] = run
    # One choice slot containing zero generations.
    result = LLMResult(generations=[[]])
    completed = tracer._complete_llm_run(response=result, run_id=run.id)
    assert "tool_call_count" not in completed.extra
def test_complete_llm_run_counts_tool_calls_from_multiple_generations() -> None:
    """Test that tool calls are counted from multiple generations."""
    tracer = MockTracerCore()
    # Minimal Run stand-in: only attributes touched during completion are set.
    run = MagicMock(spec=Run)
    run.id = "test-llm-run-id-multi"
    run.run_type = "llm"
    run.extra = {}
    run.outputs = {}
    run.events = []
    run.end_time = None
    run.inputs = {}
    tracer.run_map[str(run.id)] = run
    # Create multiple generations with tool calls
    tool_calls_1 = [ToolCall(name="search", args={"query": "test"}, id="call_1")]
    tool_calls_2 = [
        ToolCall(name="calculator", args={"expression": "2+2"}, id="call_2"),
        ToolCall(name="weather", args={"location": "NYC"}, id="call_3"),
    ]
    gen1 = ChatGeneration(message=AIMessage(content="Gen1", tool_calls=tool_calls_1))
    gen2 = ChatGeneration(message=AIMessage(content="Gen2", tool_calls=tool_calls_2))
    response = LLMResult(generations=[[gen1], [gen2]])
    completed_run = tracer._complete_llm_run(response=response, run_id=run.id)
    # 1 call in gen1 + 2 calls in gen2 across both generation lists.
    assert completed_run.extra["tool_call_count"] == 3
def test_complete_llm_run_handles_null_tool_calls() -> None:
    """Test that `_complete_llm_run` handles null `tool_calls` gracefully."""
    tracer = MockTracerCore()
    # Minimal Run stand-in: only attributes touched during completion are set.
    run = MagicMock(spec=Run)
    run.id = "test-llm-run-id-null-tools"
    run.run_type = "llm"
    run.extra = {}
    run.outputs = {}
    run.events = []
    run.end_time = None
    run.inputs = {}
    tracer.run_map[str(run.id)] = run
    message = AIMessage(content="Test with null tool_calls")
    generation = ChatGeneration(message=message)
    # Bypass Pydantic validation by directly setting attribute
    # (done AFTER ChatGeneration is built so its own validation still passes).
    object.__setattr__(message, "tool_calls", None)
    response = LLMResult(generations=[[generation]])
    # Should not raise TypeError from len(None)
    completed_run = tracer._complete_llm_run(response=response, run_id=run.id)
    assert "tool_call_count" not in completed_run.extra
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/core/tests/unit_tests/tracers/test_automatic_metadata.py",
"license": "MIT License",
"lines": 113,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/partners/perplexity/langchain_perplexity/_utils.py | import os
from typing import Any
from langchain_core.utils import convert_to_secret_str
from perplexity import Perplexity
def initialize_client(values: dict[str, Any]) -> dict[str, Any]:
    """Initialize the Perplexity client.

    Resolves the API key (explicit config first, then either supported env
    var), stores it back as a `SecretStr`, and builds a `Perplexity` client
    unless the caller already supplied one (e.g. a test double).
    """
    raw_key = (
        values.get("pplx_api_key")
        or os.environ.get("PPLX_API_KEY")
        or os.environ.get("PERPLEXITY_API_KEY")
        or ""
    )
    secret = convert_to_secret_str(raw_key)
    values["pplx_api_key"] = secret
    api_key = secret.get_secret_value() if secret else None
    if not values.get("client"):
        values["client"] = Perplexity(api_key=api_key)
    return values
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/partners/perplexity/langchain_perplexity/_utils.py",
"license": "MIT License",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/partners/perplexity/langchain_perplexity/retrievers.py | from __future__ import annotations
from typing import Any, Literal
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from pydantic import Field, SecretStr, model_validator
from langchain_perplexity._utils import initialize_client
class PerplexitySearchRetriever(BaseRetriever):
    """Perplexity Search retriever.

    Wraps the Perplexity Search API: each search hit becomes a `Document`
    with the snippet as page content and title/url/dates as metadata.
    """

    # Result-shaping options forwarded to `client.search.create`.
    k: int = Field(default=10, description="Max results (1-20)")
    max_tokens: int = Field(default=25000, description="Max tokens across all results")
    max_tokens_per_page: int = Field(default=1024, description="Max tokens per page")
    country: str | None = Field(default=None, description="ISO country code")
    search_domain_filter: list[str] | None = Field(
        default=None, description="Domain filter (max 20)"
    )
    search_recency_filter: Literal["day", "week", "month", "year"] | None = None
    search_after_date: str | None = Field(
        default=None, description="Date filter (format: %m/%d/%Y)"
    )
    search_before_date: str | None = Field(
        default=None, description="Date filter (format: %m/%d/%Y)"
    )
    # SDK client, populated by the validator below; excluded from serialization.
    client: Any = Field(default=None, exclude=True)
    pplx_api_key: SecretStr = Field(default=SecretStr(""))

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: dict) -> Any:
        """Validate the environment."""
        # Resolves the API key and instantiates the client if not provided.
        return initialize_client(values)

    def _get_relevant_documents(
        self, query: str, *, run_manager: CallbackManagerForRetrieverRun
    ) -> list[Document]:
        """Run the search and map each result onto a `Document`."""
        params = {
            "query": query,
            "max_results": self.k,
            "max_tokens": self.max_tokens,
            "max_tokens_per_page": self.max_tokens_per_page,
            "country": self.country,
            "search_domain_filter": self.search_domain_filter,
            "search_recency_filter": self.search_recency_filter,
            "search_after_date": self.search_after_date,
            "search_before_date": self.search_before_date,
        }
        # Drop unset options so the SDK applies its own defaults.
        params = {k: v for k, v in params.items() if v is not None}
        response = self.client.search.create(**params)
        return [
            Document(
                page_content=result.snippet,
                metadata={
                    "title": result.title,
                    "url": result.url,
                    "date": result.date,
                    "last_updated": result.last_updated,
                },
            )
            for result in response.results
        ]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/partners/perplexity/langchain_perplexity/retrievers.py",
"license": "MIT License",
"lines": 58,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/partners/perplexity/langchain_perplexity/tools.py | from __future__ import annotations
from typing import Any, Literal
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from pydantic import Field, SecretStr, model_validator
from langchain_perplexity._utils import initialize_client
class PerplexitySearchResults(BaseTool):
    """Perplexity Search tool.

    Exposes the Perplexity Search API as a LangChain tool; results are
    returned as a list of plain dicts (title/url/snippet/dates).
    """

    name: str = "perplexity_search_results_json"
    description: str = (
        "A wrapper around Perplexity Search. "
        "Input should be a search query. "
        "Output is a JSON array of the query results"
    )
    # SDK client, populated by the validator below; excluded from serialization.
    client: Any = Field(default=None, exclude=True)
    pplx_api_key: SecretStr = Field(default=SecretStr(""))

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: dict) -> Any:
        """Validate the environment."""
        # Resolves the API key and instantiates the client if not provided.
        return initialize_client(values)

    def _run(
        self,
        query: str | list[str],
        max_results: int = 10,
        country: str | None = None,
        search_domain_filter: list[str] | None = None,
        search_recency_filter: Literal["day", "week", "month", "year"] | None = None,
        search_after_date: str | None = None,
        search_before_date: str | None = None,
        run_manager: CallbackManagerForToolRun | None = None,
    ) -> list[dict] | str:
        """Use the tool.

        Returns the list of result dicts on success, or a short error string
        on failure (so agent callers receive text instead of an exception).
        """
        try:
            params = {
                "query": query,
                "max_results": max_results,
                "country": country,
                "search_domain_filter": search_domain_filter,
                "search_recency_filter": search_recency_filter,
                "search_after_date": search_after_date,
                "search_before_date": search_before_date,
            }
            # Drop unset options so the SDK applies its own defaults.
            params = {k: v for k, v in params.items() if v is not None}
            response = self.client.search.create(**params)
            return [
                {
                    "title": result.title,
                    "url": result.url,
                    "snippet": result.snippet,
                    "date": result.date,
                    "last_updated": result.last_updated,
                }
                for result in response.results
            ]
        except Exception as e:
            # Only the exception type is surfaced, not its message — this looks
            # deliberate (avoids leaking request details); confirm before changing.
            msg = f"Perplexity search failed: {type(e).__name__}"
            return msg
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/partners/perplexity/langchain_perplexity/tools.py",
"license": "MIT License",
"lines": 58,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/partners/perplexity/langchain_perplexity/types.py | from __future__ import annotations
from typing import Literal
from pydantic import BaseModel
class UserLocation(BaseModel):
    """Approximate user location used to bias web search results."""

    latitude: float | None = None
    longitude: float | None = None
    country: str | None = None
    region: str | None = None
    city: str | None = None
class WebSearchOptions(BaseModel):
    """Options controlling Perplexity's web search behavior."""

    search_context_size: Literal["low", "medium", "high"] | None = None
    user_location: UserLocation | None = None
    search_type: Literal["fast", "pro", "auto"] | None = None
    image_search_relevance_enhanced: bool | None = None
class MediaResponseOverrides(BaseModel):
    """Per-request overrides for which media types are returned."""

    return_videos: bool | None = None
    return_images: bool | None = None
class MediaResponse(BaseModel):
    """Container for media response settings (currently only overrides)."""

    overrides: MediaResponseOverrides | None = None
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/partners/perplexity/langchain_perplexity/types.py",
"license": "MIT License",
"lines": 19,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/partners/perplexity/tests/integration_tests/test_chat_models.py | """Integration tests for ChatPerplexity."""
import os
import pytest
from langchain_core.messages import HumanMessage
from langchain_perplexity import ChatPerplexity, MediaResponse, WebSearchOptions
@pytest.mark.skipif(not os.environ.get("PPLX_API_KEY"), reason="PPLX_API_KEY not set")
class TestChatPerplexityIntegration:
    """Live-API tests for `ChatPerplexity`; skipped without `PPLX_API_KEY`."""

    def test_standard_generation(self) -> None:
        """Test standard generation."""
        chat = ChatPerplexity(model="sonar", temperature=0)
        message = HumanMessage(content="Hello! How are you?")
        response = chat.invoke([message])
        assert response.content
        assert isinstance(response.content, str)

    async def test_async_generation(self) -> None:
        """Test async generation."""
        chat = ChatPerplexity(model="sonar", temperature=0)
        message = HumanMessage(content="Hello! How are you?")
        response = await chat.ainvoke([message])
        assert response.content
        assert isinstance(response.content, str)

    def test_pro_search(self) -> None:
        """Test Pro Search (reasoning_steps extraction)."""
        # Pro search is available on sonar-pro
        chat = ChatPerplexity(
            model="sonar-pro",
            temperature=0,
            web_search_options=WebSearchOptions(search_type="pro"),
            streaming=True,
        )
        message = HumanMessage(content="Who won the 2024 US election and why?")
        # We need to collect chunks to check reasoning steps
        chunks = list(chat.stream([message]))
        full_content = "".join(c.content for c in chunks if isinstance(c.content, str))
        assert full_content
        # `reasoning_steps` is best-effort: record whether any chunk carried it,
        # but its absence is not a failure. (The previous if/else asserting
        # `True` vs `len(chunks) > 0` was a no-op; non-empty content already
        # implies at least one chunk.)
        _ = any("reasoning_steps" in c.additional_kwargs for c in chunks)
        assert len(chunks) > 0

    async def test_streaming(self) -> None:
        """Test streaming."""
        chat = ChatPerplexity(model="sonar", temperature=0)
        message = HumanMessage(content="Count to 5")
        async for chunk in chat.astream([message]):
            assert isinstance(chunk.content, str)

    def test_citations_and_search_results(self) -> None:
        """Test that citations and search results are returned."""
        chat = ChatPerplexity(model="sonar", temperature=0)
        message = HumanMessage(content="Who is the CEO of OpenAI?")
        response = chat.invoke([message])
        # Citations are usually in additional_kwargs
        assert "citations" in response.additional_kwargs
        # Search results might be there too
        # Note: presence depends on whether search was performed
        if response.additional_kwargs.get("citations"):
            assert len(response.additional_kwargs["citations"]) > 0

    def test_search_control(self) -> None:
        """Test search control parameters."""
        # Test disabled search (should complete without citations)
        chat = ChatPerplexity(model="sonar", disable_search=True)
        message = HumanMessage(content="What is 2+2?")
        response = chat.invoke([message])
        assert response.content
        # Test search classifier
        chat_classifier = ChatPerplexity(model="sonar", enable_search_classifier=True)
        response_classifier = chat_classifier.invoke([message])
        assert response_classifier.content

    def test_search_recency_filter(self) -> None:
        """Test search_recency_filter parameter."""
        chat = ChatPerplexity(model="sonar", search_recency_filter="month")
        message = HumanMessage(content="Latest AI news")
        response = chat.invoke([message])
        assert response.content

    def test_search_domain_filter(self) -> None:
        """Test search_domain_filter parameter."""
        chat = ChatPerplexity(model="sonar", search_domain_filter=["wikipedia.org"])
        message = HumanMessage(content="Python programming language")
        response = chat.invoke([message])
        # Verify citations come from wikipedia if any
        if citations := response.additional_kwargs.get("citations"):
            assert any("wikipedia.org" in c for c in citations)

    def test_media_and_metadata(self) -> None:
        """Test related questions and images."""
        chat = ChatPerplexity(
            model="sonar-pro",
            return_related_questions=True,
            return_images=True,
            # Media response overrides for video
            media_response=MediaResponse(overrides={"return_videos": True}),
        )
        message = HumanMessage(content="Apollo 11 moon landing")
        response = chat.invoke([message])
        # Check related questions
        if related := response.additional_kwargs.get("related_questions"):
            assert len(related) > 0
        # Check images
        if images := response.additional_kwargs.get("images"):
            assert len(images) > 0
        # Check videos (might not always be present but structure should handle it)
        if videos := response.additional_kwargs.get("videos"):
            assert len(videos) > 0
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/partners/perplexity/tests/integration_tests/test_chat_models.py",
"license": "MIT License",
"lines": 104,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/partners/perplexity/tests/integration_tests/test_search_api.py | """Integration tests for Perplexity Search API."""
import os
import pytest
from langchain_core.documents import Document
from langchain_perplexity import PerplexitySearchResults, PerplexitySearchRetriever
@pytest.mark.skipif(not os.environ.get("PPLX_API_KEY"), reason="PPLX_API_KEY not set")
class TestPerplexitySearchAPI:
    """Live-API tests for the Search retriever/tool; skipped without key."""

    def test_search_retriever_basic(self) -> None:
        """Test basic search with retriever."""
        retriever = PerplexitySearchRetriever(k=3)
        docs = retriever.invoke("What is the capital of France?")
        assert len(docs) > 0
        assert isinstance(docs[0], Document)
        assert "Paris" in docs[0].page_content
        assert docs[0].metadata["title"]
        assert docs[0].metadata["url"]

    def test_search_retriever_with_filters(self) -> None:
        """Test search with filters."""
        # Search for recent news (recency filter)
        retriever = PerplexitySearchRetriever(
            k=3, search_recency_filter="month", search_domain_filter=["wikipedia.org"]
        )
        docs = retriever.invoke("Python programming language")
        assert len(docs) > 0
        # Domain filter must constrain every result, not just some.
        for doc in docs:
            assert "wikipedia.org" in doc.metadata["url"]

    def test_search_tool_basic(self) -> None:
        """Test basic search with tool."""
        tool = PerplexitySearchResults(max_results=3)
        results = tool.invoke("Who won the 2024 Super Bowl?")
        # BaseTool.invoke calls _run. If return_direct is False (default),
        # it returns the output of _run, which is a list of dicts.
        assert isinstance(results, list)
        assert len(results) > 0
        assert "title" in results[0]
        assert "url" in results[0]
        assert "snippet" in results[0]

    def test_search_tool_multi_query(self) -> None:
        """Test search tool with multiple queries."""
        tool = PerplexitySearchResults(max_results=2)
        queries = ["Apple stock price", "Microsoft stock price"]
        # Pass input as dict to avoid BaseTool validation error with list
        results = tool.invoke({"query": queries})
        assert isinstance(results, list)
        # Should have results for both (combined)
        assert len(results) > 0
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/partners/perplexity/tests/integration_tests/test_search_api.py",
"license": "MIT License",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/partners/perplexity/tests/unit_tests/test_retrievers.py | from unittest.mock import MagicMock
from pytest_mock import MockerFixture
from langchain_perplexity import PerplexitySearchRetriever
def test_search_retriever_initialization() -> None:
    """Constructor stores the API key as a secret and defaults k to 10."""
    retriever = PerplexitySearchRetriever(pplx_api_key="test")
    assert retriever.k == 10
    assert retriever.pplx_api_key.get_secret_value() == "test"
def test_search_retriever_get_relevant_documents(mocker: MockerFixture) -> None:
    """A mocked Perplexity search hit is mapped onto a single Document."""
    retriever = PerplexitySearchRetriever(pplx_api_key="test")
    fake_hit = MagicMock(
        title="Test Title",
        url="http://test.com",
        snippet="Test snippet",
        date="2023-01-01",
        last_updated="2023-01-02",
    )
    fake_response = MagicMock(results=[fake_hit])
    fake_create = MagicMock(return_value=fake_response)
    mocker.patch.object(retriever.client.search, "create", fake_create)

    documents = retriever.invoke("query")

    assert len(documents) == 1
    (doc,) = documents
    assert doc.page_content == "Test snippet"
    assert doc.metadata["title"] == "Test Title"
    assert doc.metadata["url"] == "http://test.com"
    # The retriever should forward its defaults to the SDK call.
    fake_create.assert_called_once_with(
        query="query",
        max_results=10,
        max_tokens=25000,
        max_tokens_per_page=1024,
    )
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/partners/perplexity/tests/unit_tests/test_retrievers.py",
"license": "MIT License",
"lines": 30,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/partners/perplexity/tests/unit_tests/test_tools.py | from unittest.mock import MagicMock
from pytest_mock import MockerFixture
from langchain_perplexity import PerplexitySearchResults
def test_search_tool_run(mocker: MockerFixture) -> None:
    """A mocked search hit is returned by the tool as a list of dicts."""
    tool = PerplexitySearchResults(pplx_api_key="test")
    fake_hit = MagicMock(
        title="Test Title",
        url="http://test.com",
        snippet="Test snippet",
        date="2023-01-01",
        last_updated="2023-01-02",
    )
    fake_response = MagicMock(results=[fake_hit])
    fake_create = MagicMock(return_value=fake_response)
    mocker.patch.object(tool.client.search, "create", fake_create)

    output = tool.invoke("query")

    # tool.invoke returns the output of _run: a list of converted dicts.
    assert isinstance(output, list)
    assert len(output) == 1
    assert output[0]["title"] == "Test Title"
    fake_create.assert_called_once_with(query="query", max_results=10)
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/partners/perplexity/tests/unit_tests/test_tools.py",
"license": "MIT License",
"lines": 25,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/langchain_v1/tests/unit_tests/agents/test_agent_name.py | """Test agent name parameter in create_agent.
This module tests that the name parameter correctly sets .name on AIMessage outputs.
"""
from __future__ import annotations
from langchain_core.messages import (
AIMessage,
HumanMessage,
ToolCall,
)
from langchain_core.tools import tool
from langchain.agents import create_agent
from tests.unit_tests.agents.model import FakeToolCallingModel
@tool
def simple_tool(x: int) -> str:
    """Simple tool for basic tests."""
    # Docstring doubles as the tool description, so it is left untouched.
    return "Result: " + str(x)
def test_agent_name_set_on_ai_message() -> None:
    """Test that agent name is set on AIMessage when name is provided."""
    agent = create_agent(
        model=FakeToolCallingModel(tool_calls=[[]]),
        name="test_agent",
    )
    result = agent.invoke({"messages": [HumanMessage("Hello")]})
    ai_msgs = [m for m in result["messages"] if isinstance(m, AIMessage)]
    assert len(ai_msgs) == 1
    assert ai_msgs[0].name == "test_agent"
def test_agent_name_not_set_when_none() -> None:
    """Test that AIMessage.name is not set when name is not provided."""
    agent = create_agent(model=FakeToolCallingModel(tool_calls=[[]]))
    result = agent.invoke({"messages": [HumanMessage("Hello")]})
    ai_msgs = [m for m in result["messages"] if isinstance(m, AIMessage)]
    assert len(ai_msgs) == 1
    assert ai_msgs[0].name is None
def test_agent_name_on_multiple_iterations() -> None:
    """Test that agent name is set on all AIMessages in multi-turn conversation."""
    agent = create_agent(
        model=FakeToolCallingModel(
            tool_calls=[[{"args": {"x": 1}, "id": "call_1", "name": "simple_tool"}], []]
        ),
        tools=[simple_tool],
        name="multi_turn_agent",
    )
    result = agent.invoke({"messages": [HumanMessage("Call a tool")]})
    ai_msgs = [m for m in result["messages"] if isinstance(m, AIMessage)]
    # One AIMessage per model call: the tool call and the final answer.
    assert len(ai_msgs) == 2
    assert all(m.name == "multi_turn_agent" for m in ai_msgs)
async def test_agent_name_async() -> None:
    """Test that agent name is set on AIMessage in async execution."""
    agent = create_agent(
        model=FakeToolCallingModel(tool_calls=[[]]),
        name="async_agent",
    )
    result = await agent.ainvoke({"messages": [HumanMessage("Hello async")]})
    ai_msgs = [m for m in result["messages"] if isinstance(m, AIMessage)]
    assert len(ai_msgs) == 1
    assert ai_msgs[0].name == "async_agent"
async def test_agent_name_async_multiple_iterations() -> None:
    """Test that agent name is set on all AIMessages in async multi-turn."""
    agent = create_agent(
        model=FakeToolCallingModel(
            tool_calls=[[{"args": {"x": 5}, "id": "call_1", "name": "simple_tool"}], []]
        ),
        tools=[simple_tool],
        name="async_multi_agent",
    )
    result = await agent.ainvoke({"messages": [HumanMessage("Call tool async")]})
    ai_msgs = [m for m in result["messages"] if isinstance(m, AIMessage)]
    assert len(ai_msgs) == 2
    assert all(m.name == "async_multi_agent" for m in ai_msgs)
# Tests for lc_agent_name in streaming metadata
def test_lc_agent_name_in_stream_metadata() -> None:
    """Test that lc_agent_name is included in metadata when streaming with name."""
    agent = create_agent(
        model=FakeToolCallingModel(tool_calls=[[]]),
        name="streaming_agent",
    )
    seen = [
        metadata["lc_agent_name"]
        for _chunk, metadata in agent.stream(
            {"messages": [HumanMessage("Hello")]},
            stream_mode="messages",
        )
        if "lc_agent_name" in metadata
    ]
    assert seen
    assert set(seen) == {"streaming_agent"}
def test_lc_agent_name_not_in_stream_metadata_when_name_not_provided() -> None:
    """Test that lc_agent_name is not in metadata when name is not provided."""
    agent = create_agent(model=FakeToolCallingModel(tool_calls=[[]]))
    for _chunk, metadata in agent.stream(
        {"messages": [HumanMessage("Hello")]},
        stream_mode="messages",
    ):
        assert "lc_agent_name" not in metadata
def test_lc_agent_name_in_stream_metadata_multiple_iterations() -> None:
    """Test that lc_agent_name is in metadata for all stream events in multi-turn."""
    agent = create_agent(
        model=FakeToolCallingModel(
            tool_calls=[[{"args": {"x": 1}, "id": "call_1", "name": "simple_tool"}], []]
        ),
        tools=[simple_tool],
        name="multi_turn_streaming_agent",
    )
    seen = [
        metadata["lc_agent_name"]
        for _chunk, metadata in agent.stream(
            {"messages": [HumanMessage("Call a tool")]},
            stream_mode="messages",
        )
        if "lc_agent_name" in metadata
    ]
    # Messages from both iterations should carry the agent name.
    assert seen
    assert set(seen) == {"multi_turn_streaming_agent"}
async def test_lc_agent_name_in_astream_metadata() -> None:
    """Test that lc_agent_name is included in metadata when async streaming with name."""
    agent = create_agent(
        model=FakeToolCallingModel(tool_calls=[[]]),
        name="async_streaming_agent",
    )
    seen = [
        metadata["lc_agent_name"]
        async for _chunk, metadata in agent.astream(
            {"messages": [HumanMessage("Hello async")]},
            stream_mode="messages",
        )
        if "lc_agent_name" in metadata
    ]
    assert seen
    assert set(seen) == {"async_streaming_agent"}
async def test_lc_agent_name_not_in_astream_metadata_when_name_not_provided() -> None:
    """Test that lc_agent_name is not in async stream metadata when name not provided."""
    agent = create_agent(model=FakeToolCallingModel(tool_calls=[[]]))
    async for _chunk, metadata in agent.astream(
        {"messages": [HumanMessage("Hello async")]},
        stream_mode="messages",
    ):
        assert "lc_agent_name" not in metadata
async def test_lc_agent_name_in_astream_metadata_multiple_iterations() -> None:
    """Test that lc_agent_name is in metadata for all async stream events in multi-turn."""
    agent = create_agent(
        model=FakeToolCallingModel(
            tool_calls=[[{"args": {"x": 5}, "id": "call_1", "name": "simple_tool"}], []]
        ),
        tools=[simple_tool],
        name="async_multi_turn_streaming_agent",
    )
    seen = [
        metadata["lc_agent_name"]
        async for _chunk, metadata in agent.astream(
            {"messages": [HumanMessage("Call tool async")]},
            stream_mode="messages",
        )
        if "lc_agent_name" in metadata
    ]
    # Messages from both iterations should carry the agent name.
    assert seen
    assert set(seen) == {"async_multi_turn_streaming_agent"}
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/tests/unit_tests/agents/test_agent_name.py",
"license": "MIT License",
"lines": 169,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/core/tests/unit_tests/messages/block_translators/test_google_genai.py | """Tests for Google GenAI block translator."""
from langchain_core.messages.block_translators.google_genai import (
translate_grounding_metadata_to_citations,
)
def test_translate_grounding_metadata_web() -> None:
    """Test translation of web grounding metadata to citations."""
    # One web chunk plus one support segment covering the whole response text.
    grounding_metadata = {
        "grounding_chunks": [
            {
                "web": {
                    "uri": "https://example.com",
                    "title": "Example Site",
                },
                "maps": None,
            }
        ],
        "grounding_supports": [
            {
                "segment": {
                    "start_index": 0,
                    "end_index": 13,
                    "text": "Test response",
                },
                "grounding_chunk_indices": [0],
                "confidence_scores": [],
            }
        ],
        "web_search_queries": ["test query"],
    }
    citations = translate_grounding_metadata_to_citations(grounding_metadata)
    assert len(citations) == 1
    citation = citations[0]
    assert citation["type"] == "citation"
    assert citation.get("url") == "https://example.com"
    assert citation.get("title") == "Example Site"
    assert citation.get("start_index") == 0
    assert citation.get("end_index") == 13
    assert citation.get("cited_text") == "Test response"
    # Provider-specific details are nested under extras["google_ai_metadata"].
    extras = citation.get("extras", {})["google_ai_metadata"]
    assert extras["web_search_queries"] == ["test query"]
    assert extras["grounding_chunk_index"] == 0
    # place_id is only emitted for maps chunks, not web chunks.
    assert "place_id" not in extras
def test_translate_grounding_metadata_maps() -> None:
    """Test translation of maps grounding metadata to citations."""
    # Maps-only chunk: web is None, maps carries uri/title/placeId.
    grounding_metadata = {
        "grounding_chunks": [
            {
                "web": None,
                "maps": {
                    "uri": "https://maps.google.com/?cid=13100894621228039586",
                    "title": "Heaven on 7th Marketplace",
                    "placeId": "places/ChIJ0-zA1vBZwokRon0fGj-6z7U",
                },
            }
        ],
        "grounding_supports": [
            {
                "segment": {
                    "start_index": 0,
                    "end_index": 25,
                    "text": "Great Italian restaurant",
                },
                "grounding_chunk_indices": [0],
                "confidence_scores": [0.95],
            }
        ],
        "web_search_queries": [],
    }
    citations = translate_grounding_metadata_to_citations(grounding_metadata)
    assert len(citations) == 1
    citation = citations[0]
    assert citation["type"] == "citation"
    assert citation.get("url") == "https://maps.google.com/?cid=13100894621228039586"
    assert citation.get("title") == "Heaven on 7th Marketplace"
    assert citation.get("start_index") == 0
    assert citation.get("end_index") == 25
    assert citation.get("cited_text") == "Great Italian restaurant"
    extras = citation.get("extras", {})["google_ai_metadata"]
    assert extras["web_search_queries"] == []
    assert extras["grounding_chunk_index"] == 0
    assert extras["confidence_scores"] == [0.95]
    # Maps chunks surface the placeId as place_id in the extras.
    assert extras["place_id"] == "places/ChIJ0-zA1vBZwokRon0fGj-6z7U"
def test_translate_grounding_metadata_none() -> None:
    """Test translation when both web and maps are None."""
    grounding_metadata = {
        "grounding_chunks": [
            {
                "web": None,
                "maps": None,
            }
        ],
        "grounding_supports": [
            {
                "segment": {
                    "start_index": 0,
                    "end_index": 10,
                    "text": "test text",
                },
                "grounding_chunk_indices": [0],
                "confidence_scores": [],
            }
        ],
        "web_search_queries": [],
    }
    citations = translate_grounding_metadata_to_citations(grounding_metadata)
    # Should still create citation but without url/title fields when None
    assert len(citations) == 1
    citation = citations[0]
    assert citation["type"] == "citation"
    # url and title are omitted when None
    assert "url" not in citation
    assert "title" not in citation
    # Segment information is preserved regardless of chunk source.
    assert citation.get("start_index") == 0
    assert citation.get("end_index") == 10
    assert citation.get("cited_text") == "test text"
def test_translate_grounding_metadata_confidence_scores_none() -> None:
    """Test translation when confidence_scores is None (API returns this)."""
    grounding_metadata = {
        "grounding_chunks": [
            {
                "web": None,
                "maps": {
                    "uri": "https://maps.google.com/?cid=123",
                    "title": "Test Restaurant",
                    "placeId": "places/ChIJ123",
                },
            }
        ],
        "grounding_supports": [
            {
                "segment": {
                    "start_index": 0,
                    "end_index": 10,
                    "text": "test text",
                },
                "grounding_chunk_indices": [0],
                "confidence_scores": None,  # API returns None, not []
            }
        ],
        "web_search_queries": ["test query"],
    }
    citations = translate_grounding_metadata_to_citations(grounding_metadata)
    assert len(citations) == 1
    extras = citations[0].get("extras", {})["google_ai_metadata"]
    # Should convert None to empty list
    assert extras["confidence_scores"] == []
    assert isinstance(extras["confidence_scores"], list)
def test_translate_grounding_metadata_multiple_chunks() -> None:
    """Test translation with multiple grounding chunks."""
    # A single support segment referencing both a web and a maps chunk.
    grounding_metadata = {
        "grounding_chunks": [
            {
                "web": {
                    "uri": "https://example1.com",
                    "title": "Example 1",
                },
                "maps": None,
            },
            {
                "web": None,
                "maps": {
                    "uri": "https://maps.google.com/?cid=123",
                    "title": "Place 1",
                    "placeId": "places/123",
                },
            },
        ],
        "grounding_supports": [
            {
                "segment": {
                    "start_index": 0,
                    "end_index": 10,
                    "text": "First part",
                },
                "grounding_chunk_indices": [0, 1],
                "confidence_scores": [],
            }
        ],
        "web_search_queries": [],
    }
    citations = translate_grounding_metadata_to_citations(grounding_metadata)
    # Should create two citations, one for each chunk
    assert len(citations) == 2
    # First citation from web chunk
    assert citations[0].get("url") == "https://example1.com"
    assert citations[0].get("title") == "Example 1"
    assert "place_id" not in citations[0].get("extras", {})["google_ai_metadata"]
    # Second citation from maps chunk
    assert citations[1].get("url") == "https://maps.google.com/?cid=123"
    assert citations[1].get("title") == "Place 1"
    assert (
        citations[1].get("extras", {})["google_ai_metadata"]["place_id"] == "places/123"
    )
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/core/tests/unit_tests/messages/block_translators/test_google_genai.py",
"license": "MIT License",
"lines": 193,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/core/langchain_core/utils/uuid.py | """UUID utility functions.
This module exports a uuid7 function to generate monotonic, time-ordered UUIDs
for tracing and similar operations.
"""
from __future__ import annotations
import typing
from uuid import UUID
from uuid_utils.compat import uuid7 as _uuid_utils_uuid7
if typing.TYPE_CHECKING:
from uuid import UUID
_NANOS_PER_SECOND: typing.Final = 1_000_000_000
def _to_timestamp_and_nanos(nanoseconds: int) -> tuple[int, int]:
"""Split a nanosecond timestamp into seconds and remaining nanoseconds."""
seconds, nanos = divmod(nanoseconds, _NANOS_PER_SECOND)
return seconds, nanos
def uuid7(nanoseconds: int | None = None) -> UUID:
    """Generate a UUID from a Unix timestamp in nanoseconds and random bits.

    UUIDv7 objects feature monotonicity within a millisecond.

    Args:
        nanoseconds: Optional ns timestamp. If not provided, uses current time.

    Returns:
        A UUIDv7 object.
    """
    # RFC 9562 §6.2 Method 1 layout (as implemented by uuid_utils):
    #   unix_ts_ms (48) | version (4) | counter_hi (12) | variant (2)
    #   | counter_lo (30) | random (32)
    # 'counter = counter_hi | counter_lo' is a 42-bit counter with its MSB
    # forced to 0; 'random' is regenerated for every UUID. Within the same
    # millisecond the counter's LSB is incremented; on overflow the timestamp
    # advances and the counter resets to a fresh 42-bit value with MSB 0.
    #
    # Generation itself is delegated to uuid_utils.
    if nanoseconds is not None:
        secs, nanos = _to_timestamp_and_nanos(nanoseconds)
        return _uuid_utils_uuid7(timestamp=secs, nanos=nanos)
    return _uuid_utils_uuid7()


__all__ = ["uuid7"]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/core/langchain_core/utils/uuid.py",
"license": "MIT License",
"lines": 41,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/core/tests/unit_tests/utils/test_uuid_utils.py | import time
from uuid import UUID
from langchain_core.utils.uuid import uuid7
def _uuid_v7_ms(uuid_obj: UUID | str) -> int:
"""Extract milliseconds since epoch from a UUIDv7 using string layout.
UUIDv7 stores Unix time in ms in the first 12 hex chars of the canonical
string representation (48 msb bits).
"""
s = str(uuid_obj).replace("-", "")
return int(s[:12], 16)
def test_uuid7() -> None:
    """Some simple tests."""
    # The millisecond timestamp embedded in the UUID must equal the
    # supplied time truncated to milliseconds.
    now_ns = time.time_ns()
    expected_ms = now_ns // 1_000_000
    generated = str(uuid7(now_ns))
    assert _uuid_v7_ms(generated) == expected_ms
def test_monotonicity() -> None:
    """Test that UUIDs are monotonically increasing."""
    previous = ""
    for iteration in range(100_000):
        current = str(uuid7())
        # String comparison works because UUIDv7 is time-ordered lexically.
        if iteration > 0 and current <= previous:
            msg = f"UUIDs are not monotonic: {previous} versus {current}"
            raise RuntimeError(msg)
        previous = current
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/core/tests/unit_tests/utils/test_uuid_utils.py",
"license": "MIT License",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/langchain_v1/tests/unit_tests/agents/test_system_message.py | """Comprehensive unit tests for system message handling in agents.
This module consolidates all system message and dynamic prompt tests:
- Basic system message scenarios (none, string, SystemMessage)
- ModelRequest system_message field support
- System message updates via middleware
- Multiple middleware chaining
- Cache control preservation
- Metadata merging
- Dynamic system prompt middleware
- Edge cases and error handling
These tests replicate functionality from langchainjs PR #9459.
"""
from collections.abc import Callable
from dataclasses import dataclass
from typing import Any
import pytest
from langchain_core.language_models.fake_chat_models import GenericFakeChatModel
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage, TextContentBlock
from langgraph.runtime import Runtime
from langchain.agents.factory import create_agent
from langchain.agents.middleware.types import AgentState, ModelRequest, ModelResponse
def _make_request(
    system_message: SystemMessage | None = None,
    system_prompt: str | None = None,
) -> ModelRequest:
    """Create a minimal ModelRequest for testing."""
    # A fake model that replays one canned AIMessage is enough here.
    fake_model = GenericFakeChatModel(messages=iter([AIMessage(content="response")]))
    return ModelRequest(
        model=fake_model,
        system_message=system_message,
        system_prompt=system_prompt,
        messages=[],
        tool_choice=None,
        tools=[],
        response_format=None,
        state=AgentState(messages=[]),
        runtime=Runtime(),
        model_settings={},
    )
# =============================================================================
# ModelRequest Tests
# =============================================================================
class TestModelRequestSystemMessage:
    """Test ModelRequest with system_message field."""

    @pytest.mark.parametrize(
        ("system_message", "system_prompt", "expected_msg", "expected_prompt"),
        [
            # Test with SystemMessage
            (
                SystemMessage(content="You are helpful"),
                None,
                SystemMessage(content="You are helpful"),
                "You are helpful",
            ),
            # Test with None
            (None, None, None, None),
            # Test with string (backward compat)
            (None, "You are helpful", SystemMessage(content="You are helpful"), "You are helpful"),
        ],
    )
    def test_create_with_various_system_inputs(
        self,
        system_message: SystemMessage | None,
        system_prompt: str | None,
        expected_msg: SystemMessage | None,
        expected_prompt: str | None,
    ) -> None:
        """Test creating ModelRequest with various system message inputs."""
        model = GenericFakeChatModel(messages=iter([AIMessage(content="Hello")]))
        request = ModelRequest(
            model=model,
            system_message=system_message,
            system_prompt=system_prompt,
            messages=[HumanMessage("Hi")],
            tool_choice=None,
            tools=[],
            response_format=None,
            state=AgentState(messages=[]),
            runtime=None,
        )
        # Both the normalized SystemMessage and the derived string view
        # must agree with the expected values.
        if expected_msg is None:
            assert request.system_message is None
        else:
            assert request.system_message is not None
            assert request.system_message.content == expected_msg.content
        assert request.system_prompt == expected_prompt

    def test_system_prompt_property_with_list_content(self) -> None:
        """Test system_prompt property handles list content."""
        model = GenericFakeChatModel(messages=iter([AIMessage(content="Hello")]))
        system_msg = SystemMessage(content=["Part 1", "Part 2"])
        request = ModelRequest(
            model=model,
            system_message=system_msg,
            messages=[],
            tool_choice=None,
            tools=[],
            response_format=None,
            state=AgentState(messages=[]),
            runtime=None,
        )
        # List content must still render to a usable string prompt.
        assert request.system_prompt is not None
        assert "Part 1" in request.system_prompt

    @pytest.mark.parametrize(
        ("override_with", "expected_text"),
        [
            ("system_message", "New"),
            ("system_prompt", "New prompt"),
        ],
    )
    def test_override_methods(self, override_with: str, expected_text: str) -> None:
        """Test override() with system_message and system_prompt parameters."""
        model = GenericFakeChatModel(messages=iter([AIMessage(content="Hello")]))
        original_msg = SystemMessage(content="Original")
        original_request = ModelRequest(
            model=model,
            system_message=original_msg,
            messages=[],
            tool_choice=None,
            tools=[],
            response_format=None,
            state=AgentState(messages=[]),
            runtime=None,
        )
        if override_with == "system_message":
            new_request = original_request.override(system_message=SystemMessage(content="New"))
        else:  # system_prompt
            # system_prompt is deprecated but supported at runtime for backward compatibility
            new_request = original_request.override(system_prompt="New prompt")  # type: ignore[call-arg]
        assert isinstance(new_request.system_message, SystemMessage)
        assert new_request.system_prompt == expected_text
        # override() must not mutate the original request.
        assert original_request.system_prompt == "Original"

    def test_override_system_prompt_to_none(self) -> None:
        """Test override() setting system_prompt to None."""
        model = GenericFakeChatModel(messages=iter([AIMessage(content="Hello")]))
        original_request = ModelRequest(
            model=model,
            system_message=SystemMessage(content="Original"),
            messages=[],
            tool_choice=None,
            tools=[],
            response_format=None,
            state=AgentState(messages=[]),
            runtime=None,
        )
        # system_prompt is deprecated but supported at runtime for backward compatibility
        new_request = original_request.override(system_prompt=None)  # type: ignore[call-arg]
        # Clearing the prompt clears the normalized message too.
        assert new_request.system_message is None
        assert new_request.system_prompt is None

    @pytest.mark.parametrize(
        "use_constructor",
        [True, False],
        ids=["constructor", "override"],
    )
    def test_cannot_set_both_system_prompt_and_system_message(
        self, *, use_constructor: bool
    ) -> None:
        """Test that setting both system_prompt and system_message raises error."""
        model = GenericFakeChatModel(messages=iter([AIMessage(content="Hello")]))
        if use_constructor:
            with pytest.raises(ValueError, match="Cannot specify both"):
                ModelRequest(
                    model=model,
                    system_prompt="String prompt",
                    system_message=SystemMessage(content="Message prompt"),
                    messages=[],
                    tool_choice=None,
                    tools=[],
                    response_format=None,
                    state=AgentState(messages=[]),
                    runtime=None,
                )
        else:
            request = ModelRequest(
                model=model,
                system_message=None,
                messages=[],
                tool_choice=None,
                tools=[],
                response_format=None,
                state=AgentState(messages=[]),
                runtime=None,
            )
            with pytest.raises(ValueError, match="Cannot specify both"):
                # system_prompt is deprecated but supported at runtime for backward compatibility
                request.override(  # type: ignore[call-arg]
                    system_prompt="String prompt",
                    system_message=SystemMessage(content="Message prompt"),
                )

    @pytest.mark.parametrize(
        ("new_value", "should_be_none"),
        [
            ("New prompt", False),
            (None, True),
        ],
    )
    def test_setattr_system_prompt_deprecated(
        self, new_value: str | None, *, should_be_none: bool
    ) -> None:
        """Test that setting system_prompt via setattr raises deprecation warning."""
        model = GenericFakeChatModel(messages=iter([AIMessage(content="Hello")]))
        request = ModelRequest(
            model=model,
            system_message=SystemMessage(content="Original") if not should_be_none else None,
            messages=[],
            tool_choice=None,
            tools=[],
            response_format=None,
            state=AgentState(messages=[]),
            runtime=None,
        )
        with pytest.warns(DeprecationWarning, match="system_prompt is deprecated"):
            request.system_prompt = new_value  # type: ignore[misc]
        # Assignment through the deprecated setter still updates system_message.
        if should_be_none:
            assert request.system_message is None
            assert request.system_prompt is None
        else:
            assert isinstance(request.system_message, SystemMessage)
            assert request.system_message.content_blocks[0].get("text") == new_value

    def test_system_message_with_complex_content(self) -> None:
        """Test SystemMessage with complex content (list of dicts)."""
        model = GenericFakeChatModel(messages=iter([AIMessage(content="Hello")]))
        system_msg = SystemMessage(
            content=[
                {"type": "text", "text": "You are helpful"},
                {"type": "text", "text": "Be concise", "cache_control": {"type": "ephemeral"}},
            ]
        )
        request = ModelRequest(
            model=model,
            system_message=system_msg,
            messages=[],
            tool_choice=None,
            tools=[],
            response_format=None,
            state=AgentState(messages=[]),
            runtime=None,
        )
        assert request.system_message is not None
        assert isinstance(request.system_message.content_blocks, list)
        assert len(request.system_message.content_blocks) == 2
        # Provider-specific keys like cache_control must survive untouched.
        assert request.system_message.content_blocks[1].get("cache_control") == {
            "type": "ephemeral"
        }

    def test_multiple_overrides_with_system_message(self) -> None:
        """Test chaining overrides with system_message."""
        model = GenericFakeChatModel(messages=iter([AIMessage(content="Hello")]))
        original_request = ModelRequest(
            model=model,
            system_message=SystemMessage(content="Prompt 1"),
            messages=[],
            tool_choice=None,
            tools=[],
            response_format=None,
            state=AgentState(messages=[]),
            runtime=None,
        )
        # Each override returns a new request; the last write wins.
        final_request = (
            original_request.override(system_message=SystemMessage(content="Prompt 2"))
            .override(tool_choice="auto")
            .override(system_message=SystemMessage(content="Prompt 3"))
        )
        assert final_request.system_prompt == "Prompt 3"
        assert final_request.tool_choice == "auto"
        assert original_request.system_prompt == "Prompt 1"
# =============================================================================
# create_agent Tests
# =============================================================================
class TestCreateAgentSystemMessage:
    """Test create_agent with various system message inputs."""

    @pytest.mark.parametrize(
        "system_prompt",
        [
            None,
            "You are a helpful assistant",
            SystemMessage(content="You are a helpful assistant"),
            SystemMessage(
                content="You are a helpful assistant",
                additional_kwargs={"role": "system_admin", "priority": "high"},
                response_metadata={"model": "gpt-4", "temperature": 0.7},
            ),
            SystemMessage(
                content=[
                    {"type": "text", "text": "You are a helpful assistant"},
                    {
                        "type": "text",
                        "text": "Follow these rules carefully",
                        "cache_control": {"type": "ephemeral"},
                    },
                ]
            ),
        ],
        ids=[
            "none",
            "string",
            "system_message",
            "system_message_with_metadata",
            "system_message_with_complex_content",
        ],
    )
    def test_create_agent_with_various_system_prompts(
        self, system_prompt: SystemMessage | str | None
    ) -> None:
        """Test create_agent accepts various system_prompt formats."""
        # Smoke test: construction must succeed for every supported prompt
        # shape; the agent is not invoked here.
        model = GenericFakeChatModel(messages=iter([AIMessage(content="Hello")]))
        agent = create_agent(
            model=model,
            system_prompt=system_prompt,
        )
        assert agent is not None
# =============================================================================
# Middleware Tests
# =============================================================================
class TestSystemMessageUpdateViaMiddleware:
    """Test updating system messages through middleware."""

    def test_middleware_can_set_initial_system_message(self) -> None:
        """Test middleware setting system message when none exists."""

        def set_system_message_middleware(
            request: ModelRequest,
            handler: Callable[[ModelRequest], ModelResponse],
        ) -> ModelResponse:
            """Middleware that sets initial system message."""
            new_request = request.override(
                system_message=SystemMessage(content="Set by middleware")
            )
            return handler(new_request)

        model = GenericFakeChatModel(messages=iter([AIMessage(content="response")]))
        request = ModelRequest(
            model=model,
            system_message=None,
            messages=[HumanMessage(content="Hello")],
            tool_choice=None,
            tools=[],
            response_format=None,
            state=AgentState(messages=[]),
            runtime=Runtime(),
        )
        # Capture the request the handler receives so the override can be inspected.
        captured_request = None

        def mock_handler(req: ModelRequest) -> ModelResponse:
            nonlocal captured_request
            captured_request = req
            return ModelResponse(result=[AIMessage(content="response")])

        set_system_message_middleware(request, mock_handler)
        assert captured_request is not None
        assert captured_request.system_message is not None
        assert len(captured_request.system_message.content_blocks) == 1
        assert captured_request.system_message.content_blocks[0].get("text") == "Set by middleware"

    def test_middleware_can_update_via_system_message_object(self) -> None:
        """Test middleware updating system message using SystemMessage objects."""

        def append_with_metadata_middleware(
            request: ModelRequest,
            handler: Callable[[ModelRequest], ModelResponse],
        ) -> ModelResponse:
            """Append using SystemMessage to preserve metadata."""
            base_content = request.system_message.text if request.system_message else ""
            base_kwargs = request.system_message.additional_kwargs if request.system_message else {}
            new_message = SystemMessage(
                content=base_content + " Additional instructions.",
                additional_kwargs={**base_kwargs, "middleware": "applied"},
            )
            new_request = request.override(system_message=new_message)
            return handler(new_request)

        model = GenericFakeChatModel(messages=iter([AIMessage(content="response")]))
        request = ModelRequest(
            model=model,
            system_message=SystemMessage(
                content="Base prompt", additional_kwargs={"base": "value"}
            ),
            messages=[],
            tool_choice=None,
            tools=[],
            response_format=None,
            state=AgentState(messages=[]),
            runtime=Runtime(),
        )
        # Capture the request the handler receives so the override can be inspected.
        captured_request = None

        def mock_handler(req: ModelRequest) -> ModelResponse:
            nonlocal captured_request
            captured_request = req
            return ModelResponse(result=[AIMessage(content="response")])

        append_with_metadata_middleware(request, mock_handler)
        assert captured_request is not None
        assert captured_request.system_message is not None
        # Both the appended text and the merged additional_kwargs must survive.
        assert captured_request.system_message.text == "Base prompt Additional instructions."
        assert captured_request.system_message.additional_kwargs["base"] == "value"
        assert captured_request.system_message.additional_kwargs["middleware"] == "applied"
class TestMultipleMiddlewareChaining:
"""Test multiple middleware modifying system message in sequence.

Each middleware takes ``(request, handler)`` and must call ``handler`` with
the (possibly overridden) request, so chaining is expressed as nested
handler lambdas — the outermost middleware runs first.
"""
def test_multiple_middleware_can_chain_modifications(self) -> None:
"""Test that multiple middleware can modify system message sequentially."""
def first_middleware(
request: ModelRequest, handler: Callable[[ModelRequest], ModelResponse]
) -> ModelResponse:
"""First middleware sets base system message."""
new_request = request.override(
system_message=SystemMessage(
content="Base prompt",
additional_kwargs={"middleware_1": "applied"},
)
)
return handler(new_request)
def second_middleware(
request: ModelRequest, handler: Callable[[ModelRequest], ModelResponse]
) -> ModelResponse:
"""Second middleware appends to system message."""
assert request.system_message is not None
current_content = request.system_message.text
current_kwargs = request.system_message.additional_kwargs
new_request = request.override(
system_message=SystemMessage(
content=current_content + " + middleware 2",
additional_kwargs={**current_kwargs, "middleware_2": "applied"},
)
)
return handler(new_request)
def third_middleware(
request: ModelRequest, handler: Callable[[ModelRequest], ModelResponse]
) -> ModelResponse:
"""Third middleware appends to system message."""
assert request.system_message is not None
current_content = request.system_message.text
current_kwargs = request.system_message.additional_kwargs
new_request = request.override(
system_message=SystemMessage(
content=current_content + " + middleware 3",
additional_kwargs={**current_kwargs, "middleware_3": "applied"},
)
)
return handler(new_request)
model = GenericFakeChatModel(messages=iter([AIMessage(content="response")]))
# Start with no system message; the first middleware creates it.
request = ModelRequest(
model=model,
system_message=None,
messages=[],
tool_choice=None,
tools=[],
response_format=None,
state=AgentState(messages=[]),
runtime=Runtime(),
)
def final_handler(req: ModelRequest) -> ModelResponse:
# Verify all middleware applied
assert req.system_message is not None
assert req.system_message.text == "Base prompt + middleware 2 + middleware 3"
assert req.system_message.additional_kwargs["middleware_1"] == "applied"
assert req.system_message.additional_kwargs["middleware_2"] == "applied"
assert req.system_message.additional_kwargs["middleware_3"] == "applied"
return ModelResponse(result=[AIMessage(content="response")])
# Chain middleware calls
first_middleware(
request,
lambda req: second_middleware(req, lambda req2: third_middleware(req2, final_handler)),
)
def test_middleware_can_mix_string_and_system_message_updates(self) -> None:
"""Test mixing string and SystemMessage updates across middleware."""
def string_middleware(
request: ModelRequest, handler: Callable[[ModelRequest], ModelResponse]
) -> ModelResponse:
"""Use string-based update."""
new_request = request.override(system_message=SystemMessage(content="String prompt"))
return handler(new_request)
def system_message_middleware(
request: ModelRequest, handler: Callable[[ModelRequest], ModelResponse]
) -> ModelResponse:
"""Use SystemMessage-based update."""
current_content = request.system_message.text if request.system_message else ""
new_request = request.override(
system_message=SystemMessage(
content=current_content + " + SystemMessage",
additional_kwargs={"metadata": "added"},
)
)
return handler(new_request)
model = GenericFakeChatModel(messages=iter([AIMessage(content="response")]))
request = ModelRequest(
model=model,
system_message=None,
messages=[],
tool_choice=None,
tools=[],
response_format=None,
state=AgentState(messages=[]),
runtime=Runtime(),
)
def final_handler(req: ModelRequest) -> ModelResponse:
assert req.system_message is not None
assert req.system_message.text == "String prompt + SystemMessage"
assert req.system_message.additional_kwargs.get("metadata") == "added"
return ModelResponse(result=[AIMessage(content="response")])
string_middleware(request, lambda req: system_message_middleware(req, final_handler))
class TestCacheControlPreservation:
"""Test cache control metadata preservation in system messages.

``cache_control`` entries (e.g. ``{"type": "ephemeral"}``) live inside
individual content blocks, so middleware must carry the blocks through
intact — not just the flattened text — for them to survive.
"""
def test_middleware_can_add_cache_control(self) -> None:
"""Test middleware adding cache control to system message."""
def cache_control_middleware(
request: ModelRequest, handler: Callable[[ModelRequest], ModelResponse]
) -> ModelResponse:
"""Add cache control to system message."""
new_message = SystemMessage(
content=[
{"type": "text", "text": "Base instructions"},
{
"type": "text",
"text": "Cached instructions",
"cache_control": {"type": "ephemeral"},
},
]
)
new_request = request.override(system_message=new_message)
return handler(new_request)
model = GenericFakeChatModel(messages=iter([AIMessage(content="response")]))
request = ModelRequest(
model=model,
system_message=None,
messages=[],
tool_choice=None,
tools=[],
response_format=None,
state=AgentState(messages=[]),
runtime=Runtime(),
)
captured_request = None
# Records the request the middleware forwards so assertions can inspect it.
def mock_handler(req: ModelRequest) -> ModelResponse:
nonlocal captured_request
captured_request = req
return ModelResponse(result=[AIMessage(content="response")])
cache_control_middleware(request, mock_handler)
assert captured_request is not None
assert captured_request.system_message is not None
assert isinstance(captured_request.system_message.content_blocks, list)
assert captured_request.system_message.content_blocks[1].get("cache_control") == {
"type": "ephemeral"
}
def test_cache_control_preserved_across_middleware(self) -> None:
"""Test that cache control is preserved when middleware modifies message."""
def first_middleware_with_cache(
request: ModelRequest, handler: Callable[[ModelRequest], ModelResponse]
) -> ModelResponse:
"""Set system message with cache control."""
new_message = SystemMessage(
content=[
{
"type": "text",
"text": "Cached content",
"cache_control": {"type": "ephemeral"},
}
]
)
new_request = request.override(system_message=new_message)
return handler(new_request)
def second_middleware_appends(
request: ModelRequest, handler: Callable[[ModelRequest], ModelResponse]
) -> ModelResponse:
"""Append to system message while preserving cache control."""
assert request.system_message is not None
# Reuse the existing blocks verbatim and append a new one, so the
# first block's cache_control entry is carried through unchanged.
existing_content = request.system_message.content_blocks
new_content = [*existing_content, TextContentBlock(type="text", text="Additional text")]
new_message = SystemMessage(content_blocks=new_content)
new_request = request.override(system_message=new_message)
return handler(new_request)
model = GenericFakeChatModel(messages=iter([AIMessage(content="response")]))
request = ModelRequest(
model=model,
system_message=None,
messages=[],
tool_choice=None,
tools=[],
response_format=None,
state=AgentState(messages=[]),
runtime=Runtime(),
)
def final_handler(req: ModelRequest) -> ModelResponse:
# Verify cache control was preserved
assert req.system_message is not None
assert isinstance(req.system_message.content_blocks, list)
assert len(req.system_message.content_blocks) == 2
assert req.system_message.content_blocks[0].get("cache_control") == {
"type": "ephemeral"
}
return ModelResponse(result=[AIMessage(content="response")])
first_middleware_with_cache(
request, lambda req: second_middleware_appends(req, final_handler)
)
class TestMetadataMerging:
"""Test metadata merging behavior when updating system messages.

Parameterized over both metadata channels (``additional_kwargs`` and
``response_metadata``); in each case the update's values win on shared
keys while non-conflicting keys from both sides are kept.
"""
@pytest.mark.parametrize(
("metadata_type", "initial_metadata", "update_metadata", "expected_result"),
[
# additional_kwargs merging
(
"additional_kwargs",
{"key1": "value1", "shared": "original"},
{"key2": "value2", "shared": "updated"},
{"key1": "value1", "key2": "value2", "shared": "updated"},
),
# response_metadata merging
(
"response_metadata",
{"model": "gpt-4", "region": "us-east"},
{"tokens": 100, "region": "eu-west"},
{"model": "gpt-4", "tokens": 100, "region": "eu-west"},
),
],
ids=["additional_kwargs", "response_metadata"],
)
def test_metadata_merge_across_updates(
self,
metadata_type: str,
initial_metadata: dict[str, Any],
update_metadata: dict[str, Any],
expected_result: dict[str, Any],
) -> None:
"""Test that metadata merges correctly when updating system message."""
base_message = SystemMessage(
content="Base",
**{metadata_type: initial_metadata},
)
def update_middleware(
request: ModelRequest, handler: Callable[[ModelRequest], ModelResponse]
) -> ModelResponse:
"""Update system message, merging metadata."""
# Dict-unpacking merge: update_metadata wins on duplicate keys.
current_metadata = getattr(request.system_message, metadata_type)
new_metadata = {**current_metadata, **update_metadata}
new_request = request.override(
system_message=SystemMessage(content="Updated", **{metadata_type: new_metadata})
)
return handler(new_request)
model = GenericFakeChatModel(messages=iter([AIMessage(content="response")]))
request = ModelRequest(
model=model,
system_message=base_message,
messages=[],
tool_choice=None,
tools=[],
response_format=None,
state=AgentState(messages=[]),
runtime=Runtime(),
)
captured_request = None
def mock_handler(req: ModelRequest) -> ModelResponse:
nonlocal captured_request
captured_request = req
return ModelResponse(result=[AIMessage(content="response")])
update_middleware(request, mock_handler)
assert captured_request is not None
assert getattr(captured_request.system_message, metadata_type) == expected_result
# =============================================================================
# Dynamic System Prompt Middleware Tests
# =============================================================================
class TestDynamicSystemPromptMiddleware:
"""Test middleware that accepts SystemMessage return types.

Unlike the wrapping middleware above, these middleware are plain
``ModelRequest -> SystemMessage`` functions: they compute a prompt and
return it instead of invoking a handler themselves.
"""
def test_middleware_can_return_system_message(self) -> None:
"""Test that middleware can return a SystemMessage with dynamic content."""
def dynamic_system_prompt_middleware(request: ModelRequest) -> SystemMessage:
"""Return a SystemMessage with dynamic content."""
# getattr with a default tolerates contexts without a `region` field.
region = getattr(request.runtime.context, "region", "n/a")
return SystemMessage(content=f"You are a helpful assistant. Region: {region}")
@dataclass
class RegionContext:
region: str
runtime = Runtime(context=RegionContext(region="EU"))
request = ModelRequest(
model=GenericFakeChatModel(messages=iter([AIMessage(content="response")])),
system_message=None,
messages=[HumanMessage(content="Hello")],
tool_choice=None,
tools=[],
response_format=None,
state=AgentState(messages=[]),
runtime=runtime,
model_settings={},
)
new_system_message = dynamic_system_prompt_middleware(request)
assert isinstance(new_system_message, SystemMessage)
assert len(new_system_message.content_blocks) == 1
assert (
new_system_message.content_blocks[0].get("text")
== "You are a helpful assistant. Region: EU"
)
def test_middleware_can_use_system_message_with_metadata(self) -> None:
"""Test middleware creating SystemMessage with additional metadata."""
def metadata_middleware(request: ModelRequest) -> SystemMessage:
"""Return SystemMessage with metadata."""
return SystemMessage(
content="You are a helpful assistant",
additional_kwargs={"temperature": 0.7, "model": "gpt-4"},
response_metadata={"region": "us-east"},
)
request = _make_request()
new_system_message = metadata_middleware(request)
assert len(new_system_message.content_blocks) == 1
assert new_system_message.content_blocks[0].get("text") == "You are a helpful assistant"
assert new_system_message.additional_kwargs == {
"temperature": 0.7,
"model": "gpt-4",
}
assert new_system_message.response_metadata == {"region": "us-east"}
def test_middleware_handles_none_system_message(self) -> None:
"""Test middleware creating new SystemMessage when none exists."""
def create_if_none_middleware(request: ModelRequest) -> SystemMessage:
"""Create a system message if none exists."""
if request.system_message is None:
return SystemMessage(content="Default system prompt")
return request.system_message
request = _make_request(system_message=None)
new_system_message = create_if_none_middleware(request)
assert isinstance(new_system_message, SystemMessage)
assert len(new_system_message.content_blocks) == 1
assert new_system_message.content_blocks[0].get("text") == "Default system prompt"
def test_middleware_with_content_blocks(self) -> None:
"""Test middleware creating SystemMessage with content blocks."""
def content_blocks_middleware(request: ModelRequest) -> SystemMessage:
"""Create SystemMessage with content blocks including cache control."""
return SystemMessage(
content=[
{"type": "text", "text": "Base instructions"},
{
"type": "text",
"text": "Cached instructions",
"cache_control": {"type": "ephemeral"},
},
]
)
request = _make_request()
new_system_message = content_blocks_middleware(request)
assert isinstance(new_system_message.content_blocks, list)
assert len(new_system_message.content_blocks) == 2
assert new_system_message.content_blocks[0].get("text") == "Base instructions"
assert new_system_message.content_blocks[1].get("cache_control") == {"type": "ephemeral"}
class TestSystemMessageMiddlewareIntegration:
"""Test integration of SystemMessage with middleware chain.

These middleware are ``ModelRequest -> ModelRequest`` transforms applied
by hand in sequence, covering metadata preservation and the deprecated
string-based ``system_prompt`` compatibility path.
"""
def test_multiple_middleware_can_modify_system_message(self) -> None:
"""Test that multiple middleware can modify system message in sequence."""
def first_middleware(request: ModelRequest) -> ModelRequest:
"""First middleware adds base system message."""
new_message = SystemMessage(
content="You are an assistant.",
additional_kwargs={"middleware_1": "applied"},
)
return request.override(system_message=new_message)
def second_middleware(request: ModelRequest) -> ModelRequest:
"""Second middleware appends to system message."""
assert request.system_message is not None
current_content = request.system_message.text
new_content = current_content + " Be helpful."
merged_kwargs = {
**request.system_message.additional_kwargs,
"middleware_2": "applied",
}
new_message = SystemMessage(
content=new_content,
additional_kwargs=merged_kwargs,
)
return request.override(system_message=new_message)
request = _make_request(system_message=None)
# Apply middleware in sequence
request = first_middleware(request)
assert request.system_message is not None
assert len(request.system_message.content_blocks) == 1
assert request.system_message.content_blocks[0].get("text") == "You are an assistant."
assert request.system_message.additional_kwargs["middleware_1"] == "applied"
request = second_middleware(request)
assert request.system_message is not None
assert len(request.system_message.content_blocks) == 1
assert (
request.system_message.content_blocks[0].get("text")
== "You are an assistant. Be helpful."
)
assert request.system_message.additional_kwargs["middleware_1"] == "applied"
assert request.system_message.additional_kwargs["middleware_2"] == "applied"
def test_middleware_preserves_system_message_metadata(self) -> None:
"""Test that metadata is preserved when middleware modifies system message."""
base_message = SystemMessage(
content="Base prompt",
additional_kwargs={"key1": "value1", "key2": "value2"},
response_metadata={"model": "gpt-4"},
)
def preserving_middleware(request: ModelRequest) -> ModelRequest:
"""Middleware that preserves existing metadata."""
assert request.system_message is not None
# Copy both metadata channels onto the rebuilt message explicitly.
new_message = SystemMessage(
content=request.system_message.text + " Extended.",
additional_kwargs=request.system_message.additional_kwargs,
response_metadata=request.system_message.response_metadata,
)
return request.override(system_message=new_message)
request = _make_request(system_message=base_message)
new_request = preserving_middleware(request)
assert new_request.system_message is not None
assert len(new_request.system_message.content_blocks) == 1
assert new_request.system_message.content_blocks[0].get("text") == "Base prompt Extended."
assert new_request.system_message.additional_kwargs == {
"key1": "value1",
"key2": "value2",
}
assert new_request.system_message.response_metadata == {"model": "gpt-4"}
def test_backward_compatibility_with_string_system_prompt(self) -> None:
"""Test that middleware still works with string system prompts."""
def string_middleware(request: ModelRequest) -> ModelRequest:
"""Middleware using string system prompt (backward compatible)."""
current_prompt = request.system_prompt or ""
new_prompt = current_prompt + " Additional instructions."
# system_prompt is deprecated but supported at runtime for backward compatibility
return request.override(system_prompt=new_prompt.strip()) # type: ignore[call-arg]
request = _make_request(system_prompt="Base prompt")
new_request = string_middleware(request)
assert new_request.system_prompt == "Base prompt Additional instructions."
# The string update is reflected as a real SystemMessage object too.
assert isinstance(new_request.system_message, SystemMessage)
@pytest.mark.parametrize(
"initial_value",
[
SystemMessage(content="Hello"),
"Hello",
None,
],
ids=["system_message", "string", "none"],
)
def test_middleware_can_switch_between_formats(
self, initial_value: SystemMessage | str | None
) -> None:
"""Test middleware can work with SystemMessage, string, or None."""
def flexible_middleware(request: ModelRequest) -> ModelRequest:
"""Middleware that works with various formats."""
if request.system_message:
new_message = SystemMessage(content=request.system_message.text + " [modified]")
return request.override(system_message=new_message)
new_message = SystemMessage(content="[created]")
return request.override(system_message=new_message)
if isinstance(initial_value, SystemMessage):
request = _make_request(system_message=initial_value)
expected_text = "Hello [modified]"
elif isinstance(initial_value, str):
request = _make_request(system_prompt=initial_value)
expected_text = "Hello [modified]"
else: # None
request = _make_request(system_message=None)
expected_text = "[created]"
result = flexible_middleware(request)
assert result.system_message is not None
assert len(result.system_message.content_blocks) == 1
assert result.system_message.content_blocks[0].get("text") == expected_text
# =============================================================================
# Edge Cases and Error Handling
# =============================================================================
class TestEdgeCasesAndErrorHandling:
"""Test edge cases and error handling for system messages.

Covers empty content, multi-block content, and clearing a previously set
system message back to ``None``.
"""
@pytest.mark.parametrize(
("content", "expected_blocks", "expected_prompt"),
[
("", 0, ""),
(
[
{"type": "text", "text": "Block 1"},
{"type": "text", "text": "Block 2"},
{"type": "text", "text": "Block 3"},
],
3,
None,
),
],
ids=["empty_content", "multiple_blocks"],
)
def test_system_message_content_variations(
self, content: str | list[str | dict[str, Any]], expected_blocks: int, expected_prompt: str
) -> None:
"""Test SystemMessage with various content variations."""
system_message = SystemMessage(content=content)
model = GenericFakeChatModel(messages=iter([AIMessage(content="response")]))
request = ModelRequest(
model=model,
system_message=system_message,
messages=[],
tool_choice=None,
tools=[],
response_format=None,
state=AgentState(messages=[]),
runtime=Runtime(),
)
assert request.system_message is not None
if isinstance(content, list):
assert isinstance(request.system_message.content_blocks, list)
assert len(request.system_message.content_blocks) == expected_blocks
else:
assert len(request.system_message.content_blocks) == expected_blocks
# Multi-block content has no single string form, hence expected_prompt=None.
assert request.system_prompt == expected_prompt
def test_reset_system_prompt_to_none(self) -> None:
"""Test resetting system prompt to None."""
base_message = SystemMessage(content="Original prompt")
model = GenericFakeChatModel(messages=iter([AIMessage(content="response")]))
request = ModelRequest(
model=model,
system_message=base_message,
messages=[],
tool_choice=None,
tools=[],
response_format=None,
state=AgentState(messages=[]),
runtime=Runtime(),
)
# override(None) must clear both the message and the derived prompt view.
new_request = request.override(system_message=None)
assert new_request.system_message is None
assert new_request.system_prompt is None
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/tests/unit_tests/agents/test_system_message.py",
"license": "MIT License",
"lines": 901,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/core/langchain_core/language_models/model_profile.py | """Model profile types and utilities."""
from typing_extensions import TypedDict
class ModelProfile(TypedDict, total=False):
"""Model profile.
!!! warning "Beta feature"
This is a beta feature. The format of model profiles is subject to change.
Provides information about chat model capabilities, such as context window sizes
and supported features.
"""
# `total=False`: every field below is optional, so a profile may omit any
# capability for which no data is available.
# --- Input constraints ---
max_input_tokens: int
"""Maximum context window (tokens)"""
text_inputs: bool
"""Whether text inputs are supported."""
image_inputs: bool
"""Whether image inputs are supported."""
# TODO: add more detail about formats?
image_url_inputs: bool
"""Whether [image URL inputs](https://docs.langchain.com/oss/python/langchain/models#multimodal)
are supported."""
pdf_inputs: bool
"""Whether [PDF inputs](https://docs.langchain.com/oss/python/langchain/models#multimodal)
are supported."""
# TODO: add more detail about formats? e.g. bytes or base64
audio_inputs: bool
"""Whether [audio inputs](https://docs.langchain.com/oss/python/langchain/models#multimodal)
are supported."""
# TODO: add more detail about formats? e.g. bytes or base64
video_inputs: bool
"""Whether [video inputs](https://docs.langchain.com/oss/python/langchain/models#multimodal)
are supported."""
# TODO: add more detail about formats? e.g. bytes or base64
image_tool_message: bool
"""Whether images can be included in tool messages."""
pdf_tool_message: bool
"""Whether PDFs can be included in tool messages."""
# --- Output constraints ---
max_output_tokens: int
"""Maximum output tokens"""
reasoning_output: bool
"""Whether the model supports [reasoning / chain-of-thought](https://docs.langchain.com/oss/python/langchain/models#reasoning)"""
text_outputs: bool
"""Whether text outputs are supported."""
image_outputs: bool
"""Whether [image outputs](https://docs.langchain.com/oss/python/langchain/models#multimodal)
are supported."""
audio_outputs: bool
"""Whether [audio outputs](https://docs.langchain.com/oss/python/langchain/models#multimodal)
are supported."""
video_outputs: bool
"""Whether [video outputs](https://docs.langchain.com/oss/python/langchain/models#multimodal)
are supported."""
# --- Tool calling ---
tool_calling: bool
"""Whether the model supports [tool calling](https://docs.langchain.com/oss/python/langchain/models#tool-calling)"""
tool_choice: bool
"""Whether the model supports [tool choice](https://docs.langchain.com/oss/python/langchain/models#forcing-tool-calls)"""
# --- Structured output ---
structured_output: bool
"""Whether the model supports a native [structured output](https://docs.langchain.com/oss/python/langchain/models#structured-outputs)
feature"""
# Maps a model identifier or name to its profile.
ModelProfileRegistry = dict[str, ModelProfile]
"""Registry mapping model identifiers or names to their ModelProfile."""
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/core/langchain_core/language_models/model_profile.py",
"license": "MIT License",
"lines": 63,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
langchain-ai/langchain:libs/model-profiles/langchain_model_profiles/cli.py | """CLI for refreshing model profile data from models.dev."""
import argparse
import json
import re
import sys
import tempfile
from pathlib import Path
from typing import Any
import httpx
try:
import tomllib # type: ignore[import-not-found] # Python 3.11+
except ImportError:
import tomli as tomllib # type: ignore[import-not-found,no-redef]
def _validate_data_dir(data_dir: Path) -> Path:
"""Validate and canonicalize data directory path.
Args:
data_dir: User-provided data directory path.
Returns:
Resolved, canonical path.
Raises:
SystemExit: If user declines to write outside current directory.
"""
# Resolve to absolute, canonical path (follows symlinks)
try:
resolved = data_dir.resolve(strict=False)
except (OSError, RuntimeError) as e:
msg = f"Invalid data directory path: {e}"
print(f"❌ {msg}", file=sys.stderr)
sys.exit(1)
# Warn if writing outside current directory
cwd = Path.cwd().resolve()
try:
resolved.relative_to(cwd)
except ValueError:
# Not relative to cwd
print("⚠️ WARNING: Writing outside current directory", file=sys.stderr)
print(f" Current directory: {cwd}", file=sys.stderr)
print(f" Target directory: {resolved}", file=sys.stderr)
print(file=sys.stderr)
response = input("Continue? (y/N): ")
if response.lower() != "y":
print("Aborted.", file=sys.stderr)
sys.exit(1)
return resolved
def _load_augmentations(
data_dir: Path,
) -> tuple[dict[str, Any], dict[str, dict[str, Any]]]:
"""Load augmentations from `profile_augmentations.toml`.
Args:
data_dir: Directory containing `profile_augmentations.toml`.
Returns:
Tuple of `(provider_augmentations, model_augmentations)`.
"""
aug_file = data_dir / "profile_augmentations.toml"
if not aug_file.exists():
return {}, {}
try:
with aug_file.open("rb") as f:
data = tomllib.load(f)
except PermissionError:
msg = f"Permission denied reading augmentations file: {aug_file}"
print(f"❌ {msg}", file=sys.stderr)
sys.exit(1)
except tomllib.TOMLDecodeError as e:
msg = f"Invalid TOML syntax in augmentations file: {e}"
print(f"❌ {msg}", file=sys.stderr)
sys.exit(1)
except OSError as e:
msg = f"Failed to read augmentations file: {e}"
print(f"❌ {msg}", file=sys.stderr)
sys.exit(1)
overrides = data.get("overrides", {})
provider_aug: dict[str, Any] = {}
model_augs: dict[str, dict[str, Any]] = {}
for key, value in overrides.items():
if isinstance(value, dict):
model_augs[key] = value
else:
provider_aug[key] = value
return provider_aug, model_augs
def _model_data_to_profile(model_data: dict[str, Any]) -> dict[str, Any]:
"""Convert raw models.dev data into the canonical profile structure."""
limit = model_data.get("limit") or {}
modalities = model_data.get("modalities") or {}
input_modalities = modalities.get("input") or []
output_modalities = modalities.get("output") or []
profile = {
"max_input_tokens": limit.get("context"),
"max_output_tokens": limit.get("output"),
"text_inputs": "text" in input_modalities,
"image_inputs": "image" in input_modalities,
"audio_inputs": "audio" in input_modalities,
"pdf_inputs": "pdf" in input_modalities or model_data.get("pdf_inputs"),
"video_inputs": "video" in input_modalities,
"text_outputs": "text" in output_modalities,
"image_outputs": "image" in output_modalities,
"audio_outputs": "audio" in output_modalities,
"video_outputs": "video" in output_modalities,
"reasoning_output": model_data.get("reasoning"),
"tool_calling": model_data.get("tool_call"),
"tool_choice": model_data.get("tool_choice"),
"structured_output": model_data.get("structured_output"),
"image_url_inputs": model_data.get("image_url_inputs"),
"image_tool_message": model_data.get("image_tool_message"),
"pdf_tool_message": model_data.get("pdf_tool_message"),
}
return {k: v for k, v in profile.items() if v is not None}
def _apply_overrides(
profile: dict[str, Any], *overrides: dict[str, Any] | None
) -> dict[str, Any]:
"""Merge provider and model overrides onto the canonical profile."""
merged = dict(profile)
for override in overrides:
if not override:
continue
for key, value in override.items():
if value is not None:
merged[key] = value # noqa: PERF403
return merged
def _ensure_safe_output_path(base_dir: Path, output_file: Path) -> None:
"""Ensure the resolved output path remains inside the expected directory."""
if base_dir.exists() and base_dir.is_symlink():
msg = f"Data directory {base_dir} is a symlink; refusing to write profiles."
print(f"❌ {msg}", file=sys.stderr)
sys.exit(1)
if output_file.exists() and output_file.is_symlink():
msg = (
f"profiles.py at {output_file} is a symlink; refusing to overwrite it.\n"
"Delete the symlink or point --data-dir to a safe location."
)
print(f"❌ {msg}", file=sys.stderr)
sys.exit(1)
try:
output_file.resolve(strict=False).relative_to(base_dir.resolve())
except (OSError, RuntimeError) as e:
msg = f"Failed to resolve output path: {e}"
print(f"❌ {msg}", file=sys.stderr)
sys.exit(1)
except ValueError:
msg = f"Refusing to write outside of data directory: {output_file}"
print(f"❌ {msg}", file=sys.stderr)
sys.exit(1)
def _write_profiles_file(output_file: Path, contents: str) -> None:
    """Write the generated module atomically without following symlinks.

    A scratch file is written in the target directory first and then renamed
    over *output_file*, so readers never observe a partially written module.
    Exits the process (status 1) on permission or I/O errors, cleaning up
    the scratch file.
    """
    _ensure_safe_output_path(output_file.parent, output_file)

    scratch: Path | None = None
    try:
        with tempfile.NamedTemporaryFile(
            mode="w", encoding="utf-8", dir=output_file.parent, delete=False
        ) as handle:
            handle.write(contents)
            scratch = Path(handle.name)
        # Atomic on POSIX: replace the target with the fully written scratch file.
        scratch.replace(output_file)
    except PermissionError:
        print(f"❌ Permission denied writing file: {output_file}", file=sys.stderr)
        if scratch:
            scratch.unlink(missing_ok=True)
        sys.exit(1)
    except OSError as exc:
        print(f"❌ Failed to write file: {exc}", file=sys.stderr)
        if scratch:
            scratch.unlink(missing_ok=True)
        sys.exit(1)
# Docstring injected verbatim at the top of the generated profiles module by
# `refresh`; credits models.dev and warns against editing the file by hand.
MODULE_ADMONITION = """Auto-generated model profiles.
DO NOT EDIT THIS FILE MANUALLY.
This file is generated by the langchain-profiles CLI tool.
It contains data derived from the models.dev project.
Source: https://github.com/sst/models.dev
License: MIT License
To update these data, refer to the instructions here:
https://docs.langchain.com/oss/python/langchain/models#updating-or-overwriting-profile-data
"""
def refresh(provider: str, data_dir: Path) -> None: # noqa: C901, PLR0915
"""Download and merge model profile data for a specific provider.
Args:
provider: Provider ID from models.dev (e.g., `'anthropic'`, `'openai'`).
data_dir: Directory containing `profile_augmentations.toml` and where
`profiles.py` will be written.
Raises:
SystemExit: On network/HTTP failure, malformed API data, an unknown
provider, or a filesystem error while writing the generated module.
"""
# Validate and canonicalize data directory path
data_dir = _validate_data_dir(data_dir)
api_url = "https://models.dev/api.json"
print(f"Provider: {provider}")
print(f"Data directory: {data_dir}")
print()
# Download data from models.dev
print(f"Downloading data from {api_url}...")
try:
response = httpx.get(api_url, timeout=30)
response.raise_for_status()
except httpx.TimeoutException:
msg = f"Request timed out connecting to {api_url}"
print(f"❌ {msg}", file=sys.stderr)
sys.exit(1)
except httpx.HTTPStatusError as e:
msg = f"HTTP error {e.response.status_code} from {api_url}"
print(f"❌ {msg}", file=sys.stderr)
sys.exit(1)
except httpx.RequestError as e:
msg = f"Failed to connect to {api_url}: {e}"
print(f"❌ {msg}", file=sys.stderr)
sys.exit(1)
try:
all_data = response.json()
except json.JSONDecodeError as e:
msg = f"Invalid JSON response from API: {e}"
print(f"❌ {msg}", file=sys.stderr)
sys.exit(1)
# Basic validation
if not isinstance(all_data, dict):
msg = "Expected API response to be a dictionary"
print(f"❌ {msg}", file=sys.stderr)
sys.exit(1)
provider_count = len(all_data)
model_count = sum(len(p.get("models", {})) for p in all_data.values())
print(f"Downloaded {provider_count} providers with {model_count} models")
# Extract data for this provider
if provider not in all_data:
msg = f"Provider '{provider}' not found in models.dev data"
print(msg, file=sys.stderr)
sys.exit(1)
provider_data = all_data[provider]
models = provider_data.get("models", {})
print(f"Extracted {len(models)} models for {provider}")
# Load augmentations
print("Loading augmentations...")
provider_aug, model_augs = _load_augmentations(data_dir)
# Merge and convert to profiles.
# Precedence: models.dev data < provider-wide override < per-model override.
profiles: dict[str, dict[str, Any]] = {}
for model_id, model_data in models.items():
base_profile = _model_data_to_profile(model_data)
profiles[model_id] = _apply_overrides(
base_profile, provider_aug, model_augs.get(model_id)
)
# Include new models defined purely via augmentations
extra_models = set(model_augs) - set(models)
if extra_models:
print(f"Adding {len(extra_models)} models from augmentations only...")
for model_id in sorted(extra_models):
profiles[model_id] = _apply_overrides({}, provider_aug, model_augs[model_id])
# Ensure directory exists
try:
data_dir.mkdir(parents=True, exist_ok=True, mode=0o755)
except PermissionError:
msg = f"Permission denied creating directory: {data_dir}"
print(f"❌ {msg}", file=sys.stderr)
sys.exit(1)
except OSError as e:
msg = f"Failed to create directory: {e}"
print(f"❌ {msg}", file=sys.stderr)
sys.exit(1)
# Write as Python module
output_file = data_dir / "_profiles.py"
print(f"Writing to {output_file}...")
module_content = [f'"""{MODULE_ADMONITION}"""\n\n', "from typing import Any\n\n"]
module_content.append("_PROFILES: dict[str, dict[str, Any]] = ")
json_str = json.dumps(dict(sorted(profiles.items())), indent=4)
# Rewrite JSON literals as their Python spellings.
# NOTE(review): these are plain substring replacements, so "true"/"false"/
# "null" occurring INSIDE a model id or other string value would be
# corrupted — confirm profile keys/values can never contain those substrings.
json_str = (
json_str.replace("true", "True")
.replace("false", "False")
.replace("null", "None")
)
# Add trailing commas for ruff format compliance
json_str = re.sub(r"([^\s,{\[])(?=\n\s*[\}\]])", r"\1,", json_str)
module_content.append(f"{json_str}\n")
_write_profiles_file(output_file, "".join(module_content))
print(
f"✓ Successfully refreshed {len(profiles)} model profiles "
f"({output_file.stat().st_size:,} bytes)"
)
def main() -> None:
    """CLI entrypoint."""
    arg_parser = argparse.ArgumentParser(
        prog="langchain-profiles",
        description="Refresh model profile data from models.dev",
    )
    commands = arg_parser.add_subparsers(dest="command", required=True)

    # `refresh` sub-command: download provider data and merge augmentations.
    refresh_cmd = commands.add_parser(
        "refresh", help="Download and merge model profile data for a provider"
    )
    refresh_cmd.add_argument(
        "--provider",
        required=True,
        help="Provider ID from models.dev (e.g., 'anthropic', 'openai', 'google')",
    )
    refresh_cmd.add_argument(
        "--data-dir",
        required=True,
        type=Path,
        help="Data directory containing profile_augmentations.toml",
    )

    parsed = arg_parser.parse_args()
    if parsed.command == "refresh":
        refresh(parsed.provider, parsed.data_dir)
# Allow running the module directly as a script, in addition to the
# installed console entry point.
if __name__ == "__main__":
    main()
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/model-profiles/langchain_model_profiles/cli.py",
"license": "MIT License",
"lines": 299,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/model-profiles/tests/unit_tests/test_cli.py | """Tests for CLI functionality."""
import importlib.util
from pathlib import Path
from unittest.mock import Mock, patch
import pytest
from langchain_model_profiles.cli import _model_data_to_profile, refresh
@pytest.fixture
def mock_models_dev_response() -> dict:
    """Create a mock response from models.dev API."""

    def _model(
        model_id: str, name: str, context: int, output: int, inputs: list[str]
    ) -> dict:
        # Minimal models.dev model entry with the fields the CLI consumes.
        return {
            "id": model_id,
            "name": name,
            "tool_call": True,
            "limit": {"context": context, "output": output},
            "modalities": {"input": inputs, "output": ["text"]},
        }

    return {
        "anthropic": {
            "id": "anthropic",
            "name": "Anthropic",
            "models": {
                "claude-3-opus": _model(
                    "claude-3-opus", "Claude 3 Opus", 200000, 4096, ["text", "image"]
                ),
                "claude-3-sonnet": _model(
                    "claude-3-sonnet", "Claude 3 Sonnet", 200000, 4096, ["text", "image"]
                ),
            },
        },
        "openai": {
            "id": "openai",
            "name": "OpenAI",
            "models": {"gpt-4": _model("gpt-4", "GPT-4", 8192, 4096, ["text"])},
        },
    }
def test_refresh_generates_profiles_file(
    tmp_path: Path, mock_models_dev_response: dict
) -> None:
    """Test that refresh command generates _profiles.py with merged data."""
    data_dir = tmp_path / "data"
    data_dir.mkdir()

    # Create augmentations file
    aug_file = data_dir / "profile_augmentations.toml"
    aug_file.write_text("""
provider = "anthropic"
[overrides]
image_url_inputs = true
pdf_inputs = true
""")

    # Mock the httpx.get call
    mock_response = Mock()
    mock_response.json.return_value = mock_models_dev_response
    mock_response.raise_for_status = Mock()

    with (
        patch("langchain_model_profiles.cli.httpx.get", return_value=mock_response),
        patch("builtins.input", return_value="y"),
    ):
        refresh("anthropic", data_dir)

    # Verify _profiles.py was created
    profiles_file = data_dir / "_profiles.py"
    assert profiles_file.exists()

    # Import and verify content
    profiles_content = profiles_file.read_text()
    assert "DO NOT EDIT THIS FILE MANUALLY" in profiles_content
    # The generated module defines `_PROFILES`; assert the exact name rather
    # than the looser "PROFILES:" substring so a rename would be caught.
    assert "_PROFILES:" in profiles_content
    assert "claude-3-opus" in profiles_content
    assert "claude-3-sonnet" in profiles_content

    # Check that augmentations were applied
    assert "image_url_inputs" in profiles_content
    assert "pdf_inputs" in profiles_content
def test_refresh_raises_error_for_missing_provider(
    tmp_path: Path, mock_models_dev_response: dict
) -> None:
    """Test that refresh exits with error for non-existent provider."""
    data_dir = tmp_path / "data"
    data_dir.mkdir()

    # Fake out the network call to models.dev.
    fake_response = Mock()
    fake_response.json.return_value = mock_models_dev_response
    fake_response.raise_for_status = Mock()

    with (
        patch("langchain_model_profiles.cli.httpx.get", return_value=fake_response),
        patch("builtins.input", return_value="y"),
        pytest.raises(SystemExit) as exc_info,
    ):
        refresh("nonexistent-provider", data_dir)

    assert exc_info.value.code == 1
    # No output file should have been written on failure.
    assert not (data_dir / "_profiles.py").exists()
def test_refresh_works_without_augmentations(
    tmp_path: Path, mock_models_dev_response: dict
) -> None:
    """Test that refresh works even without augmentations file."""
    data_dir = tmp_path / "data"
    data_dir.mkdir()

    # Fake out the network call to models.dev; note no augmentations TOML exists.
    fake_response = Mock()
    fake_response.json.return_value = mock_models_dev_response
    fake_response.raise_for_status = Mock()

    with (
        patch("langchain_model_profiles.cli.httpx.get", return_value=fake_response),
        patch("builtins.input", return_value="y"),
    ):
        refresh("anthropic", data_dir)

    # The output module must still be generated with non-empty content.
    generated = data_dir / "_profiles.py"
    assert generated.exists()
    assert generated.stat().st_size > 0
def test_refresh_aborts_when_user_declines_external_directory(
    tmp_path: Path, mock_models_dev_response: dict
) -> None:
    """Test that refresh aborts when user declines writing to external directory."""
    data_dir = tmp_path / "data"
    data_dir.mkdir()

    # Fake out the network call to models.dev.
    fake_response = Mock()
    fake_response.json.return_value = mock_models_dev_response
    fake_response.raise_for_status = Mock()

    with (
        patch("langchain_model_profiles.cli.httpx.get", return_value=fake_response),
        patch("builtins.input", return_value="n"),  # User declines
        pytest.raises(SystemExit) as exc_info,
    ):
        refresh("anthropic", data_dir)

    assert exc_info.value.code == 1
    # Declining the prompt must prevent any file from being written.
    assert not (data_dir / "_profiles.py").exists()
def test_refresh_includes_models_defined_only_in_augmentations(
    tmp_path: Path, mock_models_dev_response: dict
) -> None:
    """Ensure models that only exist in augmentations are emitted."""
    data_dir = tmp_path / "data"
    data_dir.mkdir()

    # Augmentations declare a model that models.dev does not know about.
    (data_dir / "profile_augmentations.toml").write_text("""
provider = "anthropic"
[overrides."custom-offline-model"]
structured_output = true
pdf_inputs = true
max_input_tokens = 123
""")

    fake_response = Mock()
    fake_response.json.return_value = mock_models_dev_response
    fake_response.raise_for_status = Mock()

    with (
        patch("langchain_model_profiles.cli.httpx.get", return_value=fake_response),
        patch("builtins.input", return_value="y"),
    ):
        refresh("anthropic", data_dir)

    profiles_file = data_dir / "_profiles.py"
    assert profiles_file.exists()

    # Import the generated module and inspect the augmentation-only entry.
    spec = importlib.util.spec_from_file_location(
        "generated_profiles_aug_only", profiles_file
    )
    assert spec
    assert spec.loader
    loaded = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(loaded)  # type: ignore[union-attr]

    profiles = loaded._PROFILES  # type: ignore[attr-defined]
    assert "custom-offline-model" in profiles
    entry = profiles["custom-offline-model"]
    assert entry["structured_output"] is True
    assert entry["max_input_tokens"] == 123
def test_refresh_generates_sorted_profiles(
    tmp_path: Path, mock_models_dev_response: dict
) -> None:
    """Test that profiles are sorted alphabetically by model ID."""
    data_dir = tmp_path / "data"
    data_dir.mkdir()

    # Inject models in reverse-alphabetical order so the API response
    # is NOT already sorted.  Dict comprehensions preserve insertion order,
    # so the unsorted z/a/m ordering survives into the fake payload.
    mock_models_dev_response["anthropic"]["models"] = {
        model_id: {
            "id": model_id,
            "name": name,
            "tool_call": True,
            "limit": {"context": 100000, "output": 2048},
            "modalities": {"input": ["text"], "output": ["text"]},
        }
        for model_id, name in [
            ("z-model", "Z Model"),
            ("a-model", "A Model"),
            ("m-model", "M Model"),
        ]
    }

    fake_response = Mock()
    fake_response.json.return_value = mock_models_dev_response
    fake_response.raise_for_status = Mock()

    with (
        patch("langchain_model_profiles.cli.httpx.get", return_value=fake_response),
        patch("builtins.input", return_value="y"),
    ):
        refresh("anthropic", data_dir)

    profiles_file = data_dir / "_profiles.py"
    spec = importlib.util.spec_from_file_location(
        "generated_profiles_sorted", profiles_file
    )
    assert spec
    assert spec.loader
    loaded = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(loaded)  # type: ignore[union-attr]

    keys = list(loaded._PROFILES.keys())  # type: ignore[attr-defined]
    assert keys == sorted(keys), f"Profile keys are not sorted: {keys}"
def test_model_data_to_profile_text_modalities() -> None:
    """Test that text input/output modalities are correctly mapped."""
    # Each case: (models.dev model entry, expected text_inputs, expected text_outputs).
    cases = [
        # Model with text in both input and output.
        (
            {
                "modalities": {"input": ["text", "image"], "output": ["text"]},
                "limit": {"context": 128000, "output": 4096},
            },
            True,
            True,
        ),
        # Model without text input (e.g., Whisper-like audio model).
        (
            {
                "modalities": {"input": ["audio"], "output": ["text"]},
                "limit": {"context": 0, "output": 0},
            },
            False,
            True,
        ),
        # Model without text output (e.g., image generator).
        (
            {
                "modalities": {"input": ["text"], "output": ["image"]},
                "limit": {},
            },
            True,
            False,
        ),
    ]
    for model_data, expect_inputs, expect_outputs in cases:
        profile = _model_data_to_profile(model_data)
        assert profile["text_inputs"] is expect_inputs
        assert profile["text_outputs"] is expect_outputs
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/model-profiles/tests/unit_tests/test_cli.py",
"license": "MIT License",
"lines": 251,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/langchain_v1/langchain/agents/middleware/model_retry.py | """Model retry middleware for agents."""
from __future__ import annotations
import asyncio
import time
from typing import TYPE_CHECKING
from langchain_core.messages import AIMessage
from langchain.agents.middleware._retry import (
OnFailure,
RetryOn,
calculate_delay,
should_retry_exception,
validate_retry_params,
)
from langchain.agents.middleware.types import (
AgentMiddleware,
AgentState,
ContextT,
ModelRequest,
ModelResponse,
ResponseT,
)
if TYPE_CHECKING:
from collections.abc import Awaitable, Callable
class ModelRetryMiddleware(AgentMiddleware[AgentState[ResponseT], ContextT, ResponseT]):
    """Middleware that automatically retries failed model calls with configurable backoff.

    Supports retrying on specific exceptions and exponential backoff.

    Examples:
        !!! example "Basic usage with default settings (2 retries, exponential backoff)"
            ```python
            from langchain.agents import create_agent
            from langchain.agents.middleware import ModelRetryMiddleware

            agent = create_agent(model, tools=[search_tool], middleware=[ModelRetryMiddleware()])
            ```

        !!! example "Retry specific exceptions only"
            ```python
            from anthropic import RateLimitError
            from openai import APITimeoutError

            retry = ModelRetryMiddleware(
                max_retries=4,
                retry_on=(APITimeoutError, RateLimitError),
                backoff_factor=1.5,
            )
            ```

        !!! example "Custom exception filtering"
            ```python
            from anthropic import APIStatusError


            def should_retry(exc: Exception) -> bool:
                # Only retry on 5xx errors
                if isinstance(exc, APIStatusError):
                    return 500 <= exc.status_code < 600
                return False


            retry = ModelRetryMiddleware(
                max_retries=3,
                retry_on=should_retry,
            )
            ```

        !!! example "Custom error handling"
            ```python
            def format_error(exc: Exception) -> str:
                return "Model temporarily unavailable. Please try again later."


            retry = ModelRetryMiddleware(
                max_retries=4,
                on_failure=format_error,
            )
            ```

        !!! example "Constant backoff (no exponential growth)"
            ```python
            retry = ModelRetryMiddleware(
                max_retries=5,
                backoff_factor=0.0,  # No exponential growth
                initial_delay=2.0,  # Always wait 2 seconds
            )
            ```

        !!! example "Raise exception on failure"
            ```python
            retry = ModelRetryMiddleware(
                max_retries=2,
                on_failure="error",  # Re-raise exception instead of returning message
            )
            ```
    """

    def __init__(
        self,
        *,
        max_retries: int = 2,
        retry_on: RetryOn = (Exception,),
        on_failure: OnFailure = "continue",
        backoff_factor: float = 2.0,
        initial_delay: float = 1.0,
        max_delay: float = 60.0,
        jitter: bool = True,
    ) -> None:
        """Initialize `ModelRetryMiddleware`.

        Args:
            max_retries: Maximum number of retry attempts after the initial call.
                Must be `>= 0`.
            retry_on: Either a tuple of exception types to retry on, or a callable
                that takes an exception and returns `True` if it should be retried.
                Default is to retry on all exceptions.
            on_failure: Behavior when all retries are exhausted.

                Options:

                - `'continue'`: Return an `AIMessage` with error details,
                    allowing the agent to continue with an error response.
                - `'error'`: Re-raise the exception, stopping agent execution.
                - **Custom callable:** Function that takes the exception and returns a
                    string for the `AIMessage` content, allowing custom error
                    formatting.
            backoff_factor: Multiplier for exponential backoff.
                Each retry waits `initial_delay * (backoff_factor ** retry_number)`
                seconds.
                Set to `0.0` for constant delay.
            initial_delay: Initial delay in seconds before first retry.
            max_delay: Maximum delay in seconds between retries.
                Caps exponential backoff growth.
            jitter: Whether to add random jitter (`±25%`) to delay to avoid thundering herd.

        Raises:
            ValueError: If `max_retries < 0` or delays are negative.
        """
        super().__init__()
        # Validate parameters (raises ValueError on negative values).
        validate_retry_params(max_retries, initial_delay, max_delay, backoff_factor)
        self.max_retries = max_retries
        self.tools = []  # No additional tools registered by this middleware
        self.retry_on = retry_on
        self.on_failure = on_failure
        self.backoff_factor = backoff_factor
        self.initial_delay = initial_delay
        self.max_delay = max_delay
        self.jitter = jitter

    @staticmethod
    def _format_failure_message(exc: Exception, attempts_made: int) -> AIMessage:
        """Format the failure message when retries are exhausted.

        Args:
            exc: The exception that caused the failure.
            attempts_made: Number of attempts actually made.

        Returns:
            `AIMessage` with formatted error message.
        """
        exc_type = type(exc).__name__
        exc_msg = str(exc)
        # Singular/plural so "1 attempt" reads naturally.
        attempt_word = "attempt" if attempts_made == 1 else "attempts"
        content = (
            f"Model call failed after {attempts_made} {attempt_word} with {exc_type}: {exc_msg}"
        )
        return AIMessage(content=content)

    def _handle_failure(self, exc: Exception, attempts_made: int) -> ModelResponse[ResponseT]:
        """Handle failure when all retries are exhausted.

        Args:
            exc: The exception that caused the failure.
            attempts_made: Number of attempts actually made.

        Returns:
            `ModelResponse` with error details.

        Raises:
            Exception: If `on_failure` is `'error'`, re-raises the exception.
        """
        # Check the string sentinel first: "error" is not callable, so ordering
        # here is safe, but keep the explicit comparison before callable().
        if self.on_failure == "error":
            raise exc
        if callable(self.on_failure):
            # Custom formatter supplies the AIMessage content.
            content = self.on_failure(exc)
            ai_msg = AIMessage(content=content)
        else:
            # Default "continue" behavior: standard failure message.
            ai_msg = self._format_failure_message(exc, attempts_made)
        return ModelResponse(result=[ai_msg])

    def wrap_model_call(
        self,
        request: ModelRequest[ContextT],
        handler: Callable[[ModelRequest[ContextT]], ModelResponse[ResponseT]],
    ) -> ModelResponse[ResponseT] | AIMessage:
        """Intercept model execution and retry on failure.

        The handler may be invoked up to `max_retries + 1` times in total.

        Args:
            request: Model request with model, messages, state, and runtime.
            handler: Callable to execute the model (can be called multiple times).

        Returns:
            `ModelResponse` or `AIMessage` (the final result).

        Raises:
            RuntimeError: If the retry loop completes without returning. (This should not happen.)
        """
        # Initial attempt + retries
        for attempt in range(self.max_retries + 1):
            try:
                return handler(request)
            except Exception as exc:
                attempts_made = attempt + 1  # attempt is 0-indexed
                # Check if we should retry this exception
                if not should_retry_exception(exc, self.retry_on):
                    # Exception is not retryable, handle failure immediately
                    return self._handle_failure(exc, attempts_made)
                # Check if we have more retries left
                if attempt < self.max_retries:
                    # Calculate and apply backoff delay
                    delay = calculate_delay(
                        attempt,
                        backoff_factor=self.backoff_factor,
                        initial_delay=self.initial_delay,
                        max_delay=self.max_delay,
                        jitter=self.jitter,
                    )
                    if delay > 0:
                        time.sleep(delay)
                    # Continue to next retry
                else:
                    # No more retries, handle failure
                    return self._handle_failure(exc, attempts_made)
        # Unreachable: loop always returns via handler success or _handle_failure
        msg = "Unexpected: retry loop completed without returning"
        raise RuntimeError(msg)

    async def awrap_model_call(
        self,
        request: ModelRequest[ContextT],
        handler: Callable[[ModelRequest[ContextT]], Awaitable[ModelResponse[ResponseT]]],
    ) -> ModelResponse[ResponseT] | AIMessage:
        """Intercept and control async model execution with retry logic.

        Mirrors `wrap_model_call` exactly, but awaits the handler and uses a
        non-blocking `asyncio.sleep` for backoff.

        Args:
            request: Model request with model, messages, state, and runtime.
            handler: Async callable to execute the model and returns `ModelResponse`.

        Returns:
            `ModelResponse` or `AIMessage` (the final result).

        Raises:
            RuntimeError: If the retry loop completes without returning. (This should not happen.)
        """
        # Initial attempt + retries
        for attempt in range(self.max_retries + 1):
            try:
                return await handler(request)
            except Exception as exc:
                attempts_made = attempt + 1  # attempt is 0-indexed
                # Check if we should retry this exception
                if not should_retry_exception(exc, self.retry_on):
                    # Exception is not retryable, handle failure immediately
                    return self._handle_failure(exc, attempts_made)
                # Check if we have more retries left
                if attempt < self.max_retries:
                    # Calculate and apply backoff delay
                    delay = calculate_delay(
                        attempt,
                        backoff_factor=self.backoff_factor,
                        initial_delay=self.initial_delay,
                        max_delay=self.max_delay,
                        jitter=self.jitter,
                    )
                    if delay > 0:
                        await asyncio.sleep(delay)
                    # Continue to next retry
                else:
                    # No more retries, handle failure
                    return self._handle_failure(exc, attempts_made)
        # Unreachable: loop always returns via handler success or _handle_failure
        msg = "Unexpected: retry loop completed without returning"
        raise RuntimeError(msg)
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/langchain/agents/middleware/model_retry.py",
"license": "MIT License",
"lines": 249,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
langchain-ai/langchain:libs/langchain_v1/tests/unit_tests/agents/middleware/implementations/test_model_retry.py | """Tests for ModelRetryMiddleware functionality."""
import time
from collections.abc import Callable
from typing import Any
import pytest
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage
from langchain_core.outputs import ChatGeneration, ChatResult
from langgraph.checkpoint.memory import InMemorySaver
from pydantic import Field
from langchain.agents.factory import create_agent
from langchain.agents.middleware._retry import calculate_delay
from langchain.agents.middleware.model_retry import ModelRetryMiddleware
from langchain.agents.middleware.types import (
ModelCallResult,
ModelRequest,
ModelResponse,
wrap_model_call,
)
from tests.unit_tests.agents.model import FakeToolCallingModel
class TemporaryFailureModel(FakeToolCallingModel):
    """Model that fails a certain number of times before succeeding."""

    fail_count: int = Field(default=0)
    attempt: int = Field(default=0)

    def _generate(
        self,
        messages: list[BaseMessage],
        stop: list[str] | None = None,
        run_manager: CallbackManagerForLLMRun | None = None,
        **kwargs: Any,
    ) -> ChatResult:
        """Execute the model, failing for the first `fail_count` calls.

        Args:
            messages: Input messages.
            stop: Optional stop sequences.
            run_manager: Optional callback manager.
            **kwargs: Additional keyword arguments.

        Returns:
            ChatResult with success message once the failure budget is spent.

        Raises:
            ValueError: While the number of calls is still <= fail_count.
        """
        self.attempt += 1
        if self.attempt <= self.fail_count:
            msg = f"Temporary failure {self.attempt}"
            raise ValueError(msg)

        # Failure budget exhausted: produce a success message.
        success = AIMessage(
            content=f"Success after {self.attempt} attempts", id=str(self.index)
        )
        self.index += 1
        return ChatResult(generations=[ChatGeneration(message=success)])
class AlwaysFailingModel(FakeToolCallingModel):
    """Model that always fails with a specific exception."""

    error_message: str = Field(default="Model error")
    error_type: type[Exception] = Field(default=ValueError)

    def _generate(
        self,
        messages: list[BaseMessage],
        stop: list[str] | None = None,
        run_manager: CallbackManagerForLLMRun | None = None,
        **kwargs: Any,
    ) -> ChatResult:
        """Unconditionally raise the configured exception.

        Args:
            messages: Input messages.
            stop: Optional stop sequences.
            run_manager: Optional callback manager.
            **kwargs: Additional keyword arguments.

        Raises:
            Exception: Always raises `error_type(error_message)`.
        """
        exc = self.error_type(self.error_message)
        raise exc
def test_model_retry_initialization_defaults() -> None:
    """Test ModelRetryMiddleware initialization with default values."""
    middleware = ModelRetryMiddleware()

    assert middleware.max_retries == 2
    assert middleware.tools == []
    assert middleware.on_failure == "continue"
    assert middleware.backoff_factor == 2.0
    assert middleware.initial_delay == 1.0
    assert middleware.max_delay == 60.0
    assert middleware.jitter is True
def test_model_retry_initialization_custom() -> None:
    """Test ModelRetryMiddleware initialization with custom values."""
    middleware = ModelRetryMiddleware(
        max_retries=5,
        retry_on=(ValueError, RuntimeError),
        on_failure="error",
        backoff_factor=1.5,
        initial_delay=0.5,
        max_delay=30.0,
        jitter=False,
    )

    # Every constructor argument should be stored verbatim.
    assert middleware.max_retries == 5
    assert middleware.tools == []
    assert middleware.retry_on == (ValueError, RuntimeError)
    assert middleware.on_failure == "error"
    assert middleware.backoff_factor == 1.5
    assert middleware.initial_delay == 0.5
    assert middleware.max_delay == 30.0
    assert middleware.jitter is False
def test_model_retry_invalid_max_retries() -> None:
    """Test ModelRetryMiddleware raises error for invalid max_retries."""
    with pytest.raises(ValueError, match="max_retries must be >= 0"):
        _ = ModelRetryMiddleware(max_retries=-1)
def test_model_retry_invalid_initial_delay() -> None:
    """Test ModelRetryMiddleware raises error for invalid initial_delay."""
    with pytest.raises(ValueError, match="initial_delay must be >= 0"):
        _ = ModelRetryMiddleware(initial_delay=-1.0)
def test_model_retry_invalid_max_delay() -> None:
    """Test ModelRetryMiddleware raises error for invalid max_delay."""
    with pytest.raises(ValueError, match="max_delay must be >= 0"):
        _ = ModelRetryMiddleware(max_delay=-1.0)
def test_model_retry_invalid_backoff_factor() -> None:
    """Test ModelRetryMiddleware raises error for invalid backoff_factor."""
    with pytest.raises(ValueError, match="backoff_factor must be >= 0"):
        _ = ModelRetryMiddleware(backoff_factor=-1.0)
def test_model_retry_working_model_no_retry_needed() -> None:
    """Test ModelRetryMiddleware with a working model (no retry needed)."""
    middleware = ModelRetryMiddleware(max_retries=2, initial_delay=0.01, jitter=False)
    agent = create_agent(
        model=FakeToolCallingModel(),
        tools=[],
        middleware=[middleware],
        checkpointer=InMemorySaver(),
    )

    config = {"configurable": {"thread_id": "test"}}
    result = agent.invoke({"messages": [HumanMessage("Hello")]}, config)

    replies = [msg for msg in result["messages"] if isinstance(msg, AIMessage)]
    assert replies
    # The happy path should echo the prompt without any retry interference.
    assert "Hello" in replies[-1].content
def test_model_retry_failing_model_returns_message() -> None:
    """Test ModelRetryMiddleware with failing model returns error message."""
    middleware = ModelRetryMiddleware(
        max_retries=2,
        initial_delay=0.01,
        jitter=False,
        on_failure="continue",
    )
    agent = create_agent(
        model=AlwaysFailingModel(error_message="Model error", error_type=ValueError),
        tools=[],
        middleware=[middleware],
        checkpointer=InMemorySaver(),
    )

    config = {"configurable": {"thread_id": "test"}}
    result = agent.invoke({"messages": [HumanMessage("Hello")]}, config)

    replies = [msg for msg in result["messages"] if isinstance(msg, AIMessage)]
    assert replies
    # 1 initial call + 2 retries = 3 attempts reported in the error message.
    final = replies[-1].content
    assert "failed after 3 attempts" in final
    assert "ValueError" in final
def test_model_retry_failing_model_raises() -> None:
    """Test ModelRetryMiddleware with on_failure='error' re-raises exception."""
    middleware = ModelRetryMiddleware(
        max_retries=2,
        initial_delay=0.01,
        jitter=False,
        on_failure="error",
    )
    agent = create_agent(
        model=AlwaysFailingModel(error_message="Model error", error_type=ValueError),
        tools=[],
        middleware=[middleware],
        checkpointer=InMemorySaver(),
    )

    # With on_failure="error" the model's exception must propagate to the caller.
    with pytest.raises(ValueError, match="Model error"):
        agent.invoke(
            {"messages": [HumanMessage("Hello")]},
            {"configurable": {"thread_id": "test"}},
        )
def test_model_retry_custom_failure_formatter() -> None:
    """Test ModelRetryMiddleware with custom failure message formatter."""

    def custom_formatter(exc: Exception) -> str:
        # The formatter controls the AIMessage content after retries exhaust.
        return f"Custom error: {type(exc).__name__}"

    middleware = ModelRetryMiddleware(
        max_retries=1,
        initial_delay=0.01,
        jitter=False,
        on_failure=custom_formatter,
    )
    agent = create_agent(
        model=AlwaysFailingModel(error_message="Model error", error_type=ValueError),
        tools=[],
        middleware=[middleware],
        checkpointer=InMemorySaver(),
    )

    config = {"configurable": {"thread_id": "test"}}
    result = agent.invoke({"messages": [HumanMessage("Hello")]}, config)

    replies = [msg for msg in result["messages"] if isinstance(msg, AIMessage)]
    assert replies
    assert "Custom error: ValueError" in replies[-1].content
def test_model_retry_succeeds_after_retries() -> None:
    """Test ModelRetryMiddleware succeeds after temporary failures."""
    flaky_model = TemporaryFailureModel(fail_count=2)
    middleware = ModelRetryMiddleware(
        max_retries=3,
        initial_delay=0.01,
        jitter=False,
    )
    agent = create_agent(
        model=flaky_model,
        tools=[],
        middleware=[middleware],
        checkpointer=InMemorySaver(),
    )

    config = {"configurable": {"thread_id": "test"}}
    result = agent.invoke({"messages": [HumanMessage("Hello")]}, config)

    replies = [msg for msg in result["messages"] if isinstance(msg, AIMessage)]
    assert replies
    # Two failures then success: the third attempt should win.
    assert "Success after 3 attempts" in replies[-1].content
    assert flaky_model.attempt == 3
def test_model_retry_specific_exceptions() -> None:
    """Test ModelRetryMiddleware only retries specific exception types."""
    # The model raises RuntimeError, but only ValueError is retryable below.
    middleware = ModelRetryMiddleware(
        max_retries=2,
        retry_on=(ValueError,),
        initial_delay=0.01,
        jitter=False,
        on_failure="continue",
    )
    agent = create_agent(
        model=AlwaysFailingModel(error_message="Runtime error", error_type=RuntimeError),
        tools=[],
        middleware=[middleware],
        checkpointer=InMemorySaver(),
    )

    config = {"configurable": {"thread_id": "test"}}
    result = agent.invoke({"messages": [HumanMessage("Hello")]}, config)

    replies = [msg for msg in result["messages"] if isinstance(msg, AIMessage)]
    assert replies
    # A non-retryable RuntimeError should fail on the very first attempt.
    assert "1 attempt" in replies[-1].content
def test_model_retry_custom_exception_filter() -> None:
    """Test ModelRetryMiddleware with custom exception filter function."""

    class CustomError(Exception):
        """Custom exception with retry_me attribute."""

        def __init__(self, message: str, *, retry_me: bool):
            """Initialize custom error.

            Args:
                message: Error message.
                retry_me: Whether this error should be retried.
            """
            super().__init__(message)
            self.retry_me = retry_me

    # Closure-shared counter so the nested model can track call count.
    attempt_count = {"value": 0}

    class CustomErrorModel(FakeToolCallingModel):
        """Model that raises CustomError."""

        def _generate(
            self,
            messages: list[BaseMessage],
            stop: list[str] | None = None,
            run_manager: CallbackManagerForLLMRun | None = None,
            **kwargs: Any,
        ) -> ChatResult:
            """Execute the model and raise CustomError.

            First call raises a retryable error, every later call a
            non-retryable one, so exactly one retry is expected.

            Args:
                messages: Input messages.
                stop: Optional stop sequences.
                run_manager: Optional callback manager.
                **kwargs: Additional keyword arguments.

            Raises:
                CustomError: Always raises CustomError.
            """
            attempt_count["value"] += 1
            if attempt_count["value"] == 1:
                msg = "Retryable error"
                raise CustomError(msg, retry_me=True)
            msg = "Non-retryable error"
            raise CustomError(msg, retry_me=False)

    def should_retry(exc: Exception) -> bool:
        # Retry decision delegates to the exception's own flag.
        return isinstance(exc, CustomError) and exc.retry_me

    model = CustomErrorModel()
    retry = ModelRetryMiddleware(
        max_retries=3,
        retry_on=should_retry,
        initial_delay=0.01,
        jitter=False,
        on_failure="continue",
    )
    agent = create_agent(
        model=model,
        tools=[],
        middleware=[retry],
        checkpointer=InMemorySaver(),
    )
    result = agent.invoke(
        {"messages": [HumanMessage("Hello")]},
        {"configurable": {"thread_id": "test"}},
    )
    ai_messages = [m for m in result["messages"] if isinstance(m, AIMessage)]
    assert len(ai_messages) >= 1
    # Should retry once (attempt 1 with retry_me=True), then fail on attempt 2 (retry_me=False)
    assert attempt_count["value"] == 2
    assert "2 attempts" in ai_messages[-1].content
def test_model_retry_backoff_timing() -> None:
    """Test ModelRetryMiddleware applies correct backoff delays."""
    model = TemporaryFailureModel(fail_count=3)
    retry = ModelRetryMiddleware(
        max_retries=3,
        initial_delay=0.1,
        backoff_factor=2.0,
        jitter=False,
    )
    agent = create_agent(
        model=model,
        tools=[],
        middleware=[retry],
        checkpointer=InMemorySaver(),
    )

    # Use a monotonic clock for interval measurement: time.time() is wall
    # clock and can jump (NTP adjustments), which would make this flaky.
    start_time = time.monotonic()
    result = agent.invoke(
        {"messages": [HumanMessage("Hello")]},
        {"configurable": {"thread_id": "test"}},
    )
    elapsed = time.monotonic() - start_time

    ai_messages = [m for m in result["messages"] if isinstance(m, AIMessage)]
    assert len(ai_messages) >= 1
    # Expected delays: 0.1 + 0.2 + 0.4 = 0.7 seconds
    # Allow some margin for execution time
    assert elapsed >= 0.6, f"Expected at least 0.6s, got {elapsed}s"
def test_model_retry_constant_backoff() -> None:
    """Test ModelRetryMiddleware with constant backoff (backoff_factor=0)."""
    model = TemporaryFailureModel(fail_count=2)
    retry = ModelRetryMiddleware(
        max_retries=2,
        initial_delay=0.1,
        backoff_factor=0.0,  # Constant backoff
        jitter=False,
    )
    agent = create_agent(
        model=model,
        tools=[],
        middleware=[retry],
        checkpointer=InMemorySaver(),
    )

    # Use a monotonic clock for interval measurement: time.time() is wall
    # clock and can jump (NTP adjustments), which would make this flaky.
    start_time = time.monotonic()
    result = agent.invoke(
        {"messages": [HumanMessage("Hello")]},
        {"configurable": {"thread_id": "test"}},
    )
    elapsed = time.monotonic() - start_time

    ai_messages = [m for m in result["messages"] if isinstance(m, AIMessage)]
    assert len(ai_messages) >= 1
    # Expected delays: 0.1 + 0.1 = 0.2 seconds (constant)
    assert elapsed >= 0.15, f"Expected at least 0.15s, got {elapsed}s"
    assert elapsed < 0.5, f"Expected less than 0.5s (exponential would be longer), got {elapsed}s"
def test_model_retry_max_delay_cap() -> None:
    """Test calculate_delay caps delay at max_delay."""
    # Very aggressive backoff with a 2-second cap: uncapped delays would be
    # 1.0, 10.0, 100.0 for attempts 0, 1, 2.
    params = {
        "backoff_factor": 10.0,
        "initial_delay": 1.0,
        "max_delay": 2.0,
        "jitter": False,
    }
    assert calculate_delay(0, **params) == 1.0  # below the cap
    assert calculate_delay(1, **params) == 2.0  # 10.0 clamped to 2.0
    assert calculate_delay(2, **params) == 2.0  # 100.0 clamped to 2.0
def test_model_retry_jitter_variation() -> None:
    """Test calculate_delay adds jitter to delays."""
    samples = [
        calculate_delay(
            0,
            backoff_factor=1.0,
            initial_delay=1.0,
            max_delay=60.0,
            jitter=True,
        )
        for _ in range(10)
    ]

    # Jitter is ±25% of the 1.0s base, so every sample lies in [0.75, 1.25].
    for sample in samples:
        assert 0.75 <= sample <= 1.25
    # With jitter enabled the samples should not all collapse to one value.
    assert len(set(samples)) > 1
@pytest.mark.asyncio
async def test_model_retry_async_working_model() -> None:
    """Test ModelRetryMiddleware with async execution and working model."""
    middleware = ModelRetryMiddleware(max_retries=2, initial_delay=0.01, jitter=False)
    agent = create_agent(
        model=FakeToolCallingModel(),
        tools=[],
        middleware=[middleware],
        checkpointer=InMemorySaver(),
    )

    config = {"configurable": {"thread_id": "test"}}
    result = await agent.ainvoke({"messages": [HumanMessage("Hello")]}, config)

    replies = [msg for msg in result["messages"] if isinstance(msg, AIMessage)]
    assert replies
    assert "Hello" in replies[-1].content
@pytest.mark.asyncio
async def test_model_retry_async_failing_model() -> None:
    """Test ModelRetryMiddleware with async execution and failing model."""
    middleware = ModelRetryMiddleware(
        max_retries=2,
        initial_delay=0.01,
        jitter=False,
        on_failure="continue",
    )
    agent = create_agent(
        model=AlwaysFailingModel(error_message="Model error", error_type=ValueError),
        tools=[],
        middleware=[middleware],
        checkpointer=InMemorySaver(),
    )

    config = {"configurable": {"thread_id": "test"}}
    result = await agent.ainvoke({"messages": [HumanMessage("Hello")]}, config)

    replies = [msg for msg in result["messages"] if isinstance(msg, AIMessage)]
    assert replies
    # 1 initial call + 2 retries = 3 attempts reported in the error message.
    final = replies[-1].content
    assert "failed after 3 attempts" in final
    assert "ValueError" in final
@pytest.mark.asyncio
async def test_model_retry_async_succeeds_after_retries() -> None:
    """Test ModelRetryMiddleware async execution succeeds after temporary failures."""
    # The model fails twice, then succeeds on the third attempt -- comfortably
    # within the 3-retry budget.
    flaky_model = TemporaryFailureModel(fail_count=2)
    agent = create_agent(
        model=flaky_model,
        tools=[],
        middleware=[ModelRetryMiddleware(max_retries=3, initial_delay=0.01, jitter=False)],
        checkpointer=InMemorySaver(),
    )
    result = await agent.ainvoke(
        {"messages": [HumanMessage("Hello")]},
        {"configurable": {"thread_id": "test"}},
    )
    responses = [m for m in result["messages"] if isinstance(m, AIMessage)]
    assert responses
    assert "Success after 3 attempts" in responses[-1].content
@pytest.mark.asyncio
async def test_model_retry_async_backoff_timing() -> None:
    """Test ModelRetryMiddleware async applies correct backoff delays."""
    # Three failures with exponential backoff (0.1s, 0.2s, 0.4s) should add
    # up to roughly 0.7s of total sleep before the fourth attempt succeeds.
    agent = create_agent(
        model=TemporaryFailureModel(fail_count=3),
        tools=[],
        middleware=[
            ModelRetryMiddleware(
                max_retries=3,
                initial_delay=0.1,
                backoff_factor=2.0,
                jitter=False,
            )
        ],
        checkpointer=InMemorySaver(),
    )
    started = time.time()
    result = await agent.ainvoke(
        {"messages": [HumanMessage("Hello")]},
        {"configurable": {"thread_id": "test"}},
    )
    duration = time.time() - started
    responses = [m for m in result["messages"] if isinstance(m, AIMessage)]
    assert responses
    # Allow a little slack below the theoretical 0.7s total delay.
    assert duration >= 0.6, f"Expected at least 0.6s, got {duration}s"
def test_model_retry_zero_retries() -> None:
    """Test ModelRetryMiddleware with max_retries=0 (no retries)."""
    # With a zero retry budget the middleware must give up after the very
    # first failed call and report a single attempt.
    agent = create_agent(
        model=AlwaysFailingModel(error_message="Model error", error_type=ValueError),
        tools=[],
        middleware=[ModelRetryMiddleware(max_retries=0, on_failure="continue")],
        checkpointer=InMemorySaver(),
    )
    result = agent.invoke(
        {"messages": [HumanMessage("Hello")]},
        {"configurable": {"thread_id": "test"}},
    )
    responses = [m for m in result["messages"] if isinstance(m, AIMessage)]
    assert responses
    assert "1 attempt" in responses[-1].content
def test_model_retry_multiple_middleware_composition() -> None:
    """Test ModelRetryMiddleware composes correctly with other middleware."""
    calls_log = []  # NOTE(review): name kept below; records wrap-hook entry/exit
    # Custom middleware that logs calls
    @wrap_model_call
    def logging_middleware(
        request: ModelRequest,
        handler: Callable[[ModelRequest], ModelResponse],
    ) -> ModelCallResult:
        # Record entry/exit around the wrapped call so the test can verify
        # the logging layer actually ran around the retry layer.
        call_log.append("before_model")
        response = handler(request)
        call_log.append("after_model")
        return response
    model = FakeToolCallingModel()
    retry = ModelRetryMiddleware(max_retries=2, initial_delay=0.01, jitter=False)
    # logging_middleware is listed first, so it wraps the retry middleware.
    agent = create_agent(
        model=model,
        tools=[],
        middleware=[logging_middleware, retry],
        checkpointer=InMemorySaver(),
    )
    result = agent.invoke(
        {"messages": [HumanMessage("Hello")]},
        {"configurable": {"thread_id": "test"}},
    )
    # Both middleware should be called
    assert call_log == ["before_model", "after_model"]
    ai_messages = [m for m in result["messages"] if isinstance(m, AIMessage)]
    assert len(ai_messages) >= 1
    assert "Hello" in ai_messages[-1].content
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/tests/unit_tests/agents/middleware/implementations/test_model_retry.py",
"license": "MIT License",
"lines": 553,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/langchain_v1/tests/unit_tests/agents/test_response_format_integration.py | r"""Test response_format for langchain-openai.
If tests fail, cassettes may need to be re-recorded.
To re-record cassettes:
1. Delete existing cassettes (`rm tests/cassettes/test_inference_to_*.yaml.gz`)
2. Re-run the tests with a valid OPENAI_API_KEY in your environment:
```bash
OPENAI_API_KEY=... uv run python -m pytest tests/unit_tests/agents/test_response_format_integration.py
```
The cassettes are compressed. To read them:
```bash
gunzip -c "tests/cassettes/test_inference_to_native_output[True].yaml.gz" | \
yq -o json . | \
jq '.requests[].body |= (gsub("\n";"") | @base64d | fromjson) |
.responses[].body.string |= (gsub("\n";"") | @base64d | fromjson)'
```
Or, in Python:
```python
import json
from langchain_tests.conftest import CustomPersister, CustomSerializer
def bytes_encoder(obj):
return obj.decode("utf-8", errors="replace")
path = "tests/cassettes/test_inference_to_native_output[True].yaml.gz"
requests, responses = CustomPersister().load_cassette(path, CustomSerializer())
assert len(requests) == len(responses)
for request, response in list(zip(requests, responses)):
print("------ REQUEST ------")
req = request._to_dict()
req["body"] = json.loads(req["body"])
print(json.dumps(req, indent=2, default=bytes_encoder))
print("\n\n ------ RESPONSE ------")
resp = response
print(json.dumps(resp, indent=2, default=bytes_encoder))
print("\n\n")
```
""" # noqa: E501
import os
from typing import TYPE_CHECKING, Any
from unittest.mock import patch
import pytest
from langchain_core.messages import HumanMessage
from pydantic import BaseModel, Field
from langchain.agents import create_agent
from langchain.agents.structured_output import ProviderStrategy, ToolStrategy
if TYPE_CHECKING:
from langchain_openai import ChatOpenAI
else:
ChatOpenAI = pytest.importorskip("langchain_openai").ChatOpenAI
class WeatherBaseModel(BaseModel):
    """Weather response."""
    # Structured-output schema shared by all response_format tests below;
    # field descriptions become part of the schema sent to the provider.
    temperature: float = Field(description="The temperature in fahrenheit")
    condition: str = Field(description="Weather condition")
def get_weather(city: str) -> str:
    """Get the weather for a city."""
    # Canned report used as a stub tool result; the docstring above doubles
    # as the tool description and must stay stable for cassette matching.
    return "The weather in " + city + " is sunny and 75°F."
@pytest.mark.vcr
@pytest.mark.parametrize("use_responses_api", [False, True])
def test_inference_to_native_output(*, use_responses_api: bool) -> None:
    """Test that native output is inferred when a model supports it."""
    model_kwargs: dict[str, Any] = {"model": "gpt-5", "use_responses_api": use_responses_api}
    if "OPENAI_API_KEY" not in os.environ:
        # Cassette playback does not need a real key; a placeholder keeps
        # ChatOpenAI construction happy in CI.
        model_kwargs["api_key"] = "foo"
    model = ChatOpenAI(**model_kwargs)
    agent = create_agent(
        model,
        system_prompt=(
            "You are a helpful weather assistant. Please call the get_weather tool "
            "once, then use the WeatherReport tool to generate the final response."
        ),
        tools=[get_weather],
        response_format=WeatherBaseModel,
    )
    response = agent.invoke({"messages": [HumanMessage("What's the weather in Boston?")]})
    assert isinstance(response["structured_response"], WeatherBaseModel)
    assert response["structured_response"].temperature == 75.0
    assert response["structured_response"].condition.lower() == "sunny"
    assert len(response["messages"]) == 4
    assert [m.type for m in response["messages"]] == [
        "human", # "What's the weather in Boston?"
        "ai", # AI message calling the get_weather tool
        "tool", # "The weather in Boston is sunny and 75°F."
        "ai", # structured response (native/provider output, no extra tool msg)
    ]
@pytest.mark.vcr
@pytest.mark.parametrize("use_responses_api", [False, True])
def test_inference_to_tool_output(*, use_responses_api: bool) -> None:
    """Test that tool output is inferred when a model supports it."""
    model_kwargs: dict[str, Any] = {"model": "gpt-5", "use_responses_api": use_responses_api}
    if "OPENAI_API_KEY" not in os.environ:
        # Placeholder key is fine for cassette playback.
        model_kwargs["api_key"] = "foo"
    model = ChatOpenAI(**model_kwargs)
    agent = create_agent(
        model,
        system_prompt=(
            "You are a helpful weather assistant. Please call the get_weather tool "
            "once, then use the WeatherReport tool to generate the final response."
        ),
        tools=[get_weather],
        response_format=ToolStrategy(WeatherBaseModel),
    )
    response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})
    assert isinstance(response["structured_response"], WeatherBaseModel)
    assert response["structured_response"].temperature == 75.0
    assert response["structured_response"].condition.lower() == "sunny"
    # ToolStrategy yields one extra (artificial) tool message vs native output.
    assert len(response["messages"]) == 5
    assert [m.type for m in response["messages"]] == [
        "human", # "What's the weather?"
        "ai", # AI message calling the get_weather tool
        "tool", # "The weather is sunny and 75°F."
        "ai", # structured response (emitted as a tool call)
        "tool", # artificial tool message
    ]
@pytest.mark.vcr
@pytest.mark.parametrize("use_responses_api", [False, True])
def test_strict_mode(*, use_responses_api: bool) -> None:
    """Test that ProviderStrategy(strict=True) sets `strict` in the request payload."""
    model_kwargs: dict[str, Any] = {"model": "gpt-5", "use_responses_api": use_responses_api}
    if "OPENAI_API_KEY" not in os.environ:
        # Placeholder key is fine for cassette playback.
        model_kwargs["api_key"] = "foo"
    model = ChatOpenAI(**model_kwargs)
    # spy on _get_request_payload to check that `strict` is enabled
    original_method = model._get_request_payload
    payloads = []
    def capture_payload(*args: Any, **kwargs: Any) -> dict[str, Any]:
        # Delegate to the real method and keep a copy of each payload built.
        result = original_method(*args, **kwargs)
        payloads.append(result)
        return result
    with patch.object(model, "_get_request_payload", side_effect=capture_payload):
        agent = create_agent(
            model,
            tools=[get_weather],
            response_format=ProviderStrategy(WeatherBaseModel, strict=True),
        )
        response = agent.invoke({"messages": [HumanMessage("What's the weather in Boston?")]})
    # Two model calls: the tool-calling turn and the final structured turn.
    assert len(payloads) == 2
    # The Responses API and Chat Completions API nest the flag differently.
    if use_responses_api:
        assert payloads[-1]["text"]["format"]["strict"]
    else:
        assert payloads[-1]["response_format"]["json_schema"]["strict"]
    assert isinstance(response["structured_response"], WeatherBaseModel)
    assert response["structured_response"].temperature == 75.0
    assert response["structured_response"].condition.lower() == "sunny"
    assert len(response["messages"]) == 4
    assert [m.type for m in response["messages"]] == [
        "human", # "What's the weather in Boston?"
        "ai", # AI message calling the get_weather tool
        "tool", # "The weather in Boston is sunny and 75°F."
        "ai", # structured response
    ]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/tests/unit_tests/agents/test_response_format_integration.py",
"license": "MIT License",
"lines": 147,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/langchain_v1/tests/unit_tests/agents/middleware/core/test_diagram.py | from collections.abc import Callable
from typing import Any
from langgraph.runtime import Runtime
from syrupy.assertion import SnapshotAssertion
from langchain.agents import AgentState
from langchain.agents.factory import create_agent
from langchain.agents.middleware.types import AgentMiddleware, ModelRequest, ModelResponse
from tests.unit_tests.agents.model import FakeToolCallingModel
def test_create_agent_diagram(
    snapshot: SnapshotAssertion,
) -> None:
    """Snapshot mermaid diagrams for agents built with assorted middleware stacks.

    Class names are preserved because the rendered graph node labels derive
    from them; only the repetitive agent-construction code is condensed into
    a loop. Snapshot count and order are unchanged.
    """

    class NoopOne(AgentMiddleware):
        def before_model(self, state: AgentState[Any], runtime: Runtime[None]) -> None:
            pass

    class NoopTwo(AgentMiddleware):
        def before_model(self, state: AgentState[Any], runtime: Runtime[None]) -> None:
            pass

    class NoopThree(AgentMiddleware):
        def before_model(self, state: AgentState[Any], runtime: Runtime[None]) -> None:
            pass

    class NoopFour(AgentMiddleware):
        def after_model(self, state: AgentState[Any], runtime: Runtime[None]) -> None:
            pass

    class NoopFive(AgentMiddleware):
        def after_model(self, state: AgentState[Any], runtime: Runtime[None]) -> None:
            pass

    class NoopSix(AgentMiddleware):
        def after_model(self, state: AgentState[Any], runtime: Runtime[None]) -> None:
            pass

    class NoopSeven(AgentMiddleware):
        def before_model(self, state: AgentState[Any], runtime: Runtime[None]) -> None:
            pass

        def after_model(self, state: AgentState[Any], runtime: Runtime[None]) -> None:
            pass

    class NoopEight(AgentMiddleware):
        def before_model(self, state: AgentState[Any], runtime: Runtime[None]) -> None:
            pass

        def after_model(self, state: AgentState[Any], runtime: Runtime[None]) -> None:
            pass

    class NoopNine(AgentMiddleware):
        def before_model(self, state: AgentState[Any], runtime: Runtime[None]) -> None:
            pass

        def after_model(self, state: AgentState[Any], runtime: Runtime[None]) -> None:
            pass

    class NoopTen(AgentMiddleware):
        def before_model(self, state: AgentState[Any], runtime: Runtime[None]) -> None:
            pass

        def wrap_model_call(
            self,
            request: ModelRequest,
            handler: Callable[[ModelRequest], ModelResponse],
        ) -> ModelResponse:
            return handler(request)

        def after_model(self, state: AgentState[Any], runtime: Runtime[None]) -> None:
            pass

    class NoopEleven(AgentMiddleware):
        def before_model(self, state: AgentState[Any], runtime: Runtime[None]) -> None:
            pass

        def wrap_model_call(
            self,
            request: ModelRequest,
            handler: Callable[[ModelRequest], ModelResponse],
        ) -> ModelResponse:
            return handler(request)

        def after_model(self, state: AgentState[Any], runtime: Runtime[None]) -> None:
            pass

    # Baseline: no middleware argument supplied at all.
    baseline_agent = create_agent(
        model=FakeToolCallingModel(),
        tools=[],
        system_prompt="You are a helpful assistant.",
    )
    assert baseline_agent.get_graph().draw_mermaid() == snapshot

    # Each stack produces one additional snapshot: before-only hooks of
    # increasing depth, then after-only, then both hooks, then both hooks
    # plus wrap_model_call.
    middleware_stacks = [
        [NoopOne()],
        [NoopOne(), NoopTwo()],
        [NoopOne(), NoopTwo(), NoopThree()],
        [NoopFour()],
        [NoopFour(), NoopFive()],
        [NoopFour(), NoopFive(), NoopSix()],
        [NoopSeven()],
        [NoopSeven(), NoopEight()],
        [NoopSeven(), NoopEight(), NoopNine()],
        [NoopTen()],
        [NoopTen(), NoopEleven()],
    ]
    for stack in middleware_stacks:
        stacked_agent = create_agent(
            model=FakeToolCallingModel(),
            tools=[],
            system_prompt="You are a helpful assistant.",
            middleware=stack,
        )
        assert stacked_agent.get_graph().draw_mermaid() == snapshot
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/tests/unit_tests/agents/middleware/core/test_diagram.py",
"license": "MIT License",
"lines": 149,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/langchain_v1/tests/unit_tests/agents/middleware/core/test_framework.py | import sys
from collections.abc import Awaitable, Callable
from typing import Annotated, Any, Generic
import pytest
from langchain_core.language_models import GenericFakeChatModel
from langchain_core.messages import AIMessage, HumanMessage, ToolCall, ToolMessage
from langchain_core.tools import InjectedToolCallId, tool
from langgraph.checkpoint.base import BaseCheckpointSaver
from langgraph.checkpoint.memory import InMemorySaver
from langgraph.runtime import Runtime
from pydantic import BaseModel, Field
from syrupy.assertion import SnapshotAssertion
from typing_extensions import override
from langchain.agents.factory import create_agent
from langchain.agents.middleware.types import (
AgentMiddleware,
AgentState,
ModelCallResult,
ModelRequest,
ModelResponse,
OmitFromInput,
OmitFromOutput,
PrivateStateAttr,
ResponseT,
after_agent,
after_model,
before_agent,
before_model,
hook_config,
)
from langchain.agents.structured_output import ToolStrategy
from langchain.tools import InjectedState
from tests.unit_tests.agents.messages import _AnyIdHumanMessage, _AnyIdToolMessage
from tests.unit_tests.agents.model import FakeToolCallingModel
def test_create_agent_invoke(
    snapshot: SnapshotAssertion,
    sync_checkpointer: BaseCheckpointSaver[str],
) -> None:
    """End-to-end invoke: verifies message contents and middleware hook ordering."""
    # Records every hook and tool invocation so ordering can be asserted below.
    calls = []
    class NoopSeven(AgentMiddleware):
        def before_model(self, state: AgentState[Any], runtime: Runtime) -> None:
            calls.append("NoopSeven.before_model")
        def wrap_model_call(
            self,
            request: ModelRequest,
            handler: Callable[[ModelRequest], ModelResponse],
        ) -> ModelCallResult:
            calls.append("NoopSeven.wrap_model_call")
            return handler(request)
        def after_model(self, state: AgentState[Any], runtime: Runtime) -> None:
            calls.append("NoopSeven.after_model")
    class NoopEight(AgentMiddleware):
        def before_model(self, state: AgentState[Any], runtime: Runtime) -> None:
            calls.append("NoopEight.before_model")
        def wrap_model_call(
            self,
            request: ModelRequest,
            handler: Callable[[ModelRequest], ModelResponse],
        ) -> ModelCallResult:
            calls.append("NoopEight.wrap_model_call")
            return handler(request)
        def after_model(self, state: AgentState[Any], runtime: Runtime) -> None:
            calls.append("NoopEight.after_model")
    @tool
    def my_tool(value: str) -> str:
        """A great tool."""
        calls.append("my_tool")
        return value.upper()
    # First model turn requests my_tool; second turn ends with no tool calls.
    agent_one = create_agent(
        model=FakeToolCallingModel(
            tool_calls=[
                [
                    {"args": {"value": "yo"}, "id": "1", "name": "my_tool"},
                ],
                [],
            ]
        ),
        tools=[my_tool],
        system_prompt="You are a helpful assistant.",
        middleware=[NoopSeven(), NoopEight()],
        checkpointer=sync_checkpointer,
    )
    thread1 = {"configurable": {"thread_id": "1"}}
    assert agent_one.invoke({"messages": ["hello"]}, thread1) == {
        "messages": [
            _AnyIdHumanMessage(content="hello"),
            AIMessage(
                content="You are a helpful assistant.-hello",
                additional_kwargs={},
                response_metadata={},
                id="0",
                tool_calls=[
                    {
                        "name": "my_tool",
                        "args": {"value": "yo"},
                        "id": "1",
                        "type": "tool_call",
                    }
                ],
            ),
            _AnyIdToolMessage(content="YO", name="my_tool", tool_call_id="1"),
            AIMessage(
                content="You are a helpful assistant.-hello-You are a helpful assistant.-hello-YO",
                additional_kwargs={},
                response_metadata={},
                id="1",
            ),
        ],
    }
    # before/wrap hooks run first-to-last, after hooks last-to-first (onion
    # order), and the whole cycle repeats once for the post-tool model call.
    assert calls == [
        "NoopSeven.before_model",
        "NoopEight.before_model",
        "NoopSeven.wrap_model_call",
        "NoopEight.wrap_model_call",
        "NoopEight.after_model",
        "NoopSeven.after_model",
        "my_tool",
        "NoopSeven.before_model",
        "NoopEight.before_model",
        "NoopSeven.wrap_model_call",
        "NoopEight.wrap_model_call",
        "NoopEight.after_model",
        "NoopSeven.after_model",
    ]
def test_create_agent_jump(
    snapshot: SnapshotAssertion,
    sync_checkpointer: BaseCheckpointSaver[str],
) -> None:
    """A before_model hook returning jump_to="end" short-circuits the run."""
    calls = []
    class NoopSeven(AgentMiddleware):
        def before_model(self, state: AgentState[Any], runtime: Runtime) -> None:
            calls.append("NoopSeven.before_model")
        def wrap_model_call(
            self,
            request: ModelRequest,
            handler: Callable[[ModelRequest], ModelResponse],
        ) -> ModelCallResult:
            calls.append("NoopSeven.wrap_model_call")
            return handler(request)
        def after_model(self, state: AgentState[Any], runtime: Runtime) -> None:
            calls.append("NoopSeven.after_model")
    class NoopEight(AgentMiddleware):
        # Declares the jump destination so the graph gets the required edge.
        @hook_config(can_jump_to=["end"])
        def before_model(self, state: AgentState[Any], runtime: Runtime) -> dict[str, Any]:
            calls.append("NoopEight.before_model")
            return {"jump_to": "end"}
        def wrap_model_call(
            self,
            request: ModelRequest,
            handler: Callable[[ModelRequest], ModelResponse],
        ) -> ModelCallResult:
            calls.append("NoopEight.wrap_model_call")
            return handler(request)
        def after_model(self, state: AgentState[Any], runtime: Runtime) -> None:
            calls.append("NoopEight.after_model")
    @tool
    def my_tool(value: str) -> str:
        """A great tool."""
        calls.append("my_tool")
        return value.upper()
    agent_one = create_agent(
        model=FakeToolCallingModel(
            tool_calls=[[ToolCall(id="1", name="my_tool", args={"value": "yo"})]],
        ),
        tools=[my_tool],
        system_prompt="You are a helpful assistant.",
        middleware=[NoopSeven(), NoopEight()],
        checkpointer=sync_checkpointer,
    )
    # Snapshot the graph only for the in-memory saver to avoid duplicate
    # snapshots across checkpointer fixtures.
    if isinstance(sync_checkpointer, InMemorySaver):
        assert agent_one.get_graph().draw_mermaid() == snapshot
    thread1 = {"configurable": {"thread_id": "1"}}
    assert agent_one.invoke({"messages": []}, thread1) == {"messages": []}
    # The jump fires before the model runs: no wrap/after hooks, no tool.
    assert calls == ["NoopSeven.before_model", "NoopEight.before_model"]
def test_simple_agent_graph(snapshot: SnapshotAssertion) -> None:
    """Snapshot the graph of a minimal single-tool agent with no middleware."""

    @tool
    def my_tool(input_string: str) -> str:
        """A great tool."""
        return input_string

    simple_agent = create_agent(
        model=FakeToolCallingModel(
            tool_calls=[[ToolCall(id="1", name="my_tool", args={"input": "yo"})]],
        ),
        tools=[my_tool],
        system_prompt="You are a helpful assistant.",
    )
    rendered = simple_agent.get_graph().draw_mermaid()
    assert rendered == snapshot
def test_agent_graph_with_jump_to_end_as_after_agent(snapshot: SnapshotAssertion) -> None:
    """Snapshot the graph when a jump-capable before_agent coexists with after_agent hooks."""
    @tool
    def my_tool(input_string: str) -> str:
        """A great tool."""
        return input_string
    class NoopZero(AgentMiddleware):
        # Declares it may jump straight to "end", which adds an edge to the graph.
        @hook_config(can_jump_to=["end"])
        def before_agent(self, state: AgentState[Any], runtime: Runtime) -> None:
            return None
    class NoopOne(AgentMiddleware):
        def after_agent(self, state: AgentState[Any], runtime: Runtime) -> None:
            return None
    class NoopTwo(AgentMiddleware):
        def after_agent(self, state: AgentState[Any], runtime: Runtime) -> None:
            return None
    agent_one = create_agent(
        model=FakeToolCallingModel(
            tool_calls=[[ToolCall(id="1", name="my_tool", args={"input": "yo"})]],
        ),
        tools=[my_tool],
        system_prompt="You are a helpful assistant.",
        middleware=[NoopZero(), NoopOne(), NoopTwo()],
    )
    assert agent_one.get_graph().draw_mermaid() == snapshot
def test_on_model_call() -> None:
    """A wrap_model_call middleware can mutate the request before the model runs."""

    class ModifyMiddleware(AgentMiddleware):
        def wrap_model_call(
            self,
            request: ModelRequest,
            handler: Callable[[ModelRequest], ModelResponse],
        ) -> ModelCallResult:
            # Inject an extra human message ahead of the model call.
            request.messages.append(HumanMessage("remember to be nice!"))
            return handler(request)

    agent = create_agent(
        model=FakeToolCallingModel(),
        tools=[],
        system_prompt="You are a helpful assistant.",
        middleware=[ModifyMiddleware()],
    )
    outcome = agent.invoke({"messages": [HumanMessage("Hello")]})
    messages = outcome["messages"]
    # The injected message persists in state and is visible to the fake model.
    assert messages[0].content == "Hello"
    assert messages[1].content == "remember to be nice!"
    assert messages[2].content == "You are a helpful assistant.-Hello-remember to be nice!"
def test_tools_to_model_edge_with_structured_and_regular_tool_calls() -> None:
    """Test tools to model edge with structured and regular tool calls.
    Test that when there are both structured and regular tool calls, we execute regular
    and jump to END.
    """
    class WeatherResponse(BaseModel):
        """Weather response."""
        temperature: float = Field(description="Temperature in fahrenheit")
        condition: str = Field(description="Weather condition")
    @tool
    def regular_tool(query: str) -> str:
        """A regular tool that returns a string."""
        return f"Regular tool result for: {query}"
    # Create a fake model that returns both structured and regular tool calls
    class FakeModelWithBothToolCalls(FakeToolCallingModel):
        def __init__(self) -> None:
            super().__init__()
            # Single turn emitting the structured-output "tool call" alongside
            # a genuine tool call, to exercise the mixed-call routing edge.
            self.tool_calls = [
                [
                    ToolCall(
                        name="WeatherResponse",
                        args={"temperature": 72.0, "condition": "sunny"},
                        id="structured_call_1",
                    ),
                    ToolCall(
                        name="regular_tool", args={"query": "test query"}, id="regular_call_1"
                    ),
                ]
            ]
    # Create agent with both structured output and regular tools
    agent = create_agent(
        model=FakeModelWithBothToolCalls(),
        tools=[regular_tool],
        response_format=ToolStrategy(schema=WeatherResponse),
    )
    # Invoke the agent (already compiled)
    result = agent.invoke(
        {"messages": [HumanMessage("What's the weather and help me with a query?")]}
    )
    # Verify that we have the expected messages:
    # 1. Human message
    # 2. AI message with both tool calls
    # 3. Tool message from structured tool call
    # 4. Tool message from regular tool call
    messages = result["messages"]
    assert len(messages) >= 4
    # Check that we have the AI message with both tool calls
    ai_message = messages[1]
    assert isinstance(ai_message, AIMessage)
    assert len(ai_message.tool_calls) == 2
    # Check that we have a tool message from the regular tool
    tool_messages = [m for m in messages if isinstance(m, ToolMessage)]
    assert len(tool_messages) >= 1
    # The regular tool should have been executed
    regular_tool_message = next((m for m in tool_messages if m.name == "regular_tool"), None)
    assert regular_tool_message is not None
    assert "Regular tool result for: test query" in regular_tool_message.content
    # Verify that the structured response is available in the result
    assert "structured_response" in result
    assert result["structured_response"] is not None
    assert hasattr(result["structured_response"], "temperature")
    assert result["structured_response"].temperature == 72.0
    assert result["structured_response"].condition == "sunny"
def test_public_private_state_for_custom_middleware() -> None:
    """Test public and private state for custom middleware."""
    class CustomState(AgentState[Any]):
        # OmitFromInput: caller-supplied value is dropped before hooks see state.
        omit_input: Annotated[str, OmitFromInput]
        # OmitFromOutput: value is visible to hooks but stripped from the result.
        omit_output: Annotated[str, OmitFromOutput]
        # PrivateStateAttr: hidden from both input and output.
        private_state: Annotated[str, PrivateStateAttr]
    class CustomMiddleware(AgentMiddleware[CustomState]):
        state_schema: type[CustomState] = CustomState
        @override
        def before_model(self, state: AgentState[Any], runtime: Runtime) -> dict[str, Any]:
            # Input-side view: the omitted/private keys supplied by the caller
            # must not be present; omit_output survives because it is only
            # filtered on the way out.
            assert "omit_input" not in state
            assert "omit_output" in state
            assert "private_state" not in state
            return {"omit_input": "test", "omit_output": "test", "private_state": "test"}
    agent = create_agent(model=FakeToolCallingModel(), middleware=[CustomMiddleware()])
    result = agent.invoke(
        {
            "messages": [HumanMessage("Hello")],
            "omit_input": "test in",
            "private_state": "test in",
            "omit_output": "test in",
        }
    )
    # Output-side view: only omit_input (written by the hook) is returned.
    assert "omit_input" in result
    assert "omit_output" not in result
    assert "private_state" not in result
def test_runtime_injected_into_middleware() -> None:
    """Test that the runtime is injected into the middleware."""
    class CustomMiddleware(AgentMiddleware):
        def before_model(self, state: AgentState[Any], runtime: Runtime) -> None:
            # The framework must always supply a runtime to node-style hooks.
            assert runtime is not None
        def wrap_model_call(
            self,
            request: ModelRequest,
            handler: Callable[[ModelRequest], ModelResponse],
        ) -> ModelCallResult:
            # Wrap hooks receive the runtime via the request object instead.
            assert request.runtime is not None
            return handler(request)
        def after_model(self, state: AgentState[Any], runtime: Runtime) -> None:
            assert runtime is not None
    agent = create_agent(model=FakeToolCallingModel(), middleware=[CustomMiddleware()])
    agent.invoke({"messages": [HumanMessage("Hello")]})
# test setup defined at this scope bc of pydantic issues inferring the namespace of
# custom state w/in a function
class CustomState(AgentState[ResponseT], Generic[ResponseT]):
    # Extra state channel read by test_state_tool below.
    custom_state: str
@tool(description="Test the state")
def test_state_tool(
    state: Annotated[CustomState, InjectedState], tool_call_id: Annotated[str, InjectedToolCallId]
) -> str:
    """Test tool that accesses injected state."""
    # The injected state must include the custom channel declared above.
    assert "custom_state" in state
    return "success"
class CustomMiddleware(AgentMiddleware):
    # Attaching the schema here extends the agent's state with custom_state.
    state_schema = CustomState
# Module-level agent shared by test_injected_state_in_middleware_agent:
# first turn calls test_state_tool, second turn ends with no tool calls.
agent = create_agent(
    model=FakeToolCallingModel(
        tool_calls=[
            [{"args": {}, "id": "test_call_1", "name": "test_state_tool"}],
            [],
        ]
    ),
    tools=[test_state_tool],
    system_prompt="You are a helpful assistant.",
    middleware=[CustomMiddleware()],
)
@pytest.mark.skipif(
    sys.version_info >= (3, 14), reason="pydantic 2.12 namespace management not working w/ 3.14"
)
def test_injected_state_in_middleware_agent() -> None:
    """Test that custom state is properly injected into tools when using middleware."""
    # Uses the module-level `agent` defined in the setup block above.
    result = agent.invoke(
        {
            "custom_state": "I love pizza",
            "messages": [HumanMessage("Call the test state tool")],
        }
    )
    messages = result["messages"]
    assert len(messages) == 4 # Human message, AI message with tool call, tool message, AI message
    # Find the tool message
    tool_messages = [msg for msg in messages if isinstance(msg, ToolMessage)]
    assert len(tool_messages) == 1
    tool_message = tool_messages[0]
    # The tool only returns "success" if the injected state carried custom_state.
    assert tool_message.name == "test_state_tool"
    assert "success" in tool_message.content
    assert tool_message.tool_call_id == "test_call_1"
def test_jump_to_is_ephemeral() -> None:
    """`jump_to` written by hooks must never leak into observed or returned state."""

    class MyMiddleware(AgentMiddleware):
        def before_model(self, state: AgentState[Any], runtime: Runtime) -> dict[str, Any]:
            # Any previously written jump_to must already be cleared here.
            assert "jump_to" not in state
            return {"jump_to": "model"}

        def after_model(self, state: AgentState[Any], runtime: Runtime) -> dict[str, Any]:
            assert "jump_to" not in state
            return {"jump_to": "model"}

    agent = create_agent(model=FakeToolCallingModel(), middleware=[MyMiddleware()])
    final_state = agent.invoke({"messages": [HumanMessage("Hello")]})
    # Nor does it appear in the final result returned to the caller.
    assert "jump_to" not in final_state
def test_create_agent_sync_invoke_with_only_async_middleware_raises_error() -> None:
    """Test that sync invoke raises NotImplementedError when middleware is async-only."""
    class AsyncOnlyMiddleware(AgentMiddleware):
        # Only the async hook is defined; there is no sync counterpart,
        # so a synchronous run has nothing to call.
        async def awrap_model_call(
            self,
            request: ModelRequest,
            handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
        ) -> ModelCallResult:
            return await handler(request)
    agent = create_agent(
        model=FakeToolCallingModel(),
        tools=[],
        system_prompt="You are a helpful assistant.",
        middleware=[AsyncOnlyMiddleware()],
    )
    with pytest.raises(NotImplementedError):
        agent.invoke({"messages": [HumanMessage("hello")]})
def test_create_agent_sync_invoke_with_mixed_middleware() -> None:
    """Test that sync invoke works with mixed sync/async middleware when sync versions exist."""
    calls = []
    class MixedMiddleware(AgentMiddleware):
        # Defines both sync and async variants of each hook; the invocation
        # mode (invoke vs ainvoke) selects which variant actually runs.
        def before_model(self, state: AgentState[Any], runtime: Runtime) -> None:
            calls.append("MixedMiddleware.before_model")
        async def abefore_model(self, state: AgentState[Any], runtime: Runtime) -> None:
            calls.append("MixedMiddleware.abefore_model")
        def wrap_model_call(
            self,
            request: ModelRequest,
            handler: Callable[[ModelRequest], ModelResponse],
        ) -> ModelCallResult:
            calls.append("MixedMiddleware.wrap_model_call")
            return handler(request)
        async def awrap_model_call(
            self,
            request: ModelRequest,
            handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
        ) -> ModelCallResult:
            calls.append("MixedMiddleware.awrap_model_call")
            return await handler(request)
    agent = create_agent(
        model=FakeToolCallingModel(),
        tools=[],
        system_prompt="You are a helpful assistant.",
        middleware=[MixedMiddleware()],
    )
    agent.invoke({"messages": [HumanMessage("hello")]})
    # In sync mode, only sync methods should be called
    assert calls == [
        "MixedMiddleware.before_model",
        "MixedMiddleware.wrap_model_call",
    ]
# =============================================================================
# Async Middleware Tests
# =============================================================================
async def test_create_agent_async_invoke() -> None:
    """Test async invoke with async middleware hooks."""
    calls = []
    class AsyncMiddleware(AgentMiddleware):
        async def abefore_model(self, state: AgentState[Any], runtime: Runtime) -> None:
            calls.append("AsyncMiddleware.abefore_model")
        async def awrap_model_call(
            self,
            request: ModelRequest,
            handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
        ) -> ModelCallResult:
            calls.append("AsyncMiddleware.awrap_model_call")
            # Mutate the request to prove the wrap hook ran before the model.
            request.messages.append(HumanMessage("async middleware message"))
            return await handler(request)
        async def aafter_model(self, state: AgentState[Any], runtime: Runtime) -> None:
            calls.append("AsyncMiddleware.aafter_model")
    @tool
    def my_tool_async(value: str) -> str:
        """A great tool."""
        calls.append("my_tool_async")
        return value.upper()
    # First model turn calls the tool; second turn ends with no tool calls.
    agent = create_agent(
        model=FakeToolCallingModel(
            tool_calls=[
                [{"args": {"value": "yo"}, "id": "1", "name": "my_tool_async"}],
                [],
            ]
        ),
        tools=[my_tool_async],
        system_prompt="You are a helpful assistant.",
        middleware=[AsyncMiddleware()],
    )
    result = await agent.ainvoke({"messages": [HumanMessage("hello")]})
    # Should have:
    # 1. Original hello message
    # 2. Async middleware message (first invoke)
    # 3. AI message with tool call
    # 4. Tool message
    # 5. Async middleware message (second invoke)
    # 6. Final AI message
    assert len(result["messages"]) == 6
    assert result["messages"][0].content == "hello"
    assert result["messages"][1].content == "async middleware message"
    # The full hook cycle runs once per model call (before tool and after).
    assert calls == [
        "AsyncMiddleware.abefore_model",
        "AsyncMiddleware.awrap_model_call",
        "AsyncMiddleware.aafter_model",
        "my_tool_async",
        "AsyncMiddleware.abefore_model",
        "AsyncMiddleware.awrap_model_call",
        "AsyncMiddleware.aafter_model",
    ]
async def test_create_agent_async_invoke_multiple_middleware() -> None:
    """Test async invoke with multiple async middleware hooks."""
    # Records hook execution order across both middleware instances.
    calls = []
    class AsyncMiddlewareOne(AgentMiddleware):
        async def abefore_model(self, state: AgentState[Any], runtime: Runtime) -> None:
            calls.append("AsyncMiddlewareOne.abefore_model")
        async def awrap_model_call(
            self,
            request: ModelRequest,
            handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
        ) -> ModelCallResult:
            calls.append("AsyncMiddlewareOne.awrap_model_call")
            return await handler(request)
        async def aafter_model(self, state: AgentState[Any], runtime: Runtime) -> None:
            calls.append("AsyncMiddlewareOne.aafter_model")
    class AsyncMiddlewareTwo(AgentMiddleware):
        async def abefore_model(self, state: AgentState[Any], runtime: Runtime) -> None:
            calls.append("AsyncMiddlewareTwo.abefore_model")
        async def awrap_model_call(
            self,
            request: ModelRequest,
            handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
        ) -> ModelCallResult:
            calls.append("AsyncMiddlewareTwo.awrap_model_call")
            return await handler(request)
        async def aafter_model(self, state: AgentState[Any], runtime: Runtime) -> None:
            calls.append("AsyncMiddlewareTwo.aafter_model")
    agent = create_agent(
        model=FakeToolCallingModel(),
        tools=[],
        system_prompt="You are a helpful assistant.",
        middleware=[AsyncMiddlewareOne(), AsyncMiddlewareTwo()],
    )
    await agent.ainvoke({"messages": [HumanMessage("hello")]})
    # before/wrap hooks run in registration order; after hooks unwind in
    # reverse order (Two before One), as the asserted sequence shows.
    assert calls == [
        "AsyncMiddlewareOne.abefore_model",
        "AsyncMiddlewareTwo.abefore_model",
        "AsyncMiddlewareOne.awrap_model_call",
        "AsyncMiddlewareTwo.awrap_model_call",
        "AsyncMiddlewareTwo.aafter_model",
        "AsyncMiddlewareOne.aafter_model",
    ]
async def test_create_agent_async_jump() -> None:
    """Test async invoke with async middleware using jump_to."""
    # Records which hooks and tools actually run.
    calls = []
    class AsyncMiddlewareOne(AgentMiddleware):
        async def abefore_model(self, state: AgentState[Any], runtime: Runtime) -> None:
            calls.append("AsyncMiddlewareOne.abefore_model")
    class AsyncMiddlewareTwo(AgentMiddleware):
        # Declares the permitted jump target so the framework wires the edge.
        @hook_config(can_jump_to=["end"])
        async def abefore_model(self, state: AgentState[Any], runtime: Runtime) -> dict[str, Any]:
            calls.append("AsyncMiddlewareTwo.abefore_model")
            return {"jump_to": "end"}
    @tool
    def my_tool_jump(value: str) -> str:
        """A great tool."""
        calls.append("my_tool_jump")
        return value.upper()
    agent = create_agent(
        model=FakeToolCallingModel(
            tool_calls=[[ToolCall(id="1", name="my_tool_jump", args={"value": "yo"})]],
        ),
        tools=[my_tool_jump],
        system_prompt="You are a helpful assistant.",
        middleware=[AsyncMiddlewareOne(), AsyncMiddlewareTwo()],
    )
    result = await agent.ainvoke({"messages": []})
    # Jumping to "end" from before_model skips the model and the tool entirely:
    # no new messages, and neither the model wrap nor my_tool_jump is recorded.
    assert result == {"messages": []}
    assert calls == ["AsyncMiddlewareOne.abefore_model", "AsyncMiddlewareTwo.abefore_model"]
async def test_create_agent_mixed_sync_async_middleware_async_invoke() -> None:
    """Test async invoke with mixed sync and async middleware."""
    # Records hook execution order across both middleware instances.
    calls = []
    # Provides sync before/after hooks plus BOTH wrap variants; in async mode
    # the async awrap_model_call is the one expected to run.
    class MostlySyncMiddleware(AgentMiddleware):
        def before_model(self, state: AgentState[Any], runtime: Runtime) -> None:
            calls.append("MostlySyncMiddleware.before_model")
        def wrap_model_call(
            self,
            request: ModelRequest,
            handler: Callable[[ModelRequest], ModelResponse],
        ) -> ModelCallResult:
            calls.append("MostlySyncMiddleware.wrap_model_call")
            return handler(request)
        async def awrap_model_call(
            self,
            request: ModelRequest,
            handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
        ) -> ModelCallResult:
            calls.append("MostlySyncMiddleware.awrap_model_call")
            return await handler(request)
        def after_model(self, state: AgentState[Any], runtime: Runtime) -> None:
            calls.append("MostlySyncMiddleware.after_model")
    # Async-only middleware.
    class AsyncMiddleware(AgentMiddleware):
        async def abefore_model(self, state: AgentState[Any], runtime: Runtime) -> None:
            calls.append("AsyncMiddleware.abefore_model")
        async def awrap_model_call(
            self,
            request: ModelRequest,
            handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
        ) -> ModelCallResult:
            calls.append("AsyncMiddleware.awrap_model_call")
            return await handler(request)
        async def aafter_model(self, state: AgentState[Any], runtime: Runtime) -> None:
            calls.append("AsyncMiddleware.aafter_model")
    agent = create_agent(
        model=FakeToolCallingModel(),
        tools=[],
        system_prompt="You are a helpful assistant.",
        middleware=[MostlySyncMiddleware(), AsyncMiddleware()],
    )
    await agent.ainvoke({"messages": [HumanMessage("hello")]})
    # In async mode, both sync and async middleware should work
    # Note: Sync wrap_model_call is not called when running in async mode,
    # as the async version is preferred
    assert calls == [
        "MostlySyncMiddleware.before_model",
        "AsyncMiddleware.abefore_model",
        "MostlySyncMiddleware.awrap_model_call",
        "AsyncMiddleware.awrap_model_call",
        "AsyncMiddleware.aafter_model",
        "MostlySyncMiddleware.after_model",
    ]
# =============================================================================
# Before/After Agent Hook Tests
# =============================================================================
class TestAgentMiddlewareHooks:
    """Test before_agent and after_agent middleware hooks."""
    @pytest.mark.parametrize("is_async", [False, True])
    @pytest.mark.parametrize("hook_type", ["before", "after"])
    async def test_hook_execution(self, *, is_async: bool, hook_type: str) -> None:
        """Test that agent hooks are called in both sync and async modes."""
        execution_log: list[str] = []
        # Build the decorated hook matching the (is_async, hook_type) combination;
        # all four variants record the same two log entries.
        if is_async:
            if hook_type == "before":
                @before_agent
                async def log_hook(
                    state: AgentState[Any], *_args: Any, **_kwargs: Any
                ) -> dict[str, Any] | None:
                    execution_log.append(f"{hook_type}_agent_called")
                    execution_log.append(f"message_count: {len(state['messages'])}")
                    return None
            else:
                @after_agent
                async def log_hook(
                    state: AgentState[Any], *_args: Any, **_kwargs: Any
                ) -> dict[str, Any] | None:
                    execution_log.append(f"{hook_type}_agent_called")
                    execution_log.append(f"message_count: {len(state['messages'])}")
                    return None
        elif hook_type == "before":
            @before_agent
            def log_hook(
                state: AgentState[Any], *_args: Any, **_kwargs: Any
            ) -> dict[str, Any] | None:
                execution_log.append(f"{hook_type}_agent_called")
                execution_log.append(f"message_count: {len(state['messages'])}")
                return None
        else:
            @after_agent
            def log_hook(
                state: AgentState[Any], *_args: Any, **_kwargs: Any
            ) -> dict[str, Any] | None:
                execution_log.append(f"{hook_type}_agent_called")
                execution_log.append(f"message_count: {len(state['messages'])}")
                return None
        model = GenericFakeChatModel(messages=iter([AIMessage(content="Response")]))
        agent = create_agent(model=model, tools=[], middleware=[log_hook])
        # Drive the agent through the matching entry point.
        if is_async:
            await agent.ainvoke({"messages": [HumanMessage("Hi")]})
        else:
            agent.invoke({"messages": [HumanMessage("Hi")]})
        assert f"{hook_type}_agent_called" in execution_log
        assert any("message_count:" in log for log in execution_log)
    @pytest.mark.parametrize("is_async", [False, True])
    @pytest.mark.parametrize("hook_type", ["before", "after"])
    async def test_hook_with_class_inheritance(self, *, is_async: bool, hook_type: str) -> None:
        """Test agent hooks using class inheritance in both sync and async modes."""
        execution_log: list[str] = []
        # Async variant overrides the a-prefixed hook methods...
        class AsyncCustomMiddleware(AgentMiddleware):
            async def abefore_agent(
                self, state: AgentState[Any], runtime: Runtime
            ) -> dict[str, Any] | None:
                if hook_type == "before":
                    execution_log.append("hook_called")
                return None
            async def aafter_agent(
                self, state: AgentState[Any], runtime: Runtime
            ) -> dict[str, Any] | None:
                if hook_type == "after":
                    execution_log.append("hook_called")
                return None
        # ...sync variant overrides the plain ones.
        class CustomMiddleware(AgentMiddleware):
            def before_agent(
                self, state: AgentState[Any], runtime: Runtime
            ) -> dict[str, Any] | None:
                if hook_type == "before":
                    execution_log.append("hook_called")
                return None
            def after_agent(
                self, state: AgentState[Any], runtime: Runtime
            ) -> dict[str, Any] | None:
                if hook_type == "after":
                    execution_log.append("hook_called")
                return None
        middleware = AsyncCustomMiddleware() if is_async else CustomMiddleware()
        model = GenericFakeChatModel(messages=iter([AIMessage(content="Response")]))
        agent = create_agent(model=model, tools=[], middleware=[middleware])
        if is_async:
            await agent.ainvoke({"messages": [HumanMessage("Test")]})
        else:
            agent.invoke({"messages": [HumanMessage("Test")]})
        assert "hook_called" in execution_log
class TestAgentHooksCombined:
    """Test before_agent and after_agent hooks working together."""
    @pytest.mark.parametrize("is_async", [False, True])
    async def test_execution_order(self, *, is_async: bool) -> None:
        """Test that before_agent executes before after_agent in both sync and async modes."""
        execution_log: list[str] = []
        # Define sync or async hook functions depending on the parametrization.
        if is_async:
            @before_agent
            async def log_before(*_args: Any, **_kwargs: Any) -> None:
                execution_log.append("before")
            @after_agent
            async def log_after(*_args: Any, **_kwargs: Any) -> None:
                execution_log.append("after")
        else:
            @before_agent
            def log_before(*_args: Any, **_kwargs: Any) -> None:
                execution_log.append("before")
            @after_agent
            def log_after(*_args: Any, **_kwargs: Any) -> None:
                execution_log.append("after")
        model = GenericFakeChatModel(messages=iter([AIMessage(content="Response")]))
        agent = create_agent(model=model, tools=[], middleware=[log_before, log_after])
        if is_async:
            await agent.ainvoke({"messages": [HumanMessage("Test")]})
        else:
            agent.invoke({"messages": [HumanMessage("Test")]})
        assert execution_log == ["before", "after"]
    def test_state_passthrough(self) -> None:
        """Test that state modifications in before_agent are visible to after_agent."""
        @before_agent
        def modify_in_before(*_args: Any, **_kwargs: Any) -> dict[str, Any]:
            return {"messages": [HumanMessage("Added by before_agent")]}
        model = GenericFakeChatModel(messages=iter([AIMessage(content="Response")]))
        agent = create_agent(model=model, tools=[], middleware=[modify_in_before])
        result = agent.invoke({"messages": [HumanMessage("Original")]})
        message_contents = [msg.content for msg in result["messages"]]
        # The injected message lands right after the original human message.
        assert message_contents[1] == "Added by before_agent"
    def test_multiple_middleware_instances(self) -> None:
        """Test multiple before_agent and after_agent middleware instances."""
        execution_log = []
        @before_agent
        def before_one(*_args: Any, **_kwargs: Any) -> None:
            execution_log.append("before_1")
        @before_agent
        def before_two(*_args: Any, **_kwargs: Any) -> None:
            execution_log.append("before_2")
        @after_agent
        def after_one(*_args: Any, **_kwargs: Any) -> None:
            execution_log.append("after_1")
        @after_agent
        def after_two(*_args: Any, **_kwargs: Any) -> None:
            execution_log.append("after_2")
        model = GenericFakeChatModel(messages=iter([AIMessage(content="Response")]))
        agent = create_agent(
            model=model, tools=[], middleware=[before_one, before_two, after_one, after_two]
        )
        agent.invoke({"messages": [HumanMessage("Test")]})
        # before hooks run in registration order; after hooks unwind in reverse
        # registration order (note after_2 precedes after_1).
        assert execution_log == ["before_1", "before_2", "after_2", "after_1"]
    def test_agent_hooks_run_once_with_multiple_model_calls(self) -> None:
        """Test that before_agent and after_agent run only once per thread.
        This test verifies that agent-level hooks (before_agent, after_agent) execute
        exactly once per agent invocation, regardless of how many tool calling loops occur.
        This is different from model-level hooks (before_model, after_model) which run
        on every model invocation within the tool calling loop.
        """
        execution_log = []
        @tool
        def sample_tool_agent(query: str) -> str:
            """A sample tool for testing."""
            return f"Result for: {query}"
        @before_agent
        def log_before_agent(*_args: Any, **_kwargs: Any) -> None:
            execution_log.append("before_agent")
        @before_model
        def log_before_model(*_args: Any, **_kwargs: Any) -> None:
            execution_log.append("before_model")
        @after_agent
        def log_after_agent(*_args: Any, **_kwargs: Any) -> None:
            execution_log.append("after_agent")
        @after_model
        def log_after_model(*_args: Any, **_kwargs: Any) -> None:
            execution_log.append("after_model")
        # Model will call a tool twice, then respond with final answer
        # This creates 3 model invocations total, but agent hooks should still run once
        model = FakeToolCallingModel(
            tool_calls=[
                [{"name": "sample_tool_agent", "args": {"query": "first"}, "id": "1"}],
                [{"name": "sample_tool_agent", "args": {"query": "second"}, "id": "2"}],
                [],  # Third call returns no tool calls (final answer)
            ]
        )
        agent = create_agent(
            model=model,
            tools=[sample_tool_agent],
            middleware=[log_before_agent, log_before_model, log_after_model, log_after_agent],
        )
        agent.invoke(
            {"messages": [HumanMessage("Test")]}, config={"configurable": {"thread_id": "abc"}}
        )
        # One agent-level pair wrapping three model-level pairs.
        assert execution_log == [
            "before_agent",
            "before_model",
            "after_model",
            "before_model",
            "after_model",
            "before_model",
            "after_model",
            "after_agent",
        ]
        # A second invocation on the same thread appends one full fresh cycle.
        agent.invoke(
            {"messages": [HumanMessage("Test")]}, config={"configurable": {"thread_id": "abc"}}
        )
        assert execution_log == [
            "before_agent",
            "before_model",
            "after_model",
            "before_model",
            "after_model",
            "before_model",
            "after_model",
            "after_agent",
            "before_agent",
            "before_model",
            "after_model",
            "before_model",
            "after_model",
            "before_model",
            "after_model",
            "after_agent",
        ]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/tests/unit_tests/agents/middleware/core/test_framework.py",
"license": "MIT License",
"lines": 839,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/langchain_v1/tests/unit_tests/agents/middleware/implementations/test_human_in_the_loop.py | import re
from typing import Any
from unittest.mock import patch
import pytest
from langchain_core.messages import AIMessage, HumanMessage, ToolCall, ToolMessage
from langgraph.runtime import Runtime
from langchain.agents.middleware import InterruptOnConfig
from langchain.agents.middleware.human_in_the_loop import (
Action,
HumanInTheLoopMiddleware,
)
from langchain.agents.middleware.types import AgentState
def test_human_in_the_loop_middleware_initialization() -> None:
    """Constructor should store the tool config and description prefix verbatim."""
    tool_config = {"test_tool": {"allowed_decisions": ["approve", "edit", "reject"]}}
    middleware = HumanInTheLoopMiddleware(
        interrupt_on=tool_config,
        description_prefix="Custom prefix",
    )
    assert middleware.interrupt_on == tool_config
    assert middleware.description_prefix == "Custom prefix"
def test_human_in_the_loop_middleware_no_interrupts_needed() -> None:
    """after_model should be a no-op whenever nothing needs human review."""
    middleware = HumanInTheLoopMiddleware(
        interrupt_on={"test_tool": {"allowed_decisions": ["approve", "edit", "reject"]}}
    )
    # Empty conversation: nothing to review.
    assert middleware.after_model(AgentState[Any](messages=[]), Runtime()) is None
    # Plain AI reply without tool calls: nothing to review.
    chat_only = AgentState[Any](
        messages=[HumanMessage(content="Hello"), AIMessage(content="Hi there")]
    )
    assert middleware.after_model(chat_only, Runtime()) is None
    # Tool calls only for tools that are not configured to interrupt.
    ai_message = AIMessage(
        content="I'll help you",
        tool_calls=[{"name": "other_tool", "args": {"input": "test"}, "id": "1"}],
    )
    unconfigured = AgentState[Any](messages=[HumanMessage(content="Hello"), ai_message])
    assert middleware.after_model(unconfigured, Runtime()) is None
def test_human_in_the_loop_middleware_single_tool_accept() -> None:
    """An 'approve' decision should pass the AI message through unchanged."""
    middleware = HumanInTheLoopMiddleware(
        interrupt_on={"test_tool": {"allowed_decisions": ["approve", "edit", "reject"]}}
    )
    ai_message = AIMessage(
        content="I'll help you",
        tool_calls=[{"name": "test_tool", "args": {"input": "test"}, "id": "1"}],
    )
    state = AgentState[Any](messages=[HumanMessage(content="Hello"), ai_message])
    with patch(
        "langchain.agents.middleware.human_in_the_loop.interrupt",
        return_value={"decisions": [{"type": "approve"}]},
    ):
        result = middleware.after_model(state, Runtime())
        assert result is not None
        assert "messages" in result
        assert len(result["messages"]) == 1
        assert result["messages"][0] == ai_message
        assert result["messages"][0].tool_calls == ai_message.tool_calls
        # Once the tool call has a matching ToolMessage it is no longer
        # "hanging", so a second pass finds nothing to interrupt on.
        state["messages"].append(
            ToolMessage(content="Tool message", name="test_tool", tool_call_id="1")
        )
        state["messages"].append(AIMessage(content="test_tool called with result: Tool message"))
        assert middleware.after_model(state, Runtime()) is None
def test_human_in_the_loop_middleware_single_tool_edit() -> None:
    """An 'edit' decision should replace the tool args but keep the call id."""
    middleware = HumanInTheLoopMiddleware(
        interrupt_on={"test_tool": {"allowed_decisions": ["approve", "edit", "reject"]}}
    )
    ai_message = AIMessage(
        content="I'll help you",
        tool_calls=[{"name": "test_tool", "args": {"input": "test"}, "id": "1"}],
    )
    state = AgentState[Any](messages=[HumanMessage(content="Hello"), ai_message])
    edit_decision = {
        "type": "edit",
        "edited_action": Action(name="test_tool", args={"input": "edited"}),
    }
    with patch(
        "langchain.agents.middleware.human_in_the_loop.interrupt",
        return_value={"decisions": [edit_decision]},
    ):
        result = middleware.after_model(state, Runtime())
    assert result is not None
    assert "messages" in result
    assert len(result["messages"]) == 1
    assert result["messages"][0].tool_calls[0]["args"] == {"input": "edited"}
    assert result["messages"][0].tool_calls[0]["id"] == "1"  # ID should be preserved
def test_human_in_the_loop_middleware_single_tool_response() -> None:
    """A 'reject' decision should synthesize a ToolMessage carrying the custom text."""
    middleware = HumanInTheLoopMiddleware(
        interrupt_on={"test_tool": {"allowed_decisions": ["approve", "edit", "reject"]}}
    )
    ai_message = AIMessage(
        content="I'll help you",
        tool_calls=[{"name": "test_tool", "args": {"input": "test"}, "id": "1"}],
    )
    state = AgentState[Any](messages=[HumanMessage(content="Hello"), ai_message])
    rejection = {"decisions": [{"type": "reject", "message": "Custom response message"}]}
    with patch(
        "langchain.agents.middleware.human_in_the_loop.interrupt", return_value=rejection
    ):
        result = middleware.after_model(state, Runtime())
    assert result is not None
    assert "messages" in result
    assert len(result["messages"]) == 2
    ai_out, tool_out = result["messages"]
    assert isinstance(ai_out, AIMessage)
    assert isinstance(tool_out, ToolMessage)
    # The synthetic ToolMessage answers the rejected call with the human's text.
    assert tool_out.content == "Custom response message"
    assert tool_out.name == "test_tool"
    assert tool_out.tool_call_id == "1"
def test_human_in_the_loop_middleware_multiple_tools_mixed_responses() -> None:
    """Mixed approve/reject decisions across two reviewed tool calls."""
    middleware = HumanInTheLoopMiddleware(
        interrupt_on={
            "get_forecast": {"allowed_decisions": ["approve", "edit", "reject"]},
            "get_temperature": {"allowed_decisions": ["approve", "edit", "reject"]},
        }
    )
    ai_message = AIMessage(
        content="I'll help you with weather",
        tool_calls=[
            {"name": "get_forecast", "args": {"location": "San Francisco"}, "id": "1"},
            {"name": "get_temperature", "args": {"location": "San Francisco"}, "id": "2"},
        ],
    )
    state = AgentState[Any](messages=[HumanMessage(content="What's the weather?"), ai_message])
    mixed_decisions = {
        "decisions": [
            {"type": "approve"},
            {"type": "reject", "message": "User rejected this tool call"},
        ]
    }
    with patch(
        "langchain.agents.middleware.human_in_the_loop.interrupt", return_value=mixed_decisions
    ):
        result = middleware.after_model(state, Runtime())
    assert result is not None
    assert "messages" in result
    # One AI message (both tool calls retained) plus a ToolMessage for the rejection.
    assert len(result["messages"]) == 2
    updated_ai_message, tool_message = result["messages"]
    assert len(updated_ai_message.tool_calls) == 2
    assert updated_ai_message.tool_calls[0]["name"] == "get_forecast"  # approved
    assert updated_ai_message.tool_calls[1]["name"] == "get_temperature"  # rejected
    assert isinstance(tool_message, ToolMessage)
    assert tool_message.content == "User rejected this tool call"
    assert tool_message.name == "get_temperature"
def test_human_in_the_loop_middleware_multiple_tools_edit_responses() -> None:
    """Edit decisions for two tool calls replace both arg dicts, keeping ids."""
    middleware = HumanInTheLoopMiddleware(
        interrupt_on={
            "get_forecast": {"allowed_decisions": ["approve", "edit", "reject"]},
            "get_temperature": {"allowed_decisions": ["approve", "edit", "reject"]},
        }
    )
    ai_message = AIMessage(
        content="I'll help you with weather",
        tool_calls=[
            {"name": "get_forecast", "args": {"location": "San Francisco"}, "id": "1"},
            {"name": "get_temperature", "args": {"location": "San Francisco"}, "id": "2"},
        ],
    )
    state = AgentState[Any](messages=[HumanMessage(content="What's the weather?"), ai_message])
    edit_decisions = {
        "decisions": [
            {
                "type": "edit",
                "edited_action": Action(name="get_forecast", args={"location": "New York"}),
            },
            {
                "type": "edit",
                "edited_action": Action(name="get_temperature", args={"location": "New York"}),
            },
        ]
    }
    with patch(
        "langchain.agents.middleware.human_in_the_loop.interrupt", return_value=edit_decisions
    ):
        result = middleware.after_model(state, Runtime())
    assert result is not None
    assert "messages" in result
    assert len(result["messages"]) == 1
    updated_ai_message = result["messages"][0]
    # Each call's args are replaced while the original ids are untouched.
    for position, call_id in enumerate(["1", "2"]):
        assert updated_ai_message.tool_calls[position]["args"] == {"location": "New York"}
        assert updated_ai_message.tool_calls[position]["id"] == call_id
def test_human_in_the_loop_middleware_edit_with_modified_args() -> None:
    """An edit decision carrying new args swaps them into the tool call."""
    middleware = HumanInTheLoopMiddleware(
        interrupt_on={"test_tool": {"allowed_decisions": ["approve", "edit", "reject"]}}
    )
    ai_message = AIMessage(
        content="I'll help you",
        tool_calls=[{"name": "test_tool", "args": {"input": "test"}, "id": "1"}],
    )
    state = AgentState[Any](messages=[HumanMessage(content="Hello"), ai_message])
    decision = {
        "type": "edit",
        "edited_action": Action(name="test_tool", args={"input": "modified"}),
    }
    with patch(
        "langchain.agents.middleware.human_in_the_loop.interrupt",
        return_value={"decisions": [decision]},
    ):
        result = middleware.after_model(state, Runtime())
    assert result is not None
    assert "messages" in result
    assert len(result["messages"]) == 1
    updated_ai_message = result["messages"][0]
    # Human-edited args are swapped in; the call id is preserved.
    assert updated_ai_message.tool_calls[0]["args"] == {"input": "modified"}
    assert updated_ai_message.tool_calls[0]["id"] == "1"
def test_human_in_the_loop_middleware_unknown_response_type() -> None:
    """A decision type outside the allowed set raises a descriptive ValueError."""
    middleware = HumanInTheLoopMiddleware(
        interrupt_on={"test_tool": {"allowed_decisions": ["approve", "edit", "reject"]}}
    )
    ai_message = AIMessage(
        content="I'll help you",
        tool_calls=[{"name": "test_tool", "args": {"input": "test"}, "id": "1"}],
    )
    state = AgentState[Any](messages=[HumanMessage(content="Hello"), ai_message])
    expected_error = re.escape(
        "Unexpected human decision: {'type': 'unknown'}. "
        "Decision type 'unknown' is not allowed for tool 'test_tool'. "
        "Expected one of ['approve', 'edit', 'reject'] based on the tool's "
        "configuration."
    )
    with (
        patch(
            "langchain.agents.middleware.human_in_the_loop.interrupt",
            return_value={"decisions": [{"type": "unknown"}]},
        ),
        pytest.raises(ValueError, match=expected_error),
    ):
        middleware.after_model(state, Runtime())
def test_human_in_the_loop_middleware_disallowed_action() -> None:
    """An edit decision must be rejected when the tool only allows approve/reject."""
    middleware = HumanInTheLoopMiddleware(
        interrupt_on={"test_tool": {"allowed_decisions": ["approve", "reject"]}}
    )
    ai_message = AIMessage(
        content="I'll help you",
        tool_calls=[{"name": "test_tool", "args": {"input": "test"}, "id": "1"}],
    )
    state = AgentState[Any](messages=[HumanMessage(content="Hello"), ai_message])
    edit_decision = {
        "type": "edit",
        "edited_action": Action(name="test_tool", args={"input": "modified"}),
    }
    expected_error = re.escape(
        "Unexpected human decision: {'type': 'edit', 'edited_action': "
        "{'name': 'test_tool', 'args': {'input': 'modified'}}}. "
        "Decision type 'edit' is not allowed for tool 'test_tool'. "
        "Expected one of ['approve', 'reject'] based on the tool's "
        "configuration."
    )
    with (
        patch(
            "langchain.agents.middleware.human_in_the_loop.interrupt",
            return_value={"decisions": [edit_decision]},
        ),
        pytest.raises(ValueError, match=expected_error),
    ):
        middleware.after_model(state, Runtime())
def test_human_in_the_loop_middleware_mixed_auto_approved_and_interrupt() -> None:
    """Unconfigured tool calls pass through while configured ones are reviewed."""
    middleware = HumanInTheLoopMiddleware(
        interrupt_on={"interrupt_tool": {"allowed_decisions": ["approve", "edit", "reject"]}}
    )
    ai_message = AIMessage(
        content="I'll help you",
        tool_calls=[
            {"name": "auto_tool", "args": {"input": "auto"}, "id": "1"},
            {"name": "interrupt_tool", "args": {"input": "interrupt"}, "id": "2"},
        ],
    )
    state = AgentState[Any](messages=[HumanMessage(content="Hello"), ai_message])
    with patch(
        "langchain.agents.middleware.human_in_the_loop.interrupt",
        return_value={"decisions": [{"type": "approve"}]},
    ):
        result = middleware.after_model(state, Runtime())
    assert result is not None
    assert "messages" in result
    assert len(result["messages"]) == 1
    updated_ai_message = result["messages"][0]
    # Both calls survive, in their original order: auto-approved then reviewed.
    assert len(updated_ai_message.tool_calls) == 2
    assert [tc["name"] for tc in updated_ai_message.tool_calls] == [
        "auto_tool",
        "interrupt_tool",
    ]
def test_human_in_the_loop_middleware_interrupt_request_structure() -> None:
    """The payload handed to interrupt() must carry action requests and review configs."""
    middleware = HumanInTheLoopMiddleware(
        interrupt_on={"test_tool": {"allowed_decisions": ["approve", "edit", "reject"]}},
        description_prefix="Custom prefix",
    )
    ai_message = AIMessage(
        content="I'll help you",
        tool_calls=[{"name": "test_tool", "args": {"input": "test", "location": "SF"}, "id": "1"}],
    )
    state = AgentState[Any](messages=[HumanMessage(content="Hello"), ai_message])
    seen_requests: list[Any] = []
    def record_and_approve(request: Any) -> dict[str, Any]:
        # Capture the interrupt payload, then approve so the flow completes.
        seen_requests.append(request)
        return {"decisions": [{"type": "approve"}]}
    with patch(
        "langchain.agents.middleware.human_in_the_loop.interrupt", side_effect=record_and_approve
    ):
        middleware.after_model(state, Runtime())
    assert len(seen_requests) == 1
    request = seen_requests[0]
    assert "action_requests" in request
    assert "review_configs" in request
    assert len(request["action_requests"]) == 1
    action_request = request["action_requests"][0]
    assert action_request["name"] == "test_tool"
    assert action_request["args"] == {"input": "test", "location": "SF"}
    description = action_request["description"]
    assert "Custom prefix" in description
    assert "Tool: test_tool" in description
    assert "Args: {'input': 'test', 'location': 'SF'}" in description
    assert len(request["review_configs"]) == 1
    review_config = request["review_configs"][0]
    assert review_config["action_name"] == "test_tool"
    assert review_config["allowed_decisions"] == ["approve", "edit", "reject"]
def test_human_in_the_loop_middleware_boolean_configs() -> None:
    """Boolean configs: True enables all decision types, False disables review."""
    middleware = HumanInTheLoopMiddleware(interrupt_on={"test_tool": True})
    ai_message = AIMessage(
        content="I'll help you",
        tool_calls=[{"name": "test_tool", "args": {"input": "test"}, "id": "1"}],
    )
    state = AgentState[Any](messages=[HumanMessage(content="Hello"), ai_message])
    interrupt_target = "langchain.agents.middleware.human_in_the_loop.interrupt"
    # With True, an approve passes the tool call through untouched...
    with patch(interrupt_target, return_value={"decisions": [{"type": "approve"}]}):
        result = middleware.after_model(state, Runtime())
    assert result is not None
    assert "messages" in result
    assert len(result["messages"]) == 1
    assert result["messages"][0].tool_calls == ai_message.tool_calls
    # ...and an edit rewrites the args.
    edit_payload = {
        "decisions": [
            {
                "type": "edit",
                "edited_action": Action(name="test_tool", args={"input": "edited"}),
            }
        ]
    }
    with patch(interrupt_target, return_value=edit_payload):
        result = middleware.after_model(state, Runtime())
    assert result is not None
    assert "messages" in result
    assert len(result["messages"]) == 1
    assert result["messages"][0].tool_calls[0]["args"] == {"input": "edited"}
    # With False, the tool is never interrupted at all.
    middleware = HumanInTheLoopMiddleware(interrupt_on={"test_tool": False})
    assert middleware.after_model(state, Runtime()) is None
def test_human_in_the_loop_middleware_sequence_mismatch() -> None:
    """Resuming with the wrong number of decisions raises a ValueError."""
    middleware = HumanInTheLoopMiddleware(interrupt_on={"test_tool": True})
    ai_message = AIMessage(
        content="I'll help you",
        tool_calls=[{"name": "test_tool", "args": {"input": "test"}, "id": "1"}],
    )
    state = AgentState[Any](messages=[HumanMessage(content="Hello"), ai_message])
    interrupt_target = "langchain.agents.middleware.human_in_the_loop.interrupt"
    # Zero decisions for one hanging tool call.
    with (
        patch(interrupt_target, return_value={"decisions": []}),
        pytest.raises(
            ValueError,
            match=re.escape(
                "Number of human decisions (0) does not match number of hanging tool calls (1)."
            ),
        ),
    ):
        middleware.after_model(state, Runtime())
    # Two decisions for one hanging tool call.
    surplus = {"decisions": [{"type": "approve"}, {"type": "approve"}]}
    with (
        patch(interrupt_target, return_value=surplus),
        pytest.raises(
            ValueError,
            match=re.escape(
                "Number of human decisions (2) does not match number of hanging tool calls (1)."
            ),
        ),
    ):
        middleware.after_model(state, Runtime())
def test_human_in_the_loop_middleware_description_as_callable() -> None:
    """The description field may be a static string or a per-call callable."""
    def custom_description(
        tool_call: ToolCall, state: AgentState[Any], runtime: Runtime[None]
    ) -> str:
        """Generate a custom description."""
        return f"Custom: {tool_call['name']} with args {tool_call['args']}"
    middleware = HumanInTheLoopMiddleware(
        interrupt_on={
            "tool_with_callable": InterruptOnConfig(
                allowed_decisions=["approve"],
                description=custom_description,
            ),
            "tool_with_string": InterruptOnConfig(
                allowed_decisions=["approve"],
                description="Static description",
            ),
        }
    )
    ai_message = AIMessage(
        content="I'll help you",
        tool_calls=[
            {"name": "tool_with_callable", "args": {"x": 1}, "id": "1"},
            {"name": "tool_with_string", "args": {"y": 2}, "id": "2"},
        ],
    )
    state = AgentState[Any](messages=[HumanMessage(content="Hello"), ai_message])
    seen_requests: list[Any] = []
    def record_and_approve(request: Any) -> dict[str, Any]:
        # Capture the interrupt payload and approve both calls.
        seen_requests.append(request)
        return {"decisions": [{"type": "approve"}, {"type": "approve"}]}
    with patch(
        "langchain.agents.middleware.human_in_the_loop.interrupt", side_effect=record_and_approve
    ):
        middleware.after_model(state, Runtime())
    assert len(seen_requests) == 1
    assert "action_requests" in seen_requests[0]
    action_requests = seen_requests[0]["action_requests"]
    assert len(action_requests) == 2
    # Callable descriptions are invoked per tool call; strings pass through as-is.
    assert action_requests[0]["description"] == "Custom: tool_with_callable with args {'x': 1}"
    assert action_requests[1]["description"] == "Static description"
def test_human_in_the_loop_middleware_preserves_tool_call_order() -> None:
    """Test that middleware preserves the original order of tool calls.

    This test verifies that when mixing auto-approved and interrupt tools,
    the final tool call order matches the original order from the AI message.
    """
    middleware = HumanInTheLoopMiddleware(
        interrupt_on={
            "tool_b": {"allowed_decisions": ["approve", "edit", "reject"]},
            "tool_d": {"allowed_decisions": ["approve", "edit", "reject"]},
        }
    )
    # Create AI message with interleaved auto-approved and interrupt tools
    # Order: auto (A) -> interrupt (B) -> auto (C) -> interrupt (D) -> auto (E)
    ai_message = AIMessage(
        content="Processing multiple tools",
        tool_calls=[
            {"name": "tool_a", "args": {"val": 1}, "id": "id_a"},
            {"name": "tool_b", "args": {"val": 2}, "id": "id_b"},
            {"name": "tool_c", "args": {"val": 3}, "id": "id_c"},
            {"name": "tool_d", "args": {"val": 4}, "id": "id_d"},
            {"name": "tool_e", "args": {"val": 5}, "id": "id_e"},
        ],
    )
    state = AgentState[Any](messages=[HumanMessage(content="Hello"), ai_message])

    def mock_approve_all(_: Any) -> dict[str, Any]:
        # Approve both interrupt tools (B and D)
        return {"decisions": [{"type": "approve"}, {"type": "approve"}]}

    with patch(
        "langchain.agents.middleware.human_in_the_loop.interrupt", side_effect=mock_approve_all
    ):
        result = middleware.after_model(state, Runtime())
    assert result is not None
    assert "messages" in result
    updated_ai_message = result["messages"][0]
    assert len(updated_ai_message.tool_calls) == 5
    # Verify original order is preserved: A -> B -> C -> D -> E
    assert updated_ai_message.tool_calls[0]["name"] == "tool_a"
    assert updated_ai_message.tool_calls[0]["id"] == "id_a"
    assert updated_ai_message.tool_calls[1]["name"] == "tool_b"
    assert updated_ai_message.tool_calls[1]["id"] == "id_b"
    assert updated_ai_message.tool_calls[2]["name"] == "tool_c"
    assert updated_ai_message.tool_calls[2]["id"] == "id_c"
    assert updated_ai_message.tool_calls[3]["name"] == "tool_d"
    assert updated_ai_message.tool_calls[3]["id"] == "id_d"
    assert updated_ai_message.tool_calls[4]["name"] == "tool_e"
    assert updated_ai_message.tool_calls[4]["id"] == "id_e"
def test_human_in_the_loop_middleware_preserves_order_with_edits() -> None:
    """Test that order is preserved when interrupt tools are edited."""
    middleware = HumanInTheLoopMiddleware(
        interrupt_on={
            "tool_b": {"allowed_decisions": ["approve", "edit", "reject"]},
            "tool_d": {"allowed_decisions": ["approve", "edit", "reject"]},
        }
    )
    # A and C are auto-approved; B and D require a human decision.
    ai_message = AIMessage(
        content="Processing multiple tools",
        tool_calls=[
            {"name": "tool_a", "args": {"val": 1}, "id": "id_a"},
            {"name": "tool_b", "args": {"val": 2}, "id": "id_b"},
            {"name": "tool_c", "args": {"val": 3}, "id": "id_c"},
            {"name": "tool_d", "args": {"val": 4}, "id": "id_d"},
        ],
    )
    state = AgentState[Any](messages=[HumanMessage(content="Hello"), ai_message])

    def mock_edit_responses(_: Any) -> dict[str, Any]:
        # Edit tool_b, approve tool_d
        return {
            "decisions": [
                {
                    "type": "edit",
                    "edited_action": Action(name="tool_b", args={"val": 200}),
                },
                {"type": "approve"},
            ]
        }

    with patch(
        "langchain.agents.middleware.human_in_the_loop.interrupt", side_effect=mock_edit_responses
    ):
        result = middleware.after_model(state, Runtime())
    assert result is not None
    updated_ai_message = result["messages"][0]
    assert len(updated_ai_message.tool_calls) == 4
    # Verify order: A (auto) -> B (edited) -> C (auto) -> D (approved)
    assert updated_ai_message.tool_calls[0]["name"] == "tool_a"
    assert updated_ai_message.tool_calls[0]["args"] == {"val": 1}
    assert updated_ai_message.tool_calls[1]["name"] == "tool_b"
    assert updated_ai_message.tool_calls[1]["args"] == {"val": 200}  # Edited
    assert updated_ai_message.tool_calls[1]["id"] == "id_b"  # ID preserved
    assert updated_ai_message.tool_calls[2]["name"] == "tool_c"
    assert updated_ai_message.tool_calls[2]["args"] == {"val": 3}
    assert updated_ai_message.tool_calls[3]["name"] == "tool_d"
    assert updated_ai_message.tool_calls[3]["args"] == {"val": 4}
def test_human_in_the_loop_middleware_preserves_order_with_rejections() -> None:
    """Test that order is preserved when some interrupt tools are rejected."""
    middleware = HumanInTheLoopMiddleware(
        interrupt_on={
            "tool_b": {"allowed_decisions": ["approve", "edit", "reject"]},
            "tool_d": {"allowed_decisions": ["approve", "edit", "reject"]},
        }
    )
    ai_message = AIMessage(
        content="Processing multiple tools",
        tool_calls=[
            {"name": "tool_a", "args": {"val": 1}, "id": "id_a"},
            {"name": "tool_b", "args": {"val": 2}, "id": "id_b"},
            {"name": "tool_c", "args": {"val": 3}, "id": "id_c"},
            {"name": "tool_d", "args": {"val": 4}, "id": "id_d"},
            {"name": "tool_e", "args": {"val": 5}, "id": "id_e"},
        ],
    )
    state = AgentState[Any](messages=[HumanMessage(content="Hello"), ai_message])

    def mock_mixed_responses(_: Any) -> dict[str, Any]:
        # Reject tool_b, approve tool_d
        return {
            "decisions": [
                {"type": "reject", "message": "Rejected tool B"},
                {"type": "approve"},
            ]
        }

    with patch(
        "langchain.agents.middleware.human_in_the_loop.interrupt", side_effect=mock_mixed_responses
    ):
        result = middleware.after_model(state, Runtime())
    assert result is not None
    assert len(result["messages"]) == 2  # AI message + tool message for rejection
    updated_ai_message = result["messages"][0]
    # tool_b is still in the list (with rejection handled via tool message)
    assert len(updated_ai_message.tool_calls) == 5
    # Verify order maintained: A (auto) -> B (rejected) -> C (auto) -> D (approved) -> E (auto)
    assert updated_ai_message.tool_calls[0]["name"] == "tool_a"
    assert updated_ai_message.tool_calls[1]["name"] == "tool_b"
    assert updated_ai_message.tool_calls[2]["name"] == "tool_c"
    assert updated_ai_message.tool_calls[3]["name"] == "tool_d"
    assert updated_ai_message.tool_calls[4]["name"] == "tool_e"
    # Check rejection tool message
    tool_message = result["messages"][1]
    assert isinstance(tool_message, ToolMessage)
    assert tool_message.content == "Rejected tool B"
    assert tool_message.tool_call_id == "id_b"
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/tests/unit_tests/agents/middleware/implementations/test_human_in_the_loop.py",
"license": "MIT License",
"lines": 637,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/langchain_v1/tests/unit_tests/agents/middleware/implementations/test_model_call_limit.py | import pytest
from langchain_core.messages import AIMessage, HumanMessage, ToolMessage
from langchain_core.tools import tool
from langgraph.checkpoint.memory import InMemorySaver
from langgraph.runtime import Runtime
from langchain.agents.factory import create_agent
from langchain.agents.middleware.model_call_limit import (
ModelCallLimitExceededError,
ModelCallLimitMiddleware,
ModelCallLimitState,
)
from tests.unit_tests.agents.model import FakeToolCallingModel
@tool
def simple_tool(value: str) -> str:
    """A simple tool."""
    # Echo the input unchanged. The docstring above doubles as the tool's
    # runtime description (used by @tool), so it is deliberately left as-is.
    return value
def test_middleware_unit_functionality() -> None:
    """Test that the middleware works as expected in isolation.

    Covers both exit behaviors: the default "end" behavior (jump to end with
    an explanatory AI message) and the "error" behavior (raise).
    """
    # Test with end behavior
    middleware = ModelCallLimitMiddleware(thread_limit=2, run_limit=1)
    runtime = Runtime()
    # Test when limits are not exceeded
    state = ModelCallLimitState(messages=[], thread_model_call_count=0, run_model_call_count=0)
    result = middleware.before_model(state, runtime)
    assert result is None
    # Test when thread limit is exceeded
    state = ModelCallLimitState(messages=[], thread_model_call_count=2, run_model_call_count=0)
    result = middleware.before_model(state, runtime)
    assert result is not None
    assert result["jump_to"] == "end"
    assert "messages" in result
    assert len(result["messages"]) == 1
    assert "thread limit (2/2)" in result["messages"][0].content
    # Test when run limit is exceeded
    state = ModelCallLimitState(messages=[], thread_model_call_count=1, run_model_call_count=1)
    result = middleware.before_model(state, runtime)
    assert result is not None
    assert result["jump_to"] == "end"
    assert "messages" in result
    assert len(result["messages"]) == 1
    assert "run limit (1/1)" in result["messages"][0].content
    # Test with error behavior
    middleware_exception = ModelCallLimitMiddleware(
        thread_limit=2, run_limit=1, exit_behavior="error"
    )
    # Test exception when thread limit exceeded
    state = ModelCallLimitState(messages=[], thread_model_call_count=2, run_model_call_count=0)
    with pytest.raises(ModelCallLimitExceededError) as exc_info:
        middleware_exception.before_model(state, runtime)
    assert "thread limit (2/2)" in str(exc_info.value)
    # Test exception when run limit exceeded
    state = ModelCallLimitState(messages=[], thread_model_call_count=1, run_model_call_count=1)
    with pytest.raises(ModelCallLimitExceededError) as exc_info:
        middleware_exception.before_model(state, runtime)
    assert "run limit (1/1)" in str(exc_info.value)
def test_thread_limit_with_create_agent() -> None:
    """Test that thread limits work correctly with create_agent.

    The thread count accumulates across invocations sharing a thread_id, so a
    second invocation on the same thread must trip a thread_limit of 1.
    """
    model = FakeToolCallingModel()
    # Set thread limit to 1 (should be exceeded after 1 call)
    agent = create_agent(
        model=model,
        tools=[simple_tool],
        middleware=[ModelCallLimitMiddleware(thread_limit=1)],
        checkpointer=InMemorySaver(),
    )
    # First invocation should work - 1 model call, within thread limit
    result = agent.invoke(
        {"messages": [HumanMessage("Hello")]}, {"configurable": {"thread_id": "thread1"}}
    )
    # Should complete successfully with 1 model call
    assert "messages" in result
    assert len(result["messages"]) == 2  # Human + AI messages
    # Second invocation in same thread should hit thread limit
    # The agent should jump to end after detecting the limit
    result2 = agent.invoke(
        {"messages": [HumanMessage("Hello again")]}, {"configurable": {"thread_id": "thread1"}}
    )
    assert "messages" in result2
    # The agent should have detected the limit and jumped to end with a limit exceeded message
    # So we should have: previous messages + new human message + limit exceeded AI message
    assert len(result2["messages"]) == 4  # Previous Human + AI + New Human + Limit AI
    assert isinstance(result2["messages"][0], HumanMessage)  # First human
    assert isinstance(result2["messages"][1], AIMessage)  # First AI response
    assert isinstance(result2["messages"][2], HumanMessage)  # Second human
    assert isinstance(result2["messages"][3], AIMessage)  # Limit exceeded message
    assert "thread limit" in result2["messages"][3].content
def test_run_limit_with_create_agent() -> None:
    """Test that run limits work correctly with create_agent.

    With run_limit=1 the agent may make exactly one model call per run; the
    second model call (after the tool result) is blocked.
    """
    # Create a model that will make 2 calls
    model = FakeToolCallingModel(
        tool_calls=[
            [{"name": "simple_tool", "args": {"input": "test"}, "id": "1"}],
            [],  # No tool calls on second call
        ]
    )
    # Set run limit to 1 (should be exceeded after 1 call)
    agent = create_agent(
        model=model,
        tools=[simple_tool],
        middleware=[ModelCallLimitMiddleware(run_limit=1)],
        checkpointer=InMemorySaver(),
    )
    # This should hit the run limit after the first model call
    result = agent.invoke(
        {"messages": [HumanMessage("Hello")]}, {"configurable": {"thread_id": "thread1"}}
    )
    assert "messages" in result
    # The agent should have made 1 model call then jumped to end with limit exceeded message
    # So we should have: Human + AI + Tool + Limit exceeded AI message
    assert len(result["messages"]) == 4  # Human + AI + Tool + Limit AI
    assert isinstance(result["messages"][0], HumanMessage)
    assert isinstance(result["messages"][1], AIMessage)
    assert isinstance(result["messages"][2], ToolMessage)
    assert isinstance(result["messages"][3], AIMessage)  # Limit exceeded message
    assert "run limit" in result["messages"][3].content
def test_middleware_initialization_validation() -> None:
    """Test that middleware initialization validates parameters correctly."""
    # Constructing with no limits at all must be rejected.
    with pytest.raises(ValueError, match="At least one limit must be specified"):
        ModelCallLimitMiddleware()
    # An unrecognized exit behavior must be rejected as well.
    with pytest.raises(ValueError, match="Invalid exit_behavior"):
        ModelCallLimitMiddleware(thread_limit=5, exit_behavior="invalid")  # type: ignore[arg-type]
    # Both limits supplied: stored verbatim, exit behavior defaults to "end".
    mw = ModelCallLimitMiddleware(thread_limit=5, run_limit=3)
    assert mw.thread_limit == 5
    assert mw.run_limit == 3
    assert mw.exit_behavior == "end"
    # Thread limit alone: run limit stays unset.
    mw = ModelCallLimitMiddleware(thread_limit=5)
    assert mw.thread_limit == 5
    assert mw.run_limit is None
    # Run limit alone: thread limit stays unset.
    mw = ModelCallLimitMiddleware(run_limit=3)
    assert mw.thread_limit is None
    assert mw.run_limit == 3
def test_exception_error_message() -> None:
    """Test that the exception provides clear error messages."""
    mw = ModelCallLimitMiddleware(thread_limit=2, run_limit=1, exit_behavior="error")
    # (thread_count, run_count, fragments expected in the error message):
    # thread-only breach, run-only breach, and both breached at once.
    cases = [
        (2, 0, ["thread limit (2/2)"]),
        (0, 1, ["run limit (1/1)"]),
        (2, 1, ["thread limit (2/2)", "run limit (1/1)"]),
    ]
    for thread_count, run_count, expected_fragments in cases:
        state = ModelCallLimitState(
            messages=[],
            thread_model_call_count=thread_count,
            run_model_call_count=run_count,
        )
        with pytest.raises(ModelCallLimitExceededError) as exc_info:
            mw.before_model(state, Runtime())
        error_msg = str(exc_info.value)
        assert "Model call limits exceeded" in error_msg
        for fragment in expected_fragments:
            assert fragment in error_msg
def test_run_limit_resets_between_invocations() -> None:
    """Test run limit resets between invocations.

    Test that run_model_call_count resets between invocations, but
    thread_model_call_count accumulates.
    """
    # First: No tool calls per invocation, so model does not increment call counts internally
    middleware = ModelCallLimitMiddleware(thread_limit=3, run_limit=1, exit_behavior="error")
    model = FakeToolCallingModel(
        tool_calls=[[], [], [], []]
    )  # No tool calls, so only model call per run
    agent = create_agent(model=model, middleware=[middleware], checkpointer=InMemorySaver())
    thread_config = {"configurable": {"thread_id": "test_thread"}}
    # Three runs succeed: each uses 1/1 of the run budget, which resets,
    # while the thread counter climbs 1 -> 2 -> 3.
    agent.invoke({"messages": [HumanMessage("Hello")]}, thread_config)
    agent.invoke({"messages": [HumanMessage("Hello again")]}, thread_config)
    agent.invoke({"messages": [HumanMessage("Hello third")]}, thread_config)
    # Fourth run: should raise, thread_model_call_count == 3 (limit)
    with pytest.raises(ModelCallLimitExceededError) as exc_info:
        agent.invoke({"messages": [HumanMessage("Hello fourth")]}, thread_config)
    error_msg = str(exc_info.value)
    assert "thread limit (3/3)" in error_msg
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/tests/unit_tests/agents/middleware/implementations/test_model_call_limit.py",
"license": "MIT License",
"lines": 181,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/langchain_v1/tests/unit_tests/agents/middleware/implementations/test_summarization.py | from collections.abc import Iterable
from typing import Any
from unittest.mock import patch
import pytest
from langchain_core.callbacks import AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun
from langchain_core.language_models import ModelProfile
from langchain_core.language_models.base import (
LanguageModelInput,
)
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import (
AIMessage,
AnyMessage,
BaseMessage,
HumanMessage,
MessageLikeRepresentation,
RemoveMessage,
ToolMessage,
)
from langchain_core.messages.utils import count_tokens_approximately, get_buffer_string
from langchain_core.outputs import ChatGeneration, ChatResult
from langchain_core.runnables import RunnableConfig
from langgraph.graph.message import REMOVE_ALL_MESSAGES
from langgraph.runtime import Runtime
from pydantic import Field
from typing_extensions import override
from langchain.agents import AgentState
from langchain.agents.middleware.summarization import SummarizationMiddleware
from langchain.chat_models import init_chat_model
from tests.unit_tests.agents.model import FakeToolCallingModel
class MockChatModel(BaseChatModel):
    """Mock chat model for testing.

    ``invoke`` is overridden directly to short-circuit the generation
    machinery and return "Generated summary"; the ``_generate`` path
    returns "Summary" instead, letting tests tell the two apart.
    """

    @override
    def invoke(
        self,
        input: LanguageModelInput,
        config: RunnableConfig | None = None,
        *,
        stop: list[str] | None = None,
        **kwargs: Any,
    ) -> AIMessage:
        # Bypasses callbacks/generation entirely.
        return AIMessage(content="Generated summary")

    @override
    def _generate(
        self,
        messages: list[BaseMessage],
        stop: list[str] | None = None,
        run_manager: CallbackManagerForLLMRun | None = None,
        **kwargs: Any,
    ) -> ChatResult:
        return ChatResult(generations=[ChatGeneration(message=AIMessage(content="Summary"))])

    @property
    def _llm_type(self) -> str:
        return "mock"
class ProfileChatModel(BaseChatModel):
    """Mock chat model with profile for testing.

    Advertises ``max_input_tokens=1000`` via ``profile``; the
    profile-inference tests below resolve fractional trigger/keep limits
    against that value.
    """

    @override
    def _generate(
        self,
        messages: list[BaseMessage],
        stop: list[str] | None = None,
        run_manager: CallbackManagerForLLMRun | None = None,
        **kwargs: Any,
    ) -> ChatResult:
        return ChatResult(generations=[ChatGeneration(message=AIMessage(content="Summary"))])

    # Pydantic field carrying the advertised context-window size.
    profile: ModelProfile | None = ModelProfile(max_input_tokens=1000)

    @property
    def _llm_type(self) -> str:
        return "mock"
def test_summarization_middleware_initialization() -> None:
    """Test SummarizationMiddleware initialization."""
    model = FakeToolCallingModel()
    middleware = SummarizationMiddleware(
        model=model,
        trigger=("tokens", 1000),
        keep=("messages", 10),
        summary_prompt="Custom prompt: {messages}",
    )
    assert middleware.model == model
    assert middleware.trigger == ("tokens", 1000)
    assert middleware.keep == ("messages", 10)
    assert middleware.summary_prompt == "Custom prompt: {messages}"
    assert middleware.trim_tokens_to_summarize == 4000  # default trim budget
    # Fractional limits require a model profile; FakeToolCallingModel has none.
    with pytest.raises(
        ValueError,
        match="Model profile information is required to use fractional token limits, "
        "and is unavailable for the specified model",
    ):
        SummarizationMiddleware(model=model, keep=("fraction", 0.5))  # no model profile
    # Test with string model
    with patch(
        "langchain.agents.middleware.summarization.init_chat_model",
        return_value=FakeToolCallingModel(),
    ):
        middleware = SummarizationMiddleware(model="fake-model")
        assert isinstance(middleware.model, FakeToolCallingModel)
def test_summarization_middleware_no_summarization_cases() -> None:
    """Test SummarizationMiddleware when summarization is not needed or disabled."""
    fake_model = FakeToolCallingModel()
    enabled = SummarizationMiddleware(model=fake_model, trigger=("tokens", 1000))
    # A middleware constructed with trigger=None never summarizes.
    disabled = SummarizationMiddleware(model=fake_model, trigger=None)
    state = AgentState[Any](messages=[HumanMessage(content="Hello"), AIMessage(content="Hi")])
    assert disabled.before_model(state, Runtime()) is None

    # A token count below the configured threshold leaves the state untouched.
    def stub_token_counter(_: Iterable[MessageLikeRepresentation]) -> int:
        return 500  # Below threshold

    enabled.token_counter = stub_token_counter
    assert enabled.before_model(state, Runtime()) is None
def test_summarization_middleware_helper_methods() -> None:
    """Test SummarizationMiddleware helper methods.

    Exercises the private helpers ``_ensure_message_ids``,
    ``_partition_messages``, and ``_build_new_messages`` directly.
    """
    model = FakeToolCallingModel()
    middleware = SummarizationMiddleware(model=model, trigger=("tokens", 1000))
    # Test message ID assignment
    messages: list[AnyMessage] = [HumanMessage(content="Hello"), AIMessage(content="Hi")]
    middleware._ensure_message_ids(messages)
    for msg in messages:
        assert msg.id is not None
    # Test message partitioning
    messages = [
        HumanMessage(content="1"),
        HumanMessage(content="2"),
        HumanMessage(content="3"),
        HumanMessage(content="4"),
        HumanMessage(content="5"),
    ]
    # Cutoff index 2: first two go to the summary, the rest are kept verbatim.
    to_summarize, preserved = middleware._partition_messages(messages, 2)
    assert len(to_summarize) == 2
    assert len(preserved) == 3
    assert to_summarize == messages[:2]
    assert preserved == messages[2:]
    # Test summary message building
    summary = "This is a test summary"
    new_messages = middleware._build_new_messages(summary)
    assert len(new_messages) == 1
    assert isinstance(new_messages[0], HumanMessage)
    assert "Here is a summary of the conversation to date:" in new_messages[0].content
    assert summary in new_messages[0].content
    assert new_messages[0].additional_kwargs.get("lc_source") == "summarization"
def test_summarization_middleware_summary_creation() -> None:
    """Test SummarizationMiddleware summary creation."""
    middleware = SummarizationMiddleware(model=MockChatModel(), trigger=("tokens", 1000))
    # Test normal summary creation
    messages: list[AnyMessage] = [HumanMessage(content="Hello"), AIMessage(content="Hi")]
    summary = middleware._create_summary(messages)
    assert summary == "Generated summary"
    # Test empty messages
    summary = middleware._create_summary([])
    assert summary == "No previous conversation history."

    # Test error handling
    class ErrorModel(BaseChatModel):
        # invoke always raises, so the middleware's error path is exercised.
        @override
        def invoke(
            self,
            input: LanguageModelInput,
            config: RunnableConfig | None = None,
            *,
            stop: list[str] | None = None,
            **kwargs: Any,
        ) -> AIMessage:
            msg = "Model error"
            raise ValueError(msg)

        @override
        def _generate(
            self,
            messages: list[BaseMessage],
            stop: list[str] | None = None,
            run_manager: CallbackManagerForLLMRun | None = None,
            **kwargs: Any,
        ) -> ChatResult:
            return ChatResult(generations=[ChatGeneration(message=AIMessage(content="Summary"))])

        @property
        def _llm_type(self) -> str:
            return "mock"

    middleware_error = SummarizationMiddleware(model=ErrorModel(), trigger=("tokens", 1000))
    summary = middleware_error._create_summary(messages)
    # Failures are folded into the summary text rather than propagated.
    assert "Error generating summary: Model error" in summary
    # Test we raise warning if max_tokens_before_summary or messages_to_keep is specified
    with pytest.warns(DeprecationWarning, match="max_tokens_before_summary is deprecated"):
        SummarizationMiddleware(model=MockChatModel(), max_tokens_before_summary=500)
    with pytest.warns(DeprecationWarning, match="messages_to_keep is deprecated"):
        SummarizationMiddleware(model=MockChatModel(), messages_to_keep=5)
def test_summarization_middleware_trim_limit_none_keeps_all_messages() -> None:
    """Verify disabling trim limit preserves full message sequence."""
    history: list[AnyMessage] = [HumanMessage(content=str(i)) for i in range(10)]
    middleware = SummarizationMiddleware(
        model=MockChatModel(),
        trim_tokens_to_summarize=None,
    )

    def one_token_per_message(messages: Iterable[MessageLikeRepresentation]) -> int:
        # Trivial counter; irrelevant once trimming is disabled.
        return len(list(messages))

    middleware.token_counter = one_token_per_message
    # With no trim limit, the very same list object must come back untouched.
    assert middleware._trim_messages_for_summary(history) is history
def test_summarization_middleware_profile_inference_triggers_summary() -> None:
    """Ensure automatic profile inference triggers summarization when limits are exceeded.

    ``ProfileChatModel`` advertises ``max_input_tokens=1000``; fractional
    trigger/keep values are resolved against that limit.
    """

    def token_counter(messages: Iterable[MessageLikeRepresentation]) -> int:
        # Deterministic: every message costs exactly 200 tokens.
        return len(list(messages)) * 200

    middleware = SummarizationMiddleware(
        model=ProfileChatModel(),
        trigger=("fraction", 0.81),
        keep=("fraction", 0.5),
        token_counter=token_counter,
    )
    state = AgentState[Any](
        messages=[
            HumanMessage(content="Message 1"),
            AIMessage(content="Message 2"),
            HumanMessage(content="Message 3"),
            AIMessage(content="Message 4"),
        ]
    )
    # Test we don't engage summarization
    # we have total_tokens = 4 * 200 = 800
    # and max_input_tokens = 1000
    # since 0.81 * 1000 == 810 > 800 -> summarization not triggered
    result = middleware.before_model(state, Runtime())
    assert result is None
    # Engage summarization
    # since 0.80 * 1000 == 800 <= 800
    middleware = SummarizationMiddleware(
        model=ProfileChatModel(),
        trigger=("fraction", 0.80),
        keep=("fraction", 0.5),
        token_counter=token_counter,
    )
    result = middleware.before_model(state, Runtime())
    assert result is not None
    assert isinstance(result["messages"][0], RemoveMessage)
    summary_message = result["messages"][1]
    assert isinstance(summary_message, HumanMessage)
    assert summary_message.text.startswith("Here is a summary of the conversation")
    assert len(result["messages"][2:]) == 2  # Preserved messages
    assert [message.content for message in result["messages"][2:]] == [
        "Message 3",
        "Message 4",
    ]
    # With keep=("fraction", 0.6) the target token allowance becomes 600,
    # so the cutoff shifts to keep the last three messages instead of two.
    middleware = SummarizationMiddleware(
        model=ProfileChatModel(),
        trigger=("fraction", 0.80),
        keep=("fraction", 0.6),
        token_counter=token_counter,
    )
    result = middleware.before_model(state, Runtime())
    assert result is not None
    assert [message.content for message in result["messages"][2:]] == [
        "Message 2",
        "Message 3",
        "Message 4",
    ]
    # Once keep=("fraction", 0.8) the inferred limit equals the full
    # context (target tokens = 800), so token-based retention keeps everything
    # and summarization is skipped entirely.
    middleware = SummarizationMiddleware(
        model=ProfileChatModel(),
        trigger=("fraction", 0.80),
        keep=("fraction", 0.8),
        token_counter=token_counter,
    )
    assert middleware.before_model(state, Runtime()) is None
    # Test with tokens_to_keep as absolute int value
    middleware_int = SummarizationMiddleware(
        model=ProfileChatModel(),
        trigger=("fraction", 0.80),
        keep=("tokens", 400),  # Keep exactly 400 tokens (2 messages)
        token_counter=token_counter,
    )
    result = middleware_int.before_model(state, Runtime())
    assert result is not None
    assert [message.content for message in result["messages"][2:]] == [
        "Message 3",
        "Message 4",
    ]
    # Test with tokens_to_keep as larger int value
    middleware_int_large = SummarizationMiddleware(
        model=ProfileChatModel(),
        trigger=("fraction", 0.80),
        keep=("tokens", 600),  # Keep 600 tokens (3 messages)
        token_counter=token_counter,
    )
    result = middleware_int_large.before_model(state, Runtime())
    assert result is not None
    assert [message.content for message in result["messages"][2:]] == [
        "Message 2",
        "Message 3",
        "Message 4",
    ]
def test_summarization_middleware_token_retention_preserves_ai_tool_pairs() -> None:
    """Ensure token retention preserves AI/Tool message pairs together."""

    def token_counter(messages: Iterable[MessageLikeRepresentation]) -> int:
        # One token per character of content.
        return sum(len(getattr(message, "content", "")) for message in messages)

    middleware = SummarizationMiddleware(
        model=ProfileChatModel(),
        trigger=("fraction", 0.1),
        keep=("fraction", 0.5),
        token_counter=token_counter,
    )
    # Total tokens: 300 + 200 + 50 + 180 + 160 = 890
    # Target keep: 500 tokens (50% of 1000)
    # Binary search finds cutoff around index 2 (ToolMessage)
    # We move back to index 1 to preserve the AIMessage with its ToolMessage
    messages: list[AnyMessage] = [
        HumanMessage(content="H" * 300),
        AIMessage(
            content="A" * 200,
            tool_calls=[{"name": "test", "args": {}, "id": "call-1"}],
        ),
        ToolMessage(content="T" * 50, tool_call_id="call-1"),
        HumanMessage(content="H" * 180),
        HumanMessage(content="H" * 160),
    ]
    state = AgentState[Any](messages=messages)
    result = middleware.before_model(state, Runtime())
    assert result is not None
    preserved_messages = result["messages"][2:]
    # We move the cutoff back to include the AIMessage with its ToolMessage
    # So we preserve messages from index 1 onward (AI + Tool + Human + Human)
    assert preserved_messages == messages[1:]
    # Verify the AI/Tool pair is preserved together
    assert isinstance(preserved_messages[0], AIMessage)
    assert preserved_messages[0].tool_calls
    assert isinstance(preserved_messages[1], ToolMessage)
    assert preserved_messages[1].tool_call_id == preserved_messages[0].tool_calls[0]["id"]
def test_summarization_middleware_missing_profile() -> None:
    """Ensure automatic profile inference falls back when profiles are unavailable.

    The model below raises ``AttributeError`` on any ``profile`` access, so
    fractional limits cannot be resolved and construction must fail.
    """

    class ImportErrorProfileModel(BaseChatModel):
        @override
        def _generate(
            self,
            messages: list[BaseMessage],
            stop: list[str] | None = None,
            run_manager: CallbackManagerForLLMRun | None = None,
            **kwargs: Any,
        ) -> ChatResult:
            raise NotImplementedError

        @property
        def _llm_type(self) -> str:
            return "mock"

        # NOTE: Using __getattribute__ because @property cannot override Pydantic fields.
        def __getattribute__(self, name: str) -> Any:
            if name == "profile":
                msg = "Profile not available"
                raise AttributeError(msg)
            return super().__getattribute__(name)

    with pytest.raises(
        ValueError,
        match="Model profile information is required to use fractional token limits",
    ):
        _ = SummarizationMiddleware(
            model=ImportErrorProfileModel(), trigger=("fraction", 0.5), keep=("messages", 1)
        )
def test_summarization_middleware_full_workflow() -> None:
    """Test SummarizationMiddleware complete summarization workflow."""
    with pytest.warns(DeprecationWarning, match="messages_to_keep is deprecated"):
        # keep test for functionality
        middleware = SummarizationMiddleware(
            model=MockChatModel(), max_tokens_before_summary=1000, messages_to_keep=2
        )

    # Mock high token count to trigger summarization
    def mock_token_counter(_: Iterable[MessageLikeRepresentation]) -> int:
        return 1500  # Above threshold

    middleware.token_counter = mock_token_counter
    messages: list[AnyMessage] = [
        HumanMessage(content="1"),
        HumanMessage(content="2"),
        HumanMessage(content="3"),
        HumanMessage(content="4"),
        HumanMessage(content="5"),
    ]
    state = AgentState[Any](messages=messages)
    result = middleware.before_model(state, Runtime())
    assert result is not None
    assert "messages" in result
    assert len(result["messages"]) > 0
    # Should have RemoveMessage for cleanup
    assert isinstance(result["messages"][0], RemoveMessage)
    assert result["messages"][0].id == REMOVE_ALL_MESSAGES
    # Should have summary message
    summary_message = None
    for msg in result["messages"]:
        if isinstance(msg, HumanMessage) and "summary of the conversation" in msg.content:
            summary_message = msg
            break
    assert summary_message is not None
    # "Generated summary" comes from MockChatModel.invoke's short-circuit.
    assert "Generated summary" in summary_message.content
async def test_summarization_middleware_full_workflow_async() -> None:
    """Test SummarizationMiddleware complete summarization workflow.

    The async path must route through ``_agenerate`` ("Blip"), not the sync
    ``_generate`` ("Blep").
    """

    class MockModel(BaseChatModel):
        @override
        def _generate(
            self,
            messages: list[BaseMessage],
            stop: list[str] | None = None,
            run_manager: CallbackManagerForLLMRun | None = None,
            **kwargs: Any,
        ) -> ChatResult:
            return ChatResult(generations=[ChatGeneration(message=AIMessage(content="Blep"))])

        @override
        async def _agenerate(
            self,
            messages: list[BaseMessage],
            stop: list[str] | None = None,
            run_manager: AsyncCallbackManagerForLLMRun | None = None,
            **kwargs: Any,
        ) -> ChatResult:
            return ChatResult(generations=[ChatGeneration(message=AIMessage(content="Blip"))])

        @property
        def _llm_type(self) -> str:
            return "mock"

    middleware = SummarizationMiddleware(
        model=MockModel(), trigger=("tokens", 1000), keep=("messages", 2)
    )

    # Mock high token count to trigger summarization
    def mock_token_counter(_: Iterable[MessageLikeRepresentation]) -> int:
        return 1500  # Above threshold

    middleware.token_counter = mock_token_counter
    messages: list[AnyMessage] = [
        HumanMessage(content="1"),
        HumanMessage(content="2"),
        HumanMessage(content="3"),
        HumanMessage(content="4"),
        HumanMessage(content="5"),
    ]
    state = AgentState[Any](messages=messages)
    result = await middleware.abefore_model(state, Runtime())
    assert result is not None
    assert "messages" in result
    assert len(result["messages"]) > 0
    expected_types = ["remove", "human", "human", "human"]
    actual_types = [message.type for message in result["messages"]]
    assert actual_types == expected_types
    assert [message.content for message in result["messages"][2:]] == ["4", "5"]
    summary_message = result["messages"][1]
    # "Blip" proves the async _agenerate path was used.
    assert "Blip" in summary_message.text
def test_summarization_middleware_keep_messages() -> None:
    """Test SummarizationMiddleware with keep parameter specifying messages."""

    def human_msgs(count: int) -> list[AnyMessage]:
        # HumanMessages with contents "1", "2", ... matching the scenarios below.
        return [HumanMessage(content=str(i)) for i in range(1, count + 1)]

    middleware = SummarizationMiddleware(
        model=MockChatModel(), trigger=("messages", 5), keep=("messages", 2)
    )

    # Below the trigger threshold: the state is left untouched.
    result = middleware.before_model(AgentState[Any](messages=human_msgs(4)), Runtime())
    assert result is None

    # Exactly at the threshold: summarization kicks in, keeping the last two.
    result = middleware.before_model(AgentState[Any](messages=human_msgs(5)), Runtime())
    assert result is not None
    assert "messages" in result
    assert [message.type for message in result["messages"]] == ["remove", "human", "human", "human"]
    assert [message.content for message in result["messages"][2:]] == ["4", "5"]

    # Above the threshold: summarization also triggers.
    result = middleware.before_model(AgentState[Any](messages=human_msgs(6)), Runtime())
    assert result is not None
    assert "messages" in result
    assert [message.type for message in result["messages"]] == ["remove", "human", "human", "human"]
    assert [message.content for message in result["messages"][2:]] == ["5", "6"]

    # With the trigger disabled, the middleware never summarizes.
    disabled = SummarizationMiddleware(model=MockChatModel(), trigger=None)
    assert disabled.before_model(AgentState[Any](messages=human_msgs(6)), Runtime()) is None
@pytest.mark.parametrize(
    ("param_name", "param_value", "expected_error"),
    [
        ("trigger", ("fraction", 0.0), "Fractional trigger values must be between 0 and 1"),
        ("trigger", ("fraction", 1.5), "Fractional trigger values must be between 0 and 1"),
        ("keep", ("fraction", -0.1), "Fractional keep values must be between 0 and 1"),
        ("trigger", ("tokens", 0), "trigger thresholds must be greater than 0"),
        ("trigger", ("messages", -5), "trigger thresholds must be greater than 0"),
        ("keep", ("tokens", 0), "keep thresholds must be greater than 0"),
        ("trigger", ("invalid", 100), "Unsupported context size type"),
        ("keep", ("invalid", 100), "Unsupported context size type"),
    ],
)
def test_summarization_middleware_validation_edge_cases(
    param_name: str, param_value: Any, expected_error: str
) -> None:
    """Test validation of context size parameters with edge cases."""
    # Every invalid trigger/keep spec must be rejected at construction time.
    bad_kwargs = {param_name: param_value}
    with pytest.raises(ValueError, match=expected_error):
        SummarizationMiddleware(model=FakeToolCallingModel(), **bad_kwargs)
def test_summarization_middleware_multiple_triggers() -> None:
    """Test middleware with multiple trigger conditions."""
    # With a list of triggers, summarization fires when ANY condition is met.
    middleware = SummarizationMiddleware(
        model=MockChatModel(),
        trigger=[("messages", 10), ("tokens", 500)],
        keep=("messages", 2),
    )

    def run_with(count: int) -> Any:
        # Run before_model over `count` numbered human messages.
        history: list[AnyMessage] = [HumanMessage(content=str(i)) for i in range(count)]
        return middleware.before_model(AgentState[Any](messages=history), Runtime())

    def low_token_counter(_: Iterable[MessageLikeRepresentation]) -> int:
        return 100

    middleware.token_counter = low_token_counter
    # Five messages, 100 tokens: neither threshold is reached.
    assert run_with(5) is None
    # Ten messages: the message-count trigger fires despite the low token count.
    assert run_with(10) is not None

    def high_token_counter(_: Iterable[MessageLikeRepresentation]) -> int:
        return 600

    middleware.token_counter = high_token_counter
    # Five messages but 600 tokens: the token trigger fires on its own.
    assert run_with(5) is not None
def test_summarization_middleware_profile_edge_cases() -> None:
    """Test profile retrieval with various edge cases.

    `_get_profile_limits` should return None whenever the model exposes no
    usable profile: missing attribute, non-dict profile, a profile missing
    `max_input_tokens`, or a non-integer `max_input_tokens`.
    """
    class NoProfileModel(BaseChatModel):
        # Plain model that defines no `profile` attribute at all.
        @override
        def _generate(
            self,
            messages: list[BaseMessage],
            stop: list[str] | None = None,
            run_manager: CallbackManagerForLLMRun | None = None,
            **kwargs: Any,
        ) -> ChatResult:
            return ChatResult(generations=[ChatGeneration(message=AIMessage(content="Summary"))])
        @property
        def _llm_type(self) -> str:
            return "mock"
    # Model without profile attribute
    middleware = SummarizationMiddleware(model=NoProfileModel(), trigger=("messages", 5))
    assert middleware._get_profile_limits() is None
    class InvalidProfileModel(BaseChatModel):
        @override
        def _generate(
            self,
            messages: list[BaseMessage],
            stop: list[str] | None = None,
            run_manager: CallbackManagerForLLMRun | None = None,
            **kwargs: Any,
        ) -> ChatResult:
            return ChatResult(generations=[ChatGeneration(message=AIMessage(content="Summary"))])
        @property
        def _llm_type(self) -> str:
            return "mock"
        # NOTE: Using __getattribute__ because @property cannot override Pydantic fields.
        def __getattribute__(self, name: str) -> Any:
            if name == "profile":
                return "invalid_profile_type"
            return super().__getattribute__(name)
    # Model with non-dict profile
    middleware = SummarizationMiddleware(model=InvalidProfileModel(), trigger=("messages", 5))
    assert middleware._get_profile_limits() is None
    class MissingTokensModel(BaseChatModel):
        # Profile dict exists but lacks the `max_input_tokens` key.
        profile: ModelProfile | None = Field(default=ModelProfile(other_field=100), exclude=True)  # type: ignore[typeddict-unknown-key]
        @override
        def _generate(
            self,
            messages: list[BaseMessage],
            stop: list[str] | None = None,
            run_manager: CallbackManagerForLLMRun | None = None,
            **kwargs: Any,
        ) -> ChatResult:
            return ChatResult(generations=[ChatGeneration(message=AIMessage(content="Summary"))])
        @property
        def _llm_type(self) -> str:
            return "mock"
    # Model with profile but no max_input_tokens
    middleware = SummarizationMiddleware(model=MissingTokensModel(), trigger=("messages", 5))
    assert middleware._get_profile_limits() is None
    class InvalidTokenTypeModel(BaseChatModel):
        # `max_input_tokens` is present but not an int, so it must be rejected.
        profile: ModelProfile | None = Field(
            default=ModelProfile(max_input_tokens="not_an_int"),  # type: ignore[typeddict-item]
            exclude=True,
        )
        @override
        def _generate(
            self,
            messages: list[BaseMessage],
            stop: list[str] | None = None,
            run_manager: CallbackManagerForLLMRun | None = None,
            **kwargs: Any,
        ) -> ChatResult:
            return ChatResult(generations=[ChatGeneration(message=AIMessage(content="Summary"))])
        @property
        def _llm_type(self) -> str:
            return "mock"
    # Model with non-int max_input_tokens
    middleware = SummarizationMiddleware(model=InvalidTokenTypeModel(), trigger=("messages", 5))
    assert middleware._get_profile_limits() is None
def test_summarization_middleware_trim_messages_error_fallback() -> None:
    """Test that trim_messages_for_summary falls back gracefully on errors."""
    middleware = SummarizationMiddleware(model=MockChatModel(), trigger=("messages", 5))

    def broken_counter(_: Iterable[MessageLikeRepresentation]) -> int:
        # Simulate a token counter that always blows up.
        raise ValueError("Token counting failed")

    middleware.token_counter = broken_counter
    history: list[AnyMessage] = [HumanMessage(content=str(i)) for i in range(20)]
    # On counter failure, the middleware falls back to the last 15 messages.
    trimmed = middleware._trim_messages_for_summary(history)
    assert len(trimmed) == 15
    assert trimmed == history[-15:]
def test_summarization_middleware_binary_search_edge_cases() -> None:
    """Test binary search in _find_token_based_cutoff with edge cases."""
    middleware = SummarizationMiddleware(
        model=MockChatModel(), trigger=("messages", 5), keep=("tokens", 100)
    )

    def oversized_counter(messages: Iterable[MessageLikeRepresentation]) -> int:
        # Every message "costs" 200 tokens, far over the 100-token keep budget.
        return 200 * len(list(messages))

    middleware.token_counter = oversized_counter
    # A single message that cannot fit the budget yields cutoff 0.
    too_big: list[AnyMessage] = [HumanMessage(content="x" * 200)]
    assert middleware._find_token_based_cutoff(too_big) == 0
    # An empty history also yields cutoff 0.
    assert middleware._find_token_based_cutoff([]) == 0

    def tiny_counter(messages: Iterable[MessageLikeRepresentation]) -> int:
        return 10 * len(list(messages))

    middleware.token_counter = tiny_counter
    # When everything fits within the budget, nothing needs to be cut.
    small_history: list[AnyMessage] = [HumanMessage(content=str(i)) for i in range(5)]
    assert middleware._find_token_based_cutoff(small_history) == 0
def test_summarization_middleware_find_safe_cutoff_point() -> None:
    """Test `_find_safe_cutoff_point` preserves AI/Tool message pairs."""
    middleware = SummarizationMiddleware(
        model=FakeToolCallingModel(), trigger=("messages", 10), keep=("messages", 2)
    )
    history: list[AnyMessage] = [
        HumanMessage(content="msg1"),
        AIMessage(content="ai", tool_calls=[{"name": "tool", "args": {}, "id": "call1"}]),
        ToolMessage(content="result1", tool_call_id="call1"),
        ToolMessage(content="result2", tool_call_id="call2"),  # orphan - no matching AI
        HumanMessage(content="msg2"),
    ]
    # Indices that do not land on a ToolMessage are already safe cutoffs.
    for safe_index in (0, 1, 4):
        assert middleware._find_safe_cutoff_point(history, safe_index) == safe_index
    # Landing on a ToolMessage whose tool_call_id matches the AIMessage at
    # index 1 moves the cutoff back so the call/result pair stays together.
    assert middleware._find_safe_cutoff_point(history, 2) == 1
    # An orphan ToolMessage ("call2" matches nothing from the cutoff onwards)
    # falls back to advancing forward past the ToolMessages.
    assert middleware._find_safe_cutoff_point(history, 3) == 4
    # At or beyond the end of the list, the index comes back unchanged.
    assert middleware._find_safe_cutoff_point(history, 5) == 5
    assert middleware._find_safe_cutoff_point(history, len(history)) == len(history)
    assert middleware._find_safe_cutoff_point(history, len(history) + 5) == len(history) + 5
def test_summarization_middleware_find_safe_cutoff_point_orphan_tool() -> None:
    """Test `_find_safe_cutoff_point` with truly orphan `ToolMessage` (no matching `AIMessage`)."""
    middleware = SummarizationMiddleware(
        model=FakeToolCallingModel(), trigger=("messages", 10), keep=("messages", 2)
    )
    # "orphan_call" never appears in any AIMessage's tool_calls.
    history: list[AnyMessage] = [
        HumanMessage(content="msg1"),
        AIMessage(content="ai_no_tools"),  # no tool_calls at all
        ToolMessage(content="orphan_result", tool_call_id="orphan_call"),
        HumanMessage(content="msg2"),
    ]
    # With no matching AIMessage anywhere, the cutoff advances past the orphan.
    assert middleware._find_safe_cutoff_point(history, 2) == 3
def test_summarization_cutoff_moves_backward_to_include_ai_message() -> None:
    """Test that cutoff moves backward to include `AIMessage` with its `ToolMessage`s.

    The old behavior advanced FORWARD past `ToolMessage`s, which could orphan a
    `ToolMessage` from its `AIMessage` or summarize away the whole AI/Tool pair.
    The fixed behavior searches backward from a `ToolMessage` for the
    `AIMessage` with matching `tool_calls`, keeping the pair together.
    """
    middleware = SummarizationMiddleware(
        model=FakeToolCallingModel(), trigger=("messages", 10), keep=("messages", 2)
    )
    conversation: list[AnyMessage] = [
        HumanMessage(content="initial question"),  # index 0
        AIMessage(  # index 1
            content="I'll use a tool",
            tool_calls=[{"name": "search", "args": {"q": "test"}, "id": "call_abc"}],
        ),
        ToolMessage(content="search result", tool_call_id="call_abc"),  # index 2
        HumanMessage(content="followup"),  # index 3
    ]
    # The ToolMessage at index 2 must pull the cutoff back to its AIMessage.
    result = middleware._find_safe_cutoff_point(conversation, 2)
    assert result == 1, (
        f"Expected cutoff to move backward to index 1 (AIMessage), got {result}. "
        "The cutoff should preserve AI/Tool pairs together."
    )
    kept = conversation[result]
    assert isinstance(kept, AIMessage)
    assert kept.tool_calls
    assert kept.tool_calls[0]["id"] == "call_abc"
def test_summarization_middleware_zero_and_negative_target_tokens() -> None:
    """Test handling of edge cases with target token calculations."""
    # A tiny fraction makes int(1000 * 0.0001) == 0; the middleware clamps the
    # threshold up to 1, so a 2-token history must still trigger summarization.
    middleware = SummarizationMiddleware(
        model=ProfileChatModel(), trigger=("fraction", 0.0001), keep=("fraction", 0.0001)
    )

    def two_token_counter(_: Iterable[MessageLikeRepresentation]) -> int:
        return 2

    middleware.token_counter = two_token_counter
    history: list[AnyMessage] = [HumanMessage(content="test")]
    assert middleware._should_summarize(history, 2)
async def test_summarization_middleware_async_error_handling() -> None:
    """Test async summary creation with errors."""
    class ErrorAsyncModel(BaseChatModel):
        # Sync generation succeeds; only the async path raises, so any error
        # text observed below must have come from the async code path.
        @override
        def _generate(
            self,
            messages: list[BaseMessage],
            stop: list[str] | None = None,
            run_manager: CallbackManagerForLLMRun | None = None,
            **kwargs: Any,
        ) -> ChatResult:
            return ChatResult(generations=[ChatGeneration(message=AIMessage(content="Summary"))])
        @override
        async def _agenerate(
            self,
            messages: list[BaseMessage],
            stop: list[str] | None = None,
            run_manager: AsyncCallbackManagerForLLMRun | None = None,
            **kwargs: Any,
        ) -> ChatResult:
            msg = "Async model error"
            raise ValueError(msg)
        @property
        def _llm_type(self) -> str:
            return "mock"
    middleware = SummarizationMiddleware(model=ErrorAsyncModel(), trigger=("messages", 5))
    messages: list[AnyMessage] = [HumanMessage(content="test")]
    summary = await middleware._acreate_summary(messages)
    # The middleware swallows the exception and embeds it in the summary text.
    assert "Error generating summary: Async model error" in summary
def test_summarization_middleware_cutoff_at_boundary() -> None:
    """Test cutoff index determination at exact message boundaries."""
    middleware = SummarizationMiddleware(
        model=MockChatModel(), trigger=("messages", 5), keep=("messages", 5)
    )
    history: list[AnyMessage] = [HumanMessage(content=str(i)) for i in range(5)]
    # Keeping exactly as many messages as exist: nothing gets cut.
    assert middleware._find_safe_cutoff(history, 5) == 0
    # Keeping more messages than exist: also nothing to cut.
    assert middleware._find_safe_cutoff(history, 10) == 0
def test_summarization_middleware_deprecated_parameters_with_defaults() -> None:
    """Test that deprecated parameters work correctly with default values."""
    # An explicit `trigger` wins over the deprecated max_tokens_before_summary.
    with pytest.warns(DeprecationWarning, match="max_tokens_before_summary is deprecated"):
        mw = SummarizationMiddleware(
            model=MockChatModel(), trigger=("tokens", 2000), max_tokens_before_summary=1000
        )
    assert mw.trigger == ("tokens", 2000)
    # Likewise, an explicit non-default `keep` wins over messages_to_keep.
    with pytest.warns(DeprecationWarning, match="messages_to_keep is deprecated"):
        mw = SummarizationMiddleware(
            model=MockChatModel(), keep=("messages", 5), messages_to_keep=10
        )
    assert mw.keep == ("messages", 5)
def test_summarization_middleware_fraction_trigger_with_no_profile() -> None:
    """Test fractional trigger condition when profile data becomes unavailable."""
    middleware = SummarizationMiddleware(
        model=ProfileChatModel(),
        trigger=[("fraction", 0.5), ("messages", 100)],
        keep=("messages", 5),
    )
    long_history: list[AnyMessage] = [HumanMessage(content=str(i)) for i in range(100)]
    # With profile limits unavailable, the fractional trigger cannot be
    # evaluated -- but the message-count trigger must still fire.
    with patch.object(middleware, "_get_profile_limits", autospec=True, return_value=None):
        outcome = middleware.before_model(AgentState[Any](messages=long_history), Runtime())
        assert outcome is not None
def test_summarization_adjust_token_counts() -> None:
    """The default token counter varies with the wrapped model's LLM type."""
    probe = HumanMessage(content="a" * 12)

    default_count = SummarizationMiddleware(
        model=MockChatModel(), trigger=("messages", 5)
    ).token_counter([probe])

    class MockAnthropicModel(MockChatModel):
        @property
        def _llm_type(self) -> str:
            return "anthropic-chat"

    anthropic_count = SummarizationMiddleware(
        model=MockAnthropicModel(), trigger=("messages", 5)
    ).token_counter([probe])

    # The same message must be counted differently for the two model types.
    assert default_count != anthropic_count
def test_summarization_middleware_many_parallel_tool_calls_safety() -> None:
    """Test cutoff safety preserves AI message with many parallel tool calls."""
    middleware = SummarizationMiddleware(
        model=MockChatModel(), trigger=("messages", 15), keep=("messages", 5)
    )
    parallel_calls = [{"name": f"tool_{i}", "args": {}, "id": f"call_{i}"} for i in range(10)]
    history: list[AnyMessage] = [
        HumanMessage(content="calling 10 tools"),
        AIMessage(content="calling 10 tools", tool_calls=parallel_calls),
        *(ToolMessage(content=f"result_{i}", tool_call_id=f"call_{i}") for i in range(10)),
    ]
    # Any cutoff landing inside the ToolMessage run (indices 2-11) snaps back
    # to index 1 so the AIMessage stays together with all ten of its results.
    for cutoff in range(2, 12):
        assert middleware._find_safe_cutoff_point(history, cutoff) == 1
    # Cutoffs before the tool run are already safe and stay put.
    assert middleware._find_safe_cutoff_point(history, 0) == 0
    assert middleware._find_safe_cutoff_point(history, 1) == 1
def test_summarization_before_model_uses_unscaled_tokens_for_cutoff() -> None:
    """A summarization pass requests both scaled and unscaled token counts."""
    recorded_kwargs: list[dict[str, Any]] = []

    def fake_counter(_: Iterable[MessageLikeRepresentation], **kwargs: Any) -> int:
        # Remember how each count was requested; 100 always exceeds trigger=1.
        recorded_kwargs.append(kwargs)
        return 100

    with patch(
        "langchain.agents.middleware.summarization.count_tokens_approximately",
        side_effect=fake_counter,
    ) as mock_counter:
        middleware = SummarizationMiddleware(
            model=MockChatModel(),
            trigger=("tokens", 1),
            keep=("tokens", 1),
            token_counter=mock_counter,
        )
        state = AgentState[Any](
            messages=[HumanMessage(content="one"), HumanMessage(content="two")]
        )
        assert middleware.before_model(state, Runtime()) is not None
    # Test we support partial token counting (which for default token counter does not
    # use use_usage_metadata_scaling)
    assert any(kw.get("use_usage_metadata_scaling") is False for kw in recorded_kwargs)
    assert any(kw.get("use_usage_metadata_scaling") is True for kw in recorded_kwargs)
def test_summarization_middleware_find_safe_cutoff_preserves_ai_tool_pair() -> None:
    """Test `_find_safe_cutoff` preserves AI/Tool message pairs together."""
    middleware = SummarizationMiddleware(
        model=MockChatModel(), trigger=("messages", 10), keep=("messages", 3)
    )
    parallel_calls = [{"name": f"tool{i}", "args": {}, "id": f"call{i}"} for i in (1, 2, 3)]
    # Layout: [Human, AI(3 tool calls), Tool, Tool, Tool, Human]
    history: list[AnyMessage] = [
        HumanMessage(content="msg1"),
        AIMessage(content="ai", tool_calls=parallel_calls),
        ToolMessage(content="result1", tool_call_id="call1"),
        ToolMessage(content="result2", tool_call_id="call2"),
        ToolMessage(content="result3", tool_call_id="call3"),
        HumanMessage(content="msg2"),
    ]
    # messages_to_keep=3 targets index 6 - 3 = 3 (a ToolMessage); the cutoff
    # moves back to index 1 so the AIMessage is kept with its results.
    assert middleware._find_safe_cutoff(history, messages_to_keep=3) == 1
    # messages_to_keep=2 targets index 4, also a ToolMessage; again index 1.
    # That keeps AI + Tools + Human -- more than requested, but still valid.
    assert middleware._find_safe_cutoff(history, messages_to_keep=2) == 1
def test_summarization_middleware_cutoff_at_start_of_tool_sequence() -> None:
    """Test cutoff when target lands exactly at the first ToolMessage."""
    middleware = SummarizationMiddleware(
        model=MockChatModel(), trigger=("messages", 8), keep=("messages", 4)
    )
    history: list[AnyMessage] = [
        HumanMessage(content="msg1"),
        HumanMessage(content="msg2"),
        AIMessage(content="ai", tool_calls=[{"name": "tool", "args": {}, "id": "call1"}]),
        ToolMessage(content="result", tool_call_id="call1"),
        HumanMessage(content="msg3"),
        HumanMessage(content="msg4"),
    ]
    # Target index is 6 - 4 = 2, which is the AIMessage itself -- already a
    # safe cutoff point, so no adjustment is needed.
    assert middleware._find_safe_cutoff(history, messages_to_keep=4) == 2
def test_create_summary_uses_get_buffer_string_format() -> None:
    """Test that `_create_summary` formats messages using `get_buffer_string`.

    Formatting via `get_buffer_string` avoids the token inflation that `str()`
    on message objects causes (metadata is serialized too), keeping the
    formatted prompt's token count below what `count_tokens_approximately`
    estimates for the raw messages.
    """
    # Messages deliberately carry usage/response metadata: str() serializes
    # all of it, while get_buffer_string omits it.
    messages: list[AnyMessage] = [
        HumanMessage(content="What is the weather in NYC?"),
        AIMessage(
            content="Let me check the weather for you.",
            tool_calls=[{"name": "get_weather", "args": {"city": "NYC"}, "id": "call_123"}],
            usage_metadata={"input_tokens": 50, "output_tokens": 30, "total_tokens": 80},
            response_metadata={"model": "gpt-4", "finish_reason": "tool_calls"},
        ),
        ToolMessage(content="72F and sunny", tool_call_id="call_123", name="get_weather"),
        AIMessage(
            content="It is 72F and sunny in NYC!",
            usage_metadata={"input_tokens": 100, "output_tokens": 25, "total_tokens": 125},
            response_metadata={"model": "gpt-4", "finish_reason": "stop"},
        ),
    ]
    approx_tokens = count_tokens_approximately(messages)
    buffer_tokens_estimate = len(get_buffer_string(messages)) / 4  # ~4 chars per token
    # get_buffer_string must come in under the approximate token count.
    ratio = buffer_tokens_estimate / approx_tokens
    assert ratio < 1.0, (
        f"get_buffer_string should produce fewer tokens than count_tokens_approximately. "
        f"Got ratio {ratio:.2f}x (expected < 1.0)"
    )
    # str() on the same messages blows well past the estimate.
    str_ratio = (len(str(messages)) / 4) / approx_tokens
    assert str_ratio > 1.5, (
        f"str(messages) should produce significantly more tokens. "
        f"Got ratio {str_ratio:.2f}x (expected > 1.5)"
    )
@pytest.mark.requires("langchain_anthropic")
def test_usage_metadata_trigger() -> None:
    """Token triggers can be satisfied by usage_metadata reported on messages.

    The latest matching-provider AIMessage's reported total_tokens is used in
    place of the locally computed count (passed here as 0), and is ignored when
    the reporting provider does not match the middleware's model.
    """
    model = init_chat_model("anthropic:claude-sonnet-4-5")
    middleware = SummarizationMiddleware(
        model=model, trigger=("tokens", 10_000), keep=("messages", 4)
    )
    messages: list[AnyMessage] = [
        HumanMessage(content="msg1"),
        AIMessage(
            content="msg2",
            tool_calls=[{"name": "tool", "args": {}, "id": "call1"}],
            response_metadata={"model_provider": "anthropic"},
            usage_metadata={
                "input_tokens": 5000,
                "output_tokens": 1000,
                "total_tokens": 6000,
            },
        ),
        ToolMessage(content="result", tool_call_id="call1"),
        AIMessage(
            content="msg3",
            response_metadata={"model_provider": "anthropic"},
            usage_metadata={
                "input_tokens": 6100,
                "output_tokens": 900,
                "total_tokens": 7000,
            },
        ),
        HumanMessage(content="msg4"),
        AIMessage(
            content="msg5",
            response_metadata={"model_provider": "anthropic"},
            usage_metadata={
                "input_tokens": 7500,
                "output_tokens": 2501,
                "total_tokens": 10_001,  # just over the 10_000 trigger
            },
        ),
    ]
    # reported token count should override count of zero
    assert middleware._should_summarize(messages, 0)
    # don't engage unless model provider matches
    messages.extend(
        [
            HumanMessage(content="msg6"),
            AIMessage(
                content="msg7",
                response_metadata={"model_provider": "not-anthropic"},
                usage_metadata={
                    "input_tokens": 7500,
                    "output_tokens": 2501,
                    "total_tokens": 10_001,
                },
            ),
        ]
    )
    assert not middleware._should_summarize(messages, 0)
    # don't engage if subsequent message stays under threshold (e.g., after summarization)
    messages.extend(
        [
            HumanMessage(content="msg8"),
            AIMessage(
                content="msg9",
                response_metadata={"model_provider": "anthropic"},
                usage_metadata={
                    "input_tokens": 7500,
                    "output_tokens": 2499,
                    "total_tokens": 9999,  # back under the 10_000 trigger
                },
            ),
        ]
    )
    assert not middleware._should_summarize(messages, 0)
class ConfigCapturingModel(BaseChatModel):
    """Mock model that captures the config passed to invoke/ainvoke."""
    # Configs observed by invoke/ainvoke; excluded from model serialization.
    captured_configs: list[RunnableConfig | None] = Field(default_factory=list, exclude=True)
    @override
    def invoke(
        self,
        input: LanguageModelInput,
        config: RunnableConfig | None = None,
        *,
        stop: list[str] | None = None,
        **kwargs: Any,
    ) -> AIMessage:
        # Record the config for later inspection, then return a canned reply.
        self.captured_configs.append(config)
        return AIMessage(content="Summary")
    @override
    async def ainvoke(
        self,
        input: LanguageModelInput,
        config: RunnableConfig | None = None,
        *,
        stop: list[str] | None = None,
        **kwargs: Any,
    ) -> AIMessage:
        # Async twin of invoke; records the config identically.
        self.captured_configs.append(config)
        return AIMessage(content="Summary")
    @override
    def _generate(
        self,
        messages: list[BaseMessage],
        stop: list[str] | None = None,
        run_manager: CallbackManagerForLLMRun | None = None,
        **kwargs: Any,
    ) -> ChatResult:
        # Required by BaseChatModel; not reached since invoke/ainvoke are overridden.
        return ChatResult(generations=[ChatGeneration(message=AIMessage(content="Summary"))])
    @property
    def _llm_type(self) -> str:
        return "config-capturing"
@pytest.mark.parametrize("use_async", [False, True], ids=["sync", "async"])
async def test_create_summary_passes_lc_source_metadata(use_async: bool) -> None:  # noqa: FBT001
    """Test that summary creation passes `lc_source` metadata to the model.

    Outside a LangGraph runnable context `get_config()` raises `RuntimeError`;
    the middleware must swallow that and still tag the model invocation with
    the `lc_source` metadata.
    """
    model = ConfigCapturingModel()
    model.captured_configs = []  # Reset for this test
    middleware = SummarizationMiddleware(model=model, trigger=("tokens", 1000))
    history: list[AnyMessage] = [HumanMessage(content="Hello"), AIMessage(content="Hi")]
    summary = (
        await middleware._acreate_summary(history)
        if use_async
        else middleware._create_summary(history)
    )
    assert summary == "Summary"
    # Exactly one model call, and its config carries the lc_source marker.
    assert len(model.captured_configs) == 1
    captured = model.captured_configs[0]
    assert captured is not None
    assert "metadata" in captured
    assert captured["metadata"]["lc_source"] == "summarization"
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/tests/unit_tests/agents/middleware/implementations/test_summarization.py",
"license": "MIT License",
"lines": 1086,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
"""Unit tests for TodoListMiddleware."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, cast
import pytest
from langchain_core.language_models.fake_chat_models import GenericFakeChatModel
from langchain_core.messages import AIMessage, HumanMessage, ToolMessage
from langchain.agents.factory import create_agent
from langchain.agents.middleware.todo import (
WRITE_TODOS_SYSTEM_PROMPT,
WRITE_TODOS_TOOL_DESCRIPTION,
PlanningState,
TodoListMiddleware,
write_todos,
)
from langchain.agents.middleware.types import AgentState, ModelRequest, ModelResponse
from tests.unit_tests.agents.model import FakeToolCallingModel
if TYPE_CHECKING:
from langgraph.runtime import Runtime
def _fake_runtime() -> Runtime:
return cast("Runtime", object())
def _make_request(system_prompt: str | None = None) -> ModelRequest:
    """Create a minimal ModelRequest for testing."""
    # One canned AI reply is all the fake model ever needs to produce.
    fake_model = GenericFakeChatModel(messages=iter([AIMessage(content="response")]))
    return ModelRequest(
        model=fake_model,
        system_prompt=system_prompt,
        messages=[],
        tool_choice=None,
        tools=[],
        response_format=None,
        state=AgentState(messages=[]),
        runtime=_fake_runtime(),
        model_settings={},
    )
# ==============================================================================
# Synchronous Tests
# ==============================================================================
def test_todo_middleware_initialization() -> None:
    """Test that TodoListMiddleware initializes correctly."""
    middleware = TodoListMiddleware()
    assert middleware.state_schema == PlanningState
    # Exactly one tool is registered, and it is write_todos.
    assert [tool.name for tool in middleware.tools] == ["write_todos"]
def test_has_write_todos_tool() -> None:
    """Test that middleware registers the write_todos tool."""
    registered = TodoListMiddleware().tools
    # Only the write_todos tool should be exposed.
    assert len(registered) == 1
    assert registered[0].name == "write_todos"
def test_todo_middleware_default_prompts() -> None:
    """Test that TodoListMiddleware uses default prompts when none provided."""
    middleware = TodoListMiddleware()
    # Both the system prompt and the tool description fall back to defaults.
    assert middleware.system_prompt == WRITE_TODOS_SYSTEM_PROMPT
    assert middleware.tool_description == WRITE_TODOS_TOOL_DESCRIPTION
    # The single registered tool carries the default description too.
    assert len(middleware.tools) == 1
    assert middleware.tools[0].description == WRITE_TODOS_TOOL_DESCRIPTION
def test_adds_system_prompt_when_none_exists() -> None:
    """Test that middleware adds system prompt when request has none."""
    middleware = TodoListMiddleware()
    request = _make_request(system_prompt=None)
    seen: list[ModelRequest] = []

    def recording_handler(req: ModelRequest) -> ModelResponse:
        seen.append(req)
        return ModelResponse(result=[AIMessage(content="response")])

    middleware.wrap_model_call(request, recording_handler)
    # The handler receives a modified request with the todo prompt injected...
    assert seen
    modified = seen[-1]
    assert modified.system_prompt is not None
    assert "write_todos" in modified.system_prompt
    # ...while the original request object is left untouched.
    assert request.system_prompt is None
def test_appends_to_existing_system_prompt() -> None:
    """Test that middleware appends to existing system prompt."""
    existing_prompt = "You are a helpful assistant."
    middleware = TodoListMiddleware()
    request = _make_request(system_prompt=existing_prompt)
    seen: list[ModelRequest] = []

    def recording_handler(req: ModelRequest) -> ModelResponse:
        seen.append(req)
        return ModelResponse(result=[AIMessage(content="response")])

    middleware.wrap_model_call(request, recording_handler)
    # The handler's request starts with the caller's prompt and has the todo
    # instructions appended after it...
    assert seen
    modified = seen[-1]
    assert modified.system_prompt is not None
    assert modified.system_prompt.startswith(existing_prompt)
    assert existing_prompt in modified.system_prompt
    assert "write_todos" in modified.system_prompt
    # ...while the original request keeps only the caller's prompt.
    assert request.system_prompt == existing_prompt
@pytest.mark.parametrize(
    ("original_prompt", "expected_prompt_prefix"),
    [
        ("Original prompt", "Original prompt\n\n## `write_todos`"),
        (None, "## `write_todos`"),
    ],
)
def test_todo_middleware_on_model_call(
    original_prompt: str | None, expected_prompt_prefix: str
) -> None:
    """Test that wrap_model_call handles system prompts correctly."""
    middleware = TodoListMiddleware()
    state: PlanningState = {"messages": [HumanMessage(content="Hello")]}
    request = ModelRequest(
        model=FakeToolCallingModel(),
        system_prompt=original_prompt,
        messages=[HumanMessage(content="Hello")],
        tool_choice=None,
        tools=[],
        response_format=None,
        state=state,
        runtime=cast("Runtime", object()),
        model_settings={},
    )
    seen: list[ModelRequest] = []

    def recording_handler(req: ModelRequest) -> ModelResponse:
        seen.append(req)
        return ModelResponse(result=[AIMessage(content="mock response")])

    middleware.wrap_model_call(request, recording_handler)
    # The request handed to the handler carries the augmented prompt...
    assert seen
    modified = seen[-1]
    assert modified.system_prompt is not None
    assert modified.system_prompt.startswith(expected_prompt_prefix)
    # ...and the original request keeps whatever prompt it started with.
    assert request.system_prompt == original_prompt
def test_custom_system_prompt() -> None:
    """A custom system prompt replaces the default todo instructions."""
    custom_prompt = "Custom planning instructions"
    middleware = TodoListMiddleware(system_prompt=custom_prompt)
    request = _make_request(system_prompt=None)
    seen: list[ModelRequest] = []

    def fake_handler(inner: ModelRequest) -> ModelResponse:
        seen.append(inner)
        return ModelResponse(result=[AIMessage(content="response")])

    middleware.wrap_model_call(request, fake_handler)
    # With no pre-existing prompt, the custom prompt is used verbatim.
    assert seen
    assert seen[0].system_prompt == custom_prompt
    # The caller's request object is not mutated.
    assert request.system_prompt is None
def test_todo_middleware_custom_system_prompt() -> None:
    """A custom system prompt is appended verbatim after the existing one."""
    custom_system_prompt = "Custom todo system prompt for testing"
    middleware = TodoListMiddleware(system_prompt=custom_system_prompt)
    state: PlanningState = {"messages": [HumanMessage(content="Hello")]}
    request = ModelRequest(
        model=FakeToolCallingModel(),
        system_prompt="Original prompt",
        messages=[HumanMessage(content="Hello")],
        tool_choice=None,
        tools=[],
        response_format=None,
        model_settings={},
        state=state,
        runtime=cast("Runtime", object()),
    )
    seen: list[ModelRequest] = []

    def fake_handler(inner: ModelRequest) -> ModelResponse:
        seen.append(inner)
        return ModelResponse(result=[AIMessage(content="mock response")])

    middleware.wrap_model_call(request, fake_handler)
    # Custom prompt is appended after the caller-provided prompt.
    assert seen
    assert seen[0].system_prompt == f"Original prompt\n\n{custom_system_prompt}"
    # The original request keeps its own prompt.
    assert request.system_prompt == "Original prompt"
def test_custom_tool_description() -> None:
    """A custom tool description overrides the default on the exposed tool."""
    custom_description = "Custom todo tool description"
    middleware = TodoListMiddleware(tool_description=custom_description)
    # The middleware exposes exactly one tool, carrying the custom text.
    assert len(middleware.tools) == 1
    assert middleware.tools[0].description == custom_description
def test_todo_middleware_custom_tool_description() -> None:
    """Constructor-level tool_description reaches the registered tool."""
    custom_tool_description = "Custom tool description for testing"
    middleware = TodoListMiddleware(tool_description=custom_tool_description)
    assert len(middleware.tools) == 1
    todo_tool = middleware.tools[0]
    assert todo_tool.description == custom_tool_description
def test_todo_middleware_custom_system_prompt_and_tool_description() -> None:
    """Custom system prompt and tool description can be combined."""
    custom_system_prompt = "Custom system prompt"
    custom_tool_description = "Custom tool description"
    middleware = TodoListMiddleware(
        system_prompt=custom_system_prompt,
        tool_description=custom_tool_description,
    )
    state: PlanningState = {"messages": [HumanMessage(content="Hello")]}
    request = ModelRequest(
        model=FakeToolCallingModel(),
        system_prompt=None,
        messages=[HumanMessage(content="Hello")],
        tool_choice=None,
        tools=[],
        response_format=None,
        state=state,
        runtime=cast("Runtime", object()),
        model_settings={},
    )
    seen: list[ModelRequest] = []

    def fake_handler(inner: ModelRequest) -> ModelResponse:
        seen.append(inner)
        return ModelResponse(result=[AIMessage(content="mock response")])

    middleware.wrap_model_call(request, fake_handler)
    # With no pre-existing prompt the custom prompt is used as-is...
    assert seen
    assert seen[0].system_prompt == custom_system_prompt
    assert request.system_prompt is None
    # ...and the single exposed tool carries the custom description.
    assert len(middleware.tools) == 1
    assert middleware.tools[0].description == custom_tool_description
@pytest.mark.parametrize(
    ("todos", "expected_message"),
    [
        ([], "Updated todo list to []"),
        (
            [{"content": "Task 1", "status": "pending"}],
            "Updated todo list to [{'content': 'Task 1', 'status': 'pending'}]",
        ),
        (
            [
                {"content": "Task 1", "status": "pending"},
                {"content": "Task 2", "status": "in_progress"},
            ],
            (
                "Updated todo list to ["
                "{'content': 'Task 1', 'status': 'pending'}, "
                "{'content': 'Task 2', 'status': 'in_progress'}]"
            ),
        ),
        (
            [
                {"content": "Task 1", "status": "pending"},
                {"content": "Task 2", "status": "in_progress"},
                {"content": "Task 3", "status": "completed"},
            ],
            (
                "Updated todo list to ["
                "{'content': 'Task 1', 'status': 'pending'}, "
                "{'content': 'Task 2', 'status': 'in_progress'}, "
                "{'content': 'Task 3', 'status': 'completed'}]"
            ),
        ),
    ],
)
def test_todo_middleware_write_todos_tool_execution(
    todos: list[dict[str, Any]], expected_message: str
) -> None:
    """Invoking write_todos stores the new list and echoes it in a message."""
    command = write_todos.invoke(
        {
            "args": {"todos": todos},
            "name": "write_todos",
            "type": "tool_call",
            "id": "test_call",
        }
    )
    # The returned command carries both the state update and a confirmation
    # message describing the new list.
    assert command.update["todos"] == todos
    assert command.update["messages"][0].content == expected_message
@pytest.mark.parametrize(
    "invalid_todos",
    [
        [{"content": "Task 1", "status": "invalid_status"}],
        [{"status": "pending"}],
    ],
)
def test_todo_middleware_write_todos_tool_validation_errors(
    invalid_todos: list[dict[str, Any]],
) -> None:
    """write_todos rejects unknown statuses and missing required fields."""
    bad_call = {
        "args": {"todos": invalid_todos},
        "name": "write_todos",
        "type": "tool_call",
        "id": "test_call",
    }
    # Argument validation should surface exactly one error for each case.
    with pytest.raises(ValueError, match="1 validation error for write_todos"):
        write_todos.invoke(bad_call)
def test_todo_middleware_agent_creation_with_middleware() -> None:
    """An agent wired with TodoListMiddleware tracks todo status transitions."""

    def _write_todos_call(status: str) -> dict[str, Any]:
        # One write_todos tool call updating "Task 1" to the given status.
        return {
            "args": {"todos": [{"content": "Task 1", "status": status}]},
            "name": "write_todos",
            "type": "tool_call",
            "id": "test_call",
        }

    model = FakeToolCallingModel(
        tool_calls=[
            [_write_todos_call("pending")],
            [_write_todos_call("in_progress")],
            [_write_todos_call("completed")],
            [],  # final turn: no tool calls, so the agent stops
        ]
    )
    agent = create_agent(model=model, middleware=[TodoListMiddleware()])
    result = agent.invoke({"messages": [HumanMessage("Hello")]})
    # The final state reflects the last write_todos update.
    assert result["todos"] == [{"content": "Task 1", "status": "completed"}]
    # Conversation: 1 human message, then three (AI tool-call, tool result)
    # pairs, then a final AI reply with no tool calls = 8 messages total.
    assert len(result["messages"]) == 8
def test_todo_middleware_custom_system_prompt_in_agent() -> None:
    """A custom system prompt reaches the model when run inside an agent."""
    middleware = TodoListMiddleware(system_prompt="call the write_todos tool")
    model = FakeToolCallingModel(
        tool_calls=[
            [
                {
                    "args": {
                        "todos": [{"content": "Custom task", "status": "pending"}]
                    },
                    "name": "write_todos",
                    "type": "tool_call",
                    "id": "test_call",
                }
            ],
            [],  # second turn ends the loop
        ]
    )
    agent = create_agent(model=model, middleware=[middleware])
    result = agent.invoke({"messages": [HumanMessage("Hello")]})
    assert result["todos"] == [{"content": "Custom task", "status": "pending"}]
    # The custom prompt should appear in the first AI message — presumably
    # the fake model reflects its prompt into the reply content.
    assert "call the write_todos tool" in result["messages"][1].content
# ==============================================================================
# Async Tests
# ==============================================================================
async def test_adds_system_prompt_when_none_exists_async() -> None:
    """Async: middleware injects its todo prompt when the request has none."""
    middleware = TodoListMiddleware()
    request = _make_request(system_prompt=None)
    seen: list[ModelRequest] = []

    async def fake_handler(inner: ModelRequest) -> ModelResponse:
        seen.append(inner)
        return ModelResponse(result=[AIMessage(content="response")])

    await middleware.awrap_model_call(request, fake_handler)
    # Handler receives the augmented request; original is untouched.
    assert seen
    modified = seen[0]
    assert modified.system_prompt is not None
    assert "write_todos" in modified.system_prompt
    assert request.system_prompt is None
async def test_appends_to_existing_system_prompt_async() -> None:
    """Async: middleware appends its prompt after an existing system prompt."""
    base_prompt = "You are a helpful assistant."
    middleware = TodoListMiddleware()
    request = _make_request(system_prompt=base_prompt)
    seen: list[ModelRequest] = []

    async def fake_handler(inner: ModelRequest) -> ModelResponse:
        seen.append(inner)
        return ModelResponse(result=[AIMessage(content="response")])

    await middleware.awrap_model_call(request, fake_handler)
    # Original text first, todo instructions after; caller's request intact.
    assert seen
    modified = seen[0]
    assert modified.system_prompt is not None
    assert base_prompt in modified.system_prompt
    assert "write_todos" in modified.system_prompt
    assert modified.system_prompt.startswith(base_prompt)
    assert request.system_prompt == base_prompt
async def test_custom_system_prompt_async() -> None:
    """Async: a custom system prompt replaces the default instructions."""
    custom_prompt = "Custom planning instructions"
    middleware = TodoListMiddleware(system_prompt=custom_prompt)
    request = _make_request(system_prompt=None)
    seen: list[ModelRequest] = []

    async def fake_handler(inner: ModelRequest) -> ModelResponse:
        seen.append(inner)
        return ModelResponse(result=[AIMessage(content="response")])

    await middleware.awrap_model_call(request, fake_handler)
    # With no pre-existing prompt, the custom prompt is used verbatim.
    assert seen
    assert seen[0].system_prompt == custom_prompt
def test_parallel_write_todos_calls_rejected() -> None:
    """after_model rejects an AI turn containing two write_todos calls."""
    middleware = TodoListMiddleware()
    error_text = (
        "Error: The `write_todos` tool should never be called multiple times "
        "in parallel. Please call it only once per model invocation to update "
        "the todo list."
    )
    # AI message carrying two parallel write_todos calls.
    ai_message = AIMessage(
        content="I'll update the todos",
        tool_calls=[
            {
                "name": "write_todos",
                "args": {"todos": [{"content": f"Task {i}", "status": "pending"}]},
                "id": f"call_{i}",
                "type": "tool_call",
            }
            for i in (1, 2)
        ],
    )
    state: PlanningState = {"messages": [HumanMessage(content="Hello"), ai_message]}
    result = middleware.after_model(state, _fake_runtime())
    # Every parallel write_todos call gets its own error ToolMessage.
    assert result == {
        "messages": [
            ToolMessage(content=error_text, tool_call_id=f"call_{i}", status="error")
            for i in (1, 2)
        ]
    }
def test_parallel_write_todos_with_other_tools() -> None:
    """Only the duplicated write_todos calls receive error ToolMessages."""
    middleware = TodoListMiddleware()
    error_text = (
        "Error: The `write_todos` tool should never be called multiple times "
        "in parallel. Please call it only once per model invocation to update "
        "the todo list."
    )
    other_call = {
        "name": "some_other_tool",
        "args": {"param": "value"},
        "id": "call_other",
        "type": "tool_call",
    }
    duplicate_todo_calls = [
        {
            "name": "write_todos",
            "args": {"todos": [{"content": f"Task {i}", "status": "pending"}]},
            "id": f"call_{i}",
            "type": "tool_call",
        }
        for i in (1, 2)
    ]
    ai_message = AIMessage(
        content="I'll do multiple things",
        tool_calls=[other_call, *duplicate_todo_calls],
    )
    state: PlanningState = {"messages": [HumanMessage(content="Hello"), ai_message]}
    result = middleware.after_model(state, _fake_runtime())
    # Errors cover only the write_todos duplicates — not call_other.
    assert result == {
        "messages": [
            ToolMessage(content=error_text, tool_call_id=f"call_{i}", status="error")
            for i in (1, 2)
        ]
    }
def test_single_write_todos_call_allowed() -> None:
    """A lone write_todos call passes after_model without intervention."""
    middleware = TodoListMiddleware()
    ai_message = AIMessage(
        content="I'll update the todos",
        tool_calls=[
            {
                "name": "write_todos",
                "args": {"todos": [{"content": "Task 1", "status": "pending"}]},
                "id": "call_1",
                "type": "tool_call",
            },
        ],
    )
    state: PlanningState = {"messages": [HumanMessage(content="Hello"), ai_message]}
    # None signals that no corrective messages are needed.
    assert middleware.after_model(state, _fake_runtime()) is None
async def test_parallel_write_todos_calls_rejected_async() -> None:
    """Async: aafter_model rejects a turn with two write_todos calls."""
    middleware = TodoListMiddleware()
    error_text = (
        "Error: The `write_todos` tool should never be called multiple times "
        "in parallel. Please call it only once per model invocation to update "
        "the todo list."
    )
    ai_message = AIMessage(
        content="I'll update the todos",
        tool_calls=[
            {
                "name": "write_todos",
                "args": {"todos": [{"content": f"Task {i}", "status": "pending"}]},
                "id": f"call_{i}",
                "type": "tool_call",
            }
            for i in (1, 2)
        ],
    )
    state: PlanningState = {"messages": [HumanMessage(content="Hello"), ai_message]}
    result = await middleware.aafter_model(state, _fake_runtime())
    # One error ToolMessage per parallel write_todos call.
    assert result == {
        "messages": [
            ToolMessage(content=error_text, tool_call_id=f"call_{i}", status="error")
            for i in (1, 2)
        ]
    }
async def test_parallel_write_todos_with_other_tools_async() -> None:
    """Async: only duplicated write_todos calls receive error messages."""
    middleware = TodoListMiddleware()
    error_text = (
        "Error: The `write_todos` tool should never be called multiple times "
        "in parallel. Please call it only once per model invocation to update "
        "the todo list."
    )
    other_call = {
        "name": "some_other_tool",
        "args": {"param": "value"},
        "id": "call_other",
        "type": "tool_call",
    }
    duplicate_todo_calls = [
        {
            "name": "write_todos",
            "args": {"todos": [{"content": f"Task {i}", "status": "pending"}]},
            "id": f"call_{i}",
            "type": "tool_call",
        }
        for i in (1, 2)
    ]
    ai_message = AIMessage(
        content="I'll do multiple things",
        tool_calls=[other_call, *duplicate_todo_calls],
    )
    state: PlanningState = {"messages": [HumanMessage(content="Hello"), ai_message]}
    result = await middleware.aafter_model(state, _fake_runtime())
    # Errors cover only the write_todos duplicates — not call_other.
    assert result == {
        "messages": [
            ToolMessage(content=error_text, tool_call_id=f"call_{i}", status="error")
            for i in (1, 2)
        ]
    }
async def test_single_write_todos_call_allowed_async() -> None:
    """Async: a lone write_todos call needs no intervention."""
    middleware = TodoListMiddleware()
    ai_message = AIMessage(
        content="I'll update the todos",
        tool_calls=[
            {
                "name": "write_todos",
                "args": {"todos": [{"content": "Task 1", "status": "pending"}]},
                "id": "call_1",
                "type": "tool_call",
            },
        ],
    )
    state: PlanningState = {"messages": [HumanMessage(content="Hello"), ai_message]}
    # None signals that no corrective messages are needed.
    assert await middleware.aafter_model(state, _fake_runtime()) is None
async def test_handler_called_with_modified_request_async() -> None:
    """Async: the handler runs exactly once and sees the augmented prompt."""
    middleware = TodoListMiddleware()
    request = _make_request(system_prompt="Original")
    seen_prompts: list[str | None] = []

    async def fake_handler(inner: ModelRequest) -> ModelResponse:
        seen_prompts.append(inner.system_prompt)
        return ModelResponse(result=[AIMessage(content="response")])

    await middleware.awrap_model_call(request, fake_handler)
    assert len(seen_prompts) == 1
    prompt = seen_prompts[0]
    # The prompt seen by the handler combines the original text with the
    # todo-tool instructions.
    assert prompt is not None
    assert "Original" in prompt
    assert "write_todos" in prompt
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/tests/unit_tests/agents/middleware/implementations/test_todo.py",
"license": "MIT License",
"lines": 667,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/core/tests/unit_tests/messages/block_translators/test_groq.py | """Test groq block translator."""
from typing import cast
import pytest
from langchain_core.messages import AIMessage
from langchain_core.messages import content as types
from langchain_core.messages.base import _extract_reasoning_from_additional_kwargs
from langchain_core.messages.block_translators import PROVIDER_TRANSLATORS
from langchain_core.messages.block_translators.groq import (
_parse_code_json,
translate_content,
)
def test_groq_translator_registered() -> None:
    """The groq provider exposes both translator hooks in the registry."""
    groq_entry = PROVIDER_TRANSLATORS.get("groq")
    assert groq_entry is not None
    for hook in ("translate_content", "translate_content_chunk"):
        assert hook in groq_entry
def test_extract_reasoning_from_additional_kwargs_exists() -> None:
    """The reasoning-extraction helper is importable and callable."""
    # Importing at module top already proves it exists; also check it's a
    # callable rather than some other attribute.
    assert callable(_extract_reasoning_from_additional_kwargs)
def test_groq_translate_content_basic() -> None:
    """A plain text AIMessage translates to a single text block."""
    blocks = translate_content(AIMessage(content="Hello world"))
    assert isinstance(blocks, list)
    (text_block,) = blocks
    assert text_block["type"] == "text"
    assert text_block["text"] == "Hello world"
def test_groq_translate_content_with_reasoning() -> None:
    """reasoning_content in additional_kwargs becomes a leading reasoning block."""
    message = AIMessage(
        content="Final answer",
        additional_kwargs={"reasoning_content": "Let me think about this..."},
    )
    blocks = translate_content(message)
    assert isinstance(blocks, list)
    # Reasoning first, then the visible text.
    reasoning_block, text_block = blocks
    assert reasoning_block["type"] == "reasoning"
    assert reasoning_block["reasoning"] == "Let me think about this..."
    assert text_block["type"] == "text"
    assert text_block["text"] == "Final answer"
def test_groq_translate_content_with_tool_calls() -> None:
    """An AIMessage tool call translates to a single tool_call block."""
    message = AIMessage(
        content="",
        tool_calls=[
            {
                "name": "search",
                "args": {"query": "test"},
                "id": "call_123",
            }
        ],
    )
    blocks = translate_content(message)
    assert isinstance(blocks, list)
    (call_block,) = blocks
    assert call_block["type"] == "tool_call"
    assert call_block["name"] == "search"
    assert call_block["args"] == {"query": "test"}
    assert call_block["id"] == "call_123"
def test_groq_translate_content_with_executed_tools() -> None:
    """Groq executed_tools map to server_tool_call / server_tool_result blocks."""
    message = AIMessage(
        content="",
        additional_kwargs={
            "executed_tools": [
                {
                    "type": "python",
                    "arguments": '{"code": "print(\\"hello\\")"}',
                    "output": "hello\\n",
                }
            ]
        },
    )
    blocks = translate_content(message)
    assert isinstance(blocks, list)
    assert len(blocks) >= 2
    # Exactly one server_tool_call describing the code_interpreter invocation.
    calls = [b for b in blocks if b.get("type") == "server_tool_call"]
    assert len(calls) == 1
    call = cast("types.ServerToolCall", calls[0])
    assert call["name"] == "code_interpreter"
    assert "code" in call["args"]
    # Exactly one server_tool_result carrying the captured output.
    results = [b for b in blocks if b.get("type") == "server_tool_result"]
    assert len(results) == 1
    result = cast("types.ServerToolResult", results[0])
    assert result["output"] == "hello\\n"
    assert result["status"] == "success"
def test_parse_code_json() -> None:
    """_parse_code_json handles valid JSON and Groq's unescaped inner quotes."""
    # Well-formed JSON round-trips.
    assert _parse_code_json('{"code": "print(\'hello\')"}') == {
        "code": "print('hello')"
    }
    # Groq sometimes emits unescaped double quotes inside the code value.
    assert _parse_code_json('{"code": "print("hello")"}') == {
        "code": 'print("hello")'
    }
    # Payloads without a recognizable code field are rejected.
    with pytest.raises(ValueError, match="Could not extract Python code"):
        _parse_code_json('{"invalid": "format"}')
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/core/tests/unit_tests/messages/block_translators/test_groq.py",
"license": "MIT License",
"lines": 113,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/partners/perplexity/tests/unit_tests/test_output_parsers.py | """Unit tests for output parsers."""
import pytest
from langchain_core.exceptions import OutputParserException
from langchain_core.outputs import Generation
from pydantic import BaseModel, Field
from langchain_perplexity.output_parsers import (
ReasoningJsonOutputParser,
ReasoningStructuredOutputParser,
strip_think_tags,
)
class TestStripThinkTags:
    """Behavioral tests for the strip_think_tags helper."""

    def test_strip_simple_think_tags(self) -> None:
        """A single tag pair and its content are removed."""
        cleaned = strip_think_tags("Hello <think>some reasoning</think> world")
        assert cleaned == "Hello world"

    def test_strip_multiple_think_tags(self) -> None:
        """Every tag pair in the text is removed."""
        sample = "<think>first</think> Hello <think>second</think> world\
<think>third</think>"
        assert strip_think_tags(sample) == "Hello world"

    def test_strip_nested_like_think_tags(self) -> None:
        """Removal is non-recursive: first <think> through first </think>."""
        sample = "<think>outer <think>inner</think> still outer</think> result"
        # The inner </think> closes the span, so the trailing </think>
        # survives as literal text.
        assert strip_think_tags(sample) == "still outer</think> result"

    def test_strip_think_tags_no_closing_tag(self) -> None:
        """An unclosed <think> is kept as literal text."""
        sample = "Hello <think>unclosed reasoning world"
        assert strip_think_tags(sample) == sample

    def test_strip_think_tags_empty_content(self) -> None:
        """An empty tag pair is removed cleanly."""
        assert strip_think_tags("Hello <think></think> world") == "Hello world"

    def test_strip_think_tags_no_tags(self) -> None:
        """Text without tags passes through unchanged."""
        assert strip_think_tags("Hello world") == "Hello world"

    def test_strip_think_tags_only_tags(self) -> None:
        """Text that is nothing but a tag pair collapses to empty."""
        assert strip_think_tags("<think>reasoning</think>") == ""

    def test_strip_think_tags_multiline(self) -> None:
        """Tags spanning several lines are removed with their content."""
        sample = """Hello
<think>
reasoning line 1
reasoning line 2
</think>
world"""
        assert strip_think_tags(sample) == "Hello\n\nworld"

    def test_strip_think_tags_with_special_chars(self) -> None:
        """Tag content containing JSON-like characters is still removed."""
        cleaned = strip_think_tags('Before <think>{"key": "value"}</think> After')
        assert cleaned == "Before After"
class TestReasoningJsonOutputParser:
    """Tests for ReasoningJsonOutputParser's think-tag-aware JSON parsing."""

    def test_parse_json_without_think_tags(self) -> None:
        """Plain JSON parses as usual."""
        parser = ReasoningJsonOutputParser()
        parsed = parser.parse_result([Generation(text='{"name": "John", "age": 30}')])
        assert parsed == {"name": "John", "age": 30}

    def test_parse_json_with_think_tags(self) -> None:
        """A leading think block is stripped before JSON parsing."""
        parser = ReasoningJsonOutputParser()
        payload = '<think>Let me construct the JSON</think>{"name": "John", "age": 30}'
        assert parser.parse_result([Generation(text=payload)]) == {
            "name": "John",
            "age": 30,
        }

    def test_parse_json_with_multiple_think_tags(self) -> None:
        """Think blocks interleaved with the JSON are all stripped."""
        parser = ReasoningJsonOutputParser()
        payload = '<think>Step 1</think>{"name": <think>thinking</think>"John", "age": 30}'
        assert parser.parse_result([Generation(text=payload)]) == {
            "name": "John",
            "age": 30,
        }

    def test_parse_markdown_json_with_think_tags(self) -> None:
        """Markdown-fenced JSON still parses after tag stripping."""
        parser = ReasoningJsonOutputParser()
        payload = """<think>Building response</think>
```json
{"name": "John", "age": 30}
```"""
        assert parser.parse_result([Generation(text=payload)]) == {
            "name": "John",
            "age": 30,
        }

    def test_parse_complex_json_with_think_tags(self) -> None:
        """Nested objects and arrays survive tag stripping intact."""
        parser = ReasoningJsonOutputParser()
        payload = """<think>Creating nested structure</think>
{
"user": {
"name": "John",
"address": {
"city": "NYC",
"zip": "10001"
}
},
"items": [1, 2, 3]
}"""
        assert parser.parse_result([Generation(text=payload)]) == {
            "user": {"name": "John", "address": {"city": "NYC", "zip": "10001"}},
            "items": [1, 2, 3],
        }

    def test_parse_invalid_json_with_think_tags(self) -> None:
        """Invalid JSON still raises, tags or not."""
        parser = ReasoningJsonOutputParser()
        bad = Generation(text="<think>This will fail</think>{invalid json}")
        with pytest.raises(OutputParserException):
            parser.parse_result([bad])

    def test_parse_empty_string_after_stripping(self) -> None:
        """If stripping leaves nothing to parse, the parser raises."""
        parser = ReasoningJsonOutputParser()
        empty = Generation(text="<think>Only reasoning, no output</think>")
        with pytest.raises(OutputParserException):
            parser.parse_result([empty])

    def test_parse_json_array_with_think_tags(self) -> None:
        """Top-level arrays parse after tag stripping."""
        parser = ReasoningJsonOutputParser()
        payload = '<think>Creating array</think>[{"id": 1}, {"id": 2}]'
        assert parser.parse_result([Generation(text=payload)]) == [
            {"id": 1},
            {"id": 2},
        ]

    def test_partial_json_parsing_with_think_tags(self) -> None:
        """partial=True tolerates a truncated JSON object."""
        parser = ReasoningJsonOutputParser()
        truncated = Generation(text='<think>Starting</think>{"name": "John", "age":')
        # Only the complete key/value pairs are returned.
        assert parser.parse_result([truncated], partial=True) == {"name": "John"}
class MockPerson(BaseModel):
    """Simple schema used to exercise structured-output parsing."""

    name: str = Field(description="The person's name")
    age: int = Field(description="The person's age")
    # Optional so tests can cover both presence and absence of a field.
    email: str | None = Field(default=None, description="The person's email")
class MockCompany(BaseModel):
    """Nested schema (list of MockPerson) for structured-output tests."""

    company_name: str = Field(description="Company name")
    employees: list[MockPerson] = Field(description="List of employees")
    founded_year: int = Field(description="Year founded")
class TestReasoningStructuredOutputParser:
"""Tests for ReasoningStructuredOutputParser."""
def test_parse_structured_output_without_think_tags(self) -> None:
    """Plain JSON is validated straight into the Pydantic model."""
    parser: ReasoningStructuredOutputParser[MockPerson] = (
        ReasoningStructuredOutputParser(pydantic_object=MockPerson)
    )
    payload = '{"name": "John Doe", "age": 30, "email": "john@example.com"}'
    person = parser.parse_result([Generation(text=payload)])
    assert isinstance(person, MockPerson)
    assert (person.name, person.age, person.email) == (
        "John Doe",
        30,
        "john@example.com",
    )
def test_parse_structured_output_with_think_tags(self) -> None:
    """A leading think block is stripped before model validation."""
    parser: ReasoningStructuredOutputParser[MockPerson] = (
        ReasoningStructuredOutputParser(pydantic_object=MockPerson)
    )
    payload = '<think>Let me create a person object</think>{"name": "John Doe", "age": 30}'
    person = parser.parse_result([Generation(text=payload)])
    assert isinstance(person, MockPerson)
    assert person.name == "John Doe"
    assert person.age == 30
    # Omitted optional field falls back to its default.
    assert person.email is None
def test_parse_structured_output_with_multiple_think_tags(self) -> None:
    """Several think blocks before the JSON are all stripped."""
    parser: ReasoningStructuredOutputParser[MockPerson] = (
        ReasoningStructuredOutputParser(pydantic_object=MockPerson)
    )
    payload = """<think>Step 1: Determine name</think>
<think>Step 2: Determine age</think>
{"name": "Jane Smith", "age": 25}"""
    person = parser.parse_result([Generation(text=payload)])
    assert isinstance(person, MockPerson)
    assert (person.name, person.age) == ("Jane Smith", 25)
def test_parse_structured_output_markdown_with_think_tags(self) -> None:
    """Markdown-fenced JSON validates after tag stripping."""
    parser: ReasoningStructuredOutputParser[MockPerson] = (
        ReasoningStructuredOutputParser(pydantic_object=MockPerson)
    )
    payload = """<think>Building person object</think>
```json
{"name": "Alice Brown", "age": 35, "email": "alice@example.com"}
```"""
    person = parser.parse_result([Generation(text=payload)])
    assert isinstance(person, MockPerson)
    assert (person.name, person.age, person.email) == (
        "Alice Brown",
        35,
        "alice@example.com",
    )
def test_parse_nested_structured_output_with_think_tags(self) -> None:
    """Test parsing nested Pydantic models with think tags."""
    parser: ReasoningStructuredOutputParser[MockCompany] = (
        ReasoningStructuredOutputParser(pydantic_object=MockCompany)
    )
    # MockCompany nests a list of MockPerson models; validation must recurse
    # into the 'employees' array.
    text = """<think>Creating company with employees</think>
{
"company_name": "Tech Corp",
"founded_year": 2020,
"employees": [
{"name": "John", "age": 30},
{"name": "Jane", "age": 28}
]
}"""
    generation = Generation(text=text)
    result = parser.parse_result([generation])
    assert isinstance(result, MockCompany)
    assert result.company_name == "Tech Corp"
    assert result.founded_year == 2020
    assert len(result.employees) == 2
    assert result.employees[0].name == "John"
    assert result.employees[1].name == "Jane"
def test_parse_invalid_structured_output_with_think_tags(self) -> None:
    """Test that invalid structured output raises exception."""
    person_parser: ReasoningStructuredOutputParser[MockPerson] = (
        ReasoningStructuredOutputParser(pydantic_object=MockPerson)
    )
    # The payload omits the required 'age' field, so validation must fail.
    payload = '<think>Creating person</think>{"name": "John"}'
    with pytest.raises(OutputParserException):
        person_parser.parse_result([Generation(text=payload)])
def test_parse_structured_wrong_type_with_think_tags(self) -> None:
    """Test that wrong types raise validation errors."""
    person_parser: ReasoningStructuredOutputParser[MockPerson] = (
        ReasoningStructuredOutputParser(pydantic_object=MockPerson)
    )
    # 'age' is a string here, but MockPerson declares it as an int.
    payload = '<think>Creating person</think>{"name": "John", "age": "thirty"}'
    with pytest.raises(OutputParserException):
        person_parser.parse_result([Generation(text=payload)])
def test_parse_empty_after_stripping_think_tags(self) -> None:
    """Test handling when only think tags remain."""
    only_reasoning = "<think>Only reasoning here</think>"
    person_parser: ReasoningStructuredOutputParser[MockPerson] = (
        ReasoningStructuredOutputParser(pydantic_object=MockPerson)
    )
    # Stripping the think block leaves nothing to parse, which is an error.
    with pytest.raises(OutputParserException):
        person_parser.parse_result([Generation(text=only_reasoning)])
def test_get_format_instructions(self) -> None:
    """Test that format instructions work correctly."""
    person_parser: ReasoningStructuredOutputParser[MockPerson] = (
        ReasoningStructuredOutputParser(pydantic_object=MockPerson)
    )
    instructions = person_parser.get_format_instructions()
    # The schema name or at least one of its fields should be mentioned.
    assert any(token in instructions for token in ("MockPerson", "name"))
    assert isinstance(instructions, str)
def test_partial_structured_parsing_with_think_tags(self) -> None:
    """Test partial parsing of structured output with think tags."""
    parser: ReasoningStructuredOutputParser[MockPerson] = (
        ReasoningStructuredOutputParser(pydantic_object=MockPerson)
    )
    # JSON is deliberately truncated (no closing brace).
    text = '<think>Starting</think>{"name": "John", "age": 30'
    generation = Generation(text=text)
    # Partial parsing should handle incomplete JSON
    result = parser.parse_result([generation], partial=True)
    # With partial=True the parser may return either a dict of whatever
    # fields it recovered or a fully validated model. Check isinstance()
    # FIRST: evaluating `"name" in result` against a pydantic model raises
    # TypeError (BaseModel does not support the `in` operator), so the
    # original operand order could crash instead of asserting.
    assert isinstance(result, MockPerson) or "name" in result
def test_parser_with_think_tags_in_json_values(self) -> None:
    """Test that think tags in JSON string values don't cause issues."""
    person_parser: ReasoningStructuredOutputParser[MockPerson] = (
        ReasoningStructuredOutputParser(pydantic_object=MockPerson)
    )
    # Think tags should be stripped before JSON parsing, so they won't be in values
    raw = '<think>reasoning</think>{"name": "John <Doe>", "age": 30}'
    parsed = person_parser.parse_result([Generation(text=raw)])
    assert isinstance(parsed, MockPerson)
    assert parsed.age == 30
    # Angle brackets inside a JSON string value survive intact.
    assert parsed.name == "John <Doe>"
def test_multiline_think_tags_with_structured_output(self) -> None:
    """Test parsing structured output with multiline think tags."""
    parser: ReasoningStructuredOutputParser[MockPerson] = (
        ReasoningStructuredOutputParser(pydantic_object=MockPerson)
    )
    # The think block spans several lines and must still be stripped entirely.
    text = """<think>
Step 1: Consider the requirements
Step 2: Structure the data
Step 3: Format as JSON
</think>
{"name": "Bob Wilson", "age": 40, "email": "bob@example.com"}"""
    generation = Generation(text=text)
    result = parser.parse_result([generation])
    assert isinstance(result, MockPerson)
    assert result.name == "Bob Wilson"
    assert result.age == 40
    assert result.email == "bob@example.com"
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/partners/perplexity/tests/unit_tests/test_output_parsers.py",
"license": "MIT License",
"lines": 312,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/partners/huggingface/tests/integration_tests/test_chat_models.py | from langchain_core.messages import AIMessageChunk
from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint
def test_stream_usage() -> None:
    """Test we are able to configure stream options on models that require it."""
    endpoint = HuggingFaceEndpoint(  # type: ignore[call-arg] # (model is inferred in class)
        repo_id="google/gemma-3-27b-it",
        task="conversational",
        provider="nebius",
    )
    chat = ChatHuggingFace(llm=endpoint, stream_usage=True)
    # Fold the streamed chunks together; the aggregate must carry usage data.
    aggregated: AIMessageChunk | None = None
    for piece in chat.stream("hello"):
        assert isinstance(piece, AIMessageChunk)
        if aggregated is None:
            aggregated = piece
        else:
            aggregated = aggregated + piece
    assert isinstance(aggregated, AIMessageChunk)
    assert aggregated.usage_metadata
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/partners/huggingface/tests/integration_tests/test_chat_models.py",
"license": "MIT License",
"lines": 16,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/langchain_v1/tests/unit_tests/agents/test_create_agent_tool_validation.py | import sys
from typing import Annotated, Any
import pytest
from langchain_core.messages import HumanMessage
from langgraph.prebuilt import InjectedStore, ToolRuntime
from langgraph.store.base import BaseStore
from langgraph.store.memory import InMemoryStore
from langchain.agents import AgentState, create_agent
from langchain.tools import InjectedState
from langchain.tools import tool as dec_tool
from tests.unit_tests.agents.model import FakeToolCallingModel
@pytest.mark.skipif(
    sys.version_info >= (3, 14), reason="Pydantic model rebuild issue in Python 3.14"
)
def test_tool_invocation_error_excludes_injected_state() -> None:
    """Test that tool invocation errors only include LLM-controllable arguments.

    When a tool has InjectedState parameters and the LLM makes an incorrect
    invocation (e.g., missing required arguments), the error message should only
    contain the arguments from the tool call that the LLM controls. This ensures
    the LLM receives relevant context to correct its mistakes, without being
    distracted by system-injected parameters it has no control over.

    This test uses create_agent to ensure the behavior works in a full agent context.
    """

    # Custom state schema carrying data the LLM never controls.
    class TestState(AgentState[Any]):
        secret_data: str  # Example of state data not controlled by LLM

    @dec_tool
    def tool_with_injected_state(
        some_val: int,
        state: Annotated[TestState, InjectedState],
    ) -> str:
        """Tool that uses injected state."""
        return f"some_val: {some_val}"

    # Turn 1: a bad call omitting the required 'some_val' argument.
    # Turn 2: no tool calls, which ends the agent loop.
    bad_call = {
        "name": "tool_with_injected_state",
        "args": {"wrong_arg": "value"},  # Missing required 'some_val'
        "id": "call_1",
    }
    fake_model = FakeToolCallingModel(tool_calls=[[bad_call], []])

    agent = create_agent(
        model=fake_model,
        tools=[tool_with_injected_state],
        state_schema=TestState,
    )

    outcome = agent.invoke(
        {
            "messages": [HumanMessage("Test message")],
            "secret_data": "sensitive_secret_123",
        }
    )

    # Exactly one tool message is expected, and it must carry the error.
    tool_messages = [m for m in outcome["messages"] if m.type == "tool"]
    assert len(tool_messages) == 1
    (tool_message,) = tool_messages
    assert tool_message.status == "error"
    # Only the LLM-provided args appear; the injected state stays hidden.
    assert "{'wrong_arg': 'value'}" in tool_message.content
    assert "secret_data" not in tool_message.content
    assert "sensitive_secret_123" not in tool_message.content
@pytest.mark.skipif(
    sys.version_info >= (3, 14), reason="Pydantic model rebuild issue in Python 3.14"
)
async def test_tool_invocation_error_excludes_injected_state_async() -> None:
    """Test that async tool invocation errors only include LLM-controllable arguments.

    This test verifies that the async execution path (_execute_tool_async and _arun_one)
    properly filters validation errors to exclude system-injected arguments, ensuring
    the LLM receives only relevant context for correction.
    """

    # Define a custom state schema
    class TestState(AgentState[Any]):
        internal_data: str  # system-injected; the LLM never sees or sets this

    @dec_tool
    async def async_tool_with_injected_state(
        query: str,
        max_results: int,
        state: Annotated[TestState, InjectedState],
    ) -> str:
        """Async tool that uses injected state."""
        return f"query: {query}, max_results: {max_results}"

    # Create a fake model that makes an incorrect tool call
    # - query has wrong type (int instead of str)
    # - max_results is missing
    model = FakeToolCallingModel(
        tool_calls=[
            [
                {
                    "name": "async_tool_with_injected_state",
                    "args": {"query": 999},  # Wrong type, missing max_results
                    "id": "call_async_1",
                }
            ],
            [],  # End the loop
        ]
    )
    # Create an agent with the async tool
    agent = create_agent(
        model=model,
        tools=[async_tool_with_injected_state],
        state_schema=TestState,
    )
    # Invoke with state data
    result = await agent.ainvoke(
        {
            "messages": [HumanMessage("Test async")],
            "internal_data": "secret_internal_value_xyz",
        }
    )
    # Find the tool error message
    tool_messages = [m for m in result["messages"] if m.type == "tool"]
    assert len(tool_messages) == 1
    tool_message = tool_messages[0]
    assert tool_message.status == "error"
    # Verify error mentions LLM-controlled parameters only
    content = tool_message.content
    assert "query" in content.lower(), "Error should mention 'query' (LLM-controlled)"
    assert "max_results" in content.lower(), "Error should mention 'max_results' (LLM-controlled)"
    # Verify system-injected state does not appear in the validation errors
    # This keeps the error focused on what the LLM can actually fix
    assert "internal_data" not in content, (
        "Error should NOT mention 'internal_data' (system-injected field)"
    )
    assert "secret_internal_value" not in content, (
        "Error should NOT contain system-injected state values"
    )
    # Verify only LLM-controlled parameters are in the error list
    # Should see "query" and "max_results" errors, but not "state"
    lines = content.split("\n")
    error_lines = [line.strip() for line in lines if line.strip()]
    # Find lines that look like field names (single words at start of line).
    # NOTE(review): this is a heuristic over pydantic's error formatting
    # (field names appear on their own short lines); it would need updating
    # if the validation-error layout changes.
    field_errors = [
        line
        for line in error_lines
        if line
        and not line.startswith("input")
        and not line.startswith("field")
        and not line.startswith("error")
        and not line.startswith("please")
        and len(line.split()) <= 2
    ]
    # Verify system-injected 'state' is not in the field error list
    assert not any(field.lower() == "state" for field in field_errors), (
        "The field 'state' (system-injected) should not appear in validation errors"
    )
@pytest.mark.skipif(
    sys.version_info >= (3, 14), reason="Pydantic model rebuild issue in Python 3.14"
)
def test_create_agent_error_content_with_multiple_params() -> None:
    """Test that error messages only include LLM-controlled parameter errors.

    Uses create_agent to verify that when a tool with both LLM-controlled
    and system-injected parameters receives invalid arguments, the error message:
    1. Contains details about LLM-controlled parameter errors (query, limit)
    2. Does NOT contain system-injected parameter names (state, store, runtime)
    3. Does NOT contain values from system-injected parameters
    4. Properly formats the validation errors for LLM correction

    This ensures the LLM receives focused, actionable feedback.
    """

    class TestState(AgentState[Any]):
        user_id: str  # system-injected identity
        api_key: str  # system-injected secret; must never leak into errors
        session_data: dict[str, Any]  # system-injected session payload

    @dec_tool
    def complex_tool(
        query: str,
        limit: int,
        state: Annotated[TestState, InjectedState],
        store: Annotated[BaseStore, InjectedStore()],
        runtime: ToolRuntime,
    ) -> str:
        """A complex tool with multiple injected and non-injected parameters.

        Args:
            query: The search query string.
            limit: Maximum number of results to return.
            state: The graph state (injected).
            store: The persistent store (injected).
            runtime: The tool runtime context (injected).
        """
        # Access injected params to verify they work in normal execution
        user = state.get("user_id", "unknown")
        return f"Results for '{query}' (limit={limit}, user={user})"

    # Create a model that makes an incorrect tool call with multiple errors:
    # - query is wrong type (int instead of str)
    # - limit is missing
    # Then returns no tool calls to end the loop
    model = FakeToolCallingModel(
        tool_calls=[
            [
                {
                    "name": "complex_tool",
                    "args": {
                        "query": 12345,  # Wrong type - should be str
                        # "limit" is missing - required field
                    },
                    "id": "call_complex_1",
                }
            ],
            [],  # No tool calls on second iteration to end the loop
        ]
    )
    # Create an agent with the complex tool and custom state
    # Need to provide a store since the tool uses InjectedStore
    agent = create_agent(
        model=model,
        tools=[complex_tool],
        state_schema=TestState,
        store=InMemoryStore(),
    )
    # Invoke with sensitive data in state
    result = agent.invoke(
        {
            "messages": [HumanMessage("Search for something")],
            "user_id": "user_12345",
            "api_key": "sk-secret-key-abc123xyz",
            "session_data": {"token": "secret_session_token"},
        }
    )
    # Find the tool error message
    tool_messages = [m for m in result["messages"] if m.type == "tool"]
    assert len(tool_messages) == 1
    tool_message = tool_messages[0]
    assert tool_message.status == "error"
    assert tool_message.tool_call_id == "call_complex_1"
    content = tool_message.content
    # Verify error mentions LLM-controlled parameter issues
    assert "query" in content.lower(), "Error should mention 'query' (LLM-controlled)"
    assert "limit" in content.lower(), "Error should mention 'limit' (LLM-controlled)"
    # Should indicate validation errors occurred
    assert "validation error" in content.lower() or "error" in content.lower(), (
        "Error should indicate validation occurred"
    )
    # Verify NO system-injected parameter names appear in error
    # These are not controlled by the LLM and should be excluded
    assert "state" not in content.lower(), "Error should NOT mention 'state' (system-injected)"
    assert "store" not in content.lower(), "Error should NOT mention 'store' (system-injected)"
    assert "runtime" not in content.lower(), "Error should NOT mention 'runtime' (system-injected)"
    # Verify NO values from system-injected parameters appear in error
    # The LLM doesn't control these, so they shouldn't distract from the actual issues
    assert "user_12345" not in content, "Error should NOT contain user_id value (from state)"
    assert "sk-secret-key" not in content, "Error should NOT contain api_key value (from state)"
    assert "secret_session_token" not in content, (
        "Error should NOT contain session_data value (from state)"
    )
    # Verify the LLM's original tool call args are present
    # The error should show what the LLM actually provided to help it correct the mistake
    assert "12345" in content, "Error should show the invalid query value provided by LLM (12345)"
    # Check error is well-formatted
    assert "complex_tool" in content, "Error should mention the tool name"
@pytest.mark.skipif(
    sys.version_info >= (3, 14), reason="Pydantic model rebuild issue in Python 3.14"
)
def test_create_agent_error_only_model_controllable_params() -> None:
    """Test that errors only include LLM-controllable parameter issues.

    Focused test ensuring that validation errors for LLM-controlled parameters
    are clearly reported, while system-injected parameters remain completely
    absent from error messages. This provides focused feedback to the LLM.
    """

    class StateWithSecrets(AgentState[Any]):
        password: str  # Example of data not controlled by LLM

    @dec_tool
    def secure_tool(
        username: str,
        email: str,
        state: Annotated[StateWithSecrets, InjectedState],
    ) -> str:
        """Tool that validates user credentials.

        Args:
            username: The username (3-20 chars).
            email: The email address.
            state: State with password (system-injected).
        """
        return f"Validated {username} with email {email}"

    # The LLM supplies a too-short username and a malformed email address.
    invalid_call = {
        "name": "secure_tool",
        "args": {
            "username": "ab",  # Too short (needs 3-20)
            "email": "not-an-email",  # Invalid format
        },
        "id": "call_secure_1",
    }
    fake_model = FakeToolCallingModel(tool_calls=[[invalid_call], []])

    agent = create_agent(
        model=fake_model,
        tools=[secure_tool],
        state_schema=StateWithSecrets,
    )
    outcome = agent.invoke(
        {
            "messages": [HumanMessage("Create account")],
            "password": "super_secret_password_12345",
        }
    )

    tool_messages = [m for m in outcome["messages"] if m.type == "tool"]
    assert len(tool_messages) == 1
    content = tool_messages[0].content

    # The error should mention LLM-controlled parameters
    # Note: Pydantic's default validation may or may not catch format issues,
    # but the parameters themselves should be present in error messages
    assert "username" in content.lower() or "email" in content.lower(), (
        "Error should mention at least one LLM-controlled parameter"
    )
    # Password is system-injected and should not appear
    # The LLM doesn't control it, so it shouldn't distract from the actual errors
    assert "password" not in content.lower(), (
        "Error should NOT mention 'password' (system-injected parameter)"
    )
    assert "super_secret_password" not in content, (
        "Error should NOT contain password value (from system-injected state)"
    )
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/tests/unit_tests/agents/test_create_agent_tool_validation.py",
"license": "MIT License",
"lines": 325,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/langchain_v1/tests/integration_tests/agents/middleware/test_shell_tool_integration.py | """Integration tests for ShellToolMiddleware with create_agent."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any
import pytest
from langchain_core.messages import HumanMessage
from langchain_core.tools import tool
from langchain.agents import create_agent
from langchain.agents.middleware.shell_tool import ShellToolMiddleware
if TYPE_CHECKING:
from pathlib import Path
from langgraph.graph.state import CompiledStateGraph
from langchain.agents.middleware.types import _InputAgentState
def _get_model(provider: str) -> Any:
"""Get chat model for the specified provider."""
if provider == "anthropic":
return pytest.importorskip("langchain_anthropic").ChatAnthropic(
model="claude-sonnet-4-5-20250929"
)
if provider == "openai":
return pytest.importorskip("langchain_openai").ChatOpenAI(model="gpt-4o-mini")
msg = f"Unknown provider: {provider}"
raise ValueError(msg)
@pytest.mark.parametrize("provider", ["anthropic", "openai"])
def test_shell_tool_basic_execution(tmp_path: Path, provider: str) -> None:
    """Test basic shell command execution across different models."""
    agent: CompiledStateGraph[Any, Any, _InputAgentState, Any] = create_agent(
        model=_get_model(provider),
        middleware=[ShellToolMiddleware(workspace_root=tmp_path / "workspace")],
    )
    result = agent.invoke(
        {"messages": [HumanMessage("Run the command 'echo hello' and tell me what it outputs")]}
    )
    # Collect everything the shell tool returned.
    shell_replies = [msg.content for msg in result["messages"] if msg.type == "tool"]
    assert len(shell_replies) > 0, "Shell tool should have been called"
    assert any("hello" in reply.lower() for reply in shell_replies), (
        "Shell output should contain 'hello'"
    )
@pytest.mark.requires("langchain_anthropic")
def test_shell_session_persistence(tmp_path: Path) -> None:
    """Test shell session state persists across multiple tool calls."""
    agent: CompiledStateGraph[Any, Any, _InputAgentState, Any] = create_agent(
        model=_get_model("anthropic"),
        middleware=[ShellToolMiddleware(workspace_root=tmp_path / "workspace")],
    )
    prompt = HumanMessage(
        "First run 'export TEST_VAR=hello'. "
        "Then run 'echo $TEST_VAR' to verify it persists."
    )
    result = agent.invoke({"messages": [prompt]})
    shell_replies = [msg.content for msg in result["messages"] if msg.type == "tool"]
    # Both commands should have gone through the shell tool.
    assert len(shell_replies) >= 2, "Shell tool should be called multiple times"
    # The exported variable must still be visible in a later call.
    assert any("hello" in reply for reply in shell_replies), "Environment variable should persist"
@pytest.mark.requires("langchain_anthropic")
def test_shell_tool_error_handling(tmp_path: Path) -> None:
    """Test shell tool captures command errors."""
    agent: CompiledStateGraph[Any, Any, _InputAgentState, Any] = create_agent(
        model=_get_model("anthropic"),
        middleware=[ShellToolMiddleware(workspace_root=tmp_path / "workspace")],
    )
    result = agent.invoke(
        {
            "messages": [
                HumanMessage(
                    "Run the command 'ls /nonexistent_directory_12345' and show me the result"
                )
            ]
        }
    )
    tool_messages = [msg for msg in result["messages"] if msg.type == "tool"]
    assert len(tool_messages) > 0, "Shell tool should have been called"
    combined_output = " ".join(msg.content for msg in tool_messages).lower()
    # Any of these markers indicates the failure surfaced in the tool output.
    failure_markers = ("no such file", "cannot access", "not found", "exit code")
    assert any(marker in combined_output for marker in failure_markers), (
        "Error should be captured in tool output"
    )
@pytest.mark.requires("langchain_anthropic")
def test_shell_tool_with_custom_tools(tmp_path: Path) -> None:
    """Test shell tool works alongside custom tools."""

    @tool
    def custom_greeting(name: str) -> str:
        """Greet someone by name."""
        return f"Hello, {name}!"

    agent: CompiledStateGraph[Any, Any, _InputAgentState, Any] = create_agent(
        model=_get_model("anthropic"),
        tools=[custom_greeting],
        middleware=[ShellToolMiddleware(workspace_root=tmp_path / "workspace")],
    )
    prompt = HumanMessage(
        "First, use the custom_greeting tool to greet 'Alice'. "
        "Then run the shell command 'echo world'."
    )
    result = agent.invoke({"messages": [prompt]})
    tool_messages = [msg for msg in result["messages"] if msg.type == "tool"]
    assert len(tool_messages) >= 2, "Both tools should have been called"
    combined_output = " ".join(msg.content for msg in tool_messages)
    # One output comes from the custom tool, one from the shell.
    assert "Alice" in combined_output, "Custom tool should be used"
    assert "world" in combined_output, "Shell tool should be used"
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/tests/integration_tests/agents/middleware/test_shell_tool_integration.py",
"license": "MIT License",
"lines": 115,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/partners/xai/tests/integration_tests/test_chat_models.py | """Integration tests for ChatXAI specific features."""
from __future__ import annotations
from typing import Literal
import pytest
from langchain_core.messages import AIMessage, AIMessageChunk, BaseMessageChunk
from langchain_xai import ChatXAI
MODEL_NAME = "grok-4-fast-reasoning"
@pytest.mark.parametrize("output_version", ["", "v1"])
def test_reasoning(output_version: Literal["", "v1"]) -> None:
    """Test reasoning features.

    !!! note
        `grok-4` does not return `reasoning_content`, but may optionally return
        encrypted reasoning content if `use_encrypted_content` is set to `True`.
    """
    # Test reasoning effort
    # Only pass output_version when non-empty so the default path is also covered.
    if output_version:
        chat_model = ChatXAI(
            model="grok-3-mini",
            reasoning_effort="low",
            output_version=output_version,
        )
    else:
        chat_model = ChatXAI(
            model="grok-3-mini",
            reasoning_effort="low",
        )
    input_message = "What is 3^3?"
    response = chat_model.invoke(input_message)
    assert response.content
    assert response.additional_kwargs["reasoning_content"]
    ## Check output tokens
    usage_metadata = response.usage_metadata
    assert usage_metadata
    reasoning_tokens = usage_metadata.get("output_token_details", {}).get("reasoning")
    total_tokens = usage_metadata.get("output_tokens")
    assert total_tokens
    assert reasoning_tokens
    # Reasoning tokens are a strict subset of the total output tokens.
    assert total_tokens > reasoning_tokens
    # Test streaming
    full: BaseMessageChunk | None = None
    for chunk in chat_model.stream(input_message):
        full = chunk if full is None else full + chunk
    assert isinstance(full, AIMessageChunk)
    assert full.additional_kwargs["reasoning_content"]
    ## Check output tokens
    usage_metadata = full.usage_metadata
    assert usage_metadata
    reasoning_tokens = usage_metadata.get("output_token_details", {}).get("reasoning")
    total_tokens = usage_metadata.get("output_tokens")
    assert total_tokens
    assert reasoning_tokens
    assert total_tokens > reasoning_tokens
    # Check that we can access reasoning content blocks
    assert response.content_blocks
    reasoning_content = (
        block for block in response.content_blocks if block["type"] == "reasoning"
    )
    assert len(list(reasoning_content)) >= 1
    # Test that passing message with reasoning back in works
    follow_up_message = "Based on your reasoning, what is 4^4?"
    followup = chat_model.invoke([input_message, response, follow_up_message])
    assert followup.content
    assert followup.additional_kwargs["reasoning_content"]
    followup_reasoning = (
        block for block in followup.content_blocks if block["type"] == "reasoning"
    )
    assert len(list(followup_reasoning)) >= 1
    # Test passing in a ReasoningContentBlock
    # Build response_metadata by hand, tagging output_version only when set.
    response_metadata = {"model_provider": "xai"}
    if output_version:
        response_metadata["output_version"] = output_version
    msg_w_reasoning = AIMessage(
        content_blocks=response.content_blocks,
        response_metadata=response_metadata,
    )
    followup_2 = chat_model.invoke(
        [msg_w_reasoning, "Based on your reasoning, what is 5^5?"]
    )
    assert followup_2.content
    assert followup_2.additional_kwargs["reasoning_content"]
def test_web_search() -> None:
    """Exercise the server-side web_search tool via invoke and stream."""
    query = "Look up the current time in Boston, MA."
    expected_block_types = {"server_tool_call", "server_tool_result", "text"}
    llm = ChatXAI(model=MODEL_NAME).bind_tools([{"type": "web_search"}])
    # Test invoke
    response = llm.invoke(query)
    assert response.content
    assert {block["type"] for block in response.content_blocks} == expected_block_types
    assert response.content_blocks[0]["name"] == "web_search"  # type: ignore[typeddict-item]
    # Test streaming
    aggregated: AIMessageChunk | None = None
    for chunk in llm.stream(query):
        assert isinstance(chunk, AIMessageChunk)
        aggregated = chunk if aggregated is None else aggregated + chunk
    assert isinstance(aggregated, AIMessageChunk)
    assert {block["type"] for block in aggregated.content_blocks} == expected_block_types
    assert aggregated.content_blocks[0]["name"] == "web_search"  # type: ignore[typeddict-item]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/partners/xai/tests/integration_tests/test_chat_models.py",
"license": "MIT License",
"lines": 97,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/langchain_v1/tests/unit_tests/agents/test_return_direct_graph.py | """Tests for return_direct tool graph structure."""
from langchain_core.tools import tool
from syrupy.assertion import SnapshotAssertion
from langchain.agents.factory import create_agent
from tests.unit_tests.agents.model import FakeToolCallingModel
def test_agent_graph_without_return_direct_tools(snapshot: SnapshotAssertion) -> None:
    """Test that graph WITHOUT return_direct tools does NOT have edge from tools to end."""

    @tool
    def normal_tool(input_string: str) -> str:
        """A normal tool without return_direct."""
        return input_string

    agent = create_agent(
        model=FakeToolCallingModel(),
        tools=[normal_tool],
        system_prompt="You are a helpful assistant.",
    )
    # No tool sets return_direct=True, so the rendered mermaid diagram must
    # not contain a tools -> __end__ edge; the snapshot pins this.
    rendered = agent.get_graph().draw_mermaid()
    assert rendered == snapshot
def test_agent_graph_with_return_direct_tool(snapshot: SnapshotAssertion) -> None:
    """Test that graph WITH return_direct tools has correct edge from tools to end."""

    @tool(return_direct=True)
    def return_direct_tool(input_string: str) -> str:
        """A tool with return_direct=True."""
        return input_string

    agent = create_agent(
        model=FakeToolCallingModel(),
        tools=[return_direct_tool],
        system_prompt="You are a helpful assistant.",
    )
    # At least one tool sets return_direct=True, so the rendered mermaid
    # diagram must contain a tools -> __end__ edge; the snapshot pins this.
    rendered = agent.get_graph().draw_mermaid()
    assert rendered == snapshot
def test_agent_graph_with_mixed_tools(snapshot: SnapshotAssertion) -> None:
    """Test that graph with mixed tools (some return_direct, some not) has correct edges."""

    @tool(return_direct=True)
    def return_direct_tool(input_string: str) -> str:
        """A tool with return_direct=True."""
        return input_string

    @tool
    def normal_tool(input_string: str) -> str:
        """A normal tool without return_direct."""
        return input_string

    agent = create_agent(
        model=FakeToolCallingModel(),
        tools=[return_direct_tool, normal_tool],
        system_prompt="You are a helpful assistant.",
    )
    # A single return_direct=True tool is enough to require the
    # tools -> __end__ edge in the rendered mermaid diagram.
    rendered = agent.get_graph().draw_mermaid()
    assert rendered == snapshot
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/tests/unit_tests/agents/test_return_direct_graph.py",
"license": "MIT License",
"lines": 54,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/langchain_v1/langchain/agents/middleware/_execution.py | """Execution policies for the persistent shell middleware."""
from __future__ import annotations
import abc
import json
import os
import shutil
import subprocess
import sys
import typing
from collections.abc import Mapping, Sequence
from dataclasses import dataclass, field
from pathlib import Path
try:  # pragma: no cover - optional dependency on POSIX platforms
    # `resource` only exists on POSIX; on other platforms the flag below
    # lets callers detect that rlimits cannot be applied.
    import resource

    _HAS_RESOURCE = True
except ImportError:  # pragma: no cover - non-POSIX systems
    _HAS_RESOURCE = False

# Prefix for temporary paths created by the shell middleware.
# NOTE(review): not referenced in this module chunk — presumably consumed by
# the middleware when it creates scratch workspaces; confirm at the call site.
SHELL_TEMP_PREFIX = "langchain-shell-"
def _launch_subprocess(
    command: Sequence[str],
    *,
    env: Mapping[str, str],
    cwd: Path,
    preexec_fn: typing.Callable[[], None] | None,
    start_new_session: bool,
) -> subprocess.Popen[str]:
    """Start the shell process with text-mode pipes on stdin/stdout/stderr.

    Args:
        command: Argv for the shell; copied to a fresh list before launch.
        env: Environment passed directly to the child (not merged with the
            parent's ``os.environ``).
        cwd: Working directory the shell starts in.
        preexec_fn: Optional hook run in the child before ``exec`` (POSIX only).
        start_new_session: When ``True``, the child becomes a session leader so
            its whole process tree can be signalled later.
    """
    return subprocess.Popen(  # noqa: S603
        list(command),
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        cwd=cwd,
        text=True,
        encoding="utf-8",
        errors="replace",  # never crash on undecodable shell output
        bufsize=1,  # line-buffered so output can be consumed incrementally
        env=env,
        preexec_fn=preexec_fn,  # noqa: PLW1509
        start_new_session=start_new_session,
    )
if typing.TYPE_CHECKING:
    # NOTE(review): these names are already imported unconditionally at the
    # top of the module; this guard is redundant but harmless.
    from collections.abc import Mapping, Sequence
    from pathlib import Path
@dataclass
class BaseExecutionPolicy(abc.ABC):
    """Configuration contract for persistent shell sessions.

    Concrete subclasses encapsulate how a shell process is launched and constrained.
    Each policy documents its security guarantees and the operating environments in
    which it is appropriate. Use `HostExecutionPolicy` for trusted, same-host execution;
    `CodexSandboxExecutionPolicy` when the Codex CLI sandbox is available and you want
    additional syscall restrictions; and `DockerExecutionPolicy` for container-level
    isolation using Docker.
    """

    # Seconds allowed for a single command before it is timed out.
    command_timeout: float = 30.0
    # Seconds allowed for the shell process to become ready after launch.
    startup_timeout: float = 30.0
    # Seconds allowed for graceful shutdown before the process is killed.
    termination_timeout: float = 10.0
    # Truncation limits applied to captured command output; `None` bytes
    # means no byte cap.
    max_output_lines: int = 100
    max_output_bytes: int | None = None

    def __post_init__(self) -> None:
        """Validate the output-limit configuration.

        Raises:
            ValueError: If `max_output_lines` is not positive, or if
                `max_output_bytes` is provided but not positive.
        """
        if self.max_output_lines <= 0:
            msg = "max_output_lines must be positive."
            raise ValueError(msg)
        # Mirror the "positive if provided" validation the host policy applies
        # to its optional cpu/memory limits; previously a non-positive byte
        # cap was silently accepted.
        if self.max_output_bytes is not None and self.max_output_bytes <= 0:
            msg = "max_output_bytes must be positive if provided."
            raise ValueError(msg)

    @abc.abstractmethod
    def spawn(
        self,
        *,
        workspace: Path,
        env: Mapping[str, str],
        command: Sequence[str],
    ) -> subprocess.Popen[str]:
        """Launch the persistent shell process."""
@dataclass
class HostExecutionPolicy(BaseExecutionPolicy):
    """Run the shell directly on the host process.

    This policy is best suited for trusted or single-tenant environments (CI jobs,
    developer workstations, pre-sandboxed containers) where the agent must access the
    host filesystem and tooling without additional isolation. Enforces optional CPU and
    memory limits to prevent runaway commands but offers **no** filesystem or network
    sandboxing; commands can modify anything the process user can reach.

    On Linux platforms resource limits are applied with `resource.prlimit` after the
    shell starts. On macOS, where `prlimit` is unavailable, limits are set in a
    `preexec_fn` before `exec`. In both cases the shell runs in its own process group
    so timeouts can terminate the full subtree.
    """

    # Optional rlimits for the shell subtree; `None` disables each limit.
    cpu_time_seconds: int | None = None
    memory_bytes: int | None = None
    # When True the shell gets its own process group/session so timeouts can
    # signal the entire subtree.
    create_process_group: bool = True
    # Derived in __post_init__: True when any cpu/memory limit was requested.
    _limits_requested: bool = field(init=False, repr=False, default=False)
def __post_init__(self) -> None:
super().__post_init__()
if self.cpu_time_seconds is not None and self.cpu_time_seconds <= 0:
msg = "cpu_time_seconds must be positive if provided."
raise ValueError(msg)
if self.memory_bytes is not None and self.memory_bytes <= 0:
msg = "memory_bytes must be positive if provided."
raise ValueError(msg)
self._limits_requested = any(
value is not None for value in (self.cpu_time_seconds, self.memory_bytes)
)
if self._limits_requested and not _HAS_RESOURCE:
msg = (
"HostExecutionPolicy cpu/memory limits require the Python 'resource' module. "
"Either remove the limits or run on a POSIX platform."
)
raise RuntimeError(msg)
def spawn(
self,
*,
workspace: Path,
env: Mapping[str, str],
command: Sequence[str],
) -> subprocess.Popen[str]:
process = _launch_subprocess(
list(command),
env=env,
cwd=workspace,
preexec_fn=self._create_preexec_fn(),
start_new_session=self.create_process_group,
)
self._apply_post_spawn_limits(process)
return process
def _create_preexec_fn(self) -> typing.Callable[[], None] | None:
if not self._limits_requested or self._can_use_prlimit():
return None
def _configure() -> None: # pragma: no cover - depends on OS
if self.cpu_time_seconds is not None:
limit = (self.cpu_time_seconds, self.cpu_time_seconds)
resource.setrlimit(resource.RLIMIT_CPU, limit)
if self.memory_bytes is not None:
limit = (self.memory_bytes, self.memory_bytes)
if hasattr(resource, "RLIMIT_AS"):
resource.setrlimit(resource.RLIMIT_AS, limit)
elif hasattr(resource, "RLIMIT_DATA"):
resource.setrlimit(resource.RLIMIT_DATA, limit)
return _configure
def _apply_post_spawn_limits(self, process: subprocess.Popen[str]) -> None:
if not self._limits_requested or not self._can_use_prlimit():
return
if not _HAS_RESOURCE: # pragma: no cover - defensive
return
pid = process.pid
try:
prlimit = typing.cast("typing.Any", resource).prlimit
if self.cpu_time_seconds is not None:
prlimit(pid, resource.RLIMIT_CPU, (self.cpu_time_seconds, self.cpu_time_seconds))
if self.memory_bytes is not None:
limit = (self.memory_bytes, self.memory_bytes)
if hasattr(resource, "RLIMIT_AS"):
prlimit(pid, resource.RLIMIT_AS, limit)
elif hasattr(resource, "RLIMIT_DATA"):
prlimit(pid, resource.RLIMIT_DATA, limit)
except OSError as exc: # pragma: no cover - depends on platform support
msg = "Failed to apply resource limits via prlimit."
raise RuntimeError(msg) from exc
@staticmethod
def _can_use_prlimit() -> bool:
return _HAS_RESOURCE and hasattr(resource, "prlimit") and sys.platform.startswith("linux")
@dataclass
class CodexSandboxExecutionPolicy(BaseExecutionPolicy):
    """Launch the shell through the Codex CLI sandbox.

    Useful when the Codex CLI is installed and you want the extra syscall and
    filesystem restrictions its sandbox provides (Seatbelt on macOS,
    Landlock/seccomp on Linux). Commands still execute on the host, but inside
    the sandbox requested from the CLI. If the Codex binary cannot be found, or
    the runtime lacks the required kernel features (e.g. Landlock inside some
    containers), startup fails with a `RuntimeError`.

    Tune sandbox behavior through `config_overrides` to match your Codex CLI
    profile. No additional resource limits are imposed by this policy; pair it
    with host-level guards (cgroups, container limits) as needed.
    """

    # Name (or path) of the Codex CLI executable to invoke.
    binary: str = "codex"
    # Sandbox flavor passed to the CLI; "auto" picks one from `sys.platform`.
    platform: typing.Literal["auto", "macos", "linux"] = "auto"
    # Extra `-c key=value` overrides forwarded to the CLI, sorted by key.
    config_overrides: Mapping[str, typing.Any] = field(default_factory=dict)

    def spawn(
        self,
        *,
        workspace: Path,
        env: Mapping[str, str],
        command: Sequence[str],
    ) -> subprocess.Popen[str]:
        """Start the shell wrapped in a `codex sandbox` invocation."""
        return _launch_subprocess(
            self._build_command(command),
            env=env,
            cwd=workspace,
            preexec_fn=None,
            start_new_session=False,
        )

    def _build_command(self, command: Sequence[str]) -> list[str]:
        """Assemble the full CLI argument vector wrapping ``command``."""
        overrides = dict(self.config_overrides)
        wrapped: list[str] = [self._resolve_binary(), "sandbox", self._determine_platform()]
        for key in sorted(overrides):
            wrapped.extend(["-c", f"{key}={self._format_override(overrides[key])}"])
        wrapped.append("--")
        wrapped.extend(command)
        return wrapped

    def _resolve_binary(self) -> str:
        """Locate the Codex CLI on PATH, raising if it is missing."""
        resolved = shutil.which(self.binary)
        if resolved is None:
            msg = (
                "Codex sandbox policy requires the '%s' CLI to be installed and available on PATH."
            )
            raise RuntimeError(msg % self.binary)
        return resolved

    def _determine_platform(self) -> str:
        """Map `sys.platform` to a sandbox flavor when `platform` is "auto"."""
        if self.platform != "auto":
            return self.platform
        if sys.platform.startswith("linux"):
            return "linux"
        if sys.platform == "darwin":
            return "macos"
        msg = (
            "Codex sandbox policy could not determine a supported platform; "
            "set 'platform' explicitly."
        )
        raise RuntimeError(msg)

    @staticmethod
    def _format_override(value: typing.Any) -> str:
        """Serialize an override value as JSON, falling back to `str`."""
        try:
            return json.dumps(value)
        except TypeError:
            return str(value)
@dataclass
class DockerExecutionPolicy(BaseExecutionPolicy):
    """Run the shell inside a dedicated Docker container.

    Pick this policy when commands come from untrusted users or sessions must
    be strongly isolated from each other. The workspace is bind-mounted only
    when it refers to an existing non-temporary directory; ephemeral sessions
    run without a mount to minimise host exposure. Container networking is
    disabled by default (`--network none`), and further hardening is available
    via `read_only_rootfs` and `user`.

    The actual guarantees depend on how the Docker daemon is configured: run
    the agent on a host where Docker is locked down (rootless mode,
    AppArmor/SELinux, etc.) and audit anything passed through
    ``extra_run_args``. The default image is `python:3.12-alpine3.19`; supply a
    custom image for preinstalled tooling.
    """

    # Name (or path) of the docker CLI executable.
    binary: str = "docker"
    # Image the session container is created from.
    image: str = "python:3.12-alpine3.19"
    # Pass `--rm` so exited containers are removed automatically.
    remove_container_on_exit: bool = True
    # When False the container gets `--network none`.
    network_enabled: bool = False
    # Additional raw `docker run` arguments, inserted before the image name.
    extra_run_args: Sequence[str] | None = None
    # Optional `--memory` limit in bytes.
    memory_bytes: int | None = None
    # Unsupported here; use Docker CPU options instead (validated below).
    cpu_time_seconds: typing.Any | None = None
    # Optional `--cpus` value, forwarded verbatim.
    cpus: str | None = None
    # Pass `--read-only` to make the container root filesystem read-only.
    read_only_rootfs: bool = False
    # Optional `--user` value, forwarded verbatim.
    user: str | None = None

    def __post_init__(self) -> None:
        super().__post_init__()
        if self.memory_bytes is not None and self.memory_bytes <= 0:
            msg = "memory_bytes must be positive if provided."
            raise ValueError(msg)
        if self.cpu_time_seconds is not None:
            # Rejected explicitly: CPU-time limits belong to Docker itself.
            msg = (
                "DockerExecutionPolicy does not support cpu_time_seconds; configure CPU limits "
                "using Docker run options such as '--cpus'."
            )
            raise RuntimeError(msg)
        for label, value in (("cpus", self.cpus), ("user", self.user)):
            if value is not None and not value.strip():
                msg = f"{label} must be a non-empty string when provided."
                raise ValueError(msg)
        # Normalise to an immutable tuple so later mutation cannot leak in.
        self.extra_run_args = tuple(self.extra_run_args or ())

    def spawn(
        self,
        *,
        workspace: Path,
        env: Mapping[str, str],
        command: Sequence[str],
    ) -> subprocess.Popen[str]:
        """Start the shell inside a fresh Docker container.

        The docker CLI itself runs with the host environment; the session's
        environment is injected into the container via ``-e`` flags instead.
        """
        return _launch_subprocess(
            self._build_command(workspace, env, command),
            env=os.environ.copy(),
            cwd=workspace,
            preexec_fn=None,
            start_new_session=False,
        )

    def _build_command(
        self,
        workspace: Path,
        env: Mapping[str, str],
        command: Sequence[str],
    ) -> list[str]:
        """Assemble the full `docker run` argument vector."""
        args: list[str] = [self._resolve_binary(), "run", "-i"]
        if self.remove_container_on_exit:
            args.append("--rm")
        if not self.network_enabled:
            args += ["--network", "none"]
        if self.memory_bytes is not None:
            args += ["--memory", str(self.memory_bytes)]
        if self._should_mount_workspace(workspace):
            # Mirror the host path inside the container so paths printed by
            # commands are meaningful on both sides of the bind mount.
            host_path = str(workspace)
            args += ["-v", f"{host_path}:{host_path}", "-w", host_path]
        else:
            args += ["-w", "/"]
        if self.read_only_rootfs:
            args.append("--read-only")
        for name, value in env.items():
            args += ["-e", f"{name}={value}"]
        if self.cpus is not None:
            args += ["--cpus", self.cpus]
        if self.user is not None:
            args += ["--user", self.user]
        args.extend(self.extra_run_args or ())
        args.append(self.image)
        args.extend(command)
        return args

    @staticmethod
    def _should_mount_workspace(workspace: Path) -> bool:
        """Mount only non-ephemeral workspaces (not our own temp directories)."""
        return not workspace.name.startswith(SHELL_TEMP_PREFIX)

    def _resolve_binary(self) -> str:
        """Locate the docker CLI on PATH, raising if it is missing."""
        resolved = shutil.which(self.binary)
        if resolved is None:
            msg = (
                "Docker execution policy requires the '%s' CLI to be installed"
                " and available on PATH."
            )
            raise RuntimeError(msg % self.binary)
        return resolved
# Public execution-policy API exported by this module.
__all__ = [
    "BaseExecutionPolicy",
    "CodexSandboxExecutionPolicy",
    "DockerExecutionPolicy",
    "HostExecutionPolicy",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/langchain/agents/middleware/_execution.py",
"license": "MIT License",
"lines": 333,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/langchain_v1/langchain/agents/middleware/_redaction.py | """Shared redaction utilities for middleware components."""
from __future__ import annotations
import hashlib
import ipaddress
import operator
import re
from collections.abc import Callable, Sequence
from dataclasses import dataclass
from typing import Literal
from urllib.parse import urlparse
from typing_extensions import TypedDict
RedactionStrategy = Literal["block", "redact", "mask", "hash"]
"""Supported strategies for handling detected sensitive values."""
class PIIMatch(TypedDict):
    """Represents an individual match of sensitive data."""

    # Detector type name that produced the match (e.g. "email", "url").
    type: str
    # The exact matched substring.
    value: str
    # Start offset of the match within the scanned content.
    start: int
    # End offset (exclusive) of the match within the scanned content.
    end: int
class PIIDetectionError(Exception):
    """Raised when configured to block on detected sensitive values."""

    def __init__(self, pii_type: str, matches: Sequence[PIIMatch]) -> None:
        """Capture the offending type and matches, and build the message.

        Args:
            pii_type: Name of the detected sensitive type.
            matches: All matches that were detected for that type.
        """
        self.pii_type = pii_type
        self.matches = list(matches)
        super().__init__(
            f"Detected {len(self.matches)} instance(s) of {pii_type} in text content"
        )
Detector = Callable[[str], list[PIIMatch]]
"""Callable signature for detectors that locate sensitive values."""
def detect_email(content: str) -> list[PIIMatch]:
    """Detect email addresses in content.

    Args:
        content: The text content to scan for email addresses.

    Returns:
        A list of detected email matches.
    """
    email_re = r"\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Z|a-z]{2,}\b"
    found: list[PIIMatch] = []
    for hit in re.finditer(email_re, content):
        found.append(
            PIIMatch(type="email", value=hit.group(), start=hit.start(), end=hit.end())
        )
    return found
def detect_credit_card(content: str) -> list[PIIMatch]:
    """Detect credit card numbers in content using Luhn validation.

    Args:
        content: The text content to scan for credit card numbers.

    Returns:
        A list of detected credit card matches (Luhn-valid candidates only).
    """
    candidate_re = r"\b\d{4}[\s-]?\d{4}[\s-]?\d{4}[\s-]?\d{4}\b"
    # The regex only finds 16-digit shapes; the Luhn check filters typos and
    # random digit runs.
    return [
        PIIMatch(type="credit_card", value=hit.group(), start=hit.start(), end=hit.end())
        for hit in re.finditer(candidate_re, content)
        if _passes_luhn(hit.group())
    ]
def detect_ip(content: str) -> list[PIIMatch]:
    """Detect IP addresses in content.

    Only dotted-quad IPv4 candidates are scanned; each candidate is then
    validated with `ipaddress.ip_address`, so out-of-range octets (e.g.
    999.1.1.1) are rejected.

    Args:
        content: The text content to scan for IP addresses.

    Returns:
        A list of detected IP address matches.
    """
    results: list[PIIMatch] = []
    for hit in re.finditer(r"\b(?:[0-9]{1,3}\.){3}[0-9]{1,3}\b", content):
        candidate = hit.group()
        try:
            ipaddress.ip_address(candidate)
        except ValueError:
            # Looked like an address but is not a valid one; skip it.
            continue
        results.append(
            PIIMatch(type="ip", value=candidate, start=hit.start(), end=hit.end())
        )
    return results
def detect_mac_address(content: str) -> list[PIIMatch]:
    """Detect MAC addresses in content.

    Args:
        content: The text content to scan for MAC addresses.

    Returns:
        A list of detected MAC address matches.
    """
    # Six hex octets separated by ":" or "-".
    mac_re = r"\b([0-9A-Fa-f]{2}[:-]){5}[0-9A-Fa-f]{2}\b"
    found: list[PIIMatch] = []
    for hit in re.finditer(mac_re, content):
        found.append(
            PIIMatch(type="mac_address", value=hit.group(), start=hit.start(), end=hit.end())
        )
    return found
def detect_url(content: str) -> list[PIIMatch]:
    """Detect URLs in content using regex and stdlib validation.

    Args:
        content: The text content to scan for URLs.

    Returns:
        A list of detected URL matches.
    """
    found: list[PIIMatch] = []

    # Pass 1: URLs carrying an explicit http/https scheme.
    for hit in re.finditer(r"https?://[^\s<>\"{}|\\^`\[\]]+", content):
        candidate = hit.group()
        parsed = urlparse(candidate)
        if parsed.scheme in {"http", "https"} and parsed.netloc:
            found.append(
                PIIMatch(type="url", value=candidate, start=hit.start(), end=hit.end())
            )

    # Pass 2: scheme-less URLs (www.example.com or example.com/path).
    # Deliberately conservative to limit false positives in prose.
    bare_re = (
        r"\b(?:www\.)?[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?"
        r"(?:\.[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?)+(?:/[^\s]*)?"
    )
    for hit in re.finditer(bare_re, content):
        begin, finish = hit.start(), hit.end()
        # Skip spans already captured (with their scheme) during pass 1.
        overlaps = any(
            m["start"] <= begin < m["end"] or m["start"] < finish <= m["end"] for m in found
        )
        if overlaps:
            continue
        candidate = hit.group()
        if "/" not in candidate and not candidate.startswith("www."):
            # A bare domain in prose ("example.com") is too noisy to report.
            continue
        # Prepend a scheme for validation; urlparse needs one to split netloc.
        parsed = urlparse(f"http://{candidate}")
        if parsed.netloc and "." in parsed.netloc:
            found.append(PIIMatch(type="url", value=candidate, start=begin, end=finish))
    return found
BUILTIN_DETECTORS: dict[str, Detector] = {
"email": detect_email,
"credit_card": detect_credit_card,
"ip": detect_ip,
"mac_address": detect_mac_address,
"url": detect_url,
}
"""Registry of built-in detectors keyed by type name."""
_CARD_NUMBER_MIN_DIGITS = 13
_CARD_NUMBER_MAX_DIGITS = 19
def _passes_luhn(card_number: str) -> bool:
"""Validate credit card number using the Luhn checksum."""
digits = [int(d) for d in card_number if d.isdigit()]
if not _CARD_NUMBER_MIN_DIGITS <= len(digits) <= _CARD_NUMBER_MAX_DIGITS:
return False
checksum = 0
for index, digit in enumerate(reversed(digits)):
value = digit
if index % 2 == 1:
value *= 2
if value > 9: # noqa: PLR2004
value -= 9
checksum += value
return checksum % 10 == 0
def _apply_redact_strategy(content: str, matches: list[PIIMatch]) -> str:
result = content
for match in sorted(matches, key=operator.itemgetter("start"), reverse=True):
replacement = f"[REDACTED_{match['type'].upper()}]"
result = result[: match["start"]] + replacement + result[match["end"] :]
return result
_UNMASKED_CHAR_NUMBER = 4
_IPV4_PARTS_NUMBER = 4
def _apply_mask_strategy(content: str, matches: list[PIIMatch]) -> str:
result = content
for match in sorted(matches, key=operator.itemgetter("start"), reverse=True):
value = match["value"]
pii_type = match["type"]
if pii_type == "email":
parts = value.split("@")
if len(parts) == 2: # noqa: PLR2004
domain_parts = parts[1].split(".")
masked = (
f"{parts[0]}@****.{domain_parts[-1]}"
if len(domain_parts) > 1
else f"{parts[0]}@****"
)
else:
masked = "****"
elif pii_type == "credit_card":
digits_only = "".join(c for c in value if c.isdigit())
separator = "-" if "-" in value else " " if " " in value else ""
if separator:
masked = (
f"****{separator}****{separator}****{separator}"
f"{digits_only[-_UNMASKED_CHAR_NUMBER:]}"
)
else:
masked = f"************{digits_only[-_UNMASKED_CHAR_NUMBER:]}"
elif pii_type == "ip":
octets = value.split(".")
masked = f"*.*.*.{octets[-1]}" if len(octets) == _IPV4_PARTS_NUMBER else "****"
elif pii_type == "mac_address":
separator = ":" if ":" in value else "-"
masked = (
f"**{separator}**{separator}**{separator}**{separator}**{separator}{value[-2:]}"
)
elif pii_type == "url":
masked = "[MASKED_URL]"
else:
masked = (
f"****{value[-_UNMASKED_CHAR_NUMBER:]}"
if len(value) > _UNMASKED_CHAR_NUMBER
else "****"
)
result = result[: match["start"]] + masked + result[match["end"] :]
return result
def _apply_hash_strategy(content: str, matches: list[PIIMatch]) -> str:
result = content
for match in sorted(matches, key=operator.itemgetter("start"), reverse=True):
digest = hashlib.sha256(match["value"].encode()).hexdigest()[:8]
replacement = f"<{match['type']}_hash:{digest}>"
result = result[: match["start"]] + replacement + result[match["end"] :]
return result
def apply_strategy(
    content: str,
    matches: list[PIIMatch],
    strategy: RedactionStrategy,
) -> str:
    """Apply the configured strategy to matches within content.

    Args:
        content: The content to apply strategy to.
        matches: List of detected PII matches.
        strategy: The redaction strategy to apply.

    Returns:
        The content with the strategy applied.

    Raises:
        PIIDetectionError: If the strategy is `'block'` and matches are found.
        ValueError: If the strategy is unknown.
    """
    if not matches:
        # Nothing detected: every strategy (including "block") is a no-op.
        return content
    if strategy == "block":
        raise PIIDetectionError(matches[0]["type"], matches)
    if strategy == "redact":
        return _apply_redact_strategy(content, matches)
    if strategy == "mask":
        return _apply_mask_strategy(content, matches)
    if strategy == "hash":
        return _apply_hash_strategy(content, matches)
    msg = f"Unknown redaction strategy: {strategy}"  # type: ignore[unreachable]
    raise ValueError(msg)
def resolve_detector(pii_type: str, detector: Detector | str | None) -> Detector:
    """Return a callable detector for the given configuration.

    Args:
        pii_type: The PII type name.
        detector: Optional custom detector or regex pattern. If `None`, a built-in
            detector for the given PII type will be used.

    Returns:
        The resolved detector.

    Raises:
        ValueError: If an unknown PII type is specified without a custom detector or regex.
    """
    if detector is None:
        builtin = BUILTIN_DETECTORS.get(pii_type)
        if builtin is None:
            msg = (
                f"Unknown PII type: {pii_type}. "
                f"Must be one of {list(BUILTIN_DETECTORS.keys())} or provide a custom detector."
            )
            raise ValueError(msg)
        return builtin
    if not isinstance(detector, str):
        # Already a callable detector; hand it back unchanged.
        return detector

    # A string is treated as a regex pattern; wrap it in a detector closure.
    compiled = re.compile(detector)

    def regex_detector(content: str) -> list[PIIMatch]:
        return [
            PIIMatch(type=pii_type, value=hit.group(), start=hit.start(), end=hit.end())
            for hit in compiled.finditer(content)
        ]

    return regex_detector
@dataclass(frozen=True)
class RedactionRule:
    """Configuration for handling a single PII type."""

    # Name of the sensitive type this rule targets (built-in or custom).
    pii_type: str
    # How detected values are handled: "block", "redact", "mask", or "hash".
    strategy: RedactionStrategy = "redact"
    # Optional custom detector callable or regex pattern string; `None`
    # selects the built-in detector registered for `pii_type`.
    detector: Detector | str | None = None

    def resolve(self) -> ResolvedRedactionRule:
        """Resolve runtime detector and return an immutable rule.

        Returns:
            The resolved redaction rule.

        Raises:
            ValueError: If `pii_type` has no built-in detector and no custom
                detector or regex was supplied.
        """
        resolved_detector = resolve_detector(self.pii_type, self.detector)
        return ResolvedRedactionRule(
            pii_type=self.pii_type,
            strategy=self.strategy,
            detector=resolved_detector,
        )
@dataclass(frozen=True)
class ResolvedRedactionRule:
    """Resolved redaction rule ready for execution."""

    # Name of the sensitive type this rule targets.
    pii_type: str
    # Strategy applied to any matches found by `detector`.
    strategy: RedactionStrategy
    # Concrete callable that locates matches in text.
    detector: Detector

    def apply(self, content: str) -> tuple[str, list[PIIMatch]]:
        """Apply this rule to content, returning new content and matches.

        Args:
            content: The text content to scan and redact.

        Returns:
            A tuple of (updated content, list of detected matches).

        Raises:
            PIIDetectionError: If the strategy is `'block'` and matches are found.
        """
        matches = self.detector(content)
        if not matches:
            # Fast path: nothing detected, return the content unchanged.
            return content, []
        updated = apply_strategy(content, matches, self.strategy)
        return updated, matches
# Public names exported by this module; internal strategy helpers (the
# `_apply_*` functions) are deliberately excluded.
__all__ = [
    "PIIDetectionError",
    "PIIMatch",
    "RedactionRule",
    "ResolvedRedactionRule",
    "apply_strategy",
    "detect_credit_card",
    "detect_email",
    "detect_ip",
    "detect_mac_address",
    "detect_url",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/langchain/agents/middleware/_redaction.py",
"license": "MIT License",
"lines": 354,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/langchain_v1/langchain/agents/middleware/shell_tool.py | """Middleware that exposes a persistent shell tool to agents."""
from __future__ import annotations
import contextlib
import logging
import os
import queue
import signal
import subprocess
import tempfile
import threading
import time
import uuid
import weakref
from dataclasses import dataclass, field
from pathlib import Path
from typing import TYPE_CHECKING, Annotated, Any, Literal, cast
from langchain_core.messages import ToolMessage
from langchain_core.runnables import run_in_executor
from langchain_core.tools.base import ToolException
from langgraph.channels.untracked_value import UntrackedValue
from pydantic import BaseModel, model_validator
from pydantic.json_schema import SkipJsonSchema
from typing_extensions import NotRequired, override
from langchain.agents.middleware._execution import (
SHELL_TEMP_PREFIX,
BaseExecutionPolicy,
CodexSandboxExecutionPolicy,
DockerExecutionPolicy,
HostExecutionPolicy,
)
from langchain.agents.middleware._redaction import (
PIIDetectionError,
PIIMatch,
RedactionRule,
ResolvedRedactionRule,
)
from langchain.agents.middleware.types import (
AgentMiddleware,
AgentState,
ContextT,
PrivateStateAttr,
ResponseT,
)
from langchain.tools import ToolRuntime, tool
if TYPE_CHECKING:
from collections.abc import Mapping, Sequence
from langgraph.runtime import Runtime
LOGGER = logging.getLogger(__name__)
_DONE_MARKER_PREFIX = "__LC_SHELL_DONE__"
DEFAULT_TOOL_DESCRIPTION = (
"Execute a shell command inside a persistent session. Before running a command, "
"confirm the working directory is correct (e.g., inspect with `ls` or `pwd`) and ensure "
"any parent directories exist. Prefer absolute paths and quote paths containing spaces, "
'such as `cd "/path/with spaces"`. Chain multiple commands with `&&` or `;` instead of '
"embedding newlines. Avoid unnecessary `cd` usage unless explicitly required so the "
"session remains stable. Outputs may be truncated when they become very large, and long "
"running commands will be terminated once their configured timeout elapses."
)
SHELL_TOOL_NAME = "shell"
def _cleanup_resources(
session: ShellSession, tempdir: tempfile.TemporaryDirectory[str] | None, timeout: float
) -> None:
with contextlib.suppress(Exception):
session.stop(timeout)
if tempdir is not None:
with contextlib.suppress(Exception):
tempdir.cleanup()
@dataclass
class _SessionResources:
    """Container for per-run shell resources."""

    # The live shell session owned by this run.
    session: ShellSession
    # Scratch directory backing the workspace, if one was created for the run.
    tempdir: tempfile.TemporaryDirectory[str] | None
    # Policy that spawned the session; supplies the termination timeout.
    policy: BaseExecutionPolicy
    # GC-safety net that tears the session/tempdir down when this container
    # is collected (or when the finalizer is invoked explicitly).
    finalizer: weakref.finalize = field(init=False, repr=False)  # type: ignore[type-arg]

    def __post_init__(self) -> None:
        # The cleanup callback receives the session/tempdir/timeout directly
        # rather than `self`, as `weakref.finalize` requires the callback not
        # to hold a strong reference back to the tracked object.
        self.finalizer = weakref.finalize(
            self,
            _cleanup_resources,
            self.session,
            self.tempdir,
            self.policy.termination_timeout,
        )
class ShellToolState(AgentState[ResponseT]):
    """Agent state extension for tracking shell session resources.

    Type Parameters:
        ResponseT: The type of the structured response. Defaults to `Any`.
    """

    # Middleware-private handle to the live session resources; marked as
    # untracked/private via the `UntrackedValue` and `PrivateStateAttr`
    # annotations.
    shell_session_resources: NotRequired[
        Annotated[_SessionResources | None, UntrackedValue, PrivateStateAttr]
    ]
@dataclass(frozen=True)
class CommandExecutionResult:
    """Structured result from command execution."""

    # Collected command output; stderr lines carry a "[stderr] " prefix.
    output: str
    # Shell exit status, or `None` when it could not be determined
    # (e.g. the command timed out before the done marker was seen).
    exit_code: int | None
    # True when the command exceeded its timeout.
    timed_out: bool
    # True when output beyond the policy's `max_output_lines` was dropped.
    truncated_by_lines: bool
    # True when output beyond the policy's `max_output_bytes` was dropped.
    truncated_by_bytes: bool
    # Total lines produced, including any that were dropped.
    total_lines: int
    # Total bytes produced (UTF-8), including any that were dropped.
    total_bytes: int
class ShellSession:
    """Persistent shell session that supports sequential command execution."""

    def __init__(
        self,
        workspace: Path,
        policy: BaseExecutionPolicy,
        command: tuple[str, ...],
        environment: Mapping[str, str],
    ) -> None:
        """Store session configuration; the process starts lazily via `start`.

        Args:
            workspace: Directory used as the shell's working directory.
            policy: Execution policy that spawns and constrains the process.
            command: Argument vector used to launch the shell itself.
            environment: Environment variables passed to the shell.
        """
        self._workspace = workspace
        self._policy = policy
        self._command = command
        self._environment = dict(environment)
        # Subprocess handle; `None` until `start` (and again after `stop`).
        self._process: subprocess.Popen[str] | None = None
        self._stdin: Any = None
        # Reader threads push `(stream_label, line)` tuples here; a `None`
        # payload marks EOF for that stream.
        self._queue: queue.Queue[tuple[str, str | None]] = queue.Queue()
        # Serializes `execute` calls so one command's output cannot be
        # interleaved with another's.
        self._lock = threading.Lock()
        self._stdout_thread: threading.Thread | None = None
        self._stderr_thread: threading.Thread | None = None
        self._terminated = False

    def start(self) -> None:
        """Start the shell subprocess and reader threads.

        Idempotent: a no-op when the process is already running.

        Raises:
            RuntimeError: If the shell session pipes cannot be initialized.
        """
        if self._process and self._process.poll() is None:
            return
        self._process = self._policy.spawn(
            workspace=self._workspace,
            env=self._environment,
            command=self._command,
        )
        if (
            self._process.stdin is None
            or self._process.stdout is None
            or self._process.stderr is None
        ):
            msg = "Failed to initialize shell session pipes."
            raise RuntimeError(msg)
        self._stdin = self._process.stdin
        self._terminated = False
        # Fresh queue so output from a previous process cannot leak into
        # this one.
        self._queue = queue.Queue()
        # Daemon reader threads forward each stream line-by-line into the
        # queue; daemonized so they never block interpreter shutdown.
        self._stdout_thread = threading.Thread(
            target=self._enqueue_stream,
            args=(self._process.stdout, "stdout"),
            daemon=True,
        )
        self._stderr_thread = threading.Thread(
            target=self._enqueue_stream,
            args=(self._process.stderr, "stderr"),
            daemon=True,
        )
        self._stdout_thread.start()
        self._stderr_thread.start()

    def restart(self) -> None:
        """Restart the shell process."""
        self.stop(self._policy.termination_timeout)
        self.start()

    def stop(self, timeout: float) -> None:
        """Stop the shell subprocess."""
        if not self._process:
            return
        if self._process.poll() is None and not self._terminated:
            # Ask the shell to exit gracefully before resorting to signals.
            try:
                self._stdin.write("exit\n")
                self._stdin.flush()
            except (BrokenPipeError, OSError):
                LOGGER.debug(
                    "Failed to write exit command; terminating shell session.",
                    exc_info=True,
                )
        try:
            # NOTE(review): `Popen.wait` returns the exit code (never `None`)
            # or raises TimeoutExpired, so this `is None` branch looks dead —
            # kills only ever happen via the except clause. Confirm intent.
            if self._process.wait(timeout=timeout) is None:
                self._kill_process()
        except subprocess.TimeoutExpired:
            self._kill_process()
        finally:
            self._terminated = True
            with contextlib.suppress(Exception):
                self._stdin.close()
            self._process = None

    def execute(self, command: str, *, timeout: float) -> CommandExecutionResult:
        """Execute a command in the persistent shell.

        Args:
            command: Shell command text to run (a trailing newline is added
                when missing).
            timeout: Seconds allowed before the command is abandoned and the
                session restarted.

        Returns:
            The collected output and exit status as a `CommandExecutionResult`.

        Raises:
            RuntimeError: If the session has not been started or has exited.
        """
        if not self._process or self._process.poll() is not None:
            msg = "Shell session is not running."
            raise RuntimeError(msg)
        # Unique per-command marker so completion (and exit status) can be
        # detected on stdout without parsing command output itself.
        marker = f"{_DONE_MARKER_PREFIX}{uuid.uuid4().hex}"
        deadline = time.monotonic() + timeout
        with self._lock:
            self._drain_queue()
            payload = command if command.endswith("\n") else f"{command}\n"
            try:
                self._stdin.write(payload)
                # Echo the marker plus `$?` once the command finishes.
                self._stdin.write(f"printf '{marker} %s\\n' $?\n")
                self._stdin.flush()
            except (BrokenPipeError, OSError):
                # The shell exited before we could write the marker command.
                # This happens when commands like 'exit 1' terminate the shell.
                return self._collect_output_after_exit(deadline)
            return self._collect_output(marker, deadline, timeout)

    def _collect_output(
        self,
        marker: str,
        deadline: float,
        timeout: float,
    ) -> CommandExecutionResult:
        """Read queued output until the done marker appears or time runs out.

        Lines beyond the policy's line/byte caps are counted but not retained.
        On timeout the session is restarted and an empty-output result with
        ``timed_out=True`` is returned.
        """
        collected: list[str] = []
        total_lines = 0
        total_bytes = 0
        truncated_by_lines = False
        truncated_by_bytes = False
        exit_code: int | None = None
        timed_out = False
        while True:
            remaining = deadline - time.monotonic()
            if remaining <= 0:
                timed_out = True
                break
            try:
                source, data = self._queue.get(timeout=remaining)
            except queue.Empty:
                timed_out = True
                break
            if data is None:
                # EOF sentinel from a reader thread; keep waiting for the
                # marker (or the deadline).
                continue
            if source == "stdout" and data.startswith(marker):
                _, _, status = data.partition(" ")
                exit_code = self._safe_int(status.strip())
                # Drain any remaining stderr that may have arrived concurrently.
                # The stderr reader thread runs independently, so output might
                # still be in flight when the stdout marker arrives.
                self._drain_remaining_stderr(collected, deadline)
                break
            total_lines += 1
            encoded = data.encode("utf-8", "replace")
            total_bytes += len(encoded)
            if total_lines > self._policy.max_output_lines:
                truncated_by_lines = True
                continue
            if (
                self._policy.max_output_bytes is not None
                and total_bytes > self._policy.max_output_bytes
            ):
                truncated_by_bytes = True
                continue
            if source == "stderr":
                # Tag stderr lines so the model can distinguish the streams.
                stripped = data.rstrip("\n")
                collected.append(f"[stderr] {stripped}")
                if data.endswith("\n"):
                    collected.append("\n")
            else:
                collected.append(data)
        if timed_out:
            LOGGER.warning(
                "Command timed out after %.2f seconds; restarting shell session.",
                timeout,
            )
            # Restart so a wedged command cannot poison subsequent calls.
            self.restart()
            return CommandExecutionResult(
                output="",
                exit_code=None,
                timed_out=True,
                truncated_by_lines=truncated_by_lines,
                truncated_by_bytes=truncated_by_bytes,
                total_lines=total_lines,
                total_bytes=total_bytes,
            )
        output = "".join(collected)
        return CommandExecutionResult(
            output=output,
            exit_code=exit_code,
            timed_out=False,
            truncated_by_lines=truncated_by_lines,
            truncated_by_bytes=truncated_by_bytes,
            total_lines=total_lines,
            total_bytes=total_bytes,
        )

    def _collect_output_after_exit(self, deadline: float) -> CommandExecutionResult:
        """Collect output after the shell exited unexpectedly.

        Called when a `BrokenPipeError` occurs while writing to stdin, indicating the
        shell process terminated (e.g., due to an 'exit' command).

        Args:
            deadline: Absolute time by which collection must complete.

        Returns:
            `CommandExecutionResult` with collected output and the process exit code.
        """
        collected: list[str] = []
        total_lines = 0
        total_bytes = 0
        truncated_by_lines = False
        truncated_by_bytes = False
        # Give reader threads a brief moment to enqueue any remaining output.
        drain_timeout = 0.1
        drain_deadline = min(time.monotonic() + drain_timeout, deadline)
        while True:
            remaining = drain_deadline - time.monotonic()
            if remaining <= 0:
                break
            try:
                source, data = self._queue.get(timeout=remaining)
            except queue.Empty:
                break
            if data is None:
                # EOF marker from a reader thread; continue draining.
                continue
            total_lines += 1
            encoded = data.encode("utf-8", "replace")
            total_bytes += len(encoded)
            if total_lines > self._policy.max_output_lines:
                truncated_by_lines = True
                continue
            if (
                self._policy.max_output_bytes is not None
                and total_bytes > self._policy.max_output_bytes
            ):
                truncated_by_bytes = True
                continue
            if source == "stderr":
                stripped = data.rstrip("\n")
                collected.append(f"[stderr] {stripped}")
                if data.endswith("\n"):
                    collected.append("\n")
            else:
                collected.append(data)
        # Get exit code from the terminated process.
        exit_code: int | None = None
        if self._process:
            exit_code = self._process.poll()
        output = "".join(collected)
        return CommandExecutionResult(
            output=output,
            exit_code=exit_code,
            timed_out=False,
            truncated_by_lines=truncated_by_lines,
            truncated_by_bytes=truncated_by_bytes,
            total_lines=total_lines,
            total_bytes=total_bytes,
        )

    def _kill_process(self) -> None:
        """Forcefully kill the shell, targeting its whole process group where possible."""
        if not self._process:
            return
        if hasattr(os, "killpg"):
            # SIGKILL the entire process group so child processes die too.
            with contextlib.suppress(ProcessLookupError):
                os.killpg(os.getpgid(self._process.pid), signal.SIGKILL)
        else:  # pragma: no cover
            with contextlib.suppress(ProcessLookupError):
                self._process.kill()

    def _enqueue_stream(self, stream: Any, label: str) -> None:
        """Reader-thread body: forward each line to the queue, then send EOF (`None`)."""
        for line in iter(stream.readline, ""):
            self._queue.put((label, line))
        self._queue.put((label, None))

    def _drain_queue(self) -> None:
        """Discard any queued output left over from a previous command."""
        while True:
            try:
                self._queue.get_nowait()
            except queue.Empty:
                break

    def _drain_remaining_stderr(
        self, collected: list[str], deadline: float, drain_timeout: float = 0.05
    ) -> None:
        """Drain any stderr output that arrived concurrently with the done marker.

        The stdout and stderr reader threads run independently. When a command writes to
        stderr just before exiting, the stderr output may still be in transit when the
        done marker arrives on stdout. This method briefly polls the queue to capture
        such output.

        Args:
            collected: The list to append collected stderr lines to.
            deadline: The original command deadline (used as an upper bound).
            drain_timeout: Maximum time to wait for additional stderr output.
        """
        drain_deadline = min(time.monotonic() + drain_timeout, deadline)
        while True:
            remaining = drain_deadline - time.monotonic()
            if remaining <= 0:
                break
            try:
                source, data = self._queue.get(timeout=remaining)
            except queue.Empty:
                break
            if data is None or source != "stderr":
                continue
            stripped = data.rstrip("\n")
            collected.append(f"[stderr] {stripped}")
            if data.endswith("\n"):
                collected.append("\n")

    @staticmethod
    def _safe_int(value: str) -> int | None:
        """Parse an int, returning `None` instead of raising on bad input."""
        with contextlib.suppress(ValueError):
            return int(value)
        return None
class _ShellToolInput(BaseModel):
    """Input schema for the persistent shell tool.

    Exactly one of `command` or `restart` must be supplied; `validate_payload`
    enforces this after field validation.
    """

    command: str | None = None
    """The shell command to execute."""

    restart: bool | None = None
    """Whether to restart the shell session."""

    # SkipJsonSchema keeps this field out of the JSON schema shown to the model.
    runtime: Annotated[Any, SkipJsonSchema()] = None
    """The runtime for the shell tool.

    Included as a workaround at the moment bc args_schema doesn't work with
    injected ToolRuntime.
    """

    # Post-validation hook: rejects "neither" and "both" combinations of
    # command/restart (restart=False counts as not requested).
    @model_validator(mode="after")
    def validate_payload(self) -> _ShellToolInput:
        if self.command is None and not self.restart:
            msg = "Shell tool requires either 'command' or 'restart'."
            raise ValueError(msg)
        if self.command is not None and self.restart:
            msg = "Specify only one of 'command' or 'restart'."
            raise ValueError(msg)
        return self
class ShellToolMiddleware(AgentMiddleware[ShellToolState[ResponseT], ContextT, ResponseT]):
    """Middleware that registers a persistent shell tool for agents.

    The middleware exposes a single long-lived shell session. Use the execution policy
    to match your deployment's security posture:

    * `HostExecutionPolicy` – full host access; best for trusted environments where the
      agent already runs inside a container or VM that provides isolation.
    * `CodexSandboxExecutionPolicy` – reuses the Codex CLI sandbox for additional
      syscall/filesystem restrictions when the CLI is available.
    * `DockerExecutionPolicy` – launches a separate Docker container for each agent run,
      providing harder isolation, optional read-only root filesystems, and user
      remapping.

    When no policy is provided the middleware defaults to `HostExecutionPolicy`.
    """

    state_schema = ShellToolState  # type: ignore[assignment]

    def __init__(
        self,
        workspace_root: str | Path | None = None,
        *,
        startup_commands: tuple[str, ...] | list[str] | str | None = None,
        shutdown_commands: tuple[str, ...] | list[str] | str | None = None,
        execution_policy: BaseExecutionPolicy | None = None,
        redaction_rules: tuple[RedactionRule, ...] | list[RedactionRule] | None = None,
        tool_description: str | None = None,
        tool_name: str = SHELL_TOOL_NAME,
        shell_command: Sequence[str] | str | None = None,
        env: Mapping[str, Any] | None = None,
    ) -> None:
        """Initialize an instance of `ShellToolMiddleware`.

        Args:
            workspace_root: Base directory for the shell session.
                If omitted, a temporary directory is created when the agent starts and
                removed when it ends.
            startup_commands: Optional commands executed sequentially after the session
                starts.
            shutdown_commands: Optional commands executed before the session shuts down.
            execution_policy: Execution policy controlling timeouts, output limits, and
                resource configuration.
                Defaults to `HostExecutionPolicy` for native execution.
            redaction_rules: Optional redaction rules to sanitize command output before
                returning it to the model.

                !!! warning
                    Redaction rules are applied post execution and do not prevent
                    exfiltration of secrets or sensitive data when using
                    `HostExecutionPolicy`.
            tool_description: Optional override for the registered shell tool
                description.
            tool_name: Name for the registered shell tool.
                Defaults to `"shell"`.
            shell_command: Optional shell executable (string) or argument sequence used
                to launch the persistent session.
                Defaults to an implementation-defined bash command.
            env: Optional environment variables to supply to the shell session.
                Values are coerced to strings before command execution. If omitted, the
                session inherits the parent process environment.
        """
        super().__init__()
        self._workspace_root = Path(workspace_root) if workspace_root else None
        self._tool_name = tool_name
        self._shell_command = self._normalize_shell_command(shell_command)
        self._environment = self._normalize_env(env)
        if execution_policy is not None:
            self._execution_policy = execution_policy
        else:
            self._execution_policy = HostExecutionPolicy()
        rules = redaction_rules or ()
        self._redaction_rules: tuple[ResolvedRedactionRule, ...] = tuple(
            rule.resolve() for rule in rules
        )
        self._startup_commands = self._normalize_commands(startup_commands)
        self._shutdown_commands = self._normalize_commands(shutdown_commands)

        # Create a proper tool that executes directly (no interception needed)
        description = tool_description or DEFAULT_TOOL_DESCRIPTION

        @tool(self._tool_name, args_schema=_ShellToolInput, description=description)
        def shell_tool(
            *,
            runtime: ToolRuntime[None, ShellToolState],
            command: str | None = None,
            restart: bool = False,
        ) -> ToolMessage | str:
            resources = self._get_or_create_resources(runtime.state)
            return self._run_shell_tool(
                resources,
                {"command": command, "restart": restart},
                tool_call_id=runtime.tool_call_id,
            )

        self._shell_tool = shell_tool
        self.tools = [self._shell_tool]

    @staticmethod
    def _normalize_commands(
        commands: tuple[str, ...] | list[str] | str | None,
    ) -> tuple[str, ...]:
        """Coerce a command spec (single string, sequence, or None) to a tuple."""
        if commands is None:
            return ()
        if isinstance(commands, str):
            return (commands,)
        return tuple(commands)

    @staticmethod
    def _normalize_shell_command(
        shell_command: Sequence[str] | str | None,
    ) -> tuple[str, ...]:
        """Coerce the shell launcher spec to a non-empty argv tuple.

        Raises:
            ValueError: If an explicit empty sequence is provided.
        """
        if shell_command is None:
            return ("/bin/bash",)
        normalized = (shell_command,) if isinstance(shell_command, str) else tuple(shell_command)
        if not normalized:
            msg = "Shell command must contain at least one argument."
            raise ValueError(msg)
        return normalized

    @staticmethod
    def _normalize_env(env: Mapping[str, Any] | None) -> dict[str, str] | None:
        """Coerce environment values to strings, preserving `None` as "inherit".

        Raises:
            TypeError: If any environment variable name is not a string.
        """
        if env is None:
            return None
        normalized: dict[str, str] = {}
        for key, value in env.items():
            if not isinstance(key, str):
                msg = "Environment variable names must be strings."  # type: ignore[unreachable]
                raise TypeError(msg)
            normalized[key] = str(value)
        return normalized

    @override
    def before_agent(
        self, state: ShellToolState[ResponseT], runtime: Runtime[ContextT]
    ) -> dict[str, Any] | None:
        """Start the shell session and run startup commands.

        Args:
            state: The current agent state.
            runtime: The runtime context.

        Returns:
            Shell session resources to be stored in the agent state.
        """
        resources = self._get_or_create_resources(state)
        return {"shell_session_resources": resources}

    @override
    async def abefore_agent(
        self, state: ShellToolState[ResponseT], runtime: Runtime[ContextT]
    ) -> dict[str, Any] | None:
        """Async start the shell session and run startup commands.

        Args:
            state: The current agent state.
            runtime: The runtime context.

        Returns:
            Shell session resources to be stored in the agent state.
        """
        return await run_in_executor(None, self.before_agent, state, runtime)

    @override
    def after_agent(self, state: ShellToolState[ResponseT], runtime: Runtime[ContextT]) -> None:
        """Run shutdown commands and release resources when an agent completes."""
        resources = state.get("shell_session_resources")
        if not isinstance(resources, _SessionResources):
            # Resources were never created, nothing to clean up
            return
        try:
            self._run_shutdown_commands(resources.session)
        finally:
            resources.finalizer()

    @override
    async def aafter_agent(
        self, state: ShellToolState[ResponseT], runtime: Runtime[ContextT]
    ) -> None:
        """Async run shutdown commands and release resources when an agent completes."""
        # Shutdown commands block on subprocess I/O; run them off the event loop,
        # mirroring `abefore_agent`. (Calling `after_agent` directly here would
        # block the loop for up to the command timeout per shutdown command.)
        return await run_in_executor(None, self.after_agent, state, runtime)

    def _get_or_create_resources(self, state: ShellToolState[ResponseT]) -> _SessionResources:
        """Get existing resources from state or create new ones if they don't exist.

        This method enables resumability by checking if resources already exist in the state
        (e.g., after an interrupt), and only creating new resources if they're not present.

        Args:
            state: The agent state which may contain shell session resources.

        Returns:
            Session resources, either retrieved from state or newly created.
        """
        resources = state.get("shell_session_resources")
        if isinstance(resources, _SessionResources):
            return resources
        new_resources = self._create_resources()
        # Cast needed to make state dict-like for mutation
        cast("dict[str, Any]", state)["shell_session_resources"] = new_resources
        return new_resources

    def _create_resources(self) -> _SessionResources:
        """Create the workspace (temp dir if needed), start the session, run startup.

        On any startup failure the session is stopped and a temp workspace is
        removed before the exception propagates.
        """
        workspace = self._workspace_root
        tempdir: tempfile.TemporaryDirectory[str] | None = None
        if workspace is None:
            tempdir = tempfile.TemporaryDirectory(prefix=SHELL_TEMP_PREFIX)
            workspace_path = Path(tempdir.name)
        else:
            workspace_path = workspace
            workspace_path.mkdir(parents=True, exist_ok=True)
        session = ShellSession(
            workspace_path,
            self._execution_policy,
            self._shell_command,
            self._environment or {},
        )
        try:
            session.start()
            LOGGER.info("Started shell session in %s", workspace_path)
            self._run_startup_commands(session)
        except BaseException:
            # BaseException so Ctrl-C during startup still cleans up.
            LOGGER.exception("Starting shell session failed; cleaning up resources.")
            session.stop(self._execution_policy.termination_timeout)
            if tempdir is not None:
                tempdir.cleanup()
            raise
        return _SessionResources(session=session, tempdir=tempdir, policy=self._execution_policy)

    def _run_startup_commands(self, session: ShellSession) -> None:
        """Execute configured startup commands, failing fast on timeout or nonzero exit.

        Raises:
            RuntimeError: If a startup command times out or exits with a nonzero code.
        """
        if not self._startup_commands:
            return
        for command in self._startup_commands:
            result = session.execute(command, timeout=self._execution_policy.startup_timeout)
            if result.timed_out or (result.exit_code not in {0, None}):
                msg = f"Startup command '{command}' failed with exit code {result.exit_code}"
                raise RuntimeError(msg)

    def _run_shutdown_commands(self, session: ShellSession) -> None:
        """Execute configured shutdown commands best-effort, logging failures."""
        if not self._shutdown_commands:
            return
        for command in self._shutdown_commands:
            try:
                result = session.execute(command, timeout=self._execution_policy.command_timeout)
                if result.timed_out:
                    LOGGER.warning("Shutdown command '%s' timed out.", command)
                elif result.exit_code not in {0, None}:
                    LOGGER.warning(
                        "Shutdown command '%s' exited with %s.", command, result.exit_code
                    )
            except (RuntimeError, ToolException, OSError) as exc:
                # Shutdown is best-effort: never let cleanup failures mask the run.
                LOGGER.warning(
                    "Failed to run shutdown command '%s': %s", command, exc, exc_info=True
                )

    def _apply_redactions(self, content: str) -> tuple[str, dict[str, list[PIIMatch]]]:
        """Apply configured redaction rules to command output."""
        matches_by_type: dict[str, list[PIIMatch]] = {}
        updated = content
        for rule in self._redaction_rules:
            updated, matches = rule.apply(updated)
            if matches:
                matches_by_type.setdefault(rule.pii_type, []).extend(matches)
        return updated, matches_by_type

    def _run_shell_tool(
        self,
        resources: _SessionResources,
        payload: dict[str, Any],
        *,
        tool_call_id: str | None,
    ) -> Any:
        """Handle one shell tool invocation: restart request or command execution.

        Args:
            resources: Live session resources for this agent run.
            payload: Parsed tool arguments (`command`/`restart`).
            tool_call_id: Tool call id; when `None` a plain string is returned.

        Raises:
            ToolException: On restart failure or a missing/invalid `command`.
        """
        session = resources.session
        if payload.get("restart"):
            LOGGER.info("Restarting shell session on request.")
            try:
                session.restart()
                self._run_startup_commands(session)
            except BaseException as err:
                LOGGER.exception("Restarting shell session failed; session remains unavailable.")
                msg = "Failed to restart shell session."
                raise ToolException(msg) from err
            message = "Shell session restarted."
            return self._format_tool_message(message, tool_call_id, status="success")
        command = payload.get("command")
        if not command or not isinstance(command, str):
            msg = "Shell tool expects a 'command' string when restart is not requested."
            raise ToolException(msg)
        LOGGER.info("Executing shell command: %s", command)
        result = session.execute(command, timeout=self._execution_policy.command_timeout)
        if result.timed_out:
            timeout_seconds = self._execution_policy.command_timeout
            message = f"Error: Command timed out after {timeout_seconds:.1f} seconds."
            return self._format_tool_message(
                message,
                tool_call_id,
                status="error",
                artifact={
                    "timed_out": True,
                    "exit_code": None,
                },
            )
        try:
            sanitized_output, matches = self._apply_redactions(result.output)
        except PIIDetectionError as error:
            # A "block" strategy rule fired: withhold the output entirely.
            LOGGER.warning("Blocking command output due to detected %s.", error.pii_type)
            message = f"Output blocked: detected {error.pii_type}."
            return self._format_tool_message(
                message,
                tool_call_id,
                status="error",
                artifact={
                    "timed_out": False,
                    "exit_code": result.exit_code,
                    "matches": {error.pii_type: error.matches},
                },
            )
        sanitized_output = sanitized_output or "<no output>"
        # Append truncation notices so the model knows output is partial.
        if result.truncated_by_lines:
            sanitized_output = (
                f"{sanitized_output.rstrip()}\n\n"
                f"... Output truncated at {self._execution_policy.max_output_lines} lines "
                f"(observed {result.total_lines})."
            )
        if result.truncated_by_bytes and self._execution_policy.max_output_bytes is not None:
            sanitized_output = (
                f"{sanitized_output.rstrip()}\n\n"
                f"... Output truncated at {self._execution_policy.max_output_bytes} bytes "
                f"(observed {result.total_bytes})."
            )
        if result.exit_code not in {0, None}:
            sanitized_output = f"{sanitized_output.rstrip()}\n\nExit code: {result.exit_code}"
            final_status: Literal["success", "error"] = "error"
        else:
            final_status = "success"
        artifact = {
            "timed_out": False,
            "exit_code": result.exit_code,
            "truncated_by_lines": result.truncated_by_lines,
            "truncated_by_bytes": result.truncated_by_bytes,
            "total_lines": result.total_lines,
            "total_bytes": result.total_bytes,
            "redaction_matches": matches,
        }
        return self._format_tool_message(
            sanitized_output,
            tool_call_id,
            status=final_status,
            artifact=artifact,
        )

    def _format_tool_message(
        self,
        content: str,
        tool_call_id: str | None,
        *,
        status: Literal["success", "error"],
        artifact: dict[str, Any] | None = None,
    ) -> ToolMessage | str:
        """Wrap `content` in a `ToolMessage` when a tool call id is available."""
        artifact = artifact or {}
        if tool_call_id is None:
            return content
        return ToolMessage(
            content=content,
            tool_call_id=tool_call_id,
            name=self._tool_name,
            status=status,
            artifact=artifact,
        )
# Public API of the shell tool middleware module.
__all__ = [
    "CodexSandboxExecutionPolicy",
    "DockerExecutionPolicy",
    "HostExecutionPolicy",
    "RedactionRule",
    "ShellToolMiddleware",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/langchain/agents/middleware/shell_tool.py",
"license": "MIT License",
"lines": 753,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/partners/anthropic/langchain_anthropic/middleware/bash.py | """Anthropic-specific middleware for the Claude bash tool."""
from __future__ import annotations
from collections.abc import Awaitable, Callable
from typing import Any
from langchain.agents.middleware.shell_tool import ShellToolMiddleware
from langchain.agents.middleware.types import (
ModelRequest,
ModelResponse,
)
# Tool type constants for Anthropic
BASH_TOOL_TYPE = "bash_20250124"
BASH_TOOL_NAME = "bash"
class ClaudeBashToolMiddleware(ShellToolMiddleware):
    """Middleware that exposes Anthropic's native bash tool to models.

    The parent middleware registers a client-side tool named `"bash"`; at model-call
    time that tool is swapped for Anthropic's server-recognized bash descriptor.
    """

    def __init__(
        self,
        workspace_root: str | None = None,
        *,
        startup_commands: tuple[str, ...] | list[str] | str | None = None,
        shutdown_commands: tuple[str, ...] | list[str] | str | None = None,
        execution_policy: Any | None = None,
        redaction_rules: tuple[Any, ...] | list[Any] | None = None,
        tool_description: str | None = None,
        env: dict[str, Any] | None = None,
    ) -> None:
        """Initialize middleware for Claude's native bash tool.

        Args:
            workspace_root: Base directory for the shell session.
                If omitted, a temporary directory is created.
            startup_commands: Optional commands executed after the session starts.
            shutdown_commands: Optional commands executed before session shutdown.
            execution_policy: Execution policy controlling timeouts and limits.
            redaction_rules: Optional redaction rules to sanitize output.
            tool_description: Optional override for tool description.
            env: Optional environment variables for the shell session.
        """
        super().__init__(
            workspace_root=workspace_root,
            startup_commands=startup_commands,
            shutdown_commands=shutdown_commands,
            execution_policy=execution_policy,
            redaction_rules=redaction_rules,
            tool_description=tool_description,
            tool_name=BASH_TOOL_NAME,
            shell_command=("/bin/bash",),
            env=env,
        )
        # Parent class now creates the tool with name "bash" via tool_name parameter

    @staticmethod
    def _with_native_bash_descriptor(request: ModelRequest) -> ModelRequest:
        """Return a request whose `"bash"` tool is Claude's native descriptor.

        Shared by the sync and async `wrap_model_call` hooks so the replacement
        logic is defined exactly once.
        """
        filtered = [
            t for t in request.tools if getattr(t, "name", None) != BASH_TOOL_NAME
        ]
        tools = [*filtered, {"type": BASH_TOOL_TYPE, "name": BASH_TOOL_NAME}]
        return request.override(tools=tools)

    def wrap_model_call(
        self,
        request: ModelRequest,
        handler: Callable[[ModelRequest], ModelResponse],
    ) -> ModelResponse:
        """Replace parent's shell tool with Claude's bash descriptor."""
        return handler(self._with_native_bash_descriptor(request))

    async def awrap_model_call(
        self,
        request: ModelRequest,
        handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
    ) -> ModelResponse:
        """Async: replace parent's shell tool with Claude's bash descriptor."""
        return await handler(self._with_native_bash_descriptor(request))
# Public API of this module.
__all__ = ["ClaudeBashToolMiddleware"]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/partners/anthropic/langchain_anthropic/middleware/bash.py",
"license": "MIT License",
"lines": 71,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/partners/anthropic/tests/unit_tests/middleware/test_bash.py | from __future__ import annotations
from unittest.mock import MagicMock
import pytest
pytest.importorskip(
"anthropic", reason="Anthropic SDK is required for Claude middleware tests"
)
from langchain_anthropic.middleware.bash import ClaudeBashToolMiddleware
def test_creates_bash_tool() -> None:
    """Test that ClaudeBashToolMiddleware creates a tool named 'bash'."""
    # Fix: dropped the `monkeypatch` fixture parameter — it was never used.
    middleware = ClaudeBashToolMiddleware()

    # Should have exactly one tool registered (from parent)
    assert len(middleware.tools) == 1

    # Tool is named "bash" (via tool_name parameter)
    bash_tool = middleware.tools[0]
    assert bash_tool.name == "bash"
def test_replaces_tool_with_claude_descriptor() -> None:
    """Test wrap_model_call replaces bash tool with Claude's bash descriptor."""
    from langchain.agents.middleware.types import ModelRequest

    middleware = ClaudeBashToolMiddleware()

    # Build a request that carries the parent-registered bash tool.
    parent_tool = middleware.tools[0]
    request = ModelRequest(
        model=MagicMock(),
        system_prompt=None,
        messages=[],
        tool_choice=None,
        tools=[parent_tool],
        response_format=None,
        state={"messages": []},
        runtime=MagicMock(),
    )

    # Handler that records every request it receives.
    seen: list[ModelRequest] = []

    def handler(req: ModelRequest) -> MagicMock:
        seen.append(req)
        return MagicMock()

    middleware.wrap_model_call(request, handler)

    # The bash tool should be replaced with Claude's native bash descriptor.
    assert seen
    forwarded = seen[-1]
    assert len(forwarded.tools) == 1
    assert forwarded.tools[0] == {
        "type": "bash_20250124",
        "name": "bash",
    }
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/partners/anthropic/tests/unit_tests/middleware/test_bash.py",
"license": "MIT License",
"lines": 45,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/langchain_v1/langchain/agents/middleware/file_search.py | """File search middleware for Anthropic text editor and memory tools.
This module provides Glob and Grep search tools that operate on files stored
in state or filesystem.
"""
from __future__ import annotations
import fnmatch
import json
import re
import subprocess
from contextlib import suppress
from datetime import datetime, timezone
from pathlib import Path
from typing import Literal
from langchain_core.tools import tool
from langchain.agents.middleware.types import AgentMiddleware, AgentState, ContextT, ResponseT
def _expand_include_patterns(pattern: str) -> list[str] | None:
"""Expand brace patterns like `*.{py,pyi}` into a list of globs."""
if "}" in pattern and "{" not in pattern:
return None
expanded: list[str] = []
def _expand(current: str) -> None:
start = current.find("{")
if start == -1:
expanded.append(current)
return
end = current.find("}", start)
if end == -1:
raise ValueError
prefix = current[:start]
suffix = current[end + 1 :]
inner = current[start + 1 : end]
if not inner:
raise ValueError
for option in inner.split(","):
_expand(prefix + option + suffix)
try:
_expand(pattern)
except ValueError:
return None
return expanded
def _is_valid_include_pattern(pattern: str) -> bool:
    """Validate glob pattern used for include filters.

    Rejects empty patterns, patterns containing NUL or newline characters,
    malformed brace groups, and anything whose translated regex fails to compile.
    """
    if not pattern:
        return False
    if any(forbidden in pattern for forbidden in ("\x00", "\n", "\r")):
        return False
    candidates = _expand_include_patterns(pattern)
    if candidates is None:
        return False
    try:
        for candidate in candidates:
            re.compile(fnmatch.translate(candidate))
    except re.error:
        return False
    return True
def _match_include_pattern(basename: str, pattern: str) -> bool:
    """Return True if the basename matches the include pattern."""
    candidates = _expand_include_patterns(pattern)
    # Malformed (None) or empty expansions never match anything.
    return bool(candidates) and any(
        fnmatch.fnmatch(basename, candidate) for candidate in candidates
    )
class FilesystemFileSearchMiddleware(AgentMiddleware[AgentState[ResponseT], ContextT, ResponseT]):
    """Provides Glob and Grep search over filesystem files.

    This middleware adds two tools that search through local filesystem:

    - Glob: Fast file pattern matching by file path
    - Grep: Fast content search using ripgrep or Python fallback

    Example:
        ```python
        from langchain.agents import create_agent
        from langchain.agents.middleware import (
            FilesystemFileSearchMiddleware,
        )

        agent = create_agent(
            model=model,
            tools=[],  # Add tools as needed
            middleware=[
                FilesystemFileSearchMiddleware(root_path="/workspace"),
            ],
        )
        ```
    """

    def __init__(
        self,
        *,
        root_path: str,
        use_ripgrep: bool = True,
        max_file_size_mb: int = 10,
    ) -> None:
        """Initialize the search middleware.

        Args:
            root_path: Root directory to search.
            use_ripgrep: Whether to use `ripgrep` for search.
                Falls back to Python if `ripgrep` unavailable.
            max_file_size_mb: Maximum file size to search in MB.
        """
        super().__init__()
        self.root_path = Path(root_path).resolve()
        self.use_ripgrep = use_ripgrep
        self.max_file_size_bytes = max_file_size_mb * 1024 * 1024

        # Create tool instances as closures that capture self
        @tool
        def glob_search(pattern: str, path: str = "/") -> str:
            """Fast file pattern matching tool that works with any codebase size.

            Supports glob patterns like `**/*.js` or `src/**/*.ts`.
            Returns matching file paths sorted by modification time.
            Use this tool when you need to find files by name patterns.

            Args:
                pattern: The glob pattern to match files against.
                path: The directory to search in. If not specified, searches from root.

            Returns:
                Newline-separated list of matching file paths, sorted by modification
                time (most recently modified first). Returns `'No files found'` if no
                matches.
            """
            try:
                base_full = self._validate_and_resolve_path(path)
            except ValueError:
                return "No files found"

            if not base_full.exists() or not base_full.is_dir():
                return "No files found"

            # Use pathlib glob, collecting (virtual path, mtime) pairs.
            matching: list[tuple[str, float]] = []
            for match in base_full.glob(pattern):
                if match.is_file():
                    # Convert to virtual path
                    virtual_path = "/" + str(match.relative_to(self.root_path))
                    matching.append((virtual_path, match.stat().st_mtime))

            if not matching:
                return "No files found"

            # Fix: honor the documented contract — most recently modified first.
            matching.sort(key=lambda entry: entry[1], reverse=True)
            return "\n".join(file_path for file_path, _ in matching)

        @tool
        def grep_search(
            pattern: str,
            path: str = "/",
            include: str | None = None,
            output_mode: Literal["files_with_matches", "content", "count"] = "files_with_matches",
        ) -> str:
            """Fast content search tool that works with any codebase size.

            Searches file contents using regular expressions. Supports full regex
            syntax and filters files by pattern with the include parameter.

            Args:
                pattern: The regular expression pattern to search for in file contents.
                path: The directory to search in. If not specified, searches from root.
                include: File pattern to filter (e.g., `'*.js'`, `'*.{ts,tsx}'`).
                output_mode: Output format:

                    - `'files_with_matches'`: Only file paths containing matches
                    - `'content'`: Matching lines with `file:line:content` format
                    - `'count'`: Count of matches per file

            Returns:
                Search results formatted according to `output_mode`.
                Returns `'No matches found'` if no results.
            """
            # Compile regex pattern (for validation)
            try:
                re.compile(pattern)
            except re.error as e:
                return f"Invalid regex pattern: {e}"

            if include and not _is_valid_include_pattern(include):
                return "Invalid include pattern"

            # Try ripgrep first if enabled
            results = None
            if self.use_ripgrep:
                with suppress(
                    FileNotFoundError,
                    subprocess.CalledProcessError,
                    subprocess.TimeoutExpired,
                ):
                    results = self._ripgrep_search(pattern, path, include)

            # Python fallback if ripgrep failed or is disabled
            if results is None:
                results = self._python_search(pattern, path, include)

            if not results:
                return "No matches found"

            # Format output based on mode
            return self._format_grep_results(results, output_mode)

        self.glob_search = glob_search
        self.grep_search = grep_search
        self.tools = [glob_search, grep_search]

    def _validate_and_resolve_path(self, path: str) -> Path:
        """Validate and resolve a virtual path to filesystem path.

        Raises:
            ValueError: On path traversal attempts or escapes from the root.
        """
        # Normalize path
        if not path.startswith("/"):
            path = "/" + path

        # Check for path traversal
        if ".." in path or "~" in path:
            msg = "Path traversal not allowed"
            raise ValueError(msg)

        # Convert virtual path to filesystem path
        relative = path.lstrip("/")
        full_path = (self.root_path / relative).resolve()

        # Ensure path is within root
        try:
            full_path.relative_to(self.root_path)
        except ValueError:
            msg = f"Path outside root directory: {path}"
            raise ValueError(msg) from None

        return full_path

    def _ripgrep_search(
        self, pattern: str, base_path: str, include: str | None
    ) -> dict[str, list[tuple[int, str]]]:
        """Search using ripgrep subprocess.

        Returns a mapping of virtual path -> list of (line number, line text);
        empty dict when the base path is invalid or missing.
        """
        try:
            base_full = self._validate_and_resolve_path(base_path)
        except ValueError:
            return {}

        if not base_full.exists():
            return {}

        # Build ripgrep command
        cmd = ["rg", "--json"]
        if include:
            # Convert glob pattern to ripgrep glob
            cmd.extend(["--glob", include])
        cmd.extend(["--", pattern, str(base_full)])

        try:
            result = subprocess.run(  # noqa: S603
                cmd,
                capture_output=True,
                text=True,
                timeout=30,
                check=False,
            )
        except (subprocess.TimeoutExpired, FileNotFoundError):
            # Fallback to Python search if ripgrep unavailable or times out
            return self._python_search(pattern, base_path, include)

        # Parse ripgrep JSON output (one JSON event per line)
        results: dict[str, list[tuple[int, str]]] = {}
        for line in result.stdout.splitlines():
            try:
                data = json.loads(line)
                if data["type"] == "match":
                    path = data["data"]["path"]["text"]
                    # Convert to virtual path
                    virtual_path = "/" + str(Path(path).relative_to(self.root_path))
                    line_num = data["data"]["line_number"]
                    line_text = data["data"]["lines"]["text"].rstrip("\n")
                    if virtual_path not in results:
                        results[virtual_path] = []
                    results[virtual_path].append((line_num, line_text))
            except (json.JSONDecodeError, KeyError):
                continue

        return results

    def _python_search(
        self, pattern: str, base_path: str, include: str | None
    ) -> dict[str, list[tuple[int, str]]]:
        """Search using Python regex (fallback).

        Skips non-text files, unreadable files, and files larger than the
        configured size limit.
        """
        try:
            base_full = self._validate_and_resolve_path(base_path)
        except ValueError:
            return {}

        if not base_full.exists():
            return {}

        regex = re.compile(pattern)
        results: dict[str, list[tuple[int, str]]] = {}

        # Walk directory tree
        for file_path in base_full.rglob("*"):
            if not file_path.is_file():
                continue

            # Check include filter
            if include and not _match_include_pattern(file_path.name, include):
                continue

            # Skip files that are too large
            if file_path.stat().st_size > self.max_file_size_bytes:
                continue

            try:
                content = file_path.read_text()
            except (UnicodeDecodeError, PermissionError):
                continue

            # Search content
            for line_num, line in enumerate(content.splitlines(), 1):
                if regex.search(line):
                    virtual_path = "/" + str(file_path.relative_to(self.root_path))
                    if virtual_path not in results:
                        results[virtual_path] = []
                    results[virtual_path].append((line_num, line))

        return results

    @staticmethod
    def _format_grep_results(
        results: dict[str, list[tuple[int, str]]],
        output_mode: str,
    ) -> str:
        """Format grep results based on output mode."""
        if output_mode == "content":
            # Return file:line:content format, grouped by file in sorted order
            return "\n".join(
                f"{file_path}:{line_num}:{line}"
                for file_path in sorted(results)
                for line_num, line in results[file_path]
            )
        if output_mode == "count":
            # Return file:count format
            return "\n".join(
                f"{file_path}:{len(results[file_path])}" for file_path in sorted(results)
            )
        # "files_with_matches" and any unknown mode: just the file paths
        return "\n".join(sorted(results))
# Public API of the file-search middleware module.
__all__ = [
    "FilesystemFileSearchMiddleware",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/langchain/agents/middleware/file_search.py",
"license": "MIT License",
"lines": 305,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/partners/anthropic/langchain_anthropic/middleware/anthropic_tools.py | """Anthropic text editor and memory tool middleware.
This module provides client-side implementations of Anthropic's text editor and
memory tools using schema-less tool definitions and tool call interception.
"""
from __future__ import annotations
import os
import shutil
from datetime import datetime, timezone
from pathlib import Path
from typing import TYPE_CHECKING, Annotated, Any, cast
from langchain.agents.middleware.types import (
AgentMiddleware,
AgentState,
ModelRequest,
ModelResponse,
_ModelRequestOverrides,
)
from langchain.tools import ToolRuntime, tool
from langchain_core.messages import SystemMessage, ToolMessage
from langgraph.types import Command
from typing_extensions import NotRequired, TypedDict
if TYPE_CHECKING:
from collections.abc import Awaitable, Callable, Sequence
# Tool type constants
TEXT_EDITOR_TOOL_TYPE = "text_editor_20250728"
TEXT_EDITOR_TOOL_NAME = "str_replace_based_edit_tool"
MEMORY_TOOL_TYPE = "memory_20250818"
MEMORY_TOOL_NAME = "memory"
MEMORY_SYSTEM_PROMPT = """IMPORTANT: ALWAYS VIEW YOUR MEMORY DIRECTORY BEFORE \
DOING ANYTHING ELSE.
MEMORY PROTOCOL:
1. Use the `view` command of your `memory` tool to check for earlier progress.
2. ... (work on the task) ...
- As you make progress, record status / progress / thoughts etc in your memory.
ASSUME INTERRUPTION: Your context window might be reset at any moment, so you risk \
losing any progress that is not recorded in your memory directory."""
class FileData(TypedDict):
    """Data structure for storing file contents.

    Value type for the virtual file systems kept in agent state (see
    `files_reducer`, which merges updates of `dict[str, FileData]`).
    """

    content: list[str]
    """Lines of the file."""

    created_at: str
    """ISO 8601 timestamp of file creation."""

    modified_at: str
    """ISO 8601 timestamp of last modification."""
def files_reducer(
    left: dict[str, FileData] | None, right: dict[str, FileData | None]
) -> dict[str, FileData]:
    """Custom reducer that merges file updates.

    Args:
        left: Existing files dict.
        right: New files dict to merge (`None` values delete files).

    Returns:
        Merged `dict` where right overwrites left for matching keys.
    """
    merged: dict[str, FileData] = dict(left) if left is not None else {}
    for path, data in right.items():
        if data is None:
            # A `None` value is a deletion marker; missing keys are ignored.
            merged.pop(path, None)
        else:
            merged[path] = data
    return merged
class AnthropicToolsState(AgentState):
    """State schema for Anthropic text editor and memory tools.

    Both keys use `files_reducer`, so partial updates merge into the existing
    mapping and `None` values delete files.
    """

    text_editor_files: NotRequired[Annotated[dict[str, FileData], files_reducer]]
    """Virtual file system for text editor tools."""

    memory_files: NotRequired[Annotated[dict[str, FileData], files_reducer]]
    """Virtual file system for memory tools."""
def _validate_path(path: str, *, allowed_prefixes: Sequence[str] | None = None) -> str:
"""Validate and normalize file path for security.
Args:
path: The path to validate.
allowed_prefixes: Optional list of allowed path prefixes.
Returns:
Normalized canonical path.
Raises:
ValueError: If path contains traversal sequences or violates prefix rules.
"""
# Reject paths with traversal attempts
if ".." in path or path.startswith("~"):
msg = f"Path traversal not allowed: {path}"
raise ValueError(msg)
# Normalize path (resolve ., //, etc.)
normalized = os.path.normpath(path)
# Convert to forward slashes for consistency
normalized = normalized.replace("\\", "/")
# Ensure path starts with /
if not normalized.startswith("/"):
normalized = f"/{normalized}"
# Check allowed prefixes if specified
if allowed_prefixes is not None and not any(
normalized.startswith(prefix) for prefix in allowed_prefixes
):
msg = f"Path must start with one of {allowed_prefixes}: {path}"
raise ValueError(msg)
return normalized
def _list_directory(files: dict[str, FileData], path: str) -> list[str]:
"""List files in a directory.
Args:
files: Files `dict`.
path: Normalized directory path.
Returns:
Sorted list of file paths in the directory.
"""
# Ensure path ends with / for directory matching
dir_path = path if path.endswith("/") else f"{path}/"
matching_files = []
for file_path in files:
if file_path.startswith(dir_path):
# Get relative path from directory
relative = file_path[len(dir_path) :]
# Only include direct children (no subdirectories)
if "/" not in relative:
matching_files.append(file_path)
return sorted(matching_files)
class _StateClaudeFileToolMiddleware(AgentMiddleware):
    """Base class for state-based file tool middleware (internal).

    Files live in LangGraph state under ``state_key`` (merged via
    ``files_reducer``), so the virtual file system persists for the lifetime
    of the conversation thread.
    """

    state_schema = AnthropicToolsState

    def __init__(
        self,
        *,
        tool_type: str,
        tool_name: str,
        state_key: str,
        allowed_path_prefixes: Sequence[str] | None = None,
        system_prompt: str | None = None,
    ) -> None:
        """Initialize.

        Args:
            tool_type: Tool type identifier (Anthropic versioned type string).
            tool_name: Tool name exposed to the model.
            state_key: State key for file storage.
            allowed_path_prefixes: Optional list of allowed path prefixes.
            system_prompt: Optional system prompt to inject.
        """
        self.tool_type = tool_type
        self.tool_name = tool_name
        self.state_key = state_key
        self.allowed_prefixes = allowed_path_prefixes
        self.system_prompt = system_prompt

        # Create the tool that will be executed by the tool node.
        @tool(tool_name)
        def file_tool(
            runtime: ToolRuntime[None, AnthropicToolsState],
            command: str,
            path: str,
            file_text: str | None = None,
            old_str: str | None = None,
            new_str: str | None = None,
            insert_line: int | None = None,
            new_path: str | None = None,
            view_range: list[int] | None = None,
        ) -> Command | str:
            """Execute file operations on virtual file system.

            Args:
                runtime: Tool runtime providing access to state.
                command: Operation to perform.
                path: File path to operate on.
                file_text: Full file content for create command.
                old_str: String to replace for str_replace command.
                new_str: Replacement string for str_replace command.
                insert_line: Line number for insert command.
                new_path: New path for rename command.
                view_range: Line range `[start, end]` for view command.

            Returns:
                Command for state update or string result.
            """
            # Collect only the arguments that were actually supplied so the
            # handlers can tell "missing" apart from "given".
            args: dict[str, Any] = {"path": path}
            if file_text is not None:
                args["file_text"] = file_text
            if old_str is not None:
                args["old_str"] = old_str
            if new_str is not None:
                args["new_str"] = new_str
            if insert_line is not None:
                args["insert_line"] = insert_line
            if new_path is not None:
                args["new_path"] = new_path
            if view_range is not None:
                args["view_range"] = view_range

            # Route to the appropriate handler. Operational failures are
            # reported back to the model as plain strings, not raised.
            try:
                if command == "view":
                    return self._handle_view(args, runtime.state, runtime.tool_call_id)
                if command == "create":
                    return self._handle_create(
                        args, runtime.state, runtime.tool_call_id
                    )
                if command == "str_replace":
                    return self._handle_str_replace(
                        args, runtime.state, runtime.tool_call_id
                    )
                if command == "insert":
                    return self._handle_insert(
                        args, runtime.state, runtime.tool_call_id
                    )
                if command == "delete":
                    return self._handle_delete(
                        args, runtime.state, runtime.tool_call_id
                    )
                if command == "rename":
                    return self._handle_rename(
                        args, runtime.state, runtime.tool_call_id
                    )
                return f"Unknown command: {command}"
            except KeyError as e:
                # A command was invoked without one of its required arguments
                # (e.g. `create` without `file_text`); previously this escaped
                # the handler and crashed the tool node.
                return f"Missing required argument for {command}: {e}"
            except (ValueError, FileNotFoundError) as e:
                return str(e)

        self.tools = [file_tool]

    def _prepare_request(self, request: ModelRequest) -> ModelRequest:
        """Return `request` with the native tool descriptor and prompt injected.

        Shared by the sync and async `wrap_model_call` variants.
        """
        # Replace our BaseTool with Anthropic's native tool descriptor.
        tools = [
            t
            for t in (request.tools or [])
            if getattr(t, "name", None) != self.tool_name
        ] + [{"type": self.tool_type, "name": self.tool_name}]

        overrides: _ModelRequestOverrides = {"tools": tools}
        if self.system_prompt:
            if request.system_message is not None:
                # Append to any existing system content rather than replacing it.
                new_system_content = [
                    *request.system_message.content_blocks,
                    {"type": "text", "text": f"\n\n{self.system_prompt}"},
                ]
            else:
                new_system_content = [{"type": "text", "text": self.system_prompt}]
            overrides["system_message"] = SystemMessage(
                content=cast("list[str | dict[str, str]]", new_system_content)
            )
        return request.override(**overrides)

    def wrap_model_call(
        self,
        request: ModelRequest,
        handler: Callable[[ModelRequest], ModelResponse],
    ) -> ModelResponse:
        """Inject Anthropic tool descriptor and optional system prompt."""
        return handler(self._prepare_request(request))

    async def awrap_model_call(
        self,
        request: ModelRequest,
        handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
    ) -> ModelResponse:
        """Inject Anthropic tool descriptor and optional system prompt (async)."""
        return await handler(self._prepare_request(request))

    def _message_command(
        self,
        content: str,
        tool_call_id: str | None,
        extra_update: dict[str, Any] | None = None,
    ) -> Command:
        """Build a Command appending a ToolMessage, plus an optional state update."""
        update: dict[str, Any] = dict(extra_update or {})
        update["messages"] = [
            ToolMessage(
                content=content,
                tool_call_id=tool_call_id,
                name=self.tool_name,
            )
        ]
        return Command(update=update)

    def _handle_view(
        self, args: dict, state: AnthropicToolsState, tool_call_id: str | None
    ) -> Command:
        """Handle view command (file content or directory listing).

        Raises:
            FileNotFoundError: If the path is neither a file nor a non-empty
                directory.
        """
        path = args["path"]
        normalized_path = _validate_path(path, allowed_prefixes=self.allowed_prefixes)

        files = cast("dict[str, Any]", state.get(self.state_key, {}))
        file_data = files.get(normalized_path)
        if file_data is None:
            # Not a file -- fall back to a directory listing.
            matching = _list_directory(files, normalized_path)
            if matching:
                return self._message_command("\n".join(matching), tool_call_id)
            msg = f"File not found: {path}"
            raise FileNotFoundError(msg)

        # Format file content with 1-based line numbers ("N|line").
        numbered = [f"{i + 1}|{line}" for i, line in enumerate(file_data["content"])]
        return self._message_command("\n".join(numbered), tool_call_id)

    def _handle_create(
        self, args: dict, state: AnthropicToolsState, tool_call_id: str | None
    ) -> Command:
        """Handle create command (create a file, or overwrite an existing one)."""
        path = args["path"]
        file_text = args["file_text"]
        normalized_path = _validate_path(path, allowed_prefixes=self.allowed_prefixes)

        files = cast("dict[str, Any]", state.get(self.state_key, {}))
        existing = files.get(normalized_path)

        now = datetime.now(timezone.utc).isoformat()
        # Preserve the original creation timestamp when overwriting.
        created_at = existing["created_at"] if existing else now

        return self._message_command(
            f"File created: {path}",
            tool_call_id,
            extra_update={
                self.state_key: {
                    normalized_path: {
                        "content": file_text.split("\n"),
                        "created_at": created_at,
                        "modified_at": now,
                    }
                }
            },
        )

    def _handle_str_replace(
        self, args: dict, state: AnthropicToolsState, tool_call_id: str | None
    ) -> Command:
        """Handle str_replace command.

        Raises:
            FileNotFoundError: If the file does not exist.
            ValueError: If `old_str` does not occur in the file.
        """
        path = args["path"]
        old_str = args["old_str"]
        new_str = args.get("new_str", "")
        normalized_path = _validate_path(path, allowed_prefixes=self.allowed_prefixes)

        files = cast("dict[str, Any]", state.get(self.state_key, {}))
        file_data = files.get(normalized_path)
        if file_data is None:
            msg = f"File not found: {path}"
            raise FileNotFoundError(msg)

        content = "\n".join(file_data["content"])
        if old_str not in content:
            msg = f"String not found in file: {old_str}"
            raise ValueError(msg)

        # Replace only the first occurrence (count=1).
        new_content = content.replace(old_str, new_str, 1)

        now = datetime.now(timezone.utc).isoformat()
        return self._message_command(
            f"String replaced in {path}",
            tool_call_id,
            extra_update={
                self.state_key: {
                    normalized_path: {
                        "content": new_content.split("\n"),
                        "created_at": file_data["created_at"],
                        "modified_at": now,
                    }
                }
            },
        )

    def _handle_insert(
        self, args: dict, state: AnthropicToolsState, tool_call_id: str | None
    ) -> Command:
        """Handle insert command.

        Raises:
            FileNotFoundError: If the file does not exist.
        """
        path = args["path"]
        insert_line = args["insert_line"]
        text_to_insert = args["new_str"]
        normalized_path = _validate_path(path, allowed_prefixes=self.allowed_prefixes)

        files = cast("dict[str, Any]", state.get(self.state_key, {}))
        file_data = files.get(normalized_path)
        if file_data is None:
            msg = f"File not found: {path}"
            raise FileNotFoundError(msg)

        existing_lines = file_data["content"]
        inserted_lines = text_to_insert.split("\n")
        # `insert_line` is the 0-indexed line after which the text is inserted.
        updated_lines = (
            existing_lines[:insert_line] + inserted_lines + existing_lines[insert_line:]
        )

        now = datetime.now(timezone.utc).isoformat()
        return self._message_command(
            f"Text inserted in {path}",
            tool_call_id,
            extra_update={
                self.state_key: {
                    normalized_path: {
                        "content": updated_lines,
                        "created_at": file_data["created_at"],
                        "modified_at": now,
                    }
                }
            },
        )

    def _handle_delete(
        self,
        args: dict,
        state: AnthropicToolsState,
        tool_call_id: str | None,
    ) -> Command:
        """Handle delete command (idempotent: deleting a missing file succeeds)."""
        path = args["path"]
        normalized_path = _validate_path(path, allowed_prefixes=self.allowed_prefixes)

        # A value of None is interpreted by files_reducer as a deletion.
        return self._message_command(
            f"File deleted: {path}",
            tool_call_id,
            extra_update={self.state_key: {normalized_path: None}},
        )

    def _handle_rename(
        self, args: dict, state: AnthropicToolsState, tool_call_id: str | None
    ) -> Command:
        """Handle rename command.

        Raises:
            ValueError: If the source file does not exist.
        """
        # BUG FIX: the tool wrapper stores the source path under "path" (it
        # never sets an "old_path" key), so reading args["old_path"] raised
        # KeyError on every rename. Prefer "old_path" only when a direct
        # caller supplies it.
        old_path = args.get("old_path") or args["path"]
        new_path = args["new_path"]
        normalized_old = _validate_path(
            old_path, allowed_prefixes=self.allowed_prefixes
        )
        normalized_new = _validate_path(
            new_path, allowed_prefixes=self.allowed_prefixes
        )

        files = cast("dict[str, Any]", state.get(self.state_key, {}))
        file_data = files.get(normalized_old)
        if file_data is None:
            msg = f"File not found: {old_path}"
            raise ValueError(msg)

        # Copy before mutating so we never modify the state object in place.
        now = datetime.now(timezone.utc).isoformat()
        file_data_copy = file_data.copy()
        file_data_copy["modified_at"] = now

        return self._message_command(
            f"File renamed: {old_path} -> {new_path}",
            tool_call_id,
            extra_update={
                self.state_key: {
                    normalized_old: None,
                    normalized_new: file_data_copy,
                }
            },
        )
class StateClaudeTextEditorMiddleware(_StateClaudeFileToolMiddleware):
    """State-based text editor tool middleware.

    Provides Anthropic's `text_editor` tool using LangGraph state for storage.
    Files persist for the conversation thread.

    Example:
        ```python
        from langchain.agents import create_agent

        from langchain_anthropic.middleware import StateClaudeTextEditorMiddleware

        agent = create_agent(
            model=model,
            tools=[],
            middleware=[StateClaudeTextEditorMiddleware()],
        )
        ```
    """

    def __init__(
        self,
        *,
        allowed_path_prefixes: Sequence[str] | None = None,
    ) -> None:
        """Initialize the text editor middleware.

        Args:
            allowed_path_prefixes: Optional list of allowed path prefixes.
                If specified, only paths starting with these prefixes are allowed.
        """
        super().__init__(
            tool_type=TEXT_EDITOR_TOOL_TYPE,
            tool_name=TEXT_EDITOR_TOOL_NAME,
            state_key="text_editor_files",
            allowed_path_prefixes=allowed_path_prefixes,
        )
class StateClaudeMemoryMiddleware(_StateClaudeFileToolMiddleware):
    """State-based memory tool middleware.

    Provides Anthropic's memory tool using LangGraph state for storage.
    Files persist for the conversation thread.

    Enforces `/memories` prefix and injects Anthropic's recommended system prompt.

    Example:
        ```python
        from langchain.agents import create_agent

        from langchain_anthropic.middleware import StateClaudeMemoryMiddleware

        agent = create_agent(
            model=model,
            tools=[],
            middleware=[StateClaudeMemoryMiddleware()],
        )
        ```
    """

    def __init__(
        self,
        *,
        allowed_path_prefixes: Sequence[str] | None = None,
        system_prompt: str = MEMORY_SYSTEM_PROMPT,
    ) -> None:
        """Initialize the memory middleware.

        Args:
            allowed_path_prefixes: Optional list of allowed path prefixes.
                Defaults to `['/memories']`.
            system_prompt: System prompt to inject.
                Defaults to Anthropic's recommended memory prompt.
        """
        super().__init__(
            tool_type=MEMORY_TOOL_TYPE,
            tool_name=MEMORY_TOOL_NAME,
            state_key="memory_files",
            allowed_path_prefixes=allowed_path_prefixes or ["/memories"],
            system_prompt=system_prompt,
        )
class _FilesystemClaudeFileToolMiddleware(AgentMiddleware):
    """Base class for filesystem-based file tool middleware (internal).

    Operations are sandboxed to ``root_path``: virtual paths like
    ``/notes.txt`` resolve relative to that directory and may never escape it.
    """

    def __init__(
        self,
        *,
        tool_type: str,
        tool_name: str,
        root_path: str,
        allowed_prefixes: list[str] | None = None,
        max_file_size_mb: int = 10,
        system_prompt: str | None = None,
    ) -> None:
        """Initialize.

        Args:
            tool_type: Tool type identifier.
            tool_name: Tool name.
            root_path: Root directory for file operations.
            allowed_prefixes: Optional list of allowed virtual path prefixes.
            max_file_size_mb: Maximum file size in MB.
            system_prompt: Optional system prompt to inject.
        """
        self.tool_type = tool_type
        self.tool_name = tool_name
        self.root_path = Path(root_path).resolve()
        self.allowed_prefixes = allowed_prefixes or ["/"]
        self.max_file_size_bytes = max_file_size_mb * 1024 * 1024
        self.system_prompt = system_prompt

        # Create root directory if it doesn't exist.
        self.root_path.mkdir(parents=True, exist_ok=True)

        # Create the tool that will be executed by the tool node.
        @tool(tool_name)
        def file_tool(
            runtime: ToolRuntime,
            command: str,
            path: str,
            file_text: str | None = None,
            old_str: str | None = None,
            new_str: str | None = None,
            insert_line: int | None = None,
            new_path: str | None = None,
            view_range: list[int] | None = None,
        ) -> Command | str:
            """Execute file operations on filesystem.

            Args:
                runtime: Tool runtime providing `tool_call_id`.
                command: Operation to perform.
                path: File path to operate on.
                file_text: Full file content for create command.
                old_str: String to replace for `str_replace` command.
                new_str: Replacement string for `str_replace` command.
                insert_line: Line number for insert command.
                new_path: New path for rename command.
                view_range: Line range `[start, end]` for view command.

            Returns:
                Command for message update or string result.
            """
            # Collect only the arguments that were actually supplied so the
            # handlers can tell "missing" apart from "given".
            args: dict[str, Any] = {"path": path}
            if file_text is not None:
                args["file_text"] = file_text
            if old_str is not None:
                args["old_str"] = old_str
            if new_str is not None:
                args["new_str"] = new_str
            if insert_line is not None:
                args["insert_line"] = insert_line
            if new_path is not None:
                args["new_path"] = new_path
            if view_range is not None:
                args["view_range"] = view_range

            # Route to the appropriate handler. Operational failures are
            # reported back to the model as plain strings, not raised.
            try:
                if command == "view":
                    return self._handle_view(args, runtime.tool_call_id)
                if command == "create":
                    return self._handle_create(args, runtime.tool_call_id)
                if command == "str_replace":
                    return self._handle_str_replace(args, runtime.tool_call_id)
                if command == "insert":
                    return self._handle_insert(args, runtime.tool_call_id)
                if command == "delete":
                    return self._handle_delete(args, runtime.tool_call_id)
                if command == "rename":
                    return self._handle_rename(args, runtime.tool_call_id)
                return f"Unknown command: {command}"
            except KeyError as e:
                # A command was invoked without one of its required arguments
                # (e.g. `create` without `file_text`); previously this escaped
                # the handler and crashed the tool node.
                return f"Missing required argument for {command}: {e}"
            except (ValueError, FileNotFoundError, PermissionError) as e:
                return str(e)

        self.tools = [file_tool]

    def _prepare_request(self, request: ModelRequest) -> ModelRequest:
        """Return `request` with the native tool descriptor and prompt injected.

        Shared by the sync and async `wrap_model_call` variants.
        """
        # Replace our BaseTool with Anthropic's native tool descriptor.
        tools = [
            t
            for t in (request.tools or [])
            if getattr(t, "name", None) != self.tool_name
        ] + [{"type": self.tool_type, "name": self.tool_name}]

        overrides: _ModelRequestOverrides = {"tools": tools}
        if self.system_prompt:
            if request.system_message is not None:
                # Append to any existing system content rather than replacing it.
                new_system_content = [
                    *request.system_message.content_blocks,
                    {"type": "text", "text": f"\n\n{self.system_prompt}"},
                ]
            else:
                new_system_content = [{"type": "text", "text": self.system_prompt}]
            overrides["system_message"] = SystemMessage(
                content=cast("list[str | dict[str, str]]", new_system_content)
            )
        return request.override(**overrides)

    def wrap_model_call(
        self,
        request: ModelRequest,
        handler: Callable[[ModelRequest], ModelResponse],
    ) -> ModelResponse:
        """Inject Anthropic tool descriptor and optional system prompt."""
        return handler(self._prepare_request(request))

    async def awrap_model_call(
        self,
        request: ModelRequest,
        handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
    ) -> ModelResponse:
        """Inject Anthropic tool descriptor and optional system prompt (async)."""
        return await handler(self._prepare_request(request))

    def _message_command(self, content: str, tool_call_id: str | None) -> Command:
        """Build a Command appending a ToolMessage attributed to this tool."""
        return Command(
            update={
                "messages": [
                    ToolMessage(
                        content=content,
                        tool_call_id=tool_call_id,
                        name=self.tool_name,
                    )
                ]
            }
        )

    def _validate_and_resolve_path(self, path: str) -> Path:
        """Validate and resolve a virtual path to a filesystem path.

        Args:
            path: Virtual path (e.g., `/file.txt` or `/src/main.py`).

        Returns:
            Resolved absolute filesystem path within `root_path`.

        Raises:
            ValueError: If path contains traversal attempts, escapes root
                directory, or violates `allowed_prefixes` restrictions.
        """
        # Normalize to an absolute virtual path.
        if not path.startswith("/"):
            path = "/" + path

        # Refuse anything that looks like traversal or home-dir expansion.
        if ".." in path or "~" in path:
            msg = "Path traversal not allowed"
            raise ValueError(msg)

        # Convert virtual path to a filesystem path under root.
        relative = path.lstrip("/")
        full_path = (self.root_path / relative).resolve()

        # Defense in depth: resolve() may follow symlinks out of the sandbox.
        try:
            full_path.relative_to(self.root_path)
        except ValueError:
            msg = f"Path outside root directory: {path}"
            raise ValueError(msg) from None

        # Enforce the virtual-prefix allow-list.
        virtual_path = "/" + str(full_path.relative_to(self.root_path))
        if self.allowed_prefixes:
            allowed = any(
                virtual_path.startswith(prefix) or virtual_path == prefix.rstrip("/")
                for prefix in self.allowed_prefixes
            )
            if not allowed:
                msg = f"Path must start with one of: {self.allowed_prefixes}"
                raise ValueError(msg)

        return full_path

    def _handle_view(self, args: dict, tool_call_id: str | None) -> Command:
        """Handle view command.

        Raises:
            FileNotFoundError: If the path is not an existing regular file.
            ValueError: If the file is too large or not valid text.
        """
        path = args["path"]
        full_path = self._validate_and_resolve_path(path)

        if not full_path.exists() or not full_path.is_file():
            msg = f"File not found: {path}"
            raise FileNotFoundError(msg)

        # Refuse oversized files before reading them into memory.
        if full_path.stat().st_size > self.max_file_size_bytes:
            max_mb = self.max_file_size_bytes / 1024 / 1024
            msg = f"File too large: {path} exceeds {max_mb}MB"
            raise ValueError(msg)

        try:
            content = full_path.read_text()
        except UnicodeDecodeError as e:
            msg = f"Cannot decode file {path}: {e}"
            raise ValueError(msg) from e

        # Format with 1-based line numbers; drop the empty string produced by
        # a trailing newline so it isn't numbered as a phantom line.
        lines = content.split("\n")
        if lines and lines[-1] == "":
            lines = lines[:-1]
        numbered = [f"{i + 1}|{line}" for i, line in enumerate(lines)]
        return self._message_command("\n".join(numbered), tool_call_id)

    def _handle_create(self, args: dict, tool_call_id: str | None) -> Command:
        """Handle create command (creates parent directories as needed)."""
        path = args["path"]
        file_text = args["file_text"]
        full_path = self._validate_and_resolve_path(path)

        full_path.parent.mkdir(parents=True, exist_ok=True)
        # Always terminate the file with a newline.
        full_path.write_text(file_text + "\n")

        return self._message_command(f"File created: {path}", tool_call_id)

    def _handle_str_replace(self, args: dict, tool_call_id: str | None) -> Command:
        """Handle `str_replace` command.

        Raises:
            FileNotFoundError: If the file does not exist.
            ValueError: If `old_str` does not occur in the file.
        """
        path = args["path"]
        old_str = args["old_str"]
        new_str = args.get("new_str", "")
        full_path = self._validate_and_resolve_path(path)

        if not full_path.exists():
            msg = f"File not found: {path}"
            raise FileNotFoundError(msg)

        content = full_path.read_text()
        if old_str not in content:
            msg = f"String not found in file: {old_str}"
            raise ValueError(msg)

        # Replace only the first occurrence (count=1).
        full_path.write_text(content.replace(old_str, new_str, 1))

        return self._message_command(f"String replaced in {path}", tool_call_id)

    def _handle_insert(self, args: dict, tool_call_id: str | None) -> Command:
        """Handle insert command.

        Raises:
            FileNotFoundError: If the file does not exist.
        """
        path = args["path"]
        insert_line = args["insert_line"]
        text_to_insert = args["new_str"]
        full_path = self._validate_and_resolve_path(path)

        if not full_path.exists():
            msg = f"File not found: {path}"
            raise FileNotFoundError(msg)

        content = full_path.read_text()
        lines = content.split("\n")
        # Remember whether the file ended with a newline so we can restore it.
        had_trailing_newline = bool(lines) and lines[-1] == ""
        if had_trailing_newline:
            lines = lines[:-1]

        inserted_lines = text_to_insert.split("\n")
        # `insert_line` is the 0-indexed line after which the text is inserted.
        updated_lines = lines[:insert_line] + inserted_lines + lines[insert_line:]

        new_content = "\n".join(updated_lines)
        if had_trailing_newline:
            new_content += "\n"
        full_path.write_text(new_content)

        return self._message_command(f"Text inserted in {path}", tool_call_id)

    def _handle_delete(self, args: dict, tool_call_id: str | None) -> Command:
        """Handle delete command (idempotent: deleting a missing path succeeds)."""
        path = args["path"]
        full_path = self._validate_and_resolve_path(path)

        if full_path.is_file():
            full_path.unlink()
        elif full_path.is_dir():
            shutil.rmtree(full_path)
        # If the path doesn't exist, silently succeed.

        return self._message_command(f"File deleted: {path}", tool_call_id)

    def _handle_rename(self, args: dict, tool_call_id: str | None) -> Command:
        """Handle rename command.

        Raises:
            ValueError: If the source file does not exist.
        """
        # BUG FIX: the tool wrapper stores the source path under "path" (it
        # never sets an "old_path" key), so reading args["old_path"] raised
        # KeyError on every rename. Prefer "old_path" only when a direct
        # caller supplies it.
        old_path = args.get("old_path") or args["path"]
        new_path = args["new_path"]
        old_full = self._validate_and_resolve_path(old_path)
        new_full = self._validate_and_resolve_path(new_path)

        if not old_full.exists():
            msg = f"File not found: {old_path}"
            raise ValueError(msg)

        # Create parent directory for the destination, then move.
        new_full.parent.mkdir(parents=True, exist_ok=True)
        old_full.rename(new_full)

        return self._message_command(
            f"File renamed: {old_path} -> {new_path}", tool_call_id
        )
class FilesystemClaudeTextEditorMiddleware(_FilesystemClaudeFileToolMiddleware):
    """Filesystem-based text editor tool middleware.

    Provides Anthropic's `text_editor` tool using local filesystem for storage.
    User handles persistence via volumes, git, or other mechanisms.

    Example:
        ```python
        from langchain.agents import create_agent

        from langchain_anthropic.middleware import FilesystemClaudeTextEditorMiddleware

        agent = create_agent(
            model=model,
            tools=[],
            middleware=[FilesystemClaudeTextEditorMiddleware(root_path="/workspace")],
        )
        ```
    """

    def __init__(
        self,
        *,
        root_path: str,
        allowed_prefixes: list[str] | None = None,
        max_file_size_mb: int = 10,
    ) -> None:
        """Initialize the text editor middleware.

        Args:
            root_path: Root directory for file operations.
            allowed_prefixes: Optional list of allowed virtual path prefixes.
                Defaults to `['/']`.
            max_file_size_mb: Maximum file size in MB.
                Defaults to `10`.
        """
        super().__init__(
            tool_type=TEXT_EDITOR_TOOL_TYPE,
            tool_name=TEXT_EDITOR_TOOL_NAME,
            root_path=root_path,
            allowed_prefixes=allowed_prefixes,
            max_file_size_mb=max_file_size_mb,
        )
class FilesystemClaudeMemoryMiddleware(_FilesystemClaudeFileToolMiddleware):
    """Filesystem-based memory tool middleware.

    Provides Anthropic's memory tool using local filesystem for storage.
    User handles persistence via volumes, git, or other mechanisms.

    Enforces `/memories` prefix and injects Anthropic's recommended system
    prompt.

    Example:
        ```python
        from langchain.agents import create_agent

        from langchain_anthropic.middleware import FilesystemClaudeMemoryMiddleware

        agent = create_agent(
            model=model,
            tools=[],
            middleware=[FilesystemClaudeMemoryMiddleware(root_path="/workspace")],
        )
        ```
    """

    def __init__(
        self,
        *,
        root_path: str,
        allowed_prefixes: list[str] | None = None,
        max_file_size_mb: int = 10,
        system_prompt: str = MEMORY_SYSTEM_PROMPT,
    ) -> None:
        """Initialize the memory middleware.

        Args:
            root_path: Root directory for file operations.
            allowed_prefixes: Optional list of allowed virtual path prefixes.
                Defaults to `['/memories']`.
            max_file_size_mb: Maximum file size in MB.
                Defaults to `10`.
            system_prompt: System prompt to inject.
                Defaults to Anthropic's recommended memory prompt.
        """
        super().__init__(
            tool_type=MEMORY_TOOL_TYPE,
            tool_name=MEMORY_TOOL_NAME,
            root_path=root_path,
            allowed_prefixes=allowed_prefixes or ["/memories"],
            max_file_size_mb=max_file_size_mb,
            system_prompt=system_prompt,
        )
# Public API of this module, sorted alphabetically.
__all__ = [
    "AnthropicToolsState",
    "FileData",
    "FilesystemClaudeMemoryMiddleware",
    "FilesystemClaudeTextEditorMiddleware",
    "StateClaudeMemoryMiddleware",
    "StateClaudeTextEditorMiddleware",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/partners/anthropic/langchain_anthropic/middleware/anthropic_tools.py",
"license": "MIT License",
"lines": 980,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/partners/anthropic/langchain_anthropic/middleware/file_search.py | """File search middleware for Anthropic text editor and memory tools.
This module provides Glob and Grep search tools that operate on files stored
in state or filesystem.
"""
from __future__ import annotations
import fnmatch
import re
from pathlib import Path, PurePosixPath
from typing import TYPE_CHECKING, Literal, cast
if TYPE_CHECKING:
from typing import Any
from langchain.agents.middleware.types import AgentMiddleware
from langchain.tools import ToolRuntime, tool
from langchain_anthropic.middleware.anthropic_tools import AnthropicToolsState
def _expand_include_patterns(pattern: str) -> list[str] | None:
"""Expand brace patterns like `*.{py,pyi}` into a list of globs."""
if "}" in pattern and "{" not in pattern:
return None
expanded: list[str] = []
def _expand(current: str) -> None:
start = current.find("{")
if start == -1:
expanded.append(current)
return
end = current.find("}", start)
if end == -1:
raise ValueError
prefix = current[:start]
suffix = current[end + 1 :]
inner = current[start + 1 : end]
if not inner:
raise ValueError
for option in inner.split(","):
_expand(prefix + option + suffix)
try:
_expand(pattern)
except ValueError:
return None
return expanded
def _is_valid_include_pattern(pattern: str) -> bool:
    """Validate a glob pattern used for include filters.

    A pattern is valid when it is non-empty, contains no control characters,
    brace-expands cleanly, and every expanded glob compiles as a regex.
    """
    if not pattern:
        return False
    # Reject NUL and newline characters outright.
    if any(forbidden in pattern for forbidden in ("\x00", "\n", "\r")):
        return False

    candidates = _expand_include_patterns(pattern)
    if candidates is None:
        return False

    # Every expanded glob must translate to a compilable regex.
    try:
        for candidate in candidates:
            re.compile(fnmatch.translate(candidate))
    except re.error:
        return False
    return True
def _match_include_pattern(basename: str, pattern: str) -> bool:
    """Return `True` if the basename matches the include pattern."""
    candidates = _expand_include_patterns(pattern)
    if not candidates:
        # Malformed pattern (or no expansions): nothing matches.
        return False
    for candidate in candidates:
        if fnmatch.fnmatch(basename, candidate):
            return True
    return False
class StateFileSearchMiddleware(AgentMiddleware):
"""Provides Glob and Grep search over state-based files.
This middleware adds two tools that search through virtual files in state:
- Glob: Fast file pattern matching by file path
- Grep: Fast content search using regular expressions
Example:
```python
from langchain.agents import create_agent
from langchain.agents.middleware import (
StateTextEditorToolMiddleware,
StateFileSearchMiddleware,
)
agent = create_agent(
model=model,
tools=[],
middleware=[
StateTextEditorToolMiddleware(),
StateFileSearchMiddleware(),
],
)
```
"""
state_schema = AnthropicToolsState
def __init__(
self,
*,
state_key: str = "text_editor_files",
) -> None:
"""Initialize the search middleware.
Args:
state_key: State key to search
Use `'memory_files'` to search memory tool files.
"""
self.state_key = state_key
# Create tool instances
@tool
def glob_search( # noqa: D417
runtime: ToolRuntime[None, AnthropicToolsState],
pattern: str,
path: str = "/",
) -> str:
"""Fast file pattern matching tool that works with any codebase size.
Supports glob patterns like `**/*.js` or `src/**/*.ts`.
Returns matching file paths sorted by modification time.
Use this tool when you need to find files by name patterns.
Args:
pattern: The glob pattern to match files against.
path: The directory to search in.
If not specified, searches from root.
Returns:
Newline-separated list of matching file paths, sorted by modification
time (most recently modified first).
Returns `'No files found'` if no matches.
"""
return self._handle_glob_search(pattern, path, runtime.state)
@tool
def grep_search( # noqa: D417
runtime: ToolRuntime[None, AnthropicToolsState],
pattern: str,
path: str = "/",
include: str | None = None,
output_mode: Literal[
"files_with_matches", "content", "count"
] = "files_with_matches",
) -> str:
"""Fast content search tool that works with any codebase size.
Searches file contents using regular expressions.
Supports full regex syntax and filters files by pattern with the include
parameter.
Args:
pattern: The regular expression pattern to search for in file contents.
path: The directory to search in. If not specified, searches from root.
include: File pattern to filter (e.g., `'*.js'`, `'*.{ts,tsx}'`).
output_mode: Output format.
Options:
- `'files_with_matches'`: Only file paths containing matches
- `'content'`: Matching lines with file:line:content format
- `'count'`: Count of matches per file
Returns:
Search results formatted according to `output_mode`.
Returns `'No matches found'` if no results.
"""
return self._handle_grep_search(
pattern, path, include, output_mode, runtime.state
)
self.glob_search = glob_search
self.grep_search = grep_search
self.tools = [glob_search, grep_search]
def _handle_glob_search(
self,
pattern: str,
path: str,
state: AnthropicToolsState,
) -> str:
"""Handle glob search operation.
Args:
pattern: The glob pattern to match files against.
path: The directory to search in.
state: The current agent state.
Returns:
Newline-separated list of matching file paths, sorted by modification
time (most recently modified first).
Returns `'No files found'` if no matches.
"""
# Normalize base path
base_path = path if path.startswith("/") else "/" + path
# Get files from state
files = cast("dict[str, Any]", state.get(self.state_key, {}))
# Match files
matches = []
for file_path, file_data in files.items():
if file_path.startswith(base_path):
# Get relative path from base
if base_path == "/":
relative = file_path[1:] # Remove leading /
elif file_path == base_path:
relative = Path(file_path).name
elif file_path.startswith(base_path + "/"):
relative = file_path[len(base_path) + 1 :]
else:
continue
# Match against pattern
# Handle ** pattern which requires special care
# PurePosixPath.match doesn't match single-level paths
# against **/pattern
is_match = PurePosixPath(relative).match(pattern)
if not is_match and pattern.startswith("**/"):
# Also try matching without the **/ prefix for files in base dir
is_match = PurePosixPath(relative).match(pattern[3:])
if is_match:
matches.append((file_path, file_data["modified_at"]))
if not matches:
return "No files found"
# Sort by modification time
matches.sort(key=lambda x: x[1], reverse=True)
file_paths = [path for path, _ in matches]
return "\n".join(file_paths)
def _handle_grep_search(
self,
pattern: str,
path: str,
include: str | None,
output_mode: str,
state: AnthropicToolsState,
) -> str:
"""Handle grep search operation.
Args:
pattern: The regular expression pattern to search for in file contents.
path: The directory to search in.
include: File pattern to filter (e.g., `'*.js'`, `'*.{ts,tsx}'`).
output_mode: Output format.
state: The current agent state.
Returns:
Search results formatted according to `output_mode`.
Returns `'No matches found'` if no results.
"""
# Normalize base path
base_path = path if path.startswith("/") else "/" + path
# Compile regex pattern (for validation)
try:
regex = re.compile(pattern)
except re.error as e:
return f"Invalid regex pattern: {e}"
if include and not _is_valid_include_pattern(include):
return "Invalid include pattern"
# Search files
files = cast("dict[str, Any]", state.get(self.state_key, {}))
results: dict[str, list[tuple[int, str]]] = {}
for file_path, file_data in files.items():
if not file_path.startswith(base_path):
continue
# Check include filter
if include:
basename = Path(file_path).name
if not _match_include_pattern(basename, include):
continue
# Search file content
for line_num, line in enumerate(file_data["content"], 1):
if regex.search(line):
if file_path not in results:
results[file_path] = []
results[file_path].append((line_num, line))
if not results:
return "No matches found"
# Format output based on mode
return self._format_grep_results(results, output_mode)
def _format_grep_results(
self,
results: dict[str, list[tuple[int, str]]],
output_mode: str,
) -> str:
"""Format grep results based on output mode."""
if output_mode == "files_with_matches":
# Just return file paths
return "\n".join(sorted(results.keys()))
if output_mode == "content":
# Return file:line:content format
lines = []
for file_path in sorted(results.keys()):
for line_num, line in results[file_path]:
lines.append(f"{file_path}:{line_num}:{line}")
return "\n".join(lines)
if output_mode == "count":
# Return file:count format
lines = []
for file_path in sorted(results.keys()):
count = len(results[file_path])
lines.append(f"{file_path}:{count}")
return "\n".join(lines)
# Default to files_with_matches
return "\n".join(sorted(results.keys()))
# Public API of this module.
__all__ = [
    "StateFileSearchMiddleware",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/partners/anthropic/langchain_anthropic/middleware/file_search.py",
"license": "MIT License",
"lines": 274,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/partners/anthropic/tests/unit_tests/middleware/test_anthropic_tools.py | """Unit tests for Anthropic text editor and memory tool middleware."""
from unittest.mock import MagicMock
import pytest
from langchain_core.messages import SystemMessage, ToolMessage
from langgraph.types import Command
from langchain_anthropic.middleware.anthropic_tools import (
AnthropicToolsState,
StateClaudeMemoryMiddleware,
StateClaudeTextEditorMiddleware,
_validate_path,
)
class TestPathValidation:
    """Test path validation and security."""

    def test_basic_path_normalization(self) -> None:
        """Normalization yields clean absolute paths."""
        cases = [
            ("/foo/bar", "/foo/bar"),
            ("foo/bar", "/foo/bar"),
            ("/foo//bar", "/foo/bar"),
            ("/foo/./bar", "/foo/bar"),
        ]
        for raw, expected in cases:
            assert _validate_path(raw) == expected

    def test_path_traversal_blocked(self) -> None:
        """Traversal-style paths are rejected with a ValueError."""
        for dangerous in ("/foo/../etc/passwd", "../etc/passwd", "~/.ssh/id_rsa"):
            with pytest.raises(ValueError, match="Path traversal not allowed"):
                _validate_path(dangerous)

    def test_allowed_prefixes(self) -> None:
        """Prefix restrictions accept matching paths and reject the rest."""
        assert (
            _validate_path("/workspace/file.txt", allowed_prefixes=["/workspace"])
            == "/workspace/file.txt"
        )
        with pytest.raises(ValueError, match="Path must start with"):
            _validate_path("/etc/passwd", allowed_prefixes=["/workspace"])
        # A prefix must match on a path-component boundary, not a substring.
        with pytest.raises(ValueError, match="Path must start with"):
            _validate_path(
                "/workspacemalicious/file.txt", allowed_prefixes=["/workspace/"]
            )

    def test_memories_prefix(self) -> None:
        """The /memories prefix used by memory tools is enforced."""
        assert (
            _validate_path("/memories/notes.txt", allowed_prefixes=["/memories"])
            == "/memories/notes.txt"
        )
        with pytest.raises(ValueError, match="Path must start with"):
            _validate_path("/other/notes.txt", allowed_prefixes=["/memories"])
class TestTextEditorMiddleware:
    """Test text editor middleware functionality."""

    def test_middleware_initialization(self) -> None:
        """Defaults and path restrictions are applied at construction time."""
        mw = StateClaudeTextEditorMiddleware()
        expected_defaults = {
            "state_schema": AnthropicToolsState,
            "tool_type": "text_editor_20250728",
            "tool_name": "str_replace_based_edit_tool",
            "state_key": "text_editor_files",
        }
        for attr, value in expected_defaults.items():
            assert getattr(mw, attr) == value
        # Constructor accepts path restrictions and stores them verbatim.
        restricted = StateClaudeTextEditorMiddleware(
            allowed_path_prefixes=["/workspace"]
        )
        assert restricted.allowed_prefixes == ["/workspace"]
class TestMemoryMiddleware:
    """Test memory middleware functionality."""

    def test_middleware_initialization(self) -> None:
        """Defaults are applied at construction time."""
        mw = StateClaudeMemoryMiddleware()
        assert mw.state_schema == AnthropicToolsState
        assert mw.tool_type == "memory_20250818"
        assert mw.tool_name == "memory"
        assert mw.state_key == "memory_files"
        # A non-empty default system prompt ships with the middleware.
        assert mw.system_prompt

    def test_custom_system_prompt(self) -> None:
        """A caller-supplied system prompt replaces the default."""
        prompt = "Custom memory instructions"
        mw = StateClaudeMemoryMiddleware(system_prompt=prompt)
        assert mw.system_prompt == prompt
class TestFileOperations:
    """Test file operation implementations via wrap_tool_call.

    Handlers are exercised directly (``_handle_*``); each returns a langgraph
    ``Command`` whose ``update`` dict carries the state delta to apply.
    """
    def test_view_operation(self) -> None:
        """Test view command execution."""
        middleware = StateClaudeTextEditorMiddleware()
        # Pre-populate state with a single three-line file.
        state: AnthropicToolsState = {
            "messages": [],
            "text_editor_files": {
                "/test.txt": {
                    "content": ["line1", "line2", "line3"],
                    "created_at": "2025-01-01T00:00:00",
                    "modified_at": "2025-01-01T00:00:00",
                }
            },
        }
        args = {"command": "view", "path": "/test.txt"}
        result = middleware._handle_view(args, state, "test_id")
        assert isinstance(result, Command)
        assert result.update is not None
        # View emits exactly one ToolMessage echoing the tool_call_id.
        messages = result.update.get("messages", [])
        assert len(messages) == 1
        assert isinstance(messages[0], ToolMessage)
        # View output format is "lineno|content" with 1-based line numbers.
        assert messages[0].content == "1|line1\n2|line2\n3|line3"
        assert messages[0].tool_call_id == "test_id"
    def test_create_operation(self) -> None:
        """Test create command execution."""
        middleware = StateClaudeTextEditorMiddleware()
        state: AnthropicToolsState = {"messages": []}
        args = {"command": "create", "path": "/test.txt", "file_text": "line1\nline2"}
        result = middleware._handle_create(args, state, "test_id")
        assert isinstance(result, Command)
        assert result.update is not None
        # File text is stored split into a list of lines.
        files = result.update.get("text_editor_files", {})
        assert "/test.txt" in files
        assert files["/test.txt"]["content"] == ["line1", "line2"]
    def test_path_prefix_enforcement(self) -> None:
        """Test that path prefixes are enforced."""
        middleware = StateClaudeTextEditorMiddleware(
            allowed_path_prefixes=["/workspace"]
        )
        state: AnthropicToolsState = {"messages": []}
        # Should fail with /etc/passwd (outside the allowed prefix)
        args = {"command": "create", "path": "/etc/passwd", "file_text": "test"}
        with pytest.raises(ValueError, match="Path must start with"):
            middleware._handle_create(args, state, "test_id")
    def test_memories_prefix_enforcement(self) -> None:
        """Test that /memories prefix is enforced for memory middleware."""
        middleware = StateClaudeMemoryMiddleware()
        state: AnthropicToolsState = {"messages": []}
        # Should fail with /other/path — memory files must live under /memories
        args = {"command": "create", "path": "/other/path.txt", "file_text": "test"}
        with pytest.raises(ValueError, match="/memories"):
            middleware._handle_create(args, state, "test_id")
    def test_str_replace_operation(self) -> None:
        """Test str_replace command execution."""
        middleware = StateClaudeTextEditorMiddleware()
        # Both lines contain "world" so the test can prove only one is changed.
        state: AnthropicToolsState = {
            "messages": [],
            "text_editor_files": {
                "/test.txt": {
                    "content": ["Hello world", "Goodbye world"],
                    "created_at": "2025-01-01T00:00:00",
                    "modified_at": "2025-01-01T00:00:00",
                }
            },
        }
        args = {
            "command": "str_replace",
            "path": "/test.txt",
            "old_str": "world",
            "new_str": "universe",
        }
        result = middleware._handle_str_replace(args, state, "test_id")
        assert isinstance(result, Command)
        assert result.update is not None
        files = result.update.get("text_editor_files", {})
        # Should only replace first occurrence
        assert files["/test.txt"]["content"] == ["Hello universe", "Goodbye world"]
    def test_insert_operation(self) -> None:
        """Test insert command execution."""
        middleware = StateClaudeTextEditorMiddleware()
        state: AnthropicToolsState = {
            "messages": [],
            "text_editor_files": {
                "/test.txt": {
                    "content": ["line1", "line2"],
                    "created_at": "2025-01-01T00:00:00",
                    "modified_at": "2025-01-01T00:00:00",
                }
            },
        }
        # insert_line 0 inserts before the first line.
        args = {
            "command": "insert",
            "path": "/test.txt",
            "insert_line": 0,
            "new_str": "inserted",
        }
        result = middleware._handle_insert(args, state, "test_id")
        assert isinstance(result, Command)
        assert result.update is not None
        files = result.update.get("text_editor_files", {})
        assert files["/test.txt"]["content"] == ["inserted", "line1", "line2"]
    def test_delete_operation(self) -> None:
        """Test delete command execution (memory only)."""
        middleware = StateClaudeMemoryMiddleware()
        state: AnthropicToolsState = {
            "messages": [],
            "memory_files": {
                "/memories/test.txt": {
                    "content": ["line1"],
                    "created_at": "2025-01-01T00:00:00",
                    "modified_at": "2025-01-01T00:00:00",
                }
            },
        }
        args = {"command": "delete", "path": "/memories/test.txt"}
        result = middleware._handle_delete(args, state, "test_id")
        assert isinstance(result, Command)
        assert result.update is not None
        files = result.update.get("memory_files", {})
        # Deleted files are marked as None in state (tombstone, not removal)
        assert files.get("/memories/test.txt") is None
    def test_rename_operation(self) -> None:
        """Test rename command execution (memory only)."""
        middleware = StateClaudeMemoryMiddleware()
        state: AnthropicToolsState = {
            "messages": [],
            "memory_files": {
                "/memories/old.txt": {
                    "content": ["line1"],
                    "created_at": "2025-01-01T00:00:00",
                    "modified_at": "2025-01-01T00:00:00",
                }
            },
        }
        args = {
            "command": "rename",
            "old_path": "/memories/old.txt",
            "new_path": "/memories/new.txt",
        }
        result = middleware._handle_rename(args, state, "test_id")
        assert isinstance(result, Command)
        assert result.update is not None
        files = result.update.get("memory_files", {})
        # Old path is marked as None (deleted)
        assert files.get("/memories/old.txt") is None
        # New path has the file data carried over unchanged
        assert files.get("/memories/new.txt") is not None
        assert files["/memories/new.txt"]["content"] == ["line1"]
class TestSystemMessageHandling:
    """Test system message handling in wrap_model_call.

    Each test passes a recording ``handler`` so it can inspect the request
    the middleware actually forwards to the model.
    """
    def test_text_editor_no_system_message(self) -> None:
        """Test text editor middleware without system message."""
        # Imported locally to avoid a hard module-level dependency.
        from langchain.agents.middleware.types import ModelRequest
        middleware = StateClaudeTextEditorMiddleware()
        request = ModelRequest(
            model=MagicMock(),
            messages=[],
            system_message=None,
            tool_choice=None,
            tools=[],
            response_format=None,
            state={"messages": []},
            runtime=MagicMock(),
        )
        captured_request = None
        # Handler records the (possibly modified) request it receives.
        def handler(req: ModelRequest) -> MagicMock:
            nonlocal captured_request
            captured_request = req
            return MagicMock()
        middleware.wrap_model_call(request, handler)
        # No system message should be added for text editor
        assert captured_request is not None
        assert captured_request.system_message is None
    def test_memory_middleware_adds_system_message(self) -> None:
        """Test memory middleware adds system message when none exists."""
        from langchain.agents.middleware.types import ModelRequest
        middleware = StateClaudeMemoryMiddleware()
        request = ModelRequest(
            model=MagicMock(),
            messages=[],
            system_message=None,
            tool_choice=None,
            tools=[],
            response_format=None,
            state={"messages": []},
            runtime=MagicMock(),
        )
        captured_request = None
        def handler(req: ModelRequest) -> MagicMock:
            nonlocal captured_request
            captured_request = req
            return MagicMock()
        middleware.wrap_model_call(request, handler)
        # System message should be added with the default memory instructions
        assert captured_request is not None
        assert captured_request.system_message is not None
        assert isinstance(captured_request.system_message, SystemMessage)
        assert "MEMORY PROTOCOL" in captured_request.system_message.text
    def test_memory_middleware_merges_system_message(self) -> None:
        """Test memory middleware merges with existing system message."""
        from langchain.agents.middleware.types import ModelRequest
        middleware = StateClaudeMemoryMiddleware()
        existing_message = SystemMessage("You are a helpful assistant.")
        request = ModelRequest(
            model=MagicMock(),
            messages=[],
            system_message=existing_message,
            tool_choice=None,
            tools=[],
            response_format=None,
            state={"messages": []},
            runtime=MagicMock(),
        )
        captured_request = None
        def handler(req: ModelRequest) -> MagicMock:
            nonlocal captured_request
            captured_request = req
            return MagicMock()
        middleware.wrap_model_call(request, handler)
        # System message should be merged: both the caller's text and the
        # middleware's memory instructions must survive.
        assert captured_request is not None
        assert captured_request.system_message is not None
        assert isinstance(captured_request.system_message, SystemMessage)
        assert "You are a helpful assistant." in captured_request.system_message.text
        assert "MEMORY PROTOCOL" in captured_request.system_message.text
    async def test_async_memory_middleware_merges_system_message(self) -> None:
        """Test async memory middleware merges with existing system message."""
        from langchain.agents.middleware.types import ModelRequest
        middleware = StateClaudeMemoryMiddleware()
        existing_message = SystemMessage("You are a helpful assistant.")
        request = ModelRequest(
            model=MagicMock(),
            messages=[],
            system_message=existing_message,
            tool_choice=None,
            tools=[],
            response_format=None,
            state={"messages": []},
            runtime=MagicMock(),
        )
        captured_request = None
        # Async mirror of the sync merge test, via awrap_model_call.
        async def handler(req: ModelRequest) -> MagicMock:
            nonlocal captured_request
            captured_request = req
            return MagicMock()
        await middleware.awrap_model_call(request, handler)
        # System message should be merged
        assert captured_request is not None
        assert captured_request.system_message is not None
        assert isinstance(captured_request.system_message, SystemMessage)
        assert "You are a helpful assistant." in captured_request.system_message.text
        assert "MEMORY PROTOCOL" in captured_request.system_message.text
    def test_custom_system_prompt_merges_correctly(self) -> None:
        """Test custom system prompt merges with existing system message."""
        from langchain.agents.middleware.types import ModelRequest
        custom_prompt = "Custom instructions for memory tool."
        middleware = StateClaudeMemoryMiddleware(system_prompt=custom_prompt)
        existing_message = SystemMessage("Existing instructions.")
        request = ModelRequest(
            model=MagicMock(),
            messages=[],
            system_message=existing_message,
            tool_choice=None,
            tools=[],
            response_format=None,
            state={"messages": []},
            runtime=MagicMock(),
        )
        captured_request = None
        def handler(req: ModelRequest) -> MagicMock:
            nonlocal captured_request
            captured_request = req
            return MagicMock()
        middleware.wrap_model_call(request, handler)
        # Both prompts should be in the final message
        assert captured_request is not None
        assert captured_request.system_message is not None
        assert "Existing instructions." in captured_request.system_message.text
        assert custom_prompt in captured_request.system_message.text
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/partners/anthropic/tests/unit_tests/middleware/test_anthropic_tools.py",
"license": "MIT License",
"lines": 358,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/partners/anthropic/tests/unit_tests/middleware/test_file_search.py | """Unit tests for file search middleware."""
from langchain_anthropic.middleware.anthropic_tools import AnthropicToolsState
from langchain_anthropic.middleware.file_search import (
StateFileSearchMiddleware,
)
class TestSearchMiddlewareInitialization:
    """Test search middleware initialization."""

    def test_middleware_initialization(self) -> None:
        """Defaults: text editor file map is the search backend."""
        mw = StateFileSearchMiddleware()
        assert mw.state_schema == AnthropicToolsState
        assert mw.state_key == "text_editor_files"

    def test_custom_state_key(self) -> None:
        """A caller-chosen state key is stored verbatim."""
        mw = StateFileSearchMiddleware(state_key="memory_files")
        assert mw.state_key == "memory_files"
class TestGlobSearch:
    """Test Glob file pattern matching.

    Tests call the internal ``_handle_glob_search`` handler directly against
    a hand-built state dict rather than going through the tool wrapper.
    """
    def test_glob_basic_pattern(self) -> None:
        """Test basic glob pattern matching."""
        middleware = StateFileSearchMiddleware()
        test_state: AnthropicToolsState = {
            "messages": [],
            "text_editor_files": {
                "/src/main.py": {
                    "content": ["print('hello')"],
                    "created_at": "2025-01-01T00:00:00",
                    "modified_at": "2025-01-01T00:00:00",
                },
                "/src/utils.py": {
                    "content": ["def helper(): pass"],
                    "created_at": "2025-01-01T00:00:00",
                    "modified_at": "2025-01-01T00:00:00",
                },
                "/README.md": {
                    "content": ["# Project"],
                    "created_at": "2025-01-01T00:00:00",
                    "modified_at": "2025-01-01T00:00:00",
                },
            },
        }
        # Call internal handler method directly
        result = middleware._handle_glob_search(
            pattern="*.py", path="/", state=test_state
        )
        # Only .py files match; the markdown file is excluded.
        assert isinstance(result, str)
        assert "/src/main.py" in result
        assert "/src/utils.py" in result
        assert "/README.md" not in result
    def test_glob_recursive_pattern(self) -> None:
        """Test recursive glob pattern matching."""
        middleware = StateFileSearchMiddleware()
        state: AnthropicToolsState = {
            "messages": [],
            "text_editor_files": {
                "/src/main.py": {
                    "content": [],
                    "created_at": "2025-01-01T00:00:00",
                    "modified_at": "2025-01-01T00:00:00",
                },
                "/src/utils/helper.py": {
                    "content": [],
                    "created_at": "2025-01-01T00:00:00",
                    "modified_at": "2025-01-01T00:00:00",
                },
                "/tests/test_main.py": {
                    "content": [],
                    "created_at": "2025-01-01T00:00:00",
                    "modified_at": "2025-01-01T00:00:00",
                },
            },
        }
        result = middleware._handle_glob_search(
            pattern="**/*.py", path="/", state=state
        )
        # "**/*.py" matches .py files at any depth — all three fixtures.
        assert isinstance(result, str)
        lines = result.split("\n")
        assert len(lines) == 3
        assert all(".py" in line for line in lines)
    def test_glob_with_base_path(self) -> None:
        """Test glob with base path restriction."""
        middleware = StateFileSearchMiddleware()
        state: AnthropicToolsState = {
            "messages": [],
            "text_editor_files": {
                "/src/main.py": {
                    "content": [],
                    "created_at": "2025-01-01T00:00:00",
                    "modified_at": "2025-01-01T00:00:00",
                },
                "/tests/test.py": {
                    "content": [],
                    "created_at": "2025-01-01T00:00:00",
                    "modified_at": "2025-01-01T00:00:00",
                },
            },
        }
        result = middleware._handle_glob_search(
            pattern="**/*.py", path="/src", state=state
        )
        # Only files under /src are searched.
        assert isinstance(result, str)
        assert "/src/main.py" in result
        assert "/tests/test.py" not in result
    def test_glob_no_matches(self) -> None:
        """Test glob with no matching files."""
        middleware = StateFileSearchMiddleware()
        state: AnthropicToolsState = {
            "messages": [],
            "text_editor_files": {
                "/src/main.py": {
                    "content": [],
                    "created_at": "2025-01-01T00:00:00",
                    "modified_at": "2025-01-01T00:00:00",
                },
            },
        }
        result = middleware._handle_glob_search(pattern="*.ts", path="/", state=state)
        # Sentinel string, not an empty result or an exception.
        assert isinstance(result, str)
        assert result == "No files found"
    def test_glob_sorts_by_modified_time(self) -> None:
        """Test that glob results are sorted by modification time."""
        middleware = StateFileSearchMiddleware()
        # /new.py has a later modified_at than /old.py.
        state: AnthropicToolsState = {
            "messages": [],
            "text_editor_files": {
                "/old.py": {
                    "content": [],
                    "created_at": "2025-01-01T00:00:00",
                    "modified_at": "2025-01-01T00:00:00",
                },
                "/new.py": {
                    "content": [],
                    "created_at": "2025-01-01T00:00:00",
                    "modified_at": "2025-01-02T00:00:00",
                },
            },
        }
        result = middleware._handle_glob_search(pattern="*.py", path="/", state=state)
        lines = result.split("\n")
        # Most recent first
        assert lines[0] == "/new.py"
        assert lines[1] == "/old.py"
class TestGrepSearch:
    """Test Grep content search.

    Tests call the internal ``_handle_grep_search`` handler directly.
    """
    def test_grep_files_with_matches_mode(self) -> None:
        """Test grep with files_with_matches output mode."""
        middleware = StateFileSearchMiddleware()
        state: AnthropicToolsState = {
            "messages": [],
            "text_editor_files": {
                "/src/main.py": {
                    "content": ["def foo():", "    pass"],
                    "created_at": "2025-01-01T00:00:00",
                    "modified_at": "2025-01-01T00:00:00",
                },
                "/src/utils.py": {
                    "content": ["def bar():", "    return None"],
                    "created_at": "2025-01-01T00:00:00",
                    "modified_at": "2025-01-01T00:00:00",
                },
                "/README.md": {
                    "content": ["# Documentation", "No code here"],
                    "created_at": "2025-01-01T00:00:00",
                    "modified_at": "2025-01-01T00:00:00",
                },
            },
        }
        # Regex matches Python function definition lines only.
        result = middleware._handle_grep_search(
            pattern=r"def \w+\(\):",
            path="/",
            include=None,
            output_mode="files_with_matches",
            state=state,
        )
        assert isinstance(result, str)
        assert "/src/main.py" in result
        assert "/src/utils.py" in result
        assert "/README.md" not in result
        # Should only have file paths, not line content
    def test_grep_invalid_include_pattern(self) -> None:
        """Return error when include glob is invalid."""
        middleware = StateFileSearchMiddleware()
        state: AnthropicToolsState = {
            "messages": [],
            "text_editor_files": {
                "/src/main.py": {
                    "content": ["def foo():"],
                    "created_at": "2025-01-01T00:00:00",
                    "modified_at": "2025-01-01T00:00:00",
                }
            },
        }
        # "*.{py" has an unclosed brace expansion — rejected up front.
        result = middleware._handle_grep_search(
            pattern=r"def",
            path="/",
            include="*.{py",
            output_mode="files_with_matches",
            state=state,
        )
        assert result == "Invalid include pattern"
class TestFilesystemGrepSearch:
    """Tests for grep output modes and filters.

    NOTE(review): despite the class name, these tests run against the
    state-backed file store (``text_editor_files``), not a real filesystem —
    consider renaming the class.
    """
    def test_grep_content_mode(self) -> None:
        """Test grep with content output mode."""
        middleware = StateFileSearchMiddleware()
        state: AnthropicToolsState = {
            "messages": [],
            "text_editor_files": {
                "/src/main.py": {
                    "content": ["def foo():", "    pass", "def bar():"],
                    "created_at": "2025-01-01T00:00:00",
                    "modified_at": "2025-01-01T00:00:00",
                },
            },
        }
        result = middleware._handle_grep_search(
            pattern=r"def \w+\(\):",
            path="/",
            include=None,
            output_mode="content",
            state=state,
        )
        # Content mode yields "file:line:content" with 1-based line numbers.
        assert isinstance(result, str)
        lines = result.split("\n")
        assert len(lines) == 2
        assert lines[0] == "/src/main.py:1:def foo():"
        assert lines[1] == "/src/main.py:3:def bar():"
    def test_grep_count_mode(self) -> None:
        """Test grep with count output mode."""
        middleware = StateFileSearchMiddleware()
        state: AnthropicToolsState = {
            "messages": [],
            "text_editor_files": {
                "/src/main.py": {
                    "content": ["TODO: fix this", "print('hello')", "TODO: add tests"],
                    "created_at": "2025-01-01T00:00:00",
                    "modified_at": "2025-01-01T00:00:00",
                },
                "/src/utils.py": {
                    "content": ["TODO: implement"],
                    "created_at": "2025-01-01T00:00:00",
                    "modified_at": "2025-01-01T00:00:00",
                },
            },
        }
        result = middleware._handle_grep_search(
            pattern=r"TODO", path="/", include=None, output_mode="count", state=state
        )
        # Count mode yields "file:match_count" per file.
        assert isinstance(result, str)
        lines = result.split("\n")
        assert "/src/main.py:2" in lines
        assert "/src/utils.py:1" in lines
    def test_grep_with_include_filter(self) -> None:
        """Test grep with include file pattern filter."""
        middleware = StateFileSearchMiddleware()
        state: AnthropicToolsState = {
            "messages": [],
            "text_editor_files": {
                "/src/main.py": {
                    "content": ["import os"],
                    "created_at": "2025-01-01T00:00:00",
                    "modified_at": "2025-01-01T00:00:00",
                },
                "/src/main.ts": {
                    "content": ["import os from 'os'"],
                    "created_at": "2025-01-01T00:00:00",
                    "modified_at": "2025-01-01T00:00:00",
                },
            },
        }
        # Both files contain "import"; the include filter keeps only *.py.
        result = middleware._handle_grep_search(
            pattern="import",
            path="/",
            include="*.py",
            output_mode="files_with_matches",
            state=state,
        )
        assert isinstance(result, str)
        assert "/src/main.py" in result
        assert "/src/main.ts" not in result
    def test_grep_with_brace_expansion_filter(self) -> None:
        """Test grep with brace expansion in include filter."""
        middleware = StateFileSearchMiddleware()
        state: AnthropicToolsState = {
            "messages": [],
            "text_editor_files": {
                "/src/main.ts": {
                    "content": ["const x = 1"],
                    "created_at": "2025-01-01T00:00:00",
                    "modified_at": "2025-01-01T00:00:00",
                },
                "/src/App.tsx": {
                    "content": ["const y = 2"],
                    "created_at": "2025-01-01T00:00:00",
                    "modified_at": "2025-01-01T00:00:00",
                },
                "/src/main.py": {
                    "content": ["z = 3"],
                    "created_at": "2025-01-01T00:00:00",
                    "modified_at": "2025-01-01T00:00:00",
                },
            },
        }
        # "{ts,tsx}" expands to both extensions; .py is excluded.
        result = middleware._handle_grep_search(
            pattern="const",
            path="/",
            include="*.{ts,tsx}",
            output_mode="files_with_matches",
            state=state,
        )
        assert isinstance(result, str)
        assert "/src/main.ts" in result
        assert "/src/App.tsx" in result
        assert "/src/main.py" not in result
    def test_grep_with_base_path(self) -> None:
        """Test grep with base path restriction."""
        middleware = StateFileSearchMiddleware()
        state: AnthropicToolsState = {
            "messages": [],
            "text_editor_files": {
                "/src/main.py": {
                    "content": ["import foo"],
                    "created_at": "2025-01-01T00:00:00",
                    "modified_at": "2025-01-01T00:00:00",
                },
                "/tests/test.py": {
                    "content": ["import foo"],
                    "created_at": "2025-01-01T00:00:00",
                    "modified_at": "2025-01-01T00:00:00",
                },
            },
        }
        # Both files match the pattern; only the /src one is inside the path.
        result = middleware._handle_grep_search(
            pattern="import",
            path="/src",
            include=None,
            output_mode="files_with_matches",
            state=state,
        )
        assert isinstance(result, str)
        assert "/src/main.py" in result
        assert "/tests/test.py" not in result
    def test_grep_no_matches(self) -> None:
        """Test grep with no matching content."""
        middleware = StateFileSearchMiddleware()
        state: AnthropicToolsState = {
            "messages": [],
            "text_editor_files": {
                "/src/main.py": {
                    "content": ["print('hello')"],
                    "created_at": "2025-01-01T00:00:00",
                    "modified_at": "2025-01-01T00:00:00",
                },
            },
        }
        result = middleware._handle_grep_search(
            pattern=r"TODO",
            path="/",
            include=None,
            output_mode="files_with_matches",
            state=state,
        )
        # Sentinel string rather than empty output.
        assert isinstance(result, str)
        assert result == "No matches found"
    def test_grep_invalid_regex(self) -> None:
        """Test grep with invalid regex pattern."""
        middleware = StateFileSearchMiddleware()
        state: AnthropicToolsState = {
            "messages": [],
            "text_editor_files": {},
        }
        # "[unclosed" is not a valid regex — handler reports, not raises.
        result = middleware._handle_grep_search(
            pattern=r"[unclosed",
            path="/",
            include=None,
            output_mode="files_with_matches",
            state=state,
        )
        assert isinstance(result, str)
        assert "Invalid regex pattern" in result
class TestSearchWithDifferentBackends:
    """Test searching with different backend configurations.

    The middleware searches exactly one state key; files stored under other
    keys (e.g. ``memory_files``) must never leak into results.
    """
    def test_glob_default_backend(self) -> None:
        """Test that glob searches the default backend (text_editor_files)."""
        middleware = StateFileSearchMiddleware()
        # State holds files under BOTH keys to prove only one is searched.
        state: AnthropicToolsState = {
            "messages": [],
            "text_editor_files": {
                "/src/main.py": {
                    "content": [],
                    "created_at": "2025-01-01T00:00:00",
                    "modified_at": "2025-01-01T00:00:00",
                },
            },
            "memory_files": {
                "/memories/notes.txt": {
                    "content": [],
                    "created_at": "2025-01-01T00:00:00",
                    "modified_at": "2025-01-01T00:00:00",
                },
            },
        }
        result = middleware._handle_glob_search(pattern="**/*", path="/", state=state)
        assert isinstance(result, str)
        assert "/src/main.py" in result
        # Should NOT find memory_files since default backend is text_editor_files
        assert "/memories/notes.txt" not in result
    def test_grep_default_backend(self) -> None:
        """Test that grep searches the default backend (text_editor_files)."""
        middleware = StateFileSearchMiddleware()
        # Both stores contain a TODO line; only the default store should hit.
        state: AnthropicToolsState = {
            "messages": [],
            "text_editor_files": {
                "/src/main.py": {
                    "content": ["TODO: implement"],
                    "created_at": "2025-01-01T00:00:00",
                    "modified_at": "2025-01-01T00:00:00",
                },
            },
            "memory_files": {
                "/memories/tasks.txt": {
                    "content": ["TODO: review"],
                    "created_at": "2025-01-01T00:00:00",
                    "modified_at": "2025-01-01T00:00:00",
                },
            },
        }
        result = middleware._handle_grep_search(
            pattern=r"TODO",
            path="/",
            include=None,
            output_mode="files_with_matches",
            state=state,
        )
        assert isinstance(result, str)
        assert "/src/main.py" in result
        # Should NOT find memory_files since default backend is text_editor_files
        assert "/memories/tasks.txt" not in result
    def test_search_with_single_store(self) -> None:
        """Test searching with a specific state key."""
        # Explicitly configured key behaves the same as the default.
        middleware = StateFileSearchMiddleware(state_key="text_editor_files")
        state: AnthropicToolsState = {
            "messages": [],
            "text_editor_files": {
                "/src/main.py": {
                    "content": ["code"],
                    "created_at": "2025-01-01T00:00:00",
                    "modified_at": "2025-01-01T00:00:00",
                },
            },
            "memory_files": {
                "/memories/notes.txt": {
                    "content": ["notes"],
                    "created_at": "2025-01-01T00:00:00",
                    "modified_at": "2025-01-01T00:00:00",
                },
            },
        }
        # ".*" matches every line, so only backend selection filters results.
        result = middleware._handle_grep_search(
            pattern=r".*",
            path="/",
            include=None,
            output_mode="files_with_matches",
            state=state,
        )
        assert isinstance(result, str)
        assert "/src/main.py" in result
        assert "/memories/notes.txt" not in result
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/partners/anthropic/tests/unit_tests/middleware/test_file_search.py",
"license": "MIT License",
"lines": 468,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/langchain_v1/tests/unit_tests/agents/test_state_schema.py | """Test state_schema parameter in create_agent.
This module tests that the state_schema parameter allows users to extend
AgentState without needing to create custom middleware.
"""
from __future__ import annotations
from typing import TYPE_CHECKING, Annotated, Any
from langchain_core.messages import HumanMessage
from langchain_core.tools import tool
from langchain.agents import create_agent
from langchain.agents.middleware.types import (
AgentMiddleware,
AgentState,
PrivateStateAttr,
)
# Cannot move ToolRuntime to TYPE_CHECKING as parameters of @tool annotated functions
# are inspected at runtime.
from langchain.tools import ToolRuntime # noqa: TC001
from tests.unit_tests.agents.model import FakeToolCallingModel
if TYPE_CHECKING:
from langgraph.runtime import Runtime
@tool
def simple_tool(x: int) -> str:
    """Simple tool for basic tests."""
    # NOTE(review): the docstring above presumably doubles as the tool's
    # description surfaced to the model by @tool — confirm before editing it.
    return f"Result: {x}"
def test_state_schema_single_custom_field() -> None:
    """Test that a single custom state field is preserved through agent execution."""

    class CustomState(AgentState[Any]):
        custom_field: str

    fake_model = FakeToolCallingModel(
        tool_calls=[[{"args": {"x": 1}, "id": "call_1", "name": "simple_tool"}], []]
    )
    agent = create_agent(
        model=fake_model,
        tools=[simple_tool],
        state_schema=CustomState,
    )
    result = agent.invoke(
        {"messages": [HumanMessage("Test")], "custom_field": "test_value"}
    )
    # The custom field round-trips untouched; 4 messages = human input,
    # AI tool call, tool result, final AI reply.
    assert result["custom_field"] == "test_value"
    assert len(result["messages"]) == 4
def test_state_schema_multiple_custom_fields() -> None:
    """Several extra state keys all survive a full agent run unchanged."""

    class MultiFieldState(AgentState[Any]):
        user_id: str
        session_id: str
        context: str

    agent = create_agent(
        model=FakeToolCallingModel(
            tool_calls=[[{"args": {"x": 1}, "id": "call_1", "name": "simple_tool"}], []]
        ),
        tools=[simple_tool],
        state_schema=MultiFieldState,
    )
    seed = {
        "messages": [HumanMessage("Test")],
        "user_id": "user_123",
        "session_id": "session_456",
        "context": "test_ctx",
    }
    result = agent.invoke(seed)
    # Every custom key is echoed back; the message trace has the usual shape.
    for key in ("user_id", "session_id", "context"):
        assert result[key] == seed[key]
    assert len(result["messages"]) == 4
def test_state_schema_with_tool_runtime() -> None:
    """Custom state fields declared via state_schema are visible to tools."""

    class ExtendedState(AgentState[Any]):
        counter: int

    seen: dict[str, Any] = {}

    @tool
    def counter_tool(x: int, runtime: ToolRuntime) -> str:
        """Tool that accesses custom state field."""
        seen["counter"] = runtime.state["counter"]
        return f"Counter is {seen['counter']}, x is {x}"

    agent = create_agent(
        model=FakeToolCallingModel(
            tool_calls=[[{"args": {"x": 10}, "id": "call_1", "name": "counter_tool"}], []]
        ),
        tools=[counter_tool],
        state_schema=ExtendedState,
    )
    result = agent.invoke({"messages": [HumanMessage("Test")], "counter": 5})
    # The tool observed the custom field and embedded it in its reply.
    assert seen["counter"] == 5
    assert "Counter is 5" in result["messages"][2].content
def test_state_schema_with_middleware() -> None:
    """state_schema is merged with schemas contributed by middleware."""

    class UserState(AgentState[Any]):
        user_name: str

    class MiddlewareState(AgentState[Any]):
        middleware_data: str

    observed: list[str] = []

    class RecordingMiddleware(AgentMiddleware[MiddlewareState, None]):
        state_schema = MiddlewareState

        def before_model(self, state: MiddlewareState, runtime: Runtime) -> dict[str, Any]:
            observed.append(state["middleware_data"])
            return {}

    agent = create_agent(
        model=FakeToolCallingModel(
            tool_calls=[[{"args": {"x": 5}, "id": "call_1", "name": "simple_tool"}], []]
        ),
        tools=[simple_tool],
        state_schema=UserState,
        middleware=[RecordingMiddleware()],
    )
    result = agent.invoke(
        {
            "messages": [HumanMessage("Test")],
            "user_name": "Alice",
            "middleware_data": "test_data",
        }
    )
    # Keys from both schemas round-trip, and the middleware saw its own key.
    assert result["user_name"] == "Alice"
    assert result["middleware_data"] == "test_data"
    assert "test_data" in observed
def test_state_schema_none_uses_default() -> None:
    """Passing state_schema=None falls back to the stock AgentState."""
    agent = create_agent(
        model=FakeToolCallingModel(
            tool_calls=[[{"args": {"x": 1}, "id": "call_1", "name": "simple_tool"}], []]
        ),
        tools=[simple_tool],
        state_schema=None,
    )
    outcome = agent.invoke({"messages": [HumanMessage("Test")]})
    messages = outcome["messages"]
    # Human -> AI (tool call) -> Tool result -> final AI.
    assert len(messages) == 4
    assert "Result: 1" in messages[2].content
async def test_state_schema_async() -> None:
    """state_schema behaves the same on the async execution path."""

    class AsyncState(AgentState[Any]):
        async_field: str

    @tool
    async def async_tool(x: int) -> str:
        """Async tool."""
        return f"Async: {x}"

    agent = create_agent(
        model=FakeToolCallingModel(
            tool_calls=[[{"args": {"x": 99}, "id": "call_1", "name": "async_tool"}], []]
        ),
        tools=[async_tool],
        state_schema=AsyncState,
    )
    payload = {
        "messages": [HumanMessage("Test async")],
        "async_field": "async_value",
    }
    result = await agent.ainvoke(payload)
    # The custom key round-trips and the tool output appears in the trace.
    assert result["async_field"] == "async_value"
    assert "Async: 99" in result["messages"][2].content
def test_state_schema_with_private_state_field() -> None:
    """Fields annotated with PrivateStateAttr never leak into input or output.

    A private field:
    - is omitted from the input schema (ignored even if supplied at invoke),
    - is omitted from the output schema (absent from results),
    - is invisible to tools reading state mid-run.
    """

    class StateWithPrivateField(AgentState[Any]):
        public_field: str
        private_field: Annotated[str, PrivateStateAttr]

    snapshot: dict[str, Any] = {}

    @tool
    def capture_state_tool(x: int, runtime: ToolRuntime) -> str:
        """Tool that captures the current state for inspection."""
        snapshot["state"] = dict(runtime.state)
        return f"Captured state with x={x}"

    agent = create_agent(
        model=FakeToolCallingModel(
            tool_calls=[
                [{"args": {"x": 42}, "id": "call_1", "name": "capture_state_tool"}],
                [],
            ]
        ),
        tools=[capture_state_tool],
        state_schema=StateWithPrivateField,
    )
    # Supply BOTH keys; only the public one should survive.
    result = agent.invoke(
        {
            "messages": [HumanMessage("Test private state")],
            "public_field": "public_value",
            "private_field": "private_value",  # Expected to be filtered out
        }
    )
    # Public key preserved; private key absent from the output.
    assert result["public_field"] == "public_value"
    assert "private_field" not in result
    # During tool execution the private key was also absent.
    mid_run_state = snapshot["state"]
    assert "private_field" not in mid_run_state
    assert mid_run_state["public_field"] == "public_value"
    # Human, AI (tool call), Tool result, AI (final).
    assert len(result["messages"]) == 4
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/tests/unit_tests/agents/test_state_schema.py",
"license": "MIT License",
"lines": 192,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/langchain_v1/langchain/agents/middleware/tool_retry.py | """Tool retry middleware for agents."""
from __future__ import annotations
import asyncio
import time
import warnings
from typing import TYPE_CHECKING, Any
from langchain_core.messages import ToolMessage
from langchain.agents.middleware._retry import (
OnFailure,
RetryOn,
calculate_delay,
should_retry_exception,
validate_retry_params,
)
from langchain.agents.middleware.types import AgentMiddleware, AgentState, ContextT, ResponseT
if TYPE_CHECKING:
from collections.abc import Awaitable, Callable
from langgraph.types import Command
from langchain.agents.middleware.types import ToolCallRequest
from langchain.tools import BaseTool
class ToolRetryMiddleware(AgentMiddleware[AgentState[ResponseT], ContextT, ResponseT]):
    """Middleware that automatically retries failed tool calls with configurable backoff.

    Supports retrying on specific exceptions and exponential backoff.

    Examples:
        !!! example "Basic usage with default settings (2 retries, exponential backoff)"
            ```python
            from langchain.agents import create_agent
            from langchain.agents.middleware import ToolRetryMiddleware

            agent = create_agent(model, tools=[search_tool], middleware=[ToolRetryMiddleware()])
            ```

        !!! example "Retry specific exceptions only"
            ```python
            from requests.exceptions import RequestException, Timeout

            retry = ToolRetryMiddleware(
                max_retries=4,
                retry_on=(RequestException, Timeout),
                backoff_factor=1.5,
            )
            ```

        !!! example "Custom exception filtering"
            ```python
            from requests.exceptions import HTTPError


            def should_retry(exc: Exception) -> bool:
                # Only retry on 5xx errors
                if isinstance(exc, HTTPError):
                    return 500 <= exc.status_code < 600
                return False


            retry = ToolRetryMiddleware(
                max_retries=3,
                retry_on=should_retry,
            )
            ```

        !!! example "Apply to specific tools with custom error handling"
            ```python
            def format_error(exc: Exception) -> str:
                return "Database temporarily unavailable. Please try again later."


            retry = ToolRetryMiddleware(
                max_retries=4,
                tools=["search_database"],
                on_failure=format_error,
            )
            ```

        !!! example "Apply to specific tools using `BaseTool` instances"
            ```python
            from langchain_core.tools import tool


            @tool
            def search_database(query: str) -> str:
                '''Search the database.'''
                return results


            retry = ToolRetryMiddleware(
                max_retries=4,
                tools=[search_database],  # Pass BaseTool instance
            )
            ```

        !!! example "Constant backoff (no exponential growth)"
            ```python
            retry = ToolRetryMiddleware(
                max_retries=5,
                backoff_factor=0.0,  # No exponential growth
                initial_delay=2.0,  # Always wait 2 seconds
            )
            ```

        !!! example "Raise exception on failure"
            ```python
            retry = ToolRetryMiddleware(
                max_retries=2,
                on_failure="error",  # Re-raise exception instead of returning message
            )
            ```
    """

    def __init__(
        self,
        *,
        max_retries: int = 2,
        tools: list[BaseTool | str] | None = None,
        retry_on: RetryOn = (Exception,),
        on_failure: OnFailure = "continue",
        backoff_factor: float = 2.0,
        initial_delay: float = 1.0,
        max_delay: float = 60.0,
        jitter: bool = True,
    ) -> None:
        """Initialize `ToolRetryMiddleware`.

        Args:
            max_retries: Maximum number of retry attempts after the initial call.
                Must be `>= 0`.
            tools: Optional list of tools or tool names to apply retry logic to.
                Can be a list of `BaseTool` instances or tool name strings.
                If `None`, applies to all tools.
            retry_on: Either a tuple of exception types to retry on, or a callable
                that takes an exception and returns `True` if it should be retried.
                Default is to retry on all exceptions.
            on_failure: Behavior when all retries are exhausted.
                Options:

                - `'continue'`: Return a `ToolMessage` with error details,
                    allowing the LLM to handle the failure and potentially recover.
                - `'error'`: Re-raise the exception, stopping agent execution.
                - **Custom callable:** Function that takes the exception and returns a
                    string for the `ToolMessage` content, allowing custom error
                    formatting.

                **Deprecated values** (for backwards compatibility):

                - `'return_message'`: Use `'continue'` instead.
                - `'raise'`: Use `'error'` instead.
            backoff_factor: Multiplier for exponential backoff.
                Each retry waits `initial_delay * (backoff_factor ** retry_number)`
                seconds.
                Set to `0.0` for constant delay.
            initial_delay: Initial delay in seconds before first retry.
            max_delay: Maximum delay in seconds between retries.
                Caps exponential backoff growth.
            jitter: Whether to add random jitter (`±25%`) to delay to avoid thundering herd.

        Raises:
            ValueError: If `max_retries < 0` or delays are negative.
        """
        super().__init__()
        # Validate parameters
        validate_retry_params(max_retries, initial_delay, max_delay, backoff_factor)
        # Handle backwards compatibility for deprecated on_failure values.
        # The rewrite happens BEFORE the value is stored, so the rest of the
        # class only ever sees the canonical 'continue'/'error' spellings.
        if on_failure == "raise":  # type: ignore[comparison-overlap]
            msg = (  # type: ignore[unreachable]
                "on_failure='raise' is deprecated and will be removed in a future version. "
                "Use on_failure='error' instead."
            )
            warnings.warn(msg, DeprecationWarning, stacklevel=2)
            on_failure = "error"
        elif on_failure == "return_message":  # type: ignore[comparison-overlap]
            msg = (  # type: ignore[unreachable]
                "on_failure='return_message' is deprecated and will be removed "
                "in a future version. Use on_failure='continue' instead."
            )
            warnings.warn(msg, DeprecationWarning, stacklevel=2)
            on_failure = "continue"
        self.max_retries = max_retries
        # Extract tool names from BaseTool instances or strings
        self._tool_filter: list[str] | None
        if tools is not None:
            self._tool_filter = [tool.name if not isinstance(tool, str) else tool for tool in tools]
        else:
            self._tool_filter = None
        self.tools = []  # No additional tools registered by this middleware
        # Retry/backoff policy knobs, used identically by the sync and async paths.
        self.retry_on = retry_on
        self.on_failure = on_failure
        self.backoff_factor = backoff_factor
        self.initial_delay = initial_delay
        self.max_delay = max_delay
        self.jitter = jitter

    def _should_retry_tool(self, tool_name: str) -> bool:
        """Check if retry logic should apply to this tool.

        Args:
            tool_name: Name of the tool being called.

        Returns:
            `True` if retry logic should apply, `False` otherwise.
        """
        # No filter configured means the policy applies to every tool.
        if self._tool_filter is None:
            return True
        return tool_name in self._tool_filter

    @staticmethod
    def _format_failure_message(tool_name: str, exc: Exception, attempts_made: int) -> str:
        """Format the failure message when retries are exhausted.

        Args:
            tool_name: Name of the tool that failed.
            exc: The exception that caused the failure.
            attempts_made: Number of attempts actually made.

        Returns:
            Formatted error message string.
        """
        exc_type = type(exc).__name__
        exc_msg = str(exc)
        attempt_word = "attempt" if attempts_made == 1 else "attempts"
        return (
            f"Tool '{tool_name}' failed after {attempts_made} {attempt_word} "
            f"with {exc_type}: {exc_msg}. Please try again."
        )

    def _handle_failure(
        self, tool_name: str, tool_call_id: str | None, exc: Exception, attempts_made: int
    ) -> ToolMessage:
        """Handle failure when all retries are exhausted.

        Args:
            tool_name: Name of the tool that failed.
            tool_call_id: ID of the tool call (may be `None`).
            exc: The exception that caused the failure.
            attempts_made: Number of attempts actually made.

        Returns:
            `ToolMessage` with error details.

        Raises:
            Exception: If `on_failure` is `'error'`, re-raises the exception.
        """
        # 'error' halts the agent by propagating the original exception.
        if self.on_failure == "error":
            raise exc
        # A callable on_failure lets callers craft their own error text.
        if callable(self.on_failure):
            content = self.on_failure(exc)
        else:
            content = self._format_failure_message(tool_name, exc, attempts_made)
        return ToolMessage(
            content=content,
            tool_call_id=tool_call_id,
            name=tool_name,
            status="error",
        )

    def wrap_tool_call(
        self,
        request: ToolCallRequest,
        handler: Callable[[ToolCallRequest], ToolMessage | Command[Any]],
    ) -> ToolMessage | Command[Any]:
        """Intercept tool execution and retry on failure.

        Args:
            request: Tool call request with call dict, `BaseTool`, state, and runtime.
            handler: Callable to execute the tool (can be called multiple times).

        Returns:
            `ToolMessage` or `Command` (the final result).

        Raises:
            RuntimeError: If the retry loop completes without returning. This should not happen.
        """
        # request.tool may be None (e.g. unknown tool); fall back to the call dict.
        tool_name = request.tool.name if request.tool else request.tool_call["name"]
        # Check if retry should apply to this tool
        if not self._should_retry_tool(tool_name):
            return handler(request)
        tool_call_id = request.tool_call["id"]
        # Initial attempt + retries
        for attempt in range(self.max_retries + 1):
            try:
                return handler(request)
            except Exception as exc:
                attempts_made = attempt + 1  # attempt is 0-indexed
                # Check if we should retry this exception
                if not should_retry_exception(exc, self.retry_on):
                    # Exception is not retryable, handle failure immediately
                    return self._handle_failure(tool_name, tool_call_id, exc, attempts_made)
                # Check if we have more retries left
                if attempt < self.max_retries:
                    # Calculate and apply backoff delay
                    delay = calculate_delay(
                        attempt,
                        backoff_factor=self.backoff_factor,
                        initial_delay=self.initial_delay,
                        max_delay=self.max_delay,
                        jitter=self.jitter,
                    )
                    if delay > 0:
                        time.sleep(delay)
                    # Continue to next retry
                else:
                    # No more retries, handle failure
                    return self._handle_failure(tool_name, tool_call_id, exc, attempts_made)
        # Unreachable: loop always returns via handler success or _handle_failure
        msg = "Unexpected: retry loop completed without returning"
        raise RuntimeError(msg)

    async def awrap_tool_call(
        self,
        request: ToolCallRequest,
        handler: Callable[[ToolCallRequest], Awaitable[ToolMessage | Command[Any]]],
    ) -> ToolMessage | Command[Any]:
        """Intercept and control async tool execution with retry logic.

        NOTE: this mirrors `wrap_tool_call` line-for-line; the only differences
        are `await handler(...)` and `asyncio.sleep` instead of `time.sleep`.
        Keep the two implementations in sync when editing either one.

        Args:
            request: Tool call request with call `dict`, `BaseTool`, state, and runtime.
            handler: Async callable to execute the tool and returns `ToolMessage` or
                `Command`.

        Returns:
            `ToolMessage` or `Command` (the final result).

        Raises:
            RuntimeError: If the retry loop completes without returning. This should not happen.
        """
        tool_name = request.tool.name if request.tool else request.tool_call["name"]
        # Check if retry should apply to this tool
        if not self._should_retry_tool(tool_name):
            return await handler(request)
        tool_call_id = request.tool_call["id"]
        # Initial attempt + retries
        for attempt in range(self.max_retries + 1):
            try:
                return await handler(request)
            except Exception as exc:
                attempts_made = attempt + 1  # attempt is 0-indexed
                # Check if we should retry this exception
                if not should_retry_exception(exc, self.retry_on):
                    # Exception is not retryable, handle failure immediately
                    return self._handle_failure(tool_name, tool_call_id, exc, attempts_made)
                # Check if we have more retries left
                if attempt < self.max_retries:
                    # Calculate and apply backoff delay
                    delay = calculate_delay(
                        attempt,
                        backoff_factor=self.backoff_factor,
                        initial_delay=self.initial_delay,
                        max_delay=self.max_delay,
                        jitter=self.jitter,
                    )
                    if delay > 0:
                        await asyncio.sleep(delay)
                    # Continue to next retry
                else:
                    # No more retries, handle failure
                    return self._handle_failure(tool_name, tool_call_id, exc, attempts_made)
        # Unreachable: loop always returns via handler success or _handle_failure
        msg = "Unexpected: retry loop completed without returning"
        raise RuntimeError(msg)
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/langchain/agents/middleware/tool_retry.py",
"license": "MIT License",
"lines": 316,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
langchain-ai/langchain:libs/langchain_v1/tests/unit_tests/agents/test_injected_runtime_create_agent.py | """Test ToolRuntime injection with create_agent.
This module tests the injected runtime functionality when using tools
with the create_agent factory. The ToolRuntime provides tools access to:
- state: Current graph state
- tool_call_id: ID of the current tool call
- config: RunnableConfig for the execution
- context: Runtime context from LangGraph
- store: BaseStore for persistent storage
- stream_writer: For streaming custom output
These tests verify that runtime injection works correctly across both
sync and async execution paths, with middleware, and in various agent
configurations.
"""
from __future__ import annotations
from typing import TYPE_CHECKING, Annotated, Any
from langchain_core.messages import HumanMessage, ToolMessage
from langchain_core.tools import tool
from langgraph.prebuilt import InjectedStore
from langgraph.store.memory import InMemoryStore
from langchain.agents import create_agent
from langchain.agents.middleware.types import AgentMiddleware, AgentState
from langchain.tools import InjectedState, ToolRuntime
from tests.unit_tests.agents.model import FakeToolCallingModel
if TYPE_CHECKING:
from langgraph.runtime import Runtime
def test_tool_runtime_basic_injection() -> None:
    """ToolRuntime is injected into tools created via create_agent."""
    captured: dict[str, Any] = {}

    @tool
    def runtime_tool(x: int, runtime: ToolRuntime) -> str:
        """Tool that accesses runtime context."""
        for attr in ("state", "tool_call_id", "config", "context", "store", "stream_writer"):
            captured[attr] = getattr(runtime, attr)
        return f"Processed {x}"

    assert runtime_tool.args

    agent = create_agent(
        model=FakeToolCallingModel(
            tool_calls=[
                [{"args": {"x": 42}, "id": "call_123", "name": "runtime_tool"}],
                [],
            ]
        ),
        tools=[runtime_tool],
        system_prompt="You are a helpful assistant.",
    )
    result = agent.invoke({"messages": [HumanMessage("Test")]})

    # The tool ran and produced a ToolMessage tied to the fake call id.
    assert len(result["messages"]) == 4
    tool_msg = result["messages"][2]
    assert isinstance(tool_msg, ToolMessage)
    assert tool_msg.content == "Processed 42"
    assert tool_msg.tool_call_id == "call_123"

    # Core runtime attributes were populated.
    assert captured["state"] is not None
    assert "messages" in captured["state"]
    assert captured["tool_call_id"] == "call_123"
    assert captured["config"] is not None
    # context/store/stream_writer may be None, but the keys must exist.
    for optional in ("context", "store", "stream_writer"):
        assert optional in captured
async def test_tool_runtime_async_injection() -> None:
    """ToolRuntime injection also works for async tools."""
    captured: dict[str, Any] = {}

    @tool
    async def async_runtime_tool(x: int, runtime: ToolRuntime) -> str:
        """Async tool that accesses runtime context."""
        captured["state"] = runtime.state
        captured["tool_call_id"] = runtime.tool_call_id
        captured["config"] = runtime.config
        return f"Async processed {x}"

    agent = create_agent(
        model=FakeToolCallingModel(
            tool_calls=[
                [{"args": {"x": 99}, "id": "async_call_456", "name": "async_runtime_tool"}],
                [],
            ]
        ),
        tools=[async_runtime_tool],
        system_prompt="You are a helpful assistant.",
    )
    result = await agent.ainvoke({"messages": [HumanMessage("Test async")]})

    # The async tool ran and produced the expected ToolMessage.
    assert len(result["messages"]) == 4
    tool_msg = result["messages"][2]
    assert isinstance(tool_msg, ToolMessage)
    assert tool_msg.content == "Async processed 99"
    assert tool_msg.tool_call_id == "async_call_456"

    # Runtime attributes were populated on the async path too.
    assert captured["state"] is not None
    assert "messages" in captured["state"]
    assert captured["tool_call_id"] == "async_call_456"
    assert captured["config"] is not None
def test_tool_runtime_state_access() -> None:
    """Tools can read graph state through runtime.state."""

    @tool
    def state_aware_tool(query: str, runtime: ToolRuntime) -> str:
        """Tool that uses state to provide context-aware responses."""
        msg_count = len(runtime.state.get("messages", []))
        return f"Query: {query}, Message count: {msg_count}"

    agent = create_agent(
        model=FakeToolCallingModel(
            tool_calls=[
                [{"args": {"query": "test"}, "id": "state_call", "name": "state_aware_tool"}],
                [],
            ]
        ),
        tools=[state_aware_tool],
        system_prompt="You are a helpful assistant.",
    )
    result = agent.invoke({"messages": [HumanMessage("Hello"), HumanMessage("World")]})

    tool_msg = result["messages"][3]
    assert isinstance(tool_msg, ToolMessage)
    # 2 human messages + 1 AI tool-call message were in state at tool time.
    assert "Message count: 3" in tool_msg.content
def test_tool_runtime_with_store() -> None:
    """runtime.store is exposed to tools.

    create_agent does not currently accept a store parameter, so runtime.store
    is None here; the test still proves the injection path is wired up.
    """

    @tool
    def store_tool(key: str, value: str, runtime: ToolRuntime) -> str:
        """Tool that uses store."""
        if runtime.store is None:
            return f"No store (key={key}, value={value})"
        runtime.store.put(("test",), key, {"data": value})
        return f"Stored {key}={value}"

    @tool
    def check_runtime_tool(runtime: ToolRuntime) -> str:
        """Tool that checks runtime availability."""
        has_store = runtime.store is not None
        has_context = runtime.context is not None
        return f"Runtime: store={has_store}, context={has_context}"

    agent = create_agent(
        model=FakeToolCallingModel(
            tool_calls=[
                [{"args": {"key": "foo", "value": "bar"}, "id": "call_1", "name": "store_tool"}],
                [{"args": {}, "id": "call_2", "name": "check_runtime_tool"}],
                [],
            ]
        ),
        tools=[store_tool, check_runtime_tool],
        system_prompt="You are a helpful assistant.",
    )
    result = agent.invoke({"messages": [HumanMessage("Test store")]})

    tool_msgs = [m for m in result["messages"] if isinstance(m, ToolMessage)]
    assert len(tool_msgs) == 2
    # No store is wired up by create_agent, so the first tool reports its absence.
    assert "No store" in tool_msgs[0].content
    # The second tool still received an injected runtime.
    assert "Runtime:" in tool_msgs[1].content
def test_tool_runtime_with_multiple_tools() -> None:
    """Every tool in an agent gets its own injected ToolRuntime."""
    log: list[tuple[str, str | None, int | str]] = []

    @tool
    def tool_a(x: int, runtime: ToolRuntime) -> str:
        """First tool."""
        log.append(("tool_a", runtime.tool_call_id, x))
        return f"A: {x}"

    @tool
    def tool_b(y: str, runtime: ToolRuntime) -> str:
        """Second tool."""
        log.append(("tool_b", runtime.tool_call_id, y))
        return f"B: {y}"

    agent = create_agent(
        model=FakeToolCallingModel(
            tool_calls=[
                [
                    {"args": {"x": 1}, "id": "call_a", "name": "tool_a"},
                    {"args": {"y": "test"}, "id": "call_b", "name": "tool_b"},
                ],
                [],
            ]
        ),
        tools=[tool_a, tool_b],
        system_prompt="You are a helpful assistant.",
    )
    result = agent.invoke({"messages": [HumanMessage("Use both tools")]})

    # Both tools fired; execution may be parallel, so compare as sets.
    assert len(log) == 2
    seen_calls = {(name, call_id) for name, call_id, _ in log}
    assert ("tool_a", "call_a") in seen_calls
    assert ("tool_b", "call_b") in seen_calls

    tool_msgs = [m for m in result["messages"] if isinstance(m, ToolMessage)]
    assert len(tool_msgs) == 2
    contents = {m.content for m in tool_msgs}
    assert "A: 1" in contents
    assert "B: test" in contents
def test_tool_runtime_config_access() -> None:
    """Tools can inspect the RunnableConfig via runtime.config."""
    seen: dict[str, Any] = {}

    @tool
    def config_tool(x: int, runtime: ToolRuntime) -> str:
        """Tool that accesses config."""
        cfg = runtime.config
        seen["config_exists"] = cfg is not None
        seen["has_configurable"] = "configurable" in cfg if cfg else False
        if cfg:
            # Exact keys depend on the execution context (run_id, tags, ...).
            seen["config_keys"] = list(cfg.keys())
        return f"Config accessed for {x}"

    agent = create_agent(
        model=FakeToolCallingModel(
            tool_calls=[
                [{"args": {"x": 5}, "id": "config_call", "name": "config_tool"}],
                [],
            ]
        ),
        tools=[config_tool],
        system_prompt="You are a helpful assistant.",
    )
    result = agent.invoke({"messages": [HumanMessage("Test config")]})

    # The config object was present and inspectable inside the tool.
    assert seen["config_exists"] is True
    assert "config_keys" in seen

    tool_msg = result["messages"][2]
    assert isinstance(tool_msg, ToolMessage)
    assert tool_msg.content == "Config accessed for 5"
def test_tool_runtime_with_custom_state() -> None:
    """runtime.state includes fields from a middleware-supplied schema."""

    class CustomState(AgentState[Any]):
        custom_field: str

    class CustomMiddleware(AgentMiddleware):
        state_schema = CustomState

    seen: dict[str, Any] = {}

    @tool
    def custom_state_tool(x: int, runtime: ToolRuntime) -> str:
        """Tool that accesses custom state."""
        seen["custom_field"] = runtime.state.get("custom_field", "not found")
        return f"Custom: {x}"

    agent = create_agent(
        model=FakeToolCallingModel(
            tool_calls=[
                [{"args": {"x": 10}, "id": "custom_call", "name": "custom_state_tool"}],
                [],
            ]
        ),
        tools=[custom_state_tool],
        system_prompt="You are a helpful assistant.",
        middleware=[CustomMiddleware()],
    )
    result = agent.invoke(
        {
            "messages": [HumanMessage("Test custom state")],
            "custom_field": "custom_value",
        }
    )

    # The tool saw the custom field supplied at invoke time.
    assert seen["custom_field"] == "custom_value"
    tool_msg = result["messages"][2]
    assert isinstance(tool_msg, ToolMessage)
    assert tool_msg.content == "Custom: 10"
def test_tool_runtime_no_runtime_parameter() -> None:
    """Tools without a runtime parameter coexist with tools that have one."""

    @tool
    def regular_tool(x: int) -> str:
        """Regular tool without runtime."""
        return f"Regular: {x}"

    @tool
    def runtime_tool(y: int, runtime: ToolRuntime) -> str:
        """Tool with runtime."""
        return f"Runtime: {y}, call_id: {runtime.tool_call_id}"

    agent = create_agent(
        model=FakeToolCallingModel(
            tool_calls=[
                [
                    {"args": {"x": 1}, "id": "regular_call", "name": "regular_tool"},
                    {"args": {"y": 2}, "id": "runtime_call", "name": "runtime_tool"},
                ],
                [],
            ]
        ),
        tools=[regular_tool, runtime_tool],
        system_prompt="You are a helpful assistant.",
    )
    result = agent.invoke({"messages": [HumanMessage("Test mixed tools")]})

    # Both tools executed; the runtime-aware one saw its call id.
    tool_msgs = [m for m in result["messages"] if isinstance(m, ToolMessage)]
    assert len(tool_msgs) == 2
    assert tool_msgs[0].content == "Regular: 1"
    assert "Runtime: 2, call_id: runtime_call" in tool_msgs[1].content
async def test_tool_runtime_parallel_execution() -> None:
    """Injection is correct even when tools run in the same step."""
    log: list[tuple[str, str | None, int]] = []

    @tool
    async def parallel_tool_1(x: int, runtime: ToolRuntime) -> str:
        """First parallel tool."""
        log.append(("tool_1", runtime.tool_call_id, x))
        return f"Tool1: {x}"

    @tool
    async def parallel_tool_2(y: int, runtime: ToolRuntime) -> str:
        """Second parallel tool."""
        log.append(("tool_2", runtime.tool_call_id, y))
        return f"Tool2: {y}"

    agent = create_agent(
        model=FakeToolCallingModel(
            tool_calls=[
                [
                    {"args": {"x": 10}, "id": "parallel_1", "name": "parallel_tool_1"},
                    {"args": {"y": 20}, "id": "parallel_2", "name": "parallel_tool_2"},
                ],
                [],
            ]
        ),
        tools=[parallel_tool_1, parallel_tool_2],
        system_prompt="You are a helpful assistant.",
    )
    result = await agent.ainvoke({"messages": [HumanMessage("Run parallel")]})

    # Both tools executed.
    assert len(log) == 2

    # Ordering is not guaranteed under parallel execution, so assert on sets.
    tool_msgs = [m for m in result["messages"] if isinstance(m, ToolMessage)]
    assert len(tool_msgs) == 2
    contents = {m.content for m in tool_msgs}
    assert "Tool1: 10" in contents
    assert "Tool2: 20" in contents
    call_ids = {m.tool_call_id for m in tool_msgs}
    assert "parallel_1" in call_ids
    assert "parallel_2" in call_ids
def test_tool_runtime_error_handling() -> None:
    """Test error handling with ToolRuntime injection.

    create_agent's default error handling does not catch tool exceptions,
    so the tool reports failures through its return value instead of raising.
    (The original version also defined an unused raising tool and wrapped a
    non-raising `if` in a dead `try/except`; both removed.)
    """

    @tool
    def safe_tool(x: int, runtime: ToolRuntime) -> str:
        """Tool that handles errors safely."""
        # Touch the runtime to confirm injection happens on every call path,
        # including the error-reporting one.
        _ = runtime.tool_call_id
        if x == 0:
            return "Error: Cannot process zero"
        return f"Processed: {x}"

    agent = create_agent(
        model=FakeToolCallingModel(
            tool_calls=[
                [{"args": {"x": 0}, "id": "error_call", "name": "safe_tool"}],
                [{"args": {"x": 5}, "id": "success_call", "name": "safe_tool"}],
                [],
            ]
        ),
        tools=[safe_tool],
        system_prompt="You are a helpful assistant.",
    )
    result = agent.invoke({"messages": [HumanMessage("Test error handling")]})

    # Both tool calls complete: one error report, one success.
    tool_messages = [msg for msg in result["messages"] if isinstance(msg, ToolMessage)]
    assert len(tool_messages) == 2
    assert "Error:" in tool_messages[0].content or "Cannot process zero" in tool_messages[0].content
    assert "Processed: 5" in tool_messages[1].content
def test_tool_runtime_with_middleware() -> None:
    """Runtime injection is unaffected by before/after-model middleware."""
    hook_order: list[str] = []
    tool_invocations: list[tuple[str, str | None]] = []

    class TestMiddleware(AgentMiddleware):
        def before_model(self, state: AgentState[Any], runtime: Runtime) -> dict[str, Any]:
            hook_order.append("before_model")
            return {}

        def after_model(self, state: AgentState[Any], runtime: Runtime) -> dict[str, Any]:
            hook_order.append("after_model")
            return {}

    @tool
    def middleware_tool(x: int, runtime: ToolRuntime) -> str:
        """Tool with runtime in middleware agent."""
        tool_invocations.append(("middleware_tool", runtime.tool_call_id))
        return f"Middleware result: {x}"

    agent = create_agent(
        model=FakeToolCallingModel(
            tool_calls=[
                [{"args": {"x": 7}, "id": "mw_call", "name": "middleware_tool"}],
                [],
            ]
        ),
        tools=[middleware_tool],
        system_prompt="You are a helpful assistant.",
        middleware=[TestMiddleware()],
    )
    result = agent.invoke({"messages": [HumanMessage("Test with middleware")]})

    # Middleware hooks fired around the model call.
    assert "before_model" in hook_order
    assert "after_model" in hook_order
    # The tool still received an injected runtime.
    assert tool_invocations == [("middleware_tool", "mw_call")]

    tool_msg = result["messages"][2]
    assert isinstance(tool_msg, ToolMessage)
    assert tool_msg.content == "Middleware result: 7"
def test_tool_runtime_type_hints() -> None:
    """runtime.state exposes the standard state fields to tools."""
    observed: dict[str, int] = {}

    # ToolRuntime is used without generic parameters to avoid forward-ref issues.
    @tool
    def typed_runtime_tool(x: int, runtime: ToolRuntime) -> str:
        """Tool with runtime access."""
        observed["message_count"] = len(runtime.state.get("messages", []))
        return f"Typed: {x}"

    agent = create_agent(
        model=FakeToolCallingModel(
            tool_calls=[
                [{"args": {"x": 3}, "id": "typed_call", "name": "typed_runtime_tool"}],
                [],
            ]
        ),
        tools=[typed_runtime_tool],
        system_prompt="You are a helpful assistant.",
    )
    result = agent.invoke({"messages": [HumanMessage("Test")]})

    # Human + AI (tool call) were in state when the tool ran.
    assert observed["message_count"] == 2
    tool_msg = result["messages"][2]
    assert isinstance(tool_msg, ToolMessage)
    assert tool_msg.content == "Typed: 3"
def test_tool_runtime_name_based_injection() -> None:
    """Test that parameter named 'runtime' gets injected without type annotation."""
    seen: dict[str, Any] = {}

    @tool
    def name_based_tool(x: int, runtime: Any) -> str:
        """Tool with 'runtime' parameter without ToolRuntime type annotation."""
        # Even though the annotation is Any, injection keys off the name.
        seen["is_tool_runtime"] = isinstance(runtime, ToolRuntime)
        seen["has_state"] = hasattr(runtime, "state")
        seen["has_tool_call_id"] = hasattr(runtime, "tool_call_id")
        if seen["has_tool_call_id"]:
            seen["tool_call_id"] = runtime.tool_call_id
        if seen["has_state"]:
            seen["state"] = runtime.state
        return f"Processed {x}"

    agent = create_agent(
        model=FakeToolCallingModel(
            tool_calls=[
                [{"args": {"x": 42}, "id": "name_call_123", "name": "name_based_tool"}],
                [],
            ]
        ),
        tools=[name_based_tool],
        system_prompt="You are a helpful assistant.",
    )
    result = agent.invoke({"messages": [HumanMessage("Test")]})

    # The tool executed and produced the third message in the transcript.
    assert len(result["messages"]) == 4
    tool_message = result["messages"][2]
    assert isinstance(tool_message, ToolMessage)
    assert tool_message.content == "Processed 42"
    # Runtime was injected purely from the parameter name.
    assert seen["is_tool_runtime"] is True
    assert seen["has_state"] is True
    assert seen["has_tool_call_id"] is True
    assert seen["tool_call_id"] == "name_call_123"
    assert seen["state"] is not None
    assert "messages" in seen["state"]
def test_combined_injected_state_runtime_store() -> None:
    """Test that all injection mechanisms work together in create_agent.

    This test verifies that a tool can receive injected state, tool runtime,
    and injected store simultaneously when specified in the function signature
    but not in the explicit args schema. This is modeled after the pattern
    from mre.py where multiple injection types are combined.
    """
    # Track what was injected
    injected_data: dict[str, Any] = {}

    # Custom state schema with additional fields
    class CustomState(AgentState[Any]):
        user_id: str
        session_id: str

    # Define explicit args schema that only includes LLM-controlled parameters
    # (injected parameters must NOT appear here, or the LLM would try to
    # supply values for them).
    weather_schema = {
        "type": "object",
        "properties": {
            "location": {"type": "string", "description": "The location to get weather for"},
        },
        "required": ["location"],
    }

    @tool(args_schema=weather_schema)
    def multi_injection_tool(
        location: str,
        state: Annotated[Any, InjectedState],
        runtime: ToolRuntime,
        store: Annotated[Any, InjectedStore()],
    ) -> str:
        """Tool that uses injected state, runtime, and store together.

        Args:
            location: The location to get weather for (LLM-controlled).
            state: The graph state (injected).
            runtime: The tool runtime context (injected).
            store: The persistent store (injected).
        """
        # Capture all injected parameters
        injected_data["state"] = state
        injected_data["user_id"] = state.get("user_id", "unknown")
        injected_data["session_id"] = state.get("session_id", "unknown")
        injected_data["runtime"] = runtime
        injected_data["tool_call_id"] = runtime.tool_call_id
        injected_data["store"] = store
        injected_data["store_is_none"] = store is None
        # Verify runtime.state matches the state parameter
        injected_data["runtime_state_matches"] = runtime.state == state
        return f"Weather info for {location}"

    # Create model that calls the tool
    model = FakeToolCallingModel(
        tool_calls=[
            [
                {
                    "name": "multi_injection_tool",
                    "args": {"location": "San Francisco"},  # Only LLM-controlled arg
                    "id": "call_weather_123",
                }
            ],
            [],  # End the loop
        ]
    )

    # Create agent with custom state and store
    agent = create_agent(
        model=model,
        tools=[multi_injection_tool],
        state_schema=CustomState,
        store=InMemoryStore(),
    )

    # Verify the tool's args schema only includes LLM-controlled parameters
    tool_args_schema = multi_injection_tool.args_schema
    assert isinstance(tool_args_schema, dict)
    assert "location" in tool_args_schema["properties"]
    assert "state" not in tool_args_schema["properties"]
    assert "runtime" not in tool_args_schema["properties"]
    assert "store" not in tool_args_schema["properties"]

    # Invoke with custom state fields
    result = agent.invoke(
        {
            "messages": [HumanMessage("What's the weather like?")],
            "user_id": "user_42",
            "session_id": "session_abc123",
        }
    )

    # Verify tool executed successfully
    tool_messages = [msg for msg in result["messages"] if isinstance(msg, ToolMessage)]
    assert len(tool_messages) == 1
    tool_message = tool_messages[0]
    assert tool_message.content == "Weather info for San Francisco"
    assert tool_message.tool_call_id == "call_weather_123"

    # Verify all injections worked correctly
    assert injected_data["state"] is not None
    assert "messages" in injected_data["state"]
    # Verify custom state fields were accessible
    assert injected_data["user_id"] == "user_42"
    assert injected_data["session_id"] == "session_abc123"
    # Verify runtime was injected
    assert injected_data["runtime"] is not None
    assert injected_data["tool_call_id"] == "call_weather_123"
    # Verify store was injected
    assert injected_data["store_is_none"] is False
    assert injected_data["store"] is not None
    # Verify runtime.state matches the injected state
    assert injected_data["runtime_state_matches"] is True
async def test_combined_injected_state_runtime_store_async() -> None:
    """Test that all injection mechanisms work together in async execution.

    This async version verifies that injected state, tool runtime, and injected
    store all work correctly with async tools in create_agent.
    """
    # Track what was injected
    injected_data: dict[str, Any] = {}

    # Custom state schema
    class CustomState(AgentState[Any]):
        api_key: str
        request_id: str

    # Define explicit args schema that only includes LLM-controlled parameters
    # Note: state, runtime, and store are NOT in this schema
    search_schema = {
        "type": "object",
        "properties": {
            "query": {"type": "string", "description": "The search query"},
            "max_results": {"type": "integer", "description": "Maximum number of results"},
        },
        "required": ["query", "max_results"],
    }

    @tool(args_schema=search_schema)
    async def async_multi_injection_tool(
        query: str,
        max_results: int,
        state: Annotated[Any, InjectedState],
        runtime: ToolRuntime,
        store: Annotated[Any, InjectedStore()],
    ) -> str:
        """Async tool with multiple injection types.

        Args:
            query: The search query (LLM-controlled).
            max_results: Maximum number of results (LLM-controlled).
            state: The graph state (injected).
            runtime: The tool runtime context (injected).
            store: The persistent store (injected).
        """
        # Capture all injected parameters
        injected_data["state"] = state
        injected_data["api_key"] = state.get("api_key", "unknown")
        injected_data["request_id"] = state.get("request_id", "unknown")
        injected_data["runtime"] = runtime
        injected_data["tool_call_id"] = runtime.tool_call_id
        injected_data["config"] = runtime.config
        injected_data["store"] = store
        # Verify we can write to the store
        if store is not None:
            await store.aput(("test", "namespace"), "test_key", {"query": query})
            # Read back to verify it worked
            item = await store.aget(("test", "namespace"), "test_key")
            injected_data["store_write_success"] = item is not None
        return f"Found {max_results} results for '{query}'"

    # Create model that calls the async tool
    model = FakeToolCallingModel(
        tool_calls=[
            [
                {
                    "name": "async_multi_injection_tool",
                    "args": {"query": "test search", "max_results": 10},
                    "id": "call_search_456",
                }
            ],
            [],
        ]
    )

    # Create agent with custom state and store
    agent = create_agent(
        model=model,
        tools=[async_multi_injection_tool],
        state_schema=CustomState,
        store=InMemoryStore(),
    )

    # Verify the tool's args schema only includes LLM-controlled parameters
    tool_args_schema = async_multi_injection_tool.args_schema
    assert isinstance(tool_args_schema, dict)
    assert "query" in tool_args_schema["properties"]
    assert "max_results" in tool_args_schema["properties"]
    assert "state" not in tool_args_schema["properties"]
    assert "runtime" not in tool_args_schema["properties"]
    assert "store" not in tool_args_schema["properties"]

    # Invoke async
    result = await agent.ainvoke(
        {
            "messages": [HumanMessage("Search for something")],
            "api_key": "sk-test-key-xyz",
            "request_id": "req_999",
        }
    )

    # Verify tool executed successfully
    tool_messages = [msg for msg in result["messages"] if isinstance(msg, ToolMessage)]
    assert len(tool_messages) == 1
    tool_message = tool_messages[0]
    assert tool_message.content == "Found 10 results for 'test search'"
    assert tool_message.tool_call_id == "call_search_456"

    # Verify all injections worked correctly
    assert injected_data["state"] is not None
    assert injected_data["api_key"] == "sk-test-key-xyz"
    assert injected_data["request_id"] == "req_999"
    # Verify runtime was injected
    assert injected_data["runtime"] is not None
    assert injected_data["tool_call_id"] == "call_search_456"
    assert injected_data["config"] is not None
    # Verify store was injected and writable
    assert injected_data["store"] is not None
    assert injected_data["store_write_success"] is True
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/tests/unit_tests/agents/test_injected_runtime_create_agent.py",
"license": "MIT License",
"lines": 682,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/partners/openai/langchain_openai/middleware/openai_moderation.py | """Agent middleware that integrates OpenAI's moderation endpoint."""
from __future__ import annotations
import json
from collections.abc import Sequence
from typing import TYPE_CHECKING, Any, Literal, cast
from langchain.agents.middleware.types import AgentMiddleware, AgentState, hook_config
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, ToolMessage
from openai import AsyncOpenAI, OpenAI
from openai.types import Moderation, ModerationModel
if TYPE_CHECKING: # pragma: no cover
from langgraph.runtime import Runtime
ViolationStage = Literal["input", "output", "tool"]
DEFAULT_VIOLATION_TEMPLATE = (
"I'm sorry, but I can't comply with that request. It was flagged for {categories}."
)
class OpenAIModerationError(RuntimeError):
    """Raised when OpenAI flags content and `exit_behavior` is set to ``"error"``."""

    def __init__(
        self,
        *,
        content: str,
        stage: ViolationStage,
        result: Moderation,
        message: str,
    ) -> None:
        """Capture the flagged content alongside the moderation verdict.

        Args:
            content: The content that was flagged.
            stage: The stage where the violation occurred.
            result: The moderation result from OpenAI.
            message: The error message.
        """
        # The message doubles as str(exception) for callers that just log it.
        super().__init__(message)
        self.result = result
        self.stage = stage
        self.content = content
class OpenAIModerationMiddleware(AgentMiddleware[AgentState[Any], Any]):
"""Moderate agent traffic using OpenAI's moderation endpoint."""
    def __init__(
        self,
        *,
        model: ModerationModel = "omni-moderation-latest",
        check_input: bool = True,
        check_output: bool = True,
        check_tool_results: bool = False,
        exit_behavior: Literal["error", "end", "replace"] = "end",
        violation_message: str | None = None,
        client: OpenAI | None = None,
        async_client: AsyncOpenAI | None = None,
    ) -> None:
        """Create the middleware instance.

        Args:
            model: OpenAI moderation model to use.
            check_input: Whether to check user input messages.
            check_output: Whether to check model output messages.
            check_tool_results: Whether to check tool result messages.
            exit_behavior: How to handle violations
                (`'error'`, `'end'`, or `'replace'`).
            violation_message: Custom template for violation messages. May use
                the `{categories}`, `{category_scores}`, and
                `{original_content}` placeholders.
            client: Optional pre-configured OpenAI client to reuse.
                If not provided, a new client will be created.
            async_client: Optional pre-configured AsyncOpenAI client to reuse.
                If not provided, a new async client will be created.
        """
        super().__init__()
        self.model = model
        self.check_input = check_input
        self.check_output = check_output
        self.check_tool_results = check_tool_results
        self.exit_behavior = exit_behavior
        # None falls back to DEFAULT_VIOLATION_TEMPLATE at format time.
        self.violation_message = violation_message
        # Clients are created lazily on first moderation call when not given.
        self._client = client
        self._async_client = async_client
@hook_config(can_jump_to=["end"])
def before_model(
self, state: AgentState[Any], runtime: Runtime[Any]
) -> dict[str, Any] | None: # type: ignore[override]
"""Moderate user input and tool results before the model is called.
Args:
state: Current agent state containing messages.
runtime: Agent runtime context.
Returns:
Updated state with moderated messages, or `None` if no changes.
"""
if not self.check_input and not self.check_tool_results:
return None
messages = list(state.get("messages", []))
if not messages:
return None
return self._moderate_inputs(messages)
@hook_config(can_jump_to=["end"])
def after_model(
self, state: AgentState[Any], runtime: Runtime[Any]
) -> dict[str, Any] | None: # type: ignore[override]
"""Moderate model output after the model is called.
Args:
state: Current agent state containing messages.
runtime: Agent runtime context.
Returns:
Updated state with moderated messages, or `None` if no changes.
"""
if not self.check_output:
return None
messages = list(state.get("messages", []))
if not messages:
return None
return self._moderate_output(messages)
@hook_config(can_jump_to=["end"])
async def abefore_model(
self, state: AgentState[Any], runtime: Runtime[Any]
) -> dict[str, Any] | None: # type: ignore[override]
"""Async version of before_model.
Args:
state: Current agent state containing messages.
runtime: Agent runtime context.
Returns:
Updated state with moderated messages, or `None` if no changes.
"""
if not self.check_input and not self.check_tool_results:
return None
messages = list(state.get("messages", []))
if not messages:
return None
return await self._amoderate_inputs(messages)
@hook_config(can_jump_to=["end"])
async def aafter_model(
self, state: AgentState[Any], runtime: Runtime[Any]
) -> dict[str, Any] | None: # type: ignore[override]
"""Async version of after_model.
Args:
state: Current agent state containing messages.
runtime: Agent runtime context.
Returns:
Updated state with moderated messages, or `None` if no changes.
"""
if not self.check_output:
return None
messages = list(state.get("messages", []))
if not messages:
return None
return await self._amoderate_output(messages)
    def _moderate_inputs(
        self, messages: Sequence[BaseMessage]
    ) -> dict[str, Any] | None:
        """Run tool-result and user-input moderation passes in sequence.

        Tool results are checked first, then the latest user message.
        A ``jump_to`` action from either pass short-circuits immediately;
        otherwise message rewrites from both passes are accumulated.
        """
        working = list(messages)
        modified = False
        if self.check_tool_results:
            action = self._moderate_tool_messages(working)
            if action:
                if "jump_to" in action:
                    # "end" behavior aborts the run; skip the input pass.
                    return action
                working = cast("list[BaseMessage]", action["messages"])
                modified = True
        if self.check_input:
            # The user-message pass sees any rewrites from the tool pass.
            action = self._moderate_user_message(working)
            if action:
                if "jump_to" in action:
                    return action
                working = cast("list[BaseMessage]", action["messages"])
                modified = True
        if modified:
            return {"messages": working}
        return None
    async def _amoderate_inputs(
        self, messages: Sequence[BaseMessage]
    ) -> dict[str, Any] | None:
        """Async counterpart of `_moderate_inputs`.

        Same two-pass flow (tool results, then user input); a ``jump_to``
        action short-circuits, rewrites accumulate across passes.
        """
        working = list(messages)
        modified = False
        if self.check_tool_results:
            action = await self._amoderate_tool_messages(working)
            if action:
                if "jump_to" in action:
                    # "end" behavior aborts the run; skip the input pass.
                    return action
                working = cast("list[BaseMessage]", action["messages"])
                modified = True
        if self.check_input:
            # The user-message pass sees any rewrites from the tool pass.
            action = await self._amoderate_user_message(working)
            if action:
                if "jump_to" in action:
                    return action
                working = cast("list[BaseMessage]", action["messages"])
                modified = True
        if modified:
            return {"messages": working}
        return None
def _moderate_output(
self, messages: Sequence[BaseMessage]
) -> dict[str, Any] | None:
last_ai_idx = self._find_last_index(messages, AIMessage)
if last_ai_idx is None:
return None
ai_message = messages[last_ai_idx]
text = self._extract_text(ai_message)
if not text:
return None
result = self._moderate(text)
if not result.flagged:
return None
return self._apply_violation(
messages, index=last_ai_idx, stage="output", content=text, result=result
)
async def _amoderate_output(
self, messages: Sequence[BaseMessage]
) -> dict[str, Any] | None:
last_ai_idx = self._find_last_index(messages, AIMessage)
if last_ai_idx is None:
return None
ai_message = messages[last_ai_idx]
text = self._extract_text(ai_message)
if not text:
return None
result = await self._amoderate(text)
if not result.flagged:
return None
return self._apply_violation(
messages, index=last_ai_idx, stage="output", content=text, result=result
)
    def _moderate_tool_messages(
        self, messages: Sequence[BaseMessage]
    ) -> dict[str, Any] | None:
        """Moderate every `ToolMessage` after the most recent `AIMessage`.

        Only tool results from the latest tool-calling round are inspected.
        Returns a ``{"messages": ...}`` update when any were rewritten, a
        ``jump_to`` action if a violation ends the run, or `None`.
        """
        last_ai_idx = self._find_last_index(messages, AIMessage)
        if last_ai_idx is None:
            # No AI turn yet, so there are no tool results to check.
            return None
        working = list(messages)
        modified = False
        for idx in range(last_ai_idx + 1, len(working)):
            msg = working[idx]
            if not isinstance(msg, ToolMessage):
                continue
            text = self._extract_text(msg)
            if not text:
                continue
            result = self._moderate(text)
            if not result.flagged:
                continue
            action = self._apply_violation(
                working, index=idx, stage="tool", content=text, result=result
            )
            if action:
                if "jump_to" in action:
                    # "end" behavior aborts immediately; remaining tool
                    # messages are not inspected. ("error" raises inside
                    # _apply_violation.)
                    return action
                # "replace" behavior: continue scanning the rewritten list.
                working = cast("list[BaseMessage]", action["messages"])
                modified = True
        if modified:
            return {"messages": working}
        return None
    async def _amoderate_tool_messages(
        self, messages: Sequence[BaseMessage]
    ) -> dict[str, Any] | None:
        """Async counterpart of `_moderate_tool_messages`.

        Same scan over tool results following the latest `AIMessage`, using
        the async moderation client.
        """
        last_ai_idx = self._find_last_index(messages, AIMessage)
        if last_ai_idx is None:
            # No AI turn yet, so there are no tool results to check.
            return None
        working = list(messages)
        modified = False
        for idx in range(last_ai_idx + 1, len(working)):
            msg = working[idx]
            if not isinstance(msg, ToolMessage):
                continue
            text = self._extract_text(msg)
            if not text:
                continue
            result = await self._amoderate(text)
            if not result.flagged:
                continue
            action = self._apply_violation(
                working, index=idx, stage="tool", content=text, result=result
            )
            if action:
                if "jump_to" in action:
                    # "end" behavior aborts immediately; remaining tool
                    # messages are not inspected.
                    return action
                # "replace" behavior: continue scanning the rewritten list.
                working = cast("list[BaseMessage]", action["messages"])
                modified = True
        if modified:
            return {"messages": working}
        return None
def _moderate_user_message(
self, messages: Sequence[BaseMessage]
) -> dict[str, Any] | None:
idx = self._find_last_index(messages, HumanMessage)
if idx is None:
return None
message = messages[idx]
text = self._extract_text(message)
if not text:
return None
result = self._moderate(text)
if not result.flagged:
return None
return self._apply_violation(
messages, index=idx, stage="input", content=text, result=result
)
async def _amoderate_user_message(
self, messages: Sequence[BaseMessage]
) -> dict[str, Any] | None:
idx = self._find_last_index(messages, HumanMessage)
if idx is None:
return None
message = messages[idx]
text = self._extract_text(message)
if not text:
return None
result = await self._amoderate(text)
if not result.flagged:
return None
return self._apply_violation(
messages, index=idx, stage="input", content=text, result=result
)
    def _apply_violation(
        self,
        messages: Sequence[BaseMessage],
        *,
        index: int | None,
        stage: ViolationStage,
        content: str,
        result: Moderation,
    ) -> dict[str, Any] | None:
        """Translate a flagged moderation result into the configured action.

        Args:
            messages: The message list being moderated.
            index: Position of the offending message, or `None` if unknown.
            stage: Which pipeline stage flagged the content.
            content: The text that was flagged.
            result: The raw moderation verdict.

        Returns:
            A ``jump_to`` action (``"end"``), a rewritten message list
            (``"replace"``), or `None` when replacing with no index.

        Raises:
            OpenAIModerationError: When ``exit_behavior == "error"``.
        """
        violation_text = self._format_violation_message(content, result)
        if self.exit_behavior == "error":
            raise OpenAIModerationError(
                content=content,
                stage=stage,
                result=result,
                message=violation_text,
            )
        if self.exit_behavior == "end":
            # Stop the agent and answer with the violation notice instead.
            return {"jump_to": "end", "messages": [AIMessage(content=violation_text)]}
        if index is None:
            return None
        # "replace": rewrite the offending message's content in place,
        # keeping its other fields (e.g. id, tool_call_id) via model_copy.
        new_messages = list(messages)
        original = new_messages[index]
        new_messages[index] = cast(
            BaseMessage, original.model_copy(update={"content": violation_text})
        )
        return {"messages": new_messages}
def _moderate(self, text: str) -> Moderation:
if self._client is None:
self._client = self._build_client()
response = self._client.moderations.create(model=self.model, input=text)
return response.results[0]
async def _amoderate(self, text: str) -> Moderation:
if self._async_client is None:
self._async_client = self._build_async_client()
response = await self._async_client.moderations.create(
model=self.model, input=text
)
return response.results[0]
def _build_client(self) -> OpenAI:
self._client = OpenAI()
return self._client
def _build_async_client(self) -> AsyncOpenAI:
self._async_client = AsyncOpenAI()
return self._async_client
def _format_violation_message(self, content: str, result: Moderation) -> str:
# Convert categories to dict and filter for flagged items
categories_dict = result.categories.model_dump()
categories = [
name.replace("_", " ")
for name, flagged in categories_dict.items()
if flagged
]
category_label = (
", ".join(categories) if categories else "OpenAI's safety policies"
)
template = self.violation_message or DEFAULT_VIOLATION_TEMPLATE
scores_json = json.dumps(result.category_scores.model_dump(), sort_keys=True)
try:
message = template.format(
categories=category_label,
category_scores=scores_json,
original_content=content,
)
except KeyError:
message = template
return message
def _find_last_index(
self, messages: Sequence[BaseMessage], message_type: type[BaseMessage]
) -> int | None:
for idx in range(len(messages) - 1, -1, -1):
if isinstance(messages[idx], message_type):
return idx
return None
    def _extract_text(self, message: BaseMessage) -> str | None:
        """Best-effort extraction of moderatable text from a message.

        Returns `None` when there is nothing to moderate (no content, or an
        empty text body).
        """
        if message.content is None:
            return None
        # Prefer the message's `text` accessor; fall back to stringifying the
        # raw content (e.g. list-of-blocks content).
        # NOTE(review): assumes `message.text` is a property returning str
        # (recent langchain-core); if it were a method this would stringify
        # the bound method -- confirm the minimum langchain-core version.
        text_accessor = getattr(message, "text", None)
        if text_accessor is None:
            return str(message.content)
        text = str(text_accessor)
        return text if text else None
# Public API of this module.
__all__ = [
    "OpenAIModerationError",
    "OpenAIModerationMiddleware",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/partners/openai/langchain_openai/middleware/openai_moderation.py",
"license": "MIT License",
"lines": 393,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/partners/openai/tests/unit_tests/middleware/test_openai_moderation_middleware.py | from __future__ import annotations
from collections.abc import Mapping
from copy import deepcopy
from typing import Any, cast
from unittest.mock import Mock
import pytest
from langchain.agents.middleware.types import AgentState
from langchain_core.messages import AIMessage, HumanMessage, ToolMessage
from openai.types.moderation import Moderation
from langchain_openai.middleware.openai_moderation import (
OpenAIModerationError,
OpenAIModerationMiddleware,
)
# A fully "clean" moderation payload: nothing flagged, all scores zero.
# Shaped to validate against openai.types.moderation.Moderation.
DEFAULT_OK_DATA: dict[str, Any] = {
    "flagged": False,
    "categories": {
        "harassment": False,
        "harassment/threatening": False,
        "hate": False,
        "hate/threatening": False,
        "illicit": False,
        "illicit/violent": False,
        "self-harm": False,
        "self-harm/instructions": False,
        "self-harm/intent": False,
        "sexual": False,
        "sexual/minors": False,
        "violence": False,
        "violence/graphic": False,
    },
    "category_scores": {
        "harassment": 0.0,
        "harassment/threatening": 0.0,
        "hate": 0.0,
        "hate/threatening": 0.0,
        "illicit": 0.0,
        "illicit/violent": 0.0,
        "self-harm": 0.0,
        "self-harm/instructions": 0.0,
        "self-harm/intent": 0.0,
        "sexual": 0.0,
        "sexual/minors": 0.0,
        "violence": 0.0,
        "violence/graphic": 0.0,
    },
    "category_applied_input_types": {
        "harassment": ["text"],
        "harassment/threatening": ["text"],
        "hate": ["text"],
        "hate/threatening": ["text"],
        "illicit": ["text"],
        "illicit/violent": ["text"],
        "self-harm": ["text"],
        "self-harm/instructions": ["text"],
        "self-harm/intent": ["text"],
        "sexual": ["text"],
        "sexual/minors": ["text"],
        "violence": ["text"],
        "violence/graphic": ["text"],
    },
}

# Shared "content is fine" fixture reused by the stub middleware below.
DEFAULT_OK = Moderation.model_validate(DEFAULT_OK_DATA)
def flagged_result() -> Moderation:
    """Build a Moderation verdict flagged for the self-harm category."""
    data = deepcopy(DEFAULT_OK_DATA)
    data["flagged"] = True
    data["categories"]["self-harm"] = True
    data["category_scores"]["self-harm"] = 0.9
    return Moderation.model_validate(data)
class StubModerationMiddleware(OpenAIModerationMiddleware):
    """Override OpenAI calls with deterministic fixtures."""

    def __init__(self, decisions: Mapping[str, Moderation], **kwargs: Any) -> None:
        super().__init__(**kwargs)
        # Maps exact message text -> canned moderation verdict.
        self._decisions = decisions

    def _moderate(self, text: str) -> Moderation:
        # Unlisted text is treated as clean (DEFAULT_OK).
        return self._decisions.get(text, DEFAULT_OK)

    async def _amoderate(self, text: str) -> Moderation:
        # The async path reuses the same deterministic lookup.
        return self._moderate(text)
def make_state(
    messages: list[AIMessage | HumanMessage | ToolMessage],
) -> AgentState[Any]:
    """Wrap a message list in the minimal AgentState dict the hooks expect."""
    return cast(AgentState[Any], {"messages": messages})
def test_before_model_allows_clean_input() -> None:
    """Clean user input passes through with no state update."""
    mw = StubModerationMiddleware({}, model="test")
    state = make_state([HumanMessage(content="hello")])
    assert mw.before_model(state, Mock()) is None
def test_before_model_errors_on_flagged_input() -> None:
    """Flagged input raises OpenAIModerationError under exit_behavior='error'."""
    mw = StubModerationMiddleware(
        {"bad": flagged_result()}, model="test", exit_behavior="error"
    )
    state = make_state([HumanMessage(content="bad")])
    with pytest.raises(OpenAIModerationError) as exc_info:
        mw.before_model(state, Mock())
    error = exc_info.value
    assert error.result.flagged is True
    assert error.stage == "input"
def test_before_model_jump_on_end_behavior() -> None:
    """Flagged input short-circuits to 'end' with an explanatory AIMessage."""
    mw = StubModerationMiddleware(
        {"bad": flagged_result()}, model="test", exit_behavior="end"
    )
    update = mw.before_model(make_state([HumanMessage(content="bad")]), Mock())
    assert update is not None
    assert update["jump_to"] == "end"
    reply = update["messages"][0]
    assert isinstance(reply, AIMessage)
    assert "flagged" in reply.content
def test_custom_violation_message_template() -> None:
    """A user-supplied template is rendered with the flagged category name."""
    mw = StubModerationMiddleware(
        {"bad": flagged_result()},
        model="test",
        exit_behavior="end",
        violation_message="Policy block: {categories}",
    )
    update = mw.before_model(make_state([HumanMessage(content="bad")]), Mock())
    assert update is not None
    assert update["messages"][0].content == "Policy block: self harm"
def test_after_model_replaces_flagged_message() -> None:
    """exit_behavior='replace' rewrites the flagged AI message in place."""
    mw = StubModerationMiddleware(
        {"unsafe": flagged_result()}, model="test", exit_behavior="replace"
    )
    state = make_state([AIMessage(content="unsafe", id="ai-1")])
    update = mw.after_model(state, Mock())
    assert update is not None
    replaced = update["messages"][-1]
    assert isinstance(replaced, AIMessage)
    # The original message id is preserved; only the content changes.
    assert replaced.id == "ai-1"
    assert "flagged" in replaced.content
def test_tool_messages_are_moderated_when_enabled() -> None:
    """With check_tool_results on, flagged tool output is rewritten in place."""
    mw = StubModerationMiddleware(
        {"dangerous": flagged_result()},
        model="test",
        check_tool_results=True,
        exit_behavior="replace",
    )
    conversation = [
        HumanMessage(content="question"),
        AIMessage(content="call tool"),
        ToolMessage(content="dangerous", tool_call_id="tool-1"),
    ]
    update = mw.before_model(make_state(conversation), Mock())
    assert update is not None
    rewritten = update["messages"][-1]
    assert isinstance(rewritten, ToolMessage)
    # tool_call_id survives the rewrite; only the content changes.
    assert rewritten.tool_call_id == "tool-1"
    assert "flagged" in rewritten.content
@pytest.mark.asyncio
async def test_async_before_model_uses_async_moderation() -> None:
    """The async hook routes through _amoderate and still jumps to 'end'."""
    mw = StubModerationMiddleware(
        {"async": flagged_result()}, model="test", exit_behavior="end"
    )
    update = await mw.abefore_model(make_state([HumanMessage(content="async")]), Mock())
    assert update is not None
    assert update["jump_to"] == "end"
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/partners/openai/tests/unit_tests/middleware/test_openai_moderation_middleware.py",
"license": "MIT License",
"lines": 157,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/partners/mistralai/langchain_mistralai/_compat.py | """Derivations of standard content blocks from mistral content."""
from __future__ import annotations
from langchain_core.messages import AIMessage, AIMessageChunk
from langchain_core.messages import content as types
from langchain_core.messages.block_translators import register_translator
def _convert_from_v1_to_mistral(
content: list[types.ContentBlock],
model_provider: str | None,
) -> str | list[str | dict]:
new_content: list = []
for block in content:
if block["type"] == "text":
new_content.append({"text": block.get("text", ""), "type": "text"})
elif (
block["type"] == "reasoning"
and (reasoning := block.get("reasoning"))
and isinstance(reasoning, str)
and model_provider == "mistralai"
):
new_content.append(
{
"type": "thinking",
"thinking": [{"type": "text", "text": reasoning}],
}
)
elif (
block["type"] == "non_standard"
and "value" in block
and model_provider == "mistralai"
):
new_content.append(block["value"])
elif block["type"] == "tool_call":
continue
else:
new_content.append(block)
return new_content
def _convert_to_v1_from_mistral(message: AIMessage) -> list[types.ContentBlock]:
"""Convert mistral message content to v1 format."""
if isinstance(message.content, str):
content_blocks: list[types.ContentBlock] = [
{"type": "text", "text": message.content}
]
else:
content_blocks = []
for block in message.content:
if isinstance(block, str):
content_blocks.append({"type": "text", "text": block})
elif isinstance(block, dict):
if block.get("type") == "text" and isinstance(block.get("text"), str):
text_block: types.TextContentBlock = {
"type": "text",
"text": block["text"],
}
if "index" in block:
text_block["index"] = block["index"]
content_blocks.append(text_block)
elif block.get("type") == "thinking" and isinstance(
block.get("thinking"), list
):
for sub_block in block["thinking"]:
if (
isinstance(sub_block, dict)
and sub_block.get("type") == "text"
):
reasoning_block: types.ReasoningContentBlock = {
"type": "reasoning",
"reasoning": sub_block.get("text", ""),
}
if "index" in block:
reasoning_block["index"] = block["index"]
content_blocks.append(reasoning_block)
else:
non_standard_block: types.NonStandardContentBlock = {
"type": "non_standard",
"value": block,
}
content_blocks.append(non_standard_block)
else:
continue
if (
len(content_blocks) == 1
and content_blocks[0].get("type") == "text"
and content_blocks[0].get("text") == ""
and message.tool_calls
):
content_blocks = []
for tool_call in message.tool_calls:
content_blocks.append(
{
"type": "tool_call",
"name": tool_call["name"],
"args": tool_call["args"],
"id": tool_call.get("id"),
}
)
return content_blocks
def translate_content(message: AIMessage) -> list[types.ContentBlock]:
    """Derive standard content blocks from a message with mistral content."""
    return _convert_to_v1_from_mistral(message)


def translate_content_chunk(message: AIMessageChunk) -> list[types.ContentBlock]:
    """Derive standard content blocks from a message chunk with mistral content."""
    return _convert_to_v1_from_mistral(message)


# Register both translators under the "mistralai" provider key so that
# langchain-core can normalize Mistral messages into v1 content blocks.
register_translator("mistralai", translate_content, translate_content_chunk)
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/partners/mistralai/langchain_mistralai/_compat.py",
"license": "MIT License",
"lines": 103,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/partners/anthropic/langchain_anthropic/middleware/prompt_caching.py | """Anthropic prompt caching middleware.
Requires:
- `langchain`: For agent middleware framework
- `langchain-anthropic`: For `ChatAnthropic` model (already a dependency)
"""
from collections.abc import Awaitable, Callable
from typing import Literal
from warnings import warn
from langchain_anthropic.chat_models import ChatAnthropic
try:
from langchain.agents.middleware.types import (
AgentMiddleware,
ModelCallResult,
ModelRequest,
ModelResponse,
)
except ImportError as e:
msg = (
"AnthropicPromptCachingMiddleware requires 'langchain' to be installed. "
"This middleware is designed for use with LangChain agents. "
"Install it with: pip install langchain"
)
raise ImportError(msg) from e
class AnthropicPromptCachingMiddleware(AgentMiddleware):
    """Prompt Caching Middleware.

    Optimizes API usage by caching conversation prefixes for Anthropic models.

    Requires both `langchain` and `langchain-anthropic` packages to be installed.

    Learn more about Anthropic prompt caching
    [here](https://platform.claude.com/docs/en/build-with-claude/prompt-caching).
    """

    def __init__(
        self,
        type: Literal["ephemeral"] = "ephemeral",  # noqa: A002
        ttl: Literal["5m", "1h"] = "5m",
        min_messages_to_cache: int = 0,
        unsupported_model_behavior: Literal["ignore", "warn", "raise"] = "warn",
    ) -> None:
        """Initialize the middleware with cache control settings.

        Args:
            type: The type of cache to use, only `'ephemeral'` is supported.
            ttl: The time to live for the cache, only `'5m'` and `'1h'` are
                supported.
            min_messages_to_cache: The minimum number of messages until the
                cache is used.
            unsupported_model_behavior: The behavior to take when an
                unsupported model is used.
                `'ignore'` will ignore the unsupported model and continue without
                caching.
                `'warn'` will warn the user and continue without caching.
                `'raise'` will raise an error and stop the agent.
        """
        self.type = type
        self.ttl = ttl
        self.min_messages_to_cache = min_messages_to_cache
        self.unsupported_model_behavior = unsupported_model_behavior

    def _should_apply_caching(self, request: ModelRequest) -> bool:
        """Check if caching should be applied to the request.

        Args:
            request: The model request to check.

        Returns:
            `True` if caching should be applied, `False` otherwise.

        Raises:
            ValueError: If model is unsupported and behavior is set to `'raise'`.
        """
        if not isinstance(request.model, ChatAnthropic):
            msg = (
                "AnthropicPromptCachingMiddleware caching middleware only supports "
                f"Anthropic models, not instances of {type(request.model)}"
            )
            if self.unsupported_model_behavior == "raise":
                raise ValueError(msg)
            if self.unsupported_model_behavior == "warn":
                # stacklevel=3 attributes the warning to the middleware caller.
                warn(msg, stacklevel=3)
            return False
        # The system message (when present) counts toward the threshold.
        messages_count = (
            len(request.messages) + 1
            if request.system_message
            else len(request.messages)
        )
        return messages_count >= self.min_messages_to_cache

    def _with_cache_control(self, request: ModelRequest) -> ModelRequest:
        """Return a copy of `request` whose model settings carry cache control.

        Consolidates the settings construction previously duplicated across the
        sync and async call paths so the two cannot drift apart.
        """
        new_model_settings = {
            **request.model_settings,
            "cache_control": {"type": self.type, "ttl": self.ttl},
        }
        return request.override(model_settings=new_model_settings)

    def wrap_model_call(
        self,
        request: ModelRequest,
        handler: Callable[[ModelRequest], ModelResponse],
    ) -> ModelCallResult:
        """Modify the model request to add cache control blocks.

        Args:
            request: The model request to potentially modify.
            handler: The handler to execute the model request.

        Returns:
            The model response from the handler.
        """
        if not self._should_apply_caching(request):
            return handler(request)
        return handler(self._with_cache_control(request))

    async def awrap_model_call(
        self,
        request: ModelRequest,
        handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
    ) -> ModelCallResult:
        """Modify the model request to add cache control blocks (async version).

        Args:
            request: The model request to potentially modify.
            handler: The async handler to execute the model request.

        Returns:
            The model response from the handler.
        """
        if not self._should_apply_caching(request):
            return await handler(request)
        return await handler(self._with_cache_control(request))
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/partners/anthropic/langchain_anthropic/middleware/prompt_caching.py",
"license": "MIT License",
"lines": 120,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/partners/anthropic/tests/unit_tests/middleware/test_prompt_caching.py | """Tests for Anthropic prompt caching middleware."""
import warnings
from typing import Any, cast
from unittest.mock import MagicMock
import pytest
from langchain.agents.middleware.types import ModelRequest, ModelResponse
from langchain_core.callbacks import (
AsyncCallbackManagerForLLMRun,
CallbackManagerForLLMRun,
)
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage
from langchain_core.outputs import ChatGeneration, ChatResult
from langgraph.runtime import Runtime
from langchain_anthropic.chat_models import (
ChatAnthropic,
_collect_code_execution_tool_ids,
_is_code_execution_related_block,
)
from langchain_anthropic.middleware import AnthropicPromptCachingMiddleware
class FakeToolCallingModel(BaseChatModel):
    """Fake model for testing middleware."""

    @staticmethod
    def _echo_result(messages: list[BaseMessage]) -> ChatResult:
        """Join all message contents with '-' and wrap them as a ChatResult."""
        joined = "-".join(str(m.content) for m in messages)
        generation = ChatGeneration(message=AIMessage(content=joined, id="0"))
        return ChatResult(generations=[generation])

    def _generate(
        self,
        messages: list[BaseMessage],
        stop: list[str] | None = None,
        run_manager: CallbackManagerForLLMRun | None = None,
        **kwargs: Any,
    ) -> ChatResult:
        """Synchronous generation: echo the joined message contents."""
        return self._echo_result(messages)

    async def _agenerate(
        self,
        messages: list[BaseMessage],
        stop: list[str] | None = None,
        run_manager: AsyncCallbackManagerForLLMRun | None = None,
        **kwargs: Any,
    ) -> ChatResult:
        """Asynchronous generation: echo the joined message contents."""
        return self._echo_result(messages)

    @property
    def _llm_type(self) -> str:
        return "fake-tool-call-model"
def test_anthropic_prompt_caching_middleware_initialization() -> None:
    """Test AnthropicPromptCachingMiddleware initialization."""
    # Custom settings are stored verbatim on the instance.
    custom = AnthropicPromptCachingMiddleware(
        type="ephemeral", ttl="1h", min_messages_to_cache=5
    )
    assert custom.type == "ephemeral"
    assert custom.ttl == "1h"
    assert custom.min_messages_to_cache == 5

    # Default construction.
    middleware = AnthropicPromptCachingMiddleware()
    assert middleware.type == "ephemeral"
    assert middleware.ttl == "5m"
    assert middleware.min_messages_to_cache == 0

    # A mocked ChatAnthropic passes the model check, so cache settings apply.
    request = ModelRequest(
        model=MagicMock(spec=ChatAnthropic),
        messages=[HumanMessage("Hello")],
        system_prompt=None,
        tool_choice=None,
        tools=[],
        response_format=None,
        state={"messages": [HumanMessage("Hello")]},
        runtime=cast(Runtime, object()),
        model_settings={},
    )

    seen: ModelRequest | None = None

    def record_and_respond(req: ModelRequest) -> ModelResponse:
        nonlocal seen
        seen = req
        return ModelResponse(result=[AIMessage(content="mock response")])

    middleware.wrap_model_call(request, record_and_respond)

    # The handler received a request carrying the default cache settings.
    assert seen is not None
    assert seen.model_settings == {
        "cache_control": {"type": "ephemeral", "ttl": "5m"}
    }
def test_anthropic_prompt_caching_middleware_unsupported_model() -> None:
    """Test AnthropicPromptCachingMiddleware with unsupported model."""
    # FakeToolCallingModel is not a ChatAnthropic, so every behavior branch
    # below treats the request's model as unsupported.
    fake_request = ModelRequest(
        model=FakeToolCallingModel(),
        messages=[HumanMessage("Hello")],
        system_prompt=None,
        tool_choice=None,
        tools=[],
        response_format=None,
        state={"messages": [HumanMessage("Hello")]},
        runtime=cast(Runtime, object()),
        model_settings={},
    )
    middleware = AnthropicPromptCachingMiddleware(unsupported_model_behavior="raise")
    def mock_handler(req: ModelRequest) -> ModelResponse:
        return ModelResponse(result=[AIMessage(content="mock response")])
    # Since we're in the langchain-anthropic package, ChatAnthropic is always
    # available. Test that it raises an error for unsupported model instances
    with pytest.raises(
        ValueError,
        match=(
            "AnthropicPromptCachingMiddleware caching middleware only supports "
            "Anthropic models, not instances of"
        ),
    ):
        middleware.wrap_model_call(fake_request, mock_handler)
    middleware = AnthropicPromptCachingMiddleware(unsupported_model_behavior="warn")
    # Test warn behavior for unsupported model instances: exactly one warning
    # is recorded and the handler still runs.
    with warnings.catch_warnings(record=True) as w:
        result = middleware.wrap_model_call(fake_request, mock_handler)
        assert isinstance(result, ModelResponse)
        assert len(w) == 1
        assert (
            "AnthropicPromptCachingMiddleware caching middleware only supports "
            "Anthropic models, not instances of"
        ) in str(w[-1].message)
    # Test ignore behavior: no error, no warning, handler runs normally.
    middleware = AnthropicPromptCachingMiddleware(unsupported_model_behavior="ignore")
    result = middleware.wrap_model_call(fake_request, mock_handler)
    assert isinstance(result, ModelResponse)
async def test_anthropic_prompt_caching_middleware_async() -> None:
    """Test AnthropicPromptCachingMiddleware async path."""
    middleware = AnthropicPromptCachingMiddleware(
        type="ephemeral", ttl="1h", min_messages_to_cache=5
    )

    # Six messages clears the min_messages_to_cache=5 threshold.
    history = [HumanMessage("Hello")] * 6
    fake_request = ModelRequest(
        model=MagicMock(spec=ChatAnthropic),
        messages=history,
        system_prompt=None,
        tool_choice=None,
        tools=[],
        response_format=None,
        state={"messages": history},
        runtime=cast(Runtime, object()),
        model_settings={},
    )

    captured: ModelRequest | None = None

    async def record_and_respond(req: ModelRequest) -> ModelResponse:
        nonlocal captured
        captured = req
        return ModelResponse(result=[AIMessage(content="mock response")])

    result = await middleware.awrap_model_call(fake_request, record_and_respond)
    assert isinstance(result, ModelResponse)

    # The handler received a request carrying the configured cache settings.
    assert captured is not None
    assert captured.model_settings == {
        "cache_control": {"type": "ephemeral", "ttl": "1h"}
    }
async def test_anthropic_prompt_caching_middleware_async_unsupported_model() -> None:
    """Test AnthropicPromptCachingMiddleware async path with unsupported model."""
    # FakeToolCallingModel is not a ChatAnthropic, so every behavior branch
    # below treats the request's model as unsupported.
    fake_request = ModelRequest(
        model=FakeToolCallingModel(),
        messages=[HumanMessage("Hello")],
        system_prompt=None,
        tool_choice=None,
        tools=[],
        response_format=None,
        state={"messages": [HumanMessage("Hello")]},
        runtime=cast(Runtime, object()),
        model_settings={},
    )
    middleware = AnthropicPromptCachingMiddleware(unsupported_model_behavior="raise")
    async def mock_handler(req: ModelRequest) -> ModelResponse:
        return ModelResponse(result=[AIMessage(content="mock response")])
    # Test that it raises an error for unsupported model instances
    with pytest.raises(
        ValueError,
        match=(
            "AnthropicPromptCachingMiddleware caching middleware only supports "
            "Anthropic models, not instances of"
        ),
    ):
        await middleware.awrap_model_call(fake_request, mock_handler)
    middleware = AnthropicPromptCachingMiddleware(unsupported_model_behavior="warn")
    # Test warn behavior for unsupported model instances: exactly one warning
    # is recorded and the handler still runs.
    with warnings.catch_warnings(record=True) as w:
        result = await middleware.awrap_model_call(fake_request, mock_handler)
        assert isinstance(result, ModelResponse)
        assert len(w) == 1
        assert (
            "AnthropicPromptCachingMiddleware caching middleware only supports "
            "Anthropic models, not instances of"
        ) in str(w[-1].message)
    # Test ignore behavior: no error, no warning, handler runs normally.
    middleware = AnthropicPromptCachingMiddleware(unsupported_model_behavior="ignore")
    result = await middleware.awrap_model_call(fake_request, mock_handler)
    assert isinstance(result, ModelResponse)
async def test_anthropic_prompt_caching_middleware_async_min_messages() -> None:
    """Test async path respects min_messages_to_cache."""
    middleware = AnthropicPromptCachingMiddleware(min_messages_to_cache=5)

    # Only three messages: below the configured minimum of five.
    history = [HumanMessage("Hello")] * 3
    fake_request = ModelRequest(
        model=FakeToolCallingModel(),
        messages=history,
        system_prompt=None,
        tool_choice=None,
        tools=[],
        response_format=None,
        state={"messages": history},
        runtime=cast(Runtime, object()),
        model_settings={},
    )

    captured: ModelRequest | None = None

    async def record_and_respond(req: ModelRequest) -> ModelResponse:
        nonlocal captured
        captured = req
        return ModelResponse(result=[AIMessage(content="mock response")])

    result = await middleware.awrap_model_call(fake_request, record_and_respond)
    assert isinstance(result, ModelResponse)

    # Below the threshold no cache_control entry is added.
    assert captured is not None
    assert captured.model_settings == {}
async def test_anthropic_prompt_caching_middleware_async_with_system_prompt() -> None:
    """Test async path counts system prompt in message count."""
    middleware = AnthropicPromptCachingMiddleware(
        type="ephemeral", ttl="1h", min_messages_to_cache=3
    )

    # Two conversation messages plus the system prompt reach the minimum of 3.
    history = [HumanMessage("Hello"), HumanMessage("World")]
    fake_request = ModelRequest(
        model=MagicMock(spec=ChatAnthropic),
        messages=history,
        system_prompt="You are a helpful assistant",
        tool_choice=None,
        tools=[],
        response_format=None,
        state={"messages": history},
        runtime=cast(Runtime, object()),
        model_settings={},
    )

    captured: ModelRequest | None = None

    async def record_and_respond(req: ModelRequest) -> ModelResponse:
        nonlocal captured
        captured = req
        return ModelResponse(result=[AIMessage(content="mock response")])

    result = await middleware.awrap_model_call(fake_request, record_and_respond)
    assert isinstance(result, ModelResponse)

    # The system prompt pushed the count to the threshold, so caching applied.
    assert captured is not None
    assert captured.model_settings == {
        "cache_control": {"type": "ephemeral", "ttl": "1h"}
    }
async def test_anthropic_prompt_caching_middleware_async_default_values() -> None:
    """Test async path with default middleware initialization."""
    # Defaults use min_messages_to_cache=0, so even one message triggers caching.
    middleware = AnthropicPromptCachingMiddleware()

    fake_request = ModelRequest(
        model=MagicMock(spec=ChatAnthropic),
        messages=[HumanMessage("Hello")],
        system_prompt=None,
        tool_choice=None,
        tools=[],
        response_format=None,
        state={"messages": [HumanMessage("Hello")]},
        runtime=cast(Runtime, object()),
        model_settings={},
    )

    captured: ModelRequest | None = None

    async def record_and_respond(req: ModelRequest) -> ModelResponse:
        nonlocal captured
        captured = req
        return ModelResponse(result=[AIMessage(content="mock response")])

    result = await middleware.awrap_model_call(fake_request, record_and_respond)
    assert isinstance(result, ModelResponse)

    # Default settings applied: ephemeral cache with a 5-minute TTL.
    assert captured is not None
    assert captured.model_settings == {
        "cache_control": {"type": "ephemeral", "ttl": "5m"}
    }
class TestCollectCodeExecutionToolIds:
    """Tests for _collect_code_execution_tool_ids function."""
    # These cases show the function collects tool_use ids from assistant turns
    # whose "caller" type is a code_execution variant; it appears to accept any
    # versioned code_execution_* type (see test_future_code_execution_version).
    def test_empty_messages(self) -> None:
        """Test with empty messages list."""
        result = _collect_code_execution_tool_ids([])
        assert result == set()
    def test_no_code_execution_calls(self) -> None:
        """Test messages without any code_execution calls."""
        # A tool_use block without a "caller" entry is a regular tool call.
        messages = [
            {
                "role": "user",
                "content": [{"type": "text", "text": "Hello"}],
            },
            {
                "role": "assistant",
                "content": [
                    {
                        "type": "tool_use",
                        "id": "toolu_regular",
                        "name": "get_weather",
                        "input": {"location": "NYC"},
                    }
                ],
            },
        ]
        result = _collect_code_execution_tool_ids(messages)
        assert result == set()
    def test_single_code_execution_call(self) -> None:
        """Test with a single code_execution tool call."""
        messages = [
            {
                "role": "assistant",
                "content": [
                    {
                        "type": "tool_use",
                        "id": "toolu_code_exec_1",
                        "name": "get_weather",
                        "input": {"location": "NYC"},
                        "caller": {
                            "type": "code_execution_20250825",
                            "tool_id": "srvtoolu_abc123",
                        },
                    }
                ],
            },
        ]
        result = _collect_code_execution_tool_ids(messages)
        assert result == {"toolu_code_exec_1"}
    def test_multiple_code_execution_calls(self) -> None:
        """Test with multiple code_execution tool calls."""
        # Mixed content: one regular call and two code_execution-driven calls.
        messages = [
            {
                "role": "assistant",
                "content": [
                    {
                        "type": "tool_use",
                        "id": "toolu_regular",
                        "name": "search",
                        "input": {"query": "test"},
                    },
                    {
                        "type": "tool_use",
                        "id": "toolu_code_exec_1",
                        "name": "get_weather",
                        "input": {"location": "NYC"},
                        "caller": {
                            "type": "code_execution_20250825",
                            "tool_id": "srvtoolu_abc",
                        },
                    },
                    {
                        "type": "tool_use",
                        "id": "toolu_code_exec_2",
                        "name": "get_weather",
                        "input": {"location": "SF"},
                        "caller": {
                            "type": "code_execution_20250825",
                            "tool_id": "srvtoolu_def",
                        },
                    },
                ],
            },
        ]
        result = _collect_code_execution_tool_ids(messages)
        assert result == {"toolu_code_exec_1", "toolu_code_exec_2"}
        assert "toolu_regular" not in result
    def test_future_code_execution_version(self) -> None:
        """Test with a hypothetical future code_execution version."""
        messages = [
            {
                "role": "assistant",
                "content": [
                    {
                        "type": "tool_use",
                        "id": "toolu_future",
                        "name": "get_weather",
                        "input": {},
                        "caller": {
                            "type": "code_execution_20260101",
                            "tool_id": "srvtoolu_future",
                        },
                    }
                ],
            },
        ]
        result = _collect_code_execution_tool_ids(messages)
        assert result == {"toolu_future"}
    def test_ignores_user_messages(self) -> None:
        """Test that user messages are ignored."""
        # tool_result blocks live in user turns and are never collected here.
        messages = [
            {
                "role": "user",
                "content": [
                    {
                        "type": "tool_result",
                        "tool_use_id": "toolu_123",
                        "content": "result",
                    }
                ],
            },
        ]
        result = _collect_code_execution_tool_ids(messages)
        assert result == set()
    def test_handles_string_content(self) -> None:
        """Test that string content is handled gracefully."""
        messages = [
            {
                "role": "assistant",
                "content": "Just a text response",
            },
        ]
        result = _collect_code_execution_tool_ids(messages)
        assert result == set()
class TestIsCodeExecutionRelatedBlock:
    """Tests for _is_code_execution_related_block function."""
    # The second argument is the set of tool_use ids already identified as
    # code_execution calls; tool_result blocks are matched against it.
    def test_regular_tool_use_block(self) -> None:
        """Test regular tool_use block without caller."""
        block = {
            "type": "tool_use",
            "id": "toolu_regular",
            "name": "get_weather",
            "input": {"location": "NYC"},
        }
        assert not _is_code_execution_related_block(block, set())
    def test_code_execution_tool_use_block(self) -> None:
        """Test tool_use block called by code_execution."""
        # The "caller" entry alone marks the block; the id set is empty here.
        block = {
            "type": "tool_use",
            "id": "toolu_code_exec",
            "name": "get_weather",
            "input": {"location": "NYC"},
            "caller": {
                "type": "code_execution_20250825",
                "tool_id": "srvtoolu_abc",
            },
        }
        assert _is_code_execution_related_block(block, set())
    def test_regular_tool_result_block(self) -> None:
        """Test tool_result block for regular tool."""
        block = {
            "type": "tool_result",
            "tool_use_id": "toolu_regular",
            "content": "Sunny, 72°F",
        }
        code_exec_ids = {"toolu_code_exec"}
        assert not _is_code_execution_related_block(block, code_exec_ids)
    def test_code_execution_tool_result_block(self) -> None:
        """Test tool_result block for code_execution called tool."""
        # tool_result blocks are related iff their tool_use_id is in the set.
        block = {
            "type": "tool_result",
            "tool_use_id": "toolu_code_exec",
            "content": "Sunny, 72°F",
        }
        code_exec_ids = {"toolu_code_exec"}
        assert _is_code_execution_related_block(block, code_exec_ids)
    def test_text_block(self) -> None:
        """Test that text blocks are not flagged."""
        block = {"type": "text", "text": "Hello world"}
        assert not _is_code_execution_related_block(block, set())
    def test_non_dict_block(self) -> None:
        """Test that non-dict values return False."""
        assert not _is_code_execution_related_block("string", set())  # type: ignore[arg-type]
        assert not _is_code_execution_related_block(None, set())  # type: ignore[arg-type]
        assert not _is_code_execution_related_block(123, set())  # type: ignore[arg-type]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/partners/anthropic/tests/unit_tests/middleware/test_prompt_caching.py",
"license": "MIT License",
"lines": 465,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/partners/groq/langchain_groq/_compat.py | from __future__ import annotations
import json
from typing import Any, cast
from langchain_core.messages import content as types
def _convert_from_v1_to_groq(
content: list[types.ContentBlock],
model_provider: str | None,
) -> tuple[list[dict[str, Any] | str], dict]:
new_content: list = []
new_additional_kwargs: dict = {}
for i, block in enumerate(content):
if block["type"] == "text":
new_content.append({"text": block.get("text", ""), "type": "text"})
elif (
block["type"] == "reasoning"
and (reasoning := block.get("reasoning"))
and model_provider == "groq"
):
new_additional_kwargs["reasoning_content"] = reasoning
elif block["type"] == "server_tool_call" and model_provider == "groq":
new_block = {}
if "args" in block:
new_block["arguments"] = json.dumps(block["args"])
if idx := block.get("extras", {}).get("index"):
new_block["index"] = idx
if block.get("name") == "web_search":
new_block["type"] = "search"
elif block.get("name") == "code_interpreter":
new_block["type"] = "python"
else:
new_block["type"] = ""
if i < len(content) - 1 and content[i + 1]["type"] == "server_tool_result":
result = cast("types.ServerToolResult", content[i + 1])
for k, v in result.get("extras", {}).items():
new_block[k] = v # noqa: PERF403
if "output" in result:
new_block["output"] = result["output"]
if "executed_tools" not in new_additional_kwargs:
new_additional_kwargs["executed_tools"] = []
new_additional_kwargs["executed_tools"].append(new_block)
elif block["type"] == "server_tool_result":
continue
elif (
block["type"] == "non_standard"
and "value" in block
and model_provider == "groq"
):
new_content.append(block["value"])
else:
new_content.append(block)
# For consistency with v0 payloads, we cast single text blocks to str
if (
len(new_content) == 1
and isinstance(new_content[0], dict)
and new_content[0].get("type") == "text"
and (text_content := new_content[0].get("text"))
and isinstance(text_content, str)
):
return text_content, new_additional_kwargs
return new_content, new_additional_kwargs
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/partners/groq/langchain_groq/_compat.py",
"license": "MIT License",
"lines": 60,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/partners/fireworks/langchain_fireworks/_compat.py | """Converts between AIMessage output formats, governed by `output_version`."""
from __future__ import annotations
from langchain_core.messages import AIMessage
def _convert_from_v1_to_chat_completions(message: AIMessage) -> AIMessage:
    """Convert a v1 message to the Chat Completions format."""
    content = message.content
    # String content is already in Chat Completions shape.
    if not isinstance(content, list):
        return message

    converted: list = []
    for item in content:
        if not isinstance(item, dict):
            converted.append(item)
            continue
        kind = item.get("type")
        if kind == "text":
            # Keep only the raw text payload (drops annotations).
            converted.append({"type": "text", "text": item["text"]})
        elif kind not in ("reasoning", "tool_call"):
            # reasoning / tool_call blocks are omitted entirely.
            converted.append(item)
    return message.model_copy(update={"content": converted})
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/partners/fireworks/langchain_fireworks/_compat.py",
"license": "MIT License",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain_v1/langchain/agents/middleware/tool_emulator.py | """Tool emulator middleware for testing."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Generic
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import HumanMessage, ToolMessage
from langchain.agents.middleware.types import AgentMiddleware, AgentState, ContextT
from langchain.chat_models.base import init_chat_model
if TYPE_CHECKING:
from collections.abc import Awaitable, Callable
from langgraph.types import Command
from langchain.agents.middleware.types import ToolCallRequest
from langchain.tools import BaseTool
class LLMToolEmulator(AgentMiddleware[AgentState[Any], ContextT], Generic[ContextT]):
    """Emulates specified tools using an LLM instead of executing them.

    This middleware allows selective emulation of tools for testing purposes.
    By default (when `tools=None`), all tools are emulated. You can specify which
    tools to emulate by passing a list of tool names or `BaseTool` instances.

    Examples:
        !!! example "Emulate all tools (default behavior)"

            ```python
            from langchain.agents.middleware import LLMToolEmulator

            middleware = LLMToolEmulator()

            agent = create_agent(
                model="openai:gpt-4o",
                tools=[get_weather, get_user_location, calculator],
                middleware=[middleware],
            )
            ```

        !!! example "Emulate specific tools by name"

            ```python
            middleware = LLMToolEmulator(tools=["get_weather", "get_user_location"])
            ```

        !!! example "Use a custom model for emulation"

            ```python
            middleware = LLMToolEmulator(
                tools=["get_weather"], model="anthropic:claude-sonnet-4-5-20250929"
            )
            ```

        !!! example "Emulate specific tools by passing tool instances"

            ```python
            middleware = LLMToolEmulator(tools=[get_weather, get_user_location])
            ```
    """

    def __init__(
        self,
        *,
        tools: list[str | BaseTool] | None = None,
        model: str | BaseChatModel | None = None,
    ) -> None:
        """Initialize the tool emulator.

        Args:
            tools: List of tool names (`str`) or `BaseTool` instances to emulate.
                If `None`, ALL tools will be emulated.
                If empty list, no tools will be emulated.
            model: Model to use for emulation.
                Defaults to `'anthropic:claude-sonnet-4-5-20250929'`.
                Can be a model identifier string or `BaseChatModel` instance.
        """
        super().__init__()

        # `tools=None` means "emulate everything"; an empty list emulates nothing.
        self.emulate_all = tools is None
        self.tools_to_emulate: set[str] = set()
        if not self.emulate_all and tools is not None:
            for tool in tools:
                if isinstance(tool, str):
                    self.tools_to_emulate.add(tool)
                else:
                    # Assume BaseTool with .name attribute
                    self.tools_to_emulate.add(tool.name)

        # temperature=1 keeps emulated outputs varied between calls.
        if model is None:
            self.model = init_chat_model("anthropic:claude-sonnet-4-5-20250929", temperature=1)
        elif isinstance(model, BaseChatModel):
            self.model = model
        else:
            self.model = init_chat_model(model, temperature=1)

    def _should_emulate(self, tool_name: str) -> bool:
        """Return whether the named tool should be emulated rather than executed."""
        return self.emulate_all or tool_name in self.tools_to_emulate

    @staticmethod
    def _emulation_prompt(request: ToolCallRequest) -> str:
        """Build the emulator-LLM prompt describing the requested tool call.

        Shared by the sync and async paths so the prompt cannot drift between
        them (it was previously duplicated in both).
        """
        tool_name = request.tool_call["name"]
        tool_args = request.tool_call["args"]
        tool_description = request.tool.description if request.tool else "No description available"
        return (
            f"You are emulating a tool call for testing purposes.\n\n"
            f"Tool: {tool_name}\n"
            f"Description: {tool_description}\n"
            f"Arguments: {tool_args}\n\n"
            f"Generate a realistic response that this tool would return "
            f"given these arguments.\n"
            f"Return ONLY the tool's output, no explanation or preamble. "
            f"Introduce variation into your responses."
        )

    def wrap_tool_call(
        self,
        request: ToolCallRequest,
        handler: Callable[[ToolCallRequest], ToolMessage | Command[Any]],
    ) -> ToolMessage | Command[Any]:
        """Emulate tool execution using LLM if tool should be emulated.

        Args:
            request: Tool call request to potentially emulate.
            handler: Callback to execute the tool (can be called multiple times).

        Returns:
            ToolMessage with emulated response if tool should be emulated,
            otherwise calls handler for normal execution.
        """
        tool_name = request.tool_call["name"]
        if not self._should_emulate(tool_name):
            # Not selected for emulation: execute the real tool.
            return handler(request)

        response = self.model.invoke([HumanMessage(self._emulation_prompt(request))])

        # Short-circuit: return emulated result without executing real tool
        return ToolMessage(
            content=response.content,
            tool_call_id=request.tool_call["id"],
            name=tool_name,
        )

    async def awrap_tool_call(
        self,
        request: ToolCallRequest,
        handler: Callable[[ToolCallRequest], Awaitable[ToolMessage | Command[Any]]],
    ) -> ToolMessage | Command[Any]:
        """Async version of `wrap_tool_call`.

        Emulate tool execution using LLM if tool should be emulated.

        Args:
            request: Tool call request to potentially emulate.
            handler: Async callback to execute the tool (can be called multiple times).

        Returns:
            ToolMessage with emulated response if tool should be emulated,
            otherwise calls handler for normal execution.
        """
        tool_name = request.tool_call["name"]
        if not self._should_emulate(tool_name):
            return await handler(request)

        response = await self.model.ainvoke([HumanMessage(self._emulation_prompt(request))])

        # Short-circuit: return emulated result without executing real tool
        return ToolMessage(
            content=response.content,
            tool_call_id=request.tool_call["id"],
            name=tool_name,
        )
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/langchain/agents/middleware/tool_emulator.py",
"license": "MIT License",
"lines": 160,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/core/tests/unit_tests/prompts/test_string.py | import pytest
from packaging import version
from langchain_core.prompts.string import get_template_variables, mustache_schema
from langchain_core.utils.pydantic import PYDANTIC_VERSION
# Gate for schema-exact assertions: pydantic minor releases tweaked the
# generated JSON Schema output (see the skipif reason below).
PYDANTIC_VERSION_AT_LEAST_29 = version.parse("2.9") <= PYDANTIC_VERSION
@pytest.mark.skipif(
    not PYDANTIC_VERSION_AT_LEAST_29,
    reason=(
        "Only test with most recent version of pydantic. "
        "Pydantic introduced small fixes to generated JSONSchema on minor versions."
    ),
)
def test_mustache_schema_parent_child() -> None:
    # "{{x.y}} {{x}}" references both the parent object and a child field; the
    # schema nests "y" inside a $defs entry for "x" and refers to it from the
    # top-level "x" property.
    template = "{{x.y}} {{x}}"
    expected = {
        "$defs": {
            "x": {
                "properties": {"y": {"default": None, "title": "Y", "type": "string"}},
                "title": "x",
                "type": "object",
            }
        },
        "properties": {"x": {"$ref": "#/$defs/x", "default": None}},
        "title": "PromptInput",
        "type": "object",
    }
    actual = mustache_schema(template).model_json_schema()
    assert expected == actual
def test_get_template_variables_mustache_nested() -> None:
    template = "Hello {{user.name}}, your role is {{user.role}}"
    template_format = "mustache"
    # Dotted mustache paths collapse to their top-level variable name.
    assert get_template_variables(template, template_format) == ["user"]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/core/tests/unit_tests/prompts/test_string.py",
"license": "MIT License",
"lines": 35,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/langchain_v1/langchain/agents/middleware/context_editing.py | """Context editing middleware.
Mirrors Anthropic's context editing capabilities by clearing older tool results once the
conversation grows beyond a configurable token threshold.
The implementation is intentionally model-agnostic so it can be used with any LangChain
chat model.
"""
from __future__ import annotations
from collections.abc import Awaitable, Callable, Iterable, Sequence
from copy import deepcopy
from dataclasses import dataclass
from typing import Literal
from langchain_core.messages import (
AIMessage,
AnyMessage,
BaseMessage,
ToolMessage,
)
from langchain_core.messages.utils import count_tokens_approximately
from typing_extensions import Protocol
from langchain.agents.middleware.types import (
AgentMiddleware,
AgentState,
ContextT,
ModelRequest,
ModelResponse,
ResponseT,
)
# Placeholder text substituted for tool outputs that have been cleared.
DEFAULT_TOOL_PLACEHOLDER = "[cleared]"
# Signature of a token-counting callable over a message sequence.
TokenCounter = Callable[
    [Sequence[BaseMessage]],
    int,
]
class ContextEdit(Protocol):
    """Protocol describing a context editing strategy."""
    def apply(
        self,
        messages: list[AnyMessage],
        *,
        count_tokens: TokenCounter,
    ) -> None:
        """Apply an edit to the message list in place.

        Args:
            messages: Conversation history to edit; mutated in place.
            count_tokens: Callable used to measure the token size of the
                current message list.
        """
        ...
@dataclass(slots=True)
class ClearToolUsesEdit(ContextEdit):
    """Configuration for clearing tool outputs when token limits are exceeded."""
    trigger: int = 100_000
    """Token count that triggers the edit."""
    clear_at_least: int = 0
    """Minimum number of tokens to reclaim when the edit runs."""
    keep: int = 3
    """Number of most recent tool results that must be preserved."""
    clear_tool_inputs: bool = False
    """Whether to clear the originating tool call parameters on the AI message."""
    exclude_tools: Sequence[str] = ()
    """List of tool names to exclude from clearing."""
    placeholder: str = DEFAULT_TOOL_PLACEHOLDER
    """Placeholder text inserted for cleared tool outputs."""
    def apply(
        self,
        messages: list[AnyMessage],
        *,
        count_tokens: TokenCounter,
    ) -> None:
        """Apply the clear-tool-uses strategy.

        Args:
            messages: Conversation history, mutated in place.
            count_tokens: Callable used to measure the token footprint of the
                message list before and (when `clear_at_least` is set) after
                clearing.
        """
        tokens = count_tokens(messages)
        # Nothing to do until the configured token threshold is exceeded.
        if tokens <= self.trigger:
            return
        # Collect (index, message) pairs for every tool result, oldest first.
        candidates = [
            (idx, msg) for idx, msg in enumerate(messages) if isinstance(msg, ToolMessage)
        ]
        # Always preserve the `keep` most recent tool results.
        if self.keep >= len(candidates):
            candidates = []
        elif self.keep:
            candidates = candidates[: -self.keep]
        cleared_tokens = 0
        excluded_tools = set(self.exclude_tools)
        for idx, tool_message in candidates:
            # Skip results that a previous run already cleared.
            if tool_message.response_metadata.get("context_editing", {}).get("cleared"):
                continue
            # Find the most recent AI message before this tool result; it holds
            # the tool call that produced the result.
            ai_message = next(
                (m for m in reversed(messages[:idx]) if isinstance(m, AIMessage)), None
            )
            if ai_message is None:
                continue
            tool_call = next(
                (
                    call
                    for call in ai_message.tool_calls
                    if call.get("id") == tool_message.tool_call_id
                ),
                None,
            )
            if tool_call is None:
                continue
            # Honor the exclusion list; match on the tool result's name, falling
            # back to the originating call's name.
            if (tool_message.name or tool_call["name"]) in excluded_tools:
                continue
            # Replace the tool output with the placeholder and mark it cleared
            # so later runs leave it alone.
            messages[idx] = tool_message.model_copy(
                update={
                    "artifact": None,
                    "content": self.placeholder,
                    "response_metadata": {
                        **tool_message.response_metadata,
                        "context_editing": {
                            "cleared": True,
                            "strategy": "clear_tool_uses",
                        },
                    },
                }
            )
            if self.clear_tool_inputs:
                # Also blank out the arguments on the originating tool call.
                messages[messages.index(ai_message)] = self._build_cleared_tool_input_message(
                    ai_message,
                    tool_message.tool_call_id,
                )
            if self.clear_at_least > 0:
                # Stop early once enough tokens have been reclaimed.
                new_token_count = count_tokens(messages)
                cleared_tokens = max(0, tokens - new_token_count)
                if cleared_tokens >= self.clear_at_least:
                    break
        return
    @staticmethod
    def _build_cleared_tool_input_message(
        message: AIMessage,
        tool_call_id: str,
    ) -> AIMessage:
        """Return a copy of `message` with the given tool call's args emptied.

        The cleared tool call id is recorded under
        `response_metadata["context_editing"]["cleared_tool_inputs"]` so the
        clearing is auditable and idempotent.
        """
        updated_tool_calls = []
        cleared_any = False
        for tool_call in message.tool_calls:
            updated_call = dict(tool_call)
            if updated_call.get("id") == tool_call_id:
                updated_call["args"] = {}
                cleared_any = True
            updated_tool_calls.append(updated_call)
        metadata = dict(getattr(message, "response_metadata", {}))
        context_entry = dict(metadata.get("context_editing", {}))
        if cleared_any:
            # Keep cleared ids as a sorted list for deterministic output.
            cleared_ids = set(context_entry.get("cleared_tool_inputs", []))
            cleared_ids.add(tool_call_id)
            context_entry["cleared_tool_inputs"] = sorted(cleared_ids)
            metadata["context_editing"] = context_entry
        return message.model_copy(
            update={
                "tool_calls": updated_tool_calls,
                "response_metadata": metadata,
            }
        )
class ContextEditingMiddleware(AgentMiddleware[AgentState[ResponseT], ContextT, ResponseT]):
    """Automatically prune tool results to manage context size.
    The middleware applies a sequence of edits when the total input token count exceeds
    configured thresholds.
    Currently the `ClearToolUsesEdit` strategy is supported, aligning with Anthropic's
    `clear_tool_uses_20250919` behavior [(read more)](https://platform.claude.com/docs/en/agents-and-tools/tool-use/memory-tool).
    """
    # Edit strategies applied in order on every model call.
    edits: list[ContextEdit]
    # Token counting mode: "approximate" (fast heuristic) or "model" (exact,
    # delegated to the chat model).
    token_count_method: Literal["approximate", "model"]
    def __init__(
        self,
        *,
        edits: Iterable[ContextEdit] | None = None,
        token_count_method: Literal["approximate", "model"] = "approximate",  # noqa: S107
    ) -> None:
        """Initialize an instance of context editing middleware.
        Args:
            edits: Sequence of edit strategies to apply.
                Defaults to a single `ClearToolUsesEdit` mirroring Anthropic defaults.
            token_count_method: Whether to use approximate token counting
                (faster, less accurate) or exact counting implemented by the
                chat model (potentially slower, more accurate).
        """
        super().__init__()
        self.edits = list(edits or (ClearToolUsesEdit(),))
        self.token_count_method = token_count_method
    def wrap_model_call(
        self,
        request: ModelRequest[ContextT],
        handler: Callable[[ModelRequest[ContextT]], ModelResponse[ResponseT]],
    ) -> ModelResponse[ResponseT] | AIMessage:
        """Apply context edits before invoking the model via handler.
        Args:
            request: Model request to execute (includes state and runtime).
            handler: Callback that executes the model request and returns
                `ModelResponse`.
        Returns:
            The result of invoking the handler with potentially edited messages.
        """
        if not request.messages:
            return handler(request)
        if self.token_count_method == "approximate":  # noqa: S105
            # Fast heuristic counter; no model involvement needed.
            def count_tokens(messages: Sequence[BaseMessage]) -> int:
                return count_tokens_approximately(messages)
        else:
            # Exact counting via the model; include the system message so the
            # count reflects the full prompt that will be sent.
            system_msg = [request.system_message] if request.system_message else []
            def count_tokens(messages: Sequence[BaseMessage]) -> int:
                return request.model.get_num_tokens_from_messages(
                    system_msg + list(messages), request.tools
                )
        # Deep-copy so in-place edits cannot mutate the caller's messages.
        edited_messages = deepcopy(list(request.messages))
        for edit in self.edits:
            edit.apply(edited_messages, count_tokens=count_tokens)
        return handler(request.override(messages=edited_messages))
    async def awrap_model_call(
        self,
        request: ModelRequest[ContextT],
        handler: Callable[[ModelRequest[ContextT]], Awaitable[ModelResponse[ResponseT]]],
    ) -> ModelResponse[ResponseT] | AIMessage:
        """Apply context edits before invoking the model via handler.
        Args:
            request: Model request to execute (includes state and runtime).
            handler: Async callback that executes the model request and returns
                `ModelResponse`.
        Returns:
            The result of invoking the handler with potentially edited messages.
        """
        if not request.messages:
            return await handler(request)
        if self.token_count_method == "approximate":  # noqa: S105
            # Fast heuristic counter; no model involvement needed.
            def count_tokens(messages: Sequence[BaseMessage]) -> int:
                return count_tokens_approximately(messages)
        else:
            # Exact counting via the model; include the system message so the
            # count reflects the full prompt that will be sent.
            system_msg = [request.system_message] if request.system_message else []
            def count_tokens(messages: Sequence[BaseMessage]) -> int:
                return request.model.get_num_tokens_from_messages(
                    system_msg + list(messages), request.tools
                )
        # Deep-copy so in-place edits cannot mutate the caller's messages.
        edited_messages = deepcopy(list(request.messages))
        for edit in self.edits:
            edit.apply(edited_messages, count_tokens=count_tokens)
        return await handler(request.override(messages=edited_messages))
__all__ = [
"ClearToolUsesEdit",
"ContextEditingMiddleware",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/langchain/agents/middleware/context_editing.py",
"license": "MIT License",
"lines": 232,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/langchain_v1/langchain/agents/middleware/model_fallback.py | """Model fallback middleware for agents."""
from __future__ import annotations
from typing import TYPE_CHECKING
from langchain.agents.middleware.types import (
AgentMiddleware,
AgentState,
ContextT,
ModelRequest,
ModelResponse,
ResponseT,
)
from langchain.chat_models import init_chat_model
if TYPE_CHECKING:
from collections.abc import Awaitable, Callable
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import AIMessage
class ModelFallbackMiddleware(AgentMiddleware[AgentState[ResponseT], ContextT, ResponseT]):
    """Automatic fallback to alternative models on errors.
    Retries failed model calls with alternative models in sequence until
    success or all models exhausted. Primary model specified in `create_agent`.
    Example:
        ```python
        from langchain.agents.middleware.model_fallback import ModelFallbackMiddleware
        from langchain.agents import create_agent
        fallback = ModelFallbackMiddleware(
            "openai:gpt-4o-mini",  # Try first on error
            "anthropic:claude-sonnet-4-5-20250929",  # Then this
        )
        agent = create_agent(
            model="openai:gpt-4o",  # Primary model
            middleware=[fallback],
        )
        # If primary fails: tries gpt-4o-mini, then claude-sonnet-4-5-20250929
        result = agent.invoke({"messages": [HumanMessage("Hello")]})
        ```
    """
    def __init__(
        self,
        first_model: str | BaseChatModel,
        *additional_models: str | BaseChatModel,
    ) -> None:
        """Initialize model fallback middleware.
        Args:
            first_model: First fallback model (string name or instance).
            *additional_models: Additional fallbacks in order.
        """
        super().__init__()
        # Initialize all fallback models; string identifiers are resolved to
        # chat model instances eagerly so failures surface at construction time.
        all_models = (first_model, *additional_models)
        self.models: list[BaseChatModel] = []
        for model in all_models:
            if isinstance(model, str):
                self.models.append(init_chat_model(model))
            else:
                self.models.append(model)
    def wrap_model_call(
        self,
        request: ModelRequest[ContextT],
        handler: Callable[[ModelRequest[ContextT]], ModelResponse[ResponseT]],
    ) -> ModelResponse[ResponseT] | AIMessage:
        """Try fallback models in sequence on errors.
        Args:
            request: Initial model request.
            handler: Callback to execute the model.
        Returns:
            AIMessage from successful model call.
        Raises:
            Exception: If all models fail, re-raises last exception.
        """
        # Try primary model first
        last_exception: Exception
        try:
            return handler(request)
        except Exception as e:
            last_exception = e
        # Try fallback models in declaration order; first success wins.
        for fallback_model in self.models:
            try:
                return handler(request.override(model=fallback_model))
            except Exception as e:
                last_exception = e
                continue
        raise last_exception
    async def awrap_model_call(
        self,
        request: ModelRequest[ContextT],
        handler: Callable[[ModelRequest[ContextT]], Awaitable[ModelResponse[ResponseT]]],
    ) -> ModelResponse[ResponseT] | AIMessage:
        """Try fallback models in sequence on errors (async version).
        Args:
            request: Initial model request.
            handler: Async callback to execute the model.
        Returns:
            AIMessage from successful model call.
        Raises:
            Exception: If all models fail, re-raises last exception.
        """
        # Try primary model first
        last_exception: Exception
        try:
            return await handler(request)
        except Exception as e:
            last_exception = e
        # Try fallback models in declaration order; first success wins.
        for fallback_model in self.models:
            try:
                return await handler(request.override(model=fallback_model))
            except Exception as e:
                last_exception = e
                continue
        raise last_exception
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/langchain/agents/middleware/model_fallback.py",
"license": "MIT License",
"lines": 111,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/langchain_v1/langchain/agents/middleware/tool_selection.py | """LLM-based tool selector middleware."""
from __future__ import annotations
import logging
from dataclasses import dataclass
from typing import TYPE_CHECKING, Annotated, Any, Literal, Union
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import AIMessage, HumanMessage
from pydantic import Field, TypeAdapter
from typing_extensions import TypedDict
from langchain.agents.middleware.types import (
AgentMiddleware,
AgentState,
ContextT,
ModelRequest,
ModelResponse,
ResponseT,
)
from langchain.chat_models.base import init_chat_model
if TYPE_CHECKING:
from collections.abc import Awaitable, Callable
from langchain.tools import BaseTool
# Module-level logger following the standard per-module convention.
logger = logging.getLogger(__name__)
# Default instructions given to the selection model when the caller does not
# supply a custom system prompt.
DEFAULT_SYSTEM_PROMPT = (
    "Your goal is to select the most relevant tools for answering the user's query."
)
@dataclass
class _SelectionRequest:
    """Prepared inputs for tool selection."""
    # Tools eligible for selection (always-included tools already removed).
    available_tools: list[BaseTool]
    # System prompt for the selection model (may include max_tools guidance).
    system_message: str
    # Most recent user turn; the query the selection is based on.
    last_user_message: HumanMessage
    # Model used to perform the selection (selector model or agent's model).
    model: BaseChatModel
    # Names of ``available_tools``, used to validate the model's response.
    valid_tool_names: list[str]
def _create_tool_selection_response(tools: list[BaseTool]) -> TypeAdapter[Any]:
    """Build the structured-output schema used for tool selection.

    Args:
        tools: Available tools to include in the schema.

    Returns:
        `TypeAdapter` for a schema whose `tools` field is a list of `Literal`
        tool names, each annotated with the tool's description.

    Raises:
        AssertionError: If `tools` is empty.
    """
    if not tools:
        msg = "Invalid usage: tools must be non-empty"
        raise AssertionError(msg)
    # Each tool becomes Annotated[Literal[name], Field(description=...)]; the
    # union of these literals is the closed set of valid selections.
    annotated_names = []
    for tool in tools:
        annotated_names.append(
            Annotated[Literal[tool.name], Field(description=tool.description)]
        )
    selection_union = Union[tuple(annotated_names)]  # type: ignore[valid-type] # noqa: UP007
    description = "Tools to use. Place the most relevant tools first."
    class ToolSelectionResponse(TypedDict):
        """Use to select relevant tools."""
        tools: Annotated[list[selection_union], Field(description=description)]  # type: ignore[valid-type]
    return TypeAdapter(ToolSelectionResponse)
def _render_tool_list(tools: list[BaseTool]) -> str:
"""Format tools as markdown list.
Args:
tools: Tools to format.
Returns:
Markdown string with each tool on a new line.
"""
return "\n".join(f"- {tool.name}: {tool.description}" for tool in tools)
class LLMToolSelectorMiddleware(AgentMiddleware[AgentState[ResponseT], ContextT, ResponseT]):
    """Uses an LLM to select relevant tools before calling the main model.
    When an agent has many tools available, this middleware filters them down
    to only the most relevant ones for the user's query. This reduces token usage
    and helps the main model focus on the right tools.
    Examples:
        !!! example "Limit to 3 tools"
            ```python
            from langchain.agents.middleware import LLMToolSelectorMiddleware
            middleware = LLMToolSelectorMiddleware(max_tools=3)
            agent = create_agent(
                model="openai:gpt-4o",
                tools=[tool1, tool2, tool3, tool4, tool5],
                middleware=[middleware],
            )
            ```
        !!! example "Use a smaller model for selection"
            ```python
            middleware = LLMToolSelectorMiddleware(model="openai:gpt-4o-mini", max_tools=2)
            ```
    """
    def __init__(
        self,
        *,
        model: str | BaseChatModel | None = None,
        system_prompt: str = DEFAULT_SYSTEM_PROMPT,
        max_tools: int | None = None,
        always_include: list[str] | None = None,
    ) -> None:
        """Initialize the tool selector.
        Args:
            model: Model to use for selection.
                If not provided, uses the agent's main model.
                Can be a model identifier string or `BaseChatModel` instance.
            system_prompt: Instructions for the selection model.
            max_tools: Maximum number of tools to select.
                If the model selects more, only the first `max_tools` will be used.
                If not specified, there is no limit.
            always_include: Tool names to always include regardless of selection.
                These do not count against the `max_tools` limit.
        """
        super().__init__()
        self.system_prompt = system_prompt
        self.max_tools = max_tools
        self.always_include = always_include or []
        # Model instances (and None, meaning "use the agent's model") are kept
        # as-is; string identifiers are resolved to instances eagerly.
        if isinstance(model, (BaseChatModel, type(None))):
            self.model: BaseChatModel | None = model
        else:
            self.model = init_chat_model(model)
    def _prepare_selection_request(
        self, request: ModelRequest[ContextT]
    ) -> _SelectionRequest | None:
        """Prepare inputs for tool selection.
        Args:
            request: the model request.
        Returns:
            `SelectionRequest` with prepared inputs, or `None` if no selection is
            needed.
        Raises:
            ValueError: If tools in `always_include` are not found in the request.
            AssertionError: If no user message is found in the request messages.
        """
        # If no tools available, return None
        if not request.tools or len(request.tools) == 0:
            return None
        # Filter to only BaseTool instances (exclude provider-specific tool dicts)
        base_tools = [tool for tool in request.tools if not isinstance(tool, dict)]
        # Validate that always_include tools exist
        if self.always_include:
            available_tool_names = {tool.name for tool in base_tools}
            missing_tools = [
                name for name in self.always_include if name not in available_tool_names
            ]
            if missing_tools:
                msg = (
                    f"Tools in always_include not found in request: {missing_tools}. "
                    f"Available tools: {sorted(available_tool_names)}"
                )
                raise ValueError(msg)
        # Separate tools that are always included from those available for selection
        available_tools = [tool for tool in base_tools if tool.name not in self.always_include]
        # If no tools available for selection, return None
        if not available_tools:
            return None
        system_message = self.system_prompt
        # If there's a max_tools limit, append instructions to the system prompt
        if self.max_tools is not None:
            system_message += (
                f"\nIMPORTANT: List the tool names in order of relevance, "
                f"with the most relevant first. "
                f"If you exceed the maximum number of tools, "
                f"only the first {self.max_tools} will be used."
            )
        # Get the last user message from the conversation history.
        # (for/else: the else branch fires only if no HumanMessage was found.)
        last_user_message: HumanMessage
        for message in reversed(request.messages):
            if isinstance(message, HumanMessage):
                last_user_message = message
                break
        else:
            msg = "No user message found in request messages"
            raise AssertionError(msg)
        model = self.model or request.model
        valid_tool_names = [tool.name for tool in available_tools]
        return _SelectionRequest(
            available_tools=available_tools,
            system_message=system_message,
            last_user_message=last_user_message,
            model=model,
            valid_tool_names=valid_tool_names,
        )
    def _process_selection_response(
        self,
        response: dict[str, Any],
        available_tools: list[BaseTool],
        valid_tool_names: list[str],
        request: ModelRequest[ContextT],
    ) -> ModelRequest[ContextT]:
        """Process the selection response and return filtered `ModelRequest`.

        Args:
            response: Parsed structured output containing a `"tools"` list.
            available_tools: Tools that were offered to the selection model.
            valid_tool_names: Names of `available_tools`, used for validation.
            request: Original model request to filter.

        Returns:
            Copy of `request` whose tools are the validated selection plus any
            always-included tools and provider-specific tool dicts.

        Raises:
            ValueError: If the model selected a tool name that was not offered.
        """
        selected_tool_names: list[str] = []
        invalid_tool_selections = []
        for tool_name in response["tools"]:
            if tool_name not in valid_tool_names:
                invalid_tool_selections.append(tool_name)
                continue
            # Only add if not already selected and within max_tools limit
            if tool_name not in selected_tool_names and (
                self.max_tools is None or len(selected_tool_names) < self.max_tools
            ):
                selected_tool_names.append(tool_name)
        if invalid_tool_selections:
            msg = f"Model selected invalid tools: {invalid_tool_selections}"
            raise ValueError(msg)
        # Filter tools based on selection and append always-included tools
        selected_tools: list[BaseTool] = [
            tool for tool in available_tools if tool.name in selected_tool_names
        ]
        always_included_tools: list[BaseTool] = [
            tool
            for tool in request.tools
            if not isinstance(tool, dict) and tool.name in self.always_include
        ]
        selected_tools.extend(always_included_tools)
        # Also preserve any provider-specific tool dicts from the original request
        provider_tools = [tool for tool in request.tools if isinstance(tool, dict)]
        return request.override(tools=[*selected_tools, *provider_tools])
    def wrap_model_call(
        self,
        request: ModelRequest[ContextT],
        handler: Callable[[ModelRequest[ContextT]], ModelResponse[ResponseT]],
    ) -> ModelResponse[ResponseT] | AIMessage:
        """Filter tools based on LLM selection before invoking the model via handler.
        Args:
            request: Model request to execute (includes state and runtime).
            handler: Callback that executes the model request and returns
                `ModelResponse`.
        Returns:
            The model call result.
        Raises:
            AssertionError: If the selection model response is not a dict.
        """
        selection_request = self._prepare_selection_request(request)
        if selection_request is None:
            return handler(request)
        # Create dynamic response model with Literal enum of available tool names
        type_adapter = _create_tool_selection_response(selection_request.available_tools)
        schema = type_adapter.json_schema()
        structured_model = selection_request.model.with_structured_output(schema)
        response = structured_model.invoke(
            [
                {"role": "system", "content": selection_request.system_message},
                selection_request.last_user_message,
            ]
        )
        # Response should be a dict since we're passing a schema (not a Pydantic model class)
        if not isinstance(response, dict):
            msg = f"Expected dict response, got {type(response)}"
            raise AssertionError(msg)  # noqa: TRY004
        modified_request = self._process_selection_response(
            response, selection_request.available_tools, selection_request.valid_tool_names, request
        )
        return handler(modified_request)
    async def awrap_model_call(
        self,
        request: ModelRequest[ContextT],
        handler: Callable[[ModelRequest[ContextT]], Awaitable[ModelResponse[ResponseT]]],
    ) -> ModelResponse[ResponseT] | AIMessage:
        """Filter tools based on LLM selection before invoking the model via handler.
        Args:
            request: Model request to execute (includes state and runtime).
            handler: Async callback that executes the model request and returns
                `ModelResponse`.
        Returns:
            The model call result.
        Raises:
            AssertionError: If the selection model response is not a dict.
        """
        selection_request = self._prepare_selection_request(request)
        if selection_request is None:
            return await handler(request)
        # Create dynamic response model with Literal enum of available tool names
        type_adapter = _create_tool_selection_response(selection_request.available_tools)
        schema = type_adapter.json_schema()
        structured_model = selection_request.model.with_structured_output(schema)
        response = await structured_model.ainvoke(
            [
                {"role": "system", "content": selection_request.system_message},
                selection_request.last_user_message,
            ]
        )
        # Response should be a dict since we're passing a schema (not a Pydantic model class)
        if not isinstance(response, dict):
            msg = f"Expected dict response, got {type(response)}"
            raise AssertionError(msg)  # noqa: TRY004
        modified_request = self._process_selection_response(
            response, selection_request.available_tools, selection_request.valid_tool_names, request
        )
        return await handler(modified_request)
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/langchain/agents/middleware/tool_selection.py",
"license": "MIT License",
"lines": 283,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/langchain_v1/langchain/agents/middleware/pii.py | """PII detection and handling middleware for agents."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Literal
from langchain_core.messages import AIMessage, AnyMessage, HumanMessage, ToolMessage
from typing_extensions import override
from langchain.agents.middleware._redaction import (
PIIDetectionError,
PIIMatch,
RedactionRule,
ResolvedRedactionRule,
apply_strategy,
detect_credit_card,
detect_email,
detect_ip,
detect_mac_address,
detect_url,
)
from langchain.agents.middleware.types import (
AgentMiddleware,
AgentState,
ContextT,
ResponseT,
hook_config,
)
if TYPE_CHECKING:
from collections.abc import Callable
from langgraph.runtime import Runtime
class PIIMiddleware(AgentMiddleware[AgentState[ResponseT], ContextT, ResponseT]):
    """Detect and handle Personally Identifiable Information (PII) in conversations.
    This middleware detects common PII types and applies configurable strategies
    to handle them. It can detect emails, credit cards, IP addresses, MAC addresses, and
    URLs in both user input and agent output.
    Built-in PII types:
    - `email`: Email addresses
    - `credit_card`: Credit card numbers (validated with Luhn algorithm)
    - `ip`: IP addresses (validated with stdlib)
    - `mac_address`: MAC addresses
    - `url`: URLs (both `http`/`https` and bare URLs)
    Strategies:
    - `block`: Raise an exception when PII is detected
    - `redact`: Replace PII with `[REDACTED_TYPE]` placeholders
    - `mask`: Partially mask PII (e.g., `****-****-****-1234` for credit card)
    - `hash`: Replace PII with deterministic hash (e.g., `<email_hash:a1b2c3d4>`)
    Strategy Selection Guide:
    | Strategy | Preserves Identity? | Best For                                |
    | -------- | ------------------- | --------------------------------------- |
    | `block`  | N/A                 | Avoid PII completely                    |
    | `redact` | No                  | General compliance, log sanitization    |
    | `mask`   | No                  | Human readability, customer service UIs |
    | `hash`   | Yes (pseudonymous)  | Analytics, debugging                    |
    Example:
        ```python
        from langchain.agents.middleware import PIIMiddleware
        from langchain.agents import create_agent
        # Redact all emails in user input
        agent = create_agent(
            "openai:gpt-5",
            middleware=[
                PIIMiddleware("email", strategy="redact"),
            ],
        )
        # Use different strategies for different PII types
        agent = create_agent(
            "openai:gpt-4o",
            middleware=[
                PIIMiddleware("credit_card", strategy="mask"),
                PIIMiddleware("url", strategy="redact"),
                PIIMiddleware("ip", strategy="hash"),
            ],
        )
        # Custom PII type with regex
        agent = create_agent(
            "openai:gpt-5",
            middleware=[
                PIIMiddleware("api_key", detector=r"sk-[a-zA-Z0-9]{32}", strategy="block"),
            ],
        )
        ```
    """
    def __init__(
        self,
        # From a typing point of view, the literals are covered by 'str'.
        # Nonetheless, we escape PYI051 to keep hints and autocompletion for the caller.
        pii_type: Literal["email", "credit_card", "ip", "mac_address", "url"] | str,  # noqa: PYI051
        *,
        strategy: Literal["block", "redact", "mask", "hash"] = "redact",
        detector: Callable[[str], list[PIIMatch]] | str | None = None,
        apply_to_input: bool = True,
        apply_to_output: bool = False,
        apply_to_tool_results: bool = False,
    ) -> None:
        """Initialize the PII detection middleware.
        Args:
            pii_type: Type of PII to detect.
                Can be a built-in type (`email`, `credit_card`, `ip`, `mac_address`,
                `url`) or a custom type name.
            strategy: How to handle detected PII.
                Options:
                * `block`: Raise `PIIDetectionError` when PII is detected
                * `redact`: Replace with `[REDACTED_TYPE]` placeholders
                * `mask`: Partially mask PII (show last few characters)
                * `hash`: Replace with deterministic hash (format: `<type_hash:digest>`)
            detector: Custom detector function or regex pattern.
                * If `Callable`: Function that takes content string and returns
                    list of `PIIMatch` objects
                * If `str`: Regex pattern to match PII
                * If `None`: Uses built-in detector for the `pii_type`
            apply_to_input: Whether to check user messages before model call.
            apply_to_output: Whether to check AI messages after model call.
            apply_to_tool_results: Whether to check tool result messages after tool execution.
        Raises:
            ValueError: If `pii_type` is not built-in and no detector is provided.
        """
        super().__init__()
        self.apply_to_input = apply_to_input
        self.apply_to_output = apply_to_output
        self.apply_to_tool_results = apply_to_tool_results
        # Rule resolution validates the pii_type/detector combination and
        # normalizes the detector into a callable.
        self._resolved_rule: ResolvedRedactionRule = RedactionRule(
            pii_type=pii_type,
            strategy=strategy,
            detector=detector,
        ).resolve()
        self.pii_type = self._resolved_rule.pii_type
        self.strategy = self._resolved_rule.strategy
        self.detector = self._resolved_rule.detector
    @property
    def name(self) -> str:
        """Name of the middleware (includes the PII type for disambiguation)."""
        return f"{self.__class__.__name__}[{self.pii_type}]"
    def _process_content(self, content: str) -> tuple[str, list[PIIMatch]]:
        """Apply the configured redaction rule to the provided content.

        Returns:
            `(sanitized_content, matches)`; content is unchanged when no PII
            matches were found.
        """
        matches = self.detector(content)
        if not matches:
            return content, []
        sanitized = apply_strategy(content, matches, self.strategy)
        return sanitized, matches
    @hook_config(can_jump_to=["end"])
    @override
    def before_model(
        self,
        state: AgentState[Any],
        runtime: Runtime[ContextT],
    ) -> dict[str, Any] | None:
        """Check user messages and tool results for PII before model invocation.
        Args:
            state: The current agent state.
            runtime: The langgraph runtime.
        Returns:
            Updated state with PII handled according to strategy, or `None` if no PII
            detected.
        Raises:
            PIIDetectionError: If PII is detected and strategy is `'block'`.
        """
        if not self.apply_to_input and not self.apply_to_tool_results:
            return None
        messages = state["messages"]
        if not messages:
            return None
        new_messages = list(messages)
        any_modified = False
        # Check user input if enabled
        if self.apply_to_input:
            # Get last user message
            last_user_msg = None
            last_user_idx = None
            for i in range(len(messages) - 1, -1, -1):
                if isinstance(messages[i], HumanMessage):
                    last_user_msg = messages[i]
                    last_user_idx = i
                    break
            if last_user_idx is not None and last_user_msg and last_user_msg.content:
                # Detect PII in message content
                content = str(last_user_msg.content)
                new_content, matches = self._process_content(content)
                if matches:
                    # Rebuild the message (same id/name) with sanitized content.
                    updated_message: AnyMessage = HumanMessage(
                        content=new_content,
                        id=last_user_msg.id,
                        name=last_user_msg.name,
                    )
                    new_messages[last_user_idx] = updated_message
                    any_modified = True
        # Check tool results if enabled
        if self.apply_to_tool_results:
            # Find the last AIMessage, then process all `ToolMessage` objects after it
            last_ai_idx = None
            for i in range(len(messages) - 1, -1, -1):
                if isinstance(messages[i], AIMessage):
                    last_ai_idx = i
                    break
            if last_ai_idx is not None:
                # Get all tool messages after the last AI message
                for i in range(last_ai_idx + 1, len(messages)):
                    msg = messages[i]
                    if isinstance(msg, ToolMessage):
                        tool_msg = msg
                        if not tool_msg.content:
                            continue
                        content = str(tool_msg.content)
                        new_content, matches = self._process_content(content)
                        if not matches:
                            continue
                        # Create updated tool message
                        updated_message = ToolMessage(
                            content=new_content,
                            id=tool_msg.id,
                            name=tool_msg.name,
                            tool_call_id=tool_msg.tool_call_id,
                        )
                        new_messages[i] = updated_message
                        any_modified = True
        if any_modified:
            return {"messages": new_messages}
        return None
    @hook_config(can_jump_to=["end"])
    async def abefore_model(
        self,
        state: AgentState[Any],
        runtime: Runtime[ContextT],
    ) -> dict[str, Any] | None:
        """Async check user messages and tool results for PII before model invocation.

        Delegates to the synchronous implementation (detection is CPU-only).
        Args:
            state: The current agent state.
            runtime: The langgraph runtime.
        Returns:
            Updated state with PII handled according to strategy, or `None` if no PII
            detected.
        Raises:
            PIIDetectionError: If PII is detected and strategy is `'block'`.
        """
        return self.before_model(state, runtime)
    @override
    def after_model(
        self,
        state: AgentState[Any],
        runtime: Runtime[ContextT],
    ) -> dict[str, Any] | None:
        """Check AI messages for PII after model invocation.
        Args:
            state: The current agent state.
            runtime: The langgraph runtime.
        Returns:
            Updated state with PII handled according to strategy, or None if no PII
            detected.
        Raises:
            PIIDetectionError: If PII is detected and strategy is `'block'`.
        """
        if not self.apply_to_output:
            return None
        messages = state["messages"]
        if not messages:
            return None
        # Get last AI message
        last_ai_msg = None
        last_ai_idx = None
        for i in range(len(messages) - 1, -1, -1):
            msg = messages[i]
            if isinstance(msg, AIMessage):
                last_ai_msg = msg
                last_ai_idx = i
                break
        if last_ai_idx is None or not last_ai_msg or not last_ai_msg.content:
            return None
        # Detect PII in message content
        content = str(last_ai_msg.content)
        new_content, matches = self._process_content(content)
        if not matches:
            return None
        # Create updated message (tool calls preserved so execution continues).
        updated_message = AIMessage(
            content=new_content,
            id=last_ai_msg.id,
            name=last_ai_msg.name,
            tool_calls=last_ai_msg.tool_calls,
        )
        # Return updated messages
        new_messages = list(messages)
        new_messages[last_ai_idx] = updated_message
        return {"messages": new_messages}
    async def aafter_model(
        self,
        state: AgentState[Any],
        runtime: Runtime[ContextT],
    ) -> dict[str, Any] | None:
        """Async check AI messages for PII after model invocation.

        Delegates to the synchronous implementation (detection is CPU-only).
        Args:
            state: The current agent state.
            runtime: The langgraph runtime.
        Returns:
            Updated state with PII handled according to strategy, or None if no PII
            detected.
        Raises:
            PIIDetectionError: If PII is detected and strategy is `'block'`.
        """
        return self.after_model(state, runtime)
__all__ = [
"PIIDetectionError",
"PIIMatch",
"PIIMiddleware",
"detect_credit_card",
"detect_email",
"detect_ip",
"detect_mac_address",
"detect_url",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/langchain/agents/middleware/pii.py",
"license": "MIT License",
"lines": 305,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/langchain/langchain_classic/adapters/openai.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.adapters.openai import (
Chat,
ChatCompletion,
ChatCompletionChunk,
ChatCompletions,
Choice,
ChoiceChunk,
Completions,
IndexableBaseModel,
chat,
convert_dict_to_message,
convert_message_to_dict,
convert_messages_for_finetuning,
convert_openai_messages,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
MODULE_LOOKUP = {
"IndexableBaseModel": "langchain_community.adapters.openai",
"Choice": "langchain_community.adapters.openai",
"ChatCompletions": "langchain_community.adapters.openai",
"ChoiceChunk": "langchain_community.adapters.openai",
"ChatCompletionChunk": "langchain_community.adapters.openai",
"convert_dict_to_message": "langchain_community.adapters.openai",
"convert_message_to_dict": "langchain_community.adapters.openai",
"convert_openai_messages": "langchain_community.adapters.openai",
"ChatCompletion": "langchain_community.adapters.openai",
"convert_messages_for_finetuning": "langchain_community.adapters.openai",
"Completions": "langchain_community.adapters.openai",
"Chat": "langchain_community.adapters.openai",
"chat": "langchain_community.adapters.openai",
}
_import_attribute = create_importer(__file__, deprecated_lookups=MODULE_LOOKUP)
def __getattr__(name: str) -> Any:
    """Look up attributes dynamically.

    Delegates to the deprecation-aware importer so that accessing a name
    listed in ``MODULE_LOOKUP`` imports it from ``langchain_community``
    (emitting a deprecation warning along the way).
    """
    return _import_attribute(name)
__all__ = [
"Chat",
"ChatCompletion",
"ChatCompletionChunk",
"ChatCompletions",
"Choice",
"ChoiceChunk",
"Completions",
"IndexableBaseModel",
"chat",
"convert_dict_to_message",
"convert_message_to_dict",
"convert_messages_for_finetuning",
"convert_openai_messages",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/adapters/openai.py",
"license": "MIT License",
"lines": 55,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/agent.py | """Chain that takes in an input and produces an action and action input."""
from __future__ import annotations
import asyncio
import builtins
import contextlib
import json
import logging
import time
from abc import abstractmethod
from collections.abc import AsyncIterator, Callable, Iterator, Sequence
from pathlib import Path
from typing import (
Any,
cast,
)
import yaml
from langchain_core._api import deprecated
from langchain_core.agents import AgentAction, AgentFinish, AgentStep
from langchain_core.callbacks import (
AsyncCallbackManagerForChainRun,
AsyncCallbackManagerForToolRun,
BaseCallbackManager,
CallbackManagerForChainRun,
CallbackManagerForToolRun,
Callbacks,
)
from langchain_core.exceptions import OutputParserException
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import BaseMessage
from langchain_core.output_parsers import BaseOutputParser
from langchain_core.prompts import BasePromptTemplate
from langchain_core.prompts.few_shot import FewShotPromptTemplate
from langchain_core.prompts.prompt import PromptTemplate
from langchain_core.runnables import Runnable, RunnableConfig, ensure_config
from langchain_core.runnables.utils import AddableDict
from langchain_core.tools import BaseTool
from langchain_core.utils.input import get_color_mapping
from pydantic import BaseModel, ConfigDict, model_validator
from typing_extensions import Self, override
from langchain_classic._api.deprecation import AGENT_DEPRECATION_WARNING
from langchain_classic.agents.agent_iterator import AgentExecutorIterator
from langchain_classic.agents.agent_types import AgentType
from langchain_classic.agents.tools import InvalidTool
from langchain_classic.chains.base import Chain
from langchain_classic.chains.llm import LLMChain
from langchain_classic.utilities.asyncio import asyncio_timeout
logger = logging.getLogger(__name__)
class BaseSingleActionAgent(BaseModel):
    """Base Single Action Agent class."""

    @property
    def return_values(self) -> list[str]:
        """Return values of the agent."""
        return ["output"]

    def get_allowed_tools(self) -> list[str] | None:
        """Get allowed tools."""
        return None

    @abstractmethod
    def plan(
        self,
        intermediate_steps: list[tuple[AgentAction, str]],
        callbacks: Callbacks = None,
        **kwargs: Any,
    ) -> AgentAction | AgentFinish:
        """Given input, decided what to do.

        Args:
            intermediate_steps: Steps the LLM has taken to date,
                along with observations.
            callbacks: Callbacks to run.
            **kwargs: User inputs.

        Returns:
            Action specifying what tool to use.
        """

    @abstractmethod
    async def aplan(
        self,
        intermediate_steps: list[tuple[AgentAction, str]],
        callbacks: Callbacks = None,
        **kwargs: Any,
    ) -> AgentAction | AgentFinish:
        """Async given input, decided what to do.

        Args:
            intermediate_steps: Steps the LLM has taken to date,
                along with observations.
            callbacks: Callbacks to run.
            **kwargs: User inputs.

        Returns:
            Action specifying what tool to use.
        """

    @property
    @abstractmethod
    def input_keys(self) -> list[str]:
        """Return the input keys."""

    def return_stopped_response(
        self,
        early_stopping_method: str,
        intermediate_steps: list[tuple[AgentAction, str]],  # noqa: ARG002
        **_: Any,
    ) -> AgentFinish:
        """Return response when agent has been stopped due to max iterations.

        Args:
            early_stopping_method: Method to use for early stopping.
            intermediate_steps: Steps the LLM has taken to date,
                along with observations.

        Returns:
            Agent finish object.

        Raises:
            ValueError: If `early_stopping_method` is not supported.
        """
        if early_stopping_method == "force":
            # `force` just returns a constant string
            return AgentFinish(
                {"output": "Agent stopped due to iteration limit or time limit."},
                "",
            )
        msg = f"Got unsupported early_stopping_method `{early_stopping_method}`"
        raise ValueError(msg)

    @classmethod
    def from_llm_and_tools(
        cls,
        llm: BaseLanguageModel,
        tools: Sequence[BaseTool],
        callback_manager: BaseCallbackManager | None = None,
        **kwargs: Any,
    ) -> BaseSingleActionAgent:
        """Construct an agent from an LLM and tools.

        Args:
            llm: Language model to use.
            tools: Tools to use.
            callback_manager: Callback manager to use.
            kwargs: Additional arguments.

        Returns:
            Agent object.
        """
        raise NotImplementedError

    @property
    def _agent_type(self) -> str:
        """Return Identifier of an agent type."""
        raise NotImplementedError

    @override
    def dict(self, **kwargs: Any) -> builtins.dict:
        """Return dictionary representation of agent.

        Returns:
            Dictionary representation of agent.
        """
        _dict = super().model_dump()
        try:
            _type = self._agent_type
        except NotImplementedError:
            _type = None
        if isinstance(_type, AgentType):
            _dict["_type"] = str(_type.value)
        elif _type is not None:
            _dict["_type"] = _type
        return _dict

    def save(self, file_path: Path | str) -> None:
        """Save the agent.

        Args:
            file_path: Path to file to save the agent to.

        Raises:
            NotImplementedError: If the agent does not support saving.
            ValueError: If `file_path` is not json or yaml.

        Example:
            ```python
            # If working with agent executor
            agent.agent.save(file_path="path/agent.yaml")
            ```
        """
        # Convert file to Path object.
        save_path = Path(file_path) if isinstance(file_path, str) else file_path
        # Fetch the dictionary to save *before* touching the filesystem so an
        # unsavable agent fails without leaving an empty directory behind
        # (consistent with BaseMultiActionAgent.save).
        agent_dict = self.dict()
        if "_type" not in agent_dict:
            msg = f"Agent {self} does not support saving"
            raise NotImplementedError(msg)
        directory_path = save_path.parent
        directory_path.mkdir(parents=True, exist_ok=True)
        if save_path.suffix == ".json":
            with save_path.open("w") as f:
                json.dump(agent_dict, f, indent=4)
        elif save_path.suffix.endswith((".yaml", ".yml")):
            with save_path.open("w") as f:
                yaml.dump(agent_dict, f, default_flow_style=False)
        else:
            msg = f"{save_path} must be json or yaml"
            raise ValueError(msg)

    def tool_run_logging_kwargs(self) -> builtins.dict:
        """Return logging kwargs for tool run."""
        return {}
class BaseMultiActionAgent(BaseModel):
    """Base Multi Action Agent class."""

    @property
    def return_values(self) -> list[str]:
        """Return values of the agent."""
        return ["output"]

    def get_allowed_tools(self) -> list[str] | None:
        """Get allowed tools.

        Returns:
            Allowed tools.
        """
        return None

    @abstractmethod
    def plan(
        self,
        intermediate_steps: list[tuple[AgentAction, str]],
        callbacks: Callbacks = None,
        **kwargs: Any,
    ) -> list[AgentAction] | AgentFinish:
        """Given input, decided what to do.

        Args:
            intermediate_steps: Steps the LLM has taken to date,
                along with the observations.
            callbacks: Callbacks to run.
            **kwargs: User inputs.

        Returns:
            Actions specifying what tool to use.
        """

    @abstractmethod
    async def aplan(
        self,
        intermediate_steps: list[tuple[AgentAction, str]],
        callbacks: Callbacks = None,
        **kwargs: Any,
    ) -> list[AgentAction] | AgentFinish:
        """Async given input, decided what to do.

        Args:
            intermediate_steps: Steps the LLM has taken to date,
                along with the observations.
            callbacks: Callbacks to run.
            **kwargs: User inputs.

        Returns:
            Actions specifying what tool to use.
        """

    @property
    @abstractmethod
    def input_keys(self) -> list[str]:
        """Return the input keys."""

    def return_stopped_response(
        self,
        early_stopping_method: str,
        intermediate_steps: list[tuple[AgentAction, str]],  # noqa: ARG002
        **_: Any,
    ) -> AgentFinish:
        """Return response when agent has been stopped due to max iterations.

        Args:
            early_stopping_method: Method to use for early stopping.
            intermediate_steps: Steps the LLM has taken to date,
                along with observations.

        Returns:
            Agent finish object.

        Raises:
            ValueError: If `early_stopping_method` is not supported.
        """
        if early_stopping_method == "force":
            # `force` just returns a constant string
            return AgentFinish({"output": "Agent stopped due to max iterations."}, "")
        msg = f"Got unsupported early_stopping_method `{early_stopping_method}`"
        raise ValueError(msg)

    @property
    def _agent_type(self) -> str:
        """Return Identifier of an agent type."""
        raise NotImplementedError

    @override
    def dict(self, **kwargs: Any) -> builtins.dict:
        """Return dictionary representation of agent."""
        _dict = super().model_dump()
        # Agents without a registered type simply omit the "_type" entry,
        # which in turn makes save() below reject them.
        with contextlib.suppress(NotImplementedError):
            _dict["_type"] = str(self._agent_type)
        return _dict

    def save(self, file_path: Path | str) -> None:
        """Save the agent.

        Args:
            file_path: Path to file to save the agent to.

        Raises:
            NotImplementedError: If agent does not support saving.
            ValueError: If `file_path` is not json or yaml.

        Example:
            ```python
            # If working with agent executor
            agent.agent.save(file_path="path/agent.yaml")
            ```
        """
        # Convert file to Path object.
        save_path = Path(file_path) if isinstance(file_path, str) else file_path
        # Fetch dictionary to save
        agent_dict = self.dict()
        if "_type" not in agent_dict:
            msg = f"Agent {self} does not support saving."
            raise NotImplementedError(msg)
        # Only create directories once we know the agent is actually savable.
        directory_path = save_path.parent
        directory_path.mkdir(parents=True, exist_ok=True)
        if save_path.suffix == ".json":
            with save_path.open("w") as f:
                json.dump(agent_dict, f, indent=4)
        elif save_path.suffix.endswith((".yaml", ".yml")):
            with save_path.open("w") as f:
                yaml.dump(agent_dict, f, default_flow_style=False)
        else:
            msg = f"{save_path} must be json or yaml"
            raise ValueError(msg)

    def tool_run_logging_kwargs(self) -> builtins.dict:
        """Return logging kwargs for tool run."""
        return {}
class AgentOutputParser(BaseOutputParser[AgentAction | AgentFinish]):
    """Parser turning raw model text into a single action or a final answer."""

    @abstractmethod
    def parse(self, text: str) -> AgentAction | AgentFinish:
        """Interpret `text` as either an `AgentAction` or an `AgentFinish`.

        Args:
            text: Raw LLM output to interpret.

        Returns:
            The next action to take, or the finish signal.
        """
class MultiActionAgentOutputParser(
    BaseOutputParser[list[AgentAction] | AgentFinish],
):
    """Parser turning raw model text into agent actions or a final answer.

    Intended for agents capable of returning several actions at once.
    """

    @abstractmethod
    def parse(self, text: str) -> list[AgentAction] | AgentFinish:
        """Interpret `text` as a list of actions or a finish signal.

        Args:
            text: Raw LLM output to interpret.

        Returns:
            List of agent actions or agent finish.
        """
class RunnableAgent(BaseSingleActionAgent):
    """Single-action agent whose decision logic is a ``Runnable``."""

    runnable: Runnable[dict, AgentAction | AgentFinish]
    """Runnable to call to get agent action."""
    input_keys_arg: list[str] = []
    return_keys_arg: list[str] = []
    stream_runnable: bool = True
    """Whether to stream from the runnable or not.

    If `True` the underlying LLM is invoked in streaming mode so individual
    tokens are visible through `stream_log` on the `AgentExecutor`. If
    `False` the runnable is invoked in one shot and per-token output is not
    available in stream_log.
    """
    model_config = ConfigDict(
        arbitrary_types_allowed=True,
    )

    @property
    def return_values(self) -> list[str]:
        """Keys this agent returns."""
        return self.return_keys_arg

    @property
    def input_keys(self) -> list[str]:
        """Keys this agent expects as input."""
        return self.input_keys_arg

    def plan(
        self,
        intermediate_steps: list[tuple[AgentAction, str]],
        callbacks: Callbacks = None,
        **kwargs: Any,
    ) -> AgentAction | AgentFinish:
        """Decide the next step from past history and current inputs.

        Args:
            intermediate_steps: Steps the LLM has taken to date,
                along with the observations.
            callbacks: Callbacks to run.
            **kwargs: User inputs.

        Returns:
            Action specifying what tool to use.
        """
        runnable_input = dict(kwargs, intermediate_steps=intermediate_steps)
        if not self.stream_runnable:
            return self.runnable.invoke(
                runnable_input,
                config={"callbacks": callbacks},
            )
        # Stream so the underlying LLM emits tokens incrementally (visible
        # via stream_log on the AgentExecutor). The plan result is not a
        # generator, so the streamed chunks are summed back into one output.
        accumulated: Any = None
        for piece in self.runnable.stream(
            runnable_input,
            config={"callbacks": callbacks},
        ):
            accumulated = piece if accumulated is None else accumulated + piece
        return accumulated

    async def aplan(
        self,
        intermediate_steps: list[tuple[AgentAction, str]],
        callbacks: Callbacks = None,
        **kwargs: Any,
    ) -> AgentAction | AgentFinish:
        """Async decide the next step from past history and current inputs.

        Args:
            intermediate_steps: Steps the LLM has taken to date,
                along with observations.
            callbacks: Callbacks to run.
            **kwargs: User inputs.

        Returns:
            Action specifying what tool to use.
        """
        runnable_input = dict(kwargs, intermediate_steps=intermediate_steps)
        if not self.stream_runnable:
            return await self.runnable.ainvoke(
                runnable_input,
                config={"callbacks": callbacks},
            )
        # Same accumulation strategy as plan(), using the async stream.
        accumulated: Any = None
        async for piece in self.runnable.astream(
            runnable_input,
            config={"callbacks": callbacks},
        ):
            accumulated = piece if accumulated is None else accumulated + piece
        return accumulated
class RunnableMultiActionAgent(BaseMultiActionAgent):
    """Multi-action agent whose decision logic is a ``Runnable``."""

    runnable: Runnable[dict, list[AgentAction] | AgentFinish]
    """Runnable to call to get agent actions."""
    input_keys_arg: list[str] = []
    return_keys_arg: list[str] = []
    stream_runnable: bool = True
    """Whether to stream from the runnable or not.

    If `True` the underlying LLM is invoked in streaming mode so individual
    tokens are visible through `stream_log` on the `AgentExecutor`. If
    `False` the runnable is invoked in one shot and per-token output is not
    available in stream_log.
    """
    model_config = ConfigDict(
        arbitrary_types_allowed=True,
    )

    @property
    def return_values(self) -> list[str]:
        """Keys this agent returns."""
        return self.return_keys_arg

    @property
    def input_keys(self) -> list[str]:
        """Keys this agent expects as input.

        Returns:
            List of input keys.
        """
        return self.input_keys_arg

    def plan(
        self,
        intermediate_steps: list[tuple[AgentAction, str]],
        callbacks: Callbacks = None,
        **kwargs: Any,
    ) -> list[AgentAction] | AgentFinish:
        """Decide the next step from past history and current inputs.

        Args:
            intermediate_steps: Steps the LLM has taken to date,
                along with the observations.
            callbacks: Callbacks to run.
            **kwargs: User inputs.

        Returns:
            Action specifying what tool to use.
        """
        runnable_input = dict(kwargs, intermediate_steps=intermediate_steps)
        if not self.stream_runnable:
            return self.runnable.invoke(
                runnable_input,
                config={"callbacks": callbacks},
            )
        # Stream so the underlying LLM emits tokens incrementally (visible
        # via stream_log on the AgentExecutor). The plan result is not a
        # generator, so the streamed chunks are summed back into one output.
        accumulated: Any = None
        for piece in self.runnable.stream(
            runnable_input,
            config={"callbacks": callbacks},
        ):
            accumulated = piece if accumulated is None else accumulated + piece
        return accumulated

    async def aplan(
        self,
        intermediate_steps: list[tuple[AgentAction, str]],
        callbacks: Callbacks = None,
        **kwargs: Any,
    ) -> list[AgentAction] | AgentFinish:
        """Async decide the next step from past history and current inputs.

        Args:
            intermediate_steps: Steps the LLM has taken to date,
                along with observations.
            callbacks: Callbacks to run.
            **kwargs: User inputs.

        Returns:
            Action specifying what tool to use.
        """
        runnable_input = dict(kwargs, intermediate_steps=intermediate_steps)
        if not self.stream_runnable:
            return await self.runnable.ainvoke(
                runnable_input,
                config={"callbacks": callbacks},
            )
        # Same accumulation strategy as plan(), using the async stream.
        accumulated: Any = None
        async for piece in self.runnable.astream(
            runnable_input,
            config={"callbacks": callbacks},
        ):
            accumulated = piece if accumulated is None else accumulated + piece
        return accumulated
@deprecated(
    "0.1.0",
    message=AGENT_DEPRECATION_WARNING,
    removal="1.0",
)
class LLMSingleActionAgent(BaseSingleActionAgent):
    """Base class for single action agents."""

    llm_chain: LLMChain
    """LLMChain to use for agent."""
    output_parser: AgentOutputParser
    """Output parser to use for agent."""
    stop: list[str]
    """List of strings to stop on."""

    @property
    def input_keys(self) -> list[str]:
        """Return the input keys.

        Returns:
            List of input keys.
        """
        # "intermediate_steps" is supplied by plan()/aplan(), not the caller.
        return list(set(self.llm_chain.input_keys) - {"intermediate_steps"})

    @override
    def dict(self, **kwargs: Any) -> builtins.dict:
        """Return dictionary representation of agent."""
        _dict = super().dict()
        # Exclude the output parser from the serialized representation.
        del _dict["output_parser"]
        return _dict

    def plan(
        self,
        intermediate_steps: list[tuple[AgentAction, str]],
        callbacks: Callbacks = None,
        **kwargs: Any,
    ) -> AgentAction | AgentFinish:
        """Given input, decided what to do.

        Args:
            intermediate_steps: Steps the LLM has taken to date,
                along with the observations.
            callbacks: Callbacks to run.
            **kwargs: User inputs.

        Returns:
            Action specifying what tool to use.
        """
        output = self.llm_chain.run(
            intermediate_steps=intermediate_steps,
            stop=self.stop,
            callbacks=callbacks,
            **kwargs,
        )
        return self.output_parser.parse(output)

    async def aplan(
        self,
        intermediate_steps: list[tuple[AgentAction, str]],
        callbacks: Callbacks = None,
        **kwargs: Any,
    ) -> AgentAction | AgentFinish:
        """Async given input, decided what to do.

        Args:
            intermediate_steps: Steps the LLM has taken to date,
                along with observations.
            callbacks: Callbacks to run.
            **kwargs: User inputs.

        Returns:
            Action specifying what tool to use.
        """
        output = await self.llm_chain.arun(
            intermediate_steps=intermediate_steps,
            stop=self.stop,
            callbacks=callbacks,
            **kwargs,
        )
        return self.output_parser.parse(output)

    def tool_run_logging_kwargs(self) -> builtins.dict:
        """Return logging kwargs for tool run."""
        # Use the first stop sequence (if any) as the observation prefix.
        return {
            "llm_prefix": "",
            "observation_prefix": "" if len(self.stop) == 0 else self.stop[0],
        }
@deprecated(
    "0.1.0",
    message=AGENT_DEPRECATION_WARNING,
    removal="1.0",
)
class Agent(BaseSingleActionAgent):
    """Agent that calls the language model and deciding the action.

    This is driven by a LLMChain. The prompt in the LLMChain MUST include
    a variable called "agent_scratchpad" where the agent can put its
    intermediary work.
    """

    llm_chain: LLMChain
    """LLMChain to use for agent."""
    output_parser: AgentOutputParser
    """Output parser to use for agent."""
    allowed_tools: list[str] | None = None
    """Allowed tools for the agent. If `None`, all tools are allowed."""

    @override
    def dict(self, **kwargs: Any) -> builtins.dict:
        """Return dictionary representation of agent."""
        _dict = super().dict()
        # Exclude the output parser from the serialized representation.
        del _dict["output_parser"]
        return _dict

    def get_allowed_tools(self) -> list[str] | None:
        """Get allowed tools."""
        return self.allowed_tools

    @property
    def return_values(self) -> list[str]:
        """Return values of the agent."""
        return ["output"]

    @property
    def _stop(self) -> list[str]:
        # Stop generation as soon as the model starts emitting an observation
        # line itself (with or without a leading tab).
        return [
            f"\n{self.observation_prefix.rstrip()}",
            f"\n\t{self.observation_prefix.rstrip()}",
        ]

    def _construct_scratchpad(
        self,
        intermediate_steps: list[tuple[AgentAction, str]],
    ) -> str | list[BaseMessage]:
        """Construct the scratchpad that lets the agent continue its thought process."""
        thoughts = ""
        for action, observation in intermediate_steps:
            thoughts += action.log
            thoughts += f"\n{self.observation_prefix}{observation}\n{self.llm_prefix}"
        return thoughts

    def plan(
        self,
        intermediate_steps: list[tuple[AgentAction, str]],
        callbacks: Callbacks = None,
        **kwargs: Any,
    ) -> AgentAction | AgentFinish:
        """Given input, decided what to do.

        Args:
            intermediate_steps: Steps the LLM has taken to date,
                along with observations.
            callbacks: Callbacks to run.
            **kwargs: User inputs.

        Returns:
            Action specifying what tool to use.
        """
        full_inputs = self.get_full_inputs(intermediate_steps, **kwargs)
        full_output = self.llm_chain.predict(callbacks=callbacks, **full_inputs)
        return self.output_parser.parse(full_output)

    async def aplan(
        self,
        intermediate_steps: list[tuple[AgentAction, str]],
        callbacks: Callbacks = None,
        **kwargs: Any,
    ) -> AgentAction | AgentFinish:
        """Async given input, decided what to do.

        Args:
            intermediate_steps: Steps the LLM has taken to date,
                along with observations.
            callbacks: Callbacks to run.
            **kwargs: User inputs.

        Returns:
            Action specifying what tool to use.
        """
        full_inputs = self.get_full_inputs(intermediate_steps, **kwargs)
        full_output = await self.llm_chain.apredict(callbacks=callbacks, **full_inputs)
        return await self.output_parser.aparse(full_output)

    def get_full_inputs(
        self,
        intermediate_steps: list[tuple[AgentAction, str]],
        **kwargs: Any,
    ) -> builtins.dict[str, Any]:
        """Create the full inputs for the LLMChain from intermediate steps.

        Args:
            intermediate_steps: Steps the LLM has taken to date,
                along with observations.
            **kwargs: User inputs.

        Returns:
            Full inputs for the LLMChain.
        """
        thoughts = self._construct_scratchpad(intermediate_steps)
        new_inputs = {"agent_scratchpad": thoughts, "stop": self._stop}
        return {**kwargs, **new_inputs}

    @property
    def input_keys(self) -> list[str]:
        """Return the input keys."""
        # "agent_scratchpad" is filled in by get_full_inputs, not the caller.
        return list(set(self.llm_chain.input_keys) - {"agent_scratchpad"})

    @model_validator(mode="after")
    def validate_prompt(self) -> Self:
        """Validate that prompt matches format.

        Ensures the LLMChain prompt accepts an `agent_scratchpad` variable,
        appending it to the template when missing.

        Returns:
            The validated model instance.

        Raises:
            ValueError: If `agent_scratchpad` is not in prompt.input_variables
                and prompt is not a FewShotPromptTemplate or a PromptTemplate.
        """
        prompt = self.llm_chain.prompt
        if "agent_scratchpad" not in prompt.input_variables:
            logger.warning(
                "`agent_scratchpad` should be a variable in prompt.input_variables."
                " Did not find it, so adding it at the end.",
            )
            prompt.input_variables.append("agent_scratchpad")
            if isinstance(prompt, PromptTemplate):
                prompt.template += "\n{agent_scratchpad}"
            elif isinstance(prompt, FewShotPromptTemplate):
                prompt.suffix += "\n{agent_scratchpad}"
            else:
                msg = f"Got unexpected prompt type {type(prompt)}"
                raise ValueError(msg)
        return self

    @property
    @abstractmethod
    def observation_prefix(self) -> str:
        """Prefix to append the observation with."""

    @property
    @abstractmethod
    def llm_prefix(self) -> str:
        """Prefix to append the LLM call with."""

    @classmethod
    @abstractmethod
    def create_prompt(cls, tools: Sequence[BaseTool]) -> BasePromptTemplate:
        """Create a prompt for this class.

        Args:
            tools: Tools to use.

        Returns:
            Prompt template.
        """

    @classmethod
    def _validate_tools(cls, tools: Sequence[BaseTool]) -> None:
        """Validate that appropriate tools are passed in.

        Args:
            tools: Tools to use.
        """

    @classmethod
    @abstractmethod
    def _get_default_output_parser(cls, **kwargs: Any) -> AgentOutputParser:
        """Get default output parser for this class."""

    @classmethod
    def from_llm_and_tools(
        cls,
        llm: BaseLanguageModel,
        tools: Sequence[BaseTool],
        callback_manager: BaseCallbackManager | None = None,
        output_parser: AgentOutputParser | None = None,
        **kwargs: Any,
    ) -> Agent:
        """Construct an agent from an LLM and tools.

        Args:
            llm: Language model to use.
            tools: Tools to use.
            callback_manager: Callback manager to use.
            output_parser: Output parser to use.
            kwargs: Additional arguments.

        Returns:
            Agent object.
        """
        cls._validate_tools(tools)
        llm_chain = LLMChain(
            llm=llm,
            prompt=cls.create_prompt(tools),
            callback_manager=callback_manager,
        )
        tool_names = [tool.name for tool in tools]
        _output_parser = output_parser or cls._get_default_output_parser()
        return cls(
            llm_chain=llm_chain,
            allowed_tools=tool_names,
            output_parser=_output_parser,
            **kwargs,
        )

    def return_stopped_response(
        self,
        early_stopping_method: str,
        intermediate_steps: list[tuple[AgentAction, str]],
        **kwargs: Any,
    ) -> AgentFinish:
        """Return response when agent has been stopped due to max iterations.

        Args:
            early_stopping_method: Method to use for early stopping.
            intermediate_steps: Steps the LLM has taken to date,
                along with observations.
            **kwargs: User inputs.

        Returns:
            Agent finish object.

        Raises:
            ValueError: If `early_stopping_method` is not in ['force', 'generate'].
        """
        if early_stopping_method == "force":
            # `force` just returns a constant string
            return AgentFinish(
                {"output": "Agent stopped due to iteration limit or time limit."},
                "",
            )
        if early_stopping_method == "generate":
            # Generate does one final forward pass
            thoughts = ""
            for action, observation in intermediate_steps:
                thoughts += action.log
                thoughts += (
                    f"\n{self.observation_prefix}{observation}\n{self.llm_prefix}"
                )
            # Adding to the previous steps, we now tell the LLM to make a final pred
            thoughts += (
                "\n\nI now need to return a final answer based on the previous steps:"
            )
            new_inputs = {"agent_scratchpad": thoughts, "stop": self._stop}
            full_inputs = {**kwargs, **new_inputs}
            full_output = self.llm_chain.predict(**full_inputs)
            # We try to extract a final answer
            parsed_output = self.output_parser.parse(full_output)
            if isinstance(parsed_output, AgentFinish):
                # If we can extract, we send the correct stuff
                return parsed_output
            # If we can extract, but the tool is not the final tool,
            # we just return the full output
            return AgentFinish({"output": full_output}, full_output)
        msg = (
            "early_stopping_method should be one of `force` or `generate`, "
            f"got {early_stopping_method}"
        )
        raise ValueError(msg)

    def tool_run_logging_kwargs(self) -> builtins.dict:
        """Return logging kwargs for tool run."""
        return {
            "llm_prefix": self.llm_prefix,
            "observation_prefix": self.observation_prefix,
        }
class ExceptionTool(BaseTool):
    """Tool that returns the query passed to it unchanged."""

    name: str = "_Exception"
    """Name of the tool."""
    description: str = "Exception tool"
    """Description of the tool."""

    @override
    def _run(
        self,
        query: str,
        run_manager: CallbackManagerForToolRun | None = None,
    ) -> str:
        """Echo `query` back verbatim."""
        return query

    @override
    async def _arun(
        self,
        query: str,
        run_manager: AsyncCallbackManagerForToolRun | None = None,
    ) -> str:
        """Echo `query` back verbatim (async)."""
        return query
# One iteration of the agent loop yields a mix of: a finish signal, planned
# actions, and/or executed steps carrying their observations.
NextStepOutput = list[AgentFinish | AgentAction | AgentStep]
# Agent types produced when a plain Runnable is wrapped into an agent.
RunnableAgentType = RunnableAgent | RunnableMultiActionAgent
class AgentExecutor(Chain):
"""Agent that is using tools."""
agent: BaseSingleActionAgent | BaseMultiActionAgent | Runnable
"""The agent to run for creating a plan and determining actions
to take at each step of the execution loop."""
tools: Sequence[BaseTool]
"""The valid tools the agent can call."""
return_intermediate_steps: bool = False
"""Whether to return the agent's trajectory of intermediate steps
at the end in addition to the final output."""
max_iterations: int | None = 15
"""The maximum number of steps to take before ending the execution
loop.
Setting to 'None' could lead to an infinite loop."""
max_execution_time: float | None = None
"""The maximum amount of wall clock time to spend in the execution
loop.
"""
early_stopping_method: str = "force"
"""The method to use for early stopping if the agent never
returns `AgentFinish`. Either 'force' or 'generate'.
`"force"` returns a string saying that it stopped because it met a
time or iteration limit.
`"generate"` calls the agent's LLM Chain one final time to generate
a final answer based on the previous steps.
"""
handle_parsing_errors: bool | str | Callable[[OutputParserException], str] = False
"""How to handle errors raised by the agent's output parser.
Defaults to `False`, which raises the error.
If `true`, the error will be sent back to the LLM as an observation.
If a string, the string itself will be sent to the LLM as an observation.
If a callable function, the function will be called with the exception as an
argument, and the result of that function will be passed to the agent as an
observation.
"""
trim_intermediate_steps: (
int | Callable[[list[tuple[AgentAction, str]]], list[tuple[AgentAction, str]]]
) = -1
"""How to trim the intermediate steps before returning them.
Defaults to -1, which means no trimming.
"""
@classmethod
def from_agent_and_tools(
cls,
agent: BaseSingleActionAgent | BaseMultiActionAgent | Runnable,
tools: Sequence[BaseTool],
callbacks: Callbacks = None,
**kwargs: Any,
) -> AgentExecutor:
"""Create from agent and tools.
Args:
agent: Agent to use.
tools: Tools to use.
callbacks: Callbacks to use.
kwargs: Additional arguments.
Returns:
Agent executor object.
"""
return cls(
agent=agent,
tools=tools,
callbacks=callbacks,
**kwargs,
)
@model_validator(mode="after")
def validate_tools(self) -> Self:
"""Validate that tools are compatible with agent.
Args:
values: Values to validate.
Returns:
Validated values.
Raises:
ValueError: If allowed tools are different than provided tools.
"""
agent = self.agent
tools = self.tools
allowed_tools = agent.get_allowed_tools() # type: ignore[union-attr]
if allowed_tools is not None and set(allowed_tools) != {
tool.name for tool in tools
}:
msg = (
f"Allowed tools ({allowed_tools}) different than "
f"provided tools ({[tool.name for tool in tools]})"
)
raise ValueError(msg)
return self
@model_validator(mode="before")
@classmethod
def validate_runnable_agent(cls, values: dict) -> Any:
"""Convert runnable to agent if passed in.
Args:
values: Values to validate.
Returns:
Validated values.
"""
agent = values.get("agent")
if agent and isinstance(agent, Runnable):
try:
output_type = agent.OutputType
except TypeError:
multi_action = False
except Exception:
logger.exception("Unexpected error getting OutputType from agent")
multi_action = False
else:
multi_action = output_type == list[AgentAction] | AgentFinish
stream_runnable = values.pop("stream_runnable", True)
if multi_action:
values["agent"] = RunnableMultiActionAgent(
runnable=agent,
stream_runnable=stream_runnable,
)
else:
values["agent"] = RunnableAgent(
runnable=agent,
stream_runnable=stream_runnable,
)
return values
@property
def _action_agent(self) -> BaseSingleActionAgent | BaseMultiActionAgent:
"""Type cast self.agent.
If the `agent` attribute is a Runnable, it will be converted one of
RunnableAgentType in the validate_runnable_agent root_validator.
To support instantiating with a Runnable, here we explicitly cast the type
to reflect the changes made in the root_validator.
"""
if isinstance(self.agent, Runnable):
return cast("RunnableAgentType", self.agent)
return self.agent
@override
def save(self, file_path: Path | str) -> None:
"""Raise error - saving not supported for Agent Executors.
Args:
file_path: Path to save to.
Raises:
ValueError: Saving not supported for agent executors.
"""
msg = (
"Saving not supported for agent executors. "
"If you are trying to save the agent, please use the "
"`.save_agent(...)`"
)
raise ValueError(msg)
def save_agent(self, file_path: Path | str) -> None:
"""Save the underlying agent.
Args:
file_path: Path to save to.
"""
return self._action_agent.save(file_path)
def iter(
self,
inputs: Any,
callbacks: Callbacks = None,
*,
include_run_info: bool = False,
async_: bool = False, # noqa: ARG002 arg kept for backwards compat, but ignored
) -> AgentExecutorIterator:
"""Enables iteration over steps taken to reach final output.
Args:
inputs: Inputs to the agent.
callbacks: Callbacks to run.
include_run_info: Whether to include run info.
async_: Whether to run async. (Ignored)
Returns:
Agent executor iterator object.
"""
return AgentExecutorIterator(
self,
inputs,
callbacks,
tags=self.tags,
include_run_info=include_run_info,
)
@property
def input_keys(self) -> list[str]:
"""Return the input keys."""
return self._action_agent.input_keys
@property
def output_keys(self) -> list[str]:
"""Return the singular output key."""
if self.return_intermediate_steps:
return [*self._action_agent.return_values, "intermediate_steps"]
return self._action_agent.return_values
def lookup_tool(self, name: str) -> BaseTool:
    """Look up one of this executor's tools by name.

    Args:
        name: Name of tool.

    Returns:
        The matching tool object.

    Raises:
        KeyError: If no tool with that name is registered.
    """
    tools_by_name = {tool.name: tool for tool in self.tools}
    return tools_by_name[name]
def _should_continue(self, iterations: int, time_elapsed: float) -> bool:
if self.max_iterations is not None and iterations >= self.max_iterations:
return False
return self.max_execution_time is None or time_elapsed < self.max_execution_time
def _return(
self,
output: AgentFinish,
intermediate_steps: list,
run_manager: CallbackManagerForChainRun | None = None,
) -> dict[str, Any]:
if run_manager:
run_manager.on_agent_finish(output, color="green", verbose=self.verbose)
final_output = output.return_values
if self.return_intermediate_steps:
final_output["intermediate_steps"] = intermediate_steps
return final_output
async def _areturn(
self,
output: AgentFinish,
intermediate_steps: list,
run_manager: AsyncCallbackManagerForChainRun | None = None,
) -> dict[str, Any]:
if run_manager:
await run_manager.on_agent_finish(
output,
color="green",
verbose=self.verbose,
)
final_output = output.return_values
if self.return_intermediate_steps:
final_output["intermediate_steps"] = intermediate_steps
return final_output
def _consume_next_step(
    self,
    values: NextStepOutput,
) -> AgentFinish | list[tuple[AgentAction, str]]:
    """Collapse the streamed step outputs into the loop's return shape."""
    final = values[-1]
    if isinstance(final, AgentFinish):
        # An AgentFinish must be the only emitted value; anything else
        # indicates a malformed step stream.
        if len(values) != 1:
            msg = "Expected a single AgentFinish output, but got multiple values."
            raise ValueError(msg)
        return final
    # Otherwise keep only the completed steps as (action, observation) pairs.
    return [
        (step.action, step.observation) for step in values if isinstance(step, AgentStep)
    ]
def _take_next_step(
self,
name_to_tool_map: dict[str, BaseTool],
color_mapping: dict[str, str],
inputs: dict[str, str],
intermediate_steps: list[tuple[AgentAction, str]],
run_manager: CallbackManagerForChainRun | None = None,
) -> AgentFinish | list[tuple[AgentAction, str]]:
return self._consume_next_step(
list(
self._iter_next_step(
name_to_tool_map,
color_mapping,
inputs,
intermediate_steps,
run_manager,
),
),
)
def _iter_next_step(
    self,
    name_to_tool_map: dict[str, BaseTool],
    color_mapping: dict[str, str],
    inputs: dict[str, str],
    intermediate_steps: list[tuple[AgentAction, str]],
    run_manager: CallbackManagerForChainRun | None = None,
) -> Iterator[AgentFinish | AgentAction | AgentStep]:
    """Take a single step in the thought-action-observation loop.

    Override this to take control of how the agent makes and acts on choices.

    Yields, in order: each `AgentAction` the agent chose, then one
    `AgentStep` per action once its tool has run — or a single
    `AgentFinish` when the agent is done.

    Args:
        name_to_tool_map: Mapping of tool name to tool instance.
        color_mapping: Mapping of tool name to log color.
        inputs: Input key/value mapping forwarded to the agent's plan call.
        intermediate_steps: Prior (action, observation) pairs.
        run_manager: Optional callback manager for this run.

    Raises:
        ValueError: If the agent's output cannot be parsed and
            `handle_parsing_errors` does not permit recovery.
    """
    try:
        # Trim/transform the history per `trim_intermediate_steps` first.
        intermediate_steps = self._prepare_intermediate_steps(intermediate_steps)
        # Call the LLM to see what to do.
        output = self._action_agent.plan(
            intermediate_steps,
            callbacks=run_manager.get_child() if run_manager else None,
            **inputs,
        )
    except OutputParserException as e:
        # `handle_parsing_errors` may be a bool (True = recover with a
        # canned observation), a fixed string, or a callable that derives
        # the observation from the exception.
        if isinstance(self.handle_parsing_errors, bool):
            raise_error = not self.handle_parsing_errors
        else:
            raise_error = False
        if raise_error:
            msg = (
                "An output parsing error occurred. "
                "In order to pass this error back to the agent and have it try "
                "again, pass `handle_parsing_errors=True` to the AgentExecutor. "
                f"This is the error: {e!s}"
            )
            raise ValueError(msg) from e
        text = str(e)
        if isinstance(self.handle_parsing_errors, bool):
            if e.send_to_llm:
                # The parser asked for its own observation/LLM output to be
                # echoed back to the model.
                observation = str(e.observation)
                text = str(e.llm_output)
            else:
                observation = "Invalid or incomplete response"
        elif isinstance(self.handle_parsing_errors, str):
            observation = self.handle_parsing_errors
        elif callable(self.handle_parsing_errors):
            observation = self.handle_parsing_errors(e)
        else:
            msg = "Got unexpected type of `handle_parsing_errors`"  # type: ignore[unreachable]
            raise ValueError(msg) from e  # noqa: TRY004
        # Surface the failure to the model as a synthetic "_Exception" action.
        output = AgentAction("_Exception", observation, text)
        if run_manager:
            run_manager.on_agent_action(output, color="green")
        tool_run_kwargs = self._action_agent.tool_run_logging_kwargs()
        observation = ExceptionTool().run(
            output.tool_input,
            verbose=self.verbose,
            color=None,
            callbacks=run_manager.get_child() if run_manager else None,
            **tool_run_kwargs,
        )
        yield AgentStep(action=output, observation=observation)
        return
    # If the tool chosen is the finishing tool, then we end and return.
    if isinstance(output, AgentFinish):
        yield output
        return
    actions: list[AgentAction]
    actions = [output] if isinstance(output, AgentAction) else output
    # Yield the planned actions first so streaming consumers see them
    # before any tool has executed.
    for agent_action in actions:
        yield agent_action
    for agent_action in actions:
        yield self._perform_agent_action(
            name_to_tool_map,
            color_mapping,
            agent_action,
            run_manager,
        )
def _perform_agent_action(
    self,
    name_to_tool_map: dict[str, BaseTool],
    color_mapping: dict[str, str],
    agent_action: AgentAction,
    run_manager: CallbackManagerForChainRun | None = None,
) -> AgentStep:
    """Execute the tool chosen by `agent_action` and wrap it in an `AgentStep`.

    Args:
        name_to_tool_map: Mapping of tool name to tool instance.
        color_mapping: Mapping of tool name to log color.
        agent_action: The action (tool name + input) chosen by the agent.
        run_manager: Optional callback manager for this run.

    Returns:
        An `AgentStep` pairing the action with the tool's observation.
    """
    if run_manager:
        run_manager.on_agent_action(agent_action, color="green")
    # Otherwise we lookup the tool
    if agent_action.tool in name_to_tool_map:
        tool = name_to_tool_map[agent_action.tool]
        return_direct = tool.return_direct
        color = color_mapping[agent_action.tool]
        tool_run_kwargs = self._action_agent.tool_run_logging_kwargs()
        if return_direct:
            # Suppress the LLM prefix: a return_direct observation goes back
            # to the caller rather than into another LLM turn.
            tool_run_kwargs["llm_prefix"] = ""
        # We then call the tool on the tool input to get an observation
        observation = tool.run(
            agent_action.tool_input,
            verbose=self.verbose,
            color=color,
            callbacks=run_manager.get_child() if run_manager else None,
            **tool_run_kwargs,
        )
    else:
        # Unknown tool name: run InvalidTool so the model receives a
        # corrective observation instead of the executor crashing.
        tool_run_kwargs = self._action_agent.tool_run_logging_kwargs()
        observation = InvalidTool().run(
            {
                "requested_tool_name": agent_action.tool,
                "available_tool_names": list(name_to_tool_map.keys()),
            },
            verbose=self.verbose,
            color=None,
            callbacks=run_manager.get_child() if run_manager else None,
            **tool_run_kwargs,
        )
    return AgentStep(action=agent_action, observation=observation)
async def _atake_next_step(
self,
name_to_tool_map: dict[str, BaseTool],
color_mapping: dict[str, str],
inputs: dict[str, str],
intermediate_steps: list[tuple[AgentAction, str]],
run_manager: AsyncCallbackManagerForChainRun | None = None,
) -> AgentFinish | list[tuple[AgentAction, str]]:
return self._consume_next_step(
[
a
async for a in self._aiter_next_step(
name_to_tool_map,
color_mapping,
inputs,
intermediate_steps,
run_manager,
)
],
)
async def _aiter_next_step(
    self,
    name_to_tool_map: dict[str, BaseTool],
    color_mapping: dict[str, str],
    inputs: dict[str, str],
    intermediate_steps: list[tuple[AgentAction, str]],
    run_manager: AsyncCallbackManagerForChainRun | None = None,
) -> AsyncIterator[AgentFinish | AgentAction | AgentStep]:
    """Take a single step in the thought-action-observation loop.

    Override this to take control of how the agent makes and acts on choices.

    Async mirror of `_iter_next_step`; tool calls for multiple actions run
    concurrently via `asyncio.gather`.

    Raises:
        ValueError: If the agent's output cannot be parsed and
            `handle_parsing_errors` does not permit recovery.
    """
    try:
        # Trim/transform the history per `trim_intermediate_steps` first.
        intermediate_steps = self._prepare_intermediate_steps(intermediate_steps)
        # Call the LLM to see what to do.
        output = await self._action_agent.aplan(
            intermediate_steps,
            callbacks=run_manager.get_child() if run_manager else None,
            **inputs,
        )
    except OutputParserException as e:
        # `handle_parsing_errors` may be a bool, a fixed string, or a
        # callable producing the observation from the exception.
        if isinstance(self.handle_parsing_errors, bool):
            raise_error = not self.handle_parsing_errors
        else:
            raise_error = False
        if raise_error:
            msg = (
                "An output parsing error occurred. "
                "In order to pass this error back to the agent and have it try "
                "again, pass `handle_parsing_errors=True` to the AgentExecutor. "
                f"This is the error: {e!s}"
            )
            raise ValueError(msg) from e
        text = str(e)
        if isinstance(self.handle_parsing_errors, bool):
            if e.send_to_llm:
                observation = str(e.observation)
                text = str(e.llm_output)
            else:
                observation = "Invalid or incomplete response"
        elif isinstance(self.handle_parsing_errors, str):
            observation = self.handle_parsing_errors
        elif callable(self.handle_parsing_errors):
            observation = self.handle_parsing_errors(e)
        else:
            msg = "Got unexpected type of `handle_parsing_errors`"  # type: ignore[unreachable]
            raise ValueError(msg) from e  # noqa: TRY004
        # NOTE(review): unlike the sync `_iter_next_step`, this path does not
        # notify `run_manager.on_agent_action` for the synthetic action —
        # confirm whether that asymmetry is intentional.
        output = AgentAction("_Exception", observation, text)
        tool_run_kwargs = self._action_agent.tool_run_logging_kwargs()
        observation = await ExceptionTool().arun(
            output.tool_input,
            verbose=self.verbose,
            color=None,
            callbacks=run_manager.get_child() if run_manager else None,
            **tool_run_kwargs,
        )
        yield AgentStep(action=output, observation=observation)
        return
    # If the tool chosen is the finishing tool, then we end and return.
    if isinstance(output, AgentFinish):
        yield output
        return
    actions: list[AgentAction]
    actions = [output] if isinstance(output, AgentAction) else output
    # Yield the planned actions first so streaming consumers see them
    # before any tool has executed.
    for agent_action in actions:
        yield agent_action
    # Use asyncio.gather to run multiple tool.arun() calls concurrently
    result = await asyncio.gather(
        *[
            self._aperform_agent_action(
                name_to_tool_map,
                color_mapping,
                agent_action,
                run_manager,
            )
            for agent_action in actions
        ],
    )
    # TODO: This could yield each result as it becomes available
    for chunk in result:
        yield chunk
async def _aperform_agent_action(
    self,
    name_to_tool_map: dict[str, BaseTool],
    color_mapping: dict[str, str],
    agent_action: AgentAction,
    run_manager: AsyncCallbackManagerForChainRun | None = None,
) -> AgentStep:
    """Async: execute the chosen tool and wrap the result in an `AgentStep`.

    Args:
        name_to_tool_map: Mapping of tool name to tool instance.
        color_mapping: Mapping of tool name to log color.
        agent_action: The action (tool name + input) chosen by the agent.
        run_manager: Optional async callback manager for this run.

    Returns:
        An `AgentStep` pairing the action with the tool's observation.
    """
    if run_manager:
        await run_manager.on_agent_action(
            agent_action,
            verbose=self.verbose,
            color="green",
        )
    # Otherwise we lookup the tool
    if agent_action.tool in name_to_tool_map:
        tool = name_to_tool_map[agent_action.tool]
        return_direct = tool.return_direct
        color = color_mapping[agent_action.tool]
        tool_run_kwargs = self._action_agent.tool_run_logging_kwargs()
        if return_direct:
            # Suppress the LLM prefix: a return_direct observation goes back
            # to the caller rather than into another LLM turn.
            tool_run_kwargs["llm_prefix"] = ""
        # We then call the tool on the tool input to get an observation
        observation = await tool.arun(
            agent_action.tool_input,
            verbose=self.verbose,
            color=color,
            callbacks=run_manager.get_child() if run_manager else None,
            **tool_run_kwargs,
        )
    else:
        # Unknown tool name: run InvalidTool so the model receives a
        # corrective observation instead of the executor crashing.
        tool_run_kwargs = self._action_agent.tool_run_logging_kwargs()
        observation = await InvalidTool().arun(
            {
                "requested_tool_name": agent_action.tool,
                "available_tool_names": list(name_to_tool_map.keys()),
            },
            verbose=self.verbose,
            color=None,
            callbacks=run_manager.get_child() if run_manager else None,
            **tool_run_kwargs,
        )
    return AgentStep(action=agent_action, observation=observation)
def _call(
    self,
    inputs: dict[str, str],
    run_manager: CallbackManagerForChainRun | None = None,
) -> dict[str, Any]:
    """Run text through and get agent response.

    Drives the plan→act→observe loop until the agent finishes, a
    return_direct tool fires, or the iteration/time budget runs out.

    Args:
        inputs: Mapping of input keys to values for the agent.
        run_manager: Optional callback manager for this run.

    Returns:
        The agent's final output mapping (plus "intermediate_steps" when
        `return_intermediate_steps` is set).
    """
    # Construct a mapping of tool name to tool for easy lookup
    name_to_tool_map = {tool.name: tool for tool in self.tools}
    # We construct a mapping from each tool to a color, used for logging.
    color_mapping = get_color_mapping(
        [tool.name for tool in self.tools],
        excluded_colors=["green", "red"],
    )
    intermediate_steps: list[tuple[AgentAction, str]] = []
    # Let's start tracking the number of iterations and time elapsed
    iterations = 0
    time_elapsed = 0.0
    start_time = time.time()
    # We now enter the agent loop (until it returns something).
    while self._should_continue(iterations, time_elapsed):
        next_step_output = self._take_next_step(
            name_to_tool_map,
            color_mapping,
            inputs,
            intermediate_steps,
            run_manager=run_manager,
        )
        if isinstance(next_step_output, AgentFinish):
            return self._return(
                next_step_output,
                intermediate_steps,
                run_manager=run_manager,
            )
        intermediate_steps.extend(next_step_output)
        if len(next_step_output) == 1:
            next_step_action = next_step_output[0]
            # See if tool should return directly
            tool_return = self._get_tool_return(next_step_action)
            if tool_return is not None:
                return self._return(
                    tool_return,
                    intermediate_steps,
                    run_manager=run_manager,
                )
        iterations += 1
        time_elapsed = time.time() - start_time
    # Budget exhausted: ask the agent for a stopped response according to
    # `early_stopping_method`.
    output = self._action_agent.return_stopped_response(
        self.early_stopping_method,
        intermediate_steps,
        **inputs,
    )
    return self._return(output, intermediate_steps, run_manager=run_manager)
async def _acall(
    self,
    inputs: dict[str, str],
    run_manager: AsyncCallbackManagerForChainRun | None = None,
) -> dict[str, str]:
    """Async run text through and get agent response.

    Mirrors `_call`, additionally enforcing `max_execution_time` with an
    async timeout around the whole loop.

    Args:
        inputs: Mapping of input keys to values for the agent.
        run_manager: Optional async callback manager for this run.

    Returns:
        The agent's final output mapping (plus "intermediate_steps" when
        `return_intermediate_steps` is set).
    """
    # Construct a mapping of tool name to tool for easy lookup
    name_to_tool_map = {tool.name: tool for tool in self.tools}
    # We construct a mapping from each tool to a color, used for logging.
    # Exclude "green" and "red" to stay consistent with the sync `_call`
    # path (previously only "green" was excluded here).
    color_mapping = get_color_mapping(
        [tool.name for tool in self.tools],
        excluded_colors=["green", "red"],
    )
    intermediate_steps: list[tuple[AgentAction, str]] = []
    # Let's start tracking the number of iterations and time elapsed
    iterations = 0
    time_elapsed = 0.0
    start_time = time.time()
    # We now enter the agent loop (until it returns something).
    try:
        async with asyncio_timeout(self.max_execution_time):
            while self._should_continue(iterations, time_elapsed):
                next_step_output = await self._atake_next_step(
                    name_to_tool_map,
                    color_mapping,
                    inputs,
                    intermediate_steps,
                    run_manager=run_manager,
                )
                if isinstance(next_step_output, AgentFinish):
                    return await self._areturn(
                        next_step_output,
                        intermediate_steps,
                        run_manager=run_manager,
                    )
                intermediate_steps.extend(next_step_output)
                if len(next_step_output) == 1:
                    next_step_action = next_step_output[0]
                    # See if tool should return directly
                    tool_return = self._get_tool_return(next_step_action)
                    if tool_return is not None:
                        return await self._areturn(
                            tool_return,
                            intermediate_steps,
                            run_manager=run_manager,
                        )
                iterations += 1
                time_elapsed = time.time() - start_time
            # Budget exhausted: ask the agent for a stopped response
            # according to `early_stopping_method`.
            output = self._action_agent.return_stopped_response(
                self.early_stopping_method,
                intermediate_steps,
                **inputs,
            )
            return await self._areturn(
                output,
                intermediate_steps,
                run_manager=run_manager,
            )
    except (TimeoutError, asyncio.TimeoutError):
        # stop early when interrupted by the async timeout
        output = self._action_agent.return_stopped_response(
            self.early_stopping_method,
            intermediate_steps,
            **inputs,
        )
        return await self._areturn(
            output,
            intermediate_steps,
            run_manager=run_manager,
        )
def _get_tool_return(
    self,
    next_step_output: tuple[AgentAction, str],
) -> AgentFinish | None:
    """Return an `AgentFinish` when the invoked tool is marked return_direct."""
    agent_action, observation = next_step_output
    tools_by_name = {tool.name: tool for tool in self.tools}
    # Mirror the agent's primary return key; fall back to "output".
    agent_return_values = self._action_agent.return_values
    return_value_key = agent_return_values[0] if len(agent_return_values) > 0 else "output"
    # Invalid tools won't be in the map, so they never short-circuit the loop.
    matched_tool = tools_by_name.get(agent_action.tool)
    if matched_tool is not None and matched_tool.return_direct:
        return AgentFinish(
            {return_value_key: observation},
            "",
        )
    return None
def _prepare_intermediate_steps(
self,
intermediate_steps: list[tuple[AgentAction, str]],
) -> list[tuple[AgentAction, str]]:
if (
isinstance(self.trim_intermediate_steps, int)
and self.trim_intermediate_steps > 0
):
return intermediate_steps[-self.trim_intermediate_steps :]
if callable(self.trim_intermediate_steps):
return self.trim_intermediate_steps(intermediate_steps)
return intermediate_steps
@override
def stream(
    self,
    input: dict[str, Any] | Any,
    config: RunnableConfig | None = None,
    **kwargs: Any,
) -> Iterator[AddableDict]:
    """Stream the steps taken to reach the final output.

    Args:
        input: Input to the agent.
        config: Config to use.
        kwargs: Additional arguments passed through to the iterator.

    Yields:
        Addable dictionaries describing actions, steps, and final output.
    """
    resolved = ensure_config(config)
    yield from AgentExecutorIterator(
        self,
        input,
        resolved.get("callbacks"),
        tags=resolved.get("tags"),
        metadata=resolved.get("metadata"),
        run_name=resolved.get("run_name"),
        run_id=resolved.get("run_id"),
        yield_actions=True,
        **kwargs,
    )
@override
async def astream(
    self,
    input: dict[str, Any] | Any,
    config: RunnableConfig | None = None,
    **kwargs: Any,
) -> AsyncIterator[AddableDict]:
    """Async stream the steps taken to reach the final output.

    Args:
        input: Input to the agent.
        config: Config to use.
        kwargs: Additional arguments passed through to the iterator.

    Yields:
        Addable dictionaries describing actions, steps, and final output.
    """
    resolved = ensure_config(config)
    step_iterator = AgentExecutorIterator(
        self,
        input,
        resolved.get("callbacks"),
        tags=resolved.get("tags"),
        metadata=resolved.get("metadata"),
        run_name=resolved.get("run_name"),
        run_id=resolved.get("run_id"),
        yield_actions=True,
        **kwargs,
    )
    async for chunk in step_iterator:
        yield chunk
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/agent.py",
"license": "MIT License",
"lines": 1569,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/agent_toolkits/ainetwork/toolkit.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.ainetwork.toolkit import AINetworkToolkit
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"AINetworkToolkit": "langchain_community.agent_toolkits.ainetwork.toolkit",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
    """Look up attributes dynamically.

    Module-level ``__getattr__`` hook: forwards unknown attribute access to
    the lazy importer, which resolves deprecated names from their new
    ``langchain_community`` location and raises the deprecation warning.
    """
    return _import_attribute(name)
__all__ = [
"AINetworkToolkit",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/agent_toolkits/ainetwork/toolkit.py",
"license": "MIT License",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/agent_toolkits/amadeus/toolkit.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.amadeus.toolkit import AmadeusToolkit
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"AmadeusToolkit": "langchain_community.agent_toolkits.amadeus.toolkit",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
    """Look up attributes dynamically.

    Module-level ``__getattr__`` hook: forwards unknown attribute access to
    the lazy importer, which resolves deprecated names from their new
    ``langchain_community`` location and raises the deprecation warning.
    """
    return _import_attribute(name)
__all__ = ["AmadeusToolkit"]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/agent_toolkits/amadeus/toolkit.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/agent_toolkits/clickup/toolkit.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.clickup.toolkit import ClickupToolkit
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"ClickupToolkit": "langchain_community.agent_toolkits.clickup.toolkit",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
    """Look up attributes dynamically.

    Module-level ``__getattr__`` hook: forwards unknown attribute access to
    the lazy importer, which resolves deprecated names from their new
    ``langchain_community`` location and raises the deprecation warning.
    """
    return _import_attribute(name)
__all__ = [
"ClickupToolkit",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/agent_toolkits/clickup/toolkit.py",
"license": "MIT License",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/agent_toolkits/conversational_retrieval/openai_functions.py | from typing import Any
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import SystemMessage
from langchain_core.prompts.chat import MessagesPlaceholder
from langchain_core.tools import BaseTool
from langchain_classic.agents.agent import AgentExecutor
from langchain_classic.agents.openai_functions_agent.agent_token_buffer_memory import (
AgentTokenBufferMemory,
)
from langchain_classic.agents.openai_functions_agent.base import OpenAIFunctionsAgent
from langchain_classic.base_memory import BaseMemory
from langchain_classic.memory.token_buffer import ConversationTokenBufferMemory
def _get_default_system_message() -> SystemMessage:
    """Build the fallback system prompt used when the caller supplies none."""
    default_text = (
        "Do your best to answer the questions. "
        "Feel free to use any tools available to look up "
        "relevant information, only if necessary"
    )
    return SystemMessage(content=default_text)
def create_conversational_retrieval_agent(
    llm: BaseLanguageModel,
    tools: list[BaseTool],
    remember_intermediate_steps: bool = True,  # noqa: FBT001,FBT002
    memory_key: str = "chat_history",
    system_message: SystemMessage | None = None,
    verbose: bool = False,  # noqa: FBT001,FBT002
    max_token_limit: int = 2000,
    **kwargs: Any,
) -> AgentExecutor:
    """A convenience method for creating a conversational retrieval agent.

    Args:
        llm: The language model to use, should be `ChatOpenAI`.
        tools: A list of tools the agent has access to.
        remember_intermediate_steps: Whether prior action/observation pairs
            are kept in memory across questions. Remembering them lets the
            agent reuse relevant earlier findings for follow-up questions,
            at the cost of extra tokens.
        memory_key: The name of the memory key in the prompt.
        system_message: The system message to use; a basic default is used
            when omitted.
        verbose: Whether the final `AgentExecutor` should be verbose.
        max_token_limit: The max number of tokens to keep around in memory.
        **kwargs: Additional keyword arguments passed to the `AgentExecutor`.

    Returns:
        An appropriately initialized agent executor.
    """
    # Pick the memory class based on whether tool steps should be retained.
    if remember_intermediate_steps:
        memory: BaseMemory = AgentTokenBufferMemory(
            memory_key=memory_key,
            llm=llm,
            max_token_limit=max_token_limit,
        )
    else:
        memory = ConversationTokenBufferMemory(
            memory_key=memory_key,
            return_messages=True,
            output_key="output",
            llm=llm,
            max_token_limit=max_token_limit,
        )
    prompt = OpenAIFunctionsAgent.create_prompt(
        system_message=system_message or _get_default_system_message(),
        extra_prompt_messages=[MessagesPlaceholder(variable_name=memory_key)],
    )
    return AgentExecutor(
        agent=OpenAIFunctionsAgent(llm=llm, tools=tools, prompt=prompt),
        tools=tools,
        memory=memory,
        verbose=verbose,
        return_intermediate_steps=remember_intermediate_steps,
        **kwargs,
    )
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/agent_toolkits/conversational_retrieval/openai_functions.py",
"license": "MIT License",
"lines": 76,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/agent_toolkits/file_management/toolkit.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.file_management.toolkit import (
FileManagementToolkit,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"FileManagementToolkit": (
"langchain_community.agent_toolkits.file_management.toolkit"
),
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
    """Look up attributes dynamically.

    Module-level ``__getattr__`` hook: forwards unknown attribute access to
    the lazy importer, which resolves deprecated names from their new
    ``langchain_community`` location and raises the deprecation warning.
    """
    return _import_attribute(name)
__all__ = [
"FileManagementToolkit",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/agent_toolkits/file_management/toolkit.py",
"license": "MIT License",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/agent_toolkits/github/toolkit.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.github.toolkit import (
BranchName,
CommentOnIssue,
CreateFile,
CreatePR,
CreateReviewRequest,
DeleteFile,
DirectoryPath,
GetIssue,
GetPR,
GitHubToolkit,
NoInput,
ReadFile,
SearchCode,
SearchIssuesAndPRs,
UpdateFile,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"NoInput": "langchain_community.agent_toolkits.github.toolkit",
"GetIssue": "langchain_community.agent_toolkits.github.toolkit",
"CommentOnIssue": "langchain_community.agent_toolkits.github.toolkit",
"GetPR": "langchain_community.agent_toolkits.github.toolkit",
"CreatePR": "langchain_community.agent_toolkits.github.toolkit",
"CreateFile": "langchain_community.agent_toolkits.github.toolkit",
"ReadFile": "langchain_community.agent_toolkits.github.toolkit",
"UpdateFile": "langchain_community.agent_toolkits.github.toolkit",
"DeleteFile": "langchain_community.agent_toolkits.github.toolkit",
"DirectoryPath": "langchain_community.agent_toolkits.github.toolkit",
"BranchName": "langchain_community.agent_toolkits.github.toolkit",
"SearchCode": "langchain_community.agent_toolkits.github.toolkit",
"CreateReviewRequest": "langchain_community.agent_toolkits.github.toolkit",
"SearchIssuesAndPRs": "langchain_community.agent_toolkits.github.toolkit",
"GitHubToolkit": "langchain_community.agent_toolkits.github.toolkit",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
    """Look up attributes dynamically.

    Module-level ``__getattr__`` hook: forwards unknown attribute access to
    the lazy importer, which resolves deprecated names from their new
    ``langchain_community`` location and raises the deprecation warning.
    """
    return _import_attribute(name)
__all__ = [
"BranchName",
"CommentOnIssue",
"CreateFile",
"CreatePR",
"CreateReviewRequest",
"DeleteFile",
"DirectoryPath",
"GetIssue",
"GetPR",
"GitHubToolkit",
"NoInput",
"ReadFile",
"SearchCode",
"SearchIssuesAndPRs",
"UpdateFile",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/agent_toolkits/github/toolkit.py",
"license": "MIT License",
"lines": 61,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/agent_toolkits/gitlab/toolkit.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.gitlab.toolkit import GitLabToolkit
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"GitLabToolkit": "langchain_community.agent_toolkits.gitlab.toolkit",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
    """Look up attributes dynamically.

    Module-level ``__getattr__`` hook: forwards unknown attribute access to
    the lazy importer, which resolves deprecated names from their new
    ``langchain_community`` location and raises the deprecation warning.
    """
    return _import_attribute(name)
__all__ = [
"GitLabToolkit",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/agent_toolkits/gitlab/toolkit.py",
"license": "MIT License",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/agent_toolkits/gmail/toolkit.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.gmail.toolkit import GmailToolkit
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"GmailToolkit": "langchain_community.agent_toolkits.gmail.toolkit"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
    """Look up attributes dynamically.

    Module-level ``__getattr__`` hook: forwards unknown attribute access to
    the lazy importer, which resolves deprecated names from their new
    ``langchain_community`` location and raises the deprecation warning.
    """
    return _import_attribute(name)
__all__ = [
"GmailToolkit",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/agent_toolkits/gmail/toolkit.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/agent_toolkits/jira/toolkit.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.jira.toolkit import JiraToolkit
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"JiraToolkit": "langchain_community.agent_toolkits.jira.toolkit"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
    """Look up attributes dynamically.

    Module-level ``__getattr__`` hook: forwards unknown attribute access to
    the lazy importer, which resolves deprecated names from their new
    ``langchain_community`` location and raises the deprecation warning.
    """
    return _import_attribute(name)
__all__ = [
"JiraToolkit",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/agent_toolkits/jira/toolkit.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/agent_toolkits/json/base.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.json.base import create_json_agent
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"create_json_agent": "langchain_community.agent_toolkits.json.base",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
    """Look up attributes dynamically.

    Module-level ``__getattr__`` hook: forwards unknown attribute access to
    the lazy importer, which resolves deprecated names from their new
    ``langchain_community`` location and raises the deprecation warning.
    """
    return _import_attribute(name)
__all__ = [
"create_json_agent",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/agent_toolkits/json/base.py",
"license": "MIT License",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/agent_toolkits/json/prompt.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.json.prompt import JSON_PREFIX, JSON_SUFFIX
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"JSON_PREFIX": "langchain_community.agent_toolkits.json.prompt",
"JSON_SUFFIX": "langchain_community.agent_toolkits.json.prompt",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
    """Look up attributes dynamically.

    Module-level ``__getattr__`` hook: forwards unknown attribute access to
    the lazy importer, which resolves deprecated names from their new
    ``langchain_community`` location and raises the deprecation warning.
    """
    return _import_attribute(name)
__all__ = ["JSON_PREFIX", "JSON_SUFFIX"]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/agent_toolkits/json/prompt.py",
"license": "MIT License",
"lines": 16,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/agent_toolkits/json/toolkit.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.json.toolkit import JsonToolkit
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"JsonToolkit": "langchain_community.agent_toolkits.json.toolkit"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"JsonToolkit",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/agent_toolkits/json/toolkit.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/agent_toolkits/multion/toolkit.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.multion.toolkit import MultionToolkit
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"MultionToolkit": "langchain_community.agent_toolkits.multion.toolkit",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"MultionToolkit",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/agent_toolkits/multion/toolkit.py",
"license": "MIT License",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/agent_toolkits/nasa/toolkit.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.nasa.toolkit import NasaToolkit
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"NasaToolkit": "langchain_community.agent_toolkits.nasa.toolkit"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"NasaToolkit",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/agent_toolkits/nasa/toolkit.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/agent_toolkits/nla/tool.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.nla.tool import NLATool
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"NLATool": "langchain_community.agent_toolkits.nla.tool"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"NLATool",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/agent_toolkits/nla/tool.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/agent_toolkits/nla/toolkit.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.nla.toolkit import NLAToolkit
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"NLAToolkit": "langchain_community.agent_toolkits.nla.toolkit"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"NLAToolkit",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/agent_toolkits/nla/toolkit.py",
"license": "MIT License",
"lines": 15,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/agent_toolkits/office365/toolkit.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.office365.toolkit import O365Toolkit
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"O365Toolkit": "langchain_community.agent_toolkits.office365.toolkit",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"O365Toolkit",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/agent_toolkits/office365/toolkit.py",
"license": "MIT License",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain/langchain_classic/agents/agent_toolkits/openapi/base.py | from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.openapi.base import create_openapi_agent
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"create_openapi_agent": "langchain_community.agent_toolkits.openapi.base",
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"create_openapi_agent",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/langchain_classic/agents/agent_toolkits/openapi/base.py",
"license": "MIT License",
"lines": 17,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.