sample_id | text | metadata | category |
|---|---|---|---|
langchain-ai/langchain:libs/core/tests/unit_tests/messages/block_translators/test_bedrock_converse.py | from langchain_core.messages import AIMessage, AIMessageChunk, HumanMessage
from langchain_core.messages import content as types
def test_convert_to_v1_from_bedrock_converse() -> None:
message = AIMessage(
[
{
"type": "reasoning_content",
"reasoning_content": {"text": "foo", "signature": "foo_signature"},
},
{"type": "text", "text": "Let's call a tool."},
{
"type": "tool_use",
"id": "abc_123",
"name": "get_weather",
"input": {"location": "San Francisco"},
},
{
"type": "text",
"text": "It's sunny.",
"citations": [
{
"title": "Document Title",
"source_content": [{"text": "The weather is sunny."}],
"location": {
"document_char": {
"document_index": 0,
"start": 58,
"end": 96,
}
},
},
{
"title": "Document Title",
"source_content": [{"text": "The weather is sunny."}],
"location": {
"document_page": {"document_index": 0, "start": 1, "end": 2}
},
},
{
"title": "Document Title",
"source_content": [{"text": "The weather is sunny."}],
"location": {
"document_chunk": {
"document_index": 0,
"start": 1,
"end": 2,
}
},
},
{"bar": "baz"},
],
},
{"type": "something_else", "foo": "bar"},
],
response_metadata={"model_provider": "bedrock_converse"},
)
expected_content: list[types.ContentBlock] = [
{
"type": "reasoning",
"reasoning": "foo",
"extras": {"signature": "foo_signature"},
},
{"type": "text", "text": "Let's call a tool."},
{
"type": "tool_call",
"id": "abc_123",
"name": "get_weather",
"args": {"location": "San Francisco"},
},
{
"type": "text",
"text": "It's sunny.",
"annotations": [
{
"type": "citation",
"title": "Document Title",
"cited_text": "The weather is sunny.",
"extras": {
"location": {
"document_char": {
"document_index": 0,
"start": 58,
"end": 96,
}
},
},
},
{
"type": "citation",
"title": "Document Title",
"cited_text": "The weather is sunny.",
"extras": {
"location": {
"document_page": {"document_index": 0, "start": 1, "end": 2}
},
},
},
{
"type": "citation",
"title": "Document Title",
"cited_text": "The weather is sunny.",
"extras": {
"location": {
"document_chunk": {
"document_index": 0,
"start": 1,
"end": 2,
}
}
},
},
{"type": "citation", "extras": {"bar": "baz"}},
],
},
{
"type": "non_standard",
"value": {"type": "something_else", "foo": "bar"},
},
]
assert message.content_blocks == expected_content
# Check no mutation
assert message.content != expected_content
def test_convert_to_v1_from_converse_chunk() -> None:
chunks = [
AIMessageChunk(
content=[{"text": "Looking ", "type": "text", "index": 0}],
response_metadata={"model_provider": "bedrock_converse"},
),
AIMessageChunk(
content=[{"text": "now.", "type": "text", "index": 0}],
response_metadata={"model_provider": "bedrock_converse"},
),
AIMessageChunk(
content=[
{
"type": "tool_use",
"name": "get_weather",
"input": {},
"id": "toolu_abc123",
"index": 1,
}
],
tool_call_chunks=[
{
"type": "tool_call_chunk",
"name": "get_weather",
"args": "",
"id": "toolu_abc123",
"index": 1,
}
],
response_metadata={"model_provider": "bedrock_converse"},
),
AIMessageChunk(
content=[{"type": "input_json_delta", "partial_json": "", "index": 1}],
tool_call_chunks=[
{
"name": None,
"args": "",
"id": None,
"index": 1,
"type": "tool_call_chunk",
}
],
response_metadata={"model_provider": "bedrock_converse"},
),
AIMessageChunk(
content=[
{"type": "input_json_delta", "partial_json": '{"loca', "index": 1}
],
tool_call_chunks=[
{
"name": None,
"args": '{"loca',
"id": None,
"index": 1,
"type": "tool_call_chunk",
}
],
response_metadata={"model_provider": "bedrock_converse"},
),
AIMessageChunk(
content=[
{"type": "input_json_delta", "partial_json": 'tion": "San ', "index": 1}
],
tool_call_chunks=[
{
"name": None,
"args": 'tion": "San ',
"id": None,
"index": 1,
"type": "tool_call_chunk",
}
],
response_metadata={"model_provider": "bedrock_converse"},
),
AIMessageChunk(
content=[
{"type": "input_json_delta", "partial_json": 'Francisco"}', "index": 1}
],
tool_call_chunks=[
{
"name": None,
"args": 'Francisco"}',
"id": None,
"index": 1,
"type": "tool_call_chunk",
}
],
response_metadata={"model_provider": "bedrock_converse"},
),
]
expected_contents: list[types.ContentBlock] = [
{"type": "text", "text": "Looking ", "index": 0},
{"type": "text", "text": "now.", "index": 0},
{
"type": "tool_call_chunk",
"name": "get_weather",
"args": "",
"id": "toolu_abc123",
"index": 1,
},
{"name": None, "args": "", "id": None, "index": 1, "type": "tool_call_chunk"},
{
"name": None,
"args": '{"loca',
"id": None,
"index": 1,
"type": "tool_call_chunk",
},
{
"name": None,
"args": 'tion": "San ',
"id": None,
"index": 1,
"type": "tool_call_chunk",
},
{
"name": None,
"args": 'Francisco"}',
"id": None,
"index": 1,
"type": "tool_call_chunk",
},
]
for chunk, expected in zip(chunks, expected_contents, strict=False):
assert chunk.content_blocks == [expected]
full: AIMessageChunk | None = None
for chunk in chunks:
full = chunk if full is None else full + chunk
assert isinstance(full, AIMessageChunk)
expected_content = [
{"type": "text", "text": "Looking now.", "index": 0},
{
"type": "tool_use",
"name": "get_weather",
"partial_json": '{"location": "San Francisco"}',
"input": {},
"id": "toolu_abc123",
"index": 1,
},
]
assert full.content == expected_content
expected_content_blocks = [
{"type": "text", "text": "Looking now.", "index": 0},
{
"type": "tool_call_chunk",
"name": "get_weather",
"args": '{"location": "San Francisco"}',
"id": "toolu_abc123",
"index": 1,
},
]
assert full.content_blocks == expected_content_blocks
def test_convert_to_v1_from_converse_input() -> None:
message = HumanMessage(
[
{"text": "foo"},
{
"document": {
"format": "txt",
"name": "doc_name_1",
"source": {"text": "doc_text_1"},
"context": "doc_context_1",
"citations": {"enabled": True},
},
},
{
"document": {
"format": "pdf",
"name": "doc_name_2",
"source": {"bytes": b"doc_text_2"},
},
},
{
"document": {
"format": "txt",
"name": "doc_name_3",
"source": {"content": [{"text": "doc_text"}, {"text": "_3"}]},
"context": "doc_context_3",
},
},
{
"image": {
"format": "jpeg",
"source": {"bytes": b"image_bytes"},
}
},
{
"document": {
"format": "pdf",
"name": "doc_name_4",
"source": {
"s3Location": {"uri": "s3://bla", "bucketOwner": "owner"}
},
},
},
]
)
expected: list[types.ContentBlock] = [
{"type": "text", "text": "foo"},
{
"type": "text-plain",
"mime_type": "text/plain",
"text": "doc_text_1",
"extras": {
"name": "doc_name_1",
"context": "doc_context_1",
"citations": {"enabled": True},
},
},
{
"type": "file",
"mime_type": "application/pdf",
"base64": "ZG9jX3RleHRfMg==",
"extras": {"name": "doc_name_2"},
},
{
"type": "non_standard",
"value": {
"document": {
"format": "txt",
"name": "doc_name_3",
"source": {"content": [{"text": "doc_text"}, {"text": "_3"}]},
"context": "doc_context_3",
},
},
},
{
"type": "image",
"base64": "aW1hZ2VfYnl0ZXM=",
"mime_type": "image/jpeg",
},
{
"type": "non_standard",
"value": {
"document": {
"format": "pdf",
"name": "doc_name_4",
"source": {
"s3Location": {"uri": "s3://bla", "bucketOwner": "owner"}
},
},
},
},
]
assert message.content_blocks == expected
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/core/tests/unit_tests/messages/block_translators/test_bedrock_converse.py",
"license": "MIT License",
"lines": 367,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
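The test above pins down the mapping from Bedrock Converse content blocks to v1 content blocks. Below is a minimal sketch of the reasoning-block branch of such a translator, written only against the input/expected pairs in the test; the helper name and its placement are assumptions, not the library's actual API.

```python
from typing import Any


def _translate_reasoning_block(block: dict[str, Any]) -> dict[str, Any]:
    """Map a Bedrock Converse `reasoning_content` block to a v1 `reasoning` block.

    Mirrors the expectation in the test above: the nested text becomes
    `reasoning` and the signature is tucked into `extras`. Hypothetical helper.
    """
    reasoning = block.get("reasoning_content", {})
    out: dict[str, Any] = {"type": "reasoning", "reasoning": reasoning.get("text", "")}
    if "signature" in reasoning:
        out["extras"] = {"signature": reasoning["signature"]}
    return out


assert _translate_reasoning_block(
    {
        "type": "reasoning_content",
        "reasoning_content": {"text": "foo", "signature": "foo_signature"},
    }
) == {"type": "reasoning", "reasoning": "foo", "extras": {"signature": "foo_signature"}}
```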
langchain-ai/langchain:libs/core/tests/unit_tests/messages/block_translators/test_langchain_v0.py | from langchain_core.messages import HumanMessage
from langchain_core.messages import content as types
from langchain_core.messages.block_translators.langchain_v0 import (
_convert_legacy_v0_content_block_to_v1,
)
from tests.unit_tests.language_models.chat_models.test_base import (
_content_blocks_equal_ignore_id,
)
def test_convert_to_v1_from_openai_input() -> None:
message = HumanMessage(
content=[
{"type": "text", "text": "Hello"},
{
"type": "image",
"source_type": "url",
"url": "https://example.com/image.png",
},
{
"type": "image",
"source_type": "base64",
"data": "<base64 data>",
"mime_type": "image/png",
},
{
"type": "file",
"source_type": "url",
"url": "<document url>",
},
{
"type": "file",
"source_type": "base64",
"data": "<base64 data>",
"mime_type": "application/pdf",
},
{
"type": "audio",
"source_type": "base64",
"data": "<base64 data>",
"mime_type": "audio/mpeg",
},
{
"type": "file",
"source_type": "id",
"id": "<file id>",
},
]
)
expected: list[types.ContentBlock] = [
{"type": "text", "text": "Hello"},
{
"type": "image",
"url": "https://example.com/image.png",
},
{
"type": "image",
"base64": "<base64 data>",
"mime_type": "image/png",
},
{
"type": "file",
"url": "<document url>",
},
{
"type": "file",
"base64": "<base64 data>",
"mime_type": "application/pdf",
},
{
"type": "audio",
"base64": "<base64 data>",
"mime_type": "audio/mpeg",
},
{
"type": "file",
"file_id": "<file id>",
},
]
assert _content_blocks_equal_ignore_id(message.content_blocks, expected)
def test_convert_with_extras_on_v0_block() -> None:
"""Test that extras on old-style blocks are preserved in conversion.
Refer to `_extract_v0_extras` for details.
"""
block = {
"type": "image",
"source_type": "url",
"url": "https://example.com/image.png",
# extras follow
"alt_text": "An example image",
"caption": "Example caption",
"name": "example_image",
"description": None,
"attribution": None,
}
expected_output = {
"type": "image",
"url": "https://example.com/image.png",
"extras": {
"alt_text": "An example image",
"caption": "Example caption",
"name": "example_image",
# "description": None, # These are filtered out
# "attribution": None,
},
}
assert _convert_legacy_v0_content_block_to_v1(block) == expected_output
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/core/tests/unit_tests/messages/block_translators/test_langchain_v0.py",
"license": "MIT License",
"lines": 105,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
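`test_convert_with_extras_on_v0_block` above shows that unknown keys on a legacy v0 block are moved into `extras` while `None` values are dropped. A sketch of that extraction step under those assumptions (the key set and helper name are illustrative; the real `_extract_v0_extras` derives known keys from the block type):

```python
from typing import Any

# Keys that belong to the standard v0 image block itself; anything else is an "extra".
# Illustrative set -- not the library's actual definition.
_KNOWN_KEYS = {"type", "source_type", "url", "data", "mime_type", "id"}


def _extract_extras(block: dict[str, Any]) -> dict[str, Any]:
    """Collect non-standard, non-None keys from a legacy v0 block."""
    return {k: v for k, v in block.items() if k not in _KNOWN_KEYS and v is not None}


extras = _extract_extras(
    {
        "type": "image",
        "source_type": "url",
        "url": "https://example.com/image.png",
        "alt_text": "An example image",
        "description": None,  # dropped, matching the test expectation
    }
)
assert extras == {"alt_text": "An example image"}
```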
langchain-ai/langchain:libs/core/tests/unit_tests/messages/block_translators/test_openai.py | import pytest
from langchain_core.messages import AIMessage, AIMessageChunk, HumanMessage
from langchain_core.messages import content as types
from langchain_core.messages.block_translators.openai import (
convert_to_openai_data_block,
)
from tests.unit_tests.language_models.chat_models.test_base import (
_content_blocks_equal_ignore_id,
)
def test_convert_to_v1_from_responses() -> None:
message = AIMessage(
[
{"type": "reasoning", "id": "abc123", "summary": []},
{
"type": "reasoning",
"id": "abc234",
"summary": [
{"type": "summary_text", "text": "foo bar"},
{"type": "summary_text", "text": "baz"},
],
},
{
"type": "function_call",
"call_id": "call_123",
"name": "get_weather",
"arguments": '{"location": "San Francisco"}',
},
{
"type": "function_call",
"call_id": "call_234",
"name": "get_weather_2",
"arguments": '{"location": "New York"}',
"id": "fc_123",
},
{"type": "text", "text": "Hello "},
{
"type": "text",
"text": "world",
"annotations": [
{"type": "url_citation", "url": "https://example.com"},
{
"type": "file_citation",
"filename": "my doc",
"index": 1,
"file_id": "file_123",
},
{"bar": "baz"},
],
},
{"type": "image_generation_call", "id": "ig_123", "result": "..."},
{
"type": "file_search_call",
"id": "fs_123",
"queries": ["query for file search"],
"results": [{"file_id": "file-123"}],
"status": "completed",
},
{"type": "something_else", "foo": "bar"},
],
tool_calls=[
{
"type": "tool_call",
"id": "call_123",
"name": "get_weather",
"args": {"location": "San Francisco"},
},
{
"type": "tool_call",
"id": "call_234",
"name": "get_weather_2",
"args": {"location": "New York"},
},
],
response_metadata={"model_provider": "openai"},
)
expected_content: list[types.ContentBlock] = [
{"type": "reasoning", "id": "abc123"},
{"type": "reasoning", "id": "abc234", "reasoning": "foo bar"},
{"type": "reasoning", "id": "abc234", "reasoning": "baz"},
{
"type": "tool_call",
"id": "call_123",
"name": "get_weather",
"args": {"location": "San Francisco"},
},
{
"type": "tool_call",
"id": "call_234",
"name": "get_weather_2",
"args": {"location": "New York"},
"extras": {"item_id": "fc_123"},
},
{"type": "text", "text": "Hello "},
{
"type": "text",
"text": "world",
"annotations": [
{"type": "citation", "url": "https://example.com"},
{
"type": "citation",
"title": "my doc",
"extras": {"file_id": "file_123", "index": 1},
},
{"type": "non_standard_annotation", "value": {"bar": "baz"}},
],
},
{"type": "image", "base64": "...", "id": "ig_123"},
{
"type": "server_tool_call",
"name": "file_search",
"id": "fs_123",
"args": {"queries": ["query for file search"]},
},
{
"type": "server_tool_result",
"tool_call_id": "fs_123",
"output": [{"file_id": "file-123"}],
"status": "success",
},
{
"type": "non_standard",
"value": {"type": "something_else", "foo": "bar"},
},
]
assert message.content_blocks == expected_content
# Check no mutation
assert message.content != expected_content
def test_convert_to_v1_from_responses_chunk() -> None:
chunks = [
AIMessageChunk(
content=[{"type": "reasoning", "id": "abc123", "summary": [], "index": 0}],
response_metadata={"model_provider": "openai"},
),
AIMessageChunk(
content=[
{
"type": "reasoning",
"id": "abc234",
"summary": [
{"type": "summary_text", "text": "foo ", "index": 0},
],
"index": 1,
}
],
response_metadata={"model_provider": "openai"},
),
AIMessageChunk(
content=[
{
"type": "reasoning",
"id": "abc234",
"summary": [
{"type": "summary_text", "text": "bar", "index": 0},
],
"index": 1,
}
],
response_metadata={"model_provider": "openai"},
),
AIMessageChunk(
content=[
{
"type": "reasoning",
"id": "abc234",
"summary": [
{"type": "summary_text", "text": "baz", "index": 1},
],
"index": 1,
}
],
response_metadata={"model_provider": "openai"},
),
]
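    # Note: the "lc_rs_*" index strings in the expected chunks below appear to be
    # hex-encodings of "<block index>_<summary index>" (e.g. "305f30" decodes to
    # "0_0" and "315f31" to "1_1"). This is inferred from the expected values,
    # not from documented behavior.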
expected_chunks = [
AIMessageChunk(
content=[{"type": "reasoning", "id": "abc123", "index": "lc_rs_305f30"}],
response_metadata={"model_provider": "openai"},
),
AIMessageChunk(
content=[
{
"type": "reasoning",
"id": "abc234",
"reasoning": "foo ",
"index": "lc_rs_315f30",
}
],
response_metadata={"model_provider": "openai"},
),
AIMessageChunk(
content=[
{
"type": "reasoning",
"id": "abc234",
"reasoning": "bar",
"index": "lc_rs_315f30",
}
],
response_metadata={"model_provider": "openai"},
),
AIMessageChunk(
content=[
{
"type": "reasoning",
"id": "abc234",
"reasoning": "baz",
"index": "lc_rs_315f31",
}
],
response_metadata={"model_provider": "openai"},
),
]
for chunk, expected in zip(chunks, expected_chunks, strict=False):
assert chunk.content_blocks == expected.content_blocks
full: AIMessageChunk | None = None
for chunk in chunks:
full = chunk if full is None else full + chunk
assert isinstance(full, AIMessageChunk)
expected_content = [
{"type": "reasoning", "id": "abc123", "summary": [], "index": 0},
{
"type": "reasoning",
"id": "abc234",
"summary": [
{"type": "summary_text", "text": "foo bar", "index": 0},
{"type": "summary_text", "text": "baz", "index": 1},
],
"index": 1,
},
]
assert full.content == expected_content
expected_content_blocks = [
{"type": "reasoning", "id": "abc123", "index": "lc_rs_305f30"},
{
"type": "reasoning",
"id": "abc234",
"reasoning": "foo bar",
"index": "lc_rs_315f30",
},
{
"type": "reasoning",
"id": "abc234",
"reasoning": "baz",
"index": "lc_rs_315f31",
},
]
assert full.content_blocks == expected_content_blocks
def test_convert_to_v1_from_openai_input() -> None:
message = HumanMessage(
content=[
{"type": "text", "text": "Hello"},
{
"type": "image_url",
"image_url": {"url": "https://example.com/image.png"},
},
{
"type": "image_url",
"image_url": {"url": "data:image/jpeg;base64,/9j/4AAQSkZJRg..."},
},
{
"type": "input_audio",
"input_audio": {
"format": "wav",
"data": "<base64 string>",
},
},
{
"type": "file",
"file": {
"filename": "draconomicon.pdf",
"file_data": "data:application/pdf;base64,<base64 string>",
},
},
{
"type": "file",
"file": {"file_id": "<file id>"},
},
]
)
expected: list[types.ContentBlock] = [
{"type": "text", "text": "Hello"},
{
"type": "image",
"url": "https://example.com/image.png",
},
{
"type": "image",
"base64": "/9j/4AAQSkZJRg...",
"mime_type": "image/jpeg",
},
{
"type": "audio",
"base64": "<base64 string>",
"mime_type": "audio/wav",
},
{
"type": "file",
"base64": "<base64 string>",
"mime_type": "application/pdf",
"extras": {"filename": "draconomicon.pdf"},
},
{"type": "file", "file_id": "<file id>"},
]
assert _content_blocks_equal_ignore_id(message.content_blocks, expected)
def test_compat_responses_v03() -> None:
# Check compatibility with v0.3 legacy message format
message_v03 = AIMessage(
content=[
{"type": "text", "text": "Hello, world!", "annotations": [{"type": "foo"}]}
],
additional_kwargs={
"reasoning": {
"type": "reasoning",
"id": "rs_123",
"summary": [
{"type": "summary_text", "text": "summary 1"},
{"type": "summary_text", "text": "summary 2"},
],
},
"tool_outputs": [
{
"type": "web_search_call",
"id": "websearch_123",
"status": "completed",
}
],
"refusal": "I cannot assist with that.",
"__openai_function_call_ids__": {"call_abc": "fc_abc"},
},
tool_calls=[
{"type": "tool_call", "name": "my_tool", "args": {"x": 3}, "id": "call_abc"}
],
response_metadata={"id": "resp_123", "model_provider": "openai"},
id="msg_123",
)
expected_content: list[types.ContentBlock] = [
{"type": "reasoning", "id": "rs_123", "reasoning": "summary 1"},
{"type": "reasoning", "id": "rs_123", "reasoning": "summary 2"},
{
"type": "text",
"text": "Hello, world!",
"annotations": [
{"type": "non_standard_annotation", "value": {"type": "foo"}}
],
"id": "msg_123",
},
{
"type": "non_standard",
"value": {"type": "refusal", "refusal": "I cannot assist with that."},
},
{
"type": "tool_call",
"name": "my_tool",
"args": {"x": 3},
"id": "call_abc",
"extras": {"item_id": "fc_abc"},
},
{
"type": "server_tool_call",
"name": "web_search",
"args": {},
"id": "websearch_123",
},
{
"type": "server_tool_result",
"tool_call_id": "websearch_123",
"status": "success",
},
]
assert message_v03.content_blocks == expected_content
# --- Test chunks --- #
# Tool calls
chunk_1 = AIMessageChunk(
content=[],
additional_kwargs={"__openai_function_call_ids__": {"call_abc": "fc_abc"}},
tool_call_chunks=[
{
"type": "tool_call_chunk",
"name": "my_tool",
"args": "",
"id": "call_abc",
"index": 0,
}
],
response_metadata={"model_provider": "openai"},
)
expected_content = [
{
"type": "tool_call_chunk",
"name": "my_tool",
"args": "",
"id": "call_abc",
"index": 0,
"extras": {"item_id": "fc_abc"},
}
]
assert chunk_1.content_blocks == expected_content
chunk_2 = AIMessageChunk(
content=[],
additional_kwargs={"__openai_function_call_ids__": {}},
tool_call_chunks=[
{
"type": "tool_call_chunk",
"name": None,
"args": "{",
"id": None,
"index": 0,
}
],
)
    expected_content = [
        {"type": "tool_call_chunk", "name": None, "args": "{", "id": None, "index": 0}
    ]
    assert chunk_2.content_blocks == expected_content
chunk = chunk_1 + chunk_2
expected_content = [
{
"type": "tool_call_chunk",
"name": "my_tool",
"args": "{",
"id": "call_abc",
"index": 0,
"extras": {"item_id": "fc_abc"},
}
]
assert chunk.content_blocks == expected_content
# Reasoning
chunk_1 = AIMessageChunk(
content=[],
additional_kwargs={
"reasoning": {"id": "rs_abc", "summary": [], "type": "reasoning"}
},
response_metadata={"model_provider": "openai"},
)
expected_content = [{"type": "reasoning", "id": "rs_abc"}]
assert chunk_1.content_blocks == expected_content
chunk_2 = AIMessageChunk(
content=[],
additional_kwargs={
"reasoning": {
"summary": [
{"index": 0, "type": "summary_text", "text": "reasoning text"}
]
}
},
response_metadata={"model_provider": "openai"},
)
expected_content = [{"type": "reasoning", "reasoning": "reasoning text"}]
assert chunk_2.content_blocks == expected_content
chunk = chunk_1 + chunk_2
expected_content = [
{"type": "reasoning", "reasoning": "reasoning text", "id": "rs_abc"}
]
assert chunk.content_blocks == expected_content
def test_convert_to_openai_data_block() -> None:
# Chat completions
# Image / url
block = {
"type": "image",
"url": "https://example.com/test.png",
}
expected = {
"type": "image_url",
"image_url": {"url": "https://example.com/test.png"},
}
result = convert_to_openai_data_block(block)
assert result == expected
# Image / base64
block = {
"type": "image",
"base64": "<base64 string>",
"mime_type": "image/png",
}
expected = {
"type": "image_url",
"image_url": {"url": "data:image/png;base64,<base64 string>"},
}
result = convert_to_openai_data_block(block)
assert result == expected
# File / url
block = {
"type": "file",
"url": "https://example.com/test.pdf",
}
with pytest.raises(ValueError, match="does not support"):
result = convert_to_openai_data_block(block)
# File / base64
block = {
"type": "file",
"base64": "<base64 string>",
"mime_type": "application/pdf",
"filename": "test.pdf",
}
expected = {
"type": "file",
"file": {
"file_data": "data:application/pdf;base64,<base64 string>",
"filename": "test.pdf",
},
}
result = convert_to_openai_data_block(block)
assert result == expected
# File / file ID
block = {
"type": "file",
"file_id": "file-abc123",
}
expected = {"type": "file", "file": {"file_id": "file-abc123"}}
result = convert_to_openai_data_block(block)
assert result == expected
# Audio / base64
block = {
"type": "audio",
"base64": "<base64 string>",
"mime_type": "audio/wav",
}
expected = {
"type": "input_audio",
"input_audio": {"data": "<base64 string>", "format": "wav"},
}
result = convert_to_openai_data_block(block)
assert result == expected
# Responses
# Image / url
block = {
"type": "image",
"url": "https://example.com/test.png",
}
expected = {"type": "input_image", "image_url": "https://example.com/test.png"}
result = convert_to_openai_data_block(block, api="responses")
assert result == expected
# Image / base64
block = {
"type": "image",
"base64": "<base64 string>",
"mime_type": "image/png",
}
expected = {
"type": "input_image",
"image_url": "data:image/png;base64,<base64 string>",
}
result = convert_to_openai_data_block(block, api="responses")
assert result == expected
# File / url
block = {
"type": "file",
"url": "https://example.com/test.pdf",
}
expected = {"type": "input_file", "file_url": "https://example.com/test.pdf"}
# File / base64
block = {
"type": "file",
"base64": "<base64 string>",
"mime_type": "application/pdf",
"filename": "test.pdf",
}
expected = {
"type": "input_file",
"file_data": "data:application/pdf;base64,<base64 string>",
"filename": "test.pdf",
}
result = convert_to_openai_data_block(block, api="responses")
assert result == expected
# File / file ID
block = {
"type": "file",
"file_id": "file-abc123",
}
expected = {"type": "input_file", "file_id": "file-abc123"}
result = convert_to_openai_data_block(block, api="responses")
assert result == expected
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/core/tests/unit_tests/messages/block_translators/test_openai.py",
"license": "MIT License",
"lines": 570,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
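`test_convert_to_v1_from_openai_input` above relies on data URLs like `data:image/jpeg;base64,...` being split into a mime type and a base64 payload. A self-contained sketch of that parsing step (the helper name is hypothetical; only the base64 variant exercised by the test is handled):

```python
def _parse_data_url(url: str) -> dict[str, str]:
    """Split a `data:<mime>;base64,<payload>` URL into a v1-style image block."""
    header, _, payload = url.partition(",")
    mime_type = header.removeprefix("data:").removesuffix(";base64")
    return {"type": "image", "base64": payload, "mime_type": mime_type}


assert _parse_data_url("data:image/jpeg;base64,/9j/4AAQSkZJRg...") == {
    "type": "image",
    "base64": "/9j/4AAQSkZJRg...",
    "mime_type": "image/jpeg",
}
```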
langchain-ai/langchain:libs/core/tests/unit_tests/messages/block_translators/test_registration.py | import pkgutil
from pathlib import Path
import pytest
from langchain_core.messages.block_translators import PROVIDER_TRANSLATORS
def test_all_providers_registered() -> None:
"""Test that all block translators implemented in langchain-core are registered.
If this test fails, it is likely that a block translator is implemented but not
registered on import. Check that the provider is included in
`langchain_core.messages.block_translators.__init__._register_translators`.
"""
package_path = (
Path(__file__).parents[4] / "langchain_core" / "messages" / "block_translators"
)
for module_info in pkgutil.iter_modules([str(package_path)]):
module_name = module_info.name
# Skip the __init__ module, any private modules, and `langchain_v0`, which is
# only used to parse v0 multimodal inputs.
if module_name.startswith("_") or module_name == "langchain_v0":
continue
if module_name not in PROVIDER_TRANSLATORS:
pytest.fail(f"Block translator not registered: {module_name}")
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/core/tests/unit_tests/messages/block_translators/test_registration.py",
"license": "MIT License",
"lines": 21,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
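The registration test above walks the `block_translators` package and checks each public module against `PROVIDER_TRANSLATORS`. A minimal sketch of the registry pattern the test assumes; everything except the `PROVIDER_TRANSLATORS` name is illustrative, including the translator signature:

```python
from collections.abc import Callable
from typing import Any

# Maps a provider/module name to its translator callable (assumed shape).
PROVIDER_TRANSLATORS: dict[str, Callable[[list[dict[str, Any]]], list[dict[str, Any]]]] = {}


def register_translator(
    name: str, translator: Callable[[list[dict[str, Any]]], list[dict[str, Any]]]
) -> None:
    """Record a provider translator so the package-scan test can find it."""
    PROVIDER_TRANSLATORS[name] = translator


register_translator("bedrock_converse", lambda blocks: blocks)
assert "bedrock_converse" in PROVIDER_TRANSLATORS
```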
langchain-ai/langchain:libs/partners/anthropic/langchain_anthropic/_compat.py | from __future__ import annotations
import json
from typing import Any, cast
from langchain_core.messages import content as types
def _convert_annotation_from_v1(annotation: types.Annotation) -> dict[str, Any]:
"""Convert LangChain annotation format to Anthropic's native citation format."""
if annotation["type"] == "non_standard_annotation":
return annotation["value"]
if annotation["type"] == "citation":
if "url" in annotation:
# web_search_result_location
out: dict[str, Any] = {}
if cited_text := annotation.get("cited_text"):
out["cited_text"] = cited_text
if "encrypted_index" in annotation.get("extras", {}):
out["encrypted_index"] = annotation.get("extras", {})["encrypted_index"]
if "title" in annotation:
out["title"] = annotation["title"]
out["type"] = "web_search_result_location"
out["url"] = annotation.get("url")
for key, value in annotation.get("extras", {}).items():
if key not in out:
out[key] = value
return out
if "start_char_index" in annotation.get("extras", {}):
# char_location
out = {"type": "char_location"}
for field in ["cited_text"]:
if value := annotation.get(field):
out[field] = value
if title := annotation.get("title"):
out["document_title"] = title
for key, value in annotation.get("extras", {}).items():
out[key] = value
out = {k: out[k] for k in sorted(out)}
return out
if "search_result_index" in annotation.get("extras", {}):
# search_result_location
out = {"type": "search_result_location"}
for field in ["cited_text", "title"]:
if value := annotation.get(field):
out[field] = value
for key, value in annotation.get("extras", {}).items():
out[key] = value
return out
if "start_block_index" in annotation.get("extras", {}):
# content_block_location
out = {}
if cited_text := annotation.get("cited_text"):
out["cited_text"] = cited_text
if "document_index" in annotation.get("extras", {}):
out["document_index"] = annotation.get("extras", {})["document_index"]
if "title" in annotation:
out["document_title"] = annotation["title"]
for key, value in annotation.get("extras", {}).items():
if key not in out:
out[key] = value
out["type"] = "content_block_location"
return out
if "start_page_number" in annotation.get("extras", {}):
# page_location
out = {"type": "page_location"}
for field in ["cited_text"]:
if value := annotation.get(field):
out[field] = value
if title := annotation.get("title"):
out["document_title"] = title
for key, value in annotation.get("extras", {}).items():
out[key] = value
return out
return cast(dict[str, Any], annotation)
return cast(dict[str, Any], annotation)
def _convert_from_v1_to_anthropic(
content: list[types.ContentBlock],
tool_calls: list[types.ToolCall],
model_provider: str | None,
) -> list[dict[str, Any]]:
new_content: list = []
for block in content:
if block["type"] == "text":
if model_provider == "anthropic" and "annotations" in block:
new_block: dict[str, Any] = {"type": "text"}
new_block["citations"] = [
_convert_annotation_from_v1(a) for a in block["annotations"]
]
if "text" in block:
new_block["text"] = block["text"]
else:
new_block = {"text": block.get("text", ""), "type": "text"}
new_content.append(new_block)
elif block["type"] == "tool_call":
tool_use_block = {
"type": "tool_use",
"name": block.get("name", ""),
"input": block.get("args", {}),
"id": block.get("id", ""),
}
if "caller" in block.get("extras", {}):
tool_use_block["caller"] = block["extras"]["caller"]
new_content.append(tool_use_block)
elif block["type"] == "tool_call_chunk":
if isinstance(block["args"], str):
try:
input_ = json.loads(block["args"] or "{}")
except json.JSONDecodeError:
input_ = {}
else:
input_ = block.get("args") or {}
new_content.append(
{
"type": "tool_use",
"name": block.get("name", ""),
"input": input_,
"id": block.get("id", ""),
}
)
elif block["type"] == "reasoning" and model_provider == "anthropic":
new_block = {}
if "reasoning" in block:
new_block["thinking"] = block["reasoning"]
new_block["type"] = "thinking"
if signature := block.get("extras", {}).get("signature"):
new_block["signature"] = signature
new_content.append(new_block)
elif block["type"] == "server_tool_call" and model_provider == "anthropic":
new_block = {}
if "id" in block:
new_block["id"] = block["id"]
new_block["input"] = block.get("args", {})
if partial_json := block.get("extras", {}).get("partial_json"):
new_block["input"] = {}
new_block["partial_json"] = partial_json
if block.get("name") == "code_interpreter":
new_block["name"] = "code_execution"
elif block.get("name") == "remote_mcp":
if "tool_name" in block.get("extras", {}):
new_block["name"] = block["extras"]["tool_name"]
if "server_name" in block.get("extras", {}):
new_block["server_name"] = block["extras"]["server_name"]
else:
new_block["name"] = block.get("name", "")
if block.get("name") == "remote_mcp":
new_block["type"] = "mcp_tool_use"
else:
new_block["type"] = "server_tool_use"
new_content.append(new_block)
elif block["type"] == "server_tool_result" and model_provider == "anthropic":
new_block = {}
if "output" in block:
new_block["content"] = block["output"]
server_tool_result_type = block.get("extras", {}).get("block_type", "")
if server_tool_result_type == "mcp_tool_result":
new_block["is_error"] = block.get("status") == "error"
if "tool_call_id" in block:
new_block["tool_use_id"] = block["tool_call_id"]
new_block["type"] = server_tool_result_type
new_content.append(new_block)
elif (
block["type"] == "non_standard"
and "value" in block
and model_provider == "anthropic"
):
new_content.append(block["value"])
else:
new_content.append(block)
return new_content
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/partners/anthropic/langchain_anthropic/_compat.py",
"license": "MIT License",
"lines": 168,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
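For reference, a small usage sketch of `_convert_annotation_from_v1` above; the expected output follows directly from the `web_search_result_location` branch of the function:

```python
annotation = {
    "type": "citation",
    "url": "https://example.com",
    "cited_text": "sunny",
    "title": "Doc",
}
assert _convert_annotation_from_v1(annotation) == {
    "cited_text": "sunny",
    "title": "Doc",
    "type": "web_search_result_location",
    "url": "https://example.com",
}
```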
langchain-ai/langchain:libs/partners/ollama/langchain_ollama/_compat.py | """Go from v1 content blocks to Ollama SDK format."""
from typing import Any
from langchain_core.messages import content as types
def _convert_from_v1_to_ollama(
content: list[types.ContentBlock],
model_provider: str | None, # noqa: ARG001
) -> list[dict[str, Any]]:
"""Convert v1 content blocks to Ollama format.
Args:
content: List of v1 `ContentBlock` objects.
model_provider: The model provider name that generated the v1 content.
Returns:
List of content blocks in Ollama format.
"""
new_content: list = []
for block in content:
if not isinstance(block, dict) or "type" not in block:
continue
block_dict = dict(block) # (For typing)
# TextContentBlock
if block_dict["type"] == "text":
# Note: this drops all other fields/extras
new_content.append({"type": "text", "text": block_dict["text"]})
# ReasoningContentBlock
# Ollama doesn't take reasoning back in
# In the future, could consider coercing into text as an option?
# e.g.:
# if block_dict["type"] == "reasoning":
# # Attempt to preserve content in text form
# new_content.append({"text": str(block_dict["reasoning"])})
# ImageContentBlock
if block_dict["type"] == "image":
# Already handled in _get_image_from_data_content_block
new_content.append(block_dict)
# TODO: AudioContentBlock once models support
# TODO: FileContentBlock once models support
# ToolCall -> ???
# if block_dict["type"] == "tool_call":
# function_call = {}
# new_content.append(function_call)
# ToolCallChunk -> ???
# elif block_dict["type"] == "tool_call_chunk":
# function_call = {}
# new_content.append(function_call)
# NonStandardContentBlock
if block_dict["type"] == "non_standard":
# Attempt to preserve content in text form
new_content.append(
{"type": "text", "text": str(block_dict.get("value", ""))}
)
return new_content
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/partners/ollama/langchain_ollama/_compat.py",
"license": "MIT License",
"lines": 51,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
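A quick usage sketch of `_convert_from_v1_to_ollama` above, illustrating the three behaviors the function implements: text blocks pass through, reasoning blocks are dropped, and non-standard blocks are coerced to text:

```python
blocks = [
    {"type": "text", "text": "hi"},
    {"type": "reasoning", "reasoning": "internal thoughts"},  # dropped
    {"type": "non_standard", "value": {"foo": 1}},  # stringified into text
]
assert _convert_from_v1_to_ollama(blocks, None) == [
    {"type": "text", "text": "hi"},
    {"type": "text", "text": "{'foo': 1}"},
]
```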
langchain-ai/langchain:libs/partners/ollama/tests/unit_tests/test_auth.py | """Test URL authentication parsing functionality."""
import base64
from unittest.mock import MagicMock, patch
from langchain_ollama._utils import parse_url_with_auth
from langchain_ollama.chat_models import ChatOllama
from langchain_ollama.embeddings import OllamaEmbeddings
from langchain_ollama.llms import OllamaLLM
MODEL_NAME = "llama3.1"
class TestParseUrlWithAuth:
"""Test the parse_url_with_auth utility function."""
def test_parse_url_with_auth_none_input(self) -> None:
"""Test that None input returns None, None."""
result = parse_url_with_auth(None)
assert result == (None, None)
def test_parse_url_with_auth_no_credentials(self) -> None:
"""Test URLs without authentication credentials."""
url = "https://ollama.example.com:11434/path?query=param"
result = parse_url_with_auth(url)
assert result == (url, None)
def test_parse_url_with_auth_with_credentials(self) -> None:
"""Test URLs with authentication credentials."""
url = "https://user:password@ollama.example.com:11434"
cleaned_url, headers = parse_url_with_auth(url)
expected_url = "https://ollama.example.com:11434"
expected_credentials = base64.b64encode(b"user:password").decode()
expected_headers = {"Authorization": f"Basic {expected_credentials}"}
assert cleaned_url == expected_url
assert headers == expected_headers
def test_parse_url_with_auth_with_path_and_query(self) -> None:
"""Test URLs with auth, path, and query parameters."""
url = "https://user:pass@ollama.example.com:11434/api/v1?timeout=30"
cleaned_url, headers = parse_url_with_auth(url)
expected_url = "https://ollama.example.com:11434/api/v1?timeout=30"
expected_credentials = base64.b64encode(b"user:pass").decode()
expected_headers = {"Authorization": f"Basic {expected_credentials}"}
assert cleaned_url == expected_url
assert headers == expected_headers
def test_parse_url_with_auth_special_characters(self) -> None:
"""Test URLs with special characters in credentials."""
url = "https://user%40domain:p%40ssw0rd@ollama.example.com:11434"
cleaned_url, headers = parse_url_with_auth(url)
expected_url = "https://ollama.example.com:11434"
# Note: URL parsing handles percent-encoding automatically
expected_credentials = base64.b64encode(b"user@domain:p@ssw0rd").decode()
expected_headers = {"Authorization": f"Basic {expected_credentials}"}
assert cleaned_url == expected_url
assert headers == expected_headers
def test_parse_url_with_auth_only_username(self) -> None:
"""Test URLs with only username (no password)."""
url = "https://user@ollama.example.com:11434"
cleaned_url, headers = parse_url_with_auth(url)
expected_url = "https://ollama.example.com:11434"
expected_credentials = base64.b64encode(b"user:").decode()
expected_headers = {"Authorization": f"Basic {expected_credentials}"}
assert cleaned_url == expected_url
assert headers == expected_headers
def test_parse_url_with_auth_empty_password(self) -> None:
"""Test URLs with empty password."""
url = "https://user:@ollama.example.com:11434"
cleaned_url, headers = parse_url_with_auth(url)
expected_url = "https://ollama.example.com:11434"
expected_credentials = base64.b64encode(b"user:").decode()
expected_headers = {"Authorization": f"Basic {expected_credentials}"}
assert cleaned_url == expected_url
assert headers == expected_headers
class TestChatOllamaUrlAuth:
"""Test URL authentication integration with ChatOllama."""
@patch("langchain_ollama.chat_models.Client")
@patch("langchain_ollama.chat_models.AsyncClient")
def test_chat_ollama_url_auth_integration(
self, mock_async_client: MagicMock, mock_client: MagicMock
) -> None:
"""Test that ChatOllama properly handles URL authentication."""
url_with_auth = "https://user:password@ollama.example.com:11434"
ChatOllama(
model=MODEL_NAME,
base_url=url_with_auth,
)
# Verify the clients were called with cleaned URL and auth headers
expected_url = "https://ollama.example.com:11434"
expected_credentials = base64.b64encode(b"user:password").decode()
expected_headers = {"Authorization": f"Basic {expected_credentials}"}
mock_client.assert_called_once_with(host=expected_url, headers=expected_headers)
mock_async_client.assert_called_once_with(
host=expected_url, headers=expected_headers
)
@patch("langchain_ollama.chat_models.Client")
@patch("langchain_ollama.chat_models.AsyncClient")
def test_chat_ollama_url_auth_with_existing_headers(
self, mock_async_client: MagicMock, mock_client: MagicMock
) -> None:
"""Test that URL auth headers merge with existing headers."""
url_with_auth = "https://user:password@ollama.example.com:11434"
existing_headers = {"User-Agent": "test-agent", "X-Custom": "value"}
ChatOllama(
model=MODEL_NAME,
base_url=url_with_auth,
client_kwargs={"headers": existing_headers},
)
# Verify headers are merged
expected_url = "https://ollama.example.com:11434"
expected_credentials = base64.b64encode(b"user:password").decode()
expected_headers = {
**existing_headers,
"Authorization": f"Basic {expected_credentials}",
}
mock_client.assert_called_once_with(host=expected_url, headers=expected_headers)
mock_async_client.assert_called_once_with(
host=expected_url, headers=expected_headers
)
class TestOllamaLLMUrlAuth:
"""Test URL authentication integration with OllamaLLM."""
@patch("langchain_ollama.llms.Client")
@patch("langchain_ollama.llms.AsyncClient")
def test_ollama_llm_url_auth_integration(
self, mock_async_client: MagicMock, mock_client: MagicMock
) -> None:
"""Test that OllamaLLM properly handles URL authentication."""
url_with_auth = "https://user:password@ollama.example.com:11434"
OllamaLLM(
model=MODEL_NAME,
base_url=url_with_auth,
)
expected_url = "https://ollama.example.com:11434"
expected_credentials = base64.b64encode(b"user:password").decode()
expected_headers = {"Authorization": f"Basic {expected_credentials}"}
mock_client.assert_called_once_with(host=expected_url, headers=expected_headers)
mock_async_client.assert_called_once_with(
host=expected_url, headers=expected_headers
)
class TestOllamaEmbeddingsUrlAuth:
"""Test URL authentication integration with OllamaEmbeddings."""
@patch("langchain_ollama.embeddings.Client")
@patch("langchain_ollama.embeddings.AsyncClient")
def test_ollama_embeddings_url_auth_integration(
self, mock_async_client: MagicMock, mock_client: MagicMock
) -> None:
"""Test that OllamaEmbeddings properly handles URL authentication."""
url_with_auth = "https://user:password@ollama.example.com:11434"
OllamaEmbeddings(
model=MODEL_NAME,
base_url=url_with_auth,
)
expected_url = "https://ollama.example.com:11434"
expected_credentials = base64.b64encode(b"user:password").decode()
expected_headers = {"Authorization": f"Basic {expected_credentials}"}
mock_client.assert_called_once_with(host=expected_url, headers=expected_headers)
mock_async_client.assert_called_once_with(
host=expected_url, headers=expected_headers
)
class TestUrlAuthEdgeCases:
"""Test edge cases and error conditions for URL authentication."""
def test_parse_url_with_auth_malformed_url(self) -> None:
"""Test behavior with malformed URLs."""
malformed_url = "not-a-valid-url"
result = parse_url_with_auth(malformed_url)
        # Should not return a URL, since a malformed URL cannot be parsed or
        # used to reach a server
assert result == (None, None)
def test_parse_url_with_auth_no_port(self) -> None:
"""Test URLs without explicit port numbers."""
url = "https://user:password@ollama.example.com"
cleaned_url, headers = parse_url_with_auth(url)
expected_url = "https://ollama.example.com"
expected_credentials = base64.b64encode(b"user:password").decode()
expected_headers = {"Authorization": f"Basic {expected_credentials}"}
assert cleaned_url == expected_url
assert headers == expected_headers
def test_parse_url_with_auth_complex_password(self) -> None:
"""Test with complex passwords containing special characters."""
# Test password with colon, which is the delimiter
url = "https://user:pass:word@ollama.example.com:11434"
cleaned_url, headers = parse_url_with_auth(url)
expected_url = "https://ollama.example.com:11434"
# The parser should handle the first colon as the separator
expected_credentials = base64.b64encode(b"user:pass:word").decode()
expected_headers = {"Authorization": f"Basic {expected_credentials}"}
assert cleaned_url == expected_url
assert headers == expected_headers
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/partners/ollama/tests/unit_tests/test_auth.py",
"license": "MIT License",
"lines": 178,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
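The tests above fully pin down the contract of `parse_url_with_auth`. One way the utility might be implemented, written only against those assertions; the real helper in `langchain_ollama._utils` may differ:

```python
import base64
from urllib.parse import unquote, urlsplit, urlunsplit


def parse_url_with_auth(url: str | None) -> tuple[str | None, dict[str, str] | None]:
    """Strip userinfo from a URL and turn it into a Basic auth header (sketch)."""
    if url is None:
        return None, None
    parts = urlsplit(url)
    if not parts.scheme or not parts.hostname:
        # Malformed URL: no usable scheme/host, per the edge-case test.
        return None, None
    if parts.username is None:
        return url, None
    # urlsplit keeps percent-encoding; decode before building the credential pair.
    user = unquote(parts.username)
    password = unquote(parts.password or "")
    host = parts.hostname
    if parts.port:
        host = f"{host}:{parts.port}"
    cleaned = urlunsplit((parts.scheme, host, parts.path, parts.query, parts.fragment))
    token = base64.b64encode(f"{user}:{password}".encode()).decode()
    return cleaned, {"Authorization": f"Basic {token}"}


cleaned, headers = parse_url_with_auth("https://user:password@ollama.example.com:11434")
assert cleaned == "https://ollama.example.com:11434"
assert headers == {"Authorization": f"Basic {base64.b64encode(b'user:password').decode()}"}
```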
langchain-ai/langchain:libs/partners/exa/tests/unit_tests/test_standard.py | """Standard unit tests for ExaSearchRetriever."""
import pytest
from pytest_benchmark.fixture import BenchmarkFixture # type: ignore[import-untyped]
from langchain_exa import ExaSearchRetriever
@pytest.mark.benchmark
def test_exa_retriever_init_time(benchmark: BenchmarkFixture) -> None:
"""Test ExaSearchRetriever initialization time."""
def _init_exa_retriever() -> None:
for _ in range(10):
ExaSearchRetriever()
benchmark(_init_exa_retriever)
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/partners/exa/tests/unit_tests/test_standard.py",
"license": "MIT License",
"lines": 11,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/partners/nomic/tests/unit_tests/test_standard.py | """Unit tests for standard tests in Nomic partner integration."""
import pytest
from pytest_benchmark.fixture import BenchmarkFixture # type: ignore[import]
from langchain_nomic import NomicEmbeddings
@pytest.mark.benchmark
def test_nomic_embeddings_init_time(benchmark: BenchmarkFixture) -> None:
"""Test NomicEmbeddings initialization time."""
def _init_nomic_embeddings() -> None:
for _ in range(10):
NomicEmbeddings(model="test")
benchmark(_init_nomic_embeddings)
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/partners/nomic/tests/unit_tests/test_standard.py",
"license": "MIT License",
"lines": 11,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/partners/qdrant/tests/unit_tests/test_standard.py | import pytest
from langchain_core.embeddings import Embeddings
from pytest_benchmark.fixture import BenchmarkFixture # type: ignore[import-untyped]
from langchain_qdrant import QdrantVectorStore
class MockEmbeddings(Embeddings):
"""Mock embeddings for testing."""
def embed_documents(self, texts: list[str]) -> list[list[float]]:
"""Mock embed_documents method."""
return [[1.0, 2.0, 3.0] for _ in texts]
    def embed_query(self, text: str) -> list[float]:
        """Mock embed_query method."""
        del text  # the mock ignores the query text
        return [1.0, 2.0, 3.0]
@pytest.mark.benchmark
def test_qdrant_vectorstore_init_time(benchmark: BenchmarkFixture) -> None:
"""Test QdrantVectorStore initialization time."""
def _init_qdrant_vectorstore() -> None:
for _ in range(10):
QdrantVectorStore.from_texts(
texts=["test"],
embedding=MockEmbeddings(),
location=":memory:",
collection_name="test",
)
benchmark(_init_qdrant_vectorstore)
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/partners/qdrant/tests/unit_tests/test_standard.py",
"license": "MIT License",
"lines": 24,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/langchain/tests/unit_tests/chains/test_flare.py | """Tests for FlareChain.from_llm preserving supplied ChatOpenAI instance."""
from typing import cast
import pytest
from langchain_core.documents import Document
from langchain_core.retrievers import BaseRetriever
from langchain_core.runnables import RunnableSequence
from langchain_classic.chains.flare.base import FlareChain
class _EmptyRetriever(BaseRetriever):
"""Minimal no-op retriever used only for constructing FlareChain in tests."""
def _get_relevant_documents(self, query: str) -> list[Document]: # type: ignore[override]
del query # mark used
return []
async def _aget_relevant_documents(self, query: str) -> list[Document]: # type: ignore[override]
del query # mark used
return []
def test_from_llm_rejects_non_chatopenai() -> None:
class Dummy:
pass
with pytest.raises(TypeError):
FlareChain.from_llm(Dummy()) # type: ignore[arg-type]
@pytest.mark.requires("langchain_openai")
def test_from_llm_uses_supplied_chatopenai(monkeypatch: pytest.MonkeyPatch) -> None:
try:
from langchain_openai import ChatOpenAI
except ImportError: # pragma: no cover
pytest.skip("langchain-openai not installed")
# Provide dummy API key to satisfy constructor env validation.
monkeypatch.setenv("OPENAI_API_KEY", "TEST")
supplied = ChatOpenAI(temperature=0.51, logprobs=True, max_completion_tokens=21)
chain = FlareChain.from_llm(
supplied,
max_generation_len=32,
retriever=_EmptyRetriever(),
)
llm_in_chain = cast("RunnableSequence", chain.question_generator_chain).steps[1]
assert llm_in_chain is supplied
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain/tests/unit_tests/chains/test_flare.py",
"license": "MIT License",
"lines": 36,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/langchain_v1/langchain/agents/middleware/human_in_the_loop.py | """Human in the loop middleware."""
from typing import Any, Literal, Protocol
from langchain_core.messages import AIMessage, ToolCall, ToolMessage
from langgraph.runtime import Runtime
from langgraph.types import interrupt
from typing_extensions import NotRequired, TypedDict
from langchain.agents.middleware.types import (
AgentMiddleware,
AgentState,
ContextT,
ResponseT,
StateT,
)
class Action(TypedDict):
"""Represents an action with a name and args."""
name: str
"""The type or name of action being requested (e.g., `'add_numbers'`)."""
args: dict[str, Any]
"""Key-value pairs of args needed for the action (e.g., `{"a": 1, "b": 2}`)."""
class ActionRequest(TypedDict):
"""Represents an action request with a name, args, and description."""
name: str
"""The name of the action being requested."""
args: dict[str, Any]
"""Key-value pairs of args needed for the action (e.g., `{"a": 1, "b": 2}`)."""
description: NotRequired[str]
"""The description of the action to be reviewed."""
DecisionType = Literal["approve", "edit", "reject"]
class ReviewConfig(TypedDict):
"""Policy for reviewing a HITL request."""
action_name: str
"""Name of the action associated with this review configuration."""
allowed_decisions: list[DecisionType]
"""The decisions that are allowed for this request."""
args_schema: NotRequired[dict[str, Any]]
"""JSON schema for the args associated with the action, if edits are allowed."""
class HITLRequest(TypedDict):
"""Request for human feedback on a sequence of actions requested by a model."""
action_requests: list[ActionRequest]
"""A list of agent actions for human review."""
review_configs: list[ReviewConfig]
"""Review configuration for all possible actions."""
class ApproveDecision(TypedDict):
"""Response when a human approves the action."""
type: Literal["approve"]
"""The type of response when a human approves the action."""
class EditDecision(TypedDict):
"""Response when a human edits the action."""
type: Literal["edit"]
"""The type of response when a human edits the action."""
edited_action: Action
"""Edited action for the agent to perform.
Ex: for a tool call, a human reviewer can edit the tool name and args.
"""
class RejectDecision(TypedDict):
"""Response when a human rejects the action."""
type: Literal["reject"]
"""The type of response when a human rejects the action."""
message: NotRequired[str]
"""The message sent to the model explaining why the action was rejected."""
Decision = ApproveDecision | EditDecision | RejectDecision
class HITLResponse(TypedDict):
"""Response payload for a HITLRequest."""
decisions: list[Decision]
"""The decisions made by the human."""
class _DescriptionFactory(Protocol):
"""Callable that generates a description for a tool call."""
def __call__(
self, tool_call: ToolCall, state: AgentState[Any], runtime: Runtime[ContextT]
) -> str:
"""Generate a description for a tool call."""
...
class InterruptOnConfig(TypedDict):
"""Configuration for an action requiring human in the loop.
This is the configuration format used in the `HumanInTheLoopMiddleware.__init__`
method.
"""
allowed_decisions: list[DecisionType]
"""The decisions that are allowed for this action."""
description: NotRequired[str | _DescriptionFactory]
"""The description attached to the request for human input.
Can be either:
- A static string describing the approval request
- A callable that dynamically generates the description based on agent state,
runtime, and tool call information
Example:
```python
# Static string description
config = ToolConfig(
allowed_decisions=["approve", "reject"],
description="Please review this tool execution"
)
# Dynamic callable description
def format_tool_description(
tool_call: ToolCall,
state: AgentState,
runtime: Runtime[ContextT]
) -> str:
import json
return (
f"Tool: {tool_call['name']}\\n"
f"Arguments:\\n{json.dumps(tool_call['args'], indent=2)}"
)
config = InterruptOnConfig(
allowed_decisions=["approve", "edit", "reject"],
description=format_tool_description
)
```
"""
args_schema: NotRequired[dict[str, Any]]
"""JSON schema for the args associated with the action, if edits are allowed."""
class HumanInTheLoopMiddleware(AgentMiddleware[StateT, ContextT, ResponseT]):
"""Human in the loop middleware."""
def __init__(
self,
interrupt_on: dict[str, bool | InterruptOnConfig],
*,
description_prefix: str = "Tool execution requires approval",
) -> None:
"""Initialize the human in the loop middleware.
Args:
interrupt_on: Mapping of tool name to allowed actions.
If a tool doesn't have an entry, it's auto-approved by default.
* `True` indicates all decisions are allowed: approve, edit, and reject.
* `False` indicates that the tool is auto-approved.
* `InterruptOnConfig` indicates the specific decisions allowed for this
tool.
The `InterruptOnConfig` can include a `description` field (`str` or
`Callable`) for custom formatting of the interrupt description.
description_prefix: The prefix to use when constructing action requests.
This is used to provide context about the tool call and the action being
requested.
Not used if a tool has a `description` in its `InterruptOnConfig`.
"""
super().__init__()
resolved_configs: dict[str, InterruptOnConfig] = {}
for tool_name, tool_config in interrupt_on.items():
if isinstance(tool_config, bool):
if tool_config is True:
resolved_configs[tool_name] = InterruptOnConfig(
allowed_decisions=["approve", "edit", "reject"]
)
elif tool_config.get("allowed_decisions"):
resolved_configs[tool_name] = tool_config
self.interrupt_on = resolved_configs
self.description_prefix = description_prefix
def _create_action_and_config(
self,
tool_call: ToolCall,
config: InterruptOnConfig,
state: AgentState[Any],
runtime: Runtime[ContextT],
) -> tuple[ActionRequest, ReviewConfig]:
"""Create an ActionRequest and ReviewConfig for a tool call."""
tool_name = tool_call["name"]
tool_args = tool_call["args"]
# Generate description using the description field (str or callable)
description_value = config.get("description")
if callable(description_value):
description = description_value(tool_call, state, runtime)
elif description_value is not None:
description = description_value
else:
description = f"{self.description_prefix}\n\nTool: {tool_name}\nArgs: {tool_args}"
# Create ActionRequest with description
action_request = ActionRequest(
name=tool_name,
args=tool_args,
description=description,
)
# Create ReviewConfig
# eventually can get tool information and populate args_schema from there
review_config = ReviewConfig(
action_name=tool_name,
allowed_decisions=config["allowed_decisions"],
)
return action_request, review_config
@staticmethod
def _process_decision(
decision: Decision,
tool_call: ToolCall,
config: InterruptOnConfig,
) -> tuple[ToolCall | None, ToolMessage | None]:
"""Process a single decision and return the revised tool call and optional tool message."""
allowed_decisions = config["allowed_decisions"]
if decision["type"] == "approve" and "approve" in allowed_decisions:
return tool_call, None
if decision["type"] == "edit" and "edit" in allowed_decisions:
edited_action = decision["edited_action"]
return (
ToolCall(
type="tool_call",
name=edited_action["name"],
args=edited_action["args"],
id=tool_call["id"],
),
None,
)
if decision["type"] == "reject" and "reject" in allowed_decisions:
# Create a tool message with the human's text response
content = decision.get("message") or (
f"User rejected the tool call for `{tool_call['name']}` with id {tool_call['id']}"
)
tool_message = ToolMessage(
content=content,
name=tool_call["name"],
tool_call_id=tool_call["id"],
status="error",
)
return tool_call, tool_message
msg = (
f"Unexpected human decision: {decision}. "
f"Decision type '{decision.get('type')}' "
f"is not allowed for tool '{tool_call['name']}'. "
f"Expected one of {allowed_decisions} based on the tool's configuration."
)
raise ValueError(msg)
def after_model(
self, state: AgentState[Any], runtime: Runtime[ContextT]
) -> dict[str, Any] | None:
"""Trigger interrupt flows for relevant tool calls after an `AIMessage`.
Args:
state: The current agent state.
runtime: The runtime context.
Returns:
Updated message with the revised tool calls.
Raises:
ValueError: If the number of human decisions does not match the number of
interrupted tool calls.
"""
messages = state["messages"]
if not messages:
return None
last_ai_msg = next((msg for msg in reversed(messages) if isinstance(msg, AIMessage)), None)
if not last_ai_msg or not last_ai_msg.tool_calls:
return None
# Create action requests and review configs for tools that need approval
action_requests: list[ActionRequest] = []
review_configs: list[ReviewConfig] = []
interrupt_indices: list[int] = []
for idx, tool_call in enumerate(last_ai_msg.tool_calls):
if (config := self.interrupt_on.get(tool_call["name"])) is not None:
action_request, review_config = self._create_action_and_config(
tool_call, config, state, runtime
)
action_requests.append(action_request)
review_configs.append(review_config)
interrupt_indices.append(idx)
# If no interrupts needed, return early
if not action_requests:
return None
# Create single HITLRequest with all actions and configs
hitl_request = HITLRequest(
action_requests=action_requests,
review_configs=review_configs,
)
# Send interrupt and get response
decisions = interrupt(hitl_request)["decisions"]
# Validate that the number of decisions matches the number of interrupt tool calls
if (decisions_len := len(decisions)) != (interrupt_count := len(interrupt_indices)):
msg = (
f"Number of human decisions ({decisions_len}) does not match "
f"number of hanging tool calls ({interrupt_count})."
)
raise ValueError(msg)
# Process decisions and rebuild tool calls in original order
revised_tool_calls: list[ToolCall] = []
artificial_tool_messages: list[ToolMessage] = []
decision_idx = 0
for idx, tool_call in enumerate(last_ai_msg.tool_calls):
if idx in interrupt_indices:
# This was an interrupt tool call - process the decision
config = self.interrupt_on[tool_call["name"]]
decision = decisions[decision_idx]
decision_idx += 1
revised_tool_call, tool_message = self._process_decision(
decision, tool_call, config
)
if revised_tool_call is not None:
revised_tool_calls.append(revised_tool_call)
if tool_message:
artificial_tool_messages.append(tool_message)
else:
# This was auto-approved - keep original
revised_tool_calls.append(tool_call)
# Update the AI message to only include approved tool calls
last_ai_msg.tool_calls = revised_tool_calls
return {"messages": [last_ai_msg, *artificial_tool_messages]}
async def aafter_model(
self, state: AgentState[Any], runtime: Runtime[ContextT]
) -> dict[str, Any] | None:
"""Async trigger interrupt flows for relevant tool calls after an `AIMessage`.
Args:
state: The current agent state.
runtime: The runtime context.
Returns:
Updated message with the revised tool calls.
"""
return self.after_model(state, runtime)
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/langchain/agents/middleware/human_in_the_loop.py",
"license": "MIT License",
"lines": 299,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/langchain_v1/langchain/agents/middleware/summarization.py | """Summarization middleware."""
import uuid
import warnings
from collections.abc import Callable, Iterable, Mapping
from functools import partial
from typing import Any, Literal, cast
from langchain_core.messages import (
AIMessage,
AnyMessage,
MessageLikeRepresentation,
RemoveMessage,
ToolMessage,
)
from langchain_core.messages.human import HumanMessage
from langchain_core.messages.utils import (
count_tokens_approximately,
get_buffer_string,
trim_messages,
)
from langgraph.graph.message import (
REMOVE_ALL_MESSAGES,
)
from langgraph.runtime import Runtime
from typing_extensions import override
from langchain.agents.middleware.types import AgentMiddleware, AgentState, ContextT, ResponseT
from langchain.chat_models import BaseChatModel, init_chat_model
TokenCounter = Callable[[Iterable[MessageLikeRepresentation]], int]
DEFAULT_SUMMARY_PROMPT = """<role>
Context Extraction Assistant
</role>
<primary_objective>
Your sole objective in this task is to extract the highest quality/most relevant context from the conversation history below.
</primary_objective>
<objective_information>
You're nearing the total number of input tokens you can accept, so you must extract the highest quality/most relevant pieces of information from your conversation history.
This context will then overwrite the conversation history presented below. Because of this, ensure the context you extract is only the most important information to continue working toward your overall goal.
</objective_information>
<instructions>
The conversation history below will be replaced with the context you extract in this step.
You want to ensure that you don't repeat any actions you've already completed, so the context you extract from the conversation history should be focused on the most important information to your overall goal.
You should structure your summary using the following sections. Each section acts as a checklist - you must populate it with relevant information or explicitly state "None" if there is nothing to report for that section:
## SESSION INTENT
What is the user's primary goal or request? What overall task are you trying to accomplish? This should be concise but complete enough to understand the purpose of the entire session.
## SUMMARY
Extract and record all of the most important context from the conversation history. Include important choices, conclusions, or strategies determined during this conversation. Include the reasoning behind key decisions. Document any rejected options and why they were not pursued.
## ARTIFACTS
What artifacts, files, or resources were created, modified, or accessed during this conversation? For file modifications, list specific file paths and briefly describe the changes made to each. This section prevents silent loss of artifact information.
## NEXT STEPS
What specific tasks remain to be completed to achieve the session intent? What should you do next?
</instructions>
The user will message you with the full message history from which you'll extract context to create a replacement. Carefully read through it all and think deeply about what information is most important to your overall goal and should be saved:
With all of this in mind, please carefully read over the entire conversation history, and extract the most important and relevant context to replace it so that you can free up space in the conversation history.
Respond ONLY with the extracted context. Do not include any additional information, or text before or after the extracted context.
<messages>
Messages to summarize:
{messages}
</messages>""" # noqa: E501
_DEFAULT_MESSAGES_TO_KEEP = 20
_DEFAULT_TRIM_TOKEN_LIMIT = 4000
_DEFAULT_FALLBACK_MESSAGE_COUNT = 15
ContextFraction = tuple[Literal["fraction"], float]
"""Fraction of model's maximum input tokens.
Example:
To specify 50% of the model's max input tokens:
```python
("fraction", 0.5)
```
"""
ContextTokens = tuple[Literal["tokens"], int]
"""Absolute number of tokens.
Example:
To specify 3000 tokens:
```python
("tokens", 3000)
```
"""
ContextMessages = tuple[Literal["messages"], int]
"""Absolute number of messages.
Example:
To specify 50 messages:
```python
("messages", 50)
```
"""
ContextSize = ContextFraction | ContextTokens | ContextMessages
"""Union type for context size specifications.
Can be either:
- [`ContextFraction`][langchain.agents.middleware.summarization.ContextFraction]: A
fraction of the model's maximum input tokens.
- [`ContextTokens`][langchain.agents.middleware.summarization.ContextTokens]: An absolute
number of tokens.
- [`ContextMessages`][langchain.agents.middleware.summarization.ContextMessages]: An
absolute number of messages.
Depending on whether it is used for the `trigger` or `keep` parameter, this type
indicates either when to trigger summarization or how much context to retain.
Example:
```python
# ContextFraction
context_size: ContextSize = ("fraction", 0.5)
# ContextTokens
context_size: ContextSize = ("tokens", 3000)
# ContextMessages
context_size: ContextSize = ("messages", 50)
```
"""
def _get_approximate_token_counter(model: BaseChatModel) -> TokenCounter:
"""Tune parameters of approximate token counter based on model type."""
if model._llm_type == "anthropic-chat": # noqa: SLF001
# 3.3 was estimated in an offline experiment, comparing with Claude's token-counting
# API: https://platform.claude.com/docs/en/build-with-claude/token-counting
return partial(
count_tokens_approximately, use_usage_metadata_scaling=True, chars_per_token=3.3
)
return partial(count_tokens_approximately, use_usage_metadata_scaling=True)
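# --- Usage sketch (illustrative) ---
# The returned counter is a plain callable over a message sequence, matching the
# `TokenCounter` alias above. The model string is a placeholder:
#
#     model = init_chat_model("anthropic:claude-sonnet-4-5")
#     counter = _get_approximate_token_counter(model)
#     approx_tokens = counter(messages)  # int; ~3.3 chars/token for Anthropic models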
class SummarizationMiddleware(AgentMiddleware[AgentState[ResponseT], ContextT, ResponseT]):
"""Summarizes conversation history when token limits are approached.
This middleware monitors message token counts and automatically summarizes older
messages when a threshold is reached, preserving recent messages and maintaining
context continuity by ensuring AI/Tool message pairs remain together.
"""
def __init__(
self,
model: str | BaseChatModel,
*,
trigger: ContextSize | list[ContextSize] | None = None,
keep: ContextSize = ("messages", _DEFAULT_MESSAGES_TO_KEEP),
token_counter: TokenCounter = count_tokens_approximately,
summary_prompt: str = DEFAULT_SUMMARY_PROMPT,
trim_tokens_to_summarize: int | None = _DEFAULT_TRIM_TOKEN_LIMIT,
**deprecated_kwargs: Any,
) -> None:
"""Initialize summarization middleware.
Args:
model: The language model to use for generating summaries.
trigger: One or more thresholds that trigger summarization.
Provide a single
[`ContextSize`][langchain.agents.middleware.summarization.ContextSize]
tuple or a list of tuples, in which case summarization runs when any
threshold is met.
!!! example
```python
# Trigger summarization when 50 messages is reached
("messages", 50)
# Trigger summarization when 3000 tokens is reached
("tokens", 3000)
# Trigger summarization either when 80% of model's max input tokens
# is reached or when 100 messages is reached (whichever comes first)
[("fraction", 0.8), ("messages", 100)]
```
See [`ContextSize`][langchain.agents.middleware.summarization.ContextSize]
for more details.
keep: Context retention policy applied after summarization.
Provide a [`ContextSize`][langchain.agents.middleware.summarization.ContextSize]
tuple to specify how much history to preserve.
Defaults to keeping the most recent `20` messages.
            Unlike `trigger`, a list of multiple values is not supported.
!!! example
```python
# Keep the most recent 20 messages
("messages", 20)
# Keep the most recent 3000 tokens
("tokens", 3000)
# Keep the most recent 30% of the model's max input tokens
("fraction", 0.3)
```
token_counter: Function to count tokens in messages.
summary_prompt: Prompt template for generating summaries.
trim_tokens_to_summarize: Maximum tokens to keep when preparing messages for
the summarization call.
Pass `None` to skip trimming entirely.
"""
# Handle deprecated parameters
if "max_tokens_before_summary" in deprecated_kwargs:
value = deprecated_kwargs["max_tokens_before_summary"]
warnings.warn(
"max_tokens_before_summary is deprecated. Use trigger=('tokens', value) instead.",
DeprecationWarning,
stacklevel=2,
)
if trigger is None and value is not None:
trigger = ("tokens", value)
if "messages_to_keep" in deprecated_kwargs:
value = deprecated_kwargs["messages_to_keep"]
warnings.warn(
"messages_to_keep is deprecated. Use keep=('messages', value) instead.",
DeprecationWarning,
stacklevel=2,
)
if keep == ("messages", _DEFAULT_MESSAGES_TO_KEEP):
keep = ("messages", value)
super().__init__()
if isinstance(model, str):
model = init_chat_model(model)
self.model = model
if trigger is None:
self.trigger: ContextSize | list[ContextSize] | None = None
trigger_conditions: list[ContextSize] = []
elif isinstance(trigger, list):
validated_list = [self._validate_context_size(item, "trigger") for item in trigger]
self.trigger = validated_list
trigger_conditions = validated_list
else:
validated = self._validate_context_size(trigger, "trigger")
self.trigger = validated
trigger_conditions = [validated]
self._trigger_conditions = trigger_conditions
self.keep = self._validate_context_size(keep, "keep")
if token_counter is count_tokens_approximately:
self.token_counter = _get_approximate_token_counter(self.model)
self._partial_token_counter: TokenCounter = partial( # type: ignore[call-arg]
self.token_counter, use_usage_metadata_scaling=False
)
else:
self.token_counter = token_counter
self._partial_token_counter = token_counter
self.summary_prompt = summary_prompt
self.trim_tokens_to_summarize = trim_tokens_to_summarize
requires_profile = any(condition[0] == "fraction" for condition in self._trigger_conditions)
if self.keep[0] == "fraction":
requires_profile = True
if requires_profile and self._get_profile_limits() is None:
            msg = (
                "Model profile information is required to use fractional token limits, "
                "and is unavailable for the specified model. Please use absolute token "
                "counts instead, or pass\n\n"
                '`ChatModel(..., profile={"max_input_tokens": ...})`\n\n'
                "with a desired integer value of the model's maximum input tokens."
            )
raise ValueError(msg)
@override
def before_model(
self, state: AgentState[Any], runtime: Runtime[ContextT]
) -> dict[str, Any] | None:
"""Process messages before model invocation, potentially triggering summarization.
Args:
state: The agent state.
runtime: The runtime environment.
Returns:
An updated state with summarized messages if summarization was performed.
"""
messages = state["messages"]
self._ensure_message_ids(messages)
total_tokens = self.token_counter(messages)
if not self._should_summarize(messages, total_tokens):
return None
cutoff_index = self._determine_cutoff_index(messages)
if cutoff_index <= 0:
return None
messages_to_summarize, preserved_messages = self._partition_messages(messages, cutoff_index)
summary = self._create_summary(messages_to_summarize)
new_messages = self._build_new_messages(summary)
return {
"messages": [
RemoveMessage(id=REMOVE_ALL_MESSAGES),
*new_messages,
*preserved_messages,
]
}
@override
async def abefore_model(
self, state: AgentState[Any], runtime: Runtime[ContextT]
) -> dict[str, Any] | None:
"""Process messages before model invocation, potentially triggering summarization.
Args:
state: The agent state.
runtime: The runtime environment.
Returns:
An updated state with summarized messages if summarization was performed.
"""
messages = state["messages"]
self._ensure_message_ids(messages)
total_tokens = self.token_counter(messages)
if not self._should_summarize(messages, total_tokens):
return None
cutoff_index = self._determine_cutoff_index(messages)
if cutoff_index <= 0:
return None
messages_to_summarize, preserved_messages = self._partition_messages(messages, cutoff_index)
summary = await self._acreate_summary(messages_to_summarize)
new_messages = self._build_new_messages(summary)
return {
"messages": [
RemoveMessage(id=REMOVE_ALL_MESSAGES),
*new_messages,
*preserved_messages,
]
}
def _should_summarize_based_on_reported_tokens(
self, messages: list[AnyMessage], threshold: float
) -> bool:
"""Check if reported token usage from last AIMessage exceeds threshold."""
last_ai_message = next(
(msg for msg in reversed(messages) if isinstance(msg, AIMessage)),
None,
)
if ( # noqa: SIM103
isinstance(last_ai_message, AIMessage)
and last_ai_message.usage_metadata is not None
and (reported_tokens := last_ai_message.usage_metadata.get("total_tokens", -1))
and reported_tokens >= threshold
and (message_provider := last_ai_message.response_metadata.get("model_provider"))
and message_provider == self.model._get_ls_params().get("ls_provider") # noqa: SLF001
):
return True
return False
def _should_summarize(self, messages: list[AnyMessage], total_tokens: int) -> bool:
"""Determine whether summarization should run for the current token usage."""
if not self._trigger_conditions:
return False
for kind, value in self._trigger_conditions:
if kind == "messages" and len(messages) >= value:
return True
if kind == "tokens" and total_tokens >= value:
return True
if kind == "tokens" and self._should_summarize_based_on_reported_tokens(
messages, value
):
return True
if kind == "fraction":
max_input_tokens = self._get_profile_limits()
if max_input_tokens is None:
continue
threshold = int(max_input_tokens * value)
if threshold <= 0:
threshold = 1
if total_tokens >= threshold:
return True
if self._should_summarize_based_on_reported_tokens(messages, threshold):
return True
return False
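    # Worked example (illustrative): with trigger=[("fraction", 0.8), ("messages", 100)]
    # and a model profile reporting max_input_tokens=200_000, summarization fires once
    # total_tokens >= int(200_000 * 0.8) == 160_000 or once len(messages) >= 100,
    # whichever threshold is crossed first.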
def _determine_cutoff_index(self, messages: list[AnyMessage]) -> int:
"""Choose cutoff index respecting retention configuration."""
kind, value = self.keep
if kind in {"tokens", "fraction"}:
token_based_cutoff = self._find_token_based_cutoff(messages)
if token_based_cutoff is not None:
return token_based_cutoff
# None cutoff -> model profile data not available (caught in __init__ but
# here for safety), fallback to message count
return self._find_safe_cutoff(messages, _DEFAULT_MESSAGES_TO_KEEP)
return self._find_safe_cutoff(messages, cast("int", value))
def _find_token_based_cutoff(self, messages: list[AnyMessage]) -> int | None:
"""Find cutoff index based on target token retention."""
if not messages:
return 0
kind, value = self.keep
if kind == "fraction":
max_input_tokens = self._get_profile_limits()
if max_input_tokens is None:
return None
target_token_count = int(max_input_tokens * value)
elif kind == "tokens":
target_token_count = int(value)
else:
return None
if target_token_count <= 0:
target_token_count = 1
if self.token_counter(messages) <= target_token_count:
return 0
# Use binary search to identify the earliest message index that keeps the
# suffix within the token budget.
left, right = 0, len(messages)
cutoff_candidate = len(messages)
max_iterations = len(messages).bit_length() + 1
for _ in range(max_iterations):
if left >= right:
break
mid = (left + right) // 2
if self._partial_token_counter(messages[mid:]) <= target_token_count:
cutoff_candidate = mid
right = mid
else:
left = mid + 1
if cutoff_candidate == len(messages):
cutoff_candidate = left
if cutoff_candidate >= len(messages):
if len(messages) == 1:
return 0
cutoff_candidate = len(messages) - 1
# Advance past any ToolMessages to avoid splitting AI/Tool pairs
return self._find_safe_cutoff_point(messages, cutoff_candidate)
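    # Worked example (illustrative): with per-message costs [50, 40, 30, 20, 10] and a
    # retention target of 60 tokens, messages[2:] costs 30 + 20 + 10 = 60 (within
    # budget) while messages[1:] costs 100 (over budget), so the binary search settles
    # on cutoff_candidate == 2 before the AI/Tool pair adjustment above.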
def _get_profile_limits(self) -> int | None:
"""Retrieve max input token limit from the model profile."""
try:
profile = self.model.profile
except AttributeError:
return None
if not isinstance(profile, Mapping):
return None
max_input_tokens = profile.get("max_input_tokens")
if not isinstance(max_input_tokens, int):
return None
return max_input_tokens
@staticmethod
def _validate_context_size(context: ContextSize, parameter_name: str) -> ContextSize:
"""Validate context configuration tuples."""
kind, value = context
if kind == "fraction":
if not 0 < value <= 1:
msg = f"Fractional {parameter_name} values must be between 0 and 1, got {value}."
raise ValueError(msg)
elif kind in {"tokens", "messages"}:
if value <= 0:
msg = f"{parameter_name} thresholds must be greater than 0, got {value}."
raise ValueError(msg)
else:
msg = f"Unsupported context size type {kind} for {parameter_name}."
raise ValueError(msg)
return context
@staticmethod
def _build_new_messages(summary: str) -> list[HumanMessage]:
return [
HumanMessage(
content=f"Here is a summary of the conversation to date:\n\n{summary}",
additional_kwargs={"lc_source": "summarization"},
)
]
@staticmethod
def _ensure_message_ids(messages: list[AnyMessage]) -> None:
"""Ensure all messages have unique IDs for the add_messages reducer."""
for msg in messages:
if msg.id is None:
msg.id = str(uuid.uuid4())
@staticmethod
def _partition_messages(
conversation_messages: list[AnyMessage],
cutoff_index: int,
) -> tuple[list[AnyMessage], list[AnyMessage]]:
"""Partition messages into those to summarize and those to preserve."""
messages_to_summarize = conversation_messages[:cutoff_index]
preserved_messages = conversation_messages[cutoff_index:]
return messages_to_summarize, preserved_messages
def _find_safe_cutoff(self, messages: list[AnyMessage], messages_to_keep: int) -> int:
"""Find safe cutoff point that preserves AI/Tool message pairs.
Returns the index where messages can be safely cut without separating
related AI and Tool messages. Returns `0` if no safe cutoff is found.
        If the target cutoff lands in the middle of a run of tool messages, the
        cutoff is adjusted by `_find_safe_cutoff_point`: it moves backward to
        include the `AIMessage` that issued the tool calls, or advances past the
        orphaned `ToolMessage` objects as a fallback.
"""
if len(messages) <= messages_to_keep:
return 0
target_cutoff = len(messages) - messages_to_keep
return self._find_safe_cutoff_point(messages, target_cutoff)
@staticmethod
def _find_safe_cutoff_point(messages: list[AnyMessage], cutoff_index: int) -> int:
"""Find a safe cutoff point that doesn't split AI/Tool message pairs.
If the message at `cutoff_index` is a `ToolMessage`, search backward for the
`AIMessage` containing the corresponding `tool_calls` and adjust the cutoff to
include it. This ensures tool call requests and responses stay together.
Falls back to advancing forward past `ToolMessage` objects only if no matching
`AIMessage` is found (edge case).
"""
if cutoff_index >= len(messages) or not isinstance(messages[cutoff_index], ToolMessage):
return cutoff_index
# Collect tool_call_ids from consecutive ToolMessages at/after cutoff
tool_call_ids: set[str] = set()
idx = cutoff_index
while idx < len(messages) and isinstance(messages[idx], ToolMessage):
tool_msg = cast("ToolMessage", messages[idx])
if tool_msg.tool_call_id:
tool_call_ids.add(tool_msg.tool_call_id)
idx += 1
# Search backward for AIMessage with matching tool_calls
for i in range(cutoff_index - 1, -1, -1):
msg = messages[i]
if isinstance(msg, AIMessage) and msg.tool_calls:
ai_tool_call_ids = {tc.get("id") for tc in msg.tool_calls if tc.get("id")}
if tool_call_ids & ai_tool_call_ids:
# Found the AIMessage - move cutoff to include it
return i
# Fallback: no matching AIMessage found, advance past ToolMessages to avoid
# orphaned tool responses
return idx
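    # Worked example (illustrative): for
    #     [HumanMessage, AIMessage(tool_calls=[{"id": "a"}]), ToolMessage(tool_call_id="a")]
    # a requested cutoff of 2 lands on the ToolMessage, so the backward search finds
    # the AIMessage at index 1 that issued tool call "a" and returns 1, keeping the
    # request/response pair together on the preserved side.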
def _create_summary(self, messages_to_summarize: list[AnyMessage]) -> str:
"""Generate summary for the given messages.
Args:
messages_to_summarize: Messages to summarize.
"""
if not messages_to_summarize:
return "No previous conversation history."
trimmed_messages = self._trim_messages_for_summary(messages_to_summarize)
if not trimmed_messages:
return "Previous conversation was too long to summarize."
# Format messages to avoid token inflation from metadata when str() is called on
# message objects
formatted_messages = get_buffer_string(trimmed_messages)
try:
response = self.model.invoke(
self.summary_prompt.format(messages=formatted_messages).rstrip(),
config={"metadata": {"lc_source": "summarization"}},
)
return response.text.strip()
except Exception as e:
return f"Error generating summary: {e!s}"
async def _acreate_summary(self, messages_to_summarize: list[AnyMessage]) -> str:
"""Generate summary for the given messages.
Args:
messages_to_summarize: Messages to summarize.
"""
if not messages_to_summarize:
return "No previous conversation history."
trimmed_messages = self._trim_messages_for_summary(messages_to_summarize)
if not trimmed_messages:
return "Previous conversation was too long to summarize."
# Format messages to avoid token inflation from metadata when str() is called on
# message objects
formatted_messages = get_buffer_string(trimmed_messages)
try:
response = await self.model.ainvoke(
self.summary_prompt.format(messages=formatted_messages).rstrip(),
config={"metadata": {"lc_source": "summarization"}},
)
return response.text.strip()
except Exception as e:
return f"Error generating summary: {e!s}"
def _trim_messages_for_summary(self, messages: list[AnyMessage]) -> list[AnyMessage]:
"""Trim messages to fit within summary generation limits."""
try:
if self.trim_tokens_to_summarize is None:
return messages
return cast(
"list[AnyMessage]",
trim_messages(
messages,
max_tokens=self.trim_tokens_to_summarize,
token_counter=self.token_counter,
start_on="human",
strategy="last",
allow_partial=True,
include_system=True,
),
)
except Exception:
return messages[-_DEFAULT_FALLBACK_MESSAGE_COUNT:]
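# --- Usage sketch (illustrative, not part of this module) ---
# Constructing the middleware with the parameters defined in __init__ above. The
# `create_agent`/`middleware=` wiring and model strings are assumptions about the
# consuming API; the `trigger`/`keep` tuples follow the ContextSize types in this file.
#
#     middleware = SummarizationMiddleware(
#         "openai:gpt-4o-mini",
#         trigger=[("fraction", 0.8), ("messages", 100)],  # summarize on either threshold
#         keep=("messages", 20),  # retain the 20 most recent messages
#     )
#     agent = create_agent(model="openai:gpt-4o", tools=tools, middleware=[middleware])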
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/langchain/agents/middleware/summarization.py",
"license": "MIT License",
"lines": 534,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/langchain_v1/langchain/agents/middleware/types.py | """Types for middleware and agents."""
from __future__ import annotations
from collections.abc import Awaitable, Callable, Sequence
from dataclasses import dataclass, field, replace
from inspect import iscoroutinefunction
from typing import (
TYPE_CHECKING,
Annotated,
Any,
Generic,
Literal,
Protocol,
cast,
overload,
)
if TYPE_CHECKING:
from collections.abc import Awaitable
# Needed as top level import for Pydantic schema generation on AgentState
import warnings
from typing import TypeAlias
from langchain_core.messages import (
AIMessage,
AnyMessage,
BaseMessage,
SystemMessage,
ToolMessage,
)
from langgraph.channels.ephemeral_value import EphemeralValue
from langgraph.graph.message import add_messages
from langgraph.prebuilt.tool_node import ToolCallRequest, ToolCallWrapper
from langgraph.typing import ContextT
from typing_extensions import NotRequired, Required, TypedDict, TypeVar, Unpack
if TYPE_CHECKING:
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.tools import BaseTool
from langgraph.runtime import Runtime
from langgraph.types import Command
from langchain.agents.structured_output import ResponseFormat
__all__ = [
"AgentMiddleware",
"AgentState",
"ContextT",
"ExtendedModelResponse",
"ModelCallResult",
"ModelRequest",
"ModelResponse",
"OmitFromSchema",
"ResponseT",
"StateT_co",
"ToolCallRequest",
"ToolCallWrapper",
"after_agent",
"after_model",
"before_agent",
"before_model",
"dynamic_prompt",
"hook_config",
"wrap_tool_call",
]
JumpTo = Literal["tools", "model", "end"]
"""Destination to jump to when a middleware node returns."""
ResponseT = TypeVar("ResponseT", default=Any)
class _ModelRequestOverrides(TypedDict, total=False):
"""Possible overrides for `ModelRequest.override()` method."""
model: BaseChatModel
system_message: SystemMessage | None
messages: list[AnyMessage]
tool_choice: Any | None
tools: list[BaseTool | dict[str, Any]]
response_format: ResponseFormat[Any] | None
model_settings: dict[str, Any]
state: AgentState[Any]
@dataclass(init=False)
class ModelRequest(Generic[ContextT]):
"""Model request information for the agent.
Type Parameters:
ContextT: The type of the runtime context. Defaults to `None` if not specified.
"""
model: BaseChatModel
messages: list[AnyMessage] # excluding system message
system_message: SystemMessage | None
tool_choice: Any | None
tools: list[BaseTool | dict[str, Any]]
response_format: ResponseFormat[Any] | None
state: AgentState[Any]
runtime: Runtime[ContextT]
model_settings: dict[str, Any] = field(default_factory=dict)
def __init__(
self,
*,
model: BaseChatModel,
messages: list[AnyMessage],
system_message: SystemMessage | None = None,
system_prompt: str | None = None,
tool_choice: Any | None = None,
tools: list[BaseTool | dict[str, Any]] | None = None,
response_format: ResponseFormat[Any] | None = None,
state: AgentState[Any] | None = None,
runtime: Runtime[ContextT] | None = None,
model_settings: dict[str, Any] | None = None,
) -> None:
"""Initialize ModelRequest with backward compatibility for system_prompt.
Args:
model: The chat model to use.
messages: List of messages (excluding system prompt).
tool_choice: Tool choice configuration.
tools: List of available tools.
response_format: Response format specification.
state: Agent state.
runtime: Runtime context.
model_settings: Additional model settings.
system_message: System message instance (preferred).
system_prompt: System prompt string (deprecated, converted to SystemMessage).
Raises:
ValueError: If both `system_prompt` and `system_message` are provided.
"""
# Handle system_prompt/system_message conversion and validation
if system_prompt is not None and system_message is not None:
msg = "Cannot specify both system_prompt and system_message"
raise ValueError(msg)
if system_prompt is not None:
system_message = SystemMessage(content=system_prompt)
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=DeprecationWarning)
self.model = model
self.messages = messages
self.system_message = system_message
self.tool_choice = tool_choice
self.tools = tools if tools is not None else []
self.response_format = response_format
self.state = state if state is not None else {"messages": []}
self.runtime = runtime # type: ignore[assignment]
self.model_settings = model_settings if model_settings is not None else {}
@property
def system_prompt(self) -> str | None:
"""Get system prompt text from system_message.
Returns:
The content of the system message if present, otherwise `None`.
"""
if self.system_message is None:
return None
return self.system_message.text
def __setattr__(self, name: str, value: Any) -> None:
"""Set an attribute with a deprecation warning.
Direct attribute assignment on `ModelRequest` is deprecated. Use the
`override()` method instead to create a new request with modified attributes.
Args:
name: Attribute name.
value: Attribute value.
"""
# Special handling for system_prompt - convert to system_message
if name == "system_prompt":
warnings.warn(
"Direct attribute assignment to ModelRequest.system_prompt is deprecated. "
"Use request.override(system_message=SystemMessage(...)) instead to create "
"a new request with the modified system message.",
DeprecationWarning,
stacklevel=2,
)
if value is None:
object.__setattr__(self, "system_message", None)
else:
object.__setattr__(self, "system_message", SystemMessage(content=value))
return
warnings.warn(
f"Direct attribute assignment to ModelRequest.{name} is deprecated. "
f"Use request.override({name}=...) instead to create a new request "
f"with the modified attribute.",
DeprecationWarning,
stacklevel=2,
)
object.__setattr__(self, name, value)
def override(self, **overrides: Unpack[_ModelRequestOverrides]) -> ModelRequest[ContextT]:
"""Replace the request with a new request with the given overrides.
Returns a new `ModelRequest` instance with the specified attributes replaced.
This follows an immutable pattern, leaving the original request unchanged.
Args:
**overrides: Keyword arguments for attributes to override.
Supported keys:
- `model`: `BaseChatModel` instance
- `system_prompt`: deprecated, use `system_message` instead
- `system_message`: `SystemMessage` instance
- `messages`: `list` of messages
- `tool_choice`: Tool choice configuration
- `tools`: `list` of available tools
- `response_format`: Response format specification
- `model_settings`: Additional model settings
- `state`: Agent state dictionary
Returns:
New `ModelRequest` instance with specified overrides applied.
Examples:
!!! example "Create a new request with different model"
```python
new_request = request.override(model=different_model)
```
!!! example "Override system message (preferred)"
```python
from langchain_core.messages import SystemMessage
new_request = request.override(
system_message=SystemMessage(content="New instructions")
)
```
!!! example "Override multiple attributes"
```python
new_request = request.override(
model=ChatOpenAI(model="gpt-4o"),
system_message=SystemMessage(content="New instructions"),
)
```
Raises:
ValueError: If both `system_prompt` and `system_message` are provided.
"""
# Handle system_prompt/system_message conversion
if "system_prompt" in overrides and "system_message" in overrides:
msg = "Cannot specify both system_prompt and system_message"
raise ValueError(msg)
if "system_prompt" in overrides:
system_prompt = cast("str | None", overrides.pop("system_prompt")) # type: ignore[typeddict-item]
if system_prompt is None:
overrides["system_message"] = None
else:
overrides["system_message"] = SystemMessage(content=system_prompt)
return replace(self, **overrides)
@dataclass
class ModelResponse(Generic[ResponseT]):
"""Response from model execution including messages and optional structured output.
The result will usually contain a single `AIMessage`, but may include an additional
`ToolMessage` if the model used a tool for structured output.
Type Parameters:
ResponseT: The type of the structured response. Defaults to `Any` if not specified.
"""
result: list[BaseMessage]
"""List of messages from model execution."""
structured_response: ResponseT | None = None
"""Parsed structured output if `response_format` was specified, `None` otherwise."""
@dataclass
class ExtendedModelResponse(Generic[ResponseT]):
"""Model response with an optional 'Command' from 'wrap_model_call' middleware.
Use this to return a 'Command' alongside the model response from a
'wrap_model_call' handler. The command is applied as an additional state
update after the model node completes, using the graph's reducers (e.g.
'add_messages' for the 'messages' key).
Because each 'Command' is applied through the reducer, messages in the
command are **added alongside** the model response messages rather than
replacing them. For non-reducer state fields, later commands overwrite
earlier ones (outermost middleware wins over inner).
Type Parameters:
ResponseT: The type of the structured response. Defaults to 'Any' if not specified.
"""
model_response: ModelResponse[ResponseT]
"""The underlying model response."""
command: Command[Any] | None = None
"""Optional command to apply as an additional state update."""
ModelCallResult: TypeAlias = (
"ModelResponse[ResponseT] | AIMessage | ExtendedModelResponse[ResponseT]"
)
"""`TypeAlias` for model call handler return value.
Middleware can return one of:
- `ModelResponse`: Full response with messages and optional structured output
- `AIMessage`: Simplified return for simple use cases
- `ExtendedModelResponse`: Response with an optional `Command` for additional state updates
`goto`, `resume`, and `graph` are not yet supported on these commands.
A `NotImplementedError` will be raised if you try to use them.
"""
@dataclass
class OmitFromSchema:
"""Annotation used to mark state attributes as omitted from input or output schemas."""
input: bool = True
"""Whether to omit the attribute from the input schema."""
output: bool = True
"""Whether to omit the attribute from the output schema."""
OmitFromInput = OmitFromSchema(input=True, output=False)
"""Annotation used to mark state attributes as omitted from input schema."""
OmitFromOutput = OmitFromSchema(input=False, output=True)
"""Annotation used to mark state attributes as omitted from output schema."""
PrivateStateAttr = OmitFromSchema(input=True, output=True)
"""Annotation used to mark state attributes as purely internal for a given middleware."""
class AgentState(TypedDict, Generic[ResponseT]):
"""State schema for the agent."""
messages: Required[Annotated[list[AnyMessage], add_messages]]
jump_to: NotRequired[Annotated[JumpTo | None, EphemeralValue, PrivateStateAttr]]
structured_response: NotRequired[Annotated[ResponseT, OmitFromInput]]
class _InputAgentState(TypedDict): # noqa: PYI049
"""Input state schema for the agent."""
messages: Required[Annotated[list[AnyMessage | dict[str, Any]], add_messages]]
class _OutputAgentState(TypedDict, Generic[ResponseT]): # noqa: PYI049
"""Output state schema for the agent."""
messages: Required[Annotated[list[AnyMessage], add_messages]]
structured_response: NotRequired[ResponseT]
StateT = TypeVar("StateT", bound=AgentState[Any], default=AgentState[Any])
StateT_co = TypeVar("StateT_co", bound=AgentState[Any], default=AgentState[Any], covariant=True)
StateT_contra = TypeVar("StateT_contra", bound=AgentState[Any], contravariant=True)
class _DefaultAgentState(AgentState[Any]):
"""AgentMiddleware default state."""
class AgentMiddleware(Generic[StateT, ContextT, ResponseT]):
"""Base middleware class for an agent.
Subclass this and implement any of the defined methods to customize agent behavior
between steps in the main agent loop.
Type Parameters:
StateT: The type of the agent state. Defaults to `AgentState[Any]`.
ContextT: The type of the runtime context. Defaults to `None`.
ResponseT: The type of the structured response. Defaults to `Any`.
"""
state_schema: type[StateT] = cast("type[StateT]", _DefaultAgentState)
"""The schema for state passed to the middleware nodes."""
tools: Sequence[BaseTool]
"""Additional tools registered by the middleware."""
@property
def name(self) -> str:
"""The name of the middleware instance.
Defaults to the class name, but can be overridden for custom naming.
"""
return self.__class__.__name__
def before_agent(self, state: StateT, runtime: Runtime[ContextT]) -> dict[str, Any] | None:
"""Logic to run before the agent execution starts.
Args:
state: The current agent state.
runtime: The runtime context.
Returns:
Agent state updates to apply before agent execution.
"""
async def abefore_agent(
self, state: StateT, runtime: Runtime[ContextT]
) -> dict[str, Any] | None:
"""Async logic to run before the agent execution starts.
Args:
state: The current agent state.
runtime: The runtime context.
Returns:
Agent state updates to apply before agent execution.
"""
def before_model(self, state: StateT, runtime: Runtime[ContextT]) -> dict[str, Any] | None:
"""Logic to run before the model is called.
Args:
state: The current agent state.
runtime: The runtime context.
Returns:
Agent state updates to apply before model call.
"""
async def abefore_model(
self, state: StateT, runtime: Runtime[ContextT]
) -> dict[str, Any] | None:
"""Async logic to run before the model is called.
Args:
state: The agent state.
runtime: The runtime context.
Returns:
Agent state updates to apply before model call.
"""
def after_model(self, state: StateT, runtime: Runtime[ContextT]) -> dict[str, Any] | None:
"""Logic to run after the model is called.
Args:
state: The current agent state.
runtime: The runtime context.
Returns:
Agent state updates to apply after model call.
"""
async def aafter_model(
self, state: StateT, runtime: Runtime[ContextT]
) -> dict[str, Any] | None:
"""Async logic to run after the model is called.
Args:
state: The current agent state.
runtime: The runtime context.
Returns:
Agent state updates to apply after model call.
"""
def wrap_model_call(
self,
request: ModelRequest[ContextT],
handler: Callable[[ModelRequest[ContextT]], ModelResponse[ResponseT]],
) -> ModelResponse[ResponseT] | AIMessage | ExtendedModelResponse[ResponseT]:
"""Intercept and control model execution via handler callback.
        Async version is `awrap_model_call`.
The handler callback executes the model request and returns a `ModelResponse`.
Middleware can call the handler multiple times for retry logic, skip calling
it to short-circuit, or modify the request/response. Multiple middleware
compose with first in list as outermost layer.
Args:
request: Model request to execute (includes state and runtime).
handler: Callback that executes the model request and returns
`ModelResponse`.
Call this to execute the model.
Can be called multiple times for retry logic.
Can skip calling it to short-circuit.
Returns:
The model call result.
Examples:
!!! example "Retry on error"
```python
def wrap_model_call(self, request, handler):
for attempt in range(3):
try:
return handler(request)
except Exception:
if attempt == 2:
raise
```
!!! example "Rewrite response"
```python
def wrap_model_call(self, request, handler):
response = handler(request)
ai_msg = response.result[0]
return ModelResponse(
result=[AIMessage(content=f"[{ai_msg.content}]")],
structured_response=response.structured_response,
)
```
!!! example "Error to fallback"
```python
def wrap_model_call(self, request, handler):
try:
return handler(request)
except Exception:
return ModelResponse(result=[AIMessage(content="Service unavailable")])
```
!!! example "Cache/short-circuit"
```python
def wrap_model_call(self, request, handler):
if cached := get_cache(request):
return cached # Short-circuit with cached result
response = handler(request)
save_cache(request, response)
return response
```
!!! example "Simple `AIMessage` return (converted automatically)"
```python
def wrap_model_call(self, request, handler):
response = handler(request)
# Can return AIMessage directly for simple cases
return AIMessage(content="Simplified response")
```
"""
msg = (
"Synchronous implementation of wrap_model_call is not available. "
"You are likely encountering this error because you defined only the async version "
"(awrap_model_call) and invoked your agent in a synchronous context "
"(e.g., using `stream()` or `invoke()`). "
"To resolve this, either: "
"(1) subclass AgentMiddleware and implement the synchronous wrap_model_call method, "
"(2) use the @wrap_model_call decorator on a standalone sync function, or "
"(3) invoke your agent asynchronously using `astream()` or `ainvoke()`."
)
raise NotImplementedError(msg)
async def awrap_model_call(
self,
request: ModelRequest[ContextT],
handler: Callable[[ModelRequest[ContextT]], Awaitable[ModelResponse[ResponseT]]],
) -> ModelResponse[ResponseT] | AIMessage | ExtendedModelResponse[ResponseT]:
"""Intercept and control async model execution via handler callback.
The handler callback executes the model request and returns a `ModelResponse`.
Middleware can call the handler multiple times for retry logic, skip calling
it to short-circuit, or modify the request/response. Multiple middleware
compose with first in list as outermost layer.
Args:
request: Model request to execute (includes state and runtime).
handler: Async callback that executes the model request and returns
`ModelResponse`.
Call this to execute the model.
Can be called multiple times for retry logic.
Can skip calling it to short-circuit.
Returns:
The model call result.
Examples:
!!! example "Retry on error"
```python
async def awrap_model_call(self, request, handler):
for attempt in range(3):
try:
return await handler(request)
except Exception:
if attempt == 2:
raise
```
"""
msg = (
"Asynchronous implementation of awrap_model_call is not available. "
"You are likely encountering this error because you defined only the sync version "
"(wrap_model_call) and invoked your agent in an asynchronous context "
"(e.g., using `astream()` or `ainvoke()`). "
"To resolve this, either: "
"(1) subclass AgentMiddleware and implement the asynchronous awrap_model_call method, "
"(2) use the @wrap_model_call decorator on a standalone async function, or "
"(3) invoke your agent synchronously using `stream()` or `invoke()`."
)
raise NotImplementedError(msg)
def after_agent(self, state: StateT, runtime: Runtime[ContextT]) -> dict[str, Any] | None:
"""Logic to run after the agent execution completes.
Args:
state: The current agent state.
runtime: The runtime context.
Returns:
Agent state updates to apply after agent execution.
"""
async def aafter_agent(
self, state: StateT, runtime: Runtime[ContextT]
) -> dict[str, Any] | None:
"""Async logic to run after the agent execution completes.
Args:
state: The current agent state.
runtime: The runtime context.
Returns:
Agent state updates to apply after agent execution.
"""
def wrap_tool_call(
self,
request: ToolCallRequest,
handler: Callable[[ToolCallRequest], ToolMessage | Command[Any]],
) -> ToolMessage | Command[Any]:
"""Intercept tool execution for retries, monitoring, or modification.
        Async version is `awrap_tool_call`.
Multiple middleware compose automatically (first defined = outermost).
Exceptions propagate unless `handle_tool_errors` is configured on `ToolNode`.
Args:
request: Tool call request with call `dict`, `BaseTool`, state, and runtime.
Access state via `request.state` and runtime via `request.runtime`.
handler: `Callable` to execute the tool (can be called multiple times).
Returns:
`ToolMessage` or `Command` (the final result).
The handler `Callable` can be invoked multiple times for retry logic.
Each call to handler is independent and stateless.
Examples:
!!! example "Modify request before execution"
```python
def wrap_tool_call(self, request, handler):
modified_call = {
**request.tool_call,
"args": {
**request.tool_call["args"],
"value": request.tool_call["args"]["value"] * 2,
},
}
request = request.override(tool_call=modified_call)
return handler(request)
```
!!! example "Retry on error (call handler multiple times)"
```python
def wrap_tool_call(self, request, handler):
for attempt in range(3):
try:
result = handler(request)
if is_valid(result):
return result
except Exception:
if attempt == 2:
raise
return result
```
!!! example "Conditional retry based on response"
```python
def wrap_tool_call(self, request, handler):
for attempt in range(3):
result = handler(request)
if isinstance(result, ToolMessage) and result.status != "error":
return result
if attempt < 2:
continue
return result
```
"""
msg = (
"Synchronous implementation of wrap_tool_call is not available. "
"You are likely encountering this error because you defined only the async version "
"(awrap_tool_call) and invoked your agent in a synchronous context "
"(e.g., using `stream()` or `invoke()`). "
"To resolve this, either: "
"(1) subclass AgentMiddleware and implement the synchronous wrap_tool_call method, "
"(2) use the @wrap_tool_call decorator on a standalone sync function, or "
"(3) invoke your agent asynchronously using `astream()` or `ainvoke()`."
)
raise NotImplementedError(msg)
async def awrap_tool_call(
self,
request: ToolCallRequest,
handler: Callable[[ToolCallRequest], Awaitable[ToolMessage | Command[Any]]],
) -> ToolMessage | Command[Any]:
"""Intercept and control async tool execution via handler callback.
The handler callback executes the tool call and returns a `ToolMessage` or
`Command`. Middleware can call the handler multiple times for retry logic, skip
calling it to short-circuit, or modify the request/response. Multiple middleware
compose with first in list as outermost layer.
Args:
request: Tool call request with call `dict`, `BaseTool`, state, and runtime.
Access state via `request.state` and runtime via `request.runtime`.
handler: Async callable to execute the tool and returns `ToolMessage` or
`Command`.
Call this to execute the tool.
Can be called multiple times for retry logic.
Can skip calling it to short-circuit.
Returns:
`ToolMessage` or `Command` (the final result).
The handler `Callable` can be invoked multiple times for retry logic.
Each call to handler is independent and stateless.
Examples:
!!! example "Async retry on error"
```python
async def awrap_tool_call(self, request, handler):
for attempt in range(3):
try:
result = await handler(request)
if is_valid(result):
return result
except Exception:
if attempt == 2:
raise
return result
```
            !!! example "Async cache/short-circuit"
                ```python
async def awrap_tool_call(self, request, handler):
if cached := await get_cache_async(request):
return ToolMessage(content=cached, tool_call_id=request.tool_call["id"])
result = await handler(request)
await save_cache_async(request, result)
return result
```
"""
msg = (
"Asynchronous implementation of awrap_tool_call is not available. "
"You are likely encountering this error because you defined only the sync version "
"(wrap_tool_call) and invoked your agent in an asynchronous context "
"(e.g., using `astream()` or `ainvoke()`). "
"To resolve this, either: "
"(1) subclass AgentMiddleware and implement the asynchronous awrap_tool_call method, "
"(2) use the @wrap_tool_call decorator on a standalone async function, or "
"(3) invoke your agent synchronously using `stream()` or `invoke()`."
)
raise NotImplementedError(msg)
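# --- Composition sketch (illustrative) ---
# Middleware compose with the first list entry as the outermost layer: with
# middleware=[A(), B()], a model call flows A.wrap_model_call -> B.wrap_model_call
# -> model, and the response unwinds in reverse. `create_agent` is an assumption
# about the consuming API.
#
#     agent = create_agent(model="openai:gpt-4o", middleware=[A(), B()])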
class _CallableWithStateAndRuntime(Protocol[StateT_contra, ContextT]):
"""Callable with `AgentState` and `Runtime` as arguments."""
def __call__(
self, state: StateT_contra, runtime: Runtime[ContextT]
) -> dict[str, Any] | Command[Any] | None | Awaitable[dict[str, Any] | Command[Any] | None]:
"""Perform some logic with the state and runtime."""
...
class _CallableReturningSystemMessage(Protocol[StateT_contra, ContextT]): # type: ignore[misc]
"""Callable that returns a prompt string or SystemMessage given `ModelRequest`."""
def __call__(
self, request: ModelRequest[ContextT]
) -> str | SystemMessage | Awaitable[str | SystemMessage]:
"""Generate a system prompt string or SystemMessage based on the request."""
...
class _CallableReturningModelResponse(Protocol[StateT_contra, ContextT, ResponseT]): # type: ignore[misc]
"""Callable for model call interception with handler callback.
Receives handler callback to execute model and returns `ModelResponse` or
`AIMessage`.
"""
def __call__(
self,
request: ModelRequest[ContextT],
handler: Callable[[ModelRequest[ContextT]], ModelResponse[ResponseT]],
) -> ModelResponse[ResponseT] | AIMessage:
"""Intercept model execution via handler callback."""
...
class _CallableReturningToolResponse(Protocol):
"""Callable for tool call interception with handler callback.
Receives handler callback to execute tool and returns final `ToolMessage` or
`Command`.
"""
def __call__(
self,
request: ToolCallRequest,
handler: Callable[[ToolCallRequest], ToolMessage | Command[Any]],
) -> ToolMessage | Command[Any]:
"""Intercept tool execution via handler callback."""
...
CallableT = TypeVar("CallableT", bound=Callable[..., Any])
def hook_config(
*,
can_jump_to: list[JumpTo] | None = None,
) -> Callable[[CallableT], CallableT]:
"""Decorator to configure hook behavior in middleware methods.
Use this decorator on `before_model` or `after_model` methods in middleware classes
to configure their behavior. Currently supports specifying which destinations they
can jump to, which establishes conditional edges in the agent graph.
Args:
can_jump_to: Optional list of valid jump destinations.
Can be:
- `'tools'`: Jump to the tools node
- `'model'`: Jump back to the model node
- `'end'`: Jump to the end of the graph
Returns:
Decorator function that marks the method with configuration metadata.
Examples:
!!! example "Using decorator on a class method"
```python
class MyMiddleware(AgentMiddleware):
@hook_config(can_jump_to=["end", "model"])
def before_model(self, state: AgentState) -> dict[str, Any] | None:
if some_condition(state):
return {"jump_to": "end"}
return None
```
Alternative: Use the `can_jump_to` parameter in `before_model`/`after_model`
decorators:
```python
@before_model(can_jump_to=["end"])
def conditional_middleware(state: AgentState) -> dict[str, Any] | None:
if should_exit(state):
return {"jump_to": "end"}
return None
```
"""
def decorator(func: CallableT) -> CallableT:
if can_jump_to is not None:
func.__can_jump_to__ = can_jump_to # type: ignore[attr-defined]
return func
return decorator
@overload
def before_model(
func: _CallableWithStateAndRuntime[StateT, ContextT],
) -> AgentMiddleware[StateT, ContextT]: ...
@overload
def before_model(
func: None = None,
*,
state_schema: type[StateT] | None = None,
tools: list[BaseTool] | None = None,
can_jump_to: list[JumpTo] | None = None,
name: str | None = None,
) -> Callable[
[_CallableWithStateAndRuntime[StateT, ContextT]], AgentMiddleware[StateT, ContextT]
]: ...
def before_model(
func: _CallableWithStateAndRuntime[StateT, ContextT] | None = None,
*,
state_schema: type[StateT] | None = None,
tools: list[BaseTool] | None = None,
can_jump_to: list[JumpTo] | None = None,
name: str | None = None,
) -> (
Callable[[_CallableWithStateAndRuntime[StateT, ContextT]], AgentMiddleware[StateT, ContextT]]
| AgentMiddleware[StateT, ContextT]
):
"""Decorator used to dynamically create a middleware with the `before_model` hook.
Args:
func: The function to be decorated.
Must accept: `state: StateT, runtime: Runtime[ContextT]` - State and runtime
context
state_schema: Optional custom state schema type.
If not provided, uses the default `AgentState` schema.
tools: Optional list of additional tools to register with this middleware.
can_jump_to: Optional list of valid jump destinations for conditional edges.
Valid values are: `'tools'`, `'model'`, `'end'`
name: Optional name for the generated middleware class.
If not provided, uses the decorated function's name.
Returns:
Either an `AgentMiddleware` instance (if func is provided directly) or a
decorator function that can be applied to a function it is wrapping.
The decorated function should return:
- `dict[str, Any]` - State updates to merge into the agent state
- `Command` - A command to control flow (e.g., jump to different node)
- `None` - No state updates or flow control
Examples:
!!! example "Basic usage"
```python
@before_model
def log_before_model(state: AgentState, runtime: Runtime) -> None:
print(f"About to call model with {len(state['messages'])} messages")
```
!!! example "With conditional jumping"
```python
@before_model(can_jump_to=["end"])
def conditional_before_model(
state: AgentState, runtime: Runtime
) -> dict[str, Any] | None:
if some_condition(state):
return {"jump_to": "end"}
return None
```
!!! example "With custom state schema"
```python
@before_model(state_schema=MyCustomState)
def custom_before_model(state: MyCustomState, runtime: Runtime) -> dict[str, Any]:
return {"custom_field": "updated_value"}
```
!!! example "Streaming custom events before model call"
Use `runtime.stream_writer` to emit custom events before each model invocation.
Events are received when streaming with `stream_mode="custom"`.
```python
@before_model
async def notify_model_call(state: AgentState, runtime: Runtime) -> None:
'''Notify user before model is called.'''
runtime.stream_writer(
{
"type": "status",
"message": "Thinking...",
}
)
```
"""
def decorator(
func: _CallableWithStateAndRuntime[StateT, ContextT],
) -> AgentMiddleware[StateT, ContextT]:
is_async = iscoroutinefunction(func)
func_can_jump_to = (
can_jump_to if can_jump_to is not None else getattr(func, "__can_jump_to__", [])
)
if is_async:
async def async_wrapped(
_self: AgentMiddleware[StateT, ContextT],
state: StateT,
runtime: Runtime[ContextT],
) -> dict[str, Any] | Command[Any] | None:
return await func(state, runtime) # type: ignore[misc]
# Preserve can_jump_to metadata on the wrapped function
if func_can_jump_to:
async_wrapped.__can_jump_to__ = func_can_jump_to # type: ignore[attr-defined]
middleware_name = name or cast(
"str", getattr(func, "__name__", "BeforeModelMiddleware")
)
return type(
middleware_name,
(AgentMiddleware,),
{
"state_schema": state_schema or AgentState,
"tools": tools or [],
"abefore_model": async_wrapped,
},
)()
def wrapped(
_self: AgentMiddleware[StateT, ContextT],
state: StateT,
runtime: Runtime[ContextT],
) -> dict[str, Any] | Command[Any] | None:
return func(state, runtime) # type: ignore[return-value]
# Preserve can_jump_to metadata on the wrapped function
if func_can_jump_to:
wrapped.__can_jump_to__ = func_can_jump_to # type: ignore[attr-defined]
# Use function name as default if no name provided
middleware_name = name or cast("str", getattr(func, "__name__", "BeforeModelMiddleware"))
return type(
middleware_name,
(AgentMiddleware,),
{
"state_schema": state_schema or AgentState,
"tools": tools or [],
"before_model": wrapped,
},
)()
if func is not None:
return decorator(func)
return decorator
@overload
def after_model(
func: _CallableWithStateAndRuntime[StateT, ContextT],
) -> AgentMiddleware[StateT, ContextT]: ...
@overload
def after_model(
func: None = None,
*,
state_schema: type[StateT] | None = None,
tools: list[BaseTool] | None = None,
can_jump_to: list[JumpTo] | None = None,
name: str | None = None,
) -> Callable[
[_CallableWithStateAndRuntime[StateT, ContextT]], AgentMiddleware[StateT, ContextT]
]: ...
def after_model(
func: _CallableWithStateAndRuntime[StateT, ContextT] | None = None,
*,
state_schema: type[StateT] | None = None,
tools: list[BaseTool] | None = None,
can_jump_to: list[JumpTo] | None = None,
name: str | None = None,
) -> (
Callable[[_CallableWithStateAndRuntime[StateT, ContextT]], AgentMiddleware[StateT, ContextT]]
| AgentMiddleware[StateT, ContextT]
):
"""Decorator used to dynamically create a middleware with the `after_model` hook.
Args:
func: The function to be decorated.
Must accept: `state: StateT, runtime: Runtime[ContextT]` - State and runtime
context
state_schema: Optional custom state schema type.
If not provided, uses the default `AgentState` schema.
tools: Optional list of additional tools to register with this middleware.
can_jump_to: Optional list of valid jump destinations for conditional edges.
Valid values are: `'tools'`, `'model'`, `'end'`
name: Optional name for the generated middleware class.
If not provided, uses the decorated function's name.
Returns:
Either an `AgentMiddleware` instance (if func is provided) or a decorator
function that can be applied to a function.
The decorated function should return:
- `dict[str, Any]` - State updates to merge into the agent state
- `Command` - A command to control flow (e.g., jump to different node)
- `None` - No state updates or flow control
Examples:
!!! example "Basic usage for logging model responses"
```python
@after_model
def log_latest_message(state: AgentState, runtime: Runtime) -> None:
print(state["messages"][-1].content)
```
!!! example "With custom state schema"
```python
@after_model(state_schema=MyCustomState, name="MyAfterModelMiddleware")
def custom_after_model(state: MyCustomState, runtime: Runtime) -> dict[str, Any]:
return {"custom_field": "updated_after_model"}
```
!!! example "Streaming custom events after model call"
Use `runtime.stream_writer` to emit custom events after model responds.
Events are received when streaming with `stream_mode="custom"`.
```python
@after_model
async def notify_model_response(state: AgentState, runtime: Runtime) -> None:
'''Notify user after model has responded.'''
last_message = state["messages"][-1]
has_tool_calls = hasattr(last_message, "tool_calls") and last_message.tool_calls
runtime.stream_writer(
{
"type": "status",
"message": "Using tools..." if has_tool_calls else "Response ready!",
}
)
```
"""
def decorator(
func: _CallableWithStateAndRuntime[StateT, ContextT],
) -> AgentMiddleware[StateT, ContextT]:
is_async = iscoroutinefunction(func)
# Extract can_jump_to from decorator parameter or from function metadata
func_can_jump_to = (
can_jump_to if can_jump_to is not None else getattr(func, "__can_jump_to__", [])
)
if is_async:
async def async_wrapped(
_self: AgentMiddleware[StateT, ContextT],
state: StateT,
runtime: Runtime[ContextT],
) -> dict[str, Any] | Command[Any] | None:
return await func(state, runtime) # type: ignore[misc]
# Preserve can_jump_to metadata on the wrapped function
if func_can_jump_to:
async_wrapped.__can_jump_to__ = func_can_jump_to # type: ignore[attr-defined]
middleware_name = name or cast("str", getattr(func, "__name__", "AfterModelMiddleware"))
return type(
middleware_name,
(AgentMiddleware,),
{
"state_schema": state_schema or AgentState,
"tools": tools or [],
"aafter_model": async_wrapped,
},
)()
def wrapped(
_self: AgentMiddleware[StateT, ContextT],
state: StateT,
runtime: Runtime[ContextT],
) -> dict[str, Any] | Command[Any] | None:
return func(state, runtime) # type: ignore[return-value]
# Preserve can_jump_to metadata on the wrapped function
if func_can_jump_to:
wrapped.__can_jump_to__ = func_can_jump_to # type: ignore[attr-defined]
# Use function name as default if no name provided
middleware_name = name or cast("str", getattr(func, "__name__", "AfterModelMiddleware"))
return type(
middleware_name,
(AgentMiddleware,),
{
"state_schema": state_schema or AgentState,
"tools": tools or [],
"after_model": wrapped,
},
)()
if func is not None:
return decorator(func)
return decorator
@overload
def before_agent(
func: _CallableWithStateAndRuntime[StateT, ContextT],
) -> AgentMiddleware[StateT, ContextT]: ...
@overload
def before_agent(
func: None = None,
*,
state_schema: type[StateT] | None = None,
tools: list[BaseTool] | None = None,
can_jump_to: list[JumpTo] | None = None,
name: str | None = None,
) -> Callable[
[_CallableWithStateAndRuntime[StateT, ContextT]], AgentMiddleware[StateT, ContextT]
]: ...
def before_agent(
func: _CallableWithStateAndRuntime[StateT, ContextT] | None = None,
*,
state_schema: type[StateT] | None = None,
tools: list[BaseTool] | None = None,
can_jump_to: list[JumpTo] | None = None,
name: str | None = None,
) -> (
Callable[[_CallableWithStateAndRuntime[StateT, ContextT]], AgentMiddleware[StateT, ContextT]]
| AgentMiddleware[StateT, ContextT]
):
"""Decorator used to dynamically create a middleware with the `before_agent` hook.
Args:
func: The function to be decorated.
Must accept: `state: StateT, runtime: Runtime[ContextT]` - State and runtime
context
state_schema: Optional custom state schema type.
If not provided, uses the default `AgentState` schema.
tools: Optional list of additional tools to register with this middleware.
can_jump_to: Optional list of valid jump destinations for conditional edges.
Valid values are: `'tools'`, `'model'`, `'end'`
name: Optional name for the generated middleware class.
If not provided, uses the decorated function's name.
Returns:
        Either an `AgentMiddleware` instance (if func is provided) or a decorator
        function that can be applied to a function.
The decorated function should return:
- `dict[str, Any]` - State updates to merge into the agent state
- `Command` - A command to control flow (e.g., jump to different node)
- `None` - No state updates or flow control
Examples:
!!! example "Basic usage"
```python
@before_agent
def log_before_agent(state: AgentState, runtime: Runtime) -> None:
print(f"Starting agent with {len(state['messages'])} messages")
```
!!! example "With conditional jumping"
```python
@before_agent(can_jump_to=["end"])
def conditional_before_agent(
state: AgentState, runtime: Runtime
) -> dict[str, Any] | None:
if some_condition(state):
return {"jump_to": "end"}
return None
```
!!! example "With custom state schema"
```python
@before_agent(state_schema=MyCustomState)
def custom_before_agent(state: MyCustomState, runtime: Runtime) -> dict[str, Any]:
return {"custom_field": "initialized_value"}
```
!!! example "Streaming custom events"
Use `runtime.stream_writer` to emit custom events during agent execution.
Events are received when streaming with `stream_mode="custom"`.
```python
from langchain.agents import create_agent
from langchain.agents.middleware import before_agent, AgentState
from langchain.messages import HumanMessage
from langgraph.runtime import Runtime
@before_agent
async def notify_start(state: AgentState, runtime: Runtime) -> None:
'''Notify user that agent is starting.'''
runtime.stream_writer(
{
"type": "status",
"message": "Initializing agent session...",
}
)
# Perform prerequisite tasks here
runtime.stream_writer({"type": "status", "message": "Agent ready!"})
agent = create_agent(
model="openai:gpt-5.2",
tools=[...],
middleware=[notify_start],
)
# Consume with stream_mode="custom" to receive events
async for mode, event in agent.astream(
{"messages": [HumanMessage("Hello")]},
stream_mode=["updates", "custom"],
):
if mode == "custom":
print(f"Status: {event}")
```
"""
def decorator(
func: _CallableWithStateAndRuntime[StateT, ContextT],
) -> AgentMiddleware[StateT, ContextT]:
is_async = iscoroutinefunction(func)
func_can_jump_to = (
can_jump_to if can_jump_to is not None else getattr(func, "__can_jump_to__", [])
)
if is_async:
async def async_wrapped(
_self: AgentMiddleware[StateT, ContextT],
state: StateT,
runtime: Runtime[ContextT],
) -> dict[str, Any] | Command[Any] | None:
return await func(state, runtime) # type: ignore[misc]
# Preserve can_jump_to metadata on the wrapped function
if func_can_jump_to:
async_wrapped.__can_jump_to__ = func_can_jump_to # type: ignore[attr-defined]
middleware_name = name or cast(
"str", getattr(func, "__name__", "BeforeAgentMiddleware")
)
return type(
middleware_name,
(AgentMiddleware,),
{
"state_schema": state_schema or AgentState,
"tools": tools or [],
"abefore_agent": async_wrapped,
},
)()
def wrapped(
_self: AgentMiddleware[StateT, ContextT],
state: StateT,
runtime: Runtime[ContextT],
) -> dict[str, Any] | Command[Any] | None:
return func(state, runtime) # type: ignore[return-value]
# Preserve can_jump_to metadata on the wrapped function
if func_can_jump_to:
wrapped.__can_jump_to__ = func_can_jump_to # type: ignore[attr-defined]
# Use function name as default if no name provided
middleware_name = name or cast("str", getattr(func, "__name__", "BeforeAgentMiddleware"))
return type(
middleware_name,
(AgentMiddleware,),
{
"state_schema": state_schema or AgentState,
"tools": tools or [],
"before_agent": wrapped,
},
)()
if func is not None:
return decorator(func)
return decorator
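# A minimal usage sketch (illustrative only): the `name` parameter overrides
# the generated middleware class name; otherwise the function's own name is
# used.
def _example_before_agent_usage() -> None:
    @before_agent(name="LogStartMiddleware")
    def log_start(state: AgentState, runtime: Runtime) -> None:
        print(f"starting with {len(state['messages'])} messages")

    assert type(log_start).__name__ == "LogStartMiddleware"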
@overload
def after_agent(
func: _CallableWithStateAndRuntime[StateT, ContextT],
) -> AgentMiddleware[StateT, ContextT]: ...
@overload
def after_agent(
func: None = None,
*,
state_schema: type[StateT] | None = None,
tools: list[BaseTool] | None = None,
can_jump_to: list[JumpTo] | None = None,
name: str | None = None,
) -> Callable[
[_CallableWithStateAndRuntime[StateT, ContextT]], AgentMiddleware[StateT, ContextT]
]: ...
def after_agent(
func: _CallableWithStateAndRuntime[StateT, ContextT] | None = None,
*,
state_schema: type[StateT] | None = None,
tools: list[BaseTool] | None = None,
can_jump_to: list[JumpTo] | None = None,
name: str | None = None,
) -> (
Callable[[_CallableWithStateAndRuntime[StateT, ContextT]], AgentMiddleware[StateT, ContextT]]
| AgentMiddleware[StateT, ContextT]
):
"""Decorator used to dynamically create a middleware with the `after_agent` hook.
Async version is `aafter_agent`.
Args:
func: The function to be decorated.
Must accept: `state: StateT, runtime: Runtime[ContextT]` - State and runtime
context
state_schema: Optional custom state schema type.
If not provided, uses the default `AgentState` schema.
tools: Optional list of additional tools to register with this middleware.
can_jump_to: Optional list of valid jump destinations for conditional edges.
Valid values are: `'tools'`, `'model'`, `'end'`
name: Optional name for the generated middleware class.
If not provided, uses the decorated function's name.
Returns:
Either an `AgentMiddleware` instance (if func is provided) or a decorator
function that can be applied to a function.
The decorated function should return:
- `dict[str, Any]` - State updates to merge into the agent state
- `Command` - A command to control flow (e.g., jump to different node)
- `None` - No state updates or flow control
Examples:
!!! example "Basic usage for logging agent completion"
```python
@after_agent
def log_completion(state: AgentState, runtime: Runtime) -> None:
print(f"Agent completed with {len(state['messages'])} messages")
```
!!! example "With custom state schema"
```python
@after_agent(state_schema=MyCustomState, name="MyAfterAgentMiddleware")
def custom_after_agent(state: MyCustomState, runtime: Runtime) -> dict[str, Any]:
return {"custom_field": "finalized_value"}
```
!!! example "Streaming custom events on completion"
Use `runtime.stream_writer` to emit custom events when agent completes.
Events are received when streaming with `stream_mode="custom"`.
```python
@after_agent
async def notify_completion(state: AgentState, runtime: Runtime) -> None:
'''Notify user that agent has completed.'''
runtime.stream_writer(
{
"type": "status",
"message": "Agent execution complete!",
"total_messages": len(state["messages"]),
}
)
```
"""
def decorator(
func: _CallableWithStateAndRuntime[StateT, ContextT],
) -> AgentMiddleware[StateT, ContextT]:
is_async = iscoroutinefunction(func)
# Extract can_jump_to from decorator parameter or from function metadata
func_can_jump_to = (
can_jump_to if can_jump_to is not None else getattr(func, "__can_jump_to__", [])
)
if is_async:
async def async_wrapped(
_self: AgentMiddleware[StateT, ContextT],
state: StateT,
runtime: Runtime[ContextT],
) -> dict[str, Any] | Command[Any] | None:
return await func(state, runtime) # type: ignore[misc]
# Preserve can_jump_to metadata on the wrapped function
if func_can_jump_to:
async_wrapped.__can_jump_to__ = func_can_jump_to # type: ignore[attr-defined]
middleware_name = name or cast("str", getattr(func, "__name__", "AfterAgentMiddleware"))
return type(
middleware_name,
(AgentMiddleware,),
{
"state_schema": state_schema or AgentState,
"tools": tools or [],
"aafter_agent": async_wrapped,
},
)()
def wrapped(
_self: AgentMiddleware[StateT, ContextT],
state: StateT,
runtime: Runtime[ContextT],
) -> dict[str, Any] | Command[Any] | None:
return func(state, runtime) # type: ignore[return-value]
# Preserve can_jump_to metadata on the wrapped function
if func_can_jump_to:
wrapped.__can_jump_to__ = func_can_jump_to # type: ignore[attr-defined]
# Use function name as default if no name provided
middleware_name = name or cast("str", getattr(func, "__name__", "AfterAgentMiddleware"))
return type(
middleware_name,
(AgentMiddleware,),
{
"state_schema": state_schema or AgentState,
"tools": tools or [],
"after_agent": wrapped,
},
)()
if func is not None:
return decorator(func)
return decorator
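# A minimal usage sketch (illustrative only): async functions are attached as
# the `aafter_agent` hook on the generated middleware class.
def _example_after_agent_usage() -> None:
    @after_agent
    async def finalize(state: AgentState, runtime: Runtime) -> None:
        print(f"finished with {len(state['messages'])} messages")

    assert isinstance(finalize, AgentMiddleware)
    assert hasattr(finalize, "aafter_agent")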
@overload
def dynamic_prompt(
func: _CallableReturningSystemMessage[StateT, ContextT],
) -> AgentMiddleware[StateT, ContextT]: ...
@overload
def dynamic_prompt(
func: None = None,
) -> Callable[
[_CallableReturningSystemMessage[StateT, ContextT]],
AgentMiddleware[StateT, ContextT],
]: ...
def dynamic_prompt(
func: _CallableReturningSystemMessage[StateT, ContextT] | None = None,
) -> (
Callable[
[_CallableReturningSystemMessage[StateT, ContextT]],
AgentMiddleware[StateT, ContextT],
]
| AgentMiddleware[StateT, ContextT]
):
"""Decorator used to dynamically generate system prompts for the model.
    This is a convenience decorator that creates middleware using `wrap_model_call`
    specifically for dynamic prompt generation. The decorated function should return
    a string or `SystemMessage` to be used as the system prompt for the model request.
Args:
func: The function to be decorated.
Must accept: `request: ModelRequest` - Model request (contains state and
runtime)
Returns:
Either an `AgentMiddleware` instance (if func is provided) or a decorator
function that can be applied to a function.
The decorated function should return:
        - `str` - The system prompt string to use for the model request
        - `SystemMessage` - A complete system message to use for the model request
Examples:
        !!! example "Basic usage with dynamic content"
            ```python
            @dynamic_prompt
            def my_prompt(request: ModelRequest) -> str:
                user_name = request.runtime.context.get("user_name", "User")
                return f"You are a helpful assistant helping {user_name}."
            ```
        !!! example "Using state to customize the prompt"
            ```python
            @dynamic_prompt
            def context_aware_prompt(request: ModelRequest) -> str:
                msg_count = len(request.state["messages"])
                if msg_count > 10:
                    return "You are in a long conversation. Be concise."
                return "You are a helpful assistant."
            ```
        !!! example "Using with an agent"
            ```python
            agent = create_agent(model, middleware=[my_prompt])
            ```
"""
def decorator(
func: _CallableReturningSystemMessage[StateT, ContextT],
) -> AgentMiddleware[StateT, ContextT]:
is_async = iscoroutinefunction(func)
if is_async:
async def async_wrapped(
_self: AgentMiddleware[StateT, ContextT],
request: ModelRequest[ContextT],
handler: Callable[[ModelRequest[ContextT]], Awaitable[ModelResponse[Any]]],
) -> ModelResponse[Any] | AIMessage:
prompt = await func(request) # type: ignore[misc]
if isinstance(prompt, SystemMessage):
request = request.override(system_message=prompt)
else:
request = request.override(system_message=SystemMessage(content=prompt))
return await handler(request)
middleware_name = cast("str", getattr(func, "__name__", "DynamicPromptMiddleware"))
return type(
middleware_name,
(AgentMiddleware,),
{
"state_schema": AgentState,
"tools": [],
"awrap_model_call": async_wrapped,
},
)()
def wrapped(
_self: AgentMiddleware[StateT, ContextT],
request: ModelRequest[ContextT],
handler: Callable[[ModelRequest[ContextT]], ModelResponse[Any]],
) -> ModelResponse[Any] | AIMessage:
prompt = cast("Callable[[ModelRequest[ContextT]], SystemMessage | str]", func)(request)
if isinstance(prompt, SystemMessage):
request = request.override(system_message=prompt)
else:
request = request.override(system_message=SystemMessage(content=prompt))
return handler(request)
async def async_wrapped_from_sync(
_self: AgentMiddleware[StateT, ContextT],
request: ModelRequest[ContextT],
handler: Callable[[ModelRequest[ContextT]], Awaitable[ModelResponse[Any]]],
) -> ModelResponse[Any] | AIMessage:
# Delegate to sync function
prompt = cast("Callable[[ModelRequest[ContextT]], SystemMessage | str]", func)(request)
if isinstance(prompt, SystemMessage):
request = request.override(system_message=prompt)
else:
request = request.override(system_message=SystemMessage(content=prompt))
return await handler(request)
middleware_name = cast("str", getattr(func, "__name__", "DynamicPromptMiddleware"))
return type(
middleware_name,
(AgentMiddleware,),
{
"state_schema": AgentState,
"tools": [],
"wrap_model_call": wrapped,
"awrap_model_call": async_wrapped_from_sync,
},
)()
if func is not None:
return decorator(func)
return decorator
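# A minimal usage sketch (illustrative only): a sync prompt function is wired
# into both the sync and async `wrap_model_call` hooks, so the middleware
# works with `invoke` and `ainvoke` alike.
def _example_dynamic_prompt_usage() -> None:
    @dynamic_prompt
    def greeting_prompt(request: ModelRequest) -> str:
        return "You are a helpful assistant."

    assert isinstance(greeting_prompt, AgentMiddleware)
    assert hasattr(greeting_prompt, "awrap_model_call")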
@overload
def wrap_model_call(
func: _CallableReturningModelResponse[StateT, ContextT, ResponseT],
) -> AgentMiddleware[StateT, ContextT]: ...
@overload
def wrap_model_call(
func: None = None,
*,
state_schema: type[StateT] | None = None,
tools: list[BaseTool] | None = None,
name: str | None = None,
) -> Callable[
[_CallableReturningModelResponse[StateT, ContextT, ResponseT]],
AgentMiddleware[StateT, ContextT],
]: ...
def wrap_model_call(
func: _CallableReturningModelResponse[StateT, ContextT, ResponseT] | None = None,
*,
state_schema: type[StateT] | None = None,
tools: list[BaseTool] | None = None,
name: str | None = None,
) -> (
Callable[
[_CallableReturningModelResponse[StateT, ContextT, ResponseT]],
AgentMiddleware[StateT, ContextT],
]
| AgentMiddleware[StateT, ContextT]
):
"""Create middleware with `wrap_model_call` hook from a function.
Converts a function with handler callback into middleware that can intercept model
calls, implement retry logic, handle errors, and rewrite responses.
Args:
func: Function accepting (request, handler) that calls handler(request)
to execute the model and returns `ModelResponse` or `AIMessage`.
Request contains state and runtime.
state_schema: Custom state schema.
Defaults to `AgentState`.
tools: Additional tools to register with this middleware.
name: Middleware class name.
Defaults to function name.
Returns:
`AgentMiddleware` instance if func provided, otherwise a decorator.
Examples:
!!! example "Basic retry logic"
```python
@wrap_model_call
def retry_on_error(request, handler):
max_retries = 3
for attempt in range(max_retries):
try:
return handler(request)
except Exception:
if attempt == max_retries - 1:
raise
```
!!! example "Model fallback"
```python
@wrap_model_call
def fallback_model(request, handler):
# Try primary model
try:
return handler(request)
except Exception:
pass
# Try fallback model
request = request.override(model=fallback_model_instance)
return handler(request)
```
!!! example "Rewrite response content (full `ModelResponse`)"
```python
@wrap_model_call
def uppercase_responses(request, handler):
response = handler(request)
ai_msg = response.result[0]
return ModelResponse(
result=[AIMessage(content=ai_msg.content.upper())],
structured_response=response.structured_response,
)
```
!!! example "Simple `AIMessage` return (converted automatically)"
```python
@wrap_model_call
def simple_response(request, handler):
# AIMessage is automatically converted to ModelResponse
return AIMessage(content="Simple response")
```
"""
def decorator(
func: _CallableReturningModelResponse[StateT, ContextT, ResponseT],
) -> AgentMiddleware[StateT, ContextT]:
is_async = iscoroutinefunction(func)
if is_async:
async def async_wrapped(
_self: AgentMiddleware[StateT, ContextT],
request: ModelRequest[ContextT],
handler: Callable[[ModelRequest[ContextT]], Awaitable[ModelResponse[ResponseT]]],
) -> ModelResponse[ResponseT] | AIMessage:
return await func(request, handler) # type: ignore[misc, arg-type]
middleware_name = name or cast(
"str", getattr(func, "__name__", "WrapModelCallMiddleware")
)
return type(
middleware_name,
(AgentMiddleware,),
{
"state_schema": state_schema or AgentState,
"tools": tools or [],
"awrap_model_call": async_wrapped,
},
)()
def wrapped(
_self: AgentMiddleware[StateT, ContextT],
request: ModelRequest[ContextT],
handler: Callable[[ModelRequest[ContextT]], ModelResponse[ResponseT]],
) -> ModelResponse[ResponseT] | AIMessage:
return func(request, handler)
middleware_name = name or cast("str", getattr(func, "__name__", "WrapModelCallMiddleware"))
return type(
middleware_name,
(AgentMiddleware,),
{
"state_schema": state_schema or AgentState,
"tools": tools or [],
"wrap_model_call": wrapped,
},
)()
if func is not None:
return decorator(func)
return decorator
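# A minimal usage sketch (illustrative only): a passthrough wrapper that just
# delegates to the handler. Real middleware would add retries, fallbacks, or
# response rewriting around the `handler(request)` call.
def _example_wrap_model_call_usage() -> None:
    @wrap_model_call
    def passthrough(request, handler):
        return handler(request)

    assert isinstance(passthrough, AgentMiddleware)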
@overload
def wrap_tool_call(
func: _CallableReturningToolResponse,
) -> AgentMiddleware: ...
@overload
def wrap_tool_call(
func: None = None,
*,
tools: list[BaseTool] | None = None,
name: str | None = None,
) -> Callable[
[_CallableReturningToolResponse],
AgentMiddleware,
]: ...
def wrap_tool_call(
func: _CallableReturningToolResponse | None = None,
*,
tools: list[BaseTool] | None = None,
name: str | None = None,
) -> (
Callable[
[_CallableReturningToolResponse],
AgentMiddleware,
]
| AgentMiddleware
):
"""Create middleware with `wrap_tool_call` hook from a function.
Async version is `awrap_tool_call`.
Converts a function with handler callback into middleware that can intercept
tool calls, implement retry logic, monitor execution, and modify responses.
Args:
func: Function accepting (request, handler) that calls
            handler(request) to execute the tool and returns the final `ToolMessage` or
`Command`.
Can be sync or async.
tools: Additional tools to register with this middleware.
name: Middleware class name.
Defaults to function name.
Returns:
`AgentMiddleware` instance if func provided, otherwise a decorator.
Examples:
!!! example "Retry logic"
```python
@wrap_tool_call
def retry_on_error(request, handler):
max_retries = 3
for attempt in range(max_retries):
try:
return handler(request)
except Exception:
if attempt == max_retries - 1:
raise
```
!!! example "Async retry logic"
```python
@wrap_tool_call
async def async_retry(request, handler):
for attempt in range(3):
try:
return await handler(request)
except Exception:
if attempt == 2:
raise
```
!!! example "Modify request"
```python
@wrap_tool_call
def modify_args(request, handler):
modified_call = {
**request.tool_call,
"args": {
**request.tool_call["args"],
"value": request.tool_call["args"]["value"] * 2,
},
}
request = request.override(tool_call=modified_call)
return handler(request)
```
!!! example "Short-circuit with cached result"
```python
@wrap_tool_call
def with_cache(request, handler):
if cached := get_cache(request):
return ToolMessage(content=cached, tool_call_id=request.tool_call["id"])
result = handler(request)
save_cache(request, result)
return result
```
"""
def decorator(
func: _CallableReturningToolResponse,
) -> AgentMiddleware:
is_async = iscoroutinefunction(func)
if is_async:
async def async_wrapped(
_self: AgentMiddleware,
request: ToolCallRequest,
handler: Callable[[ToolCallRequest], Awaitable[ToolMessage | Command[Any]]],
) -> ToolMessage | Command[Any]:
return await func(request, handler) # type: ignore[arg-type,misc]
middleware_name = name or cast(
"str", getattr(func, "__name__", "WrapToolCallMiddleware")
)
return type(
middleware_name,
(AgentMiddleware,),
{
"state_schema": AgentState,
"tools": tools or [],
"awrap_tool_call": async_wrapped,
},
)()
def wrapped(
_self: AgentMiddleware,
request: ToolCallRequest,
handler: Callable[[ToolCallRequest], ToolMessage | Command[Any]],
) -> ToolMessage | Command[Any]:
return func(request, handler)
middleware_name = name or cast("str", getattr(func, "__name__", "WrapToolCallMiddleware"))
return type(
middleware_name,
(AgentMiddleware,),
{
"state_schema": AgentState,
"tools": tools or [],
"wrap_tool_call": wrapped,
},
)()
if func is not None:
return decorator(func)
return decorator
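# A minimal usage sketch (illustrative only): tool-call middleware that simply
# delegates to the handler. A caching variant would instead return a
# `ToolMessage` built from `request.tool_call["id"]` without calling it.
def _example_wrap_tool_call_usage() -> None:
    @wrap_tool_call
    def passthrough_tool(request, handler):
        return handler(request)

    assert isinstance(passthrough_tool, AgentMiddleware)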
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/langchain/agents/middleware/types.py",
"license": "MIT License",
"lines": 1626,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
langchain-ai/langchain:libs/langchain_v1/langchain/agents/structured_output.py | """Types for setting agent response formats."""
from __future__ import annotations
import json
import uuid
from dataclasses import dataclass, is_dataclass
from types import UnionType
from typing import (
TYPE_CHECKING,
Any,
Generic,
Literal,
TypeVar,
Union,
get_args,
get_origin,
)
from langchain_core.tools import BaseTool, StructuredTool
from pydantic import BaseModel, TypeAdapter
from typing_extensions import Self, is_typeddict
if TYPE_CHECKING:
from collections.abc import Callable, Iterable
from langchain_core.messages import AIMessage
# Supported schema types: Pydantic models, dataclasses, TypedDict, JSON schema dicts
SchemaT = TypeVar("SchemaT")
SchemaKind = Literal["pydantic", "dataclass", "typeddict", "json_schema"]
class StructuredOutputError(Exception):
"""Base class for structured output errors."""
ai_message: AIMessage
class MultipleStructuredOutputsError(StructuredOutputError):
"""Raised when model returns multiple structured output tool calls when only one is expected."""
def __init__(self, tool_names: list[str], ai_message: AIMessage) -> None:
"""Initialize `MultipleStructuredOutputsError`.
Args:
tool_names: The names of the tools called for structured output.
ai_message: The AI message that contained the invalid multiple tool calls.
"""
self.tool_names = tool_names
self.ai_message = ai_message
super().__init__(
"Model incorrectly returned multiple structured responses "
f"({', '.join(tool_names)}) when only one is expected."
)
class StructuredOutputValidationError(StructuredOutputError):
"""Raised when structured output tool call arguments fail to parse according to the schema."""
def __init__(self, tool_name: str, source: Exception, ai_message: AIMessage) -> None:
"""Initialize `StructuredOutputValidationError`.
Args:
tool_name: The name of the tool that failed.
source: The exception that occurred.
ai_message: The AI message that contained the invalid structured output.
"""
self.tool_name = tool_name
self.source = source
self.ai_message = ai_message
super().__init__(f"Failed to parse structured output for tool '{tool_name}': {source}.")
def _parse_with_schema(
schema: type[SchemaT] | dict[str, Any], schema_kind: SchemaKind, data: dict[str, Any]
) -> Any:
"""Parse data using for any supported schema type.
Args:
schema: The schema type (Pydantic model, `dataclass`, or `TypedDict`)
schema_kind: One of `'pydantic'`, `'dataclass'`, `'typeddict'`, or
`'json_schema'`
data: The data to parse
Returns:
The parsed instance according to the schema type
Raises:
ValueError: If parsing fails
"""
if schema_kind == "json_schema":
return data
try:
adapter: TypeAdapter[SchemaT] = TypeAdapter(schema)
return adapter.validate_python(data)
except Exception as e:
schema_name = getattr(schema, "__name__", str(schema))
msg = f"Failed to parse data to {schema_name}: {e}"
raise ValueError(msg) from e
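# A minimal sketch (illustrative only) of how the helper validates raw tool
# arguments against a schema; `_Point` is a throwaway example model.
def _example_parse_with_schema() -> None:
    class _Point(BaseModel):
        x: int
        y: int

    point = _parse_with_schema(_Point, "pydantic", {"x": 1, "y": 2})
    assert point == _Point(x=1, y=2)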
@dataclass(init=False)
class _SchemaSpec(Generic[SchemaT]):
"""Describes a structured output schema."""
schema: type[SchemaT] | dict[str, Any]
"""The schema for the response, can be a Pydantic model, `dataclass`, `TypedDict`,
or JSON schema dict.
"""
name: str
"""Name of the schema, used for tool calling.
If not provided, the name will be the class name for models/dataclasses/TypedDicts,
or the `title` field for JSON schemas.
Falls back to a generated name if unavailable.
"""
description: str
"""Custom description of the schema.
    If not provided, will use the schema's docstring (or the JSON schema's `description` field).
"""
schema_kind: SchemaKind
"""The kind of schema."""
json_schema: dict[str, Any]
"""JSON schema associated with the schema."""
strict: bool | None = None
"""Whether to enforce strict validation of the schema."""
def __init__(
self,
schema: type[SchemaT] | dict[str, Any],
*,
name: str | None = None,
description: str | None = None,
strict: bool | None = None,
) -> None:
"""Initialize `SchemaSpec` with schema and optional parameters.
Args:
schema: Schema to describe.
name: Optional name for the schema.
description: Optional description for the schema.
strict: Whether to enforce strict validation of the schema.
Raises:
ValueError: If the schema type is unsupported.
"""
self.schema = schema
if name:
self.name = name
elif isinstance(schema, dict):
self.name = str(schema.get("title", f"response_format_{str(uuid.uuid4())[:4]}"))
else:
self.name = str(getattr(schema, "__name__", f"response_format_{str(uuid.uuid4())[:4]}"))
self.description = description or (
schema.get("description", "")
if isinstance(schema, dict)
else getattr(schema, "__doc__", None) or ""
)
self.strict = strict
if isinstance(schema, dict):
self.schema_kind = "json_schema"
self.json_schema = schema
elif isinstance(schema, type) and issubclass(schema, BaseModel):
self.schema_kind = "pydantic"
self.json_schema = schema.model_json_schema()
elif is_dataclass(schema):
self.schema_kind = "dataclass"
self.json_schema = TypeAdapter(schema).json_schema()
elif is_typeddict(schema):
self.schema_kind = "typeddict"
self.json_schema = TypeAdapter(schema).json_schema()
else:
msg = (
f"Unsupported schema type: {type(schema)}. "
f"Supported types: Pydantic models, dataclasses, TypedDicts, and JSON schema dicts."
)
raise ValueError(msg)
@dataclass(init=False)
class ToolStrategy(Generic[SchemaT]):
"""Use a tool calling strategy for model responses."""
schema: type[SchemaT] | UnionType | dict[str, Any]
"""Schema for the tool calls."""
schema_specs: list[_SchemaSpec[Any]]
"""Schema specs for the tool calls."""
tool_message_content: str | None
"""The content of the tool message to be returned when the model calls
an artificial structured output tool.
"""
handle_errors: (
bool | str | type[Exception] | tuple[type[Exception], ...] | Callable[[Exception], str]
)
"""Error handling strategy for structured output via `ToolStrategy`.
- `True`: Catch all errors with default error template
- `str`: Catch all errors with this custom message
- `type[Exception]`: Only catch this exception type with default message
- `tuple[type[Exception], ...]`: Only catch these exception types with default
message
- `Callable[[Exception], str]`: Custom function that returns error message
- `False`: No retry, let exceptions propagate
"""
def __init__(
self,
schema: type[SchemaT] | UnionType | dict[str, Any],
*,
tool_message_content: str | None = None,
handle_errors: bool
| str
| type[Exception]
| tuple[type[Exception], ...]
| Callable[[Exception], str] = True,
) -> None:
"""Initialize `ToolStrategy`.
Initialize `ToolStrategy` with schemas, tool message content, and error handling
strategy.
"""
self.schema = schema
self.tool_message_content = tool_message_content
self.handle_errors = handle_errors
def _iter_variants(schema: Any) -> Iterable[Any]:
"""Yield leaf variants from Union and JSON Schema oneOf."""
if get_origin(schema) in {UnionType, Union}:
for arg in get_args(schema):
yield from _iter_variants(arg)
return
if isinstance(schema, dict) and "oneOf" in schema:
for sub in schema.get("oneOf", []):
yield from _iter_variants(sub)
return
yield schema
self.schema_specs = [_SchemaSpec(s) for s in _iter_variants(schema)]
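# A minimal sketch (illustrative only): a union schema is flattened into one
# `_SchemaSpec` per variant, so each variant becomes its own output tool.
def _example_tool_strategy_union() -> None:
    class _Cat(BaseModel):
        meows: bool

    class _Dog(BaseModel):
        barks: bool

    strategy = ToolStrategy(_Cat | _Dog)
    assert [spec.schema for spec in strategy.schema_specs] == [_Cat, _Dog]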
@dataclass(init=False)
class ProviderStrategy(Generic[SchemaT]):
"""Use the model provider's native structured output method."""
schema: type[SchemaT] | dict[str, Any]
"""Schema for native mode."""
schema_spec: _SchemaSpec[SchemaT]
"""Schema spec for native mode."""
def __init__(
self,
schema: type[SchemaT] | dict[str, Any],
*,
strict: bool | None = None,
) -> None:
"""Initialize `ProviderStrategy` with schema.
Args:
schema: Schema to enforce via the provider's native structured output.
strict: Whether to request strict provider-side schema enforcement.
"""
self.schema = schema
self.schema_spec = _SchemaSpec(schema, strict=strict)
def to_model_kwargs(self) -> dict[str, Any]:
"""Convert to kwargs to bind to a model to force structured output.
Returns:
The kwargs to bind to a model.
"""
# OpenAI:
# - see https://platform.openai.com/docs/guides/structured-outputs
json_schema: dict[str, Any] = {
"name": self.schema_spec.name,
"schema": self.schema_spec.json_schema,
}
if self.schema_spec.strict:
json_schema["strict"] = True
response_format: dict[str, Any] = {
"type": "json_schema",
"json_schema": json_schema,
}
return {"response_format": response_format}
@dataclass
class OutputToolBinding(Generic[SchemaT]):
"""Information for tracking structured output tool metadata.
This contains all necessary information to handle structured responses generated via
tool calls, including the original schema, its type classification, and the
corresponding tool implementation used by the tools strategy.
"""
schema: type[SchemaT] | dict[str, Any]
"""The original schema provided for structured output (Pydantic model, dataclass,
TypedDict, or JSON schema dict).
"""
schema_kind: SchemaKind
"""Classification of the schema type for proper response construction."""
tool: BaseTool
"""LangChain tool instance created from the schema for model binding."""
@classmethod
def from_schema_spec(cls, schema_spec: _SchemaSpec[SchemaT]) -> Self:
"""Create an `OutputToolBinding` instance from a `SchemaSpec`.
Args:
schema_spec: The `SchemaSpec` to convert
Returns:
An `OutputToolBinding` instance with the appropriate tool created
"""
return cls(
schema=schema_spec.schema,
schema_kind=schema_spec.schema_kind,
tool=StructuredTool(
args_schema=schema_spec.json_schema,
name=schema_spec.name,
description=schema_spec.description,
),
)
def parse(self, tool_args: dict[str, Any]) -> SchemaT:
"""Parse tool arguments according to the schema.
Args:
tool_args: The arguments from the tool call
Returns:
The parsed response according to the schema type
Raises:
ValueError: If parsing fails
"""
return _parse_with_schema(self.schema, self.schema_kind, tool_args)
@dataclass
class ProviderStrategyBinding(Generic[SchemaT]):
"""Information for tracking native structured output metadata.
This contains all necessary information to handle structured responses generated via
native provider output, including the original schema, its type classification, and
parsing logic for provider-enforced JSON.
"""
schema: type[SchemaT] | dict[str, Any]
"""The original schema provided for structured output (Pydantic model, `dataclass`,
`TypedDict`, or JSON schema dict).
"""
schema_kind: SchemaKind
"""Classification of the schema type for proper response construction."""
@classmethod
def from_schema_spec(cls, schema_spec: _SchemaSpec[SchemaT]) -> Self:
"""Create a `ProviderStrategyBinding` instance from a `SchemaSpec`.
Args:
schema_spec: The `SchemaSpec` to convert
Returns:
A `ProviderStrategyBinding` instance for parsing native structured output
"""
return cls(
schema=schema_spec.schema,
schema_kind=schema_spec.schema_kind,
)
def parse(self, response: AIMessage) -> SchemaT:
"""Parse `AIMessage` content according to the schema.
Args:
response: The `AIMessage` containing the structured output
Returns:
The parsed response according to the schema
Raises:
            ValueError: If text extraction, JSON parsing, or schema validation fails
"""
# Extract text content from AIMessage and parse as JSON
raw_text = self._extract_text_content_from_message(response)
try:
data = json.loads(raw_text)
except Exception as e:
schema_name = getattr(self.schema, "__name__", "response_format")
msg = (
f"Native structured output expected valid JSON for {schema_name}, "
f"but parsing failed: {e}."
)
raise ValueError(msg) from e
# Parse according to schema
return _parse_with_schema(self.schema, self.schema_kind, data)
@staticmethod
def _extract_text_content_from_message(message: AIMessage) -> str:
"""Extract text content from an `AIMessage`.
Args:
message: The AI message to extract text from
Returns:
The extracted text content
"""
content = message.content
if isinstance(content, str):
return content
parts: list[str] = []
for c in content:
if isinstance(c, dict):
if c.get("type") == "text" and "text" in c:
parts.append(str(c["text"]))
elif "content" in c and isinstance(c["content"], str):
parts.append(c["content"])
else:
parts.append(str(c))
return "".join(parts)
class AutoStrategy(Generic[SchemaT]):
"""Automatically select the best strategy for structured output."""
schema: type[SchemaT] | dict[str, Any]
"""Schema for automatic mode."""
def __init__(
self,
schema: type[SchemaT] | dict[str, Any],
) -> None:
"""Initialize `AutoStrategy` with schema."""
self.schema = schema
ResponseFormat = ToolStrategy[SchemaT] | ProviderStrategy[SchemaT] | AutoStrategy[SchemaT]
"""Union type for all supported response format strategies."""
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/langchain/agents/structured_output.py",
"license": "MIT License",
"lines": 362,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/langchain_v1/tests/unit_tests/agents/any_str.py | import re
class AnyStr(str):
__slots__ = ("prefix",)
def __init__(self, prefix: str | re.Pattern[str] = "") -> None:
super().__init__()
self.prefix = prefix
def __eq__(self, other: object) -> bool:
return isinstance(other, str) and (
other.startswith(self.prefix)
if isinstance(self.prefix, str)
else self.prefix.match(other) is not None
)
def __hash__(self) -> int:
return hash((str(self), self.prefix))
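# A minimal sketch (illustrative only): AnyStr compares equal to any string
# with the given prefix (or matching the given regex pattern), which makes
# generated message ids assertable in tests.
def _example_any_str() -> None:
    assert AnyStr() == "any value at all"
    assert AnyStr("run-") == "run-123"
    assert AnyStr("run-") != "lc-123"
    assert AnyStr(re.compile(r"\d+")) == "42"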
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/tests/unit_tests/agents/any_str.py",
"license": "MIT License",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/langchain_v1/tests/unit_tests/agents/conftest_checkpointer.py | from collections.abc import AsyncIterator, Iterator
from contextlib import asynccontextmanager, contextmanager
from langgraph.checkpoint.base import BaseCheckpointSaver
from tests.unit_tests.agents.memory_assert import MemorySaverAssertImmutable
@contextmanager
def _checkpointer_memory() -> Iterator[BaseCheckpointSaver[str]]:
yield MemorySaverAssertImmutable()
@asynccontextmanager
async def _checkpointer_memory_aio() -> AsyncIterator[BaseCheckpointSaver[str]]:
yield MemorySaverAssertImmutable()
# Placeholder functions for other checkpointer types that aren't available
@contextmanager
def _checkpointer_sqlite() -> Iterator[BaseCheckpointSaver[str]]:
# Fallback to memory for now
yield MemorySaverAssertImmutable()
@contextmanager
def _checkpointer_postgres() -> Iterator[BaseCheckpointSaver[str]]:
# Fallback to memory for now
yield MemorySaverAssertImmutable()
@contextmanager
def _checkpointer_postgres_pipe() -> Iterator[BaseCheckpointSaver[str]]:
# Fallback to memory for now
yield MemorySaverAssertImmutable()
@contextmanager
def _checkpointer_postgres_pool() -> Iterator[BaseCheckpointSaver[str]]:
# Fallback to memory for now
yield MemorySaverAssertImmutable()
@asynccontextmanager
async def _checkpointer_sqlite_aio() -> AsyncIterator[BaseCheckpointSaver[str]]:
# Fallback to memory for now
yield MemorySaverAssertImmutable()
@asynccontextmanager
async def _checkpointer_postgres_aio() -> AsyncIterator[BaseCheckpointSaver[str]]:
# Fallback to memory for now
yield MemorySaverAssertImmutable()
@asynccontextmanager
async def _checkpointer_postgres_aio_pipe() -> AsyncIterator[BaseCheckpointSaver[str]]:
# Fallback to memory for now
yield MemorySaverAssertImmutable()
@asynccontextmanager
async def _checkpointer_postgres_aio_pool() -> AsyncIterator[BaseCheckpointSaver[str]]:
# Fallback to memory for now
yield MemorySaverAssertImmutable()
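# A minimal sketch (illustrative only): each factory yields a checkpointer via
# a context manager; the non-memory variants currently fall back to the
# in-memory implementation, as the comments above note.
def _example_checkpointer_factory() -> None:
    with _checkpointer_sqlite() as checkpointer:
        assert isinstance(checkpointer, BaseCheckpointSaver)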
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/tests/unit_tests/agents/conftest_checkpointer.py",
"license": "MIT License",
"lines": 43,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/langchain_v1/tests/unit_tests/agents/conftest_store.py | from collections.abc import AsyncIterator, Iterator
from contextlib import asynccontextmanager, contextmanager
from langgraph.store.base import BaseStore
from langgraph.store.memory import InMemoryStore
@contextmanager
def _store_memory() -> Iterator[BaseStore]:
store = InMemoryStore()
yield store
@asynccontextmanager
async def _store_memory_aio() -> AsyncIterator[BaseStore]:
store = InMemoryStore()
yield store
# Placeholder functions for other store types that aren't available
@contextmanager
def _store_postgres() -> Iterator[BaseStore]:
# Fallback to memory for now
store = InMemoryStore()
yield store
@contextmanager
def _store_postgres_pipe() -> Iterator[BaseStore]:
# Fallback to memory for now
store = InMemoryStore()
yield store
@contextmanager
def _store_postgres_pool() -> Iterator[BaseStore]:
# Fallback to memory for now
store = InMemoryStore()
yield store
@asynccontextmanager
async def _store_postgres_aio() -> AsyncIterator[BaseStore]:
# Fallback to memory for now
store = InMemoryStore()
yield store
@asynccontextmanager
async def _store_postgres_aio_pipe() -> AsyncIterator[BaseStore]:
# Fallback to memory for now
store = InMemoryStore()
yield store
@asynccontextmanager
async def _store_postgres_aio_pool() -> AsyncIterator[BaseStore]:
# Fallback to memory for now
store = InMemoryStore()
yield store
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/tests/unit_tests/agents/conftest_store.py",
"license": "MIT License",
"lines": 43,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/langchain_v1/tests/unit_tests/agents/memory_assert.py | import os
import tempfile
import time
from collections import defaultdict
from typing import Any
from langchain_core.runnables import RunnableConfig
from langgraph.checkpoint.base import (
ChannelVersions,
Checkpoint,
CheckpointMetadata,
)
from langgraph.checkpoint.memory import InMemorySaver, PersistentDict
from langgraph.checkpoint.serde.base import (
SerializerProtocol,
)
from langgraph.pregel._checkpoint import copy_checkpoint
class MemorySaverAssertImmutable(InMemorySaver):
storage_for_copies: defaultdict[str, dict[str, dict[str, tuple[str, bytes]]]]
def __init__(
self,
*,
serde: SerializerProtocol | None = None,
put_sleep: float | None = None,
) -> None:
_, filename = tempfile.mkstemp()
class TempfilePersistentDict(PersistentDict):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, filename=filename, **kwargs)
super().__init__(serde=serde, factory=TempfilePersistentDict)
self.storage_for_copies = defaultdict(lambda: defaultdict(dict))
self.put_sleep = put_sleep
self.stack.callback(os.remove, filename)
def put(
self,
config: RunnableConfig,
checkpoint: Checkpoint,
metadata: CheckpointMetadata,
new_versions: ChannelVersions,
) -> RunnableConfig:
if self.put_sleep:
time.sleep(self.put_sleep)
# assert checkpoint hasn't been modified since last written
thread_id = config["configurable"]["thread_id"]
checkpoint_ns = config["configurable"]["checkpoint_ns"]
if saved := super().get(config):
assert (
self.serde.loads_typed(
self.storage_for_copies[thread_id][checkpoint_ns][saved["id"]]
)
== saved
)
self.storage_for_copies[thread_id][checkpoint_ns][checkpoint["id"]] = (
self.serde.dumps_typed(copy_checkpoint(checkpoint))
)
# call super to write checkpoint
return super().put(config, checkpoint, metadata, new_versions)
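# A minimal sketch (illustrative only): the saver drops in wherever an
# InMemorySaver is accepted, while snapshotting every checkpoint it writes so
# `put` can assert that callers never mutated a stored checkpoint.
def _example_saver_construction() -> None:
    saver = MemorySaverAssertImmutable()
    assert saver.storage_for_copies == {}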
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/tests/unit_tests/agents/memory_assert.py",
"license": "MIT License",
"lines": 56,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/langchain_v1/tests/unit_tests/agents/messages.py | """Redefined messages as a work-around for a pydantic issue with AnyStr.
The code below creates versions of pydantic message models
that will work in unit tests with AnyStr as the id field.
Please note that the `id` field is assigned AFTER the model is created
to work around an issue with pydantic ignoring the __eq__ method on
subclassed strings.
"""
from typing import Any
from langchain_core.messages import HumanMessage, ToolMessage
from tests.unit_tests.agents.any_str import AnyStr
def _AnyIdHumanMessage(**kwargs: Any) -> HumanMessage: # noqa: N802
"""Create a human message with an any id field."""
message = HumanMessage(**kwargs)
message.id = AnyStr()
return message
def _AnyIdToolMessage(**kwargs: Any) -> ToolMessage: # noqa: N802
"""Create a tool message with an any id field."""
message = ToolMessage(**kwargs)
message.id = AnyStr()
return message
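# A minimal sketch (illustrative only): a helper-built message compares equal
# to a message with any concrete id, which is the point of the AnyStr id.
def _example_any_id_message() -> None:
    expected = _AnyIdHumanMessage(content="hi")
    assert expected == HumanMessage(content="hi", id="some-generated-id")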
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/tests/unit_tests/agents/messages.py",
"license": "MIT License",
"lines": 20,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/langchain_v1/tests/unit_tests/agents/model.py | import json
from collections.abc import Callable, Sequence
from dataclasses import asdict, is_dataclass
from typing import (
Any,
Literal,
)
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models import BaseChatModel, LanguageModelInput
from langchain_core.messages import (
AIMessage,
BaseMessage,
ToolCall,
)
from langchain_core.outputs import ChatGeneration, ChatResult
from langchain_core.runnables import Runnable
from langchain_core.tools import BaseTool
from pydantic import BaseModel
from typing_extensions import override
class FakeToolCallingModel(BaseChatModel):
tool_calls: list[list[ToolCall]] | list[list[dict[str, Any]]] | None = None
structured_response: Any | None = None
index: int = 0
tool_style: Literal["openai", "anthropic"] = "openai"
def _generate(
self,
messages: list[BaseMessage],
stop: list[str] | None = None,
run_manager: CallbackManagerForLLMRun | None = None,
**kwargs: Any,
) -> ChatResult:
"""Top Level call."""
is_native = kwargs.get("response_format")
if self.tool_calls:
if is_native:
tool_calls = (
self.tool_calls[self.index] if self.index < len(self.tool_calls) else []
)
else:
tool_calls = self.tool_calls[self.index % len(self.tool_calls)]
else:
tool_calls = []
if is_native and not tool_calls:
if isinstance(self.structured_response, BaseModel):
content_obj = self.structured_response.model_dump()
elif is_dataclass(self.structured_response) and not isinstance(
self.structured_response, type
):
content_obj = asdict(self.structured_response)
            elif isinstance(self.structured_response, dict):
                content_obj = self.structured_response
            else:
                msg = "structured_response must be a BaseModel, dataclass instance, or dict"
                raise TypeError(msg)
            message = AIMessage(content=json.dumps(content_obj), id=str(self.index))
else:
messages_string = "-".join([m.text for m in messages])
message = AIMessage(
content=messages_string,
id=str(self.index),
tool_calls=tool_calls.copy(),
)
self.index += 1
return ChatResult(generations=[ChatGeneration(message=message)])
@property
def _llm_type(self) -> str:
return "fake-tool-call-model"
@override
def bind_tools(
self,
tools: Sequence[dict[str, Any] | type | Callable[..., Any] | BaseTool],
*,
tool_choice: str | None = None,
**kwargs: Any,
) -> Runnable[LanguageModelInput, AIMessage]:
if len(tools) == 0:
msg = "Must provide at least one tool"
raise ValueError(msg)
tool_dicts = []
for tool in tools:
if isinstance(tool, dict):
tool_dicts.append(tool)
continue
if not isinstance(tool, BaseTool):
msg = "Only BaseTool and dict is supported by FakeToolCallingModel.bind_tools"
raise TypeError(msg)
# NOTE: this is a simplified tool spec for testing purposes only
if self.tool_style == "openai":
tool_dicts.append(
{
"type": "function",
"function": {
"name": tool.name,
},
}
)
elif self.tool_style == "anthropic":
tool_dicts.append(
{
"name": tool.name,
}
)
return self.bind(tools=tool_dicts, **kwargs)
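# A minimal sketch (illustrative only): with no scripted tool calls, the fake
# model echoes the joined text of its input messages and numbers its responses.
def _example_fake_model_echo() -> None:
    from langchain_core.messages import HumanMessage

    model = FakeToolCallingModel()
    result = model.invoke([HumanMessage("hi")])
    assert result.content == "hi"
    assert result.id == "0"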
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/tests/unit_tests/agents/model.py",
"license": "MIT License",
"lines": 100,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/langchain_v1/tests/unit_tests/agents/test_react_agent.py | # import dataclasses
# import inspect
# from types import UnionType
# from typing import (
# Annotated,
# Union,
# )
# import pytest
# from langchain_core.language_models import BaseChatModel
# from langchain_core.messages import (
# AIMessage,
# HumanMessage,
# MessageLikeRepresentation,
# RemoveMessage,
# SystemMessage,
# ToolCall,
# ToolMessage,
# )
# from langchain_core.runnables import RunnableConfig, RunnableLambda
# from langchain_core.tools import BaseTool, InjectedToolCallId, ToolException
# from langchain_core.tools import tool as dec_tool
# from langgraph.checkpoint.base import BaseCheckpointSaver
# from langgraph.graph import START, MessagesState, StateGraph
# from langgraph.graph.message import REMOVE_ALL_MESSAGES
# from langgraph.runtime import Runtime
# from langgraph.store.base import BaseStore
# from langgraph.store.memory import InMemoryStore
# from langgraph.types import Command, Interrupt, interrupt
# from pydantic import BaseModel, Field
# from typing_extensions import TypedDict
# from langchain.agents import (
# AgentState,
# create_agent,
# )
# from langchain.tools import (
# ToolNode,
# InjectedState,
# InjectedStore,
# )
# from langchain.tools.tool_node import (
# _get_state_args,
# _infer_handled_types,
# )
# from tests.unit_tests.agents.any_str import AnyStr
# from tests.unit_tests.agents.messages import _AnyIdHumanMessage, _AnyIdToolMessage
# from tests.unit_tests.agents.model import FakeToolCallingModel
# pytestmark = pytest.mark.anyio
# def test_no_prompt(sync_checkpointer: BaseCheckpointSaver) -> None:
# model = FakeToolCallingModel()
# agent = create_agent(
# model,
# [],
# checkpointer=sync_checkpointer,
# )
# inputs = [HumanMessage("hi?")]
# thread = {"configurable": {"thread_id": "123"}}
# response = agent.invoke({"messages": inputs}, thread, debug=True)
# expected_response = {"messages": [*inputs, AIMessage(content="hi?", id="0")]}
# assert response == expected_response
# saved = sync_checkpointer.get_tuple(thread)
# assert saved is not None
# assert saved.checkpoint["channel_values"] == {
# "messages": [
# _AnyIdHumanMessage(content="hi?"),
# AIMessage(content="hi?", id="0"),
# ],
# }
# assert saved.metadata == {
# "parents": {},
# "source": "loop",
# "step": 1,
# }
# assert saved.pending_writes == []
# async def test_no_prompt_async(async_checkpointer: BaseCheckpointSaver) -> None:
# model = FakeToolCallingModel()
# agent = create_agent(model, [], checkpointer=async_checkpointer)
# inputs = [HumanMessage("hi?")]
# thread = {"configurable": {"thread_id": "123"}}
# response = await agent.ainvoke({"messages": inputs}, thread, debug=True)
# expected_response = {"messages": [*inputs, AIMessage(content="hi?", id="0")]}
# assert response == expected_response
# saved = await async_checkpointer.aget_tuple(thread)
# assert saved is not None
# assert saved.checkpoint["channel_values"] == {
# "messages": [
# _AnyIdHumanMessage(content="hi?"),
# AIMessage(content="hi?", id="0"),
# ],
# }
# assert saved.metadata == {
# "parents": {},
# "source": "loop",
# "step": 1,
# }
# assert saved.pending_writes == []
# def test_system_message_prompt() -> None:
# prompt = SystemMessage(content="Foo")
# agent = create_agent(FakeToolCallingModel(), [], system_prompt=prompt)
# inputs = [HumanMessage("hi?")]
# response = agent.invoke({"messages": inputs})
# expected_response = {"messages": [*inputs, AIMessage(content="Foo-hi?", id="0", tool_calls=[])]}
# assert response == expected_response
# def test_string_prompt() -> None:
# prompt = "Foo"
# agent = create_agent(FakeToolCallingModel(), [], system_prompt=prompt)
# inputs = [HumanMessage("hi?")]
# response = agent.invoke({"messages": inputs})
# expected_response = {"messages": [*inputs, AIMessage(content="Foo-hi?", id="0", tool_calls=[])]}
# assert response == expected_response
# def test_callable_prompt() -> None:
# def prompt(state):
# modified_message = f"Bar {state['messages'][-1].content}"
# return [HumanMessage(content=modified_message)]
# agent = create_agent(FakeToolCallingModel(), [], system_prompt=prompt)
# inputs = [HumanMessage("hi?")]
# response = agent.invoke({"messages": inputs})
# expected_response = {"messages": [*inputs, AIMessage(content="Bar hi?", id="0")]}
# assert response == expected_response
# async def test_callable_prompt_async() -> None:
# async def prompt(state):
# modified_message = f"Bar {state['messages'][-1].content}"
# return [HumanMessage(content=modified_message)]
# agent = create_agent(FakeToolCallingModel(), [], system_prompt=prompt)
# inputs = [HumanMessage("hi?")]
# response = await agent.ainvoke({"messages": inputs})
# expected_response = {"messages": [*inputs, AIMessage(content="Bar hi?", id="0")]}
# assert response == expected_response
# def test_runnable_prompt() -> None:
# prompt = RunnableLambda(
# lambda state: [HumanMessage(content=f"Baz {state['messages'][-1].content}")]
# )
# agent = create_agent(FakeToolCallingModel(), [], system_prompt=prompt)
# inputs = [HumanMessage("hi?")]
# response = agent.invoke({"messages": inputs})
# expected_response = {"messages": [*inputs, AIMessage(content="Baz hi?", id="0")]}
# assert response == expected_response
# def test_prompt_with_store() -> None:
# def add(a: int, b: int):
# """Adds a and b"""
# return a + b
# in_memory_store = InMemoryStore()
# in_memory_store.put(("memories", "1"), "user_name", {"data": "User name is Alice"})
# in_memory_store.put(("memories", "2"), "user_name", {"data": "User name is Bob"})
# def prompt(state, config, *, store):
# user_id = config["configurable"]["user_id"]
# system_str = store.get(("memories", user_id), "user_name").value["data"]
# return [SystemMessage(system_str)] + state["messages"]
# def prompt_no_store(state, config):
# return SystemMessage("foo") + state["messages"]
# model = FakeToolCallingModel()
# # test state modifier that uses store works
# agent = create_agent(
# model,
# [add],
# prompt=prompt,
# store=in_memory_store,
# )
# response = agent.invoke({"messages": [("user", "hi")]}, {"configurable": {"user_id": "1"}})
# assert response["messages"][-1].content == "User name is Alice-hi"
# # test state modifier that doesn't use store works
# agent = create_agent(
# model,
# [add],
# prompt=prompt_no_store,
# store=in_memory_store,
# )
# response = agent.invoke({"messages": [("user", "hi")]}, {"configurable": {"user_id": "2"}})
# assert response["messages"][-1].content == "foo-hi"
# async def test_prompt_with_store_async() -> None:
# async def add(a: int, b: int):
# """Adds a and b"""
# return a + b
# in_memory_store = InMemoryStore()
# await in_memory_store.aput(("memories", "1"), "user_name", {"data": "User name is Alice"})
# await in_memory_store.aput(("memories", "2"), "user_name", {"data": "User name is Bob"})
# async def prompt(state, config, *, store):
# user_id = config["configurable"]["user_id"]
# system_str = (await store.aget(("memories", user_id), "user_name")).value["data"]
# return [SystemMessage(system_str)] + state["messages"]
# async def prompt_no_store(state, config):
# return SystemMessage("foo") + state["messages"]
# model = FakeToolCallingModel()
# # test state modifier that uses store works
# agent = create_agent(model, [add], system_prompt=prompt, store=in_memory_store)
# response = await agent.ainvoke(
# {"messages": [("user", "hi")]}, {"configurable": {"user_id": "1"}}
# )
# assert response["messages"][-1].content == "User name is Alice-hi"
# # test state modifier that doesn't use store works
# agent = create_agent(model, [add], system_prompt=prompt_no_store, store=in_memory_store)
# response = await agent.ainvoke(
# {"messages": [("user", "hi")]}, {"configurable": {"user_id": "2"}}
# )
# assert response["messages"][-1].content == "foo-hi"
# @pytest.mark.parametrize("tool_style", ["openai", "anthropic"])
# @pytest.mark.parametrize("include_builtin", [True, False])
# def test_model_with_tools(tool_style: str, include_builtin: bool) -> None:
# model = FakeToolCallingModel(tool_style=tool_style)
# @dec_tool
# def tool1(some_val: int) -> str:
# """Tool 1 docstring."""
# return f"Tool 1: {some_val}"
# @dec_tool
# def tool2(some_val: int) -> str:
# """Tool 2 docstring."""
# return f"Tool 2: {some_val}"
# tools: list[BaseTool | dict] = [tool1, tool2]
# if include_builtin:
# tools.append(
# {
# "type": "mcp",
# "server_label": "atest_sever",
# "server_url": "https://some.mcp.somewhere.com/sse",
# "headers": {"foo": "bar"},
# "allowed_tools": [
# "mcp_tool_1",
# "set_active_account",
# "get_url_markdown",
# "get_url_screenshot",
# ],
# "require_approval": "never",
# }
# )
# # check valid agent constructor
# with pytest.raises(ValueError):
# create_agent(
# model.bind_tools(tools),
# tools,
# )
# # Test removed: _validate_chat_history function no longer exists
# # def test__validate_messages() -> None:
# # pass
# def test__infer_handled_types() -> None:
# def handle(e) -> str: # type: ignore
# return ""
# def handle2(e: Exception) -> str:
# return ""
# def handle3(e: ValueError | ToolException) -> str:
# return ""
# def handle4(e: Union[ValueError, ToolException]) -> str:
# return ""
# class Handler:
# def handle(self, e: ValueError) -> str:
# return ""
# handle5 = Handler().handle
# def handle6(e: Union[Union[TypeError, ValueError], ToolException]) -> str:
# return ""
# expected: tuple = (Exception,)
# actual = _infer_handled_types(handle)
# assert expected == actual
# expected = (Exception,)
# actual = _infer_handled_types(handle2)
# assert expected == actual
# expected = (ValueError, ToolException)
# actual = _infer_handled_types(handle3)
# assert expected == actual
# expected = (ValueError, ToolException)
# actual = _infer_handled_types(handle4)
# assert expected == actual
# expected = (ValueError,)
# actual = _infer_handled_types(handle5)
# assert expected == actual
# expected = (TypeError, ValueError, ToolException)
# actual = _infer_handled_types(handle6)
# assert expected == actual
# with pytest.raises(ValueError):
# def handler(e: str) -> str:
# return ""
# _infer_handled_types(handler)
# with pytest.raises(ValueError):
# def handler(e: list[Exception]) -> str:
# return ""
# _infer_handled_types(handler)
# with pytest.raises(ValueError):
# def handler(e: Union[str, int]) -> str:
# return ""
# _infer_handled_types(handler)
# def test_react_agent_with_structured_response() -> None:
# class WeatherResponse(BaseModel):
# temperature: float = Field(description="The temperature in fahrenheit")
# tool_calls = [
# [{"args": {}, "id": "1", "name": "get_weather"}],
# [{"name": "WeatherResponse", "id": "2", "args": {"temperature": 75}}],
# ]
# def get_weather() -> str:
# """Get the weather"""
# return "The weather is sunny and 75°F."
# expected_structured_response = WeatherResponse(temperature=75)
# model = FakeToolCallingModel(
# tool_calls=tool_calls, structured_response=expected_structured_response
# )
# agent = create_agent(
# model,
# [get_weather],
# response_format=WeatherResponse,
# )
# response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})
# assert response["structured_response"] == expected_structured_response
# assert len(response["messages"]) == 5
# # Check message types in message history
# msg_types = [m.type for m in response["messages"]]
# assert msg_types == [
# "human", # "What's the weather?"
# "ai", # "What's the weather?"
# "tool", # "The weather is sunny and 75°F."
# "ai", # structured response
# "tool", # artificial tool message
# ]
# assert [m.content for m in response["messages"]] == [
# "What's the weather?",
# "What's the weather?",
# "The weather is sunny and 75°F.",
# "What's the weather?-What's the weather?-The weather is sunny and 75°F.",
# "Returning structured response: {'temperature': 75.0}",
# ]
# class CustomState(AgentState):
# user_name: str
# def test_react_agent_update_state(
# sync_checkpointer: BaseCheckpointSaver,
# ) -> None:
# @dec_tool
# def get_user_name(tool_call_id: Annotated[str, InjectedToolCallId]):
# """Retrieve user name"""
# user_name = interrupt("Please provider user name:")
# return Command(
# update={
# "user_name": user_name,
# "messages": [
# ToolMessage("Successfully retrieved user name", tool_call_id=tool_call_id)
# ],
# }
# )
# def prompt(state: CustomState):
# user_name = state.get("user_name")
# if user_name is None:
# return state["messages"]
# system_msg = f"User name is {user_name}"
# return [{"role": "system", "content": system_msg}] + state["messages"]
# tool_calls = [[{"args": {}, "id": "1", "name": "get_user_name"}]]
# model = FakeToolCallingModel(tool_calls=tool_calls)
# agent = create_agent(
# model,
# [get_user_name],
# state_schema=CustomState,
# prompt=prompt,
# checkpointer=sync_checkpointer,
# )
# config = {"configurable": {"thread_id": "1"}}
# # Run until interrupted
# agent.invoke({"messages": [("user", "what's my name")]}, config)
# # supply the value for the interrupt
# response = agent.invoke(Command(resume="Archibald"), config)
# # confirm that the state was updated
# assert response["user_name"] == "Archibald"
# assert len(response["messages"]) == 4
# tool_message: ToolMessage = response["messages"][-2]
# assert tool_message.content == "Successfully retrieved user name"
# assert tool_message.tool_call_id == "1"
# assert tool_message.name == "get_user_name"
# def test_react_agent_parallel_tool_calls(
# sync_checkpointer: BaseCheckpointSaver,
# ) -> None:
# human_assistance_execution_count = 0
# @dec_tool
# def human_assistance(query: str) -> str:
# """Request assistance from a human."""
# nonlocal human_assistance_execution_count
# human_response = interrupt({"query": query})
# human_assistance_execution_count += 1
# return human_response["data"]
# get_weather_execution_count = 0
# @dec_tool
# def get_weather(location: str) -> str:
# """Use this tool to get the weather."""
# nonlocal get_weather_execution_count
# get_weather_execution_count += 1
# return "It's sunny!"
# tool_calls = [
# [
# {"args": {"location": "sf"}, "id": "1", "name": "get_weather"},
# {"args": {"query": "request help"}, "id": "2", "name": "human_assistance"},
# ],
# [],
# ]
# model = FakeToolCallingModel(tool_calls=tool_calls)
# agent = create_agent(
# model,
# [human_assistance, get_weather],
# checkpointer=sync_checkpointer,
# )
# config = {"configurable": {"thread_id": "1"}}
# query = "Get user assistance and also check the weather"
# message_types = []
# for event in agent.stream({"messages": [("user", query)]}, config, stream_mode="values"):
# if messages := event.get("messages"):
# message_types.append([m.type for m in messages])
# assert message_types == [
# ["human"],
# ["human", "ai"],
# ["human", "ai", "tool"],
# ]
# # Resume
# message_types = []
# for event in agent.stream(Command(resume={"data": "Hello"}), config, stream_mode="values"):
# if messages := event.get("messages"):
# message_types.append([m.type for m in messages])
# assert message_types == [
# ["human", "ai"],
# ["human", "ai", "tool", "tool"],
# ["human", "ai", "tool", "tool", "ai"],
# ]
# assert human_assistance_execution_count == 1
# assert get_weather_execution_count == 1
# class AgentStateExtraKey(AgentState):
# foo: int
# def test_create_agent_inject_vars() -> None:
# """Test that the agent can inject state and store into tool functions."""
# store = InMemoryStore()
# namespace = ("test",)
# store.put(namespace, "test_key", {"bar": 3})
# def tool1(
# some_val: int,
# state: Annotated[dict, InjectedState],
# store: Annotated[BaseStore, InjectedStore()],
# ) -> str:
# """Tool 1 docstring."""
# store_val = store.get(namespace, "test_key").value["bar"]
# return some_val + state["foo"] + store_val
# tool_call = {
# "name": "tool1",
# "args": {"some_val": 1},
# "id": "some 0",
# "type": "tool_call",
# }
# model = FakeToolCallingModel(tool_calls=[[tool_call], []])
# agent = create_agent(
# model,
# ToolNode([tool1], handle_tool_errors=False),
# state_schema=AgentStateExtraKey,
# store=store,
# )
# result = agent.invoke({"messages": [{"role": "user", "content": "hi"}], "foo": 2})
# assert result["messages"] == [
# _AnyIdHumanMessage(content="hi"),
# AIMessage(content="hi", tool_calls=[tool_call], id="0"),
# _AnyIdToolMessage(content="6", name="tool1", tool_call_id="some 0"),
# AIMessage("hi-hi-6", id="1"),
# ]
# assert result["foo"] == 2
# async def test_return_direct() -> None:
# @dec_tool(return_direct=True)
# def tool_return_direct(input: str) -> str:
# """A tool that returns directly."""
# return f"Direct result: {input}"
# @dec_tool
# def tool_normal(input: str) -> str:
# """A normal tool."""
# return f"Normal result: {input}"
# first_tool_call = [
# ToolCall(
# name="tool_return_direct",
# args={"input": "Test direct"},
# id="1",
# ),
# ]
# expected_ai = AIMessage(
# content="Test direct",
# id="0",
# tool_calls=first_tool_call,
# )
# model = FakeToolCallingModel(tool_calls=[first_tool_call, []])
# agent = create_agent(
# model,
# [tool_return_direct, tool_normal],
# )
# # Test direct return for tool_return_direct
# result = agent.invoke({"messages": [HumanMessage(content="Test direct", id="hum0")]})
# assert result["messages"] == [
# HumanMessage(content="Test direct", id="hum0"),
# expected_ai,
# ToolMessage(
# content="Direct result: Test direct",
# name="tool_return_direct",
# tool_call_id="1",
# id=result["messages"][2].id,
# ),
# ]
# second_tool_call = [
# ToolCall(
# name="tool_normal",
# args={"input": "Test normal"},
# id="2",
# ),
# ]
# model = FakeToolCallingModel(tool_calls=[second_tool_call, []])
# agent = create_agent(model, [tool_return_direct, tool_normal])
# result = agent.invoke({"messages": [HumanMessage(content="Test normal", id="hum1")]})
# assert result["messages"] == [
# HumanMessage(content="Test normal", id="hum1"),
# AIMessage(content="Test normal", id="0", tool_calls=second_tool_call),
# ToolMessage(
# content="Normal result: Test normal",
# name="tool_normal",
# tool_call_id="2",
# id=result["messages"][2].id,
# ),
# AIMessage(content="Test normal-Test normal-Normal result: Test normal", id="1"),
# ]
# both_tool_calls = [
# ToolCall(
# name="tool_return_direct",
# args={"input": "Test both direct"},
# id="3",
# ),
# ToolCall(
# name="tool_normal",
# args={"input": "Test both normal"},
# id="4",
# ),
# ]
# model = FakeToolCallingModel(tool_calls=[both_tool_calls, []])
# agent = create_agent(model, [tool_return_direct, tool_normal])
# result = agent.invoke({"messages": [HumanMessage(content="Test both", id="hum2")]})
# assert result["messages"] == [
# HumanMessage(content="Test both", id="hum2"),
# AIMessage(content="Test both", id="0", tool_calls=both_tool_calls),
# ToolMessage(
# content="Direct result: Test both direct",
# name="tool_return_direct",
# tool_call_id="3",
# id=result["messages"][2].id,
# ),
# ToolMessage(
# content="Normal result: Test both normal",
# name="tool_normal",
# tool_call_id="4",
# id=result["messages"][3].id,
# ),
# ]
# def test__get_state_args() -> None:
# class Schema1(BaseModel):
# a: Annotated[str, InjectedState]
# class Schema2(Schema1):
# b: Annotated[int, InjectedState("bar")]
# @dec_tool(args_schema=Schema2)
# def foo(a: str, b: int) -> float:
# """return"""
# return 0.0
# assert _get_state_args(foo) == {"a": None, "b": "bar"}
# def test_inspect_react() -> None:
# model = FakeToolCallingModel(tool_calls=[])
# agent = create_agent(model, [])
# inspect.getclosurevars(agent.nodes["agent"].bound.func)
# def test_react_with_subgraph_tools(
# sync_checkpointer: BaseCheckpointSaver,
# ) -> None:
# class State(TypedDict):
# a: int
# b: int
# class Output(TypedDict):
# result: int
# # Define the subgraphs
# def add(state):
# return {"result": state["a"] + state["b"]}
# add_subgraph = (
# StateGraph(State, output_schema=Output).add_node(add).add_edge(START, "add").compile()
# )
# def multiply(state):
# return {"result": state["a"] * state["b"]}
# multiply_subgraph = (
# StateGraph(State, output_schema=Output)
# .add_node(multiply)
# .add_edge(START, "multiply")
# .compile()
# )
# multiply_subgraph.invoke({"a": 2, "b": 3})
# # Add subgraphs as tools
# def addition(a: int, b: int):
# """Add two numbers"""
# return add_subgraph.invoke({"a": a, "b": b})["result"]
# def multiplication(a: int, b: int):
# """Multiply two numbers"""
# return multiply_subgraph.invoke({"a": a, "b": b})["result"]
# model = FakeToolCallingModel(
# tool_calls=[
# [
# {"args": {"a": 2, "b": 3}, "id": "1", "name": "addition"},
# {"args": {"a": 2, "b": 3}, "id": "2", "name": "multiplication"},
# ],
# [],
# ]
# )
# tool_node = ToolNode([addition, multiplication], handle_tool_errors=False)
# agent = create_agent(
# model,
# tool_node,
# checkpointer=sync_checkpointer,
# )
# result = agent.invoke(
# {"messages": [HumanMessage(content="What's 2 + 3 and 2 * 3?")]},
# config={"configurable": {"thread_id": "1"}},
# )
# assert result["messages"] == [
# _AnyIdHumanMessage(content="What's 2 + 3 and 2 * 3?"),
# AIMessage(
# content="What's 2 + 3 and 2 * 3?",
# id="0",
# tool_calls=[
# ToolCall(name="addition", args={"a": 2, "b": 3}, id="1"),
# ToolCall(name="multiplication", args={"a": 2, "b": 3}, id="2"),
# ],
# ),
# ToolMessage(content="5", name="addition", tool_call_id="1", id=result["messages"][2].id),
# ToolMessage(
# content="6",
# name="multiplication",
# tool_call_id="2",
# id=result["messages"][3].id,
# ),
# AIMessage(content="What's 2 + 3 and 2 * 3?-What's 2 + 3 and 2 * 3?-5-6", id="1"),
# ]
# def test_react_agent_subgraph_streaming_sync() -> None:
# """Test React agent streaming when used as a subgraph node sync version"""
# @dec_tool
# def get_weather(city: str) -> str:
# """Get the weather of a city."""
# return f"The weather of {city} is sunny."
# # Create a React agent
# model = FakeToolCallingModel(
# tool_calls=[
# [{"args": {"city": "Tokyo"}, "id": "1", "name": "get_weather"}],
# [],
# ]
# )
# agent = create_agent(
# model,
# tools=[get_weather],
# prompt="You are a helpful travel assistant.",
# )
# # Create a subgraph that uses the React agent as a node
# def react_agent_node(state: MessagesState, config: RunnableConfig) -> MessagesState:
# """Node that runs the React agent and collects streaming output."""
# collected_content = ""
# # Stream the agent output and collect content
# for msg_chunk, _msg_metadata in agent.stream(
# {"messages": [("user", state["messages"][-1].content)]},
# config,
# stream_mode="messages",
# ):
# if hasattr(msg_chunk, "content") and msg_chunk.content:
# collected_content += msg_chunk.content
# return {"messages": [("assistant", collected_content)]}
# # Create the main workflow with the React agent as a subgraph node
# workflow = StateGraph(MessagesState)
# workflow.add_node("react_agent", react_agent_node)
# workflow.add_edge(START, "react_agent")
# workflow.add_edge("react_agent", "__end__")
# compiled_workflow = workflow.compile()
# # Test the streaming functionality
# result = compiled_workflow.invoke({"messages": [("user", "What is the weather in Tokyo?")]})
# # Verify the result contains expected structure
# assert len(result["messages"]) == 2
# assert result["messages"][0].content == "What is the weather in Tokyo?"
# assert "assistant" in str(result["messages"][1])
# # Test streaming with subgraphs = True
# result = compiled_workflow.invoke(
# {"messages": [("user", "What is the weather in Tokyo?")]},
# subgraphs=True,
# )
# assert len(result["messages"]) == 2
# events = []
# for event in compiled_workflow.stream(
# {"messages": [("user", "What is the weather in Tokyo?")]},
# stream_mode="messages",
# subgraphs=False,
# ):
# events.append(event)
# assert len(events) == 0
# events = []
# for event in compiled_workflow.stream(
# {"messages": [("user", "What is the weather in Tokyo?")]},
# stream_mode="messages",
# subgraphs=True,
# ):
# events.append(event)
# assert len(events) == 3
# namespace, (msg, metadata) = events[0]
# # FakeToolCallingModel returns a single AIMessage with tool calls
# # The content of the AIMessage reflects the input message
# assert msg.content.startswith("You are a helpful travel assistant")
# namespace, (msg, metadata) = events[1] # ToolMessage
# assert msg.content.startswith("The weather of Tokyo is sunny.")
# async def test_react_agent_subgraph_streaming() -> None:
# """Test React agent streaming when used as a subgraph node."""
# @dec_tool
# def get_weather(city: str) -> str:
# """Get the weather of a city."""
# return f"The weather of {city} is sunny."
# # Create a React agent
# model = FakeToolCallingModel(
# tool_calls=[
# [{"args": {"city": "Tokyo"}, "id": "1", "name": "get_weather"}],
# [],
# ]
# )
# agent = create_agent(
# model,
# tools=[get_weather],
# prompt="You are a helpful travel assistant.",
# )
# # Create a subgraph that uses the React agent as a node
# async def react_agent_node(state: MessagesState, config: RunnableConfig) -> MessagesState:
# """Node that runs the React agent and collects streaming output."""
# collected_content = ""
# # Stream the agent output and collect content
# async for msg_chunk, _msg_metadata in agent.astream(
# {"messages": [("user", state["messages"][-1].content)]},
# config,
# stream_mode="messages",
# ):
# if hasattr(msg_chunk, "content") and msg_chunk.content:
# collected_content += msg_chunk.content
# return {"messages": [("assistant", collected_content)]}
# # Create the main workflow with the React agent as a subgraph node
# workflow = StateGraph(MessagesState)
# workflow.add_node("react_agent", react_agent_node)
# workflow.add_edge(START, "react_agent")
# workflow.add_edge("react_agent", "__end__")
# compiled_workflow = workflow.compile()
# # Test the streaming functionality
# result = await compiled_workflow.ainvoke(
# {"messages": [("user", "What is the weather in Tokyo?")]}
# )
# # Verify the result contains expected structure
# assert len(result["messages"]) == 2
# assert result["messages"][0].content == "What is the weather in Tokyo?"
# assert "assistant" in str(result["messages"][1])
# # Test streaming with subgraphs = True
# result = await compiled_workflow.ainvoke(
# {"messages": [("user", "What is the weather in Tokyo?")]},
# subgraphs=True,
# )
# assert len(result["messages"]) == 2
# events = []
# async for event in compiled_workflow.astream(
# {"messages": [("user", "What is the weather in Tokyo?")]},
# stream_mode="messages",
# subgraphs=False,
# ):
# events.append(event)
# assert len(events) == 0
# events = []
# async for event in compiled_workflow.astream(
# {"messages": [("user", "What is the weather in Tokyo?")]},
# stream_mode="messages",
# subgraphs=True,
# ):
# events.append(event)
# assert len(events) == 3
# namespace, (msg, metadata) = events[0]
# # FakeToolCallingModel returns a single AIMessage with tool calls
# # The content of the AIMessage reflects the input message
# assert msg.content.startswith("You are a helpful travel assistant")
# namespace, (msg, metadata) = events[1] # ToolMessage
# assert msg.content.startswith("The weather of Tokyo is sunny.")
# def test_tool_node_node_interrupt(
# sync_checkpointer: BaseCheckpointSaver,
# ) -> None:
# def tool_normal(some_val: int) -> str:
# """Tool docstring."""
# return "normal"
# def tool_interrupt(some_val: int) -> str:
# """Tool docstring."""
# return interrupt("provide value for foo")
# # test inside react agent
# model = FakeToolCallingModel(
# tool_calls=[
# [
# ToolCall(name="tool_interrupt", args={"some_val": 0}, id="1"),
# ToolCall(name="tool_normal", args={"some_val": 1}, id="2"),
# ],
# [],
# ]
# )
# config = {"configurable": {"thread_id": "1"}}
# agent = create_agent(
# model,
# [tool_interrupt, tool_normal],
# checkpointer=sync_checkpointer,
# )
# result = agent.invoke({"messages": [HumanMessage("hi?")]}, config)
# expected_messages = [
# _AnyIdHumanMessage(content="hi?"),
# AIMessage(
# content="hi?",
# id="0",
# tool_calls=[
# {
# "name": "tool_interrupt",
# "args": {"some_val": 0},
# "id": "1",
# "type": "tool_call",
# },
# {
# "name": "tool_normal",
# "args": {"some_val": 1},
# "id": "2",
# "type": "tool_call",
# },
# ],
# ),
# _AnyIdToolMessage(content="normal", name="tool_normal", tool_call_id="2"),
# ]
# assert result["messages"] == expected_messages
# state = agent.get_state(config)
# assert state.next == ("tools",)
# task = state.tasks[0]
# assert task.name == "tools"
# assert task.interrupts == (
# Interrupt(
# value="provide value for foo",
# id=AnyStr(),
# ),
# )
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/tests/unit_tests/agents/test_react_agent.py",
"license": "MIT License",
"lines": 824,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/langchain_v1/tests/unit_tests/agents/test_response_format.py | """Test suite for create_agent with structured output response_format permutations."""
import json
from collections.abc import Callable, Sequence
from dataclasses import dataclass
from typing import Any
from unittest.mock import patch
import pytest
from langchain_core.language_models import LanguageModelInput
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.language_models.fake_chat_models import GenericFakeChatModel
from langchain_core.messages import HumanMessage
from langchain_core.runnables import Runnable
from pydantic import BaseModel, Field
from typing_extensions import TypedDict
from langchain.agents import create_agent
from langchain.agents.factory import _supports_provider_strategy
from langchain.agents.middleware.types import (
AgentMiddleware,
ModelCallResult,
ModelRequest,
ModelResponse,
)
from langchain.agents.structured_output import (
MultipleStructuredOutputsError,
ProviderStrategy,
StructuredOutputValidationError,
ToolStrategy,
)
from langchain.messages import AIMessage
from langchain.tools import BaseTool, tool
from tests.unit_tests.agents.model import FakeToolCallingModel
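# Note on the fake model used throughout (inferred from how these tests read, not
# a documented contract): FakeToolCallingModel appears to consume one batch from
# `tool_calls` per model turn, so e.g.
#
#     FakeToolCallingModel(tool_calls=[[call_a], []])  # call_a is hypothetical
#
# scripts a first turn that emits `call_a` and a second turn with no tool calls,
# which lets the agent loop terminate.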
# Test data models
class WeatherBaseModel(BaseModel):
"""Weather response."""
temperature: float = Field(description="The temperature in fahrenheit")
condition: str = Field(description="Weather condition")
@dataclass
class WeatherDataclass:
"""Weather response."""
temperature: float
condition: str
class WeatherTypedDict(TypedDict):
"""Weather response."""
temperature: float
condition: str
weather_json_schema = {
"type": "object",
"properties": {
"temperature": {"type": "number", "description": "Temperature in fahrenheit"},
"condition": {"type": "string", "description": "Weather condition"},
},
"title": "weather_schema",
"required": ["temperature", "condition"],
}
class LocationResponse(BaseModel):
city: str = Field(description="The city name")
country: str = Field(description="The country name")
class LocationTypedDict(TypedDict):
city: str
country: str
location_json_schema = {
"type": "object",
"properties": {
"city": {"type": "string", "description": "The city name"},
"country": {"type": "string", "description": "The country name"},
},
"title": "location_schema",
"required": ["city", "country"],
}
@tool
def get_weather() -> str:
"""Get the weather."""
return "The weather is sunny and 75°F."
@tool
def get_location() -> str:
"""Get the current location."""
return "You are in New York, USA."
# Standardized test data
WEATHER_DATA: dict[str, float | str] = {"temperature": 75.0, "condition": "sunny"}
LOCATION_DATA: dict[str, str] = {"city": "New York", "country": "USA"}
# Standardized expected responses
EXPECTED_WEATHER_PYDANTIC = WeatherBaseModel(temperature=75.0, condition="sunny")
EXPECTED_WEATHER_DATACLASS = WeatherDataclass(temperature=75.0, condition="sunny")
EXPECTED_WEATHER_DICT: WeatherTypedDict = {"temperature": 75.0, "condition": "sunny"}
EXPECTED_LOCATION = LocationResponse(city="New York", country="USA")
EXPECTED_LOCATION_DICT: LocationTypedDict = {"city": "New York", "country": "USA"}
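# The tool-strategy tests below (raw schemas resolve to ToolStrategy here)
# consistently assert five messages: the human prompt, an AI message with a
# regular tool call, its ToolMessage result, an AI message carrying the
# structured-output tool call, and a final synthetic ToolMessage acknowledging
# the structured output.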
class TestResponseFormatAsModel:
def test_pydantic_model(self) -> None:
"""Test response_format as Pydantic model."""
tool_calls = [
[{"args": {}, "id": "1", "name": "get_weather"}],
[
{
"name": "WeatherBaseModel",
"id": "2",
"args": WEATHER_DATA,
}
],
]
model = FakeToolCallingModel(tool_calls=tool_calls)
agent = create_agent(model, [get_weather], response_format=WeatherBaseModel)
response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})
assert response["structured_response"] == EXPECTED_WEATHER_PYDANTIC
assert len(response["messages"]) == 5
def test_dataclass(self) -> None:
"""Test response_format as dataclass."""
tool_calls = [
[{"args": {}, "id": "1", "name": "get_weather"}],
[
{
"name": "WeatherDataclass",
"id": "2",
"args": WEATHER_DATA,
}
],
]
model = FakeToolCallingModel(tool_calls=tool_calls)
agent = create_agent(model, [get_weather], response_format=WeatherDataclass)
response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})
assert response["structured_response"] == EXPECTED_WEATHER_DATACLASS
assert len(response["messages"]) == 5
def test_typed_dict(self) -> None:
"""Test response_format as TypedDict."""
tool_calls = [
[{"args": {}, "id": "1", "name": "get_weather"}],
[
{
"name": "WeatherTypedDict",
"id": "2",
"args": WEATHER_DATA,
}
],
]
model = FakeToolCallingModel(tool_calls=tool_calls)
agent = create_agent(model, [get_weather], response_format=WeatherTypedDict)
response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})
assert response["structured_response"] == EXPECTED_WEATHER_DICT
assert len(response["messages"]) == 5
def test_json_schema(self) -> None:
"""Test response_format as JSON schema."""
tool_calls = [
[{"args": {}, "id": "1", "name": "get_weather"}],
[
{
"name": "weather_schema",
"id": "2",
"args": WEATHER_DATA,
}
],
]
model = FakeToolCallingModel(tool_calls=tool_calls)
agent = create_agent(model, [get_weather], response_format=weather_json_schema)
response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})
assert response["structured_response"] == EXPECTED_WEATHER_DICT
assert len(response["messages"]) == 5
def test_autostrategy_with_anonymous_json_schema(self) -> None:
"""Test response_format as anonymous JSON schema (AutoStrategy).
Verifies that tool name mismatch is avoided when using AutoStrategy with
schemas that generate random names by ensuring the ToolStrategy instance
is reused during execution.
"""
anonymous_schema = {
"type": "object",
"properties": {
"result": {"type": "string"},
},
"required": ["result"],
}
with patch("langchain.agents.factory._supports_provider_strategy", return_value=False):
model = FakeToolCallingModel(tool_calls=[])
agent = create_agent(model, [], response_format=anonymous_schema)
# We expect a recursion error or similar because we didn't mock the tool call
# matching our anonymous schema, but it should NOT raise ValueError
# during the binding phase.
try:
agent.invoke({"messages": [HumanMessage("hi")]}, config={"recursion_limit": 1})
except ValueError as e:
if "which wasn't declared" in str(e):
pytest.fail(f"Tool name mismatch occurred: {e}")
except Exception: # noqa: S110
# Other exceptions mean we passed the binding phase
pass
class TestResponseFormatAsToolStrategy:
def test_pydantic_model(self) -> None:
"""Test response_format as ToolStrategy with Pydantic model."""
tool_calls = [
[{"args": {}, "id": "1", "name": "get_weather"}],
[
{
"name": "WeatherBaseModel",
"id": "2",
"args": WEATHER_DATA,
}
],
]
model = FakeToolCallingModel(tool_calls=tool_calls)
agent = create_agent(model, [get_weather], response_format=ToolStrategy(WeatherBaseModel))
response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})
assert response["structured_response"] == EXPECTED_WEATHER_PYDANTIC
assert len(response["messages"]) == 5
def test_dataclass(self) -> None:
"""Test response_format as ToolStrategy with dataclass."""
tool_calls = [
[{"args": {}, "id": "1", "name": "get_weather"}],
[
{
"name": "WeatherDataclass",
"id": "2",
"args": WEATHER_DATA,
}
],
]
model = FakeToolCallingModel(tool_calls=tool_calls)
agent = create_agent(model, [get_weather], response_format=ToolStrategy(WeatherDataclass))
response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})
assert response["structured_response"] == EXPECTED_WEATHER_DATACLASS
assert len(response["messages"]) == 5
def test_typed_dict(self) -> None:
"""Test response_format as ToolStrategy with TypedDict."""
tool_calls = [
[{"args": {}, "id": "1", "name": "get_weather"}],
[
{
"name": "WeatherTypedDict",
"id": "2",
"args": WEATHER_DATA,
}
],
]
model = FakeToolCallingModel(tool_calls=tool_calls)
agent = create_agent(model, [get_weather], response_format=ToolStrategy(WeatherTypedDict))
response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})
assert response["structured_response"] == EXPECTED_WEATHER_DICT
assert len(response["messages"]) == 5
def test_json_schema(self) -> None:
"""Test response_format as ToolStrategy with JSON schema."""
tool_calls = [
[{"args": {}, "id": "1", "name": "get_weather"}],
[
{
"name": "weather_schema",
"id": "2",
"args": WEATHER_DATA,
}
],
]
model = FakeToolCallingModel(tool_calls=tool_calls)
agent = create_agent(
model, [get_weather], response_format=ToolStrategy(weather_json_schema)
)
response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})
assert response["structured_response"] == EXPECTED_WEATHER_DICT
assert len(response["messages"]) == 5
def test_union_of_json_schemas(self) -> None:
"""Test response_format as ToolStrategy with union of JSON schemas."""
tool_calls = [
[{"args": {}, "id": "1", "name": "get_weather"}],
[
{
"name": "weather_schema",
"id": "2",
"args": WEATHER_DATA,
}
],
]
model = FakeToolCallingModel(tool_calls=tool_calls)
agent = create_agent(
model,
[get_weather, get_location],
response_format=ToolStrategy({"oneOf": [weather_json_schema, location_json_schema]}),
)
response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})
assert response["structured_response"] == EXPECTED_WEATHER_DICT
assert len(response["messages"]) == 5
# Test with LocationResponse
tool_calls_location = [
[{"args": {}, "id": "1", "name": "get_location"}],
[
{
"name": "location_schema",
"id": "2",
"args": LOCATION_DATA,
}
],
]
model_location = FakeToolCallingModel(tool_calls=tool_calls_location)
agent_location = create_agent(
model_location,
[get_weather, get_location],
response_format=ToolStrategy({"oneOf": [weather_json_schema, location_json_schema]}),
)
response_location = agent_location.invoke({"messages": [HumanMessage("Where am I?")]})
assert response_location["structured_response"] == EXPECTED_LOCATION_DICT
assert len(response_location["messages"]) == 5
def test_union_of_types(self) -> None:
"""Test response_format as ToolStrategy with Union of various types."""
# Test with WeatherBaseModel
tool_calls = [
[{"args": {}, "id": "1", "name": "get_weather"}],
[
{
"name": "WeatherBaseModel",
"id": "2",
"args": WEATHER_DATA,
}
],
]
model = FakeToolCallingModel(tool_calls=tool_calls)
agent = create_agent(
model,
[get_weather, get_location],
response_format=ToolStrategy(WeatherBaseModel | LocationResponse),
)
response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})
assert response["structured_response"] == EXPECTED_WEATHER_PYDANTIC
assert len(response["messages"]) == 5
# Test with LocationResponse
tool_calls_location = [
[{"args": {}, "id": "1", "name": "get_location"}],
[
{
"name": "LocationResponse",
"id": "2",
"args": LOCATION_DATA,
}
],
]
model_location = FakeToolCallingModel(tool_calls=tool_calls_location)
agent_location = create_agent(
model_location,
[get_weather, get_location],
response_format=ToolStrategy(WeatherBaseModel | LocationResponse),
)
response_location = agent_location.invoke({"messages": [HumanMessage("Where am I?")]})
assert response_location["structured_response"] == EXPECTED_LOCATION
assert len(response_location["messages"]) == 5
def test_multiple_structured_outputs_error_without_retry(self) -> None:
"""Test multiple structured outputs error without retry.
Test that MultipleStructuredOutputsError is raised when model returns multiple
structured tool calls without retry.
"""
tool_calls = [
[
{
"name": "WeatherBaseModel",
"id": "1",
"args": WEATHER_DATA,
},
{
"name": "LocationResponse",
"id": "2",
"args": LOCATION_DATA,
},
],
]
model = FakeToolCallingModel(tool_calls=tool_calls)
agent = create_agent(
model,
[],
response_format=ToolStrategy(
WeatherBaseModel | LocationResponse,
handle_errors=False,
),
)
with pytest.raises(
MultipleStructuredOutputsError,
match=r".*WeatherBaseModel.*LocationResponse.*",
):
agent.invoke({"messages": [HumanMessage("Give me weather and location")]})
def test_multiple_structured_outputs_with_retry(self) -> None:
"""Test that retry handles multiple structured output tool calls."""
tool_calls = [
[
{
"name": "WeatherBaseModel",
"id": "1",
"args": WEATHER_DATA,
},
{
"name": "LocationResponse",
"id": "2",
"args": LOCATION_DATA,
},
],
[
{
"name": "WeatherBaseModel",
"id": "3",
"args": WEATHER_DATA,
},
],
]
model = FakeToolCallingModel(tool_calls=tool_calls)
agent = create_agent(
model,
[],
response_format=ToolStrategy(
WeatherBaseModel | LocationResponse,
handle_errors=True,
),
)
response = agent.invoke({"messages": [HumanMessage("Give me weather")]})
# HumanMessage, AIMessage, ToolMessage, ToolMessage, AIMessage, ToolMessage
assert len(response["messages"]) == 6
assert response["structured_response"] == EXPECTED_WEATHER_PYDANTIC
def test_structured_output_parsing_error_without_retry(self) -> None:
"""Test structured output parsing error without retry.
Test that StructuredOutputValidationError is raised when tool args fail to parse
without retry.
"""
tool_calls = [
[
{
"name": "WeatherBaseModel",
"id": "1",
"args": {"invalid": "data"},
},
],
]
model = FakeToolCallingModel(tool_calls=tool_calls)
agent = create_agent(
model,
[],
response_format=ToolStrategy(
WeatherBaseModel,
handle_errors=False,
),
)
with pytest.raises(
StructuredOutputValidationError,
match=r".*WeatherBaseModel.*",
):
agent.invoke({"messages": [HumanMessage("What's the weather?")]})
def test_structured_output_parsing_error_with_retry(self) -> None:
"""Test that retry handles parsing errors for structured output."""
tool_calls = [
[
{
"name": "WeatherBaseModel",
"id": "1",
"args": {"invalid": "data"},
},
],
[
{
"name": "WeatherBaseModel",
"id": "2",
"args": WEATHER_DATA,
},
],
]
model = FakeToolCallingModel(tool_calls=tool_calls)
agent = create_agent(
model,
[],
response_format=ToolStrategy(
WeatherBaseModel,
handle_errors=(StructuredOutputValidationError,),
),
)
response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})
# HumanMessage, AIMessage, ToolMessage, AIMessage, ToolMessage
assert len(response["messages"]) == 5
assert response["structured_response"] == EXPECTED_WEATHER_PYDANTIC
def test_retry_with_custom_function(self) -> None:
"""Test retry with custom message generation."""
tool_calls = [
[
{
"name": "WeatherBaseModel",
"id": "1",
"args": WEATHER_DATA,
},
{
"name": "LocationResponse",
"id": "2",
"args": LOCATION_DATA,
},
],
[
{
"name": "WeatherBaseModel",
"id": "3",
"args": WEATHER_DATA,
},
],
]
model = FakeToolCallingModel(tool_calls=tool_calls)
def custom_message(exception: Exception) -> str:
if isinstance(exception, MultipleStructuredOutputsError):
return "Custom error: Multiple outputs not allowed"
return "Custom error"
agent = create_agent(
model,
[],
response_format=ToolStrategy(
WeatherBaseModel | LocationResponse,
handle_errors=custom_message,
),
)
response = agent.invoke({"messages": [HumanMessage("Give me weather")]})
# HumanMessage, AIMessage, ToolMessage, ToolMessage, AIMessage, ToolMessage
assert len(response["messages"]) == 6
assert response["messages"][2].content == "Custom error: Multiple outputs not allowed"
assert response["messages"][3].content == "Custom error: Multiple outputs not allowed"
assert response["structured_response"] == EXPECTED_WEATHER_PYDANTIC
def test_retry_with_custom_string_message(self) -> None:
"""Test retry with custom static string message."""
tool_calls = [
[
{
"name": "WeatherBaseModel",
"id": "1",
"args": {"invalid": "data"},
},
],
[
{
"name": "WeatherBaseModel",
"id": "2",
"args": WEATHER_DATA,
},
],
]
model = FakeToolCallingModel(tool_calls=tool_calls)
agent = create_agent(
model,
[],
response_format=ToolStrategy(
WeatherBaseModel,
handle_errors="Please provide valid weather data with temperature and condition.",
),
)
response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})
assert len(response["messages"]) == 5
assert (
response["messages"][2].content
== "Please provide valid weather data with temperature and condition."
)
assert response["structured_response"] == EXPECTED_WEATHER_PYDANTIC
def test_validation_error_with_invalid_response(self) -> None:
"""Test validation error with invalid response.
Test that StructuredOutputValidationError is raised when tool strategy receives
invalid response.
"""
tool_calls = [
[
{
"name": "WeatherBaseModel",
"id": "1",
"args": {"invalid_field": "wrong_data", "another_bad_field": 123},
},
],
]
model = FakeToolCallingModel(tool_calls=tool_calls)
agent = create_agent(
model,
[],
response_format=ToolStrategy(
WeatherBaseModel,
handle_errors=False, # Disable retry to ensure error is raised
),
)
with pytest.raises(
StructuredOutputValidationError,
match=r".*WeatherBaseModel.*",
):
agent.invoke({"messages": [HumanMessage("What's the weather?")]})
class TestResponseFormatAsProviderStrategy:
def test_pydantic_model(self) -> None:
"""Test response_format as ProviderStrategy with Pydantic model."""
tool_calls = [
[{"args": {}, "id": "1", "name": "get_weather"}],
]
model = FakeToolCallingModel(
tool_calls=tool_calls, structured_response=EXPECTED_WEATHER_PYDANTIC
)
agent = create_agent(
model, [get_weather], response_format=ProviderStrategy(WeatherBaseModel)
)
response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})
assert response["structured_response"] == EXPECTED_WEATHER_PYDANTIC
assert len(response["messages"]) == 4
def test_validation_error_with_invalid_response(self) -> None:
"""Test validation error with invalid response.
Test that StructuredOutputValidationError is raised when provider strategy
receives invalid response.
"""
tool_calls = [
[{"args": {}, "id": "1", "name": "get_weather"}],
]
# The fake structured response below deliberately doesn't match WeatherBaseModel's fields
model = FakeToolCallingModel(
tool_calls=tool_calls,
structured_response={"invalid": "data"}, # Wrong structure
)
agent = create_agent(
model, [get_weather], response_format=ProviderStrategy(WeatherBaseModel)
)
with pytest.raises(
StructuredOutputValidationError,
match=r".*WeatherBaseModel.*",
):
agent.invoke({"messages": [HumanMessage("What's the weather?")]})
def test_dataclass(self) -> None:
"""Test response_format as ProviderStrategy with dataclass."""
tool_calls = [
[{"args": {}, "id": "1", "name": "get_weather"}],
]
model = FakeToolCallingModel(
tool_calls=tool_calls, structured_response=EXPECTED_WEATHER_DATACLASS
)
agent = create_agent(
model, [get_weather], response_format=ProviderStrategy(WeatherDataclass)
)
response = agent.invoke(
{"messages": [HumanMessage("What's the weather?")]},
)
assert response["structured_response"] == EXPECTED_WEATHER_DATACLASS
assert len(response["messages"]) == 4
def test_typed_dict(self) -> None:
"""Test response_format as ProviderStrategy with TypedDict."""
tool_calls = [
[{"args": {}, "id": "1", "name": "get_weather"}],
]
model = FakeToolCallingModel(
tool_calls=tool_calls, structured_response=EXPECTED_WEATHER_DICT
)
agent = create_agent(
model, [get_weather], response_format=ProviderStrategy(WeatherTypedDict)
)
response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})
assert response["structured_response"] == EXPECTED_WEATHER_DICT
assert len(response["messages"]) == 4
def test_json_schema(self) -> None:
"""Test response_format as ProviderStrategy with JSON schema."""
tool_calls = [
[{"args": {}, "id": "1", "name": "get_weather"}],
]
model = FakeToolCallingModel(
tool_calls=tool_calls, structured_response=EXPECTED_WEATHER_DICT
)
agent = create_agent(
model, [get_weather], response_format=ProviderStrategy(weather_json_schema)
)
response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})
assert response["structured_response"] == EXPECTED_WEATHER_DICT
assert len(response["messages"]) == 4
class TestDynamicModelWithResponseFormat:
"""Test response_format with middleware that modifies the model."""
def test_middleware_model_swap_provider_to_tool_strategy(self) -> None:
"""Test that strategy resolution is deferred until after middleware modifies the model.
Verifies that when a raw schema is provided, `_supports_provider_strategy` is called
on the middleware-modified model (not the original), ensuring the correct strategy is
selected based on the final model's capabilities.
"""
# Custom model that we'll use to test whether the tool strategy is applied
# correctly at runtime.
class CustomModel(GenericFakeChatModel):
tool_bindings: list[Any] = Field(default_factory=list)
def bind_tools(
self,
tools: Sequence[dict[str, Any] | type[BaseModel] | Callable[..., Any] | BaseTool],
**kwargs: Any,
) -> Runnable[LanguageModelInput, AIMessage]:
# Record every tool binding event.
self.tool_bindings.append(tools)
return self
model = CustomModel(
messages=iter(
[
# Simulate model returning structured output directly
# (this is what provider strategy would do)
json.dumps(WEATHER_DATA),
]
)
)
# Create middleware that swaps the model in the request
class ModelSwappingMiddleware(AgentMiddleware):
def wrap_model_call(
self,
request: ModelRequest,
handler: Callable[[ModelRequest], ModelResponse],
) -> ModelCallResult:
# Replace the model with our custom test model
return handler(request.override(model=model))
# Track which model is checked for provider strategy support
calls = []
def mock_supports_provider_strategy(
model: str | BaseChatModel, tools: list[Any] | None = None
) -> bool:
"""Track which model is checked and return True for ProviderStrategy."""
calls.append(model)
return True
# Use raw Pydantic model (not wrapped in ToolStrategy or ProviderStrategy)
# This should auto-detect strategy based on model capabilities
agent = create_agent(
model=model,
tools=[],
# Raw schema - should auto-detect strategy
response_format=WeatherBaseModel,
middleware=[ModelSwappingMiddleware()],
)
with patch(
"langchain.agents.factory._supports_provider_strategy",
side_effect=mock_supports_provider_strategy,
):
response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})
# Verify strategy resolution was deferred: check was called once during _get_bound_model
assert len(calls) == 1
# Verify successful parsing of JSON as structured output via ProviderStrategy
assert response["structured_response"] == EXPECTED_WEATHER_PYDANTIC
# Two messages: Human input message and AI response with JSON content
assert len(response["messages"]) == 2
ai_message = response["messages"][1]
assert isinstance(ai_message, AIMessage)
# ProviderStrategy doesn't use tool calls - it parses content directly
assert ai_message.tool_calls == []
assert ai_message.content == json.dumps(WEATHER_DATA)
def test_union_of_types() -> None:
"""Test response_format as ToolStrategy with a union of types at module level."""
tool_calls = [
[{"args": {}, "id": "1", "name": "get_weather"}],
[
{
"name": "WeatherBaseModel",
"id": "2",
"args": WEATHER_DATA,
}
],
]
model = FakeToolCallingModel(
tool_calls=tool_calls, structured_response=EXPECTED_WEATHER_PYDANTIC
)
agent = create_agent(
model,
[get_weather, get_location],
response_format=ToolStrategy(WeatherBaseModel | LocationResponse),
)
response = agent.invoke({"messages": [HumanMessage("What's the weather?")]})
assert response["structured_response"] == EXPECTED_WEATHER_PYDANTIC
assert len(response["messages"]) == 5
class TestSupportsProviderStrategy:
"""Unit tests for `_supports_provider_strategy`."""
@staticmethod
def _make_structured_model(model_name: str):
class GeminiTestChatModel(GenericFakeChatModel):
model_name: str
return GeminiTestChatModel(
messages=iter(
[
AIMessage(content="test-response"),
]
),
profile={"structured_output": True},
model_name=model_name,
)
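# A hedged reading of the helper above: profile={"structured_output": True}
# advertises native structured-output support, and _supports_provider_strategy
# then appears to gate further on the model name (e.g. Gemini 2 vs 3 below).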
def test_blocks_gemini_v2_with_tools(self) -> None:
"""Gemini 2 series models cannot use provider strategy with tools."""
model = self._make_structured_model("gemini-2.5-flash")
assert not _supports_provider_strategy(model, tools=[get_weather])
def test_allows_gemini_v3_with_tools(self) -> None:
"""Gemini 3 series models support structured output alongside tools."""
model = self._make_structured_model("gemini-3-pro-preview")
assert _supports_provider_strategy(model, tools=[get_weather])
@pytest.mark.parametrize(
"alias",
[
"gemini-flash-latest",
"gemini-flash-lite-latest",
],
)
def test_blocks_gemini_latest_aliases(self, alias: str) -> None:
"""Latest aliases stay blocked until they point to Gemini 3."""
model = self._make_structured_model(alias)
assert not _supports_provider_strategy(model, tools=[get_weather])
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/tests/unit_tests/agents/test_response_format.py",
"license": "MIT License",
"lines": 776,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/langchain_v1/tests/unit_tests/agents/test_responses.py | """Unit tests for langchain.agents.structured_output module."""
import pytest
from langchain_core.messages import AIMessage
from pydantic import BaseModel
from langchain.agents.structured_output import (
OutputToolBinding,
ProviderStrategy,
ProviderStrategyBinding,
ToolStrategy,
_SchemaSpec,
)
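# Rough division of labor exercised below: OutputToolBinding turns a schema spec
# into a synthetic tool (ToolStrategy path), while ProviderStrategyBinding parses
# structured output directly from an AIMessage's content (provider path).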
class _TestModel(BaseModel):
"""A test model for structured output."""
name: str
age: int
email: str = "default@example.com"
class CustomModel(BaseModel):
"""Custom model with a custom docstring."""
value: float
description: str
class EmptyDocModel(BaseModel):
# No custom docstring, should have no description in tool
data: str
class TestToolStrategy:
"""Test ToolStrategy dataclass."""
def test_basic_creation(self) -> None:
"""Test basic ToolStrategy creation."""
strategy = ToolStrategy(schema=_TestModel)
assert strategy.schema == _TestModel
assert strategy.tool_message_content is None
assert len(strategy.schema_specs) == 1
assert strategy.schema_specs[0].schema == _TestModel
def test_multiple_schemas(self) -> None:
"""Test ToolStrategy with multiple schemas."""
strategy = ToolStrategy(schema=_TestModel | CustomModel)
assert len(strategy.schema_specs) == 2
assert strategy.schema_specs[0].schema == _TestModel
assert strategy.schema_specs[1].schema == CustomModel
def test_schema_with_tool_message_content(self) -> None:
"""Test ToolStrategy with tool message content."""
strategy = ToolStrategy(schema=_TestModel, tool_message_content="custom message")
assert strategy.schema == _TestModel
assert strategy.tool_message_content == "custom message"
assert len(strategy.schema_specs) == 1
assert strategy.schema_specs[0].schema == _TestModel
class TestProviderStrategy:
"""Test ProviderStrategy dataclass."""
def test_basic_creation(self) -> None:
"""Test basic ProviderStrategy creation."""
strategy = ProviderStrategy(schema=_TestModel)
assert strategy.schema == _TestModel
assert strategy.schema_spec.schema == _TestModel
assert strategy.schema_spec.strict is None
def test_strict(self) -> None:
"""Test ProviderStrategy creation with strict=True."""
strategy = ProviderStrategy(schema=_TestModel, strict=True)
assert strategy.schema == _TestModel
assert strategy.schema_spec.schema == _TestModel
assert strategy.schema_spec.strict is True
def test_to_model_kwargs(self) -> None:
strategy_default = ProviderStrategy(schema=_TestModel)
assert strategy_default.to_model_kwargs() == {
"response_format": {
"json_schema": {
"name": "_TestModel",
"schema": {
"description": "A test model for structured output.",
"properties": {
"age": {"title": "Age", "type": "integer"},
"email": {
"default": "default@example.com",
"title": "Email",
"type": "string",
},
"name": {"title": "Name", "type": "string"},
},
"required": ["name", "age"],
"title": "_TestModel",
"type": "object",
},
},
"type": "json_schema",
}
}
def test_to_model_kwargs_strict(self) -> None:
strategy_default = ProviderStrategy(schema=_TestModel, strict=True)
assert strategy_default.to_model_kwargs() == {
"response_format": {
"json_schema": {
"name": "_TestModel",
"schema": {
"description": "A test model for structured output.",
"properties": {
"age": {"title": "Age", "type": "integer"},
"email": {
"default": "default@example.com",
"title": "Email",
"type": "string",
},
"name": {"title": "Name", "type": "string"},
},
"required": ["name", "age"],
"title": "_TestModel",
"type": "object",
},
"strict": True,
},
"type": "json_schema",
}
}
class TestOutputToolBinding:
"""Test OutputToolBinding dataclass and its methods."""
def test_from_schema_spec_basic(self) -> None:
"""Test basic OutputToolBinding creation from SchemaSpec."""
schema_spec = _SchemaSpec(schema=_TestModel)
tool_binding = OutputToolBinding.from_schema_spec(schema_spec)
assert tool_binding.schema == _TestModel
assert tool_binding.schema_kind == "pydantic"
assert tool_binding.tool is not None
assert tool_binding.tool.name == "_TestModel"
def test_from_schema_spec_with_custom_name(self) -> None:
"""Test OutputToolBinding creation with custom name."""
schema_spec = _SchemaSpec(schema=_TestModel, name="custom_tool_name")
tool_binding = OutputToolBinding.from_schema_spec(schema_spec)
assert tool_binding.tool.name == "custom_tool_name"
def test_from_schema_spec_with_custom_description(self) -> None:
"""Test OutputToolBinding creation with custom description."""
schema_spec = _SchemaSpec(schema=_TestModel, description="Custom tool description")
tool_binding = OutputToolBinding.from_schema_spec(schema_spec)
assert tool_binding.tool.description == "Custom tool description"
def test_from_schema_spec_with_model_docstring(self) -> None:
"""Test OutputToolBinding creation using model docstring as description."""
schema_spec = _SchemaSpec(schema=CustomModel)
tool_binding = OutputToolBinding.from_schema_spec(schema_spec)
assert tool_binding.tool.description == "Custom model with a custom docstring."
def test_from_schema_spec_empty_docstring(self) -> None:
"""Test OutputToolBinding creation with model that has default docstring."""
# Create a model with the same docstring as BaseModel
class DefaultDocModel(BaseModel):
# This should have the same docstring as BaseModel
pass
schema_spec = _SchemaSpec(schema=DefaultDocModel)
tool_binding = OutputToolBinding.from_schema_spec(schema_spec)
# Should use empty description when model has default BaseModel docstring
assert not tool_binding.tool.description
def test_parse_payload_pydantic_success(self) -> None:
"""Test successful parsing for Pydantic model."""
schema_spec = _SchemaSpec(schema=_TestModel)
tool_binding = OutputToolBinding.from_schema_spec(schema_spec)
tool_args = {"name": "John", "age": 30}
result = tool_binding.parse(tool_args)
assert isinstance(result, _TestModel)
assert result.name == "John"
assert result.age == 30
assert result.email == "default@example.com" # default value
def test_parse_payload_pydantic_validation_error(self) -> None:
"""Test parsing failure for invalid Pydantic data."""
schema_spec = _SchemaSpec(schema=_TestModel)
tool_binding = OutputToolBinding.from_schema_spec(schema_spec)
# Missing required field 'name'
tool_args = {"age": 30}
with pytest.raises(ValueError, match="Failed to parse data to _TestModel"):
tool_binding.parse(tool_args)
class TestProviderStrategyBinding:
"""Test ProviderStrategyBinding dataclass and its methods."""
def test_from_schema_spec_basic(self) -> None:
"""Test basic ProviderStrategyBinding creation from SchemaSpec."""
schema_spec = _SchemaSpec(schema=_TestModel)
tool_binding = ProviderStrategyBinding.from_schema_spec(schema_spec)
assert tool_binding.schema == _TestModel
assert tool_binding.schema_kind == "pydantic"
def test_parse_payload_pydantic_success(self) -> None:
"""Test successful parsing for Pydantic model."""
schema_spec = _SchemaSpec(schema=_TestModel)
tool_binding = ProviderStrategyBinding.from_schema_spec(schema_spec)
message = AIMessage(content='{"name": "John", "age": 30}')
result = tool_binding.parse(message)
assert isinstance(result, _TestModel)
assert result.name == "John"
assert result.age == 30
assert result.email == "default@example.com" # default value
def test_parse_payload_pydantic_validation_error(self) -> None:
"""Test parsing failure for invalid Pydantic data."""
schema_spec = _SchemaSpec(schema=_TestModel)
tool_binding = ProviderStrategyBinding.from_schema_spec(schema_spec)
# Missing required field 'name'
message = AIMessage(content='{"age": 30}')
with pytest.raises(ValueError, match="Failed to parse data to _TestModel"):
tool_binding.parse(message)
def test_parse_payload_pydantic_json_error(self) -> None:
"""Test parsing failure for invalid JSON data."""
schema_spec = _SchemaSpec(schema=_TestModel)
tool_binding = ProviderStrategyBinding.from_schema_spec(schema_spec)
message = AIMessage(content="invalid json")
with pytest.raises(
ValueError,
match="Native structured output expected valid JSON for _TestModel, but parsing failed",
):
tool_binding.parse(message)
def test_parse_content_list(self) -> None:
"""Test successful parsing for Pydantic model with content as list."""
schema_spec = _SchemaSpec(schema=_TestModel)
tool_binding = ProviderStrategyBinding.from_schema_spec(schema_spec)
message = AIMessage(
content=['{"name":', {"content": ' "John",'}, {"type": "text", "text": ' "age": 30}'}]
)
result = tool_binding.parse(message)
assert isinstance(result, _TestModel)
assert result.name == "John"
assert result.age == 30
assert result.email == "default@example.com" # default value
class TestEdgeCases:
"""Test edge cases and error conditions."""
def test_single_schema(self) -> None:
"""Test ToolStrategy with a single schema creates one schema spec."""
strategy = ToolStrategy(EmptyDocModel)
assert len(strategy.schema_specs) == 1
def test_empty_docstring_model(self) -> None:
"""Test that models without explicit docstrings have empty tool descriptions."""
binding = OutputToolBinding.from_schema_spec(_SchemaSpec(EmptyDocModel))
assert binding.tool.name == "EmptyDocModel"
assert not binding.tool.description
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/tests/unit_tests/agents/test_responses.py",
"license": "MIT License",
"lines": 222,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/langchain_v1/tests/unit_tests/agents/test_responses_spec.py | from __future__ import annotations
import os
from typing import (
TYPE_CHECKING,
Any,
)
from unittest.mock import MagicMock
import httpx
import pytest
from langchain_core.messages import HumanMessage
from langchain_core.tools import tool
from pydantic import BaseModel, create_model
from langchain.agents import create_agent
from langchain.agents.structured_output import (
ToolStrategy,
)
from tests.unit_tests.agents.utils import BaseSchema, load_spec
if TYPE_CHECKING:
from collections.abc import Callable
try:
from langchain_openai import ChatOpenAI
except ImportError:
skip_openai_integration_tests = True
else:
skip_openai_integration_tests = "OPENAI_API_KEY" not in os.environ
AGENT_PROMPT = "You are an HR assistant."
class ToolCalls(BaseSchema):
get_employee_role: int
get_employee_department: int
class AssertionByInvocation(BaseSchema):
prompt: str
tools_with_expected_calls: ToolCalls
expected_last_message: str
expected_structured_response: dict[str, Any] | None
llm_request_count: int
class TestCase(BaseSchema):
name: str
response_format: dict[str, Any] | list[dict[str, Any]]
assertions_by_invocation: list[AssertionByInvocation]
class Employee(BaseModel):
name: str
role: str
department: str
EMPLOYEES: list[Employee] = [
Employee(name="Sabine", role="Developer", department="IT"),
Employee(name="Henrik", role="Product Manager", department="IT"),
Employee(name="Jessica", role="HR", department="People"),
]
TEST_CASES = load_spec("responses", as_model=TestCase)
def _make_tool(fn: Callable[..., str | None], *, name: str, description: str) -> dict[str, Any]:
mock = MagicMock(side_effect=lambda *, name: fn(name=name))
input_model = create_model(f"{name}_input", name=(str, ...))
@tool(name, description=description, args_schema=input_model)
def _wrapped(name: str) -> Any:
return mock(name=name)
return {"tool": _wrapped, "mock": mock}
@pytest.mark.skipif(skip_openai_integration_tests, reason="OpenAI integration tests are disabled.")
@pytest.mark.parametrize("case", TEST_CASES, ids=[c.name for c in TEST_CASES])
def test_responses_integration_matrix(case: TestCase) -> None:
if case.name == "asking for information that does not fit into the response format":
pytest.xfail(
"currently failing due to undefined behavior when model cannot conform to "
"any of the structured response formats."
)
def get_employee_role(*, name: str) -> str | None:
for e in EMPLOYEES:
if e.name == name:
return e.role
return None
def get_employee_department(*, name: str) -> str | None:
for e in EMPLOYEES:
if e.name == name:
return e.department
return None
role_tool = _make_tool(
get_employee_role,
name="get_employee_role",
description="Get the employee role by name",
)
dept_tool = _make_tool(
get_employee_department,
name="get_employee_department",
description="Get the employee department by name",
)
response_format_spec = case.response_format
if isinstance(response_format_spec, dict):
response_format_spec = [response_format_spec]
# Unwrap nested schema objects
response_format_spec = [item.get("schema", item) for item in response_format_spec]
if len(response_format_spec) == 1:
tool_output = ToolStrategy(response_format_spec[0])
else:
tool_output = ToolStrategy({"oneOf": response_format_spec})
llm_request_count = 0
for assertion in case.assertions_by_invocation:
def on_request(_request: httpx.Request) -> None:
nonlocal llm_request_count
llm_request_count += 1
http_client = httpx.Client(
event_hooks={"request": [on_request]},
)
model = ChatOpenAI(
model="gpt-4o",
temperature=0,
http_client=http_client,
)
agent = create_agent(
model,
tools=[role_tool["tool"], dept_tool["tool"]],
system_prompt=AGENT_PROMPT,
response_format=tool_output,
)
result = agent.invoke({"messages": [HumanMessage(assertion.prompt)]})
# Count tool calls
assert role_tool["mock"].call_count == assertion.tools_with_expected_calls.get_employee_role
assert (
dept_tool["mock"].call_count
== assertion.tools_with_expected_calls.get_employee_department
)
# Count LLM calls
assert llm_request_count == assertion.llm_request_count
# Check last message content
last_message = result["messages"][-1]
assert last_message.content == assertion.expected_last_message
# Check structured response
structured_response_json = result["structured_response"]
assert structured_response_json == assertion.expected_structured_response
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/tests/unit_tests/agents/test_responses_spec.py",
"license": "MIT License",
"lines": 127,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/langchain_v1/tests/unit_tests/agents/test_return_direct_spec.py | from __future__ import annotations
import os
from typing import (
Any,
)
from unittest.mock import MagicMock
import pytest
from langchain_core.messages import HumanMessage
from langchain_core.tools import tool
from langchain.agents import create_agent
from langchain.agents.structured_output import (
ToolStrategy,
)
from tests.unit_tests.agents.utils import BaseSchema, load_spec
try:
from langchain_openai import ChatOpenAI
except ImportError:
skip_openai_integration_tests = True
else:
skip_openai_integration_tests = "OPENAI_API_KEY" not in os.environ
AGENT_PROMPT = """
You are a strict polling bot.
- Only use the "poll_job" tool until it returns { status: "succeeded" }.
- If status is "pending", call the tool again. Do not produce a final answer.
- When it is "succeeded", return exactly: "Attempts: <number>" with no extra text.
"""
class TestCase(BaseSchema):
name: str
return_direct: bool
response_format: dict[str, Any] | None
expected_tool_calls: int
expected_last_message: str
expected_structured_response: dict[str, Any] | None
TEST_CASES = load_spec("return_direct", as_model=TestCase)
def _make_tool(*, return_direct: bool) -> dict[str, Any]:
attempts = 0
def _side_effect() -> dict[str, Any]:
nonlocal attempts
attempts += 1
return {
"status": "succeeded" if attempts >= 10 else "pending",
"attempts": attempts,
}
mock = MagicMock(side_effect=_side_effect)
@tool(
"pollJob",
description=(
"Check the status of a long-running job. "
"Returns { status: 'pending' | 'succeeded', attempts: number }."
),
return_direct=return_direct,
)
def _wrapped() -> Any:
return mock()
return {"tool": _wrapped, "mock": mock}
@pytest.mark.skipif(skip_openai_integration_tests, reason="OpenAI integration tests are disabled.")
@pytest.mark.parametrize("case", TEST_CASES, ids=[c.name for c in TEST_CASES])
def test_return_direct_integration_matrix(case: TestCase) -> None:
poll_tool = _make_tool(return_direct=case.return_direct)
model = ChatOpenAI(
model="gpt-4o",
temperature=0,
)
if case.response_format:
agent = create_agent(
model,
tools=[poll_tool["tool"]],
system_prompt=AGENT_PROMPT,
response_format=ToolStrategy(case.response_format),
)
else:
agent = create_agent(
model,
tools=[poll_tool["tool"]],
system_prompt=AGENT_PROMPT,
)
result = agent.invoke(
{
"messages": [
HumanMessage("Poll the job until it's done and tell me how many attempts it took.")
]
}
)
# Count tool calls
assert poll_tool["mock"].call_count == case.expected_tool_calls
# Check last message content
last_message = result["messages"][-1]
assert last_message.content == case.expected_last_message
# Check structured response
if case.expected_structured_response is not None:
structured_response_json = result["structured_response"]
assert structured_response_json == case.expected_structured_response
else:
assert "structured_response" not in result
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/tests/unit_tests/agents/test_return_direct_spec.py",
"license": "MIT License",
"lines": 94,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/langchain_v1/tests/unit_tests/agents/utils.py | import json
from pathlib import Path
from typing import TypeVar
from pydantic import BaseModel, ConfigDict
from pydantic.alias_generators import to_camel
class BaseSchema(BaseModel):
model_config = ConfigDict(
alias_generator=to_camel,
populate_by_name=True,
from_attributes=True,
)
_T = TypeVar("_T", bound=BaseModel)
def load_spec(spec_name: str, as_model: type[_T]) -> list[_T]:
with (Path(__file__).parent / "specifications" / f"{spec_name}.json").open(
"r", encoding="utf-8"
) as f:
data = json.load(f)
return [as_model(**item) for item in data]
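# Illustrative usage (assumes a matching spec file exists under
# tests/unit_tests/agents/specifications/, e.g. "return_direct.json"):
#
#     cases = load_spec("return_direct", as_model=SomeTestCase)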
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/tests/unit_tests/agents/utils.py",
"license": "MIT License",
"lines": 18,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/partners/anthropic/tests/unit_tests/test_client_utils.py | """Test client utility functions."""
from __future__ import annotations
from langchain_anthropic._client_utils import (
_get_default_async_httpx_client,
_get_default_httpx_client,
)
def test_sync_client_without_proxy() -> None:
"""Test sync client creation without proxy."""
client = _get_default_httpx_client(base_url="https://api.anthropic.com")
# Should not have proxy configured
assert not hasattr(client, "proxies") or client.proxies is None
def test_sync_client_with_proxy() -> None:
"""Test sync client creation with proxy."""
proxy_url = "http://proxy.example.com:8080"
client = _get_default_httpx_client(
base_url="https://api.anthropic.com", anthropic_proxy=proxy_url
)
# Check internal _transport since httpx stores proxy configuration in the transport
# layer
transport = getattr(client, "_transport", None)
assert transport is not None
def test_async_client_without_proxy() -> None:
"""Test async client creation without proxy."""
client = _get_default_async_httpx_client(base_url="https://api.anthropic.com")
assert not hasattr(client, "proxies") or client.proxies is None
def test_async_client_with_proxy() -> None:
"""Test async client creation with proxy."""
proxy_url = "http://proxy.example.com:8080"
client = _get_default_async_httpx_client(
base_url="https://api.anthropic.com", anthropic_proxy=proxy_url
)
transport = getattr(client, "_transport", None)
assert transport is not None
def test_client_proxy_none_value() -> None:
"""Test that explicitly passing None for proxy works correctly."""
sync_client = _get_default_httpx_client(
base_url="https://api.anthropic.com", anthropic_proxy=None
)
async_client = _get_default_async_httpx_client(
base_url="https://api.anthropic.com", anthropic_proxy=None
)
# Both should be created successfully with None proxy
assert sync_client is not None
assert async_client is not None
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/partners/anthropic/tests/unit_tests/test_client_utils.py",
"license": "MIT License",
"lines": 44,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/partners/openai/langchain_openai/tools/custom_tool.py | """Custom tool decorator for OpenAI custom tools."""
import inspect
from collections.abc import Awaitable, Callable
from typing import Any
from langchain_core.tools import tool
def _make_wrapped_func(func: Callable[..., str]) -> Callable[..., list[dict[str, Any]]]:
def wrapped(x: str) -> list[dict[str, Any]]:
return [{"type": "custom_tool_call_output", "output": func(x)}]
return wrapped
def _make_wrapped_coroutine(
coroutine: Callable[..., Awaitable[str]],
) -> Callable[..., Awaitable[list[dict[str, Any]]]]:
async def wrapped(*args: Any, **kwargs: Any) -> list[dict[str, Any]]:
result = await coroutine(*args, **kwargs)
return [{"type": "custom_tool_call_output", "output": result}]
return wrapped
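# Both wrappers normalize a tool's string result into the content-block shape
# used for OpenAI custom tool outputs; e.g. a tool returning "ab" is surfaced
# as [{"type": "custom_tool_call_output", "output": "ab"}].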
def custom_tool(*args: Any, **kwargs: Any) -> Any:
"""Decorator to create an OpenAI custom tool.
Custom tools allow for tools with (potentially long) freeform string inputs.
See below for an example using LangGraph:
```python
    from langchain_openai import ChatOpenAI, custom_tool
    from langgraph.prebuilt import create_react_agent


    @custom_tool
def execute_code(code: str) -> str:
\"\"\"Execute python code.\"\"\"
return "27"
model = ChatOpenAI(model="gpt-5", output_version="responses/v1")
agent = create_react_agent(model, [execute_code])
input_message = {"role": "user", "content": "Use the tool to calculate 3^3."}
for step in agent.stream(
{"messages": [input_message]},
stream_mode="values",
):
step["messages"][-1].pretty_print()
```
You can also specify a format for a corresponding context-free grammar using the
`format` kwarg:
```python
from langchain_openai import ChatOpenAI, custom_tool
from langgraph.prebuilt import create_react_agent
grammar = \"\"\"
start: expr
expr: term (SP ADD SP term)* -> add
| term
term: factor (SP MUL SP factor)* -> mul
| factor
factor: INT
SP: " "
ADD: "+"
MUL: "*"
%import common.INT
\"\"\"
format = {"type": "grammar", "syntax": "lark", "definition": grammar}
# highlight-next-line
@custom_tool(format=format)
def do_math(input_string: str) -> str:
\"\"\"Do a mathematical operation.\"\"\"
return "27"
model = ChatOpenAI(model="gpt-5", output_version="responses/v1")
agent = create_react_agent(model, [do_math])
input_message = {"role": "user", "content": "Use the tool to calculate 3^3."}
for step in agent.stream(
{"messages": [input_message]},
stream_mode="values",
):
step["messages"][-1].pretty_print()
```
"""
def decorator(func: Callable[..., Any]) -> Any:
metadata = {"type": "custom_tool"}
if "format" in kwargs:
metadata["format"] = kwargs.pop("format")
tool_obj = tool(infer_schema=False, **kwargs)(func)
tool_obj.metadata = metadata
tool_obj.description = func.__doc__
if inspect.iscoroutinefunction(func):
tool_obj.coroutine = _make_wrapped_coroutine(func)
else:
tool_obj.func = _make_wrapped_func(func)
return tool_obj
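    # Support both bare `@custom_tool` usage (the function arrives as the sole
    # positional arg) and parameterized `@custom_tool(format=...)` usage.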
if args and callable(args[0]) and not kwargs:
return decorator(args[0])
return decorator
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/partners/openai/langchain_openai/tools/custom_tool.py",
"license": "MIT License",
"lines": 82,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
langchain-ai/langchain:libs/partners/openai/tests/unit_tests/test_tools.py | from langchain_core.messages import AIMessage, HumanMessage, ToolMessage
from langchain_core.tools import Tool
from langchain_openai import ChatOpenAI, custom_tool
def test_custom_tool() -> None:
@custom_tool
def my_tool(x: str) -> str:
"""Do thing."""
return "a" + x
# Test decorator
assert isinstance(my_tool, Tool)
assert my_tool.metadata == {"type": "custom_tool"}
assert my_tool.description == "Do thing."
result = my_tool.invoke(
{
"type": "tool_call",
"name": "my_tool",
"args": {"whatever": "b"},
"id": "abc",
"extras": {"type": "custom_tool_call"},
}
)
assert result == ToolMessage(
[{"type": "custom_tool_call_output", "output": "ab"}],
name="my_tool",
tool_call_id="abc",
)
# Test tool schema
## Test with format
@custom_tool(format={"type": "grammar", "syntax": "lark", "definition": "..."})
def another_tool(x: str) -> None:
"""Do thing."""
llm = ChatOpenAI(
model="gpt-4.1", use_responses_api=True, output_version="responses/v1"
).bind_tools([another_tool])
assert llm.kwargs == { # type: ignore[attr-defined]
"tools": [
{
"type": "custom",
"name": "another_tool",
"description": "Do thing.",
"format": {"type": "grammar", "syntax": "lark", "definition": "..."},
}
]
}
llm = ChatOpenAI(
model="gpt-4.1", use_responses_api=True, output_version="responses/v1"
).bind_tools([my_tool])
assert llm.kwargs == { # type: ignore[attr-defined]
"tools": [{"type": "custom", "name": "my_tool", "description": "Do thing."}]
}
# Test passing messages back
message_history = [
HumanMessage("Use the tool"),
AIMessage(
[
{
"type": "custom_tool_call",
"id": "ctc_abc123",
"call_id": "abc",
"name": "my_tool",
"input": "a",
}
],
tool_calls=[
{
"type": "tool_call",
"name": "my_tool",
"args": {"__arg1": "a"},
"id": "abc",
}
],
),
result,
]
payload = llm._get_request_payload(message_history) # type: ignore[attr-defined]
expected_input = [
{"content": "Use the tool", "role": "user"},
{
"type": "custom_tool_call",
"id": "ctc_abc123",
"call_id": "abc",
"name": "my_tool",
"input": "a",
},
{"type": "custom_tool_call_output", "call_id": "abc", "output": "ab"},
]
assert payload["input"] == expected_input
async def test_async_custom_tool() -> None:
@custom_tool
async def my_async_tool(x: str) -> str:
"""Do async thing."""
return "a" + x
# Test decorator
assert isinstance(my_async_tool, Tool)
assert my_async_tool.metadata == {"type": "custom_tool"}
assert my_async_tool.description == "Do async thing."
result = await my_async_tool.ainvoke(
{
"type": "tool_call",
"name": "my_async_tool",
"args": {"whatever": "b"},
"id": "abc",
"extras": {"type": "custom_tool_call"},
}
)
assert result == ToolMessage(
[{"type": "custom_tool_call_output", "output": "ab"}],
name="my_async_tool",
tool_call_id="abc",
)
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/partners/openai/tests/unit_tests/test_tools.py",
"license": "MIT License",
"lines": 110,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/partners/openai/tests/unit_tests/chat_models/test_prompt_cache_key.py | """Unit tests for prompt_cache_key parameter."""
from langchain_core.messages import HumanMessage
from langchain_openai import ChatOpenAI
def test_prompt_cache_key_parameter_inclusion() -> None:
"""Test that prompt_cache_key parameter is properly included in request payload."""
chat = ChatOpenAI(model="gpt-4o-mini", max_completion_tokens=10)
messages = [HumanMessage("Hello")]
payload = chat._get_request_payload(messages, prompt_cache_key="test-cache-key")
assert "prompt_cache_key" in payload
assert payload["prompt_cache_key"] == "test-cache-key"
def test_prompt_cache_key_parameter_exclusion() -> None:
"""Test that prompt_cache_key parameter behavior matches OpenAI API."""
chat = ChatOpenAI(model="gpt-4o-mini", max_completion_tokens=10)
messages = [HumanMessage("Hello")]
    # Test with explicit None (the OpenAI API accepts None values, since the
    # parameter is marked Optional)
payload = chat._get_request_payload(messages, prompt_cache_key=None)
assert "prompt_cache_key" in payload
assert payload["prompt_cache_key"] is None
def test_prompt_cache_key_per_call() -> None:
"""Test that prompt_cache_key can be passed per-call with different values."""
chat = ChatOpenAI(model="gpt-4o-mini", max_completion_tokens=10)
messages = [HumanMessage("Hello")]
# Test different cache keys per call
payload1 = chat._get_request_payload(messages, prompt_cache_key="cache-v1")
payload2 = chat._get_request_payload(messages, prompt_cache_key="cache-v2")
assert payload1["prompt_cache_key"] == "cache-v1"
assert payload2["prompt_cache_key"] == "cache-v2"
# Test dynamic cache key assignment
cache_keys = ["customer-v1", "support-v1", "feedback-v1"]
for cache_key in cache_keys:
payload = chat._get_request_payload(messages, prompt_cache_key=cache_key)
assert "prompt_cache_key" in payload
assert payload["prompt_cache_key"] == cache_key
def test_prompt_cache_key_model_kwargs() -> None:
"""Test prompt_cache_key via model_kwargs and method precedence."""
messages = [HumanMessage("Hello world")]
# Test model-level via model_kwargs
chat = ChatOpenAI(
model="gpt-4o-mini",
max_completion_tokens=10,
model_kwargs={"prompt_cache_key": "model-level-cache"},
)
payload = chat._get_request_payload(messages)
assert "prompt_cache_key" in payload
assert payload["prompt_cache_key"] == "model-level-cache"
# Test that per-call cache key overrides model-level
payload_override = chat._get_request_payload(
messages, prompt_cache_key="per-call-cache"
)
assert payload_override["prompt_cache_key"] == "per-call-cache"
def test_prompt_cache_key_responses_api() -> None:
"""Test that prompt_cache_key works with Responses API."""
chat = ChatOpenAI(
model="gpt-4o-mini",
use_responses_api=True,
output_version="responses/v1",
max_completion_tokens=10,
)
messages = [HumanMessage("Hello")]
payload = chat._get_request_payload(
messages, prompt_cache_key="responses-api-cache-v1"
)
# prompt_cache_key should be present regardless of API type
assert "prompt_cache_key" in payload
assert payload["prompt_cache_key"] == "responses-api-cache-v1"
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/partners/openai/tests/unit_tests/chat_models/test_prompt_cache_key.py",
"license": "MIT License",
"lines": 65,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/langchain_v1/langchain/chat_models/base.py | """Factory functions for chat models."""
from __future__ import annotations
import functools
import importlib
import warnings
from typing import (
TYPE_CHECKING,
Any,
Literal,
TypeAlias,
cast,
overload,
)
from langchain_core.language_models import BaseChatModel, LanguageModelInput
from langchain_core.messages import AIMessage, AnyMessage
from langchain_core.prompt_values import ChatPromptValueConcrete, StringPromptValue
from langchain_core.runnables import Runnable, RunnableConfig, ensure_config
from typing_extensions import override
if TYPE_CHECKING:
from collections.abc import AsyncIterator, Callable, Iterator, Sequence
from types import ModuleType
from langchain_core.runnables.schema import StreamEvent
from langchain_core.tools import BaseTool
from langchain_core.tracers import RunLog, RunLogPatch
from pydantic import BaseModel
def _call(cls: type[BaseChatModel], **kwargs: Any) -> BaseChatModel:
# TODO: replace with operator.call when lower bounding to Python 3.11
return cls(**kwargs)
_BUILTIN_PROVIDERS: dict[str, tuple[str, str, Callable[..., BaseChatModel]]] = {
"anthropic": ("langchain_anthropic", "ChatAnthropic", _call),
"anthropic_bedrock": ("langchain_aws", "ChatAnthropicBedrock", _call),
"azure_ai": ("langchain_azure_ai.chat_models", "AzureAIChatCompletionsModel", _call),
"azure_openai": ("langchain_openai", "AzureChatOpenAI", _call),
"bedrock": ("langchain_aws", "ChatBedrock", _call),
"bedrock_converse": ("langchain_aws", "ChatBedrockConverse", _call),
"cohere": ("langchain_cohere", "ChatCohere", _call),
"deepseek": ("langchain_deepseek", "ChatDeepSeek", _call),
"fireworks": ("langchain_fireworks", "ChatFireworks", _call),
"google_anthropic_vertex": (
"langchain_google_vertexai.model_garden",
"ChatAnthropicVertex",
_call,
),
"google_genai": ("langchain_google_genai", "ChatGoogleGenerativeAI", _call),
"google_vertexai": ("langchain_google_vertexai", "ChatVertexAI", _call),
"groq": ("langchain_groq", "ChatGroq", _call),
"huggingface": (
"langchain_huggingface",
"ChatHuggingFace",
lambda cls, model, **kwargs: cls.from_model_id(model_id=model, **kwargs),
),
"ibm": (
"langchain_ibm",
"ChatWatsonx",
lambda cls, model, **kwargs: cls(model_id=model, **kwargs),
),
"mistralai": ("langchain_mistralai", "ChatMistralAI", _call),
"nvidia": ("langchain_nvidia_ai_endpoints", "ChatNVIDIA", _call),
"ollama": ("langchain_ollama", "ChatOllama", _call),
"openai": ("langchain_openai", "ChatOpenAI", _call),
"openrouter": ("langchain_openrouter", "ChatOpenRouter", _call),
"perplexity": ("langchain_perplexity", "ChatPerplexity", _call),
"together": ("langchain_together", "ChatTogether", _call),
"upstage": ("langchain_upstage", "ChatUpstage", _call),
"xai": ("langchain_xai", "ChatXAI", _call),
}
"""Registry mapping provider names to their import configuration.
Each entry maps a provider key to a tuple of:
- `module_path`: The Python module path containing the chat model class.
This may be a submodule (e.g., `'langchain_azure_ai.chat_models'`) if the class is
not exported from the package root.
- `class_name`: The name of the chat model class to import.
- `creator_func`: A callable that instantiates the class with provided kwargs.
!!! note
This dict is not exhaustive of all providers supported by LangChain, but is
meant to cover the most popular ones and serve as a template for adding more
providers in the future. If a provider is not in this dict, it can still be
used with `init_chat_model` as long as its integration package is installed,
but the provider key will not be inferred from the model name and must be
specified explicitly via the `model_provider` parameter.
Refer to the LangChain [integration documentation](https://docs.langchain.com/oss/python/integrations/providers/overview)
for a full list of supported providers and their corresponding packages.
"""
def _import_module(module: str, class_name: str) -> ModuleType:
"""Import a module by name.
Args:
module: The fully qualified module name to import (e.g., `'langchain_openai'`).
class_name: The name of the class being imported, used for error messages.
Returns:
The imported module.
Raises:
ImportError: If the module cannot be imported, with a message suggesting
the pip package to install.
"""
try:
return importlib.import_module(module)
except ImportError as e:
# Extract package name from module path (e.g., "langchain_azure_ai.chat_models"
# becomes "langchain-azure-ai")
pkg = module.split(".", maxsplit=1)[0].replace("_", "-")
msg = (
f"Initializing {class_name} requires the {pkg} package. Please install it "
f"with `pip install {pkg}`"
)
raise ImportError(msg) from e
@functools.lru_cache(maxsize=len(_BUILTIN_PROVIDERS))
def _get_chat_model_creator(
provider: str,
) -> Callable[..., BaseChatModel]:
"""Return a factory function that creates a chat model for the given provider.
This function is cached to avoid repeated module imports.
Args:
provider: The name of the model provider (e.g., `'openai'`, `'anthropic'`).
Must be a key in `_BUILTIN_PROVIDERS`.
Returns:
A callable that accepts model kwargs and returns a `BaseChatModel` instance for
the specified provider.
Raises:
ValueError: If the provider is not in `_BUILTIN_PROVIDERS`.
ImportError: If the provider's integration package is not installed.
"""
if provider not in _BUILTIN_PROVIDERS:
supported = ", ".join(_BUILTIN_PROVIDERS.keys())
msg = f"Unsupported {provider=}.\n\nSupported model providers are: {supported}"
raise ValueError(msg)
pkg, class_name, creator_func = _BUILTIN_PROVIDERS[provider]
try:
module = _import_module(pkg, class_name)
except ImportError as e:
if provider != "ollama":
raise
# For backwards compatibility
try:
module = _import_module("langchain_community.chat_models", class_name)
except ImportError:
# If both langchain-ollama and langchain-community aren't available,
# raise an error related to langchain-ollama
raise e from None
cls = getattr(module, class_name)
return functools.partial(creator_func, cls=cls)
@overload
def init_chat_model(
model: str,
*,
model_provider: str | None = None,
configurable_fields: None = None,
config_prefix: str | None = None,
**kwargs: Any,
) -> BaseChatModel: ...
@overload
def init_chat_model(
model: None = None,
*,
model_provider: str | None = None,
configurable_fields: None = None,
config_prefix: str | None = None,
**kwargs: Any,
) -> _ConfigurableModel: ...
@overload
def init_chat_model(
model: str | None = None,
*,
model_provider: str | None = None,
configurable_fields: Literal["any"] | list[str] | tuple[str, ...] = ...,
config_prefix: str | None = None,
**kwargs: Any,
) -> _ConfigurableModel: ...
# FOR CONTRIBUTORS: If adding support for a new provider, please append the provider
# name to the supported list in the docstring below. Do *not* change the order of the
# existing providers.
def init_chat_model(
model: str | None = None,
*,
model_provider: str | None = None,
configurable_fields: Literal["any"] | list[str] | tuple[str, ...] | None = None,
config_prefix: str | None = None,
**kwargs: Any,
) -> BaseChatModel | _ConfigurableModel:
"""Initialize a chat model from any supported provider using a unified interface.
**Two main use cases:**
1. **Fixed model** – specify the model upfront and get a ready-to-use chat model.
    2. **Configurable model** – specify parameters (including the model name) at
        runtime via `config`, making it easy to switch between models/providers
        without changing your code.
!!! note "Installation requirements"
Requires the integration package for the chosen model provider to be installed.
See the `model_provider` parameter below for specific package names
(e.g., `pip install langchain-openai`).
Refer to the [provider integration's API reference](https://docs.langchain.com/oss/python/integrations/providers)
for supported model parameters to use as `**kwargs`.
Args:
model: The model name, optionally prefixed with provider (e.g., `'openai:gpt-4o'`).
Prefer exact model IDs from provider docs over aliases for reliable behavior
(e.g., dated versions like `'...-20250514'` instead of `'...-latest'`).
Will attempt to infer `model_provider` from model if not specified.
The following providers will be inferred based on these model prefixes:
- `gpt-...` | `o1...` | `o3...` -> `openai`
- `claude...` -> `anthropic`
- `amazon...` -> `bedrock`
- `gemini...` -> `google_vertexai`
- `command...` -> `cohere`
- `accounts/fireworks...` -> `fireworks`
- `mistral...` -> `mistralai`
- `deepseek...` -> `deepseek`
- `grok...` -> `xai`
- `sonar...` -> `perplexity`
- `solar...` -> `upstage`
model_provider: The model provider if not specified as part of the model arg
(see above).
Supported `model_provider` values and the corresponding integration package
are:
- `openai` -> [`langchain-openai`](https://docs.langchain.com/oss/python/integrations/providers/openai)
- `anthropic` -> [`langchain-anthropic`](https://docs.langchain.com/oss/python/integrations/providers/anthropic)
- `azure_openai` -> [`langchain-openai`](https://docs.langchain.com/oss/python/integrations/providers/openai)
- `azure_ai` -> [`langchain-azure-ai`](https://docs.langchain.com/oss/python/integrations/providers/microsoft)
- `google_vertexai` -> [`langchain-google-vertexai`](https://docs.langchain.com/oss/python/integrations/providers/google)
- `google_genai` -> [`langchain-google-genai`](https://docs.langchain.com/oss/python/integrations/providers/google)
- `anthropic_bedrock` -> [`langchain-aws`](https://docs.langchain.com/oss/python/integrations/providers/aws)
- `bedrock` -> [`langchain-aws`](https://docs.langchain.com/oss/python/integrations/providers/aws)
- `bedrock_converse` -> [`langchain-aws`](https://docs.langchain.com/oss/python/integrations/providers/aws)
- `cohere` -> [`langchain-cohere`](https://docs.langchain.com/oss/python/integrations/providers/cohere)
- `fireworks` -> [`langchain-fireworks`](https://docs.langchain.com/oss/python/integrations/providers/fireworks)
- `together` -> [`langchain-together`](https://docs.langchain.com/oss/python/integrations/providers/together)
- `mistralai` -> [`langchain-mistralai`](https://docs.langchain.com/oss/python/integrations/providers/mistralai)
- `huggingface` -> [`langchain-huggingface`](https://docs.langchain.com/oss/python/integrations/providers/huggingface)
- `groq` -> [`langchain-groq`](https://docs.langchain.com/oss/python/integrations/providers/groq)
- `ollama` -> [`langchain-ollama`](https://docs.langchain.com/oss/python/integrations/providers/ollama)
- `google_anthropic_vertex` -> [`langchain-google-vertexai`](https://docs.langchain.com/oss/python/integrations/providers/google)
- `deepseek` -> [`langchain-deepseek`](https://docs.langchain.com/oss/python/integrations/providers/deepseek)
- `ibm` -> [`langchain-ibm`](https://docs.langchain.com/oss/python/integrations/providers/ibm)
- `nvidia` -> [`langchain-nvidia-ai-endpoints`](https://docs.langchain.com/oss/python/integrations/providers/nvidia)
- `xai` -> [`langchain-xai`](https://docs.langchain.com/oss/python/integrations/providers/xai)
- `openrouter` -> [`langchain-openrouter`](https://docs.langchain.com/oss/python/integrations/providers/openrouter)
- `perplexity` -> [`langchain-perplexity`](https://docs.langchain.com/oss/python/integrations/providers/perplexity)
- `upstage` -> [`langchain-upstage`](https://docs.langchain.com/oss/python/integrations/providers/upstage)
configurable_fields: Which model parameters are configurable at runtime:
- `None`: No configurable fields (i.e., a fixed model).
- `'any'`: All fields are configurable. **See security note below.**
            - `list[str] | tuple[str, ...]`: Specified fields are configurable.
Fields are assumed to have `config_prefix` stripped if a `config_prefix` is
specified.
If `model` is specified, then defaults to `None`.
If `model` is not specified, then defaults to `("model", "model_provider")`.
!!! warning "Security note"
Setting `configurable_fields="any"` means fields like `api_key`,
`base_url`, etc., can be altered at runtime, potentially redirecting
model requests to a different service/user.
                If you accept untrusted configurations, make sure to enumerate
                the configurable fields explicitly via `configurable_fields=(...)`.
config_prefix: Optional prefix for configuration keys.
Useful when you have multiple configurable models in the same application.
If `'config_prefix'` is a non-empty string then `model` will be configurable
at runtime via the `config["configurable"]["{config_prefix}_{param}"]` keys.
See examples below.
If `'config_prefix'` is an empty string then model will be configurable via
`config["configurable"]["{param}"]`.
**kwargs: Additional model-specific keyword args to pass to the underlying
chat model's `__init__` method. Common parameters include:
- `temperature`: Model temperature for controlling randomness.
- `max_tokens`: Maximum number of output tokens.
- `timeout`: Maximum time (in seconds) to wait for a response.
- `max_retries`: Maximum number of retry attempts for failed requests.
- `base_url`: Custom API endpoint URL.
- `rate_limiter`: A
[`BaseRateLimiter`][langchain_core.rate_limiters.BaseRateLimiter]
instance to control request rate.
Refer to the specific model provider's
[integration reference](https://reference.langchain.com/python/integrations/)
for all available parameters.
Returns:
A `BaseChatModel` corresponding to the `model_name` and `model_provider`
specified if configurability is inferred to be `False`. If configurable, a
chat model emulator that initializes the underlying model at runtime once a
config is passed in.
Raises:
ValueError: If `model_provider` cannot be inferred or isn't supported.
ImportError: If the model provider integration package is not installed.
???+ example "Initialize a non-configurable model"
```python
# pip install langchain langchain-openai langchain-anthropic langchain-google-vertexai
from langchain.chat_models import init_chat_model
o3_mini = init_chat_model("openai:o3-mini", temperature=0)
claude_sonnet = init_chat_model("anthropic:claude-sonnet-4-5-20250929", temperature=0)
        gemini_2_5_flash = init_chat_model("google_vertexai:gemini-2.5-flash", temperature=0)
o3_mini.invoke("what's your name")
claude_sonnet.invoke("what's your name")
        gemini_2_5_flash.invoke("what's your name")
```
??? example "Partially configurable model with no default"
```python
# pip install langchain langchain-openai langchain-anthropic
from langchain.chat_models import init_chat_model
# (We don't need to specify configurable=True if a model isn't specified.)
configurable_model = init_chat_model(temperature=0)
configurable_model.invoke("what's your name", config={"configurable": {"model": "gpt-4o"}})
# Use GPT-4o to generate the response
configurable_model.invoke(
"what's your name",
config={"configurable": {"model": "claude-sonnet-4-5-20250929"}},
)
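        # Use Sonnet 4.5 to generate the response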
```
??? example "Fully configurable model with a default"
```python
# pip install langchain langchain-openai langchain-anthropic
from langchain.chat_models import init_chat_model
configurable_model_with_default = init_chat_model(
"openai:gpt-4o",
configurable_fields="any", # This allows us to configure other params like temperature, max_tokens, etc at runtime.
config_prefix="foo",
temperature=0,
)
configurable_model_with_default.invoke("what's your name")
# GPT-4o response with temperature 0 (as set in default)
configurable_model_with_default.invoke(
"what's your name",
config={
"configurable": {
"foo_model": "anthropic:claude-sonnet-4-5-20250929",
"foo_temperature": 0.6,
}
},
)
# Override default to use Sonnet 4.5 with temperature 0.6 to generate response
```
??? example "Bind tools to a configurable model"
You can call any chat model declarative methods on a configurable model in the
same way that you would with a normal model:
```python
# pip install langchain langchain-openai langchain-anthropic
from langchain.chat_models import init_chat_model
from pydantic import BaseModel, Field
class GetWeather(BaseModel):
'''Get the current weather in a given location'''
location: str = Field(..., description="The city and state, e.g. San Francisco, CA")
class GetPopulation(BaseModel):
'''Get the current population in a given location'''
location: str = Field(..., description="The city and state, e.g. San Francisco, CA")
configurable_model = init_chat_model(
"gpt-4o", configurable_fields=("model", "model_provider"), temperature=0
)
configurable_model_with_tools = configurable_model.bind_tools(
[
GetWeather,
GetPopulation,
]
)
configurable_model_with_tools.invoke(
"Which city is hotter today and which is bigger: LA or NY?"
)
# Use GPT-4o
configurable_model_with_tools.invoke(
"Which city is hotter today and which is bigger: LA or NY?",
config={"configurable": {"model": "claude-sonnet-4-5-20250929"}},
)
# Use Sonnet 4.5
```
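
    ??? example "Pass a rate limiter"

        A minimal sketch (assuming `langchain-openai` is installed) of the
        `rate_limiter` kwarg described above:

        ```python
        # pip install langchain langchain-openai
        from langchain.chat_models import init_chat_model
        from langchain_core.rate_limiters import InMemoryRateLimiter

        # Limit requests to roughly one per second across invocations.
        rate_limiter = InMemoryRateLimiter(requests_per_second=1)

        gpt_4o = init_chat_model("openai:gpt-4o", rate_limiter=rate_limiter, temperature=0)
        gpt_4o.invoke("what's your name")
        ```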
""" # noqa: E501
if model is not None and not isinstance(model, str):
msg = ( # type: ignore[unreachable]
f"`model` must be a string (e.g., 'openai:gpt-4o'), got "
f"{type(model).__name__}. If you've already constructed a chat model "
f"object, use it directly instead of passing it to init_chat_model()."
)
raise TypeError(msg)
if not model and not configurable_fields:
configurable_fields = ("model", "model_provider")
config_prefix = config_prefix or ""
if config_prefix and not configurable_fields:
warnings.warn(
f"{config_prefix=} has been set but no fields are configurable. Set "
f"`configurable_fields=(...)` to specify the model params that are "
f"configurable.",
stacklevel=2,
)
if not configurable_fields:
return _init_chat_model_helper(
cast("str", model),
model_provider=model_provider,
**kwargs,
)
if model:
kwargs["model"] = model
if model_provider:
kwargs["model_provider"] = model_provider
return _ConfigurableModel(
default_config=kwargs,
config_prefix=config_prefix,
configurable_fields=configurable_fields,
)
def _init_chat_model_helper(
model: str,
*,
model_provider: str | None = None,
**kwargs: Any,
) -> BaseChatModel:
model, model_provider = _parse_model(model, model_provider)
creator_func = _get_chat_model_creator(model_provider)
return creator_func(model=model, **kwargs)
def _attempt_infer_model_provider(model_name: str) -> str | None:
"""Attempt to infer model provider from model name.
Args:
model_name: The name of the model to infer provider for.
Returns:
The inferred provider name, or `None` if no provider could be inferred.
"""
model_lower = model_name.lower()
# OpenAI models (including newer models and aliases)
if any(
model_lower.startswith(pre)
for pre in (
"gpt-",
"o1",
"o3",
"chatgpt",
"text-davinci",
)
):
return "openai"
# Anthropic models
if model_lower.startswith("claude"):
return "anthropic"
# Cohere models
if model_lower.startswith("command"):
return "cohere"
# Fireworks models
if model_lower.startswith("accounts/fireworks"):
return "fireworks"
# Google models
if model_lower.startswith("gemini"):
return "google_vertexai"
# AWS Bedrock models
if model_lower.startswith(("amazon.", "anthropic.", "meta.")):
return "bedrock"
# Mistral models
if model_lower.startswith(("mistral", "mixtral")):
return "mistralai"
# DeepSeek models
if model_lower.startswith("deepseek"):
return "deepseek"
# xAI models
if model_lower.startswith("grok"):
return "xai"
# Perplexity models
if model_lower.startswith("sonar"):
return "perplexity"
# Upstage models
if model_lower.startswith("solar"):
return "upstage"
return None
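# For example: "gpt-4o" -> "openai", "claude-sonnet-4-5" -> "anthropic", and an
# unrecognized name such as "my-custom-model" -> None (the caller must then
# supply model_provider explicitly).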
def _parse_model(model: str, model_provider: str | None) -> tuple[str, str]:
"""Parse model name and provider, inferring provider if necessary."""
# Handle provider:model format
if (
not model_provider
and ":" in model
and model.split(":", maxsplit=1)[0] in _BUILTIN_PROVIDERS
):
model_provider = model.split(":", maxsplit=1)[0]
model = ":".join(model.split(":")[1:])
# Attempt to infer provider if not specified
model_provider = model_provider or _attempt_infer_model_provider(model)
if not model_provider:
# Enhanced error message with suggestions
supported_list = ", ".join(sorted(_BUILTIN_PROVIDERS))
msg = (
f"Unable to infer model provider for {model=}. "
f"Please specify 'model_provider' directly.\n\n"
f"Supported providers: {supported_list}\n\n"
f"For help with specific providers, see: "
f"https://docs.langchain.com/oss/python/integrations/providers"
)
raise ValueError(msg)
# Normalize provider name
model_provider = model_provider.replace("-", "_").lower()
return model, model_provider
def _remove_prefix(s: str, prefix: str) -> str:
return s.removeprefix(prefix)
_DECLARATIVE_METHODS = ("bind_tools", "with_structured_output")
class _ConfigurableModel(Runnable[LanguageModelInput, Any]):
def __init__(
self,
*,
default_config: dict[str, Any] | None = None,
configurable_fields: Literal["any"] | list[str] | tuple[str, ...] = "any",
config_prefix: str = "",
queued_declarative_operations: Sequence[tuple[str, tuple[Any, ...], dict[str, Any]]] = (),
) -> None:
self._default_config: dict[str, Any] = default_config or {}
self._configurable_fields: Literal["any"] | list[str] = (
"any" if configurable_fields == "any" else list(configurable_fields)
)
self._config_prefix = (
config_prefix + "_"
if config_prefix and not config_prefix.endswith("_")
else config_prefix
)
self._queued_declarative_operations: list[tuple[str, tuple[Any, ...], dict[str, Any]]] = (
list(
queued_declarative_operations,
)
)
def __getattr__(self, name: str) -> Any:
if name in _DECLARATIVE_METHODS:
# Declarative operations that cannot be applied until after an actual model
# object is instantiated. So instead of returning the actual operation,
# we record the operation and its arguments in a queue. This queue is
# then applied in order whenever we actually instantiate the model (in
# self._model()).
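            # For example, calling `model.bind_tools([...])` on a configurable
            # model returns a new _ConfigurableModel with
            # ("bind_tools", ([...],), {}) appended to the queue; the call is
            # replayed on the real model inside self._model().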
def queue(*args: Any, **kwargs: Any) -> _ConfigurableModel:
queued_declarative_operations = list(
self._queued_declarative_operations,
)
queued_declarative_operations.append((name, args, kwargs))
return _ConfigurableModel(
default_config=dict(self._default_config),
configurable_fields=list(self._configurable_fields)
if isinstance(self._configurable_fields, list)
else self._configurable_fields,
config_prefix=self._config_prefix,
queued_declarative_operations=queued_declarative_operations,
)
return queue
if self._default_config and (model := self._model()) and hasattr(model, name):
return getattr(model, name)
msg = f"{name} is not a BaseChatModel attribute"
if self._default_config:
msg += " and is not implemented on the default model"
msg += "."
raise AttributeError(msg)
def _model(self, config: RunnableConfig | None = None) -> Runnable[Any, Any]:
params = {**self._default_config, **self._model_params(config)}
model = _init_chat_model_helper(**params)
for name, args, kwargs in self._queued_declarative_operations:
model = getattr(model, name)(*args, **kwargs)
return model
def _model_params(self, config: RunnableConfig | None) -> dict[str, Any]:
config = ensure_config(config)
model_params = {
_remove_prefix(k, self._config_prefix): v
for k, v in config.get("configurable", {}).items()
if k.startswith(self._config_prefix)
}
if self._configurable_fields != "any":
model_params = {k: v for k, v in model_params.items() if k in self._configurable_fields}
return model_params
def with_config(
self,
config: RunnableConfig | None = None,
**kwargs: Any,
) -> _ConfigurableModel:
config = RunnableConfig(**(config or {}), **cast("RunnableConfig", kwargs))
        # Normalize the config (ensure_config fills in missing default keys)
config = ensure_config(config)
model_params = self._model_params(config)
remaining_config = {k: v for k, v in config.items() if k != "configurable"}
remaining_config["configurable"] = {
k: v
for k, v in config.get("configurable", {}).items()
if _remove_prefix(k, self._config_prefix) not in model_params
}
queued_declarative_operations = list(self._queued_declarative_operations)
if remaining_config:
queued_declarative_operations.append(
(
"with_config",
(),
{"config": remaining_config},
),
)
return _ConfigurableModel(
default_config={**self._default_config, **model_params},
configurable_fields=list(self._configurable_fields)
if isinstance(self._configurable_fields, list)
else self._configurable_fields,
config_prefix=self._config_prefix,
queued_declarative_operations=queued_declarative_operations,
)
@property
@override
def InputType(self) -> TypeAlias:
"""Get the input type for this `Runnable`."""
# This is a version of LanguageModelInput which replaces the abstract
# base class BaseMessage with a union of its subclasses, which makes
# for a much better schema.
return str | StringPromptValue | ChatPromptValueConcrete | list[AnyMessage]
@override
def invoke(
self,
input: LanguageModelInput,
config: RunnableConfig | None = None,
**kwargs: Any,
) -> Any:
return self._model(config).invoke(input, config=config, **kwargs)
@override
async def ainvoke(
self,
input: LanguageModelInput,
config: RunnableConfig | None = None,
**kwargs: Any,
) -> Any:
return await self._model(config).ainvoke(input, config=config, **kwargs)
@override
def stream(
self,
input: LanguageModelInput,
config: RunnableConfig | None = None,
**kwargs: Any | None,
) -> Iterator[Any]:
yield from self._model(config).stream(input, config=config, **kwargs)
@override
async def astream(
self,
input: LanguageModelInput,
config: RunnableConfig | None = None,
**kwargs: Any | None,
) -> AsyncIterator[Any]:
async for x in self._model(config).astream(input, config=config, **kwargs):
yield x
def batch(
self,
inputs: list[LanguageModelInput],
config: RunnableConfig | list[RunnableConfig] | None = None,
*,
return_exceptions: bool = False,
**kwargs: Any | None,
) -> list[Any]:
config = config or None
        # If <= 1 config, use the underlying model's batch implementation.
if config is None or isinstance(config, dict) or len(config) <= 1:
if isinstance(config, list):
config = config[0]
return self._model(config).batch(
inputs,
config=config,
return_exceptions=return_exceptions,
**kwargs,
)
        # If there are multiple configs, default to Runnable.batch, which uses an
        # executor to invoke in parallel.
return super().batch(
inputs,
config=config,
return_exceptions=return_exceptions,
**kwargs,
)
async def abatch(
self,
inputs: list[LanguageModelInput],
config: RunnableConfig | list[RunnableConfig] | None = None,
*,
return_exceptions: bool = False,
**kwargs: Any | None,
) -> list[Any]:
config = config or None
        # If <= 1 config, use the underlying model's abatch implementation.
if config is None or isinstance(config, dict) or len(config) <= 1:
if isinstance(config, list):
config = config[0]
return await self._model(config).abatch(
inputs,
config=config,
return_exceptions=return_exceptions,
**kwargs,
)
        # If there are multiple configs, default to Runnable.abatch, which uses an
        # executor to invoke in parallel.
return await super().abatch(
inputs,
config=config,
return_exceptions=return_exceptions,
**kwargs,
)
def batch_as_completed(
self,
inputs: Sequence[LanguageModelInput],
config: RunnableConfig | Sequence[RunnableConfig] | None = None,
*,
return_exceptions: bool = False,
**kwargs: Any,
) -> Iterator[tuple[int, Any | Exception]]:
config = config or None
        # If <= 1 config, use the underlying model's batch_as_completed implementation.
if config is None or isinstance(config, dict) or len(config) <= 1:
if isinstance(config, list):
config = config[0]
yield from self._model(cast("RunnableConfig", config)).batch_as_completed( # type: ignore[call-overload]
inputs,
config=config,
return_exceptions=return_exceptions,
**kwargs,
)
        # If there are multiple configs, default to Runnable.batch_as_completed,
        # which uses an executor to invoke in parallel.
else:
yield from super().batch_as_completed( # type: ignore[call-overload]
inputs,
config=config,
return_exceptions=return_exceptions,
**kwargs,
)
async def abatch_as_completed(
self,
inputs: Sequence[LanguageModelInput],
config: RunnableConfig | Sequence[RunnableConfig] | None = None,
*,
return_exceptions: bool = False,
**kwargs: Any,
) -> AsyncIterator[tuple[int, Any]]:
config = config or None
        # If <= 1 config, use the underlying model's abatch_as_completed implementation.
if config is None or isinstance(config, dict) or len(config) <= 1:
if isinstance(config, list):
config = config[0]
async for x in self._model(
cast("RunnableConfig", config),
).abatch_as_completed( # type: ignore[call-overload]
inputs,
config=config,
return_exceptions=return_exceptions,
**kwargs,
):
yield x
        # If there are multiple configs, default to Runnable.abatch_as_completed,
        # which uses an executor to invoke in parallel.
else:
async for x in super().abatch_as_completed( # type: ignore[call-overload]
inputs,
config=config,
return_exceptions=return_exceptions,
**kwargs,
):
yield x
@override
def transform(
self,
input: Iterator[LanguageModelInput],
config: RunnableConfig | None = None,
**kwargs: Any | None,
) -> Iterator[Any]:
yield from self._model(config).transform(input, config=config, **kwargs)
@override
async def atransform(
self,
input: AsyncIterator[LanguageModelInput],
config: RunnableConfig | None = None,
**kwargs: Any | None,
) -> AsyncIterator[Any]:
async for x in self._model(config).atransform(input, config=config, **kwargs):
yield x
@overload
@override
def astream_log(
self,
input: Any,
config: RunnableConfig | None = None,
*,
diff: Literal[True] = True,
with_streamed_output_list: bool = True,
include_names: Sequence[str] | None = None,
include_types: Sequence[str] | None = None,
include_tags: Sequence[str] | None = None,
exclude_names: Sequence[str] | None = None,
exclude_types: Sequence[str] | None = None,
exclude_tags: Sequence[str] | None = None,
**kwargs: Any,
) -> AsyncIterator[RunLogPatch]: ...
@overload
@override
def astream_log(
self,
input: Any,
config: RunnableConfig | None = None,
*,
diff: Literal[False],
with_streamed_output_list: bool = True,
include_names: Sequence[str] | None = None,
include_types: Sequence[str] | None = None,
include_tags: Sequence[str] | None = None,
exclude_names: Sequence[str] | None = None,
exclude_types: Sequence[str] | None = None,
exclude_tags: Sequence[str] | None = None,
**kwargs: Any,
) -> AsyncIterator[RunLog]: ...
@override
async def astream_log(
self,
input: Any,
config: RunnableConfig | None = None,
*,
diff: bool = True,
with_streamed_output_list: bool = True,
include_names: Sequence[str] | None = None,
include_types: Sequence[str] | None = None,
include_tags: Sequence[str] | None = None,
exclude_names: Sequence[str] | None = None,
exclude_types: Sequence[str] | None = None,
exclude_tags: Sequence[str] | None = None,
**kwargs: Any,
) -> AsyncIterator[RunLogPatch] | AsyncIterator[RunLog]:
async for x in self._model(config).astream_log( # type: ignore[call-overload, misc]
input,
config=config,
diff=diff,
with_streamed_output_list=with_streamed_output_list,
include_names=include_names,
include_types=include_types,
include_tags=include_tags,
exclude_tags=exclude_tags,
exclude_types=exclude_types,
exclude_names=exclude_names,
**kwargs,
):
yield x
@override
async def astream_events(
self,
input: Any,
config: RunnableConfig | None = None,
*,
version: Literal["v1", "v2"] = "v2",
include_names: Sequence[str] | None = None,
include_types: Sequence[str] | None = None,
include_tags: Sequence[str] | None = None,
exclude_names: Sequence[str] | None = None,
exclude_types: Sequence[str] | None = None,
exclude_tags: Sequence[str] | None = None,
**kwargs: Any,
) -> AsyncIterator[StreamEvent]:
async for x in self._model(config).astream_events(
input,
config=config,
version=version,
include_names=include_names,
include_types=include_types,
include_tags=include_tags,
exclude_tags=exclude_tags,
exclude_types=exclude_types,
exclude_names=exclude_names,
**kwargs,
):
yield x
# Explicitly added to satisfy downstream linters.
def bind_tools(
self,
tools: Sequence[dict[str, Any] | type[BaseModel] | Callable[..., Any] | BaseTool],
**kwargs: Any,
) -> Runnable[LanguageModelInput, AIMessage]:
return self.__getattr__("bind_tools")(tools, **kwargs)
# Explicitly added to satisfy downstream linters.
def with_structured_output(
self,
schema: dict[str, Any] | type[BaseModel],
**kwargs: Any,
) -> Runnable[LanguageModelInput, dict[str, Any] | BaseModel]:
return self.__getattr__("with_structured_output")(schema, **kwargs)
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/langchain/chat_models/base.py",
"license": "MIT License",
"lines": 855,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/langchain_v1/langchain/embeddings/base.py | """Factory functions for embeddings."""
import functools
import importlib
from collections.abc import Callable
from typing import Any
from langchain_core.embeddings import Embeddings
def _call(cls: type[Embeddings], **kwargs: Any) -> Embeddings:
return cls(**kwargs)
_BUILTIN_PROVIDERS: dict[str, tuple[str, str, Callable[..., Embeddings]]] = {
"azure_openai": ("langchain_openai", "AzureOpenAIEmbeddings", _call),
"bedrock": (
"langchain_aws",
"BedrockEmbeddings",
lambda cls, model, **kwargs: cls(model_id=model, **kwargs),
),
"cohere": ("langchain_cohere", "CohereEmbeddings", _call),
"google_genai": ("langchain_google_genai", "GoogleGenerativeAIEmbeddings", _call),
"google_vertexai": ("langchain_google_vertexai", "VertexAIEmbeddings", _call),
"huggingface": (
"langchain_huggingface",
"HuggingFaceEmbeddings",
lambda cls, model, **kwargs: cls(model_name=model, **kwargs),
),
"mistralai": ("langchain_mistralai", "MistralAIEmbeddings", _call),
"ollama": ("langchain_ollama", "OllamaEmbeddings", _call),
"openai": ("langchain_openai", "OpenAIEmbeddings", _call),
}
"""Registry mapping provider names to their import configuration.
Each entry maps a provider key to a tuple of:
- `module_path`: The Python module path containing the embeddings class.
- `class_name`: The name of the embeddings class to import.
- `creator_func`: A callable that instantiates the class with provided kwargs.
!!! note
This dict is not exhaustive of all providers supported by LangChain, but is
meant to cover the most popular ones and serve as a template for adding more
    providers in the future. Unlike `init_chat_model`, `init_embeddings` only
    supports providers registered in this dict; requesting any other provider
    raises a `ValueError`. To use an unlisted provider, instantiate its
    `Embeddings` class from the integration package directly.
Refer to the LangChain [integration documentation](https://docs.langchain.com/oss/python/integrations/providers/overview)
for a full list of supported providers and their corresponding packages.
"""
@functools.lru_cache(maxsize=len(_BUILTIN_PROVIDERS))
def _get_embeddings_class_creator(provider: str) -> Callable[..., Embeddings]:
"""Return a factory function that creates an embeddings model for the given provider.
This function is cached to avoid repeated module imports.
Args:
provider: The name of the model provider (e.g., `'openai'`, `'cohere'`).
Must be a key in `_BUILTIN_PROVIDERS`.
Returns:
A callable that accepts model kwargs and returns an `Embeddings` instance for
the specified provider.
Raises:
ValueError: If the provider is not in `_BUILTIN_PROVIDERS`.
ImportError: If the provider's integration package is not installed.
"""
if provider not in _BUILTIN_PROVIDERS:
msg = (
f"Provider '{provider}' is not supported.\n"
f"Supported providers and their required packages:\n"
f"{_get_provider_list()}"
)
raise ValueError(msg)
module_name, class_name, creator_func = _BUILTIN_PROVIDERS[provider]
try:
module = importlib.import_module(module_name)
except ImportError as e:
pkg = module_name.replace("_", "-")
msg = f"Could not import {pkg} python package. Please install it with `pip install {pkg}`"
raise ImportError(msg) from e
cls = getattr(module, class_name)
return functools.partial(creator_func, cls=cls)
def _get_provider_list() -> str:
"""Get formatted list of providers and their packages."""
return "\n".join(
f" - {p}: {pkg[0].replace('_', '-')}" for p, pkg in _BUILTIN_PROVIDERS.items()
)
def _parse_model_string(model_name: str) -> tuple[str, str]:
"""Parse a model string into provider and model name components.
The model string should be in the format 'provider:model-name', where provider
is one of the supported providers.
Args:
model_name: A model string in the format 'provider:model-name'
Returns:
A tuple of (provider, model_name)
Example:
```python
_parse_model_string("openai:text-embedding-3-small")
# Returns: ("openai", "text-embedding-3-small")
_parse_model_string("bedrock:amazon.titan-embed-text-v1")
# Returns: ("bedrock", "amazon.titan-embed-text-v1")
```
Raises:
ValueError: If the model string is not in the correct format or
the provider is unsupported
"""
if ":" not in model_name:
msg = (
f"Invalid model format '{model_name}'.\n"
f"Model name must be in format 'provider:model-name'\n"
f"Example valid model strings:\n"
f" - openai:text-embedding-3-small\n"
f" - bedrock:amazon.titan-embed-text-v1\n"
f" - cohere:embed-english-v3.0\n"
f"Supported providers: {_BUILTIN_PROVIDERS.keys()}"
)
raise ValueError(msg)
provider, model = model_name.split(":", 1)
provider = provider.lower().strip()
model = model.strip()
if provider not in _BUILTIN_PROVIDERS:
msg = (
f"Provider '{provider}' is not supported.\n"
f"Supported providers and their required packages:\n"
f"{_get_provider_list()}"
)
raise ValueError(msg)
if not model:
msg = "Model name cannot be empty"
raise ValueError(msg)
return provider, model
def _infer_model_and_provider(
model: str,
*,
provider: str | None = None,
) -> tuple[str, str]:
if not model.strip():
msg = "Model name cannot be empty"
raise ValueError(msg)
if provider is None and ":" in model:
provider, model_name = _parse_model_string(model)
else:
model_name = model
if not provider:
msg = (
"Must specify either:\n"
"1. A model string in format 'provider:model-name'\n"
" Example: 'openai:text-embedding-3-small'\n"
"2. Or explicitly set provider from: "
f"{_BUILTIN_PROVIDERS.keys()}"
)
raise ValueError(msg)
if provider not in _BUILTIN_PROVIDERS:
msg = (
f"Provider '{provider}' is not supported.\n"
f"Supported providers and their required packages:\n"
f"{_get_provider_list()}"
)
raise ValueError(msg)
return provider, model_name
def init_embeddings(
model: str,
*,
provider: str | None = None,
**kwargs: Any,
) -> Embeddings:
"""Initialize an embedding model from a model name and optional provider.
!!! note
Requires the integration package for the chosen model provider to be installed.
See the `model_provider` parameter below for specific package names
(e.g., `pip install langchain-openai`).
Refer to the [provider integration's API reference](https://docs.langchain.com/oss/python/integrations/providers)
for supported model parameters to use as `**kwargs`.
Args:
model: The name of the model, e.g. `'openai:text-embedding-3-small'`.
You can also specify model and model provider in a single argument using
`'{model_provider}:{model}'` format, e.g. `'openai:text-embedding-3-small'`.
provider: The model provider if not specified as part of the model arg
(see above).
Supported `provider` values and the corresponding integration package
are:
- `openai` -> [`langchain-openai`](https://docs.langchain.com/oss/python/integrations/providers/openai)
- `azure_openai` -> [`langchain-openai`](https://docs.langchain.com/oss/python/integrations/providers/openai)
- `bedrock` -> [`langchain-aws`](https://docs.langchain.com/oss/python/integrations/providers/aws)
- `cohere` -> [`langchain-cohere`](https://docs.langchain.com/oss/python/integrations/providers/cohere)
- `google_vertexai` -> [`langchain-google-vertexai`](https://docs.langchain.com/oss/python/integrations/providers/google)
- `huggingface` -> [`langchain-huggingface`](https://docs.langchain.com/oss/python/integrations/providers/huggingface)
- `mistralai` -> [`langchain-mistralai`](https://docs.langchain.com/oss/python/integrations/providers/mistralai)
- `ollama` -> [`langchain-ollama`](https://docs.langchain.com/oss/python/integrations/providers/ollama)
**kwargs: Additional model-specific parameters passed to the embedding model.
These vary by provider. Refer to the specific model provider's
[integration reference](https://reference.langchain.com/python/integrations/)
for all available parameters.
Returns:
An `Embeddings` instance that can generate embeddings for text.
Raises:
ValueError: If the model provider is not supported or cannot be determined
ImportError: If the required provider package is not installed
???+ example
```python
# pip install langchain langchain-openai
# Using a model string
model = init_embeddings("openai:text-embedding-3-small")
model.embed_query("Hello, world!")
# Using explicit provider
model = init_embeddings(model="text-embedding-3-small", provider="openai")
model.embed_documents(["Hello, world!", "Goodbye, world!"])
# With additional parameters
model = init_embeddings("openai:text-embedding-3-small", api_key="sk-...")
```
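
        The same provider-prefixed form works for other registered providers
        (a sketch, assuming `langchain-aws` is installed):

        ```python
        model = init_embeddings("bedrock:amazon.titan-embed-text-v1")
        ```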
!!! version-added "Added in `langchain` 0.3.9"
"""
if not model:
providers = _BUILTIN_PROVIDERS.keys()
msg = f"Must specify model name. Supported providers are: {', '.join(providers)}"
raise ValueError(msg)
provider, model_name = _infer_model_and_provider(model, provider=provider)
return _get_embeddings_class_creator(provider)(model=model_name, **kwargs)
__all__ = [
"Embeddings", # This one is for backwards compatibility
"init_embeddings",
]
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/langchain/embeddings/base.py",
"license": "MIT License",
"lines": 212,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
langchain-ai/langchain:libs/langchain_v1/scripts/check_imports.py | """Check imports script.
Quickly verify that a list of Python files can be loaded by the Python interpreter
without raising any errors. Run before more expensive tests; useful in
Makefiles.
If loading a file fails, the script prints the problematic filename and the detailed
error traceback.
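
Usage:
    python scripts/check_imports.py <file1.py> [<file2.py> ...]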
"""
import random
import string
import sys
import traceback
from importlib.machinery import SourceFileLoader
if __name__ == "__main__":
files = sys.argv[1:]
has_failure = False
for file in files:
try:
module_name = "".join(
random.choice(string.ascii_letters) # noqa: S311
for _ in range(20)
)
SourceFileLoader(module_name, file).load_module()
except Exception:
has_failure = True
print(file)
traceback.print_exc()
print()
sys.exit(1 if has_failure else 0)
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/scripts/check_imports.py",
"license": "MIT License",
"lines": 28,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langchain:libs/langchain_v1/tests/integration_tests/cache/fake_embeddings.py | """Fake Embedding class for testing purposes."""
import math
from langchain_core.embeddings import Embeddings
from typing_extensions import override
fake_texts = ["foo", "bar", "baz"]
class FakeEmbeddings(Embeddings):
"""Fake embeddings functionality for testing."""
@override
def embed_documents(self, texts: list[str]) -> list[list[float]]:
"""Return simple embeddings.
Embeddings encode each text as its index.
"""
return [[1.0] * 9 + [float(i)] for i in range(len(texts))]
async def aembed_documents(self, texts: list[str]) -> list[list[float]]:
return self.embed_documents(texts)
@override
def embed_query(self, text: str) -> list[float]:
"""Return constant query embeddings.
Embeddings are identical to embed_documents(texts)[0].
Distance to each text will be that text's index,
as it was passed to embed_documents.
"""
return [1.0] * 9 + [0.0]
async def aembed_query(self, text: str) -> list[float]:
return self.embed_query(text)
class ConsistentFakeEmbeddings(FakeEmbeddings):
"""Consistent fake embeddings.
Fake embeddings which remember all the texts seen so far to return consistent
vectors for the same texts.
"""
def __init__(self, dimensionality: int = 10) -> None:
self.known_texts: list[str] = []
self.dimensionality = dimensionality
def embed_documents(self, texts: list[str]) -> list[list[float]]:
"""Return consistent embeddings for each text seen so far."""
out_vectors = []
for text in texts:
if text not in self.known_texts:
self.known_texts.append(text)
vector = [1.0] * (self.dimensionality - 1) + [
float(self.known_texts.index(text)),
]
out_vectors.append(vector)
return out_vectors
def embed_query(self, text: str) -> list[float]:
"""Return consistent embeddings.
Return consistent embeddings for the text, if seen before, or a constant
one if the text is unknown.
"""
return self.embed_documents([text])[0]
class AngularTwoDimensionalEmbeddings(Embeddings):
"""From angles (as strings in units of pi) to unit embedding vectors on a circle."""
def embed_documents(self, texts: list[str]) -> list[list[float]]:
"""Make a list of texts into a list of embedding vectors."""
return [self.embed_query(text) for text in texts]
@override
def embed_query(self, text: str) -> list[float]:
"""Convert input text to a 'vector' (list of floats).
If the text is a number, use it as the angle for the
unit vector in units of pi.
Any other input text becomes the singular result [0, 0] !
"""
try:
angle = float(text)
return [math.cos(angle * math.pi), math.sin(angle * math.pi)]
except ValueError:
# Assume it's just a test string; the value doesn't matter.
return [0.0, 0.0]
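# Illustrative only: angles are given as strings in units of pi.
#
#   emb = AngularTwoDimensionalEmbeddings()
#   emb.embed_query("0.5")           # ~[0.0, 1.0] (cos(pi/2), sin(pi/2))
#   emb.embed_query("1")             # ~[-1.0, 0.0]
#   emb.embed_query("not a number")  # [0.0, 0.0]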
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/tests/integration_tests/cache/fake_embeddings.py",
"license": "MIT License",
"lines": 68,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/langchain_v1/tests/integration_tests/chat_models/test_base.py | from typing import Any, cast
import pytest
from langchain_core.language_models import BaseChatModel
from langchain_core.messages import AIMessage
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableConfig
from langchain_tests.integration_tests import ChatModelIntegrationTests
from pydantic import BaseModel
from langchain.chat_models import init_chat_model
class Multiply(BaseModel):
"""Product of two ints."""
x: int
y: int
@pytest.mark.requires("langchain_openai", "langchain_anthropic")
async def test_init_chat_model_chain() -> None:
model = init_chat_model("gpt-4o", configurable_fields="any", config_prefix="bar")
model_with_tools = model.bind_tools([Multiply])
model_with_config = model_with_tools.with_config(
RunnableConfig(tags=["foo"]),
configurable={"bar_model": "claude-sonnet-4-5-20250929"},
)
prompt = ChatPromptTemplate.from_messages([("system", "foo"), ("human", "{input}")])
chain = prompt | model_with_config
output = chain.invoke({"input": "bar"})
assert isinstance(output, AIMessage)
events = [event async for event in chain.astream_events({"input": "bar"}, version="v2")]
assert events
class TestStandard(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return cast("type[BaseChatModel]", init_chat_model)
@property
def chat_model_params(self) -> dict[str, Any]:
return {"model": "gpt-4o", "configurable_fields": "any"}
@property
def supports_image_inputs(self) -> bool:
return True
@property
def has_tool_calling(self) -> bool:
return True
@property
def has_structured_output(self) -> bool:
return True
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/tests/integration_tests/chat_models/test_base.py",
"license": "MIT License",
"lines": 43,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/langchain_v1/tests/integration_tests/embeddings/test_base.py | """Test embeddings base module."""
import importlib
import pytest
from langchain_core.embeddings import Embeddings
from langchain.embeddings.base import _BUILTIN_PROVIDERS, init_embeddings
@pytest.mark.parametrize(
("provider", "model"),
[
("openai", "text-embedding-3-large"),
("google_vertexai", "text-embedding-gecko@003"),
("bedrock", "amazon.titan-embed-text-v1"),
("cohere", "embed-english-v2.0"),
],
)
async def test_init_embedding_model(provider: str, model: str) -> None:
package = _BUILTIN_PROVIDERS[provider][0]
try:
importlib.import_module(package)
except ImportError:
pytest.skip(f"Package {package} is not installed")
model_colon = init_embeddings(f"{provider}:{model}")
assert isinstance(model_colon, Embeddings)
model_explicit = init_embeddings(
model=model,
provider=provider,
)
assert isinstance(model_explicit, Embeddings)
text = "Hello world"
embedding_colon = await model_colon.aembed_query(text)
assert isinstance(embedding_colon, list)
assert all(isinstance(x, float) for x in embedding_colon)
embedding_explicit = await model_explicit.aembed_query(text)
assert isinstance(embedding_explicit, list)
assert all(isinstance(x, float) for x in embedding_explicit)
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/tests/integration_tests/embeddings/test_base.py",
"license": "MIT License",
"lines": 34,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/langchain_v1/tests/unit_tests/chat_models/test_chat_models.py | import os
from typing import TYPE_CHECKING
from unittest import mock
import pytest
from langchain_core.language_models.fake_chat_models import FakeChatModel
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableConfig, RunnableSequence
from pydantic import SecretStr
from langchain.chat_models import __all__, init_chat_model
from langchain.chat_models.base import _BUILTIN_PROVIDERS, _attempt_infer_model_provider
if TYPE_CHECKING:
from langchain_core.language_models import BaseChatModel
EXPECTED_ALL = [
"init_chat_model",
"BaseChatModel",
]
def test_all_imports() -> None:
"""Test that all expected imports are present in the module's __all__."""
assert set(__all__) == set(EXPECTED_ALL)
@pytest.mark.requires(
"langchain_openai",
"langchain_anthropic",
"langchain_fireworks",
"langchain_groq",
)
@pytest.mark.parametrize(
("model_name", "model_provider"),
[
("gpt-4o", "openai"),
("claude-opus-4-1", "anthropic"),
("accounts/fireworks/models/mixtral-8x7b-instruct", "fireworks"),
("mixtral-8x7b-32768", "groq"),
],
)
def test_init_chat_model(model_name: str, model_provider: str | None) -> None:
llm1: BaseChatModel = init_chat_model(
model_name,
model_provider=model_provider,
api_key="foo",
)
llm2: BaseChatModel = init_chat_model(
f"{model_provider}:{model_name}",
api_key="foo",
)
assert llm1.dict() == llm2.dict()
def test_init_chat_model_rejects_model_object() -> None:
"""Passing a model object instead of a string should raise TypeError."""
with pytest.raises(TypeError, match="must be a string"):
init_chat_model(model=FakeChatModel()) # type: ignore[call-overload]
def test_init_missing_dep() -> None:
with pytest.raises(ImportError):
init_chat_model("mixtral-8x7b-32768", model_provider="groq")
def test_init_unknown_provider() -> None:
with pytest.raises(ValueError, match="Unsupported provider='bar'"):
init_chat_model("foo", model_provider="bar")
def test_supported_providers_is_sorted() -> None:
"""Test that supported providers are sorted alphabetically."""
assert list(_BUILTIN_PROVIDERS) == sorted(_BUILTIN_PROVIDERS.keys())
@pytest.mark.parametrize(
("model_name", "expected_provider"),
[
("gpt-4o", "openai"),
("o1-mini", "openai"),
("o3-mini", "openai"),
("chatgpt-4o-latest", "openai"),
("text-davinci-003", "openai"),
("claude-3-haiku-20240307", "anthropic"),
("command-r-plus", "cohere"),
("accounts/fireworks/models/mixtral-8x7b-instruct", "fireworks"),
("Accounts/Fireworks/models/mixtral-8x7b-instruct", "fireworks"),
("gemini-1.5-pro", "google_vertexai"),
("gemini-2.5-pro", "google_vertexai"),
("gemini-3-pro-preview", "google_vertexai"),
("amazon.titan-text-express-v1", "bedrock"),
("Amazon.Titan-Text-Express-v1", "bedrock"),
("anthropic.claude-v2", "bedrock"),
("Anthropic.Claude-V2", "bedrock"),
("mistral-small", "mistralai"),
("mixtral-8x7b", "mistralai"),
("deepseek-v3", "deepseek"),
("grok-beta", "xai"),
("sonar-small", "perplexity"),
("solar-pro", "upstage"),
],
)
def test_attempt_infer_model_provider(model_name: str, expected_provider: str) -> None:
assert _attempt_infer_model_provider(model_name) == expected_provider
@pytest.mark.requires("langchain_openai")
@mock.patch.dict(
os.environ,
{"OPENAI_API_KEY": "foo", "ANTHROPIC_API_KEY": "bar"},
clear=True,
)
def test_configurable() -> None:
"""Test configurable chat model behavior without default parameters.
Verifies that a configurable chat model initialized without default parameters:
- Has access to all standard runnable methods (`invoke`, `stream`, etc.)
- Blocks access to non-configurable methods until configuration is provided
- Supports declarative operations (`bind_tools`) without mutating original model
- Can chain declarative operations and configuration to access full functionality
- Properly resolves to the configured model type when parameters are provided
Example:
```python
# This creates a configurable model without specifying which model
model = init_chat_model()
# This will FAIL - no model specified yet
model.get_num_tokens("hello") # AttributeError!
# This works - provides model at runtime
response = model.invoke("Hello", config={"configurable": {"model": "gpt-4o"}})
```
"""
model = init_chat_model()
for method in (
"invoke",
"ainvoke",
"batch",
"abatch",
"stream",
"astream",
"batch_as_completed",
"abatch_as_completed",
):
assert hasattr(model, method)
# Doesn't have access to non-configurable, non-declarative methods until a config is
# provided.
for method in ("get_num_tokens", "get_num_tokens_from_messages"):
with pytest.raises(AttributeError):
getattr(model, method)
# Can call declarative methods even without a default model.
model_with_tools = model.bind_tools(
[{"name": "foo", "description": "foo", "parameters": {}}],
)
# Check that original model wasn't mutated by declarative operation.
assert model._queued_declarative_operations == []
# Can iteratively call declarative methods.
model_with_config = model_with_tools.with_config(
RunnableConfig(tags=["foo"]),
configurable={"model": "gpt-4o"},
)
assert model_with_config.model_name == "gpt-4o" # type: ignore[attr-defined]
for method in ("get_num_tokens", "get_num_tokens_from_messages"):
assert hasattr(model_with_config, method)
assert model_with_config.model_dump() == { # type: ignore[attr-defined]
"name": None,
"bound": {
"name": None,
"disable_streaming": False,
"disabled_params": None,
"model_name": "gpt-4o",
"temperature": None,
"model_kwargs": {},
"openai_api_key": SecretStr("foo"),
"openai_api_base": None,
"openai_organization": None,
"openai_proxy": None,
"output_version": None,
"request_timeout": None,
"max_retries": None,
"presence_penalty": None,
"reasoning": None,
"reasoning_effort": None,
"verbosity": None,
"frequency_penalty": None,
"context_management": None,
"include": None,
"seed": None,
"service_tier": None,
"logprobs": None,
"top_logprobs": None,
"logit_bias": None,
"streaming": False,
"n": None,
"top_p": None,
"truncation": None,
"max_tokens": None,
"tiktoken_model_name": None,
"default_headers": None,
"default_query": None,
"stop": None,
"store": None,
"extra_body": None,
"include_response_headers": False,
"stream_usage": True,
"use_previous_response_id": False,
"use_responses_api": None,
},
"kwargs": {
"tools": [
{
"type": "function",
"function": {"name": "foo", "description": "foo", "parameters": {}},
},
],
},
"config": {
"callbacks": None,
"configurable": {},
"metadata": {"model": "gpt-4o"},
"recursion_limit": 25,
"tags": ["foo"],
},
"config_factories": [],
"custom_input_type": None,
"custom_output_type": None,
}
@pytest.mark.requires("langchain_openai", "langchain_anthropic")
@mock.patch.dict(
os.environ,
{"OPENAI_API_KEY": "foo", "ANTHROPIC_API_KEY": "bar"},
clear=True,
)
def test_configurable_with_default() -> None:
"""Test configurable chat model behavior with default parameters.
Verifies that a configurable chat model initialized with default parameters:
- Has access to all standard runnable methods (`invoke`, `stream`, etc.)
- Provides immediate access to non-configurable methods (e.g. `get_num_tokens`)
- Supports model switching through runtime configuration using `config_prefix`
- Maintains proper model identity and attributes when reconfigured
- Can be used in chains with different model providers via configuration
Example:
```python
# This creates a configurable model with default parameters (model)
model = init_chat_model("gpt-4o", configurable_fields="any", config_prefix="bar")
# This works immediately - uses default gpt-4o
tokens = model.get_num_tokens("hello")
# This also works - switches to Claude at runtime
response = model.invoke(
"Hello", config={"configurable": {"my_model_model": "claude-3-sonnet-20240229"}}
)
```
"""
model = init_chat_model("gpt-4o", configurable_fields="any", config_prefix="bar")
for method in (
"invoke",
"ainvoke",
"batch",
"abatch",
"stream",
"astream",
"batch_as_completed",
"abatch_as_completed",
):
assert hasattr(model, method)
# Does have access to non-configurable, non-declarative methods since default params
# are provided.
for method in ("get_num_tokens", "get_num_tokens_from_messages", "dict"):
assert hasattr(model, method)
assert model.model_name == "gpt-4o"
model_with_tools = model.bind_tools(
[{"name": "foo", "description": "foo", "parameters": {}}],
)
model_with_config = model_with_tools.with_config(
RunnableConfig(tags=["foo"]),
configurable={"bar_model": "claude-sonnet-4-5-20250929"},
)
assert model_with_config.model == "claude-sonnet-4-5-20250929" # type: ignore[attr-defined]
assert model_with_config.model_dump() == { # type: ignore[attr-defined]
"name": None,
"bound": {
"name": None,
"disable_streaming": False,
"effort": None,
"model": "claude-sonnet-4-5-20250929",
"mcp_servers": None,
"max_tokens": 64000,
"temperature": None,
"thinking": None,
"top_k": None,
"top_p": None,
"default_request_timeout": None,
"max_retries": 2,
"stop_sequences": None,
"anthropic_api_url": "https://api.anthropic.com",
"anthropic_proxy": None,
"context_management": None,
"anthropic_api_key": SecretStr("bar"),
"betas": None,
"default_headers": None,
"model_kwargs": {},
"reuse_last_container": None,
"inference_geo": None,
"streaming": False,
"stream_usage": True,
"output_version": None,
},
"kwargs": {
"tools": [{"name": "foo", "description": "foo", "input_schema": {}}],
},
"config": {
"callbacks": None,
"configurable": {},
"metadata": {"bar_model": "claude-sonnet-4-5-20250929"},
"recursion_limit": 25,
"tags": ["foo"],
},
"config_factories": [],
"custom_input_type": None,
"custom_output_type": None,
}
prompt = ChatPromptTemplate.from_messages([("system", "foo")])
chain = prompt | model_with_config
assert isinstance(chain, RunnableSequence)
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/tests/unit_tests/chat_models/test_chat_models.py",
"license": "MIT License",
"lines": 302,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/langchain_v1/tests/unit_tests/embeddings/test_base.py | """Test embeddings base module."""
import pytest
from langchain.embeddings.base import (
_BUILTIN_PROVIDERS,
_infer_model_and_provider,
_parse_model_string,
)
@pytest.mark.parametrize(
("model_string", "expected_provider", "expected_model"),
[
("openai:text-embedding-3-small", "openai", "text-embedding-3-small"),
("bedrock:amazon.titan-embed-text-v1", "bedrock", "amazon.titan-embed-text-v1"),
("huggingface:BAAI/bge-base-en:v1.5", "huggingface", "BAAI/bge-base-en:v1.5"),
("google_genai:gemini-embedding-001", "google_genai", "gemini-embedding-001"),
],
)
def test_parse_model_string(model_string: str, expected_provider: str, expected_model: str) -> None:
"""Test parsing model strings into provider and model components."""
assert _parse_model_string(model_string) == (
expected_provider,
expected_model,
)
def test_parse_model_string_errors() -> None:
"""Test error cases for model string parsing."""
with pytest.raises(ValueError, match="Model name must be"):
_parse_model_string("just-a-model-name")
with pytest.raises(ValueError, match="Invalid model format "):
_parse_model_string("")
with pytest.raises(ValueError, match="is not supported"):
_parse_model_string(":model-name")
with pytest.raises(ValueError, match="Model name cannot be empty"):
_parse_model_string("openai:")
with pytest.raises(
ValueError,
match="Provider 'invalid-provider' is not supported",
):
_parse_model_string("invalid-provider:model-name")
for provider in _BUILTIN_PROVIDERS:
with pytest.raises(ValueError, match=f"{provider}"):
_parse_model_string("invalid-provider:model-name")
def test_infer_model_and_provider() -> None:
"""Test model and provider inference from different input formats."""
assert _infer_model_and_provider("openai:text-embedding-3-small") == (
"openai",
"text-embedding-3-small",
)
assert _infer_model_and_provider(
model="text-embedding-3-small",
provider="openai",
) == ("openai", "text-embedding-3-small")
assert _infer_model_and_provider(
model="ft:text-embedding-3-small",
provider="openai",
) == ("openai", "ft:text-embedding-3-small")
assert _infer_model_and_provider(model="openai:ft:text-embedding-3-small") == (
"openai",
"ft:text-embedding-3-small",
)
def test_infer_model_and_provider_errors() -> None:
"""Test error cases for model and provider inference."""
# Test missing provider
with pytest.raises(ValueError, match="Must specify either"):
_infer_model_and_provider("text-embedding-3-small")
# Test empty model
with pytest.raises(ValueError, match="Model name cannot be empty"):
_infer_model_and_provider("")
# Test empty provider with model
with pytest.raises(ValueError, match="Must specify either"):
_infer_model_and_provider("model", provider="")
# Test invalid provider
with pytest.raises(ValueError, match="Provider 'invalid' is not supported") as exc:
_infer_model_and_provider("model", provider="invalid")
# Test provider list is in error
for provider in _BUILTIN_PROVIDERS:
assert provider in str(exc.value)
@pytest.mark.parametrize(
"provider",
sorted(_BUILTIN_PROVIDERS.keys()),
)
def test_supported_providers_package_names(provider: str) -> None:
"""Test that all supported providers have valid package names."""
package = _BUILTIN_PROVIDERS[provider][0]
assert "-" not in package
assert package.startswith("langchain_")
assert package.islower()
def test_is_sorted() -> None:
assert list(_BUILTIN_PROVIDERS) == sorted(_BUILTIN_PROVIDERS.keys())
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/tests/unit_tests/embeddings/test_base.py",
"license": "MIT License",
"lines": 87,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/langchain_v1/tests/unit_tests/embeddings/test_imports.py | from langchain import embeddings
EXPECTED_ALL = [
"Embeddings",
"init_embeddings",
]
def test_all_imports() -> None:
assert set(embeddings.__all__) == set(EXPECTED_ALL)
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/tests/unit_tests/embeddings/test_imports.py",
"license": "MIT License",
"lines": 7,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/langchain_v1/tests/unit_tests/test_dependencies.py | """A unit test meant to catch accidental introduction of non-optional dependencies."""
from collections.abc import Mapping
from pathlib import Path
from typing import Any
import pytest
import toml
from packaging.requirements import Requirement
HERE = Path(__file__).parent
PYPROJECT_TOML = HERE / "../../pyproject.toml"
@pytest.fixture
def uv_conf() -> dict[str, Any]:
"""Load the pyproject.toml file."""
with PYPROJECT_TOML.open() as f:
return toml.load(f)
def test_required_dependencies(uv_conf: Mapping[str, Any]) -> None:
"""A test that checks if a new non-optional dependency is being introduced.
If this test is triggered, it means that a contributor is trying to introduce a new
required dependency. This should be avoided in most situations.
"""
# Get the dependencies from the [project] dependencies section
dependencies = uv_conf["project"]["dependencies"]
required_dependencies = {Requirement(dep).name for dep in dependencies}
assert sorted(required_dependencies) == sorted(
[
"langchain-core",
"langgraph",
"pydantic",
]
)
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/tests/unit_tests/test_dependencies.py",
"license": "MIT License",
"lines": 29,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/langchain_v1/tests/unit_tests/test_imports.py | import importlib
import warnings
from pathlib import Path
# Attempt to recursively import all modules in langchain
PKG_ROOT = Path(__file__).parent.parent.parent
def test_import_all() -> None:
"""Generate the public API for this package."""
with warnings.catch_warnings():
warnings.filterwarnings(action="ignore", category=UserWarning)
library_code = PKG_ROOT / "langchain"
for path in library_code.rglob("*.py"):
# Calculate the relative path to the module
module_name = path.relative_to(PKG_ROOT).with_suffix("").as_posix().replace("/", ".")
if module_name.endswith("__init__"):
# Without init
module_name = module_name.rsplit(".", 1)[0]
mod = importlib.import_module(module_name)
all_attrs = getattr(mod, "__all__", [])
for name in all_attrs:
# Attempt to import the name from the module
try:
obj = getattr(mod, name)
assert obj is not None
except Exception as e:
msg = f"Could not import {module_name}.{name}"
raise AssertionError(msg) from e
def test_import_all_using_dir() -> None:
"""Generate the public API for this package."""
library_code = PKG_ROOT / "langchain"
for path in library_code.rglob("*.py"):
# Calculate the relative path to the module
module_name = path.relative_to(PKG_ROOT).with_suffix("").as_posix().replace("/", ".")
if module_name.endswith("__init__"):
# Without init
module_name = module_name.rsplit(".", 1)[0]
try:
mod = importlib.import_module(module_name)
except ModuleNotFoundError as e:
msg = f"Could not import {module_name}"
raise ModuleNotFoundError(msg) from e
attributes = dir(mod)
for name in attributes:
if name.strip().startswith("_"):
continue
# Attempt to import the name from the module
getattr(mod, name)
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/tests/unit_tests/test_imports.py",
"license": "MIT License",
"lines": 46,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/langchain_v1/tests/unit_tests/test_pytest_config.py | import pytest
import pytest_socket
import requests
def test_socket_disabled() -> None:
"""This test should fail."""
with pytest.raises(pytest_socket.SocketBlockedError):
requests.get("https://www.example.com", timeout=1)
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/tests/unit_tests/test_pytest_config.py",
"license": "MIT License",
"lines": 7,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/langchain_v1/tests/unit_tests/tools/test_imports.py | from langchain import tools
EXPECTED_ALL = {
"BaseTool",
"InjectedState",
"InjectedStore",
"InjectedToolArg",
"InjectedToolCallId",
"ToolException",
"ToolRuntime",
"tool",
}
def test_all_imports() -> None:
assert set(tools.__all__) == EXPECTED_ALL
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/langchain_v1/tests/unit_tests/tools/test_imports.py",
"license": "MIT License",
"lines": 13,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/core/tests/unit_tests/utils/test_strings.py | """Test string utilities."""
from langchain_core.utils.strings import (
comma_list,
sanitize_for_postgres,
stringify_dict,
stringify_value,
)
def test_sanitize_for_postgres() -> None:
"""Test sanitizing text for PostgreSQL compatibility."""
# Test with NUL bytes
text_with_nul = "Hello\x00world\x00test"
expected = "Helloworldtest"
assert sanitize_for_postgres(text_with_nul) == expected
# Test with replacement character
expected_with_replacement = "Hello world test"
assert sanitize_for_postgres(text_with_nul, " ") == expected_with_replacement
# Test with text without NUL bytes
clean_text = "Hello world"
assert sanitize_for_postgres(clean_text) == clean_text
# Test empty string
assert not sanitize_for_postgres("")
# Test with multiple consecutive NUL bytes
text_with_multiple_nuls = "Hello\x00\x00\x00world"
assert sanitize_for_postgres(text_with_multiple_nuls) == "Helloworld"
assert sanitize_for_postgres(text_with_multiple_nuls, "-") == "Hello---world"
def test_existing_string_functions() -> None:
"""Test existing string functions still work."""
# Test comma_list
assert comma_list([1, 2, 3]) == "1, 2, 3"
assert comma_list(["a", "b", "c"]) == "a, b, c"
# Test stringify_value
assert stringify_value("hello") == "hello"
assert stringify_value(42) == "42"
# Test stringify_dict
data = {"key": "value", "number": 123}
result = stringify_dict(data)
assert "key: value" in result
assert "number: 123" in result
def test_stringify_value_nested_structures() -> None:
"""Test stringifying nested structures."""
# Test nested dict in list
nested_data = {
"users": [
{"name": "Alice", "age": 25},
{"name": "Bob", "age": 30},
],
"metadata": {"total_users": 2, "active": True},
}
result = stringify_value(nested_data)
# Should contain all the nested values
assert "users:" in result
assert "name: Alice" in result
assert "name: Bob" in result
assert "metadata:" in result
assert "total_users: 2" in result
assert "active: True" in result
# Test list of mixed types
mixed_list = ["string", 42, {"key": "value"}, ["nested", "list"]]
result = stringify_value(mixed_list)
assert "string" in result
assert "42" in result
assert "key: value" in result
assert "nested" in result
assert "list" in result
def test_comma_list_with_iterables() -> None:
"""Test `comma_list` works with various iterable types."""
# Tuple
assert comma_list((1, 2, 3)) == "1, 2, 3"
# Generator
assert comma_list(x for x in range(3)) == "0, 1, 2"
# Range
assert comma_list(range(3)) == "0, 1, 2"
# Empty iterable
assert comma_list([]) == ""
assert comma_list(()) == ""
# Single item
assert comma_list([1]) == "1"
assert comma_list(("single",)) == "single"
# Mixed types
assert comma_list([1, "two", 3.0]) == "1, two, 3.0"
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/core/tests/unit_tests/utils/test_strings.py",
"license": "MIT License",
"lines": 80,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/partners/ollama/langchain_ollama/_utils.py | """Utility function to validate Ollama models."""
from __future__ import annotations
import base64
from urllib.parse import unquote, urlparse
from httpx import ConnectError
from ollama import Client, ResponseError
def validate_model(client: Client, model_name: str) -> None:
"""Validate that a model exists in the local Ollama instance.
Args:
client: The Ollama client.
model_name: The name of the model to validate.
Raises:
ValueError: If the model is not found or if there's a connection issue.
"""
try:
response = client.list()
model_names: list[str] = [model["model"] for model in response["models"]]
if not any(
model_name == m or m.startswith(f"{model_name}:") for m in model_names
):
msg = (
f"Model `{model_name}` not found in Ollama. Please pull the "
f"model (using `ollama pull {model_name}`) or specify a valid "
f"model name. Available local models: {', '.join(model_names)}"
)
raise ValueError(msg)
except ConnectError as e:
msg = (
"Failed to connect to Ollama. Please check that Ollama is downloaded, "
"running and accessible. https://ollama.com/download"
)
raise ValueError(msg) from e
except ResponseError as e:
msg = (
"Received an error from the Ollama API. "
"Please check your Ollama server logs."
)
raise ValueError(msg) from e
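# Illustrative only (host value assumed):
#
#   from ollama import Client
#   validate_model(Client(host="http://localhost:11434"), "llama3")
#   # raises ValueError if the model isn't pulled or Ollama is unreachable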
def parse_url_with_auth(
url: str | None,
) -> tuple[str | None, dict[str, str] | None]:
"""Parse URL and extract `userinfo` credentials for headers.
Handles URLs of the form: `https://user:password@host:port/path`
Args:
url: The URL to parse.
Returns:
A tuple of `(cleaned_url, headers_dict)` where:
- `cleaned_url` is the URL without authentication credentials if any were
found. Otherwise, returns the original URL.
- `headers_dict` contains Authorization header if credentials were found.
"""
if not url:
return None, None
parsed = urlparse(url)
if not parsed.scheme or not parsed.netloc or not parsed.hostname:
return None, None
if not parsed.username:
return url, None
# Handle case where password might be empty string or None
password = parsed.password or ""
# Create basic auth header (decode percent-encoding)
username = unquote(parsed.username)
password = unquote(password)
credentials = f"{username}:{password}"
encoded_credentials = base64.b64encode(credentials.encode()).decode()
headers = {"Authorization": f"Basic {encoded_credentials}"}
# Strip credentials from URL
cleaned_netloc = parsed.hostname or ""
if parsed.port:
cleaned_netloc += f":{parsed.port}"
cleaned_url = f"{parsed.scheme}://{cleaned_netloc}"
if parsed.path:
cleaned_url += parsed.path
if parsed.query:
cleaned_url += f"?{parsed.query}"
if parsed.fragment:
cleaned_url += f"#{parsed.fragment}"
return cleaned_url, headers
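# Illustrative only (values assumed): userinfo credentials are moved into a
# Basic auth header and stripped from the returned URL.
#
#   parse_url_with_auth("https://user:pass@localhost:11434/api")
#   # -> ("https://localhost:11434/api",
#   #     {"Authorization": "Basic dXNlcjpwYXNz"})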
def merge_auth_headers(
client_kwargs: dict,
auth_headers: dict[str, str] | None,
) -> None:
"""Merge authentication headers into client kwargs in-place.
Args:
client_kwargs: The client kwargs dict to update.
auth_headers: Headers to merge (typically from `parse_url_with_auth`).
"""
if auth_headers:
headers = client_kwargs.get("headers", {})
headers.update(auth_headers)
client_kwargs["headers"] = headers
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/partners/ollama/langchain_ollama/_utils.py",
"license": "MIT License",
"lines": 91,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/text-splitters/tests/unit_tests/test_html_security.py | """Security tests for HTML splitters to prevent XXE attacks."""
import pytest
from langchain_text_splitters.html import HTMLSectionSplitter
@pytest.mark.requires("lxml", "bs4")
class TestHTMLSectionSplitterSecurity:
"""Security tests for HTMLSectionSplitter to ensure XXE prevention."""
def test_xxe_entity_attack_blocked(self) -> None:
"""Test that external entity attacks are blocked."""
# Create HTML content to process
html_content = """<html><body><p>Test content</p></body></html>"""
# Since xslt_path parameter is removed, this attack vector is eliminated
# The splitter should use only the default XSLT
splitter = HTMLSectionSplitter(headers_to_split_on=[("h1", "Header 1")])
# Process the HTML - should not contain any external entity content
result = splitter.split_text(html_content)
# Verify that no external entity content is present
all_content = " ".join([doc.page_content for doc in result])
assert "root:" not in all_content # /etc/passwd content
assert "XXE Attack Result" not in all_content
def test_xxe_document_function_blocked(self) -> None:
"""Test that XSLT document() function attacks are blocked."""
# Even if someone modifies the default XSLT internally,
# the secure parser configuration should block document() attacks
html_content = (
"""<html><body><h1>Test Header</h1><p>Test content</p></body></html>"""
)
splitter = HTMLSectionSplitter(headers_to_split_on=[("h1", "Header 1")])
# Process the HTML safely
result = splitter.split_text(html_content)
# Should process normally without any security issues
assert len(result) > 0
assert any("Test content" in doc.page_content for doc in result)
def test_secure_parser_configuration(self) -> None:
"""Test that parsers are configured with security settings."""
# This test verifies our security hardening is in place
html_content = """<html><body><h1>Test</h1></body></html>"""
splitter = HTMLSectionSplitter(headers_to_split_on=[("h1", "Header 1")])
# The convert_possible_tags_to_header method should use secure parsers
result = splitter.convert_possible_tags_to_header(html_content)
# Result should be valid transformed HTML
assert result is not None
assert isinstance(result, str)
def test_no_network_access(self) -> None:
"""Test that network access is blocked in parsers."""
# Create HTML that might trigger network access
html_with_external_ref = """<?xml version="1.0"?>
<!DOCTYPE html [
<!ENTITY external SYSTEM "http://attacker.com/xxe">
]>
<html>
<body>
<h1>Test</h1>
<p>&external;</p>
</body>
</html>"""
splitter = HTMLSectionSplitter(headers_to_split_on=[("h1", "Header 1")])
# Process the HTML - should not make network requests
result = splitter.split_text(html_with_external_ref)
# Verify no external content is included
all_content = " ".join([doc.page_content for doc in result])
assert "attacker.com" not in all_content
def test_dtd_processing_disabled(self) -> None:
"""Test that DTD processing is disabled."""
# HTML with DTD that attempts to define entities
html_with_dtd = """<!DOCTYPE html [
<!ELEMENT html (body)>
<!ELEMENT body (h1, p)>
<!ELEMENT h1 (#PCDATA)>
<!ELEMENT p (#PCDATA)>
<!ENTITY test "This is a test entity">
]>
<html>
<body>
<h1>Header</h1>
<p>&test;</p>
</body>
</html>"""
splitter = HTMLSectionSplitter(headers_to_split_on=[("h1", "Header 1")])
# Process the HTML - entities should not be resolved
result = splitter.split_text(html_with_dtd)
# The entity should not be expanded
all_content = " ".join([doc.page_content for doc in result])
assert "This is a test entity" not in all_content
def test_safe_default_xslt_usage(self) -> None:
"""Test that the default XSLT file is used safely."""
# Test with HTML that has font-size styling (what the default XSLT handles)
html_with_font_size = """<html>
<body>
<span style="font-size: 24px;">Large Header</span>
<p>Content under large text</p>
<span style="font-size: 18px;">Small Header</span>
<p>Content under small text</p>
</body>
</html>"""
splitter = HTMLSectionSplitter(headers_to_split_on=[("h1", "Header 1")])
# Process the HTML using the default XSLT
result = splitter.split_text(html_with_font_size)
# Should successfully process the content
assert len(result) > 0
# Large font text should be converted to header
assert any("Large Header" in str(doc.metadata.values()) for doc in result)
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/text-splitters/tests/unit_tests/test_html_security.py",
"license": "MIT License",
"lines": 101,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/partners/anthropic/langchain_anthropic/_client_utils.py | """Helpers for creating Anthropic API clients.
This module allows for the caching of httpx clients to avoid creating new instances
for each instance of ChatAnthropic.
Logic is largely replicated from anthropic._base_client.
"""
from __future__ import annotations
import asyncio
import os
from functools import lru_cache
from typing import Any
import anthropic
_NOT_GIVEN: Any = object()
class _SyncHttpxClientWrapper(anthropic.DefaultHttpxClient):
"""Borrowed from anthropic._base_client."""
def __del__(self) -> None:
if self.is_closed:
return
try:
self.close()
except Exception: # noqa: S110
pass
class _AsyncHttpxClientWrapper(anthropic.DefaultAsyncHttpxClient):
"""Borrowed from anthropic._base_client."""
def __del__(self) -> None:
if self.is_closed:
return
try:
# TODO(someday): support non asyncio runtimes here
asyncio.get_running_loop().create_task(self.aclose())
except Exception: # noqa: S110
pass
@lru_cache
def _get_default_httpx_client(
*,
base_url: str | None,
timeout: Any = _NOT_GIVEN,
anthropic_proxy: str | None = None,
) -> _SyncHttpxClientWrapper:
kwargs: dict[str, Any] = {
"base_url": base_url
or os.environ.get("ANTHROPIC_BASE_URL")
or "https://api.anthropic.com",
}
if timeout is not _NOT_GIVEN:
kwargs["timeout"] = timeout
if anthropic_proxy is not None:
kwargs["proxy"] = anthropic_proxy
return _SyncHttpxClientWrapper(**kwargs)
@lru_cache
def _get_default_async_httpx_client(
*,
base_url: str | None,
timeout: Any = _NOT_GIVEN,
anthropic_proxy: str | None = None,
) -> _AsyncHttpxClientWrapper:
kwargs: dict[str, Any] = {
"base_url": base_url
or os.environ.get("ANTHROPIC_BASE_URL")
or "https://api.anthropic.com",
}
if timeout is not _NOT_GIVEN:
kwargs["timeout"] = timeout
if anthropic_proxy is not None:
kwargs["proxy"] = anthropic_proxy
return _AsyncHttpxClientWrapper(**kwargs)
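# Illustrative only: thanks to @lru_cache, identical keyword arguments return
# the same wrapper instance, so ChatAnthropic objects that share connection
# settings also share one underlying httpx client.
#
#   a = _get_default_httpx_client(base_url=None)
#   b = _get_default_httpx_client(base_url=None)
#   assert a is b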
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/partners/anthropic/langchain_anthropic/_client_utils.py",
"license": "MIT License",
"lines": 65,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/core/tests/unit_tests/vectorstores/test_utils.py | """Tests for langchain_core.vectorstores.utils module."""
import math
import pytest
pytest.importorskip("numpy")
import numpy as np
from langchain_core.vectorstores.utils import _cosine_similarity
class TestCosineSimilarity:
"""Tests for _cosine_similarity function."""
def test_basic_cosine_similarity(self) -> None:
"""Test basic cosine similarity calculation."""
# Simple orthogonal vectors
x: list[list[float]] = [[1, 0], [0, 1]]
y: list[list[float]] = [[1, 0], [0, 1]]
result = _cosine_similarity(x, y)
expected = np.array([[1.0, 0.0], [0.0, 1.0]])
np.testing.assert_array_almost_equal(result, expected)
def test_identical_vectors(self) -> None:
"""Test cosine similarity of identical vectors."""
x: list[list[float]] = [[1, 2, 3]]
y: list[list[float]] = [[1, 2, 3]]
result = _cosine_similarity(x, y)
expected = np.array([[1.0]])
np.testing.assert_array_almost_equal(result, expected)
def test_opposite_vectors(self) -> None:
"""Test cosine similarity of opposite vectors."""
x: list[list[float]] = [[1, 2, 3]]
y: list[list[float]] = [[-1, -2, -3]]
result = _cosine_similarity(x, y)
expected = np.array([[-1.0]])
np.testing.assert_array_almost_equal(result, expected)
def test_zero_vector(self) -> None:
"""Test cosine similarity with zero vector."""
x: list[list[float]] = [[0, 0, 0]]
y: list[list[float]] = [[1, 2, 3]]
with pytest.raises(ValueError, match="NaN values found"):
_cosine_similarity(x, y)
def test_multiple_vectors(self) -> None:
"""Test cosine similarity with multiple vectors."""
x: list[list[float]] = [[1, 0], [0, 1], [1, 1]]
y: list[list[float]] = [[1, 0], [0, 1]]
result = _cosine_similarity(x, y)
expected = np.array(
[
[1.0, 0.0],
[0.0, 1.0],
[1 / math.sqrt(2), 1 / math.sqrt(2)],
]
)
np.testing.assert_array_almost_equal(result, expected)
def test_numpy_array_input(self) -> None:
"""Test with numpy array inputs."""
x: np.ndarray = np.array([[1, 0], [0, 1]])
y: np.ndarray = np.array([[1, 0], [0, 1]])
result = _cosine_similarity(x, y)
expected = np.array([[1.0, 0.0], [0.0, 1.0]])
np.testing.assert_array_almost_equal(result, expected)
def test_mixed_input_types(self) -> None:
"""Test with mixed input types (list and numpy array)."""
x: list[list[float]] = [[1, 0], [0, 1]]
y: np.ndarray = np.array([[1, 0], [0, 1]])
result = _cosine_similarity(x, y)
expected = np.array([[1.0, 0.0], [0.0, 1.0]])
np.testing.assert_array_almost_equal(result, expected)
def test_higher_dimensions(self) -> None:
"""Test with higher dimensional vectors."""
x: list[list[float]] = [[1, 0, 0, 0], [0, 1, 0, 0]]
y: list[list[float]] = [[1, 0, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]
result = _cosine_similarity(x, y)
expected = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
np.testing.assert_array_almost_equal(result, expected)
def test_empty_matrices(self) -> None:
"""Test with empty matrices."""
x: list[list[float]] = []
y: list[list[float]] = []
result = _cosine_similarity(x, y)
expected = np.array([[]])
np.testing.assert_array_equal(result, expected)
def test_single_empty_matrix(self) -> None:
"""Test with one empty matrix."""
x: list[list[float]] = []
y: list[list[float]] = [[1, 2, 3]]
result = _cosine_similarity(x, y)
expected = np.array([[]])
np.testing.assert_array_equal(result, expected)
def test_dimension_mismatch_error(self) -> None:
"""Test error when matrices have different number of columns."""
x: list[list[float]] = [[1, 2]] # 2 columns
y: list[list[float]] = [[1, 2, 3]] # 3 columns
with pytest.raises(
ValueError, match="Number of columns in X and Y must be the same"
):
_cosine_similarity(x, y)
def test_nan_and_inf_handling(self) -> None:
"""Test that NaN and inf values are handled properly."""
# Create vectors that would result in NaN/inf in similarity calculation
x: list[list[float]] = [[0, 0]] # Zero vector
y: list[list[float]] = [[0, 0]] # Zero vector
with pytest.raises(ValueError, match="NaN values found"):
_cosine_similarity(x, y)
def test_large_values(self) -> None:
"""Test with large values to check numerical stability."""
x: list[list[float]] = [[1e6, 1e6]]
y: list[list[float]] = [[1e6, 1e6], [1e6, -1e6]]
result = _cosine_similarity(x, y)
expected = np.array([[1.0, 0.0]])
np.testing.assert_array_almost_equal(result, expected)
def test_small_values(self) -> None:
"""Test with very small values."""
x: list[list[float]] = [[1e-10, 1e-10]]
y: list[list[float]] = [[1e-10, 1e-10], [1e-10, -1e-10]]
result = _cosine_similarity(x, y)
expected = np.array([[1.0, 0.0]])
np.testing.assert_array_almost_equal(result, expected)
def test_single_vector_vs_multiple(self) -> None:
"""Test single vector against multiple vectors."""
x: list[list[float]] = [[1, 1]]
y: list[list[float]] = [[1, 0], [0, 1], [1, 1], [-1, -1]]
result = _cosine_similarity(x, y)
expected = np.array(
[
[
1 / math.sqrt(2), # cos(45°)
1 / math.sqrt(2), # cos(45°)
1.0, # cos(0°)
-1.0, # cos(180°)
]
]
)
np.testing.assert_array_almost_equal(result, expected)
def test_single_dimension_vectors(self) -> None:
"""Test with single-dimension vectors."""
x: list[list[float]] = [[5], [-3]]
y: list[list[float]] = [[2], [-1], [4]]
result = _cosine_similarity(x, y)
expected = np.array(
[
[1.0, -1.0, 1.0], # [5] vs [2], [-1], [4]
[-1.0, 1.0, -1.0], # [-3] vs [2], [-1], [4]
]
)
np.testing.assert_array_almost_equal(result, expected)
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/core/tests/unit_tests/vectorstores/test_utils.py",
"license": "MIT License",
"lines": 141,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/partners/openai/langchain_openai/chat_models/_compat.py | """Converts between AIMessage output formats, governed by `output_version`.
`output_version` is an attribute on ChatOpenAI.
Supported values are `None`, `'v0'`, and `'responses/v1'`.
`'v0'` corresponds to the format as of `ChatOpenAI` v0.3. For the Responses API, it
stores reasoning and tool outputs in `AIMessage.additional_kwargs`:
```python
AIMessage(
content=[
{"type": "text", "text": "Hello, world!", "annotations": [{"type": "foo"}]}
],
additional_kwargs={
"reasoning": {
"type": "reasoning",
"id": "rs_123",
"summary": [{"type": "summary_text", "text": "Reasoning summary"}],
},
"tool_outputs": [
{
"type": "web_search_call",
"id": "websearch_123",
"status": "completed",
}
],
"refusal": "I cannot assist with that.",
},
response_metadata={"id": "resp_123"},
id="msg_123",
)
```
`'responses/v1'` is only applicable to the Responses API. It retains information
about response item sequencing and accommodates multiple reasoning items by
representing these items in the content sequence:
```python
AIMessage(
content=[
{
"type": "reasoning",
"summary": [{"type": "summary_text", "text": "Reasoning summary"}],
"id": "rs_123",
},
{
"type": "text",
"text": "Hello, world!",
"annotations": [{"type": "foo"}],
"id": "msg_123",
},
{"type": "refusal", "refusal": "I cannot assist with that."},
{"type": "web_search_call", "id": "websearch_123", "status": "completed"},
],
response_metadata={"id": "resp_123"},
id="resp_123",
)
```
There are other small improvements as well; e.g., we store message IDs on text
content blocks rather than on AIMessage.id, which now stores the response ID.
For backwards compatibility, this module provides functions to convert between the
formats. The functions are used internally by ChatOpenAI.
"""
from __future__ import annotations
import json
from collections.abc import Iterable, Iterator
from typing import Any, cast
from langchain_core.messages import AIMessage, is_data_content_block
from langchain_core.messages import content as types
_FUNCTION_CALL_IDS_MAP_KEY = "__openai_function_call_ids__"
# v0.3 / Responses
def _convert_to_v03_ai_message(
message: AIMessage, has_reasoning: bool = False
) -> AIMessage:
"""Mutate an `AIMessage` to the old-style v0.3 format."""
if isinstance(message.content, list):
new_content: list[dict | str] = []
for block in message.content:
if isinstance(block, dict):
if block.get("type") == "reasoning":
# Store a reasoning item in additional_kwargs (overwriting as in
# v0.3)
_ = block.pop("index", None)
if has_reasoning:
_ = block.pop("id", None)
_ = block.pop("type", None)
message.additional_kwargs["reasoning"] = block
elif block.get("type") in (
"web_search_call",
"file_search_call",
"computer_call",
"code_interpreter_call",
"mcp_call",
"mcp_list_tools",
"mcp_approval_request",
"image_generation_call",
):
# Store built-in tool calls in additional_kwargs
if "tool_outputs" not in message.additional_kwargs:
message.additional_kwargs["tool_outputs"] = []
message.additional_kwargs["tool_outputs"].append(block)
elif block.get("type") == "function_call":
# Store function call item IDs in additional_kwargs, otherwise
# discard function call items.
if _FUNCTION_CALL_IDS_MAP_KEY not in message.additional_kwargs:
message.additional_kwargs[_FUNCTION_CALL_IDS_MAP_KEY] = {}
if (call_id := block.get("call_id")) and (
function_call_id := block.get("id")
):
message.additional_kwargs[_FUNCTION_CALL_IDS_MAP_KEY][
call_id
] = function_call_id
elif (block.get("type") == "refusal") and (
refusal := block.get("refusal")
):
# Store a refusal item in additional_kwargs (overwriting as in
# v0.3)
message.additional_kwargs["refusal"] = refusal
elif block.get("type") == "text":
# Store a message item ID on AIMessage.id
if "id" in block:
message.id = block["id"]
new_content.append({k: v for k, v in block.items() if k != "id"})
elif (
set(block.keys()) == {"id", "index"}
and isinstance(block["id"], str)
and block["id"].startswith("msg_")
):
# Drop message IDs in streaming case
new_content.append({"index": block["index"]})
else:
new_content.append(block)
else:
new_content.append(block)
message.content = new_content
if isinstance(message.id, str) and message.id.startswith("resp_"):
message.id = None
else:
pass
return message
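# Illustrative only (block shapes assumed): reasoning items move into
# additional_kwargs, text blocks keep their content, and the text item's ID
# is promoted to AIMessage.id. Note the message is mutated in place.
#
#   msg = AIMessage(content=[
#       {"type": "reasoning", "id": "rs_1",
#        "summary": [{"type": "summary_text", "text": "hm"}]},
#       {"type": "text", "text": "Hi", "id": "msg_1"},
#   ])
#   out = _convert_to_v03_ai_message(msg)
#   # out.additional_kwargs["reasoning"]["id"] == "rs_1"
#   # out.content == [{"type": "text", "text": "Hi"}]
#   # out.id == "msg_1"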
# v1 / Chat Completions
def _convert_from_v1_to_chat_completions(message: AIMessage) -> AIMessage:
"""Convert a v1 message to the Chat Completions format."""
if isinstance(message.content, list):
new_content: list = []
for block in message.content:
if isinstance(block, dict):
block_type = block.get("type")
if block_type == "text":
# Strip annotations
new_content.append({"type": "text", "text": block["text"]})
elif block_type in ("reasoning", "tool_call"):
pass
else:
new_content.append(block)
else:
new_content.append(block)
return message.model_copy(update={"content": new_content})
return message
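# Illustrative only (block shapes assumed): text blocks are stripped of
# annotations, while v1 "reasoning" and "tool_call" blocks are dropped since
# Chat Completions has no place for them in message content.
#
#   msg = AIMessage(content=[
#       {"type": "reasoning", "reasoning": "think..."},
#       {"type": "text", "text": "Hi", "annotations": [{"type": "foo"}]},
#   ])
#   _convert_from_v1_to_chat_completions(msg).content
#   # -> [{"type": "text", "text": "Hi"}]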
# v1 / Responses
def _convert_annotation_from_v1(annotation: types.Annotation) -> dict[str, Any]:
"""Convert a v1 `Annotation` to the v0.3 format (for Responses API)."""
if annotation["type"] == "citation":
new_ann: dict[str, Any] = {}
for field in ("end_index", "start_index"):
if field in annotation:
new_ann[field] = annotation[field]
if "url" in annotation:
# URL citation
if "title" in annotation:
new_ann["title"] = annotation["title"]
new_ann["type"] = "url_citation"
new_ann["url"] = annotation["url"]
if extra_fields := annotation.get("extras"):
new_ann.update(dict(extra_fields.items()))
else:
# Document citation
new_ann["type"] = "file_citation"
if extra_fields := annotation.get("extras"):
new_ann.update(dict(extra_fields.items()))
if "title" in annotation:
new_ann["filename"] = annotation["title"]
return new_ann
if annotation["type"] == "non_standard_annotation":
return annotation["value"]
return dict(annotation)
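# Illustrative only: a v1 URL citation collapses to the v0.3 shape.
#
#   _convert_annotation_from_v1(
#       {"type": "citation", "url": "https://example.test", "title": "X"}
#   )
#   # -> {"title": "X", "type": "url_citation", "url": "https://example.test"}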
def _implode_reasoning_blocks(blocks: list[dict[str, Any]]) -> Iterable[dict[str, Any]]:
"""Collapse runs of v1 reasoning blocks into single Responses-style items."""
i = 0
n = len(blocks)
while i < n:
block = blocks[i]
# Skip non-reasoning blocks or blocks already in Responses format
if block.get("type") != "reasoning" or "summary" in block:
yield dict(block)
i += 1
continue
elif "reasoning" not in block and "summary" not in block:
# {"type": "reasoning", "id": "rs_..."}
oai_format = {**block, "summary": []}
if "extras" in oai_format:
oai_format.update(oai_format.pop("extras"))
oai_format["type"] = oai_format.pop("type", "reasoning")
if "encrypted_content" in oai_format:
oai_format["encrypted_content"] = oai_format.pop("encrypted_content")
yield oai_format
i += 1
continue
else:
pass
summary: list[dict[str, str]] = [
{"type": "summary_text", "text": block.get("reasoning", "")}
]
# 'common' is every field except the exploded 'reasoning'
common = {k: v for k, v in block.items() if k != "reasoning"}
if "extras" in common:
common.update(common.pop("extras"))
i += 1
while i < n:
next_ = blocks[i]
if next_.get("type") == "reasoning" and "reasoning" in next_:
summary.append(
{"type": "summary_text", "text": next_.get("reasoning", "")}
)
i += 1
else:
break
merged = dict(common)
merged["summary"] = summary
merged["type"] = merged.pop("type", "reasoning")
yield merged
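# Illustrative only: consecutive reasoning blocks merge into one item whose
# summary holds a summary_text part per block.
#
#   blocks = [
#       {"type": "reasoning", "id": "rs_1", "reasoning": "a"},
#       {"type": "reasoning", "id": "rs_1", "reasoning": "b"},
#   ]
#   list(_implode_reasoning_blocks(blocks))
#   # -> [{"id": "rs_1",
#   #      "summary": [{"type": "summary_text", "text": "a"},
#   #                  {"type": "summary_text", "text": "b"}],
#   #      "type": "reasoning"}]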
def _consolidate_calls(items: Iterable[dict[str, Any]]) -> Iterator[dict[str, Any]]:
"""Generator that walks through *items* and, whenever it meets the pair.
{"type": "server_tool_call", "name": "web_search", "id": X, ...}
{"type": "server_tool_result", "id": X}
merges them into
{"id": X,
"output": ...,
"status": ...,
"type": "web_search_call"}
keeping every other element untouched.
"""
items = iter(items) # make sure we have a true iterator
for current in items:
# Only a call can start a pair worth collapsing
if current.get("type") != "server_tool_call":
yield current
continue
try:
nxt = next(items) # look-ahead one element
except StopIteration: # no "result" - just yield the call back
yield current
break
# If this really is the matching "result" - collapse
if nxt.get("type") == "server_tool_result" and nxt.get(
"tool_call_id"
) == current.get("id"):
if current.get("name") == "web_search":
collapsed = {"id": current["id"]}
if "args" in current:
# N.B. as of 2025-09-17 OpenAI raises BadRequestError if sources
# are passed back in
collapsed["action"] = current["args"]
if status := nxt.get("status"):
if status == "success":
collapsed["status"] = "completed"
elif status == "error":
collapsed["status"] = "failed"
elif nxt.get("extras", {}).get("status"):
collapsed["status"] = nxt["extras"]["status"]
else:
pass
collapsed["type"] = "web_search_call"
if current.get("name") == "file_search":
collapsed = {"id": current["id"]}
if "args" in current and "queries" in current["args"]:
collapsed["queries"] = current["args"]["queries"]
if "output" in nxt:
collapsed["results"] = nxt["output"]
if status := nxt.get("status"):
if status == "success":
collapsed["status"] = "completed"
elif status == "error":
collapsed["status"] = "failed"
elif nxt.get("extras", {}).get("status"):
collapsed["status"] = nxt["extras"]["status"]
else:
pass
collapsed["type"] = "file_search_call"
elif current.get("name") == "code_interpreter":
collapsed = {"id": current["id"]}
if "args" in current and "code" in current["args"]:
collapsed["code"] = current["args"]["code"]
for key in ("container_id",):
if key in current:
collapsed[key] = current[key]
elif key in current.get("extras", {}):
collapsed[key] = current["extras"][key]
else:
pass
if "output" in nxt:
collapsed["outputs"] = nxt["output"]
if status := nxt.get("status"):
if status == "success":
collapsed["status"] = "completed"
elif status == "error":
collapsed["status"] = "failed"
elif nxt.get("extras", {}).get("status"):
collapsed["status"] = nxt["extras"]["status"]
collapsed["type"] = "code_interpreter_call"
elif current.get("name") == "remote_mcp":
collapsed = {"id": current["id"]}
if "args" in current:
collapsed["arguments"] = json.dumps(
current["args"], separators=(",", ":")
)
elif "arguments" in current.get("extras", {}):
collapsed["arguments"] = current["extras"]["arguments"]
else:
pass
if tool_name := current.get("extras", {}).get("tool_name"):
collapsed["name"] = tool_name
if server_label := current.get("extras", {}).get("server_label"):
collapsed["server_label"] = server_label
collapsed["type"] = "mcp_call"
if approval_id := current.get("extras", {}).get("approval_request_id"):
collapsed["approval_request_id"] = approval_id
if error := nxt.get("extras", {}).get("error"):
collapsed["error"] = error
if "output" in nxt:
collapsed["output"] = nxt["output"]
for k, v in current.get("extras", {}).items():
if k not in ("server_label", "arguments", "tool_name", "error"):
collapsed[k] = v
elif current.get("name") == "mcp_list_tools":
collapsed = {"id": current["id"]}
if server_label := current.get("extras", {}).get("server_label"):
collapsed["server_label"] = server_label
if "output" in nxt:
collapsed["tools"] = nxt["output"]
collapsed["type"] = "mcp_list_tools"
if error := nxt.get("extras", {}).get("error"):
collapsed["error"] = error
for k, v in current.get("extras", {}).items():
if k not in ("server_label", "error"):
collapsed[k] = v
else:
pass
yield collapsed
else:
# Not a matching pair - emit both, in original order
yield current
yield nxt
def _convert_from_v1_to_responses(
content: list[types.ContentBlock], tool_calls: list[types.ToolCall]
) -> list[dict[str, Any]]:
new_content: list = []
for block in content:
if block["type"] == "text" and "annotations" in block:
# Need a copy because we're changing the annotations list
new_block = dict(block)
new_block["annotations"] = [
_convert_annotation_from_v1(a) for a in block["annotations"]
]
new_content.append(new_block)
elif block["type"] == "tool_call":
new_block = {"type": "function_call", "call_id": block["id"]}
if "extras" in block and "item_id" in block["extras"]:
new_block["id"] = block["extras"]["item_id"]
if "name" in block:
new_block["name"] = block["name"]
if "extras" in block and "arguments" in block["extras"]:
new_block["arguments"] = block["extras"]["arguments"]
if any(key not in block for key in ("name", "arguments")):
matching_tool_calls = [
call for call in tool_calls if call["id"] == block["id"]
]
if matching_tool_calls:
tool_call = matching_tool_calls[0]
if "name" not in block:
new_block["name"] = tool_call["name"]
if "arguments" not in block:
new_block["arguments"] = json.dumps(tool_call["args"])
new_content.append(new_block)
elif (
is_data_content_block(cast(dict, block))
and block["type"] == "image"
and "base64" in block
and isinstance(block.get("id"), str)
and block["id"].startswith("ig_")
):
new_block = {"type": "image_generation_call", "result": block["base64"]}
for extra_key in ("id", "status"):
if extra_key in block:
new_block[extra_key] = block[extra_key] # type: ignore[typeddict-item]
elif extra_key in block.get("extras", {}):
new_block[extra_key] = block["extras"][extra_key]
new_content.append(new_block)
elif block["type"] == "non_standard" and "value" in block:
new_content.append(block["value"])
else:
new_content.append(block)
new_content = list(_implode_reasoning_blocks(new_content))
return list(_consolidate_calls(new_content))
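# Illustrative sketch (editor-added, not upstream code): a v1 tool_call block
# whose arguments are only available on the matching ToolCall is expected to
# come out as a Responses-style "function_call" item. Sample ids and names
# below are hypothetical.
#
#     block = {"type": "tool_call", "id": "call_1", "name": "get_weather"}
#     calls = [{"id": "call_1", "name": "get_weather", "args": {"city": "SF"}}]
#     _convert_from_v1_to_responses([block], calls)
#     # -> [{"type": "function_call", "call_id": "call_1",
#     #      "name": "get_weather", "arguments": '{"city": "SF"}'}]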
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/partners/openai/langchain_openai/chat_models/_compat.py",
"license": "MIT License",
"lines": 398,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langchain:libs/partners/openai/tests/unit_tests/chat_models/test_responses_stream.py | from __future__ import annotations
from typing import Any
from unittest.mock import MagicMock, patch
import pytest
from langchain_core.messages import AIMessageChunk, BaseMessageChunk
from openai.types.responses import (
ResponseCompletedEvent,
ResponseContentPartAddedEvent,
ResponseContentPartDoneEvent,
ResponseCreatedEvent,
ResponseInProgressEvent,
ResponseOutputItemAddedEvent,
ResponseOutputItemDoneEvent,
ResponseOutputMessage,
ResponseReasoningItem,
ResponseReasoningSummaryPartAddedEvent,
ResponseReasoningSummaryPartDoneEvent,
ResponseReasoningSummaryTextDeltaEvent,
ResponseReasoningSummaryTextDoneEvent,
ResponseTextConfig,
ResponseTextDeltaEvent,
ResponseTextDoneEvent,
)
from openai.types.responses.response import Response
from openai.types.responses.response_output_text import ResponseOutputText
from openai.types.responses.response_reasoning_item import Summary
from openai.types.responses.response_reasoning_summary_part_added_event import (
Part as PartAdded,
)
from openai.types.responses.response_reasoning_summary_part_done_event import (
Part as PartDone,
)
from openai.types.responses.response_usage import (
InputTokensDetails,
OutputTokensDetails,
ResponseUsage,
)
from openai.types.shared.reasoning import Reasoning
from openai.types.shared.response_format_text import ResponseFormatText
from langchain_openai import ChatOpenAI
from tests.unit_tests.chat_models.test_base import MockSyncContextManager
responses_stream = [
ResponseCreatedEvent(
response=Response(
id="resp_123",
created_at=1749734255.0,
error=None,
incomplete_details=None,
instructions=None,
metadata={},
model="o4-mini-2025-04-16",
object="response",
output=[],
parallel_tool_calls=True,
temperature=1.0,
tool_choice="auto",
tools=[],
top_p=1.0,
background=False,
max_output_tokens=None,
previous_response_id=None,
reasoning=Reasoning(
effort="medium", generate_summary=None, summary="detailed"
),
service_tier="auto",
status="in_progress",
text=ResponseTextConfig(format=ResponseFormatText(type="text")),
truncation="disabled",
usage=None,
user=None,
),
sequence_number=0,
type="response.created",
),
ResponseInProgressEvent(
response=Response(
id="resp_123",
created_at=1749734255.0,
error=None,
incomplete_details=None,
instructions=None,
metadata={},
model="o4-mini-2025-04-16",
object="response",
output=[],
parallel_tool_calls=True,
temperature=1.0,
tool_choice="auto",
tools=[],
top_p=1.0,
background=False,
max_output_tokens=None,
previous_response_id=None,
reasoning=Reasoning(
effort="medium", generate_summary=None, summary="detailed"
),
service_tier="auto",
status="in_progress",
text=ResponseTextConfig(format=ResponseFormatText(type="text")),
truncation="disabled",
usage=None,
user=None,
),
sequence_number=1,
type="response.in_progress",
),
ResponseOutputItemAddedEvent(
item=ResponseReasoningItem(
id="rs_123",
summary=[],
type="reasoning",
encrypted_content=None,
status=None,
),
output_index=0,
sequence_number=2,
type="response.output_item.added",
),
ResponseReasoningSummaryPartAddedEvent(
item_id="rs_123",
output_index=0,
part=PartAdded(text="", type="summary_text"),
sequence_number=3,
summary_index=0,
type="response.reasoning_summary_part.added",
),
ResponseReasoningSummaryTextDeltaEvent(
delta="reasoning block",
item_id="rs_123",
output_index=0,
sequence_number=4,
summary_index=0,
type="response.reasoning_summary_text.delta",
),
ResponseReasoningSummaryTextDeltaEvent(
delta=" one",
item_id="rs_123",
output_index=0,
sequence_number=5,
summary_index=0,
type="response.reasoning_summary_text.delta",
),
ResponseReasoningSummaryTextDoneEvent(
item_id="rs_123",
output_index=0,
sequence_number=6,
summary_index=0,
text="reasoning block one",
type="response.reasoning_summary_text.done",
),
ResponseReasoningSummaryPartDoneEvent(
item_id="rs_123",
output_index=0,
part=PartDone(text="reasoning block one", type="summary_text"),
sequence_number=7,
summary_index=0,
type="response.reasoning_summary_part.done",
),
ResponseReasoningSummaryPartAddedEvent(
item_id="rs_123",
output_index=0,
part=PartAdded(text="", type="summary_text"),
sequence_number=8,
summary_index=1,
type="response.reasoning_summary_part.added",
),
ResponseReasoningSummaryTextDeltaEvent(
delta="another reasoning",
item_id="rs_123",
output_index=0,
sequence_number=9,
summary_index=1,
type="response.reasoning_summary_text.delta",
),
ResponseReasoningSummaryTextDeltaEvent(
delta=" block",
item_id="rs_123",
output_index=0,
sequence_number=10,
summary_index=1,
type="response.reasoning_summary_text.delta",
),
ResponseReasoningSummaryTextDoneEvent(
item_id="rs_123",
output_index=0,
sequence_number=11,
summary_index=1,
text="another reasoning block",
type="response.reasoning_summary_text.done",
),
ResponseReasoningSummaryPartDoneEvent(
item_id="rs_123",
output_index=0,
part=PartDone(text="another reasoning block", type="summary_text"),
sequence_number=12,
summary_index=1,
type="response.reasoning_summary_part.done",
),
ResponseOutputItemDoneEvent(
item=ResponseReasoningItem(
id="rs_123",
summary=[
Summary(text="reasoning block one", type="summary_text"),
Summary(text="another reasoning block", type="summary_text"),
],
type="reasoning",
encrypted_content=None,
status=None,
),
output_index=0,
sequence_number=13,
type="response.output_item.done",
),
ResponseOutputItemAddedEvent(
item=ResponseOutputMessage(
id="msg_123",
content=[],
role="assistant",
status="in_progress",
type="message",
),
output_index=1,
sequence_number=14,
type="response.output_item.added",
),
ResponseContentPartAddedEvent(
content_index=0,
item_id="msg_123",
output_index=1,
part=ResponseOutputText(annotations=[], text="", type="output_text"),
sequence_number=15,
type="response.content_part.added",
),
ResponseTextDeltaEvent(
content_index=0,
delta="text block",
item_id="msg_123",
output_index=1,
sequence_number=16,
logprobs=[],
type="response.output_text.delta",
),
ResponseTextDeltaEvent(
content_index=0,
delta=" one",
item_id="msg_123",
output_index=1,
sequence_number=17,
logprobs=[],
type="response.output_text.delta",
),
ResponseTextDoneEvent(
content_index=0,
item_id="msg_123",
output_index=1,
sequence_number=18,
text="text block one",
logprobs=[],
type="response.output_text.done",
),
ResponseContentPartDoneEvent(
content_index=0,
item_id="msg_123",
output_index=1,
part=ResponseOutputText(
annotations=[], text="text block one", type="output_text"
),
sequence_number=19,
type="response.content_part.done",
),
ResponseContentPartAddedEvent(
content_index=1,
item_id="msg_123",
output_index=1,
part=ResponseOutputText(annotations=[], text="", type="output_text"),
sequence_number=20,
type="response.content_part.added",
),
ResponseTextDeltaEvent(
content_index=1,
delta="another text",
item_id="msg_123",
output_index=1,
sequence_number=21,
logprobs=[],
type="response.output_text.delta",
),
ResponseTextDeltaEvent(
content_index=1,
delta=" block",
item_id="msg_123",
output_index=1,
sequence_number=22,
logprobs=[],
type="response.output_text.delta",
),
ResponseTextDoneEvent(
content_index=1,
item_id="msg_123",
output_index=1,
sequence_number=23,
text="another text block",
logprobs=[],
type="response.output_text.done",
),
ResponseContentPartDoneEvent(
content_index=1,
item_id="msg_123",
output_index=1,
part=ResponseOutputText(
annotations=[], text="another text block", type="output_text"
),
sequence_number=24,
type="response.content_part.done",
),
ResponseOutputItemDoneEvent(
item=ResponseOutputMessage(
id="msg_123",
content=[
ResponseOutputText(
annotations=[], text="text block one", type="output_text"
),
ResponseOutputText(
annotations=[], text="another text block", type="output_text"
),
],
role="assistant",
status="completed",
type="message",
),
output_index=1,
sequence_number=25,
type="response.output_item.done",
),
ResponseOutputItemAddedEvent(
item=ResponseReasoningItem(
id="rs_234",
summary=[],
type="reasoning",
encrypted_content="encrypted-content",
status=None,
),
output_index=2,
sequence_number=26,
type="response.output_item.added",
),
ResponseReasoningSummaryPartAddedEvent(
item_id="rs_234",
output_index=2,
part=PartAdded(text="", type="summary_text"),
sequence_number=27,
summary_index=0,
type="response.reasoning_summary_part.added",
),
ResponseReasoningSummaryTextDeltaEvent(
delta="more reasoning",
item_id="rs_234",
output_index=2,
sequence_number=28,
summary_index=0,
type="response.reasoning_summary_text.delta",
),
ResponseReasoningSummaryTextDoneEvent(
item_id="rs_234",
output_index=2,
sequence_number=29,
summary_index=0,
text="more reasoning",
type="response.reasoning_summary_text.done",
),
ResponseReasoningSummaryPartDoneEvent(
item_id="rs_234",
output_index=2,
part=PartDone(text="more reasoning", type="summary_text"),
sequence_number=30,
summary_index=0,
type="response.reasoning_summary_part.done",
),
ResponseReasoningSummaryPartAddedEvent(
item_id="rs_234",
output_index=2,
part=PartAdded(text="", type="summary_text"),
sequence_number=31,
summary_index=1,
type="response.reasoning_summary_part.added",
),
ResponseReasoningSummaryTextDeltaEvent(
delta="still more reasoning",
item_id="rs_234",
output_index=2,
sequence_number=32,
summary_index=1,
type="response.reasoning_summary_text.delta",
),
ResponseReasoningSummaryTextDoneEvent(
item_id="rs_234",
output_index=2,
sequence_number=33,
summary_index=1,
text="still more reasoning",
type="response.reasoning_summary_text.done",
),
ResponseReasoningSummaryPartDoneEvent(
item_id="rs_234",
output_index=2,
part=PartDone(text="still more reasoning", type="summary_text"),
sequence_number=34,
summary_index=1,
type="response.reasoning_summary_part.done",
),
ResponseOutputItemDoneEvent(
item=ResponseReasoningItem(
id="rs_234",
summary=[
Summary(text="more reasoning", type="summary_text"),
Summary(text="still more reasoning", type="summary_text"),
],
type="reasoning",
encrypted_content="encrypted-content",
status=None,
),
output_index=2,
sequence_number=35,
type="response.output_item.done",
),
ResponseOutputItemAddedEvent(
item=ResponseOutputMessage(
id="msg_234",
content=[],
role="assistant",
status="in_progress",
type="message",
),
output_index=3,
sequence_number=36,
type="response.output_item.added",
),
ResponseContentPartAddedEvent(
content_index=0,
item_id="msg_234",
output_index=3,
part=ResponseOutputText(annotations=[], text="", type="output_text"),
sequence_number=37,
type="response.content_part.added",
),
ResponseTextDeltaEvent(
content_index=0,
delta="more",
item_id="msg_234",
output_index=3,
sequence_number=38,
logprobs=[],
type="response.output_text.delta",
),
ResponseTextDoneEvent(
content_index=0,
item_id="msg_234",
output_index=3,
sequence_number=39,
text="more",
logprobs=[],
type="response.output_text.done",
),
ResponseContentPartDoneEvent(
content_index=0,
item_id="msg_234",
output_index=3,
part=ResponseOutputText(annotations=[], text="more", type="output_text"),
sequence_number=40,
type="response.content_part.done",
),
ResponseContentPartAddedEvent(
content_index=1,
item_id="msg_234",
output_index=3,
part=ResponseOutputText(annotations=[], text="", type="output_text"),
sequence_number=41,
type="response.content_part.added",
),
ResponseTextDeltaEvent(
content_index=1,
delta="text",
item_id="msg_234",
output_index=3,
sequence_number=42,
logprobs=[],
type="response.output_text.delta",
),
ResponseTextDoneEvent(
content_index=1,
item_id="msg_234",
output_index=3,
sequence_number=43,
text="text",
logprobs=[],
type="response.output_text.done",
),
ResponseContentPartDoneEvent(
content_index=1,
item_id="msg_234",
output_index=3,
part=ResponseOutputText(annotations=[], text="text", type="output_text"),
sequence_number=44,
type="response.content_part.done",
),
ResponseOutputItemDoneEvent(
item=ResponseOutputMessage(
id="msg_234",
content=[
ResponseOutputText(annotations=[], text="more", type="output_text"),
ResponseOutputText(annotations=[], text="text", type="output_text"),
],
role="assistant",
status="completed",
type="message",
),
output_index=3,
sequence_number=45,
type="response.output_item.done",
),
ResponseCompletedEvent(
response=Response(
id="resp_123",
created_at=1749734255.0,
error=None,
incomplete_details=None,
instructions=None,
metadata={},
model="o4-mini-2025-04-16",
object="response",
output=[
ResponseReasoningItem(
id="rs_123",
summary=[
Summary(text="reasoning block one", type="summary_text"),
Summary(text="another reasoning block", type="summary_text"),
],
type="reasoning",
encrypted_content=None,
status=None,
),
ResponseOutputMessage(
id="msg_123",
content=[
ResponseOutputText(
annotations=[], text="text block one", type="output_text"
),
ResponseOutputText(
annotations=[],
text="another text block",
type="output_text",
),
],
role="assistant",
status="completed",
type="message",
),
ResponseReasoningItem(
id="rs_234",
summary=[
Summary(text="more reasoning", type="summary_text"),
Summary(text="still more reasoning", type="summary_text"),
],
type="reasoning",
encrypted_content="encrypted-content",
status=None,
),
ResponseOutputMessage(
id="msg_234",
content=[
ResponseOutputText(
annotations=[], text="more", type="output_text"
),
ResponseOutputText(
annotations=[], text="text", type="output_text"
),
],
role="assistant",
status="completed",
type="message",
),
],
parallel_tool_calls=True,
temperature=1.0,
tool_choice="auto",
tools=[],
top_p=1.0,
background=False,
max_output_tokens=None,
previous_response_id=None,
reasoning=Reasoning(
effort="medium", generate_summary=None, summary="detailed"
),
service_tier="default",
status="completed",
text=ResponseTextConfig(format=ResponseFormatText(type="text")),
truncation="disabled",
usage=ResponseUsage(
input_tokens=13,
input_tokens_details=InputTokensDetails(cached_tokens=0),
output_tokens=71,
output_tokens_details=OutputTokensDetails(reasoning_tokens=64),
total_tokens=84,
),
user=None,
),
sequence_number=46,
type="response.completed",
),
]
def _strip_none(obj: Any) -> Any:
"""Recursively strip None values from dictionaries and lists."""
if isinstance(obj, dict):
return {k: _strip_none(v) for k, v in obj.items() if v is not None}
if isinstance(obj, list):
return [_strip_none(v) for v in obj]
return obj
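# Editor-added example test (hedged): documents that _strip_none removes
# None-valued dict keys but leaves None elements of lists in place.
def test_strip_none_nested() -> None:
    payload = {"a": 1, "b": None, "c": [{"d": None, "e": 2}]}
    assert _strip_none(payload) == {"a": 1, "c": [{"e": 2}]}
    assert _strip_none([None, 1]) == [None, 1]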
@pytest.mark.parametrize(
("output_version", "expected_content"),
[
(
"responses/v1",
[
{
"id": "rs_123",
"summary": [
{
"index": 0,
"type": "summary_text",
"text": "reasoning block one",
},
{
"index": 1,
"type": "summary_text",
"text": "another reasoning block",
},
],
"type": "reasoning",
"index": 0,
},
{"type": "text", "text": "text block one", "index": 1, "id": "msg_123"},
{
"type": "text",
"text": "another text block",
"index": 2,
"id": "msg_123",
},
{
"id": "rs_234",
"summary": [
{"index": 0, "type": "summary_text", "text": "more reasoning"},
{
"index": 1,
"type": "summary_text",
"text": "still more reasoning",
},
],
"encrypted_content": "encrypted-content",
"type": "reasoning",
"index": 3,
},
{"type": "text", "text": "more", "index": 4, "id": "msg_234"},
{"type": "text", "text": "text", "index": 5, "id": "msg_234"},
],
),
(
"v1",
[
{
"type": "reasoning",
"reasoning": "reasoning block one",
"id": "rs_123",
"index": "lc_rs_305f30",
},
{
"type": "reasoning",
"reasoning": "another reasoning block",
"id": "rs_123",
"index": "lc_rs_305f31",
},
{
"type": "text",
"text": "text block one",
"index": "lc_txt_1",
"id": "msg_123",
},
{
"type": "text",
"text": "another text block",
"index": "lc_txt_2",
"id": "msg_123",
},
{
"type": "reasoning",
"reasoning": "more reasoning",
"id": "rs_234",
"extras": {"encrypted_content": "encrypted-content"},
"index": "lc_rs_335f30",
},
{
"type": "reasoning",
"reasoning": "still more reasoning",
"id": "rs_234",
"index": "lc_rs_335f31",
},
{"type": "text", "text": "more", "index": "lc_txt_4", "id": "msg_234"},
{"type": "text", "text": "text", "index": "lc_txt_5", "id": "msg_234"},
],
),
],
)
def test_responses_stream(output_version: str, expected_content: list[dict]) -> None:
llm = ChatOpenAI(
model="o4-mini", use_responses_api=True, output_version=output_version
)
mock_client = MagicMock()
def mock_create(*args: Any, **kwargs: Any) -> MockSyncContextManager:
return MockSyncContextManager(responses_stream)
mock_client.responses.create = mock_create
full: BaseMessageChunk | None = None
chunks = []
with patch.object(llm, "root_client", mock_client):
for chunk in llm.stream("test"):
assert isinstance(chunk, AIMessageChunk)
full = chunk if full is None else full + chunk
chunks.append(chunk)
assert isinstance(full, AIMessageChunk)
assert full.content == expected_content
assert full.additional_kwargs == {}
assert full.id == "resp_123"
# Test reconstruction
payload = llm._get_request_payload([full])
completed = [
item
for item in responses_stream
if item.type == "response.completed" # type: ignore[attr-defined]
]
assert len(completed) == 1
response = completed[0].response # type: ignore[attr-defined]
assert len(response.output) == len(payload["input"])
for idx, item in enumerate(response.output):
dumped = _strip_none(item.model_dump())
_ = dumped.pop("status", None)
assert dumped == payload["input"][idx]
def test_responses_stream_with_image_generation_multiple_calls() -> None:
"""Test that streaming with image_generation tool works across multiple calls.
Regression test: image_generation tool should not be mutated between calls,
which would cause NotImplementedError on subsequent invocations.
"""
tools: list[dict[str, Any]] = [
{"type": "image_generation"},
{"type": "function", "name": "my_tool", "parameters": {}},
]
llm = ChatOpenAI(
model="gpt-4o",
use_responses_api=True,
streaming=True,
)
llm_with_tools = llm.bind_tools(tools)
mock_client = MagicMock()
def mock_create(*args: Any, **kwargs: Any) -> MockSyncContextManager:
return MockSyncContextManager(responses_stream)
mock_client.responses.create = mock_create
# First call should work
with patch.object(llm, "root_client", mock_client):
chunks = list(llm_with_tools.stream("test"))
assert len(chunks) > 0
# Second call should also work (would fail before fix due to tool mutation)
with patch.object(llm, "root_client", mock_client):
chunks = list(llm_with_tools.stream("test again"))
assert len(chunks) > 0
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/partners/openai/tests/unit_tests/chat_models/test_responses_stream.py",
"license": "MIT License",
"lines": 771,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langchain:libs/partners/openai/langchain_openai/chat_models/_client_utils.py | """Helpers for creating OpenAI API clients.
This module caches httpx clients so that a new instance is not created for
every instance of ChatOpenAI.
Logic is largely replicated from openai._base_client.
"""
from __future__ import annotations
import asyncio
import inspect
import os
from collections.abc import Awaitable, Callable
from functools import lru_cache
from typing import Any, cast
import openai
from pydantic import SecretStr
class _SyncHttpxClientWrapper(openai.DefaultHttpxClient):
"""Borrowed from openai._base_client."""
def __del__(self) -> None:
if self.is_closed:
return
try:
self.close()
except Exception: # noqa: S110
pass
class _AsyncHttpxClientWrapper(openai.DefaultAsyncHttpxClient):
"""Borrowed from openai._base_client."""
def __del__(self) -> None:
if self.is_closed:
return
try:
            # TODO(someday): support non-asyncio runtimes here
asyncio.get_running_loop().create_task(self.aclose())
except Exception: # noqa: S110
pass
def _build_sync_httpx_client(
base_url: str | None, timeout: Any
) -> _SyncHttpxClientWrapper:
return _SyncHttpxClientWrapper(
base_url=base_url
or os.environ.get("OPENAI_BASE_URL")
or "https://api.openai.com/v1",
timeout=timeout,
)
def _build_async_httpx_client(
base_url: str | None, timeout: Any
) -> _AsyncHttpxClientWrapper:
return _AsyncHttpxClientWrapper(
base_url=base_url
or os.environ.get("OPENAI_BASE_URL")
or "https://api.openai.com/v1",
timeout=timeout,
)
@lru_cache
def _cached_sync_httpx_client(
base_url: str | None, timeout: Any
) -> _SyncHttpxClientWrapper:
return _build_sync_httpx_client(base_url, timeout)
@lru_cache
def _cached_async_httpx_client(
base_url: str | None, timeout: Any
) -> _AsyncHttpxClientWrapper:
return _build_async_httpx_client(base_url, timeout)
def _get_default_httpx_client(
base_url: str | None, timeout: Any
) -> _SyncHttpxClientWrapper:
"""Get default httpx client.
Uses cached client unless timeout is `httpx.Timeout`, which is not hashable.
"""
try:
hash(timeout)
except TypeError:
return _build_sync_httpx_client(base_url, timeout)
else:
return _cached_sync_httpx_client(base_url, timeout)
def _get_default_async_httpx_client(
base_url: str | None, timeout: Any
) -> _AsyncHttpxClientWrapper:
"""Get default httpx client.
Uses cached client unless timeout is `httpx.Timeout`, which is not hashable.
"""
try:
hash(timeout)
except TypeError:
return _build_async_httpx_client(base_url, timeout)
else:
return _cached_async_httpx_client(base_url, timeout)
def _resolve_sync_and_async_api_keys(
api_key: SecretStr | Callable[[], str] | Callable[[], Awaitable[str]],
) -> tuple[str | None | Callable[[], str], str | Callable[[], Awaitable[str]]]:
"""Resolve sync and async API key values.
Because OpenAI and AsyncOpenAI clients support either sync or async callables for
the API key, we need to resolve separate values here.
"""
if isinstance(api_key, SecretStr):
sync_api_key_value: str | None | Callable[[], str] = api_key.get_secret_value()
async_api_key_value: str | Callable[[], Awaitable[str]] = (
api_key.get_secret_value()
)
elif callable(api_key):
if inspect.iscoroutinefunction(api_key):
async_api_key_value = api_key
sync_api_key_value = None
else:
sync_api_key_value = cast(Callable, api_key)
async def async_api_key_wrapper() -> str:
                return await asyncio.get_running_loop().run_in_executor(
                    None, cast(Callable, api_key)
                )
async_api_key_value = async_api_key_wrapper
return sync_api_key_value, async_api_key_value
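# Illustrative usage (editor-added sketch, not upstream API): identical
# hashable (base_url, timeout) arguments reuse the lru_cache'd wrapper, while
# an unhashable httpx.Timeout builds a fresh client on every call.
#
#     a = _get_default_httpx_client(None, 30.0)
#     b = _get_default_httpx_client(None, 30.0)
#     assert a is b  # served from the cache
#
#     import httpx
#     c = _get_default_httpx_client(None, httpx.Timeout(30.0))
#     d = _get_default_httpx_client(None, httpx.Timeout(30.0))
#     assert c is not d  # unhashable timeout bypasses the cache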
| {
"repo_id": "langchain-ai/langchain",
"file_path": "libs/partners/openai/langchain_openai/chat_models/_client_utils.py",
"license": "MIT License",
"lines": 109,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langgraph:libs/checkpoint/langgraph/checkpoint/serde/event_hooks.py | from __future__ import annotations
import logging
from collections.abc import Callable
from threading import Lock
from typing import TypedDict
from typing_extensions import NotRequired
logger = logging.getLogger(__name__)
class SerdeEvent(TypedDict):
kind: str
module: str
name: str
method: NotRequired[str]
SerdeEventListener = Callable[[SerdeEvent], None]
_listeners: list[SerdeEventListener] = []
_listeners_lock = Lock()
def register_serde_event_listener(listener: SerdeEventListener) -> Callable[[], None]:
"""Register a listener for serde allowlist events."""
with _listeners_lock:
_listeners.append(listener)
def unregister() -> None:
with _listeners_lock:
try:
_listeners.remove(listener)
except ValueError:
pass
return unregister
def emit_serde_event(event: SerdeEvent) -> None:
"""Emit a serde event to all listeners.
Listener failures are isolated and logged.
"""
with _listeners_lock:
listeners = tuple(_listeners)
for listener in listeners:
try:
listener(event)
except Exception:
logger.warning("Serde listener failed", exc_info=True)
| {
"repo_id": "langchain-ai/langgraph",
"file_path": "libs/checkpoint/langgraph/checkpoint/serde/event_hooks.py",
"license": "MIT License",
"lines": 37,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langgraph:libs/checkpoint/langgraph/checkpoint/serde/_msgpack.py | import os
from collections.abc import Iterable
from typing import cast
STRICT_MSGPACK_ENABLED = os.getenv("LANGGRAPH_STRICT_MSGPACK", "false").lower() in (
"1",
"true",
"yes",
)
_SENTINEL = cast(None, object())  # sentinel distinguishing "not provided" from an explicit None
SAFE_MSGPACK_TYPES: frozenset[tuple[str, ...]] = frozenset(
{
# datetime types
("datetime", "datetime"),
("datetime", "date"),
("datetime", "time"),
("datetime", "timedelta"),
("datetime", "timezone"),
# uuid
("uuid", "UUID"),
# numeric
("decimal", "Decimal"),
# collections
("builtins", "set"),
("builtins", "frozenset"),
("collections", "deque"),
# ip addresses
("ipaddress", "IPv4Address"),
("ipaddress", "IPv4Interface"),
("ipaddress", "IPv4Network"),
("ipaddress", "IPv6Address"),
("ipaddress", "IPv6Interface"),
("ipaddress", "IPv6Network"),
# pathlib
("pathlib", "Path"),
("pathlib", "PosixPath"),
("pathlib", "WindowsPath"),
# pathlib in Python 3.13+
("pathlib._local", "Path"),
("pathlib._local", "PosixPath"),
("pathlib._local", "WindowsPath"),
# zoneinfo
("zoneinfo", "ZoneInfo"),
# regex
("re", "compile"),
# langchain-core messages (safe container types used by graph state)
("langchain_core.messages.base", "BaseMessage"),
("langchain_core.messages.base", "BaseMessageChunk"),
("langchain_core.messages.human", "HumanMessage"),
("langchain_core.messages.human", "HumanMessageChunk"),
("langchain_core.messages.ai", "AIMessage"),
("langchain_core.messages.ai", "AIMessageChunk"),
("langchain_core.messages.system", "SystemMessage"),
("langchain_core.messages.system", "SystemMessageChunk"),
("langchain_core.messages.chat", "ChatMessage"),
("langchain_core.messages.chat", "ChatMessageChunk"),
("langchain_core.messages.tool", "ToolMessage"),
("langchain_core.messages.tool", "ToolMessageChunk"),
("langchain_core.messages.function", "FunctionMessage"),
("langchain_core.messages.function", "FunctionMessageChunk"),
("langchain_core.messages.modifier", "RemoveMessage"),
# langchain-core document model
("langchain_core.documents.base", "Document"),
# langgraph
("langgraph.types", "Send"),
("langgraph.types", "Interrupt"),
("langgraph.types", "Command"),
("langgraph.types", "StateSnapshot"),
("langgraph.types", "PregelTask"),
("langgraph.types", "Overwrite"),
("langgraph.store.base", "Item"),
("langgraph.store.base", "GetOp"),
}
)
# Allowed (module, name, method) triples for EXT_METHOD_SINGLE_ARG.
# Only these specific method invocations are permitted during deserialization.
# This is separate from SAFE_MSGPACK_TYPES which only governs construction.
SAFE_MSGPACK_METHODS: frozenset[tuple[str, str, str]] = frozenset(
{
("datetime", "datetime", "fromisoformat"),
}
)
AllowedMsgpackModules = Iterable[tuple[str, ...] | type]
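# Illustrative checks (editor-added sketch): construction and method-call
# permissions are tracked by separate allowlists.
#
#     assert ("uuid", "UUID") in SAFE_MSGPACK_TYPES
#     assert ("datetime", "datetime", "fromisoformat") in SAFE_MSGPACK_METHODS
#     assert ("os", "system") not in SAFE_MSGPACK_TYPES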
| {
"repo_id": "langchain-ai/langgraph",
"file_path": "libs/checkpoint/langgraph/checkpoint/serde/_msgpack.py",
"license": "MIT License",
"lines": 82,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langgraph:libs/checkpoint/tests/test_encrypted.py | """Tests for EncryptedSerializer with msgpack allowlist functionality.
These tests mirror the msgpack allowlist tests in test_jsonplus.py but run them
through the EncryptedSerializer to ensure the allowlist behavior is preserved
when encryption is enabled.
"""
from __future__ import annotations
import logging
import pathlib
import re
import uuid
from collections import deque
from datetime import date, datetime, time, timezone
from decimal import Decimal
from ipaddress import IPv4Address
from typing import Literal, cast
import ormsgpack
import pytest
from pydantic import BaseModel
from langgraph.checkpoint.base import BaseCheckpointSaver, _with_msgpack_allowlist
from langgraph.checkpoint.serde import _msgpack as _lg_msgpack
from langgraph.checkpoint.serde.base import CipherProtocol
from langgraph.checkpoint.serde.encrypted import EncryptedSerializer
from langgraph.checkpoint.serde.jsonplus import (
EXT_METHOD_SINGLE_ARG,
JsonPlusSerializer,
_msgpack_enc,
)
class InnerPydantic(BaseModel):
hello: str
class MyPydantic(BaseModel):
foo: str
bar: int
inner: InnerPydantic
class AnotherPydantic(BaseModel):
foo: str
class _PassthroughCipher(CipherProtocol):
def encrypt(self, plaintext: bytes) -> tuple[str, bytes]:
return "passthrough", plaintext
def decrypt(self, ciphername: str, ciphertext: bytes) -> bytes:
assert ciphername == "passthrough"
return ciphertext
def _make_encrypted_serde(
allowed_msgpack_modules: (
_lg_msgpack.AllowedMsgpackModules | Literal[True] | None | object
) = _lg_msgpack._SENTINEL,
) -> EncryptedSerializer:
"""Create an EncryptedSerializer with AES encryption for testing."""
inner = JsonPlusSerializer(
allowed_msgpack_modules=cast(
_lg_msgpack.AllowedMsgpackModules | Literal[True] | None,
allowed_msgpack_modules,
)
)
return EncryptedSerializer.from_pycryptodome_aes(
serde=inner, key=b"1234567890123456"
)
def test_msgpack_method_pathlib_blocked_encrypted_strict(
tmp_path: pathlib.Path, caplog: pytest.LogCaptureFixture
) -> None:
target = tmp_path / "secret.txt"
target.write_text("secret")
payload = ormsgpack.packb(
ormsgpack.Ext(
EXT_METHOD_SINGLE_ARG,
_msgpack_enc(("pathlib", "Path", target, "read_text")),
),
option=ormsgpack.OPT_NON_STR_KEYS,
)
serde = EncryptedSerializer(
_PassthroughCipher(),
JsonPlusSerializer(allowed_msgpack_modules=None),
)
caplog.set_level(logging.WARNING, logger="langgraph.checkpoint.serde.jsonplus")
caplog.clear()
result = serde.loads_typed(("msgpack+passthrough", payload))
assert result == target
assert "blocked deserialization of method call pathlib.path.read_text" in (
caplog.text.lower()
)
class TestEncryptedSerializerMsgpackAllowlist:
"""Test msgpack allowlist behavior through EncryptedSerializer."""
def test_safe_types_no_warning(self, caplog: pytest.LogCaptureFixture) -> None:
"""Test safe types deserialize without warnings through encryption."""
serde = _make_encrypted_serde()
safe_objects = [
datetime.now(),
date.today(),
time(12, 30),
timezone.utc,
uuid.uuid4(),
Decimal("123.45"),
{1, 2, 3},
frozenset([1, 2, 3]),
deque([1, 2, 3]),
IPv4Address("192.168.1.1"),
pathlib.Path("/tmp/test"),
]
for obj in safe_objects:
caplog.clear()
dumped = serde.dumps_typed(obj)
# Verify encryption is happening
assert "+aes" in dumped[0], f"Expected encryption for {type(obj)}"
result = serde.loads_typed(dumped)
assert "unregistered type" not in caplog.text.lower(), (
f"Unexpected warning for {type(obj)}"
)
assert result is not None
    def test_pydantic_warns_by_default(self, caplog: pytest.LogCaptureFixture) -> None:
        """Pydantic models not in allowlist should log warning but still deserialize."""
        current = _lg_msgpack.STRICT_MSGPACK_ENABLED
        _lg_msgpack.STRICT_MSGPACK_ENABLED = False
        try:
            serde = _make_encrypted_serde()
            obj = MyPydantic(foo="test", bar=42, inner=InnerPydantic(hello="world"))
            caplog.clear()
            dumped = serde.dumps_typed(obj)
            assert "+aes" in dumped[0]
            result = serde.loads_typed(dumped)
            assert "unregistered type" in caplog.text.lower()
            assert "allowed_msgpack_modules" in caplog.text
            assert result == obj
        finally:
            # Restore the module-level flag even if an assertion fails
            _lg_msgpack.STRICT_MSGPACK_ENABLED = current
def test_strict_mode_blocks_unregistered(
self, caplog: pytest.LogCaptureFixture
) -> None:
"""Strict mode should block unregistered types through encryption."""
serde = _make_encrypted_serde(allowed_msgpack_modules=None)
obj = MyPydantic(foo="test", bar=42, inner=InnerPydantic(hello="world"))
caplog.clear()
dumped = serde.dumps_typed(obj)
assert "+aes" in dumped[0]
result = serde.loads_typed(dumped)
assert "blocked" in caplog.text.lower()
expected = obj.model_dump()
assert result == expected
def test_allowlist_silences_warning(self, caplog: pytest.LogCaptureFixture) -> None:
"""Types in allowed_msgpack_modules should deserialize without warnings."""
serde = _make_encrypted_serde(
allowed_msgpack_modules=[
("tests.test_encrypted", "MyPydantic"),
("tests.test_encrypted", "InnerPydantic"),
]
)
obj = MyPydantic(foo="test", bar=42, inner=InnerPydantic(hello="world"))
caplog.clear()
dumped = serde.dumps_typed(obj)
assert "+aes" in dumped[0]
result = serde.loads_typed(dumped)
assert "unregistered type" not in caplog.text.lower()
assert "blocked" not in caplog.text.lower()
assert result == obj
def test_allowlist_blocks_non_listed(
self, caplog: pytest.LogCaptureFixture
) -> None:
"""Allowlists should block unregistered types even through encryption."""
serde = _make_encrypted_serde(
allowed_msgpack_modules=[("tests.test_encrypted", "MyPydantic")]
)
obj = AnotherPydantic(foo="nope")
caplog.clear()
dumped = serde.dumps_typed(obj)
assert "+aes" in dumped[0]
result = serde.loads_typed(dumped)
assert "blocked" in caplog.text.lower()
expected = obj.model_dump()
assert result == expected
def test_safe_types_value_equality(self, caplog: pytest.LogCaptureFixture) -> None:
"""Verify safe types are correctly restored with proper values through encryption."""
serde = _make_encrypted_serde(allowed_msgpack_modules=None)
test_cases = [
datetime(2024, 1, 15, 12, 30, 45, 123456),
date(2024, 6, 15),
time(14, 30, 0),
uuid.UUID("12345678-1234-5678-1234-567812345678"),
Decimal("123.456789"),
{1, 2, 3, 4, 5},
frozenset(["a", "b", "c"]),
deque([1, 2, 3]),
IPv4Address("10.0.0.1"),
pathlib.Path("/some/test/path"),
re.compile(r"\d+", re.MULTILINE),
]
for obj in test_cases:
caplog.clear()
dumped = serde.dumps_typed(obj)
assert "+aes" in dumped[0], f"Expected encryption for {type(obj)}"
result = serde.loads_typed(dumped)
assert "blocked" not in caplog.text.lower(), f"Blocked for {type(obj)}"
if isinstance(obj, re.Pattern):
assert result.pattern == obj.pattern
assert result.flags == obj.flags
else:
assert result == obj, (
f"Value mismatch for {type(obj)}: {result} != {obj}"
)
def test_regex_safe_type(self, caplog: pytest.LogCaptureFixture) -> None:
"""re.compile patterns should deserialize without warnings as a safe type."""
serde = _make_encrypted_serde(allowed_msgpack_modules=None)
pattern = re.compile(r"foo.*bar", re.IGNORECASE | re.DOTALL)
caplog.clear()
dumped = serde.dumps_typed(pattern)
assert "+aes" in dumped[0]
result = serde.loads_typed(dumped)
assert "blocked" not in caplog.text.lower()
assert "unregistered" not in caplog.text.lower()
assert result.pattern == pattern.pattern
assert result.flags == pattern.flags
class TestWithMsgpackAllowlistEncrypted:
"""Test _with_msgpack_allowlist function with EncryptedSerializer."""
def test_propagates_allowlist_to_inner_serde(self) -> None:
"""_with_msgpack_allowlist should propagate allowlist to inner JsonPlusSerializer."""
inner = JsonPlusSerializer(allowed_msgpack_modules=None)
encrypted = EncryptedSerializer.from_pycryptodome_aes(
serde=inner, key=b"1234567890123456"
)
extra = [("my.module", "MyClass")]
result = _with_msgpack_allowlist(encrypted, extra)
# Should return a new EncryptedSerializer
assert isinstance(result, EncryptedSerializer)
assert result is not encrypted
# Inner serde should have the allowlist
assert isinstance(result.serde, JsonPlusSerializer)
assert isinstance(result.serde._allowed_msgpack_modules, set)
assert ("my.module", "MyClass") in result.serde._allowed_msgpack_modules
def test_preserves_cipher(self) -> None:
"""_with_msgpack_allowlist should preserve the cipher from the original."""
inner = JsonPlusSerializer(allowed_msgpack_modules=None)
encrypted = EncryptedSerializer.from_pycryptodome_aes(
serde=inner, key=b"1234567890123456"
)
result = _with_msgpack_allowlist(encrypted, [("my.module", "MyClass")])
assert isinstance(result, EncryptedSerializer)
# Should use the same cipher
assert result.cipher is encrypted.cipher
def test_returns_same_if_not_jsonplus_inner(self) -> None:
"""_with_msgpack_allowlist should return same serde if inner is not JsonPlusSerializer."""
class DummyInnerSerde:
def dumps_typed(self, obj: object) -> tuple[str, bytes]:
return ("dummy", b"")
def loads_typed(self, data: tuple[str, bytes]) -> None:
return None
from langgraph.checkpoint.serde.base import CipherProtocol
class DummyCipher(CipherProtocol):
def encrypt(self, plaintext: bytes) -> tuple[str, bytes]:
return "dummy", plaintext
def decrypt(self, ciphername: str, ciphertext: bytes) -> bytes:
return ciphertext
encrypted = EncryptedSerializer(DummyCipher(), DummyInnerSerde())
result = _with_msgpack_allowlist(encrypted, [("my.module", "MyClass")])
assert result is encrypted
def test_warns_if_allowlist_unsupported(
self, caplog: pytest.LogCaptureFixture
) -> None:
class DummySerde:
def dumps_typed(self, obj: object) -> tuple[str, bytes]:
return ("dummy", b"")
def loads_typed(self, data: tuple[str, bytes]) -> object:
return data
serde = DummySerde()
caplog.set_level(logging.WARNING, logger="langgraph.checkpoint.base")
caplog.clear()
result = _with_msgpack_allowlist(serde, [("my.module", "MyClass")])
assert result is serde
assert "does not support msgpack allowlist" in caplog.text.lower()
def test_noop_allowlist_returns_same_encrypted_instance(self) -> None:
inner = JsonPlusSerializer(allowed_msgpack_modules=None)
encrypted = EncryptedSerializer.from_pycryptodome_aes(
serde=inner, key=b"1234567890123456"
)
result = _with_msgpack_allowlist(encrypted, ())
assert result is encrypted
def test_functional_roundtrip_with_allowlist(
self, caplog: pytest.LogCaptureFixture
) -> None:
"""End-to-end test: allowlist applied via _with_msgpack_allowlist works."""
inner = JsonPlusSerializer(allowed_msgpack_modules=None)
encrypted = EncryptedSerializer.from_pycryptodome_aes(
serde=inner, key=b"1234567890123456"
)
# Apply allowlist for MyPydantic
updated = _with_msgpack_allowlist(
encrypted,
[
("tests.test_encrypted", "MyPydantic"),
("tests.test_encrypted", "InnerPydantic"),
],
)
obj = MyPydantic(foo="test", bar=42, inner=InnerPydantic(hello="world"))
caplog.clear()
dumped = updated.dumps_typed(obj)
assert "+aes" in dumped[0]
result = updated.loads_typed(dumped)
# Should deserialize without blocking
assert "blocked" not in caplog.text.lower()
assert result == obj
def test_original_still_blocks_after_with_allowlist(
self, caplog: pytest.LogCaptureFixture
) -> None:
"""Original serde should still block after _with_msgpack_allowlist creates a new one."""
inner = JsonPlusSerializer(allowed_msgpack_modules=None)
encrypted = EncryptedSerializer.from_pycryptodome_aes(
serde=inner, key=b"1234567890123456"
)
# Apply allowlist - this should create a NEW serde
_with_msgpack_allowlist(
encrypted,
[("tests.test_encrypted", "MyPydantic")],
)
# Original should still block
obj = MyPydantic(foo="test", bar=42, inner=InnerPydantic(hello="world"))
caplog.clear()
dumped = encrypted.dumps_typed(obj)
result = encrypted.loads_typed(dumped)
assert "blocked" in caplog.text.lower()
assert result == obj.model_dump()
class TestEncryptedSerializerUnencryptedFallback:
"""Test that EncryptedSerializer handles unencrypted data correctly."""
def test_loads_unencrypted_data(self) -> None:
"""EncryptedSerializer should handle unencrypted data for backwards compat."""
plain = JsonPlusSerializer(allowed_msgpack_modules=None)
encrypted = _make_encrypted_serde(allowed_msgpack_modules=None)
obj = {"key": "value", "number": 42}
# Serialize with plain serde
dumped = plain.dumps_typed(obj)
assert "+aes" not in dumped[0]
# Should still deserialize with encrypted serde
result = encrypted.loads_typed(dumped)
assert result == obj
def test_with_allowlist_uses_copy_protocol() -> None:
class CopyAwareSaver(BaseCheckpointSaver[str]):
def __init__(self) -> None:
super().__init__(serde=JsonPlusSerializer(allowed_msgpack_modules=None))
self.copy_was_used = False
def __copy__(self) -> object:
clone = object.__new__(self.__class__)
clone.__dict__ = self.__dict__.copy()
clone.copy_was_used = True
return clone
saver = CopyAwareSaver()
updated = saver.with_allowlist([("tests.test_encrypted", "MyPydantic")])
assert isinstance(updated, CopyAwareSaver)
assert updated is not saver
assert updated.copy_was_used is True
assert saver.copy_was_used is False
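# Editor-added example test (hedged): makes the _PassthroughCipher contract
# relied on by the strict-mode test above explicit.
def test_passthrough_cipher_roundtrip() -> None:
    cipher = _PassthroughCipher()
    name, blob = cipher.encrypt(b"payload")
    assert name == "passthrough"
    assert cipher.decrypt(name, blob) == b"payload"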
| {
"repo_id": "langchain-ai/langgraph",
"file_path": "libs/checkpoint/tests/test_encrypted.py",
"license": "MIT License",
"lines": 343,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langgraph:libs/langgraph/bench/serde_allowlist.py | from __future__ import annotations
from collections import deque
from dataclasses import dataclass
from enum import Enum
from typing import Annotated
from pydantic import BaseModel
from typing_extensions import NotRequired, TypedDict
from langgraph._internal._serde import collect_allowlist_from_schemas
class Color(Enum):
RED = "red"
BLUE = "blue"
@dataclass
class InnerDataclass:
value: int
class InnerModel(BaseModel):
name: str
class InnerTyped(TypedDict):
payload: InnerDataclass
optional: NotRequired[InnerModel]
@dataclass
class Node:
value: int
child: Node | None = None
@dataclass
class NestedDataclass:
inner: InnerDataclass
items: list[InnerModel]
mapping: dict[str, InnerDataclass]
optional: InnerModel | None
union: InnerDataclass | InnerModel
queue: deque[InnerDataclass]
frozen: frozenset[InnerModel]
AnnotatedList = Annotated[list[InnerDataclass], "meta"]
class DummyChannel:
@property
def ValueType(self) -> type[InnerDataclass]:
return InnerDataclass
@property
def UpdateType(self) -> type[InnerModel]:
return InnerModel
SCHEMAS_SMALL = [InnerDataclass, InnerModel, Color]
SCHEMAS_LARGE = [
InnerDataclass,
InnerModel,
Color,
InnerTyped,
Node,
NestedDataclass,
AnnotatedList,
]
CHANNELS = {"a": DummyChannel(), "b": DummyChannel()}
def collect_allowlist_small() -> None:
collect_allowlist_from_schemas(schemas=SCHEMAS_SMALL, channels=CHANNELS)
def collect_allowlist_large() -> None:
collect_allowlist_from_schemas(schemas=SCHEMAS_LARGE, channels=CHANNELS)
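# Illustrative invocation (editor-added sketch): both entry points are
# zero-argument callables, so they can be timed directly, e.g.
#
#     import timeit
#     print(timeit.timeit(collect_allowlist_small, number=1_000))
#     print(timeit.timeit(collect_allowlist_large, number=1_000))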
| {
"repo_id": "langchain-ai/langgraph",
"file_path": "libs/langgraph/bench/serde_allowlist.py",
"license": "MIT License",
"lines": 55,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langgraph:libs/langgraph/langgraph/_internal/_serde.py | from __future__ import annotations
import dataclasses
import logging
import sys
import types
from collections import deque
from enum import Enum
from typing import (
Annotated,
Any,
Literal,
Union,
get_args,
get_origin,
get_type_hints,
)
from langchain_core import messages as lc_messages
from langgraph.checkpoint.base import BaseCheckpointSaver
from pydantic import BaseModel
from typing_extensions import NotRequired, Required, is_typeddict
try:
from langgraph.checkpoint.serde._msgpack import ( # noqa: F401
STRICT_MSGPACK_ENABLED,
)
except ImportError:
STRICT_MSGPACK_ENABLED = False
_warned_allowlist_unsupported = False
logger = logging.getLogger(__name__)
def _supports_checkpointer_allowlist() -> bool:
return hasattr(BaseCheckpointSaver, "with_allowlist")
_SUPPORTS_ALLOWLIST = _supports_checkpointer_allowlist()
def apply_checkpointer_allowlist(
checkpointer: Any, allowlist: set[tuple[str, ...]] | None
) -> Any:
if not checkpointer or allowlist is None or checkpointer in (True, False):
return checkpointer
if not _SUPPORTS_ALLOWLIST:
global _warned_allowlist_unsupported
if not _warned_allowlist_unsupported:
logger.warning(
"Checkpointer does not support with_allowlist; strict msgpack "
"allowlist will be skipped."
)
_warned_allowlist_unsupported = True
return checkpointer
return checkpointer.with_allowlist(allowlist)
def curated_core_allowlist() -> set[tuple[str, ...]]:
allowlist: set[tuple[str, ...]] = set()
for name in (
"BaseMessage",
"BaseMessageChunk",
"HumanMessage",
"HumanMessageChunk",
"AIMessage",
"AIMessageChunk",
"SystemMessage",
"SystemMessageChunk",
"ChatMessage",
"ChatMessageChunk",
"ToolMessage",
"ToolMessageChunk",
"FunctionMessage",
"FunctionMessageChunk",
"RemoveMessage",
):
cls = getattr(lc_messages, name, None)
if cls is None:
continue
allowlist.add((cls.__module__, cls.__name__))
return allowlist
def build_serde_allowlist(
*,
schemas: list[type[Any]] | None = None,
channels: dict[str, Any] | None = None,
) -> set[tuple[str, ...]]:
allowlist = curated_core_allowlist()
if schemas:
schemas = [schema for schema in schemas if schema is not None]
return allowlist | collect_allowlist_from_schemas(
schemas=schemas,
channels=channels,
)
def collect_allowlist_from_schemas(
*,
schemas: list[type[Any]] | None = None,
channels: dict[str, Any] | None = None,
) -> set[tuple[str, ...]]:
allowlist: set[tuple[str, ...]] = set()
seen: set[Any] = set()
seen_ids: set[int] = set()
if schemas:
for schema in schemas:
_collect_from_type(schema, allowlist, seen, seen_ids)
if channels:
for channel in channels.values():
value_type = getattr(channel, "ValueType", None)
if value_type is not None:
_collect_from_type(value_type, allowlist, seen, seen_ids)
update_type = getattr(channel, "UpdateType", None)
if update_type is not None:
_collect_from_type(update_type, allowlist, seen, seen_ids)
return allowlist
def _collect_from_type(
typ: Any,
allowlist: set[tuple[str, ...]],
seen: set[Any],
seen_ids: set[int],
) -> None:
if _already_seen(typ, seen, seen_ids):
return
if typ is Any or typ is None:
return
if typ is Literal:
return
if isinstance(typ, types.UnionType):
for arg in typ.__args__:
_collect_from_type(arg, allowlist, seen, seen_ids)
return
origin = get_origin(typ)
if origin is Union:
for arg in get_args(typ):
_collect_from_type(arg, allowlist, seen, seen_ids)
return
if origin is Annotated or origin in (Required, NotRequired):
args = get_args(typ)
if args:
_collect_from_type(args[0], allowlist, seen, seen_ids)
return
if origin is Literal:
return
if origin in (list, set, tuple, dict, deque, frozenset):
for arg in get_args(typ):
_collect_from_type(arg, allowlist, seen, seen_ids)
return
if hasattr(typ, "__supertype__"):
_collect_from_type(typ.__supertype__, allowlist, seen, seen_ids)
return
if is_typeddict(typ):
for field_type in _safe_get_type_hints(typ).values():
_collect_from_type(field_type, allowlist, seen, seen_ids)
return
if _is_pydantic_model(typ):
allowlist.add((typ.__module__, typ.__name__))
field_types = _safe_get_type_hints(typ)
if field_types:
for field_type in field_types.values():
_collect_from_type(field_type, allowlist, seen, seen_ids)
else:
for field_type in _pydantic_field_types(typ):
_collect_from_type(field_type, allowlist, seen, seen_ids)
return
if dataclasses.is_dataclass(typ):
if typ_name := getattr(typ, "__name__", None):
allowlist.add((typ.__module__, typ_name))
field_types = _safe_get_type_hints(typ)
if field_types:
for field_type in field_types.values():
_collect_from_type(field_type, allowlist, seen, seen_ids)
else:
for field in dataclasses.fields(typ):
_collect_from_type(field.type, allowlist, seen, seen_ids)
return
if isinstance(typ, type) and issubclass(typ, Enum):
allowlist.add((typ.__module__, typ.__name__))
return
def _already_seen(typ: Any, seen: set[Any], seen_ids: set[int]) -> bool:
try:
if typ in seen:
return True
seen.add(typ)
return False
except TypeError:
typ_id = id(typ)
if typ_id in seen_ids:
return True
seen_ids.add(typ_id)
return False
def _safe_get_type_hints(typ: Any) -> dict[str, Any]:
try:
module = sys.modules.get(getattr(typ, "__module__", ""))
globalns = module.__dict__ if module else None
localns = dict(vars(typ)) if hasattr(typ, "__dict__") else None
return get_type_hints(
typ, globalns=globalns, localns=localns, include_extras=True
)
except Exception:
return {}
def _is_pydantic_model(typ: Any) -> bool:
if not isinstance(typ, type):
return False
if issubclass(typ, BaseModel):
return True
try:
from pydantic.v1 import BaseModel as BaseModelV1
except Exception:
return False
return issubclass(typ, BaseModelV1)
def _pydantic_field_types(typ: type[Any]) -> list[Any]:
if hasattr(typ, "model_fields"):
return [
field.annotation
for field in typ.model_fields.values()
if getattr(field, "annotation", None) is not None
]
if hasattr(typ, "__fields__"):
return [
field.outer_type_
for field in typ.__fields__.values()
if getattr(field, "outer_type_", None) is not None
]
return []
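# Illustrative usage (editor-added sketch): collecting from a pydantic model
# also picks up nested dataclass and enum field types; the example classes
# below are hypothetical.
#
#     from dataclasses import dataclass
#
#     @dataclass
#     class _Inner:
#         value: int
#
#     class _State(BaseModel):
#         inner: _Inner
#
#     allow = collect_allowlist_from_schemas(schemas=[_State])
#     assert (_State.__module__, "_State") in allow
#     assert (_Inner.__module__, "_Inner") in allow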
| {
"repo_id": "langchain-ai/langgraph",
"file_path": "libs/langgraph/langgraph/_internal/_serde.py",
"license": "MIT License",
"lines": 211,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langgraph:libs/langgraph/tests/test_serde_allowlist.py | from __future__ import annotations
from collections import deque
from dataclasses import dataclass
from enum import Enum
from typing import TYPE_CHECKING, Annotated, Any, Literal, NewType, Optional, Union
import pytest
from pydantic import BaseModel
from typing_extensions import NotRequired, Required, TypedDict
from langgraph._internal._serde import (
collect_allowlist_from_schemas,
curated_core_allowlist,
)
class Color(Enum):
RED = "red"
BLUE = "blue"
@dataclass
class InnerDataclass:
value: int
class InnerModel(BaseModel):
name: str
@dataclass
class Node:
value: int
child: Node | None = None
if TYPE_CHECKING:
class MissingType:
pass
@dataclass
class MissingRefDataclass:
payload: MissingType
class Payload(TypedDict):
item: InnerDataclass
maybe: NotRequired[InnerModel]
required: Required[str]
@dataclass
class NestedDataclass:
inner: InnerDataclass
items: list[InnerModel]
mapping: dict[str, InnerDataclass]
optional: InnerModel | None
union: InnerDataclass | InnerModel
queue: deque[InnerDataclass]
frozen: frozenset[InnerModel]
AnnotatedList = Annotated[list[InnerDataclass], "meta"]
UserId = NewType("UserId", int)
class DummyChannel:
@property
def ValueType(self) -> type[InnerDataclass]:
return InnerDataclass
@property
def UpdateType(self) -> type[InnerModel]:
return InnerModel
def test_curated_core_allowlist_includes_messages() -> None:
try:
from langchain_core.messages import BaseMessage
except Exception:
pytest.skip("langchain_core not available")
allowlist = curated_core_allowlist()
assert (BaseMessage.__module__, BaseMessage.__name__) in allowlist
def test_collect_allowlist_basic_models() -> None:
allowlist = collect_allowlist_from_schemas(
schemas=[InnerDataclass, InnerModel, Color]
)
assert (InnerDataclass.__module__, InnerDataclass.__name__) in allowlist
assert (InnerModel.__module__, InnerModel.__name__) in allowlist
assert (Color.__module__, Color.__name__) in allowlist
def test_collect_allowlist_nested_containers() -> None:
allowlist = collect_allowlist_from_schemas(schemas=[NestedDataclass])
assert (NestedDataclass.__module__, NestedDataclass.__name__) in allowlist
assert (InnerDataclass.__module__, InnerDataclass.__name__) in allowlist
assert (InnerModel.__module__, InnerModel.__name__) in allowlist
def test_collect_allowlist_annotated_and_union() -> None:
allowlist = collect_allowlist_from_schemas(
schemas=[AnnotatedList, InnerModel | None, InnerDataclass | None]
)
assert (InnerDataclass.__module__, InnerDataclass.__name__) in allowlist
assert (InnerModel.__module__, InnerModel.__name__) in allowlist
def test_collect_allowlist_literal_and_any() -> None:
allowlist = collect_allowlist_from_schemas(schemas=[Any, Literal["a"]])
assert allowlist == set()
def test_collect_allowlist_typeddict_fields_only() -> None:
allowlist = collect_allowlist_from_schemas(schemas=[Payload])
assert (InnerDataclass.__module__, InnerDataclass.__name__) in allowlist
assert (InnerModel.__module__, InnerModel.__name__) in allowlist
assert (Payload.__module__, Payload.__name__) not in allowlist
def test_collect_allowlist_forward_refs() -> None:
allowlist = collect_allowlist_from_schemas(schemas=[Node])
assert (Node.__module__, Node.__name__) in allowlist
def test_collect_allowlist_missing_forward_ref() -> None:
allowlist = collect_allowlist_from_schemas(schemas=[MissingRefDataclass])
assert allowlist == {(MissingRefDataclass.__module__, MissingRefDataclass.__name__)}
def test_collect_allowlist_newtype_supertype() -> None:
allowlist = collect_allowlist_from_schemas(schemas=[UserId])
assert allowlist == set()
def test_collect_allowlist_channels() -> None:
channels = {"a": DummyChannel(), "b": DummyChannel()}
allowlist = collect_allowlist_from_schemas(channels=channels)
assert (InnerDataclass.__module__, InnerDataclass.__name__) in allowlist
assert (InnerModel.__module__, InnerModel.__name__) in allowlist
def test_collect_allowlist_pep604_union() -> None:
schema = InnerDataclass | InnerModel
allowlist = collect_allowlist_from_schemas(schemas=[schema])
assert (InnerDataclass.__module__, InnerDataclass.__name__) in allowlist
assert (InnerModel.__module__, InnerModel.__name__) in allowlist
def test_collect_allowlist_typing_union_optional() -> None:
typing_optional = Optional[InnerDataclass] # noqa: UP045
typing_union = Union[InnerDataclass, InnerModel] # noqa: UP007
allowlist = collect_allowlist_from_schemas(schemas=[typing_optional, typing_union])
assert (InnerDataclass.__module__, InnerDataclass.__name__) in allowlist
assert (InnerModel.__module__, InnerModel.__name__) in allowlist
| {
"repo_id": "langchain-ai/langgraph",
"file_path": "libs/langgraph/tests/test_serde_allowlist.py",
"license": "MIT License",
"lines": 110,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langgraph:libs/langgraph/tests/test_subgraph_persistence.py | """Tests for subgraph persistence behavior (sync).
Covers three checkpointer settings for subgraph state:
- checkpointer=False: no persistence, even when parent has a checkpointer
- checkpointer=None (default): "stateless" — inherits parent checkpointer for
interrupt support, but state resets each invocation. This is the common case
when an agent is invoked from inside a tool used by another agent.
- checkpointer=True: "stateful" — state accumulates across invocations on the same thread id
"""
from uuid import uuid4
from langchain_core.messages import AIMessage, HumanMessage
from langgraph.checkpoint.base import BaseCheckpointSaver
from typing_extensions import TypedDict
from langgraph.graph import START, StateGraph
from langgraph.graph.message import MessagesState
from langgraph.types import Command, Interrupt, interrupt
from tests.any_str import AnyStr
class ParentState(TypedDict):
result: str
# -- checkpointer=None (stateless) --
def test_stateless_interrupt_resume(
sync_checkpointer: BaseCheckpointSaver,
) -> None:
"""Tests that a subgraph compiled with checkpointer=None (the default) can
still support interrupt/resume when invoked from inside a parent graph that
has a checkpointer. This is the "stateless" pattern — the subgraph inherits
the parent's checkpointer just enough to pause and resume, but does not
retain any state across separate parent invocations. This pattern commonly
appears when an agent is invoked from inside a tool used by another agent.
"""
# Build a subgraph that interrupts before echoing.
# Two nodes: "process" interrupts then echoes, "respond" returns "Done".
def process(state: MessagesState) -> dict:
interrupt("continue?")
return {
"messages": [AIMessage(content=f"Processing: {state['messages'][-1].text}")]
}
def respond(state: MessagesState) -> dict:
return {"messages": [AIMessage(content="Done")]}
inner = (
StateGraph(MessagesState)
.add_node("process", process)
.add_node("respond", respond)
.add_edge(START, "process")
.add_edge("process", "respond")
.compile()
)
def call_inner(state: ParentState) -> dict:
resp = inner.invoke({"messages": [HumanMessage(content="apples")]})
return {"result": resp["messages"][-1].text}
parent = (
StateGraph(ParentState)
.add_node("call_inner", call_inner)
.add_edge(START, "call_inner")
.compile(checkpointer=sync_checkpointer)
)
config = {"configurable": {"thread_id": str(uuid4())}}
# First invoke hits the interrupt
result = parent.invoke({"result": ""}, config)
assert result == {
"result": "",
"__interrupt__": [Interrupt(value="continue?", id=AnyStr())],
}
# Resume completes the subgraph
result = parent.invoke(Command(resume=True), config)
assert result == {"result": "Done"}
def test_stateless_state_resets(
sync_checkpointer: BaseCheckpointSaver,
) -> None:
"""Tests that a subgraph compiled with checkpointer=None (the default) does
not retain any message history between separate parent invocations. Each time
the parent graph invokes the subgraph, it starts with a clean slate. This
confirms the "stateless" behavior: even though the parent has a checkpointer,
the subgraph state is not persisted across calls.
"""
# Build a simple echo subgraph: echoes "Processing: <input>"
def echo(state: MessagesState) -> dict:
return {
"messages": [AIMessage(content=f"Processing: {state['messages'][-1].text}")]
}
inner = (
StateGraph(MessagesState)
.add_node("echo", echo)
.add_edge(START, "echo")
.compile()
)
subgraph_messages: list[list[str]] = []
call_count = 0
def call_inner(state: ParentState) -> dict:
nonlocal call_count
call_count += 1
topic = "apples" if call_count == 1 else "bananas"
resp = inner.invoke(
{"messages": [HumanMessage(content=f"tell me about {topic}")]}
)
subgraph_messages.append([m.text for m in resp["messages"]])
return {"result": resp["messages"][-1].text}
parent = (
StateGraph(ParentState)
.add_node("call_inner", call_inner)
.add_edge(START, "call_inner")
.compile(checkpointer=sync_checkpointer)
)
config = {"configurable": {"thread_id": str(uuid4())}}
result1 = parent.invoke({"result": ""}, config)
assert result1 == {"result": "Processing: tell me about apples"}
result2 = parent.invoke({"result": ""}, config)
assert result2 == {"result": "Processing: tell me about bananas"}
# Both invocations produce fresh history — no memory of prior call
assert subgraph_messages[0] == [
"tell me about apples",
"Processing: tell me about apples",
]
assert subgraph_messages[1] == [
"tell me about bananas",
"Processing: tell me about bananas",
]
def test_stateless_state_resets_with_interrupt(
sync_checkpointer: BaseCheckpointSaver,
) -> None:
"""Tests that a subgraph compiled with checkpointer=None resets its state
between parent invocations even when interrupt/resume is used. The subgraph
is invoked twice from the parent, each time with an interrupt that must be
resumed. After both invoke+resume cycles, each subgraph run should only
contain its own messages — no bleed-over from the previous run.
"""
# Build a subgraph that interrupts before echoing, then responds "Done"
def process(state: MessagesState) -> dict:
interrupt("continue?")
return {
"messages": [AIMessage(content=f"Processing: {state['messages'][-1].text}")]
}
def respond(state: MessagesState) -> dict:
return {"messages": [AIMessage(content="Done")]}
inner = (
StateGraph(MessagesState)
.add_node("process", process)
.add_node("respond", respond)
.add_edge(START, "process")
.add_edge("process", "respond")
.compile()
)
subgraph_messages: list[list[str]] = []
call_count = 0
def call_inner(state: ParentState) -> dict:
nonlocal call_count
call_count += 1
topic = "apples" if call_count == 1 else "bananas"
resp = inner.invoke(
{"messages": [HumanMessage(content=f"tell me about {topic}")]}
)
subgraph_messages.append([m.text for m in resp["messages"]])
return {"result": resp["messages"][-1].text}
parent = (
StateGraph(ParentState)
.add_node("call_inner", call_inner)
.add_edge(START, "call_inner")
.compile(checkpointer=sync_checkpointer)
)
config = {"configurable": {"thread_id": str(uuid4())}}
# First invoke+resume cycle
result = parent.invoke({"result": ""}, config)
assert result == {
"result": "",
"__interrupt__": [Interrupt(value="continue?", id=AnyStr())],
}
result = parent.invoke(Command(resume=True), config)
assert result == {"result": "Done"}
# Second invoke+resume cycle
result = parent.invoke({"result": ""}, config)
assert result == {
"result": "",
"__interrupt__": [Interrupt(value="continue?", id=AnyStr())],
}
result = parent.invoke(Command(resume=True), config)
assert result == {"result": "Done"}
# Both invocations produce fresh history — no memory of prior call
assert subgraph_messages[0] == [
"tell me about apples",
"Processing: tell me about apples",
"Done",
]
assert subgraph_messages[1] == [
"tell me about bananas",
"Processing: tell me about bananas",
"Done",
]
# -- checkpointer=False --
def test_checkpointer_false_no_persistence(
sync_checkpointer: BaseCheckpointSaver,
) -> None:
"""Tests that a subgraph compiled with checkpointer=False gets no
persistence at all, even when the parent graph has a checkpointer. Unlike
the default (checkpointer=None) which inherits just enough from the parent
to support interrupt/resume, checkpointer=False explicitly opts out of all
checkpoint behavior. Each invocation starts completely fresh.
"""
# Build a simple echo subgraph with checkpointer=False
def echo(state: MessagesState) -> dict:
return {
"messages": [AIMessage(content=f"Processed: {state['messages'][-1].text}")]
}
inner = (
StateGraph(MessagesState)
.add_node("echo", echo)
.add_edge(START, "echo")
.compile(checkpointer=False)
)
subgraph_messages: list[list[str]] = []
call_count = 0
def call_inner(state: ParentState) -> dict:
nonlocal call_count
call_count += 1
topic = "apples" if call_count == 1 else "bananas"
resp = inner.invoke(
{"messages": [HumanMessage(content=f"tell me about {topic}")]}
)
subgraph_messages.append([m.text for m in resp["messages"]])
return {"result": resp["messages"][-1].text}
parent = (
StateGraph(ParentState)
.add_node("call_inner", call_inner)
.add_edge(START, "call_inner")
.compile(checkpointer=sync_checkpointer)
)
config = {"configurable": {"thread_id": str(uuid4())}}
result1 = parent.invoke({"result": ""}, config)
assert result1 == {"result": "Processed: tell me about apples"}
result2 = parent.invoke({"result": ""}, config)
assert result2 == {"result": "Processed: tell me about bananas"}
# Both start fresh — no history from first call
assert subgraph_messages[0] == [
"tell me about apples",
"Processed: tell me about apples",
]
assert subgraph_messages[1] == [
"tell me about bananas",
"Processed: tell me about bananas",
]
# -- checkpointer=True (stateful) --
def test_stateful_state_accumulates(
sync_checkpointer: BaseCheckpointSaver,
) -> None:
"""Tests that a subgraph compiled with checkpointer=True ("stateful")
retains its message history across separate parent invocations. To enable
this, the subgraph is wrapped in an outer graph compiled with
checkpointer=True — this wrapper gives the inner subgraph its own persistent
checkpoint namespace. After two parent calls, the second subgraph invocation
should see messages from both the first and second calls.
"""
# Build a simple echo subgraph
def echo(state: MessagesState) -> dict:
return {
"messages": [AIMessage(content=f"Processing: {state['messages'][-1].text}")]
}
inner = (
StateGraph(MessagesState)
.add_node("echo", echo)
.add_edge(START, "echo")
.compile()
)
    # Wrap the inner subgraph with checkpointer=True to enable stateful behavior.
# The wrapper graph gives the subgraph its own persistent checkpoint
# namespace, keyed by the node name ("agent").
wrapper = (
StateGraph(MessagesState)
.add_node("agent", inner)
.add_edge(START, "agent")
.compile(checkpointer=True)
)
subgraph_messages: list[list[str]] = []
topics = ["apples", "bananas"]
def call_inner(state: ParentState) -> dict:
topic = topics[len(subgraph_messages)]
resp = wrapper.invoke(
{"messages": [HumanMessage(content=f"tell me about {topic}")]}
)
subgraph_messages.append([m.text for m in resp["messages"]])
return {"result": resp["messages"][-1].text}
parent = (
StateGraph(ParentState)
.add_node("call_inner", call_inner)
.add_edge(START, "call_inner")
.compile(checkpointer=sync_checkpointer)
)
config = {"configurable": {"thread_id": str(uuid4())}}
result1 = parent.invoke({"result": ""}, config)
assert result1 == {"result": "Processing: tell me about apples"}
result2 = parent.invoke({"result": ""}, config)
assert result2 == {"result": "Processing: tell me about bananas"}
# First call: fresh history
assert subgraph_messages[0] == [
"tell me about apples",
"Processing: tell me about apples",
]
# Second call: retains messages from first call
assert subgraph_messages[1] == [
"tell me about apples",
"Processing: tell me about apples",
"tell me about bananas",
"Processing: tell me about bananas",
]
def test_stateful_state_accumulates_with_interrupt(
sync_checkpointer: BaseCheckpointSaver,
) -> None:
"""Tests that a stateful subgraph (checkpointer=True) retains its
message history across parent invocations even when interrupt/resume is
involved. The subgraph interrupts before echoing, then responds "Done".
After two invoke+resume cycles, the second run should contain the full
accumulated history from both calls.
"""
# Build a subgraph that interrupts before echoing, then responds "Done"
def process(state: MessagesState) -> dict:
interrupt("continue?")
return {
"messages": [AIMessage(content=f"Processing: {state['messages'][-1].text}")]
}
def respond(state: MessagesState) -> dict:
return {"messages": [AIMessage(content="Done")]}
inner = (
StateGraph(MessagesState)
.add_node("process", process)
.add_node("respond", respond)
.add_edge(START, "process")
.add_edge("process", "respond")
.compile()
)
    # Wrap with checkpointer=True for stateful behavior
wrapper = (
StateGraph(MessagesState)
.add_node("agent", inner)
.add_edge(START, "agent")
.compile(checkpointer=True)
)
subgraph_messages: list[list[str]] = []
topics = ["apples", "bananas"]
def call_inner(state: ParentState) -> dict:
topic = topics[len(subgraph_messages)]
resp = wrapper.invoke(
{"messages": [HumanMessage(content=f"tell me about {topic}")]}
)
subgraph_messages.append([m.text for m in resp["messages"]])
return {"result": resp["messages"][-1].text}
parent = (
StateGraph(ParentState)
.add_node("call_inner", call_inner)
.add_edge(START, "call_inner")
.compile(checkpointer=sync_checkpointer)
)
config = {"configurable": {"thread_id": str(uuid4())}}
# First invoke+resume cycle
result = parent.invoke({"result": ""}, config)
assert result == {
"result": "",
"__interrupt__": [Interrupt(value="continue?", id=AnyStr())],
}
result = parent.invoke(Command(resume=True), config)
assert result == {"result": "Done"}
# Second invoke+resume cycle
result = parent.invoke({"result": ""}, config)
assert result == {
"result": "",
"__interrupt__": [Interrupt(value="continue?", id=AnyStr())],
}
result = parent.invoke(Command(resume=True), config)
assert result == {"result": "Done"}
# First call: fresh history
assert subgraph_messages[0] == [
"tell me about apples",
"Processing: tell me about apples",
"Done",
]
# Second call: retains messages from first call
assert subgraph_messages[1] == [
"tell me about apples",
"Processing: tell me about apples",
"Done",
"tell me about bananas",
"Processing: tell me about bananas",
"Done",
]
def test_stateful_interrupt_resume(
sync_checkpointer: BaseCheckpointSaver,
) -> None:
"""Tests that a stateful subgraph (checkpointer=True) correctly
supports interrupt/resume while also accumulating state. Each invoke+resume
pair triggers the subgraph, and after the second pair completes we verify
both the per-step invoke outputs and the accumulated message history. This
exercises the full lifecycle: interrupt, resume, state accumulation.
"""
# Build a subgraph that interrupts before echoing, then responds "Done"
def process(state: MessagesState) -> dict:
interrupt("continue?")
return {
"messages": [AIMessage(content=f"Processing: {state['messages'][-1].text}")]
}
def respond(state: MessagesState) -> dict:
return {"messages": [AIMessage(content="Done")]}
inner = (
StateGraph(MessagesState)
.add_node("process", process)
.add_node("respond", respond)
.add_edge(START, "process")
.add_edge("process", "respond")
.compile()
)
    # Wrap with checkpointer=True for stateful behavior
wrapper = (
StateGraph(MessagesState)
.add_node("agent", inner)
.add_edge(START, "agent")
.compile(checkpointer=True)
)
subgraph_messages: list[list[str]] = []
topics = ["apples", "bananas"]
def call_inner(state: ParentState) -> dict:
topic = topics[len(subgraph_messages)]
resp = wrapper.invoke(
{"messages": [HumanMessage(content=f"tell me about {topic}")]}
)
subgraph_messages.append([m.text for m in resp["messages"]])
return {"result": resp["messages"][-1].text}
parent = (
StateGraph(ParentState)
.add_node("call_inner", call_inner)
.add_edge(START, "call_inner")
.compile(checkpointer=sync_checkpointer)
)
config = {"configurable": {"thread_id": str(uuid4())}}
# First invocation: hits interrupt
result = parent.invoke({"result": ""}, config)
assert result == {
"result": "",
"__interrupt__": [Interrupt(value="continue?", id=AnyStr())],
}
# Resume: completes first call
result = parent.invoke(Command(resume=True), config)
assert result == {"result": "Done"}
assert subgraph_messages[0] == [
"tell me about apples",
"Processing: tell me about apples",
"Done",
]
# Second invocation: hits interrupt, state accumulated from first call
result = parent.invoke({"result": ""}, config)
assert result == {
"result": "",
"__interrupt__": [Interrupt(value="continue?", id=AnyStr())],
}
# Resume: completes second call with accumulated state
result = parent.invoke(Command(resume=True), config)
assert result == {"result": "Done"}
assert subgraph_messages[1] == [
"tell me about apples",
"Processing: tell me about apples",
"Done",
"tell me about bananas",
"Processing: tell me about bananas",
"Done",
]
def test_stateful_namespace_isolation(
sync_checkpointer: BaseCheckpointSaver,
) -> None:
"""Tests that two different stateful subgraphs (checkpointer=True)
maintain completely independent state when they use different wrapper node
names. A "fruit_agent" and "veggie_agent" are each wrapped in their own
stateful graph. After two parent invocations, each agent should only
see its own accumulated history with no cross-contamination between them.
"""
# Build two simple echo subgraphs with different prefixes
def fruit_echo(state: MessagesState) -> dict:
return {"messages": [AIMessage(content=f"Fruit: {state['messages'][-1].text}")]}
def veggie_echo(state: MessagesState) -> dict:
return {
"messages": [AIMessage(content=f"Veggie: {state['messages'][-1].text}")]
}
fruit_inner = (
StateGraph(MessagesState)
.add_node("echo", fruit_echo)
.add_edge(START, "echo")
.compile()
)
veggie_inner = (
StateGraph(MessagesState)
.add_node("echo", veggie_echo)
.add_edge(START, "echo")
.compile()
)
# Wrap each with checkpointer=True, using different node names to get
# independent checkpoint namespaces
fruit = (
StateGraph(MessagesState)
.add_node("fruit_agent", fruit_inner)
.add_edge(START, "fruit_agent")
.compile(checkpointer=True)
)
veggie = (
StateGraph(MessagesState)
.add_node("veggie_agent", veggie_inner)
.add_edge(START, "veggie_agent")
.compile(checkpointer=True)
)
fruit_msgs: list[list[str]] = []
veggie_msgs: list[list[str]] = []
call_count = 0
def call_both(state: ParentState) -> dict:
nonlocal call_count
call_count += 1
suffix = "round 1" if call_count == 1 else "round 2"
f = fruit.invoke({"messages": [HumanMessage(content=f"cherries {suffix}")]})
v = veggie.invoke({"messages": [HumanMessage(content=f"broccoli {suffix}")]})
fruit_msgs.append([m.text for m in f["messages"]])
veggie_msgs.append([m.text for m in v["messages"]])
return {"result": f["messages"][-1].text}
parent = (
StateGraph(ParentState)
.add_node("call_both", call_both)
.add_edge(START, "call_both")
.compile(checkpointer=sync_checkpointer)
)
config = {"configurable": {"thread_id": str(uuid4())}}
result1 = parent.invoke({"result": ""}, config)
assert result1 == {"result": "Fruit: cherries round 1"}
result2 = parent.invoke({"result": ""}, config)
assert result2 == {"result": "Fruit: cherries round 2"}
# First call: each agent sees only its own history
assert fruit_msgs[0] == ["cherries round 1", "Fruit: cherries round 1"]
assert veggie_msgs[0] == ["broccoli round 1", "Veggie: broccoli round 1"]
# Second call: each accumulated independently — no cross-contamination
assert fruit_msgs[1] == [
"cherries round 1",
"Fruit: cherries round 1",
"cherries round 2",
"Fruit: cherries round 2",
]
assert veggie_msgs[1] == [
"broccoli round 1",
"Veggie: broccoli round 1",
"broccoli round 2",
"Veggie: broccoli round 2",
]
| {
"repo_id": "langchain-ai/langgraph",
"file_path": "libs/langgraph/tests/test_subgraph_persistence.py",
"license": "MIT License",
"lines": 540,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langgraph:libs/langgraph/tests/test_subgraph_persistence_async.py | """Tests for subgraph persistence behavior (async).
Covers three checkpointer settings for subgraph state:
- checkpointer=False: no persistence, even when parent has a checkpointer
- checkpointer=None (default): "stateless" — inherits parent checkpointer for
interrupt support, but state resets each invocation. This is the common case
when an agent is invoked from inside a tool used by another agent.
- checkpointer=True: "stateful" — state accumulates across invocations on the same thread id
"""
import sys
from uuid import uuid4
import pytest
from langchain_core.messages import AIMessage, HumanMessage
from langgraph.checkpoint.base import BaseCheckpointSaver
from typing_extensions import TypedDict
from langgraph.graph import START, StateGraph
from langgraph.graph.message import MessagesState
from langgraph.types import Command, Interrupt, interrupt
from tests.any_str import AnyStr
pytestmark = pytest.mark.anyio
NEEDS_CONTEXTVARS = pytest.mark.skipif(
sys.version_info < (3, 11),
reason="Python 3.11+ is required for async contextvars support",
)
class ParentState(TypedDict):
result: str
# -- checkpointer=None (stateless) --
@NEEDS_CONTEXTVARS
async def test_stateless_interrupt_resume_async(
async_checkpointer: BaseCheckpointSaver,
) -> None:
"""Tests that a subgraph compiled with checkpointer=None (the default) can
still support interrupt/resume when invoked from inside a parent graph that
has a checkpointer. This is the "stateless" pattern — the subgraph inherits
the parent's checkpointer just enough to pause and resume, but does not
retain any state across separate parent invocations. This pattern commonly
appears when an agent is invoked from inside a tool used by another agent.
"""
# Build a subgraph that interrupts before echoing.
# Two nodes: "process" interrupts then echoes, "respond" returns "Done".
def process(state: MessagesState) -> dict:
interrupt("continue?")
return {
"messages": [AIMessage(content=f"Processing: {state['messages'][-1].text}")]
}
def respond(state: MessagesState) -> dict:
return {"messages": [AIMessage(content="Done")]}
inner = (
StateGraph(MessagesState)
.add_node("process", process)
.add_node("respond", respond)
.add_edge(START, "process")
.add_edge("process", "respond")
.compile()
)
async def call_inner(state: ParentState) -> dict:
resp = await inner.ainvoke({"messages": [HumanMessage(content="apples")]})
return {"result": resp["messages"][-1].text}
parent = (
StateGraph(ParentState)
.add_node("call_inner", call_inner)
.add_edge(START, "call_inner")
.compile(checkpointer=async_checkpointer)
)
config = {"configurable": {"thread_id": str(uuid4())}}
# First invoke hits the interrupt
result = await parent.ainvoke({"result": ""}, config)
assert result == {
"result": "",
"__interrupt__": [Interrupt(value="continue?", id=AnyStr())],
}
# Resume completes the subgraph
result = await parent.ainvoke(Command(resume=True), config)
assert result == {"result": "Done"}
@NEEDS_CONTEXTVARS
async def test_stateless_state_resets_async(
async_checkpointer: BaseCheckpointSaver,
) -> None:
"""Tests that a subgraph compiled with checkpointer=None (the default) does
not retain any message history between separate parent invocations. Each time
the parent graph invokes the subgraph, it starts with a clean slate. This
confirms the "stateless" behavior: even though the parent has a checkpointer,
the subgraph state is not persisted across calls.
"""
# Build a simple echo subgraph: echoes "Processing: <input>"
def echo(state: MessagesState) -> dict:
return {
"messages": [AIMessage(content=f"Processing: {state['messages'][-1].text}")]
}
inner = (
StateGraph(MessagesState)
.add_node("echo", echo)
.add_edge(START, "echo")
.compile()
)
subgraph_messages: list[list[str]] = []
call_count = 0
async def call_inner(state: ParentState) -> dict:
nonlocal call_count
call_count += 1
topic = "apples" if call_count == 1 else "bananas"
resp = await inner.ainvoke(
{"messages": [HumanMessage(content=f"tell me about {topic}")]}
)
subgraph_messages.append([m.text for m in resp["messages"]])
return {"result": resp["messages"][-1].text}
parent = (
StateGraph(ParentState)
.add_node("call_inner", call_inner)
.add_edge(START, "call_inner")
.compile(checkpointer=async_checkpointer)
)
config = {"configurable": {"thread_id": str(uuid4())}}
result1 = await parent.ainvoke({"result": ""}, config)
assert result1 == {"result": "Processing: tell me about apples"}
result2 = await parent.ainvoke({"result": ""}, config)
assert result2 == {"result": "Processing: tell me about bananas"}
# Both invocations produce fresh history — no memory of prior call
assert subgraph_messages[0] == [
"tell me about apples",
"Processing: tell me about apples",
]
assert subgraph_messages[1] == [
"tell me about bananas",
"Processing: tell me about bananas",
]
@NEEDS_CONTEXTVARS
async def test_stateless_state_resets_with_interrupt_async(
async_checkpointer: BaseCheckpointSaver,
) -> None:
"""Tests that a subgraph compiled with checkpointer=None resets its state
between parent invocations even when interrupt/resume is used. The subgraph
is invoked twice from the parent, each time with an interrupt that must be
resumed. After both invoke+resume cycles, each subgraph run should only
contain its own messages — no bleed-over from the previous run.
"""
# Build a subgraph that interrupts before echoing, then responds "Done"
def process(state: MessagesState) -> dict:
interrupt("continue?")
return {
"messages": [AIMessage(content=f"Processing: {state['messages'][-1].text}")]
}
def respond(state: MessagesState) -> dict:
return {"messages": [AIMessage(content="Done")]}
inner = (
StateGraph(MessagesState)
.add_node("process", process)
.add_node("respond", respond)
.add_edge(START, "process")
.add_edge("process", "respond")
.compile()
)
subgraph_messages: list[list[str]] = []
call_count = 0
async def call_inner(state: ParentState) -> dict:
nonlocal call_count
call_count += 1
topic = "apples" if call_count == 1 else "bananas"
resp = await inner.ainvoke(
{"messages": [HumanMessage(content=f"tell me about {topic}")]}
)
subgraph_messages.append([m.text for m in resp["messages"]])
return {"result": resp["messages"][-1].text}
parent = (
StateGraph(ParentState)
.add_node("call_inner", call_inner)
.add_edge(START, "call_inner")
.compile(checkpointer=async_checkpointer)
)
config = {"configurable": {"thread_id": str(uuid4())}}
# First invoke+resume cycle
result = await parent.ainvoke({"result": ""}, config)
assert result == {
"result": "",
"__interrupt__": [Interrupt(value="continue?", id=AnyStr())],
}
result = await parent.ainvoke(Command(resume=True), config)
assert result == {"result": "Done"}
# Second invoke+resume cycle
result = await parent.ainvoke({"result": ""}, config)
assert result == {
"result": "",
"__interrupt__": [Interrupt(value="continue?", id=AnyStr())],
}
result = await parent.ainvoke(Command(resume=True), config)
assert result == {"result": "Done"}
# Both invocations produce fresh history — no memory of prior call
assert subgraph_messages[0] == [
"tell me about apples",
"Processing: tell me about apples",
"Done",
]
assert subgraph_messages[1] == [
"tell me about bananas",
"Processing: tell me about bananas",
"Done",
]
# -- checkpointer=False --
@NEEDS_CONTEXTVARS
async def test_checkpointer_false_no_persistence_async(
async_checkpointer: BaseCheckpointSaver,
) -> None:
"""Tests that a subgraph compiled with checkpointer=False gets no
persistence at all, even when the parent graph has a checkpointer. Unlike
the default (checkpointer=None) which inherits just enough from the parent
to support interrupt/resume, checkpointer=False explicitly opts out of all
checkpoint behavior. Each invocation starts completely fresh.
"""
# Build a simple echo subgraph with checkpointer=False
def echo(state: MessagesState) -> dict:
return {
"messages": [AIMessage(content=f"Processed: {state['messages'][-1].text}")]
}
inner = (
StateGraph(MessagesState)
.add_node("echo", echo)
.add_edge(START, "echo")
.compile(checkpointer=False)
)
subgraph_messages: list[list[str]] = []
call_count = 0
async def call_inner(state: ParentState) -> dict:
nonlocal call_count
call_count += 1
topic = "apples" if call_count == 1 else "bananas"
resp = await inner.ainvoke(
{"messages": [HumanMessage(content=f"tell me about {topic}")]}
)
subgraph_messages.append([m.text for m in resp["messages"]])
return {"result": resp["messages"][-1].text}
parent = (
StateGraph(ParentState)
.add_node("call_inner", call_inner)
.add_edge(START, "call_inner")
.compile(checkpointer=async_checkpointer)
)
config = {"configurable": {"thread_id": str(uuid4())}}
result1 = await parent.ainvoke({"result": ""}, config)
assert result1 == {"result": "Processed: tell me about apples"}
result2 = await parent.ainvoke({"result": ""}, config)
assert result2 == {"result": "Processed: tell me about bananas"}
# Both start fresh — no history from first call
assert subgraph_messages[0] == [
"tell me about apples",
"Processed: tell me about apples",
]
assert subgraph_messages[1] == [
"tell me about bananas",
"Processed: tell me about bananas",
]
# -- checkpointer=True (stateful) --
@NEEDS_CONTEXTVARS
async def test_stateful_state_accumulates_async(
async_checkpointer: BaseCheckpointSaver,
) -> None:
"""Tests that a subgraph compiled with checkpointer=True ("stateful")
retains its message history across separate parent invocations. To enable
this, the subgraph is wrapped in an outer graph compiled with
checkpointer=True — this wrapper gives the inner subgraph its own persistent
checkpoint namespace. After two parent calls, the second subgraph invocation
should see messages from both the first and second calls.
"""
# Build a simple echo subgraph
def echo(state: MessagesState) -> dict:
return {
"messages": [AIMessage(content=f"Processing: {state['messages'][-1].text}")]
}
inner = (
StateGraph(MessagesState)
.add_node("echo", echo)
.add_edge(START, "echo")
.compile()
)
    # Wrap the inner subgraph with checkpointer=True to enable stateful behavior.
# The wrapper graph gives the subgraph its own persistent checkpoint
# namespace, keyed by the node name ("agent").
wrapper = (
StateGraph(MessagesState)
.add_node("agent", inner)
.add_edge(START, "agent")
.compile(checkpointer=True)
)
subgraph_messages: list[list[str]] = []
topics = ["apples", "bananas"]
async def call_inner(state: ParentState) -> dict:
topic = topics[len(subgraph_messages)]
resp = await wrapper.ainvoke(
{"messages": [HumanMessage(content=f"tell me about {topic}")]}
)
subgraph_messages.append([m.text for m in resp["messages"]])
return {"result": resp["messages"][-1].text}
parent = (
StateGraph(ParentState)
.add_node("call_inner", call_inner)
.add_edge(START, "call_inner")
.compile(checkpointer=async_checkpointer)
)
config = {"configurable": {"thread_id": str(uuid4())}}
result1 = await parent.ainvoke({"result": ""}, config)
assert result1 == {"result": "Processing: tell me about apples"}
result2 = await parent.ainvoke({"result": ""}, config)
assert result2 == {"result": "Processing: tell me about bananas"}
# First call: fresh history
assert subgraph_messages[0] == [
"tell me about apples",
"Processing: tell me about apples",
]
# Second call: retains messages from first call
assert subgraph_messages[1] == [
"tell me about apples",
"Processing: tell me about apples",
"tell me about bananas",
"Processing: tell me about bananas",
]
@NEEDS_CONTEXTVARS
async def test_stateful_state_accumulates_with_interrupt_async(
async_checkpointer: BaseCheckpointSaver,
) -> None:
"""Tests that a stateful subgraph (checkpointer=True) retains its
message history across parent invocations even when interrupt/resume is
involved. The subgraph interrupts before echoing, then responds "Done".
After two invoke+resume cycles, the second run should contain the full
accumulated history from both calls.
"""
# Build a subgraph that interrupts before echoing, then responds "Done"
def process(state: MessagesState) -> dict:
interrupt("continue?")
return {
"messages": [AIMessage(content=f"Processing: {state['messages'][-1].text}")]
}
def respond(state: MessagesState) -> dict:
return {"messages": [AIMessage(content="Done")]}
inner = (
StateGraph(MessagesState)
.add_node("process", process)
.add_node("respond", respond)
.add_edge(START, "process")
.add_edge("process", "respond")
.compile()
)
    # Wrap with checkpointer=True for stateful behavior
wrapper = (
StateGraph(MessagesState)
.add_node("agent", inner)
.add_edge(START, "agent")
.compile(checkpointer=True)
)
subgraph_messages: list[list[str]] = []
topics = ["apples", "bananas"]
async def call_inner(state: ParentState) -> dict:
topic = topics[len(subgraph_messages)]
resp = await wrapper.ainvoke(
{"messages": [HumanMessage(content=f"tell me about {topic}")]}
)
subgraph_messages.append([m.text for m in resp["messages"]])
return {"result": resp["messages"][-1].text}
parent = (
StateGraph(ParentState)
.add_node("call_inner", call_inner)
.add_edge(START, "call_inner")
.compile(checkpointer=async_checkpointer)
)
config = {"configurable": {"thread_id": str(uuid4())}}
# First invoke+resume cycle
result = await parent.ainvoke({"result": ""}, config)
assert result == {
"result": "",
"__interrupt__": [Interrupt(value="continue?", id=AnyStr())],
}
result = await parent.ainvoke(Command(resume=True), config)
assert result == {"result": "Done"}
# Second invoke+resume cycle
result = await parent.ainvoke({"result": ""}, config)
assert result == {
"result": "",
"__interrupt__": [Interrupt(value="continue?", id=AnyStr())],
}
result = await parent.ainvoke(Command(resume=True), config)
assert result == {"result": "Done"}
# First call: fresh history
assert subgraph_messages[0] == [
"tell me about apples",
"Processing: tell me about apples",
"Done",
]
# Second call: retains messages from first call
assert subgraph_messages[1] == [
"tell me about apples",
"Processing: tell me about apples",
"Done",
"tell me about bananas",
"Processing: tell me about bananas",
"Done",
]
@NEEDS_CONTEXTVARS
async def test_stateful_interrupt_resume_async(
async_checkpointer: BaseCheckpointSaver,
) -> None:
"""Tests that a stateful subgraph (checkpointer=True) correctly
supports interrupt/resume while also accumulating state. Each invoke+resume
pair triggers the subgraph, and after the second pair completes we verify
both the per-step invoke outputs and the accumulated message history. This
exercises the full lifecycle: interrupt, resume, state accumulation.
"""
# Build a subgraph that interrupts before echoing, then responds "Done"
def process(state: MessagesState) -> dict:
interrupt("continue?")
return {
"messages": [AIMessage(content=f"Processing: {state['messages'][-1].text}")]
}
def respond(state: MessagesState) -> dict:
return {"messages": [AIMessage(content="Done")]}
inner = (
StateGraph(MessagesState)
.add_node("process", process)
.add_node("respond", respond)
.add_edge(START, "process")
.add_edge("process", "respond")
.compile()
)
    # Wrap with checkpointer=True for stateful behavior
wrapper = (
StateGraph(MessagesState)
.add_node("agent", inner)
.add_edge(START, "agent")
.compile(checkpointer=True)
)
subgraph_messages: list[list[str]] = []
topics = ["apples", "bananas"]
async def call_inner(state: ParentState) -> dict:
topic = topics[len(subgraph_messages)]
resp = await wrapper.ainvoke(
{"messages": [HumanMessage(content=f"tell me about {topic}")]}
)
subgraph_messages.append([m.text for m in resp["messages"]])
return {"result": resp["messages"][-1].text}
parent = (
StateGraph(ParentState)
.add_node("call_inner", call_inner)
.add_edge(START, "call_inner")
.compile(checkpointer=async_checkpointer)
)
config = {"configurable": {"thread_id": str(uuid4())}}
# First invocation: hits interrupt
result = await parent.ainvoke({"result": ""}, config)
assert result == {
"result": "",
"__interrupt__": [Interrupt(value="continue?", id=AnyStr())],
}
# Resume: completes first call
result = await parent.ainvoke(Command(resume=True), config)
assert result == {"result": "Done"}
assert subgraph_messages[0] == [
"tell me about apples",
"Processing: tell me about apples",
"Done",
]
# Second invocation: hits interrupt, state accumulated from first call
result = await parent.ainvoke({"result": ""}, config)
assert result == {
"result": "",
"__interrupt__": [Interrupt(value="continue?", id=AnyStr())],
}
# Resume: completes second call with accumulated state
result = await parent.ainvoke(Command(resume=True), config)
assert result == {"result": "Done"}
assert subgraph_messages[1] == [
"tell me about apples",
"Processing: tell me about apples",
"Done",
"tell me about bananas",
"Processing: tell me about bananas",
"Done",
]
@NEEDS_CONTEXTVARS
async def test_stateful_namespace_isolation_async(
async_checkpointer: BaseCheckpointSaver,
) -> None:
"""Tests that two different stateful subgraphs (checkpointer=True)
maintain completely independent state when they use different wrapper node
names. A "fruit_agent" and "veggie_agent" are each wrapped in their own
stateful graph. After two parent invocations, each agent should only
see its own accumulated history with no cross-contamination between them.
"""
# Build two simple echo subgraphs with different prefixes
def fruit_echo(state: MessagesState) -> dict:
return {"messages": [AIMessage(content=f"Fruit: {state['messages'][-1].text}")]}
def veggie_echo(state: MessagesState) -> dict:
return {
"messages": [AIMessage(content=f"Veggie: {state['messages'][-1].text}")]
}
fruit_inner = (
StateGraph(MessagesState)
.add_node("echo", fruit_echo)
.add_edge(START, "echo")
.compile()
)
veggie_inner = (
StateGraph(MessagesState)
.add_node("echo", veggie_echo)
.add_edge(START, "echo")
.compile()
)
# Wrap each with checkpointer=True, using different node names to get
# independent checkpoint namespaces
fruit = (
StateGraph(MessagesState)
.add_node("fruit_agent", fruit_inner)
.add_edge(START, "fruit_agent")
.compile(checkpointer=True)
)
veggie = (
StateGraph(MessagesState)
.add_node("veggie_agent", veggie_inner)
.add_edge(START, "veggie_agent")
.compile(checkpointer=True)
)
fruit_msgs: list[list[str]] = []
veggie_msgs: list[list[str]] = []
call_count = 0
async def call_both(state: ParentState) -> dict:
nonlocal call_count
call_count += 1
suffix = "round 1" if call_count == 1 else "round 2"
f = await fruit.ainvoke(
{"messages": [HumanMessage(content=f"cherries {suffix}")]}
)
v = await veggie.ainvoke(
{"messages": [HumanMessage(content=f"broccoli {suffix}")]}
)
fruit_msgs.append([m.text for m in f["messages"]])
veggie_msgs.append([m.text for m in v["messages"]])
return {"result": f["messages"][-1].text}
parent = (
StateGraph(ParentState)
.add_node("call_both", call_both)
.add_edge(START, "call_both")
.compile(checkpointer=async_checkpointer)
)
config = {"configurable": {"thread_id": str(uuid4())}}
result1 = await parent.ainvoke({"result": ""}, config)
assert result1 == {"result": "Fruit: cherries round 1"}
result2 = await parent.ainvoke({"result": ""}, config)
assert result2 == {"result": "Fruit: cherries round 2"}
# First call: each agent sees only its own history
assert fruit_msgs[0] == ["cherries round 1", "Fruit: cherries round 1"]
assert veggie_msgs[0] == ["broccoli round 1", "Veggie: broccoli round 1"]
# Second call: each accumulated independently — no cross-contamination
assert fruit_msgs[1] == [
"cherries round 1",
"Fruit: cherries round 1",
"cherries round 2",
"Fruit: cherries round 2",
]
assert veggie_msgs[1] == [
"broccoli round 1",
"Veggie: broccoli round 1",
"broccoli round 2",
"Veggie: broccoli round 2",
]
| {
"repo_id": "langchain-ai/langgraph",
"file_path": "libs/langgraph/tests/test_subgraph_persistence_async.py",
"license": "MIT License",
"lines": 559,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langgraph:libs/langgraph/tests/test_parent_command.py | from __future__ import annotations
from typing_extensions import TypedDict
from langgraph.graph import END, START, StateGraph
from langgraph.types import Command
def test_parent_command_from_nested_subgraph() -> None:
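    """Tests that a Command(graph=Command.PARENT, goto=...) returned from a node
    of a directly invoked subgraph aborts the invoking parent node and jumps
    straight to the named parent node, no matter which nested call raised it.
    """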
class ParentState(TypedDict):
jump_from_idx: int
class ChildState(TypedDict):
jump: bool
child_builder: StateGraph[ChildState] = StateGraph(ChildState)
def child_node(state: ChildState) -> Command | ChildState:
if state["jump"]:
return Command(graph=Command.PARENT, goto="parent_second")
return state
child_builder.add_node("node", child_node)
child_builder.add_edge(START, "node")
child_0 = child_builder.compile()
child_1 = child_builder.compile()
parent_builder: StateGraph[ParentState] = StateGraph(ParentState)
def parent_first(state: ParentState) -> ParentState:
child_0.invoke({"jump": state["jump_from_idx"] == 1})
if state["jump_from_idx"] == 1:
raise AssertionError("Shouldn't be here")
child_1.invoke({"jump": state["jump_from_idx"] == 2})
if state["jump_from_idx"] == 2:
raise AssertionError("Shouldn't be here")
return state
def parent_second(state: ParentState) -> ParentState:
return state
parent_builder.add_node("parent_first", parent_first)
parent_builder.add_node("parent_second", parent_second)
parent_builder.add_edge(START, "parent_first")
parent_builder.add_edge("parent_second", END)
graph = parent_builder.compile()
assert graph.invoke({"jump_from_idx": 1}) == {"jump_from_idx": 1}
assert graph.invoke({"jump_from_idx": 2}) == {"jump_from_idx": 2}
| {
"repo_id": "langchain-ai/langgraph",
"file_path": "libs/langgraph/tests/test_parent_command.py",
"license": "MIT License",
"lines": 36,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langgraph:libs/langgraph/tests/test_parent_command_async.py | from __future__ import annotations
import pytest
from langchain_core.runnables import RunnableConfig
from typing_extensions import TypedDict
from langgraph.graph import END, START, StateGraph
from langgraph.types import Command
pytestmark = pytest.mark.anyio
async def test_parent_command_from_nested_subgraph() -> None:
class ParentState(TypedDict):
jump_from_idx: int
class ChildState(TypedDict):
jump: bool
child_builder: StateGraph[ChildState] = StateGraph(ChildState)
async def child_node(state: ChildState) -> Command | ChildState:
if state["jump"]:
return Command(graph=Command.PARENT, goto="parent_second")
return state
child_builder.add_node("node", child_node)
child_builder.add_edge(START, "node")
child_0 = child_builder.compile()
child_1 = child_builder.compile()
parent_builder: StateGraph[ParentState] = StateGraph(ParentState)
async def parent_first(state: ParentState, config: RunnableConfig) -> ParentState:
await child_0.ainvoke({"jump": state["jump_from_idx"] == 1}, config)
if state["jump_from_idx"] == 1:
raise AssertionError("Shouldn't be here")
await child_1.ainvoke({"jump": state["jump_from_idx"] == 2}, config)
if state["jump_from_idx"] == 2:
raise AssertionError("Shouldn't be here")
return state
async def parent_second(state: ParentState) -> ParentState:
return state
parent_builder.add_node("parent_first", parent_first)
parent_builder.add_node("parent_second", parent_second)
parent_builder.add_edge(START, "parent_first")
parent_builder.add_edge("parent_second", END)
graph = parent_builder.compile().with_config(recursion_limit=10)
assert await graph.ainvoke({"jump_from_idx": 1}) == {"jump_from_idx": 1}
assert await graph.ainvoke({"jump_from_idx": 2}) == {"jump_from_idx": 2}
| {
"repo_id": "langchain-ai/langgraph",
"file_path": "libs/langgraph/tests/test_parent_command_async.py",
"license": "MIT License",
"lines": 39,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langgraph:docs/generate_redirects.py | #!/usr/bin/env python3
"""
Generate HTML redirect files from redirects.json.
Usage:
python generate_redirects.py
This script reads redirects.json and generates individual HTML files
for each redirect path. Each HTML file uses meta refresh (0 delay)
which is SEO-friendly and treated similarly to 301 redirects by Google.
To add new redirects, simply edit redirects.json and re-run this script.
"""
import json
import shutil
from pathlib import Path
# Default fallback URL for any path not in the redirect map
DEFAULT_REDIRECT = "https://docs.langchain.com/oss/python/langgraph/overview"
HTML_TEMPLATE = """<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Redirecting...</title>
<link rel="canonical" href="{url}">
<meta name="robots" content="noindex">
<script>var anchor=window.location.hash.substr(1);location.href="{url}"+(anchor?"#"+anchor:"")</script>
<meta http-equiv="refresh" content="0; url={url}">
</head>
<body>
Redirecting...
</body>
</html>
"""
ROOT_HTML_TEMPLATE = """<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Redirecting to LangGraph Documentation</title>
<link rel="canonical" href="{url}">
<meta name="robots" content="noindex">
<script>var anchor=window.location.hash.substr(1);location.href="{url}"+(anchor?"#"+anchor:"")</script>
<meta http-equiv="refresh" content="0; url={url}">
</head>
<body>
<h1>Documentation has moved</h1>
<p>The LangGraph documentation has moved to <a href="{url}">docs.langchain.com</a>.</p>
<p>Redirecting you now...</p>
</body>
</html>
"""
CATCHALL_404_TEMPLATE = """<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Redirecting to LangGraph Documentation</title>
<link rel="canonical" href="{default_url}">
<meta name="robots" content="noindex">
<script>
// Catchall redirect for any unmapped paths
window.location.replace("{default_url}");
</script>
<meta http-equiv="refresh" content="0; url={default_url}">
</head>
<body>
<h1>Documentation has moved</h1>
<p>The LangGraph documentation has moved to <a href="{default_url}">docs.langchain.com</a>.</p>
<p>Redirecting you now...</p>
</body>
</html>
"""
def generate_redirects():
script_dir = Path(__file__).parent
output_dir = script_dir / "_site"
# Load redirects
with open(script_dir / "redirects.json") as f:
redirects = json.load(f)
# Clean output directory
    if output_dir.exists():
        shutil.rmtree(output_dir)
output_dir.mkdir(parents=True)
# Generate individual HTML files for each redirect
for old_path, new_url in redirects.items():
# Remove leading slash and create directory structure
path = old_path.lstrip("/")
# Check if path has a file extension (e.g., .txt, .xml)
# If so, create the file directly instead of a directory with index.html
path_obj = Path(path)
        has_extension = bool(path_obj.suffix) and len(path_obj.suffix) <= 5
if not path:
html_path = output_dir / "index.html"
elif has_extension:
# For files with extensions, create the file directly
html_path = output_dir / path
else:
# For directory-style URLs, create index.html inside
html_path = output_dir / path / "index.html"
# Create parent directories
html_path.parent.mkdir(parents=True, exist_ok=True)
# Write the redirect HTML
html_path.write_text(HTML_TEMPLATE.format(url=new_url))
print(f"Created: {html_path}")
# Create root index.html
root_index = output_dir / "index.html"
if not root_index.exists():
root_index.write_text(ROOT_HTML_TEMPLATE.format(url=DEFAULT_REDIRECT))
print(f"Created: {root_index}")
# Create 404.html for catchall
catchall_404 = output_dir / "404.html"
catchall_404.write_text(CATCHALL_404_TEMPLATE.format(default_url=DEFAULT_REDIRECT))
print(f"Created: {catchall_404}")
# Copy static files (like llms.txt) that can't be redirected via HTML
static_files = ["llms.txt"]
for static_file in static_files:
src = script_dir / static_file
if src.exists():
dst = output_dir / static_file
dst.write_text(src.read_text())
print(f"Copied: {dst}")
print(f"\nGenerated {len(redirects)} redirect files in {output_dir}")
if __name__ == "__main__":
generate_redirects()
| {
"repo_id": "langchain-ai/langgraph",
"file_path": "docs/generate_redirects.py",
"license": "MIT License",
"lines": 119,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
langchain-ai/langgraph:libs/checkpoint-conformance/langgraph/checkpoint/conformance/capabilities.py | """Capability detection for checkpointer implementations."""
from __future__ import annotations
from dataclasses import dataclass
from enum import Enum
from langgraph.checkpoint.base import BaseCheckpointSaver
class Capability(str, Enum):
"""Capabilities that a checkpointer may support."""
PUT = "put"
PUT_WRITES = "put_writes"
GET_TUPLE = "get_tuple"
LIST = "list"
DELETE_THREAD = "delete_thread"
DELETE_FOR_RUNS = "delete_for_runs"
COPY_THREAD = "copy_thread"
PRUNE = "prune"
# Capabilities that every checkpointer must support.
BASE_CAPABILITIES = frozenset(
{
Capability.PUT,
Capability.PUT_WRITES,
Capability.GET_TUPLE,
Capability.LIST,
Capability.DELETE_THREAD,
}
)
# Capabilities that are optional extensions.
EXTENDED_CAPABILITIES = frozenset(
{
Capability.DELETE_FOR_RUNS,
Capability.COPY_THREAD,
Capability.PRUNE,
}
)
ALL_CAPABILITIES = BASE_CAPABILITIES | EXTENDED_CAPABILITIES
# Maps capability to the async method name on BaseCheckpointSaver (or subclass).
_CAPABILITY_METHOD_MAP: dict[Capability, str] = {
Capability.PUT: "aput",
Capability.PUT_WRITES: "aput_writes",
Capability.GET_TUPLE: "aget_tuple",
Capability.LIST: "alist",
Capability.DELETE_THREAD: "adelete_thread",
Capability.DELETE_FOR_RUNS: "adelete_for_runs",
Capability.COPY_THREAD: "acopy_thread",
Capability.PRUNE: "aprune",
}
@dataclass(frozen=True)
class DetectedCapabilities:
"""Result of capability detection for a checkpointer type."""
detected: frozenset[Capability]
missing: frozenset[Capability]
@classmethod
def from_instance(cls, saver: BaseCheckpointSaver) -> DetectedCapabilities:
"""Detect capabilities from a checkpointer instance."""
inner_type = type(saver)
detected: set[Capability] = set()
for cap, method_name in _CAPABILITY_METHOD_MAP.items():
if _is_overridden(inner_type, method_name):
detected.add(cap)
detected_fs = frozenset(detected)
return cls(
detected=detected_fs,
missing=ALL_CAPABILITIES - detected_fs,
)
def _is_overridden(inner_type: type, method: str) -> bool:
"""Check if *method* on *inner_type* differs from the base class default."""
base = getattr(BaseCheckpointSaver, method, None)
impl = getattr(inner_type, method, None)
if base is None or impl is None:
return impl is not None
return impl is not base
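# Minimal usage sketch (assuming InMemorySaver, which implements the base
# async methods, is importable from langgraph.checkpoint.memory):
#
#     from langgraph.checkpoint.memory import InMemorySaver
#
#     caps = DetectedCapabilities.from_instance(InMemorySaver())
#     assert Capability.PUT in caps.detected
#     print(sorted(c.value for c in caps.missing))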
| {
"repo_id": "langchain-ai/langgraph",
"file_path": "libs/checkpoint-conformance/langgraph/checkpoint/conformance/capabilities.py",
"license": "MIT License",
"lines": 73,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langgraph:libs/checkpoint-conformance/langgraph/checkpoint/conformance/initializer.py | """Checkpointer test registration and factory management."""
from __future__ import annotations
from collections.abc import AsyncGenerator, Callable
from contextlib import asynccontextmanager
from dataclasses import dataclass, field
from typing import Any
from langgraph.checkpoint.base import BaseCheckpointSaver
# Type for the lifespan async context manager factory.
LifespanFactory = Callable[[], AsyncGenerator[None, None]]
# Module-level registry of decorated checkpointer factories.
_REGISTRY: dict[str, RegisteredCheckpointer] = {}
async def _noop_lifespan() -> AsyncGenerator[None, None]:
yield
@dataclass
class RegisteredCheckpointer:
"""A registered checkpointer test factory."""
name: str
factory: Callable[[], AsyncGenerator[BaseCheckpointSaver, None]]
skip_capabilities: set[str] = field(default_factory=set)
lifespan: LifespanFactory = _noop_lifespan
@asynccontextmanager
async def create(self) -> AsyncGenerator[BaseCheckpointSaver, None]:
"""Create a fresh checkpointer instance via the async generator."""
gen = self.factory()
try:
saver = await gen.__anext__()
yield saver
finally:
try:
await gen.__anext__()
except StopAsyncIteration:
pass
@asynccontextmanager
async def enter_lifespan(self) -> AsyncGenerator[None, None]:
"""Enter the lifespan context (once per validation run)."""
gen = self.lifespan()
try:
await gen.__anext__()
yield
finally:
try:
await gen.__anext__()
except StopAsyncIteration:
pass
def checkpointer_test(
name: str,
*,
skip_capabilities: set[str] | None = None,
lifespan: LifespanFactory | None = None,
) -> Callable[[Any], RegisteredCheckpointer]:
"""Register an async generator as a checkpointer test factory.
The factory is called once per capability suite to create a fresh
checkpointer. The optional `lifespan` is an async generator that
runs once for the entire validation run (e.g. to create/destroy a
database).
Example::
@checkpointer_test(name="InMemorySaver")
async def memory_checkpointer():
yield InMemorySaver()
With lifespan::
async def pg_lifespan():
await create_database()
yield
await drop_database()
@checkpointer_test(name="PostgresSaver", lifespan=pg_lifespan)
async def pg_checkpointer():
yield PostgresSaver(conn_string="...")
"""
def decorator(fn: Any) -> RegisteredCheckpointer:
registered = RegisteredCheckpointer(
name=name,
factory=fn,
skip_capabilities=skip_capabilities or set(),
lifespan=lifespan or _noop_lifespan,
)
_REGISTRY[name] = registered
return registered
return decorator
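# Minimal consumption sketch for a validation runner (hypothetical; `_REGISTRY`
# is module-private and shown only for illustration):
#
#     async def run_all() -> None:
#         for registered in _REGISTRY.values():
#             async with registered.enter_lifespan():
#                 async with registered.create() as saver:
#                     ...  # exercise `saver` against the capability suites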
| {
"repo_id": "langchain-ai/langgraph",
"file_path": "libs/checkpoint-conformance/langgraph/checkpoint/conformance/initializer.py",
"license": "MIT License",
"lines": 78,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langgraph:libs/checkpoint-conformance/langgraph/checkpoint/conformance/report.py | """Capability report: results, progress callbacks, and pretty-printing."""
from __future__ import annotations
from collections.abc import Callable
from dataclasses import dataclass, field
from typing import Any
from langgraph.checkpoint.conformance.capabilities import (
BASE_CAPABILITIES,
EXTENDED_CAPABILITIES,
Capability,
)
# Callback type for per-test progress reporting.
# (capability_name, test_name, passed, error_msg_or_None) -> None
OnTestResult = Callable[[str, str, bool, str | None], None]
# Callback type for capability-level events.
# (capability_name, detected) -> None
OnCapabilityStart = Callable[[str, bool], None]
class ProgressCallbacks:
"""Grouped callbacks for progress reporting during validation."""
def __init__(
self,
*,
        on_capability_start: OnCapabilityStart | None = None,
on_test_result: OnTestResult | None = None,
on_capability_end: Callable[[str], None] | None = None,
) -> None:
self.on_capability_start = on_capability_start
self.on_test_result = on_test_result
self.on_capability_end = on_capability_end
@classmethod
def default(cls) -> ProgressCallbacks:
"""Dot-style progress: ``.`` per pass, ``F`` per fail."""
def _cap_start(capability: str, detected: bool) -> None:
if detected:
print(f" {capability}: ", end="", flush=True)
else:
print(f" ⊘ {capability} (not implemented)")
def _test_result(
capability: str, test_name: str, passed: bool, error: str | None
) -> None:
print("." if passed else "F", end="", flush=True)
def _cap_end(capability: str) -> None:
print() # newline after dots
return cls(
on_capability_start=_cap_start,
on_test_result=_test_result,
on_capability_end=_cap_end,
)
@classmethod
def verbose(cls) -> ProgressCallbacks:
"""Per-test output with names and errors."""
def _cap_start(capability: str, detected: bool) -> None:
if detected:
print(f" {capability}:")
else:
print(f" ⊘ {capability} (not implemented)")
def _test_result(
capability: str, test_name: str, passed: bool, error: str | None
) -> None:
icon = "✓" if passed else "✗"
print(f" {icon} {test_name}")
if error:
for line in error.rstrip().splitlines():
print(f" {line}")
return cls(
on_capability_start=_cap_start,
on_test_result=_test_result,
)
@classmethod
def quiet(cls) -> ProgressCallbacks:
"""No progress output."""
return cls()
@dataclass
class CapabilityResult:
"""Result of running a single capability's test suite."""
detected: bool = False
passed: bool | None = None # None = skipped
tests_passed: int = 0
tests_failed: int = 0
tests_skipped: int = 0
failures: list[str] = field(default_factory=list)
@dataclass
class CapabilityReport:
"""Aggregate report across all capabilities."""
checkpointer_name: str
results: dict[str, CapabilityResult] = field(default_factory=dict)
def passed_all_base(self) -> bool:
"""Whether all base capability tests passed."""
for cap in BASE_CAPABILITIES:
result = self.results.get(cap.value)
if result is None or result.passed is not True:
return False
return True
def passed_all(self) -> bool:
"""Whether every detected capability's tests passed."""
for result in self.results.values():
if result.detected and result.passed is not True:
return False
return True
def conformance_level(self) -> str:
"""Return a human-readable conformance level string."""
if self.passed_all():
return "FULL"
if self.passed_all_base():
return "BASE+PARTIAL"
return "BASE" if self._any_base_passed() else "NONE"
def _any_base_passed(self) -> bool:
for cap in BASE_CAPABILITIES:
result = self.results.get(cap.value)
if result and result.passed is True:
return True
return False
def print_report(self) -> None:
"""Pretty-print the report to stdout."""
width = 52
border = "=" * width
print(f"\n{'':>2}{border}")
print(f"{'':>2} Checkpointer Validation: {self.checkpointer_name}")
print(f"{'':>2}{border}")
def _section(title: str, caps: frozenset[Capability]) -> None:
print(f"{'':>2} {title}")
for cap in sorted(caps, key=lambda c: c.value):
result = self.results.get(cap.value)
if result is None:
icon = " "
suffix = "(no tests)"
elif not result.detected:
icon = "⊘ "
suffix = "(not implemented)"
elif result.passed is True:
icon = "✅"
suffix = ""
elif result.passed is False:
icon = "❌"
suffix = f"({result.tests_failed} failed)"
else:
icon = "⏭ "
suffix = "(skipped)"
print(f"{'':>2} {icon} {cap.value:20s} {suffix}")
print()
_section("BASE CAPABILITIES", BASE_CAPABILITIES)
_section("EXTENDED CAPABILITIES", EXTENDED_CAPABILITIES)
total = sum(1 for r in self.results.values() if r.detected)
passed = sum(
1 for r in self.results.values() if r.detected and r.passed is True
)
level = self.conformance_level()
print(f"{'':>2} Result: {level} ({passed}/{total})")
print(f"{'':>2}{border}\n")
def to_dict(self) -> dict[str, Any]:
"""Return a JSON-serializable dict."""
return {
"checkpointer_name": self.checkpointer_name,
"conformance_level": self.conformance_level(),
"results": {
name: {
"detected": r.detected,
"passed": r.passed,
"tests_passed": r.tests_passed,
"tests_failed": r.tests_failed,
"tests_skipped": r.tests_skipped,
"failures": r.failures,
}
for name, r in self.results.items()
},
}
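# Minimal usage sketch (hand-filled result; a real validation run populates
# `results` per capability):
#
#     report = CapabilityReport(checkpointer_name="InMemorySaver")
#     report.results["put"] = CapabilityResult(detected=True, passed=True, tests_passed=4)
#     report.print_report()
#     print(report.to_dict()["conformance_level"])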
| {
"repo_id": "langchain-ai/langgraph",
"file_path": "libs/checkpoint-conformance/langgraph/checkpoint/conformance/report.py",
"license": "MIT License",
"lines": 165,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langgraph:libs/checkpoint-conformance/langgraph/checkpoint/conformance/spec/test_copy_thread.py | """COPY_THREAD capability tests — acopy_thread."""
from __future__ import annotations
import traceback
from collections.abc import Callable
from uuid import uuid4
from langgraph.checkpoint.base import BaseCheckpointSaver
from langgraph.checkpoint.conformance.test_utils import (
generate_checkpoint,
generate_config,
generate_metadata,
)
async def _setup_source_thread(
saver: BaseCheckpointSaver,
tid: str,
*,
n: int = 3,
namespaces: list[str] | None = None,
) -> list[dict]:
"""Create n checkpoints on tid (optionally across namespaces). Returns stored configs."""
nss = namespaces or [""]
stored = []
for ns in nss:
parent_cfg = None
for i in range(n):
config = generate_config(tid, checkpoint_ns=ns)
if parent_cfg:
config["configurable"]["checkpoint_id"] = parent_cfg["configurable"][
"checkpoint_id"
]
cp = generate_checkpoint(channel_values={"step": i})
cp["channel_versions"] = {"step": 1}
parent_cfg = await saver.aput(
config, cp, generate_metadata(step=i), {"step": 1}
)
stored.append(parent_cfg)
return stored
async def test_copy_thread_basic(saver: BaseCheckpointSaver) -> None:
"""Checkpoints appear on target thread."""
src = str(uuid4())
dst = str(uuid4())
await _setup_source_thread(saver, src)
await saver.acopy_thread(src, dst)
results = []
async for tup in saver.alist(generate_config(dst)):
results.append(tup)
assert len(results) == 3, f"Expected 3 copied checkpoints, got {len(results)}"
async def test_copy_thread_all_checkpoints(saver: BaseCheckpointSaver) -> None:
"""All checkpoints copied, not just latest."""
src = str(uuid4())
dst = str(uuid4())
await _setup_source_thread(saver, src, n=3)
await saver.acopy_thread(src, dst)
src_results = []
async for tup in saver.alist(generate_config(src)):
src_results.append(tup)
dst_results = []
async for tup in saver.alist(generate_config(dst)):
dst_results.append(tup)
assert len(dst_results) == len(src_results)
# Verify content matches
for s, d in zip(
sorted(src_results, key=lambda t: t.checkpoint["id"]),
sorted(dst_results, key=lambda t: t.checkpoint["id"]),
strict=True,
):
assert s.checkpoint["channel_values"] == d.checkpoint["channel_values"], (
f"channel_values mismatch for checkpoint {s.checkpoint['id']}"
)
async def test_copy_thread_preserves_metadata(
saver: BaseCheckpointSaver,
) -> None:
"""Metadata intact on copied checkpoints."""
src = str(uuid4())
dst = str(uuid4())
await _setup_source_thread(saver, src, n=2)
await saver.acopy_thread(src, dst)
src_tuples = []
async for tup in saver.alist(generate_config(src)):
src_tuples.append(tup)
dst_tuples = []
async for tup in saver.alist(generate_config(dst)):
dst_tuples.append(tup)
for s, d in zip(
sorted(src_tuples, key=lambda t: t.metadata.get("step", 0)),
sorted(dst_tuples, key=lambda t: t.metadata.get("step", 0)),
strict=True,
):
for key in s.metadata:
assert s.metadata.get(key) == d.metadata.get(key), (
f"metadata[{key!r}] mismatch: {s.metadata.get(key)!r} != {d.metadata.get(key)!r}"
)
async def test_copy_thread_preserves_namespaces(
saver: BaseCheckpointSaver,
) -> None:
"""Root + child namespaces copied."""
src = str(uuid4())
dst = str(uuid4())
await _setup_source_thread(saver, src, n=1, namespaces=["", "child:1"])
await saver.acopy_thread(src, dst)
for ns in ["", "child:1"]:
results = []
async for tup in saver.alist(generate_config(dst, checkpoint_ns=ns)):
results.append(tup)
assert len(results) == 1, (
f"Expected 1 checkpoint in namespace '{ns}', got {len(results)}"
)
async def test_copy_thread_preserves_writes(saver: BaseCheckpointSaver) -> None:
"""Pending writes copied."""
src = str(uuid4())
dst = str(uuid4())
configs = await _setup_source_thread(saver, src, n=1)
# Add a write to the source
await saver.aput_writes(configs[-1], [("ch", "write_val")], str(uuid4()))
await saver.acopy_thread(src, dst)
tup = await saver.aget_tuple(generate_config(dst))
assert tup is not None
assert tup.pending_writes is not None
assert len(tup.pending_writes) == 1, (
f"Expected 1 write, got {len(tup.pending_writes)}"
)
assert tup.pending_writes[0][1] == "ch", (
f"channel mismatch: {tup.pending_writes[0][1]!r}"
)
assert tup.pending_writes[0][2] == "write_val", (
f"value mismatch: {tup.pending_writes[0][2]!r}"
)
async def test_copy_thread_preserves_ordering(
saver: BaseCheckpointSaver,
) -> None:
"""Checkpoint order maintained."""
src = str(uuid4())
dst = str(uuid4())
await _setup_source_thread(saver, src, n=4)
await saver.acopy_thread(src, dst)
src_ids = []
async for tup in saver.alist(generate_config(src)):
src_ids.append(tup.checkpoint["id"])
dst_ids = []
async for tup in saver.alist(generate_config(dst)):
dst_ids.append(tup.checkpoint["id"])
# Order should match (both newest-first)
assert src_ids == dst_ids
async def test_copy_thread_source_unchanged(saver: BaseCheckpointSaver) -> None:
"""Source thread still intact after copy."""
src = str(uuid4())
dst = str(uuid4())
await _setup_source_thread(saver, src, n=2)
# Snapshot source before copy
src_before = []
async for tup in saver.alist(generate_config(src)):
src_before.append(tup.checkpoint["id"])
await saver.acopy_thread(src, dst)
# Source should be unchanged
src_after = []
async for tup in saver.alist(generate_config(src)):
src_after.append(tup.checkpoint["id"])
assert src_before == src_after
async def test_copy_thread_nonexistent_source(
saver: BaseCheckpointSaver,
) -> None:
"""Graceful handling of non-existent source thread."""
src = str(uuid4())
dst = str(uuid4())
# Should not raise (or raise a known error)
try:
await saver.acopy_thread(src, dst)
except Exception:
pass # Some implementations may raise; that's acceptable
# Destination should be empty
results = []
async for tup in saver.alist(generate_config(dst)):
results.append(tup)
assert len(results) == 0
ALL_COPY_THREAD_TESTS = [
test_copy_thread_basic,
test_copy_thread_all_checkpoints,
test_copy_thread_preserves_metadata,
test_copy_thread_preserves_namespaces,
test_copy_thread_preserves_writes,
test_copy_thread_preserves_ordering,
test_copy_thread_source_unchanged,
test_copy_thread_nonexistent_source,
]
async def run_copy_thread_tests(
saver: BaseCheckpointSaver,
on_test_result: Callable[[str, str, bool, str | None], None] | None = None,
) -> tuple[int, int, list[str]]:
"""Run all copy_thread tests. Returns (passed, failed, failure_names)."""
passed = 0
failed = 0
failures: list[str] = []
for test_fn in ALL_COPY_THREAD_TESTS:
try:
await test_fn(saver)
passed += 1
if on_test_result:
on_test_result("copy_thread", test_fn.__name__, True, None)
except Exception as e:
failed += 1
msg = f"{test_fn.__name__}: {e}"
failures.append(msg)
if on_test_result:
on_test_result(
"copy_thread", test_fn.__name__, False, traceback.format_exc()
)
return passed, failed, failures
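# Illustrative standalone harness (an assumption, not part of the spec): run
# this suite directly against a saver of your choice. InMemorySaver is only a
# placeholder import; substitute any BaseCheckpointSaver that actually
# implements acopy_thread, which the in-memory saver may not.
if __name__ == "__main__":
    import asyncio

    from langgraph.checkpoint.memory import InMemorySaver

    async def _main() -> None:
        passed, failed, failures = await run_copy_thread_tests(InMemorySaver())
        print(f"copy_thread: {passed} passed, {failed} failed")
        for name in failures:
            print(f"  FAIL {name}")

    asyncio.run(_main())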
| {
"repo_id": "langchain-ai/langgraph",
"file_path": "libs/checkpoint-conformance/langgraph/checkpoint/conformance/spec/test_copy_thread.py",
"license": "MIT License",
"lines": 206,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langgraph:libs/checkpoint-conformance/langgraph/checkpoint/conformance/spec/test_delete_for_runs.py | """DELETE_FOR_RUNS capability tests — adelete_for_runs."""
from __future__ import annotations
import traceback
from collections.abc import Callable
from uuid import uuid4
from langgraph.checkpoint.base import BaseCheckpointSaver
from langgraph.checkpoint.conformance.test_utils import (
generate_checkpoint,
generate_config,
generate_metadata,
)
async def _put_with_run_id(
saver: BaseCheckpointSaver,
tid: str,
run_id: str,
*,
checkpoint_ns: str = "",
parent_config: dict | None = None,
) -> dict:
"""Put a checkpoint with a run_id in metadata, return stored config."""
config = generate_config(tid, checkpoint_ns=checkpoint_ns)
if parent_config:
config["configurable"]["checkpoint_id"] = parent_config["configurable"][
"checkpoint_id"
]
cp = generate_checkpoint()
md = generate_metadata(run_id=run_id)
return await saver.aput(config, cp, md, {})
async def test_delete_for_runs_single(saver: BaseCheckpointSaver) -> None:
"""One run_id removed."""
tid = str(uuid4())
run1, run2 = str(uuid4()), str(uuid4())
stored1 = await _put_with_run_id(saver, tid, run1)
await _put_with_run_id(saver, tid, run2, parent_config=stored1)
# Pre-delete: verify both runs exist
pre_results = []
async for tup in saver.alist(generate_config(tid)):
pre_results.append(tup)
pre_run_ids = {t.metadata.get("run_id") for t in pre_results}
assert run1 in pre_run_ids, "Pre-delete: run1 should exist"
assert run2 in pre_run_ids, "Pre-delete: run2 should exist"
await saver.adelete_for_runs([run1])
# run1's checkpoint should be gone; run2 should remain
results = []
async for tup in saver.alist(generate_config(tid)):
results.append(tup)
run_ids = {t.metadata.get("run_id") for t in results}
assert run1 not in run_ids
assert run2 in run_ids
async def test_delete_for_runs_multiple(saver: BaseCheckpointSaver) -> None:
"""List of run_ids removed."""
tid = str(uuid4())
run1, run2, run3 = str(uuid4()), str(uuid4()), str(uuid4())
s1 = await _put_with_run_id(saver, tid, run1)
s2 = await _put_with_run_id(saver, tid, run2, parent_config=s1)
await _put_with_run_id(saver, tid, run3, parent_config=s2)
# Pre-delete: verify all 3 runs exist
pre_results = []
async for tup in saver.alist(generate_config(tid)):
pre_results.append(tup)
pre_run_ids = {t.metadata.get("run_id") for t in pre_results}
assert run1 in pre_run_ids, "Pre-delete: run1 should exist"
assert run2 in pre_run_ids, "Pre-delete: run2 should exist"
assert run3 in pre_run_ids, "Pre-delete: run3 should exist"
await saver.adelete_for_runs([run1, run2])
results = []
async for tup in saver.alist(generate_config(tid)):
results.append(tup)
run_ids = {t.metadata.get("run_id") for t in results}
assert run1 not in run_ids
assert run2 not in run_ids
assert run3 in run_ids
async def test_delete_for_runs_preserves_other_runs(
saver: BaseCheckpointSaver,
) -> None:
"""Unrelated runs untouched."""
tid = str(uuid4())
run_keep = str(uuid4())
run_delete = str(uuid4())
await _put_with_run_id(saver, tid, run_keep)
await _put_with_run_id(saver, tid, run_delete)
# Pre-delete: verify both runs exist
pre_results = []
async for tup in saver.alist(generate_config(tid)):
pre_results.append(tup)
pre_run_ids = {t.metadata.get("run_id") for t in pre_results}
assert run_keep in pre_run_ids, "Pre-delete: run_keep should exist"
assert run_delete in pre_run_ids, "Pre-delete: run_delete should exist"
await saver.adelete_for_runs([run_delete])
results = []
async for tup in saver.alist(generate_config(tid)):
results.append(tup)
run_ids = {t.metadata.get("run_id") for t in results}
assert run_keep in run_ids
async def test_delete_for_runs_removes_writes(
saver: BaseCheckpointSaver,
) -> None:
"""Associated writes cleaned up."""
tid = str(uuid4())
run1 = str(uuid4())
stored = await _put_with_run_id(saver, tid, run1)
await saver.aput_writes(stored, [("ch", "val")], str(uuid4()))
# Pre-delete: verify writes exist
pre_tup = await saver.aget_tuple(stored)
assert pre_tup is not None, "Pre-delete: checkpoint should exist"
assert pre_tup.pending_writes is not None and len(pre_tup.pending_writes) == 1, (
f"Pre-delete: expected 1 write, got {len(pre_tup.pending_writes) if pre_tup.pending_writes else 0}"
)
await saver.adelete_for_runs([run1])
# The checkpoint (and its writes) should be gone
tup = await saver.aget_tuple(stored)
assert tup is None
async def test_delete_for_runs_empty_list_noop(
saver: BaseCheckpointSaver,
) -> None:
"""Empty list no error."""
await saver.adelete_for_runs([])
async def test_delete_for_runs_nonexistent_noop(
saver: BaseCheckpointSaver,
) -> None:
"""Missing run_ids no error."""
await saver.adelete_for_runs([str(uuid4())])
async def test_delete_for_runs_across_namespaces(
saver: BaseCheckpointSaver,
) -> None:
"""All namespaces cleaned."""
tid = str(uuid4())
run1 = str(uuid4())
await _put_with_run_id(saver, tid, run1, checkpoint_ns="")
await _put_with_run_id(saver, tid, run1, checkpoint_ns="child:1")
# Pre-delete: verify run1 present in both namespaces
for ns in ["", "child:1"]:
pre_results = []
async for tup in saver.alist(generate_config(tid, checkpoint_ns=ns)):
pre_results.append(tup)
pre_run_ids = {t.metadata.get("run_id") for t in pre_results}
assert run1 in pre_run_ids, f"Pre-delete: run1 should exist in ns='{ns}'"
await saver.adelete_for_runs([run1])
for ns in ["", "child:1"]:
results = []
async for tup in saver.alist(generate_config(tid, checkpoint_ns=ns)):
results.append(tup)
run_ids = {t.metadata.get("run_id") for t in results}
assert run1 not in run_ids
ALL_DELETE_FOR_RUNS_TESTS = [
test_delete_for_runs_single,
test_delete_for_runs_multiple,
test_delete_for_runs_preserves_other_runs,
test_delete_for_runs_removes_writes,
test_delete_for_runs_empty_list_noop,
test_delete_for_runs_nonexistent_noop,
test_delete_for_runs_across_namespaces,
]
async def run_delete_for_runs_tests(
saver: BaseCheckpointSaver,
on_test_result: Callable[[str, str, bool, str | None], None] | None = None,
) -> tuple[int, int, list[str]]:
"""Run all delete_for_runs tests. Returns (passed, failed, failure_names)."""
passed = 0
failed = 0
failures: list[str] = []
for test_fn in ALL_DELETE_FOR_RUNS_TESTS:
try:
await test_fn(saver)
passed += 1
if on_test_result:
on_test_result("delete_for_runs", test_fn.__name__, True, None)
except Exception as e:
failed += 1
msg = f"{test_fn.__name__}: {e}"
failures.append(msg)
if on_test_result:
on_test_result(
"delete_for_runs", test_fn.__name__, False, traceback.format_exc()
)
return passed, failed, failures
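# Hedged sketch of the on_test_result hook: it receives (capability, test_name,
# passed, traceback_or_None), so callers can stream per-test outcomes instead
# of waiting on the aggregate tuple. The callback below is illustrative only.
#
#     def log_result(cap: str, name: str, ok: bool, tb: str | None) -> None:
#         print(f"[{cap}] {'PASS' if ok else 'FAIL'} {name}")
#         if tb:
#             print(tb)
#
#     await run_delete_for_runs_tests(saver, on_test_result=log_result)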
| {
"repo_id": "langchain-ai/langgraph",
"file_path": "libs/checkpoint-conformance/langgraph/checkpoint/conformance/spec/test_delete_for_runs.py",
"license": "MIT License",
"lines": 176,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langgraph:libs/checkpoint-conformance/langgraph/checkpoint/conformance/spec/test_delete_thread.py | """DELETE_THREAD capability tests — adelete_thread."""
from __future__ import annotations
import traceback
from collections.abc import Callable
from uuid import uuid4
from langgraph.checkpoint.base import BaseCheckpointSaver
from langgraph.checkpoint.conformance.test_utils import (
generate_checkpoint,
generate_config,
generate_metadata,
)
async def test_delete_thread_removes_checkpoints(
saver: BaseCheckpointSaver,
) -> None:
"""All checkpoints gone after delete."""
tid = str(uuid4())
parent_cfg = None
for i in range(3):
config = generate_config(tid)
if parent_cfg:
config["configurable"]["checkpoint_id"] = parent_cfg["configurable"][
"checkpoint_id"
]
cp = generate_checkpoint()
parent_cfg = await saver.aput(config, cp, generate_metadata(step=i), {})
# Pre-delete: verify data exists
assert await saver.aget_tuple(generate_config(tid)) is not None, (
"Pre-delete: checkpoint should exist"
)
await saver.adelete_thread(tid)
tup = await saver.aget_tuple(generate_config(tid))
assert tup is None
results = []
async for t in saver.alist(generate_config(tid)):
results.append(t)
assert len(results) == 0
async def test_delete_thread_removes_writes(saver: BaseCheckpointSaver) -> None:
"""Pending writes gone after delete."""
tid = str(uuid4())
config = generate_config(tid)
cp = generate_checkpoint()
stored = await saver.aput(config, cp, generate_metadata(), {})
await saver.aput_writes(stored, [("ch", "val")], str(uuid4()))
# Pre-delete: verify writes exist
pre_tup = await saver.aget_tuple(generate_config(tid))
assert pre_tup is not None, "Pre-delete: checkpoint should exist"
assert pre_tup.pending_writes is not None and len(pre_tup.pending_writes) == 1, (
f"Pre-delete: expected 1 write, got {len(pre_tup.pending_writes) if pre_tup.pending_writes else 0}"
)
await saver.adelete_thread(tid)
tup = await saver.aget_tuple(generate_config(tid))
assert tup is None
async def test_delete_thread_removes_all_namespaces(
saver: BaseCheckpointSaver,
) -> None:
"""Root + child namespaces both removed."""
tid = str(uuid4())
for ns in ["", "child:1"]:
cfg = generate_config(tid, checkpoint_ns=ns)
cp = generate_checkpoint()
await saver.aput(cfg, cp, generate_metadata(), {})
# Pre-delete: verify each namespace has data
for ns in ["", "child:1"]:
pre = await saver.aget_tuple(generate_config(tid, checkpoint_ns=ns))
assert pre is not None, f"Pre-delete: namespace '{ns}' should have data"
await saver.adelete_thread(tid)
for ns in ["", "child:1"]:
tup = await saver.aget_tuple(generate_config(tid, checkpoint_ns=ns))
assert tup is None
async def test_delete_thread_preserves_other_threads(
saver: BaseCheckpointSaver,
) -> None:
"""Other threads untouched."""
tid1, tid2 = str(uuid4()), str(uuid4())
for tid in (tid1, tid2):
cfg = generate_config(tid)
cp = generate_checkpoint()
await saver.aput(cfg, cp, generate_metadata(), {})
await saver.adelete_thread(tid1)
assert await saver.aget_tuple(generate_config(tid1)) is None
assert await saver.aget_tuple(generate_config(tid2)) is not None
async def test_delete_thread_nonexistent_noop(
saver: BaseCheckpointSaver,
) -> None:
"""No error for missing thread."""
# Should not raise
await saver.adelete_thread(str(uuid4()))
ALL_DELETE_THREAD_TESTS = [
test_delete_thread_removes_checkpoints,
test_delete_thread_removes_writes,
test_delete_thread_removes_all_namespaces,
test_delete_thread_preserves_other_threads,
test_delete_thread_nonexistent_noop,
]
async def run_delete_thread_tests(
saver: BaseCheckpointSaver,
on_test_result: Callable[[str, str, bool, str | None], None] | None = None,
) -> tuple[int, int, list[str]]:
"""Run all delete_thread tests. Returns (passed, failed, failure_names)."""
passed = 0
failed = 0
failures: list[str] = []
for test_fn in ALL_DELETE_THREAD_TESTS:
try:
await test_fn(saver)
passed += 1
if on_test_result:
on_test_result("delete_thread", test_fn.__name__, True, None)
except Exception as e:
failed += 1
msg = f"{test_fn.__name__}: {e}"
failures.append(msg)
if on_test_result:
on_test_result(
"delete_thread", test_fn.__name__, False, traceback.format_exc()
)
return passed, failed, failures
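# Hedged sketch of wiring this suite into pytest rather than the bundled
# runner. Assumes pytest plus pytest-asyncio are installed and that
# make_saver() is a user-supplied factory returning a fresh saver per test.
#
#     import pytest
#
#     @pytest.mark.asyncio
#     @pytest.mark.parametrize(
#         "test_fn", ALL_DELETE_THREAD_TESTS, ids=lambda f: f.__name__
#     )
#     async def test_delete_thread_conformance(test_fn):
#         await test_fn(make_saver())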
| {
"repo_id": "langchain-ai/langgraph",
"file_path": "libs/checkpoint-conformance/langgraph/checkpoint/conformance/spec/test_delete_thread.py",
"license": "MIT License",
"lines": 117,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langgraph:libs/checkpoint-conformance/langgraph/checkpoint/conformance/spec/test_get_tuple.py | """GET_TUPLE capability tests — aget_tuple retrieval."""
from __future__ import annotations
import traceback
from collections.abc import Callable
from uuid import uuid4
from langgraph.checkpoint.base import BaseCheckpointSaver
from langgraph.checkpoint.conformance.test_utils import (
generate_checkpoint,
generate_config,
generate_metadata,
)
async def test_get_tuple_nonexistent_returns_none(
saver: BaseCheckpointSaver,
) -> None:
"""Missing thread returns None."""
config = generate_config(str(uuid4()))
tup = await saver.aget_tuple(config)
assert tup is None
async def test_get_tuple_latest_when_no_checkpoint_id(
saver: BaseCheckpointSaver,
) -> None:
"""Returns newest checkpoint when no checkpoint_id in config."""
tid = str(uuid4())
ids = []
parent_cfg = None
for i in range(3):
config = generate_config(tid)
if parent_cfg:
config["configurable"]["checkpoint_id"] = parent_cfg["configurable"][
"checkpoint_id"
]
cp = generate_checkpoint()
parent_cfg = await saver.aput(config, cp, generate_metadata(step=i), {})
ids.append(cp["id"])
# Get without checkpoint_id — should return the latest
tup = await saver.aget_tuple(generate_config(tid))
assert tup is not None
assert tup.checkpoint["id"] == ids[-1]
assert tup.metadata["step"] == 2, (
f"Expected latest step=2, got {tup.metadata['step']}"
)
async def test_get_tuple_specific_checkpoint_id(
saver: BaseCheckpointSaver,
) -> None:
"""Returns exact match when checkpoint_id specified."""
tid = str(uuid4())
config1 = generate_config(tid)
cp1 = generate_checkpoint()
stored1 = await saver.aput(config1, cp1, generate_metadata(step=0), {})
config2 = generate_config(tid)
config2["configurable"]["checkpoint_id"] = stored1["configurable"]["checkpoint_id"]
cp2 = generate_checkpoint()
await saver.aput(config2, cp2, generate_metadata(step=1), {})
# Fetch the first one specifically
tup = await saver.aget_tuple(stored1)
assert tup is not None
assert tup.checkpoint["id"] == cp1["id"]
async def test_get_tuple_config_structure(saver: BaseCheckpointSaver) -> None:
"""tuple.config has thread_id, checkpoint_ns, checkpoint_id."""
tid = str(uuid4())
config = generate_config(tid)
cp = generate_checkpoint()
stored = await saver.aput(config, cp, generate_metadata(), {})
tup = await saver.aget_tuple(stored)
assert tup is not None
conf = tup.config["configurable"]
assert conf["thread_id"] == tid
assert conf.get("checkpoint_ns", "") == "", (
f"Expected checkpoint_ns='', got {conf.get('checkpoint_ns')!r}"
)
assert conf["checkpoint_id"] == cp["id"]
async def test_get_tuple_checkpoint_fields(saver: BaseCheckpointSaver) -> None:
"""All Checkpoint fields present."""
tid = str(uuid4())
config = generate_config(tid)
cp = generate_checkpoint(channel_values={"k": "v"})
cp["channel_versions"] = {"k": 1}
stored = await saver.aput(config, cp, generate_metadata(), {"k": 1})
tup = await saver.aget_tuple(stored)
assert tup is not None
c = tup.checkpoint
assert c["id"] == cp["id"], f"id mismatch: {c['id']!r} != {cp['id']!r}"
assert c["v"] == 1, f"Expected v=1, got {c['v']!r}"
assert "ts" in c and c["ts"], "ts should be non-empty"
assert c["channel_values"] == {"k": "v"}, f"channel_values: {c['channel_values']!r}"
assert "channel_versions" in c
assert "versions_seen" in c
async def test_get_tuple_metadata(saver: BaseCheckpointSaver) -> None:
"""metadata populated correctly."""
tid = str(uuid4())
config = generate_config(tid)
cp = generate_checkpoint()
md = generate_metadata(source="input", step=-1)
stored = await saver.aput(config, cp, md, {})
tup = await saver.aget_tuple(stored)
assert tup is not None
assert tup.metadata["source"] == "input"
assert tup.metadata["step"] == -1
async def test_get_tuple_parent_config(saver: BaseCheckpointSaver) -> None:
"""parent_config when parent exists, None otherwise."""
tid = str(uuid4())
# First checkpoint — no parent
config1 = generate_config(tid)
cp1 = generate_checkpoint()
stored1 = await saver.aput(config1, cp1, generate_metadata(step=0), {})
tup1 = await saver.aget_tuple(stored1)
assert tup1 is not None
assert tup1.parent_config is None
# Second checkpoint — has parent
config2 = generate_config(tid)
config2["configurable"]["checkpoint_id"] = stored1["configurable"]["checkpoint_id"]
cp2 = generate_checkpoint()
stored2 = await saver.aput(config2, cp2, generate_metadata(step=1), {})
tup2 = await saver.aget_tuple(stored2)
assert tup2 is not None
assert tup2.parent_config is not None
assert (
tup2.parent_config["configurable"]["checkpoint_id"]
== stored1["configurable"]["checkpoint_id"]
)
async def test_get_tuple_pending_writes(saver: BaseCheckpointSaver) -> None:
"""pending_writes from put_writes visible."""
tid = str(uuid4())
config = generate_config(tid)
cp = generate_checkpoint()
stored = await saver.aput(config, cp, generate_metadata(), {})
task_id = str(uuid4())
await saver.aput_writes(stored, [("ch", "val")], task_id)
tup = await saver.aget_tuple(stored)
assert tup is not None
assert tup.pending_writes is not None
assert len(tup.pending_writes) == 1, (
f"Expected 1 write, got {len(tup.pending_writes)}"
)
assert tup.pending_writes[0][0] == task_id, (
f"task_id mismatch: {tup.pending_writes[0][0]!r}"
)
assert tup.pending_writes[0][1] == "ch", (
f"channel mismatch: {tup.pending_writes[0][1]!r}"
)
assert tup.pending_writes[0][2] == "val", (
f"value mismatch: {tup.pending_writes[0][2]!r}"
)
async def test_get_tuple_respects_namespace(saver: BaseCheckpointSaver) -> None:
"""checkpoint_ns filtering."""
tid = str(uuid4())
cfg_root = generate_config(tid, checkpoint_ns="")
cp_root = generate_checkpoint()
stored_root = await saver.aput(cfg_root, cp_root, generate_metadata(), {})
cfg_child = generate_config(tid, checkpoint_ns="child:1")
cp_child = generate_checkpoint()
stored_child = await saver.aput(cfg_child, cp_child, generate_metadata(), {})
tup_root = await saver.aget_tuple(stored_root)
assert tup_root is not None
assert tup_root.checkpoint["id"] == cp_root["id"]
tup_child = await saver.aget_tuple(stored_child)
assert tup_child is not None
assert tup_child.checkpoint["id"] == cp_child["id"]
async def test_get_tuple_nonexistent_checkpoint_id(
saver: BaseCheckpointSaver,
) -> None:
"""Specific but missing checkpoint_id returns None."""
tid = str(uuid4())
nonexistent_id = str(uuid4())
# Put one checkpoint so the thread exists
config = generate_config(tid)
cp = generate_checkpoint()
await saver.aput(config, cp, generate_metadata(), {})
# Ask for a non-existent checkpoint_id
bad_cfg = generate_config(tid, checkpoint_id=nonexistent_id)
tup = await saver.aget_tuple(bad_cfg)
assert tup is None
ALL_GET_TUPLE_TESTS = [
test_get_tuple_nonexistent_returns_none,
test_get_tuple_latest_when_no_checkpoint_id,
test_get_tuple_specific_checkpoint_id,
test_get_tuple_config_structure,
test_get_tuple_checkpoint_fields,
test_get_tuple_metadata,
test_get_tuple_parent_config,
test_get_tuple_pending_writes,
test_get_tuple_respects_namespace,
test_get_tuple_nonexistent_checkpoint_id,
]
async def run_get_tuple_tests(
saver: BaseCheckpointSaver,
on_test_result: Callable[[str, str, bool, str | None], None] | None = None,
) -> tuple[int, int, list[str]]:
"""Run all get_tuple tests. Returns (passed, failed, failure_names)."""
passed = 0
failed = 0
failures: list[str] = []
for test_fn in ALL_GET_TUPLE_TESTS:
try:
await test_fn(saver)
passed += 1
if on_test_result:
on_test_result("get_tuple", test_fn.__name__, True, None)
except Exception as e:
failed += 1
msg = f"{test_fn.__name__}: {e}"
failures.append(msg)
if on_test_result:
on_test_result(
"get_tuple", test_fn.__name__, False, traceback.format_exc()
)
return passed, failed, failures
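# Reference note, summarizing the tuple shape the assertions above rely on.
# aget_tuple returns a CheckpointTuple with:
#   config:         {"configurable": {thread_id, checkpoint_ns, checkpoint_id}}
#   checkpoint:     {id, v, ts, channel_values, channel_versions, versions_seen}
#   metadata:       {source, step, plus any custom keys}
#   parent_config:  same shape as config, or None for a first checkpoint
#   pending_writes: list of (task_id, channel, value) triples, possibly empty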
| {
"repo_id": "langchain-ai/langgraph",
"file_path": "libs/checkpoint-conformance/langgraph/checkpoint/conformance/spec/test_get_tuple.py",
"license": "MIT License",
"lines": 207,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langgraph:libs/checkpoint-conformance/langgraph/checkpoint/conformance/spec/test_list.py | """LIST capability tests — alist with various filters."""
from __future__ import annotations
import traceback
from collections.abc import Callable
from uuid import uuid4
from langgraph.checkpoint.base import BaseCheckpointSaver
from langgraph.checkpoint.conformance.test_utils import (
generate_checkpoint,
generate_config,
generate_metadata,
)
async def _setup_list_data(saver: BaseCheckpointSaver) -> dict:
"""Populate saver with test data for list tests. Returns lookup info."""
tid = str(uuid4())
ids = []
parent_cfg = None
for i in range(4):
config = generate_config(tid)
if parent_cfg:
config["configurable"]["checkpoint_id"] = parent_cfg["configurable"][
"checkpoint_id"
]
cp = generate_checkpoint()
source = "input" if i % 2 == 0 else "loop"
md = generate_metadata(source=source, step=i)
parent_cfg = await saver.aput(config, cp, md, {})
ids.append(cp["id"])
return {
"thread_id": tid,
"checkpoint_ids": ids,
"latest_config": parent_cfg,
}
async def test_list_all(saver: BaseCheckpointSaver) -> None:
"""No filters returns all checkpoints for the thread."""
data = await _setup_list_data(saver)
tid = data["thread_id"]
results = []
async for tup in saver.alist(generate_config(tid)):
results.append(tup)
assert len(results) == 4
async def test_list_by_thread(saver: BaseCheckpointSaver) -> None:
"""Filter by thread_id — other threads not returned."""
data = await _setup_list_data(saver)
# List for a non-existent thread
results = []
async for tup in saver.alist(generate_config(str(uuid4()))):
results.append(tup)
assert len(results) == 0
# List for actual thread
results = []
async for tup in saver.alist(generate_config(data["thread_id"])):
results.append(tup)
assert len(results) == 4
async def test_list_by_namespace(saver: BaseCheckpointSaver) -> None:
"""Filter by checkpoint_ns."""
tid = str(uuid4())
# Root namespace
cfg1 = generate_config(tid, checkpoint_ns="")
cp1 = generate_checkpoint()
await saver.aput(cfg1, cp1, generate_metadata(), {})
# Child namespace
cfg2 = generate_config(tid, checkpoint_ns="child:1")
cp2 = generate_checkpoint()
await saver.aput(cfg2, cp2, generate_metadata(), {})
root_results = []
async for tup in saver.alist(generate_config(tid, checkpoint_ns="")):
root_results.append(tup)
assert len(root_results) == 1
child_results = []
async for tup in saver.alist(generate_config(tid, checkpoint_ns="child:1")):
child_results.append(tup)
assert len(child_results) == 1
async def test_list_ordering(saver: BaseCheckpointSaver) -> None:
"""Newest first (descending checkpoint_id)."""
data = await _setup_list_data(saver)
ids = data["checkpoint_ids"]
results = []
async for tup in saver.alist(generate_config(data["thread_id"])):
results.append(tup.checkpoint["id"])
# Should be in reverse order (newest first)
assert results == list(reversed(ids))
async def test_list_metadata_filter_single_key(
saver: BaseCheckpointSaver,
) -> None:
"""filter={'source': 'input'} returns only input checkpoints."""
data = await _setup_list_data(saver)
results = []
async for tup in saver.alist(
generate_config(data["thread_id"]),
filter={"source": "input"},
):
results.append(tup)
assert len(results) == 2, (
f"Expected 2 'input' checkpoints (steps 0,2), got {len(results)}"
)
for tup in results:
assert tup.metadata["source"] == "input"
async def test_list_metadata_filter_step(saver: BaseCheckpointSaver) -> None:
"""filter={'step': 1} returns matching checkpoints."""
data = await _setup_list_data(saver)
results = []
async for tup in saver.alist(
generate_config(data["thread_id"]),
filter={"step": 1},
):
results.append(tup)
assert len(results) == 1
assert results[0].metadata["step"] == 1
async def test_list_before(saver: BaseCheckpointSaver) -> None:
"""Pagination cursor — only checkpoints before the given one."""
data = await _setup_list_data(saver)
ids = data["checkpoint_ids"]
# Use the 3rd checkpoint as the 'before' cursor (index 2)
before_cfg = generate_config(data["thread_id"], checkpoint_id=ids[2])
results = []
async for tup in saver.alist(
generate_config(data["thread_id"]),
before=before_cfg,
):
results.append(tup)
# Should only include checkpoints before ids[2]
result_ids = [t.checkpoint["id"] for t in results]
assert ids[2] not in result_ids
assert ids[3] not in result_ids
    assert set(result_ids) == {ids[0], ids[1]}, (
        f"Expected {{{ids[0]!r}, {ids[1]!r}}}, got {set(result_ids)}"
    )
async def test_list_limit(saver: BaseCheckpointSaver) -> None:
"""limit=1, limit=N."""
data = await _setup_list_data(saver)
results = []
async for tup in saver.alist(generate_config(data["thread_id"]), limit=1):
results.append(tup)
assert len(results) == 1
results = []
async for tup in saver.alist(generate_config(data["thread_id"]), limit=2):
results.append(tup)
assert len(results) == 2
async def test_list_limit_plus_before(saver: BaseCheckpointSaver) -> None:
"""Pagination with limit."""
data = await _setup_list_data(saver)
ids = data["checkpoint_ids"]
before_cfg = generate_config(data["thread_id"], checkpoint_id=ids[3])
results = []
async for tup in saver.alist(
generate_config(data["thread_id"]),
before=before_cfg,
limit=1,
):
results.append(tup)
assert len(results) == 1
assert results[0].checkpoint["id"] == ids[2]
async def test_list_combined_thread_and_filter(
saver: BaseCheckpointSaver,
) -> None:
"""thread_id + metadata filter combined."""
data = await _setup_list_data(saver)
results = []
async for tup in saver.alist(
generate_config(data["thread_id"]),
filter={"source": "loop"},
):
results.append(tup)
assert len(results) == 2, (
f"Expected 2 'loop' checkpoints (steps 1,3), got {len(results)}"
)
for tup in results:
assert tup.metadata["source"] == "loop"
async def test_list_empty_result(saver: BaseCheckpointSaver) -> None:
"""No matches returns empty."""
results = []
async for tup in saver.alist(
generate_config(str(uuid4())),
filter={"source": "nonexistent"},
):
results.append(tup)
assert len(results) == 0
async def test_list_includes_pending_writes(saver: BaseCheckpointSaver) -> None:
"""pending_writes in listed tuples."""
tid = str(uuid4())
config = generate_config(tid)
cp = generate_checkpoint()
stored = await saver.aput(config, cp, generate_metadata(), {})
await saver.aput_writes(stored, [("ch", "val")], str(uuid4()))
results = []
async for tup in saver.alist(generate_config(tid)):
results.append(tup)
assert len(results) == 1
assert results[0].pending_writes is not None
assert len(results[0].pending_writes) == 1, (
f"Expected 1 write, got {len(results[0].pending_writes)}"
)
assert results[0].pending_writes[0][1] == "ch", (
f"channel mismatch: {results[0].pending_writes[0][1]!r}"
)
assert results[0].pending_writes[0][2] == "val", (
f"value mismatch: {results[0].pending_writes[0][2]!r}"
)
async def test_list_multiple_namespaces(saver: BaseCheckpointSaver) -> None:
"""Root namespace checkpoint listed correctly."""
tid = str(uuid4())
for ns in ["", "child:1", "child:2"]:
cfg = generate_config(tid, checkpoint_ns=ns)
cp = generate_checkpoint()
await saver.aput(cfg, cp, generate_metadata(), {})
# List with root namespace filter — should return exactly the root checkpoint
results = []
async for tup in saver.alist(generate_config(tid, checkpoint_ns="")):
results.append(tup)
assert len(results) == 1, f"Expected 1 root checkpoint, got {len(results)}"
async def test_list_metadata_filter_multiple_keys(
saver: BaseCheckpointSaver,
) -> None:
"""filter with multiple keys — all must match."""
tid = str(uuid4())
# Create checkpoints with different metadata combos
for source, step in [("input", 1), ("loop", 1), ("input", 2)]:
cfg = generate_config(tid)
cp = generate_checkpoint()
await saver.aput(cfg, cp, generate_metadata(source=source, step=step), {})
results = []
async for tup in saver.alist(
generate_config(tid),
filter={"source": "input", "step": 2},
):
results.append(tup)
assert len(results) == 1, (
f"Expected 1 match for source=input+step=2, got {len(results)}"
)
assert results[0].metadata["source"] == "input"
assert results[0].metadata["step"] == 2
async def test_list_metadata_filter_no_match(
saver: BaseCheckpointSaver,
) -> None:
"""Multi-key filter that matches nothing returns empty."""
data = await _setup_list_data(saver)
results = []
async for tup in saver.alist(
generate_config(data["thread_id"]),
filter={"source": "update", "step": 99},
):
results.append(tup)
assert len(results) == 0
async def test_list_metadata_custom_keys(
saver: BaseCheckpointSaver,
) -> None:
"""Custom (non-standard) metadata keys are filterable."""
tid = str(uuid4())
cfg = generate_config(tid)
cp = generate_checkpoint()
await saver.aput(cfg, cp, generate_metadata(score=42, run_id="run-abc"), {})
cfg2 = generate_config(tid)
cp2 = generate_checkpoint()
await saver.aput(cfg2, cp2, generate_metadata(score=99, run_id="run-xyz"), {})
# Filter by custom key
results = []
async for tup in saver.alist(
generate_config(tid),
filter={"score": 42},
):
results.append(tup)
assert len(results) == 1
assert results[0].metadata["score"] == 42
assert results[0].metadata["run_id"] == "run-abc"
async def test_list_global_search(
saver: BaseCheckpointSaver,
) -> None:
"""alist(None, filter=...) searches across all threads."""
tid1, tid2 = str(uuid4()), str(uuid4())
# Use a unique marker so we don't collide with other tests' data
marker = str(uuid4())
cfg1 = generate_config(tid1)
cp1 = generate_checkpoint()
await saver.aput(cfg1, cp1, generate_metadata(source="input", marker=marker), {})
cfg2 = generate_config(tid2)
cp2 = generate_checkpoint()
await saver.aput(cfg2, cp2, generate_metadata(source="loop", marker=marker), {})
# Search across all threads with filter
results = []
async for tup in saver.alist(None, filter={"source": "input", "marker": marker}):
results.append(tup)
assert len(results) == 1
assert results[0].config["configurable"]["thread_id"] == tid1
# Search with marker only — should find both
results = []
async for tup in saver.alist(None, filter={"marker": marker}):
results.append(tup)
assert len(results) == 2
ALL_LIST_TESTS = [
test_list_all,
test_list_by_thread,
test_list_by_namespace,
test_list_ordering,
test_list_metadata_filter_single_key,
test_list_metadata_filter_step,
test_list_metadata_filter_multiple_keys,
test_list_metadata_filter_no_match,
test_list_metadata_custom_keys,
test_list_global_search,
test_list_before,
test_list_limit,
test_list_limit_plus_before,
test_list_combined_thread_and_filter,
test_list_empty_result,
test_list_includes_pending_writes,
test_list_multiple_namespaces,
]
async def run_list_tests(
saver: BaseCheckpointSaver,
on_test_result: Callable[[str, str, bool, str | None], None] | None = None,
) -> tuple[int, int, list[str]]:
"""Run all list tests. Returns (passed, failed, failure_names)."""
passed = 0
failed = 0
failures: list[str] = []
for test_fn in ALL_LIST_TESTS:
try:
await test_fn(saver)
passed += 1
if on_test_result:
on_test_result("list", test_fn.__name__, True, None)
except Exception as e:
failed += 1
msg = f"{test_fn.__name__}: {e}"
failures.append(msg)
if on_test_result:
on_test_result("list", test_fn.__name__, False, traceback.format_exc())
return passed, failed, failures
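# Hedged sketch of the pagination pattern exercised by test_list_before and
# test_list_limit_plus_before: walk a thread newest-first in fixed-size pages
# by feeding each page's last config back in as `before`. Names here are
# illustrative; `saver` is any BaseCheckpointSaver, `config` a thread config.
#
#     async def iter_pages(saver, config, page_size: int = 2):
#         before = None
#         while True:
#             page = [
#                 t async for t in saver.alist(config, before=before, limit=page_size)
#             ]
#             if not page:
#                 return
#             yield page
#             before = page[-1].config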
| {
"repo_id": "langchain-ai/langgraph",
"file_path": "libs/checkpoint-conformance/langgraph/checkpoint/conformance/spec/test_list.py",
"license": "MIT License",
"lines": 328,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langgraph:libs/checkpoint-conformance/langgraph/checkpoint/conformance/spec/test_prune.py | """PRUNE capability tests — aprune(strategy)."""
from __future__ import annotations
import traceback
from collections.abc import Callable
from uuid import uuid4
from langgraph.checkpoint.base import BaseCheckpointSaver
from langgraph.checkpoint.conformance.test_utils import (
generate_checkpoint,
generate_config,
generate_metadata,
)
async def _setup_thread(
    saver: BaseCheckpointSaver, tid: str, n: int = 3
) -> list[dict]:
"""Create n checkpoints on tid. Returns stored configs."""
stored = []
parent_cfg = None
for i in range(n):
config = generate_config(tid)
if parent_cfg:
config["configurable"]["checkpoint_id"] = parent_cfg["configurable"][
"checkpoint_id"
]
cp = generate_checkpoint()
parent_cfg = await saver.aput(config, cp, generate_metadata(step=i), {})
stored.append(parent_cfg)
return stored
async def test_prune_keep_latest_single_thread(
saver: BaseCheckpointSaver,
) -> None:
"""Only latest checkpoint survives."""
tid = str(uuid4())
configs = await _setup_thread(saver, tid, n=4)
await saver.aprune([tid], strategy="keep_latest")
results = []
async for tup in saver.alist(generate_config(tid)):
results.append(tup)
assert len(results) == 1
assert (
results[0].config["configurable"]["checkpoint_id"]
== configs[-1]["configurable"]["checkpoint_id"]
)
async def test_prune_keep_latest_multiple_threads(
saver: BaseCheckpointSaver,
) -> None:
"""Each thread keeps its latest."""
tid1, tid2 = str(uuid4()), str(uuid4())
c1 = await _setup_thread(saver, tid1, n=3)
c2 = await _setup_thread(saver, tid2, n=2)
await saver.aprune([tid1, tid2], strategy="keep_latest")
for tid, expected_last in [(tid1, c1[-1]), (tid2, c2[-1])]:
results = []
async for tup in saver.alist(generate_config(tid)):
results.append(tup)
assert len(results) == 1
assert (
results[0].config["configurable"]["checkpoint_id"]
== expected_last["configurable"]["checkpoint_id"]
)
async def test_prune_keep_latest_across_namespaces(
saver: BaseCheckpointSaver,
) -> None:
"""Latest per namespace kept."""
tid = str(uuid4())
# Root namespace: 3 checkpoints
parent = None
for i in range(3):
cfg = generate_config(tid, checkpoint_ns="")
if parent:
cfg["configurable"]["checkpoint_id"] = parent["configurable"][
"checkpoint_id"
]
cp = generate_checkpoint()
parent = await saver.aput(cfg, cp, generate_metadata(step=i), {})
root_latest = parent
# Child namespace: 2 checkpoints
parent = None
for i in range(2):
cfg = generate_config(tid, checkpoint_ns="child:1")
if parent:
cfg["configurable"]["checkpoint_id"] = parent["configurable"][
"checkpoint_id"
]
cp = generate_checkpoint()
parent = await saver.aput(cfg, cp, generate_metadata(step=i), {})
child_latest = parent
assert root_latest is not None
assert child_latest is not None
await saver.aprune([tid], strategy="keep_latest")
for ns, expected in [("", root_latest), ("child:1", child_latest)]:
results = []
async for tup in saver.alist(generate_config(tid, checkpoint_ns=ns)):
results.append(tup)
assert len(results) == 1
assert (
results[0].config["configurable"]["checkpoint_id"]
== expected["configurable"]["checkpoint_id"]
)
async def test_prune_keep_latest_preserves_writes(
saver: BaseCheckpointSaver,
) -> None:
"""Latest checkpoint's writes kept."""
tid = str(uuid4())
configs = await _setup_thread(saver, tid, n=3)
# Add writes to the latest
await saver.aput_writes(configs[-1], [("ch", "val")], str(uuid4()))
await saver.aprune([tid], strategy="keep_latest")
tup = await saver.aget_tuple(generate_config(tid))
assert tup is not None
assert tup.pending_writes is not None
assert len(tup.pending_writes) == 1, (
f"Expected 1 write, got {len(tup.pending_writes)}"
)
assert tup.pending_writes[0][1] == "ch", (
f"channel mismatch: {tup.pending_writes[0][1]!r}"
)
assert tup.pending_writes[0][2] == "val", (
f"value mismatch: {tup.pending_writes[0][2]!r}"
)
async def test_prune_delete_all(saver: BaseCheckpointSaver) -> None:
"""delete_all strategy removes everything."""
tid = str(uuid4())
await _setup_thread(saver, tid, n=3)
await saver.aprune([tid], strategy="delete")
results = []
async for tup in saver.alist(generate_config(tid)):
results.append(tup)
assert len(results) == 0
async def test_prune_preserves_other_threads(
saver: BaseCheckpointSaver,
) -> None:
"""Unlisted threads untouched."""
tid1, tid2 = str(uuid4()), str(uuid4())
await _setup_thread(saver, tid1, n=3)
await _setup_thread(saver, tid2, n=2)
# Snapshot tid2 before prune
pre_ids = []
async for tup in saver.alist(generate_config(tid2)):
pre_ids.append(tup.checkpoint["id"])
await saver.aprune([tid1], strategy="keep_latest")
# tid2 should be fully intact — same checkpoint IDs
post_ids = []
async for tup in saver.alist(generate_config(tid2)):
post_ids.append(tup.checkpoint["id"])
assert post_ids == pre_ids, f"tid2 changed: {pre_ids} -> {post_ids}"
async def test_prune_empty_list_noop(saver: BaseCheckpointSaver) -> None:
"""Empty thread_ids no error."""
await saver.aprune([], strategy="keep_latest")
async def test_prune_nonexistent_noop(saver: BaseCheckpointSaver) -> None:
"""Missing threads no error."""
await saver.aprune([str(uuid4())], strategy="keep_latest")
ALL_PRUNE_TESTS = [
test_prune_keep_latest_single_thread,
test_prune_keep_latest_multiple_threads,
test_prune_keep_latest_across_namespaces,
test_prune_keep_latest_preserves_writes,
test_prune_delete_all,
test_prune_preserves_other_threads,
test_prune_empty_list_noop,
test_prune_nonexistent_noop,
]
async def run_prune_tests(
saver: BaseCheckpointSaver,
on_test_result: Callable[[str, str, bool, str | None], None] | None = None,
) -> tuple[int, int, list[str]]:
"""Run all prune tests. Returns (passed, failed, failure_names)."""
passed = 0
failed = 0
failures: list[str] = []
for test_fn in ALL_PRUNE_TESTS:
try:
await test_fn(saver)
passed += 1
if on_test_result:
on_test_result("prune", test_fn.__name__, True, None)
except Exception as e:
failed += 1
msg = f"{test_fn.__name__}: {e}"
failures.append(msg)
if on_test_result:
on_test_result("prune", test_fn.__name__, False, traceback.format_exc())
return passed, failed, failures
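# For reference, the two strategy literals exercised above (taken from these
# tests; implementations may support others):
#
#     await saver.aprune(thread_ids, strategy="keep_latest")  # newest per namespace survives
#     await saver.aprune(thread_ids, strategy="delete")       # threads removed entirely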
| {
"repo_id": "langchain-ai/langgraph",
"file_path": "libs/checkpoint-conformance/langgraph/checkpoint/conformance/spec/test_prune.py",
"license": "MIT License",
"lines": 180,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langgraph:libs/checkpoint-conformance/langgraph/checkpoint/conformance/spec/test_put.py | """PUT capability tests — aput + aget_tuple round-trip."""
from __future__ import annotations
import traceback
from collections.abc import Callable
from typing import Any
from uuid import uuid4
from langgraph.checkpoint.base import (
BaseCheckpointSaver,
ChannelVersions,
)
from langgraph.checkpoint.conformance.test_utils import (
generate_checkpoint,
generate_config,
generate_metadata,
)
async def test_put_returns_config(saver: BaseCheckpointSaver) -> None:
"""aput returns a RunnableConfig with thread_id, checkpoint_ns, checkpoint_id."""
config = generate_config()
cp = generate_checkpoint(channel_values={"k": "v"})
cp["channel_versions"] = {"k": 1}
md = generate_metadata()
result = await saver.aput(config, cp, md, {"k": 1})
assert "configurable" in result
conf = result["configurable"]
assert "thread_id" in conf
assert "checkpoint_ns" in conf
assert "checkpoint_id" in conf
assert conf["checkpoint_id"] == cp["id"]
async def test_put_roundtrip(saver: BaseCheckpointSaver) -> None:
"""put then get_tuple returns identical checkpoint."""
config = generate_config()
cp = generate_checkpoint(channel_values={"msg": "hello"})
cp["channel_versions"] = {"msg": 1}
md = generate_metadata(source="input", step=-1)
stored_config = await saver.aput(config, cp, md, {"msg": 1})
tup = await saver.aget_tuple(stored_config)
assert tup is not None
assert tup.checkpoint["id"] == cp["id"]
assert tup.checkpoint["channel_values"] == {"msg": "hello"}
async def test_put_preserves_channel_values(saver: BaseCheckpointSaver) -> None:
"""Various types (str, int, list, dict, bytes, None) round-trip correctly."""
values: dict[str, Any] = {
"str_val": "hello",
"int_val": 42,
"list_val": [1, 2, 3],
"dict_val": {"nested": True},
}
config = generate_config()
cp = generate_checkpoint(channel_values=values)
versions: ChannelVersions = {k: 1 for k in values}
cp["channel_versions"] = versions
md = generate_metadata()
stored = await saver.aput(config, cp, md, versions)
tup = await saver.aget_tuple(stored)
assert tup is not None
for k, v in values.items():
assert tup.checkpoint["channel_values"].get(k) == v, (
f"channel_values[{k}]: expected {v!r}, got {tup.checkpoint['channel_values'].get(k)!r}"
)
async def test_put_preserves_channel_versions(saver: BaseCheckpointSaver) -> None:
"""ChannelVersions round-trip correctly."""
versions: ChannelVersions = {"a": 1, "b": 2}
config = generate_config()
cp = generate_checkpoint(
channel_values={"a": "x", "b": "y"}, channel_versions=versions
)
md = generate_metadata()
stored = await saver.aput(config, cp, md, versions)
tup = await saver.aget_tuple(stored)
assert tup is not None
    # Compare only the portion before the first ".": checkpointers may store
    # versions as strings with an opaque suffix rather than the int written.
for k, expected in versions.items():
actual = tup.checkpoint["channel_versions"].get(k)
assert actual is not None, f"channel_versions[{k}] missing"
assert str(actual).split(".")[0] == str(expected).split(".")[0], (
f"channel_versions[{k}]: expected {expected!r}, got {actual!r}"
)
async def test_put_preserves_versions_seen(saver: BaseCheckpointSaver) -> None:
"""versions_seen dict round-trips."""
vs: dict[str, ChannelVersions] = {"node1": {"ch": 1}, "node2": {"ch": 2}}
config = generate_config()
cp = generate_checkpoint(versions_seen=vs)
md = generate_metadata()
stored = await saver.aput(config, cp, md, {})
tup = await saver.aget_tuple(stored)
assert tup is not None
for node in vs:
assert node in tup.checkpoint["versions_seen"], f"versions_seen[{node}] missing"
async def test_put_preserves_metadata(saver: BaseCheckpointSaver) -> None:
"""Metadata source, step, parents, and custom keys round-trip."""
md = generate_metadata(source="loop", step=3, custom_key="custom_value")
config = generate_config()
cp = generate_checkpoint()
stored = await saver.aput(config, cp, md, {})
tup = await saver.aget_tuple(stored)
assert tup is not None
assert tup.metadata["source"] == "loop"
assert tup.metadata["step"] == 3
assert tup.metadata.get("custom_key") == "custom_value"
async def test_put_root_namespace(saver: BaseCheckpointSaver) -> None:
"""checkpoint_ns='' works."""
config = generate_config(checkpoint_ns="")
cp = generate_checkpoint()
md = generate_metadata()
stored = await saver.aput(config, cp, md, {})
tup = await saver.aget_tuple(stored)
assert tup is not None
assert tup.config["configurable"].get("checkpoint_ns", "") == ""
async def test_put_child_namespace(saver: BaseCheckpointSaver) -> None:
"""checkpoint_ns='child:abc' works."""
config = generate_config(checkpoint_ns="child:abc")
cp = generate_checkpoint()
md = generate_metadata()
stored = await saver.aput(config, cp, md, {})
tup = await saver.aget_tuple(stored)
assert tup is not None
assert tup.config["configurable"]["checkpoint_ns"] == "child:abc"
async def test_put_default_namespace(saver: BaseCheckpointSaver) -> None:
"""Config without checkpoint_ns defaults to ''."""
tid = str(uuid4())
config = {"configurable": {"thread_id": tid, "checkpoint_ns": ""}}
cp = generate_checkpoint()
md = generate_metadata()
stored = await saver.aput(config, cp, md, {})
tup = await saver.aget_tuple(stored)
assert tup is not None
async def test_put_multiple_checkpoints_same_thread(
saver: BaseCheckpointSaver,
) -> None:
"""Sequential puts on same thread, all retrievable."""
tid = str(uuid4())
ids = []
parent_cfg = None
for i in range(3):
config = generate_config(tid)
if parent_cfg is not None:
config["configurable"]["checkpoint_id"] = parent_cfg["configurable"][
"checkpoint_id"
]
cp = generate_checkpoint()
md = generate_metadata(step=i)
parent_cfg = await saver.aput(config, cp, md, {})
ids.append(cp["id"])
# All three should be retrievable
for cid in ids:
cfg = generate_config(tid, checkpoint_id=cid)
tup = await saver.aget_tuple(cfg)
assert tup is not None, f"checkpoint {cid} not found"
assert tup.checkpoint["id"] == cid
async def test_put_multiple_threads_isolated(saver: BaseCheckpointSaver) -> None:
"""Different thread_ids don't interfere."""
tid1, tid2 = str(uuid4()), str(uuid4())
config1 = generate_config(tid1)
cp1 = generate_checkpoint(channel_values={"x": "thread1"})
cp1["channel_versions"] = {"x": 1}
await saver.aput(config1, cp1, generate_metadata(), {"x": 1})
config2 = generate_config(tid2)
cp2 = generate_checkpoint(channel_values={"x": "thread2"})
cp2["channel_versions"] = {"x": 1}
await saver.aput(config2, cp2, generate_metadata(), {"x": 1})
tup1 = await saver.aget_tuple(generate_config(tid1))
tup2 = await saver.aget_tuple(generate_config(tid2))
assert tup1 is not None and tup2 is not None
assert tup1.checkpoint["channel_values"]["x"] == "thread1"
assert tup2.checkpoint["channel_values"]["x"] == "thread2"
async def test_put_parent_config(saver: BaseCheckpointSaver) -> None:
"""parent checkpoint_id tracked correctly."""
tid = str(uuid4())
config1 = generate_config(tid)
cp1 = generate_checkpoint()
stored1 = await saver.aput(config1, cp1, generate_metadata(step=0), {})
# Second checkpoint — its config carries the parent checkpoint_id
config2 = generate_config(tid)
config2["configurable"]["checkpoint_id"] = stored1["configurable"]["checkpoint_id"]
cp2 = generate_checkpoint()
stored2 = await saver.aput(config2, cp2, generate_metadata(step=1), {})
tup = await saver.aget_tuple(stored2)
assert tup is not None
assert tup.parent_config is not None
assert (
tup.parent_config["configurable"]["checkpoint_id"]
== stored1["configurable"]["checkpoint_id"]
)
async def test_put_incremental_channel_update(saver: BaseCheckpointSaver) -> None:
"""Only updated channels need new blobs; unchanged channels loaded from prior versions."""
tid = str(uuid4())
# Checkpoint 1: both channels are new
config1 = generate_config(tid)
cp1 = generate_checkpoint(
channel_values={"a": "v1", "b": "v2"},
channel_versions={"a": 1, "b": 1},
)
stored1 = await saver.aput(
config1, cp1, generate_metadata(step=0), {"a": 1, "b": 1}
)
# Checkpoint 2: only 'a' is updated
config2 = generate_config(tid)
config2["configurable"]["checkpoint_id"] = stored1["configurable"]["checkpoint_id"]
cp2 = generate_checkpoint(
channel_values={"a": "v1_updated", "b": "v2"},
channel_versions={"a": 2, "b": 1},
)
stored2 = await saver.aput(config2, cp2, generate_metadata(step=1), {"a": 2})
# cp2 should reconstruct full channel_values from blobs at mixed versions
tup2 = await saver.aget_tuple(stored2)
assert tup2 is not None
assert tup2.checkpoint["channel_values"].get("a") == "v1_updated", (
f"a: expected 'v1_updated', got {tup2.checkpoint['channel_values'].get('a')!r}"
)
assert tup2.checkpoint["channel_values"].get("b") == "v2", (
f"b: expected 'v2', got {tup2.checkpoint['channel_values'].get('b')!r}"
)
# cp1 should still return original values
tup1 = await saver.aget_tuple(stored1)
assert tup1 is not None
assert tup1.checkpoint["channel_values"].get("a") == "v1"
assert tup1.checkpoint["channel_values"].get("b") == "v2"
async def test_put_new_channel_added(saver: BaseCheckpointSaver) -> None:
"""A channel that appears for the first time in a later checkpoint."""
tid = str(uuid4())
config1 = generate_config(tid)
cp1 = generate_checkpoint(
channel_values={"a": "v1"},
channel_versions={"a": 1},
)
stored1 = await saver.aput(config1, cp1, generate_metadata(step=0), {"a": 1})
# Checkpoint 2: 'b' is brand new, 'a' is unchanged
config2 = generate_config(tid)
config2["configurable"]["checkpoint_id"] = stored1["configurable"]["checkpoint_id"]
cp2 = generate_checkpoint(
channel_values={"a": "v1", "b": "new_channel"},
channel_versions={"a": 1, "b": 1},
)
stored2 = await saver.aput(config2, cp2, generate_metadata(step=1), {"b": 1})
tup2 = await saver.aget_tuple(stored2)
assert tup2 is not None
assert tup2.checkpoint["channel_values"].get("a") == "v1", (
f"a: expected 'v1', got {tup2.checkpoint['channel_values'].get('a')!r}"
)
assert tup2.checkpoint["channel_values"].get("b") == "new_channel", (
f"b: expected 'new_channel', got {tup2.checkpoint['channel_values'].get('b')!r}"
)
async def test_put_channel_removed(saver: BaseCheckpointSaver) -> None:
"""Channel no longer in channel_versions should not appear in loaded values."""
tid = str(uuid4())
config1 = generate_config(tid)
cp1 = generate_checkpoint(
channel_values={"a": "v1", "b": "v2"},
channel_versions={"a": 1, "b": 1},
)
stored1 = await saver.aput(
config1, cp1, generate_metadata(step=0), {"a": 1, "b": 1}
)
# Checkpoint 2: 'b' dropped from channel_versions
config2 = generate_config(tid)
config2["configurable"]["checkpoint_id"] = stored1["configurable"]["checkpoint_id"]
cp2 = generate_checkpoint(
channel_values={"a": "v1_updated"},
channel_versions={"a": 2},
)
stored2 = await saver.aput(config2, cp2, generate_metadata(step=1), {"a": 2})
tup2 = await saver.aget_tuple(stored2)
assert tup2 is not None
assert tup2.checkpoint["channel_values"].get("a") == "v1_updated"
assert "b" not in tup2.checkpoint["channel_values"], (
f"'b' should not be present, got {tup2.checkpoint['channel_values']}"
)
async def test_put_preserves_run_id(saver: BaseCheckpointSaver) -> None:
"""run_id in metadata round-trips correctly."""
run_id = str(uuid4())
config = generate_config()
cp = generate_checkpoint()
md = generate_metadata(source="loop", step=0, run_id=run_id)
stored = await saver.aput(config, cp, md, {})
tup = await saver.aget_tuple(stored)
assert tup is not None
assert tup.metadata.get("run_id") == run_id, (
f"run_id: expected {run_id!r}, got {tup.metadata.get('run_id')!r}"
)
async def test_put_preserves_versions_seen_values(saver: BaseCheckpointSaver) -> None:
"""versions_seen values (not just keys) round-trip correctly."""
vs: dict[str, ChannelVersions] = {
"node1": {"ch_a": 1, "ch_b": 2},
"node2": {"ch_a": 3},
}
config = generate_config()
cp = generate_checkpoint(versions_seen=vs)
md = generate_metadata()
stored = await saver.aput(config, cp, md, {})
tup = await saver.aget_tuple(stored)
assert tup is not None
for node, expected_versions in vs.items():
assert node in tup.checkpoint["versions_seen"], f"versions_seen[{node}] missing"
actual_versions = tup.checkpoint["versions_seen"][node]
for ch, expected_v in expected_versions.items():
actual_v = actual_versions.get(ch)
assert actual_v is not None, f"versions_seen[{node}][{ch}] missing"
assert str(actual_v).split(".")[0] == str(expected_v).split(".")[0], (
f"versions_seen[{node}][{ch}]: expected {expected_v!r}, got {actual_v!r}"
)
ALL_PUT_TESTS = [
test_put_returns_config,
test_put_roundtrip,
test_put_preserves_channel_values,
test_put_preserves_channel_versions,
test_put_preserves_versions_seen,
test_put_preserves_metadata,
test_put_root_namespace,
test_put_child_namespace,
test_put_default_namespace,
test_put_multiple_checkpoints_same_thread,
test_put_multiple_threads_isolated,
test_put_parent_config,
test_put_incremental_channel_update,
test_put_new_channel_added,
test_put_channel_removed,
test_put_preserves_run_id,
test_put_preserves_versions_seen_values,
]
async def run_put_tests(
saver: BaseCheckpointSaver,
on_test_result: Callable[[str, str, bool, str | None], None] | None = None,
) -> tuple[int, int, list[str]]:
"""Run all put tests. Returns (passed, failed, failure_names)."""
passed = 0
failed = 0
failures: list[str] = []
for test_fn in ALL_PUT_TESTS:
try:
await test_fn(saver)
passed += 1
if on_test_result:
on_test_result("put", test_fn.__name__, True, None)
except Exception as e:
failed += 1
msg = f"{test_fn.__name__}: {e}"
failures.append(msg)
if on_test_result:
on_test_result("put", test_fn.__name__, False, traceback.format_exc())
return passed, failed, failures
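# Hedged sketch of the incremental-write contract that
# test_put_incremental_channel_update exercises: the final argument to aput
# lists only channels whose blobs changed since the parent checkpoint, so a
# saver may store just those blobs and reassemble channel_values on read.
#
#     cp = generate_checkpoint(
#         channel_values={"a": "v1_updated", "b": "v2"},  # full logical view
#         channel_versions={"a": 2, "b": 1},              # versions for all channels
#     )
#     await saver.aput(config, cp, generate_metadata(step=1), {"a": 2})  # only "a" is new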
| {
"repo_id": "langchain-ai/langgraph",
"file_path": "libs/checkpoint-conformance/langgraph/checkpoint/conformance/spec/test_put.py",
"license": "MIT License",
"lines": 340,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langgraph:libs/checkpoint-conformance/langgraph/checkpoint/conformance/spec/test_put_writes.py | """PUT_WRITES capability tests — aput_writes + pending_writes retrieval."""
from __future__ import annotations
import traceback
from collections.abc import Callable
from uuid import uuid4
from langgraph.checkpoint.base import BaseCheckpointSaver
from langgraph.checkpoint.conformance.test_utils import (
generate_checkpoint,
generate_config,
generate_metadata,
)
async def test_put_writes_basic(saver: BaseCheckpointSaver) -> None:
"""Write stored, visible in aget_tuple pending_writes."""
tid = str(uuid4())
config = generate_config(tid)
cp = generate_checkpoint()
stored = await saver.aput(config, cp, generate_metadata(), {})
task_id = str(uuid4())
await saver.aput_writes(stored, [("channel1", "value1")], task_id)
tup = await saver.aget_tuple(stored)
assert tup is not None
assert tup.pending_writes is not None
# Verify exact write tuple: (task_id, channel, value)
matching = [w for w in tup.pending_writes if w[0] == task_id and w[1] == "channel1"]
assert len(matching) == 1, f"Expected 1 write, got {len(matching)}"
assert matching[0][2] == "value1", f"Value mismatch: {matching[0][2]!r}"
async def test_put_writes_multiple_writes_same_task(
saver: BaseCheckpointSaver,
) -> None:
"""Multiple (channel, value) pairs in a single call."""
tid = str(uuid4())
config = generate_config(tid)
cp = generate_checkpoint()
stored = await saver.aput(config, cp, generate_metadata(), {})
task_id = str(uuid4())
writes = [("ch1", "v1"), ("ch2", "v2"), ("ch3", "v3")]
await saver.aput_writes(stored, writes, task_id)
tup = await saver.aget_tuple(stored)
assert tup is not None
assert tup.pending_writes is not None
assert len(tup.pending_writes) == 3, (
f"Expected 3 writes, got {len(tup.pending_writes)}"
)
channels = {w[1] for w in tup.pending_writes}
assert channels == {"ch1", "ch2", "ch3"}, f"Expected exact channels, got {channels}"
# Verify values per channel
for expected_ch, expected_val in writes:
match = [
w for w in tup.pending_writes if w[0] == task_id and w[1] == expected_ch
]
assert len(match) == 1, f"Expected 1 write for {expected_ch}, got {len(match)}"
assert match[0][2] == expected_val, (
f"Value mismatch for {expected_ch}: {match[0][2]!r}"
)
async def test_put_writes_multiple_tasks(saver: BaseCheckpointSaver) -> None:
"""Different task_ids produce separate writes."""
tid = str(uuid4())
config = generate_config(tid)
cp = generate_checkpoint()
stored = await saver.aput(config, cp, generate_metadata(), {})
t1, t2 = str(uuid4()), str(uuid4())
await saver.aput_writes(stored, [("ch", "from_t1")], t1)
await saver.aput_writes(stored, [("ch", "from_t2")], t2)
tup = await saver.aget_tuple(stored)
assert tup is not None
assert tup.pending_writes is not None
assert len(tup.pending_writes) == 2, (
f"Expected 2 writes, got {len(tup.pending_writes)}"
)
# Verify values per task
t1_writes = [w for w in tup.pending_writes if w[0] == t1 and w[1] == "ch"]
t2_writes = [w for w in tup.pending_writes if w[0] == t2 and w[1] == "ch"]
assert len(t1_writes) == 1, f"Expected 1 write from t1, got {len(t1_writes)}"
assert len(t2_writes) == 1, f"Expected 1 write from t2, got {len(t2_writes)}"
assert t1_writes[0][2] == "from_t1", f"t1 value: {t1_writes[0][2]!r}"
assert t2_writes[0][2] == "from_t2", f"t2 value: {t2_writes[0][2]!r}"
async def test_put_writes_preserves_task_id(saver: BaseCheckpointSaver) -> None:
"""task_id in pending_writes matches what was passed to aput_writes."""
tid = str(uuid4())
config = generate_config(tid)
cp = generate_checkpoint()
stored = await saver.aput(config, cp, generate_metadata(), {})
task_id = str(uuid4())
await saver.aput_writes(stored, [("ch", "val")], task_id)
tup = await saver.aget_tuple(stored)
assert tup is not None
assert tup.pending_writes is not None
assert any(w[0] == task_id for w in tup.pending_writes)
async def test_put_writes_preserves_channel_and_value(
saver: BaseCheckpointSaver,
) -> None:
"""Channel name + value round-trip."""
tid = str(uuid4())
config = generate_config(tid)
cp = generate_checkpoint()
stored = await saver.aput(config, cp, generate_metadata(), {})
task_id = str(uuid4())
await saver.aput_writes(stored, [("my_channel", {"data": 123})], task_id)
tup = await saver.aget_tuple(stored)
assert tup is not None
assert tup.pending_writes is not None
match = [w for w in tup.pending_writes if w[0] == task_id and w[1] == "my_channel"]
assert len(match) == 1, f"Expected 1 write, got {len(match)}"
assert match[0][2] == {"data": 123}, f"Value mismatch: {match[0][2]!r}"
async def test_put_writes_task_path(saver: BaseCheckpointSaver) -> None:
"""task_path parameter accepted without error."""
tid = str(uuid4())
config = generate_config(tid)
cp = generate_checkpoint()
stored = await saver.aput(config, cp, generate_metadata(), {})
task_id = str(uuid4())
# Should not raise
await saver.aput_writes(stored, [("ch", "v")], task_id, task_path="a:b:c")
tup = await saver.aget_tuple(stored)
assert tup is not None
assert tup.pending_writes is not None
assert len(tup.pending_writes) == 1
async def test_put_writes_idempotent(saver: BaseCheckpointSaver) -> None:
"""Duplicate (task_id, idx) doesn't duplicate writes."""
tid = str(uuid4())
config = generate_config(tid)
cp = generate_checkpoint()
stored = await saver.aput(config, cp, generate_metadata(), {})
task_id = str(uuid4())
await saver.aput_writes(stored, [("ch", "val")], task_id)
await saver.aput_writes(stored, [("ch", "val")], task_id)
tup = await saver.aget_tuple(stored)
assert tup is not None
assert tup.pending_writes is not None
assert len(tup.pending_writes) == 1, (
f"Expected exactly 1 write total, got {len(tup.pending_writes)}"
)
# Should not have duplicated
matching = [w for w in tup.pending_writes if w[0] == task_id and w[1] == "ch"]
assert len(matching) == 1
async def test_put_writes_special_channels(saver: BaseCheckpointSaver) -> None:
"""ERROR and INTERRUPT channels handled correctly."""
from langgraph.checkpoint.serde.types import ERROR, INTERRUPT
tid = str(uuid4())
config = generate_config(tid)
cp = generate_checkpoint()
stored = await saver.aput(config, cp, generate_metadata(), {})
task_id = str(uuid4())
await saver.aput_writes(
stored,
[(ERROR, "something went wrong"), (INTERRUPT, {"reason": "human_input"})],
task_id,
)
tup = await saver.aget_tuple(stored)
assert tup is not None
assert tup.pending_writes is not None
channels = {w[1] for w in tup.pending_writes}
assert ERROR in channels
assert INTERRUPT in channels
# Verify values
err_writes = [w for w in tup.pending_writes if w[0] == task_id and w[1] == ERROR]
assert len(err_writes) == 1, f"Expected 1 ERROR write, got {len(err_writes)}"
assert err_writes[0][2] == "something went wrong", (
f"ERROR value: {err_writes[0][2]!r}"
)
int_writes = [
w for w in tup.pending_writes if w[0] == task_id and w[1] == INTERRUPT
]
assert len(int_writes) == 1, f"Expected 1 INTERRUPT write, got {len(int_writes)}"
assert int_writes[0][2] == {"reason": "human_input"}, (
f"INTERRUPT value: {int_writes[0][2]!r}"
)
async def test_put_writes_across_namespaces(saver: BaseCheckpointSaver) -> None:
"""Writes isolated by checkpoint_ns."""
tid = str(uuid4())
# Root namespace checkpoint + write
cfg_root = generate_config(tid, checkpoint_ns="")
cp_root = generate_checkpoint()
stored_root = await saver.aput(cfg_root, cp_root, generate_metadata(), {})
root_task = str(uuid4())
await saver.aput_writes(stored_root, [("ch", "root_val")], root_task)
# Child namespace checkpoint + write
cfg_child = generate_config(tid, checkpoint_ns="child:1")
cp_child = generate_checkpoint()
stored_child = await saver.aput(cfg_child, cp_child, generate_metadata(), {})
child_task = str(uuid4())
await saver.aput_writes(stored_child, [("ch", "child_val")], child_task)
# Verify isolation — root should have exactly 1 write with root_val
tup_root = await saver.aget_tuple(stored_root)
assert tup_root is not None
assert tup_root.pending_writes is not None
root_ch = [w for w in tup_root.pending_writes if w[1] == "ch"]
assert len(root_ch) == 1, f"Expected 1 root write, got {len(root_ch)}"
assert root_ch[0][2] == "root_val", f"Root value: {root_ch[0][2]!r}"
# Child should have exactly 1 write with child_val
tup_child = await saver.aget_tuple(stored_child)
assert tup_child is not None
assert tup_child.pending_writes is not None
child_ch = [w for w in tup_child.pending_writes if w[1] == "ch"]
assert len(child_ch) == 1, f"Expected 1 child write, got {len(child_ch)}"
assert child_ch[0][2] == "child_val", f"Child value: {child_ch[0][2]!r}"
async def test_put_writes_cleared_on_next_checkpoint(
saver: BaseCheckpointSaver,
) -> None:
"""New checkpoint starts with fresh pending_writes."""
tid = str(uuid4())
config = generate_config(tid)
cp1 = generate_checkpoint()
stored1 = await saver.aput(config, cp1, generate_metadata(step=0), {})
await saver.aput_writes(stored1, [("ch", "old_write")], str(uuid4()))
# New checkpoint
config2 = generate_config(tid)
config2["configurable"]["checkpoint_id"] = stored1["configurable"]["checkpoint_id"]
cp2 = generate_checkpoint()
stored2 = await saver.aput(config2, cp2, generate_metadata(step=1), {})
tup = await saver.aget_tuple(stored2)
assert tup is not None
# New checkpoint should have no pending writes
writes = tup.pending_writes or []
assert len(writes) == 0
ALL_PUT_WRITES_TESTS = [
test_put_writes_basic,
test_put_writes_multiple_writes_same_task,
test_put_writes_multiple_tasks,
test_put_writes_preserves_task_id,
test_put_writes_preserves_channel_and_value,
test_put_writes_task_path,
test_put_writes_idempotent,
test_put_writes_special_channels,
test_put_writes_across_namespaces,
test_put_writes_cleared_on_next_checkpoint,
]
async def run_put_writes_tests(
saver: BaseCheckpointSaver,
on_test_result: Callable[[str, str, bool, str | None], None] | None = None,
) -> tuple[int, int, list[str]]:
"""Run all put_writes tests. Returns (passed, failed, failure_names)."""
passed = 0
failed = 0
failures: list[str] = []
for test_fn in ALL_PUT_WRITES_TESTS:
try:
await test_fn(saver)
passed += 1
if on_test_result:
on_test_result("put_writes", test_fn.__name__, True, None)
except Exception as e:
failed += 1
msg = f"{test_fn.__name__}: {e}"
failures.append(msg)
if on_test_result:
on_test_result(
"put_writes", test_fn.__name__, False, traceback.format_exc()
)
return passed, failed, failures
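if __name__ == "__main__":
    # Usage sketch, not part of the conformance API: run the PUT_WRITES suite
    # with a per-test callback. The callback receives (capability, test name,
    # passed, optional traceback), as wired up in run_put_writes_tests above.
    # Assumes langgraph-checkpoint's InMemorySaver is available.
    import asyncio
    from langgraph.checkpoint.memory import InMemorySaver
    def _on_result(cap: str, name: str, ok: bool, tb: str | None) -> None:
        print(f"[{cap}] {name}: {'ok' if ok else 'FAIL'}")
        if tb:
            print(tb)
    async def _main() -> None:
        passed, failed, _ = await run_put_writes_tests(
            InMemorySaver(), on_test_result=_on_result
        )
        print(f"put_writes: {passed} passed, {failed} failed")
    asyncio.run(_main())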
| {
"repo_id": "langchain-ai/langgraph",
"file_path": "libs/checkpoint-conformance/langgraph/checkpoint/conformance/spec/test_put_writes.py",
"license": "MIT License",
"lines": 250,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langgraph:libs/checkpoint-conformance/langgraph/checkpoint/conformance/test_utils.py | """Test utilities: checkpoint generators, assertion helpers, bulk operations."""
from __future__ import annotations
from datetime import datetime, timezone
from typing import Any
from uuid import uuid4
from langchain_core.runnables import RunnableConfig
from langgraph.checkpoint.base import (
ChannelVersions,
Checkpoint,
CheckpointMetadata,
CheckpointTuple,
)
from langgraph.checkpoint.base.id import uuid6
def generate_checkpoint(
*,
checkpoint_id: str | None = None,
channel_values: dict[str, Any] | None = None,
channel_versions: ChannelVersions | None = None,
versions_seen: dict[str, ChannelVersions] | None = None,
) -> Checkpoint:
"""Create a well-formed Checkpoint with sensible defaults."""
return Checkpoint(
v=1,
id=checkpoint_id or str(uuid6(clock_seq=-1)),
ts=datetime.now(timezone.utc).isoformat(),
channel_values=channel_values if channel_values is not None else {},
channel_versions=channel_versions if channel_versions is not None else {},
versions_seen=versions_seen if versions_seen is not None else {},
pending_sends=[], # ty: ignore[invalid-key]
updated_channels=None,
)
def generate_config(
thread_id: str | None = None,
*,
checkpoint_ns: str = "",
checkpoint_id: str | None = None,
) -> RunnableConfig:
"""Create a RunnableConfig targeting a specific thread / namespace / checkpoint."""
configurable: dict[str, Any] = {
"thread_id": thread_id or str(uuid4()),
"checkpoint_ns": checkpoint_ns,
}
if checkpoint_id is not None:
configurable["checkpoint_id"] = checkpoint_id
return {"configurable": configurable}
def generate_metadata(
source: str = "loop",
step: int = 0,
**extra: Any,
) -> CheckpointMetadata:
"""Create CheckpointMetadata with defaults."""
md: dict[str, Any] = {"source": source, "step": step, "parents": {}}
md.update(extra)
return md
async def put_test_checkpoint(
saver: Any,
*,
thread_id: str | None = None,
checkpoint_ns: str = "",
parent_config: RunnableConfig | None = None,
channel_values: dict[str, Any] | None = None,
channel_versions: ChannelVersions | None = None,
metadata: CheckpointMetadata | None = None,
new_versions: ChannelVersions | None = None,
) -> RunnableConfig:
"""Put a single test checkpoint and return the stored config.
Handles wiring up parent_config, channel_values -> new_versions, etc.
"""
tid = thread_id or str(uuid4())
cp = generate_checkpoint(
channel_values=channel_values,
channel_versions=channel_versions,
)
# When channel_values are provided, ensure channel_versions + new_versions
# are consistent so the checkpointer stores the blobs correctly.
vals = channel_values or {}
    cv = channel_versions
    if cv is None and vals:
        cv = {k: 1 for k in vals}
    if cv is not None:
        cp["channel_versions"] = cv
nv = new_versions
if nv is None:
nv = cv or {}
md = metadata or generate_metadata()
config = generate_config(tid, checkpoint_ns=checkpoint_ns)
if parent_config is not None:
config["configurable"]["checkpoint_id"] = parent_config["configurable"][
"checkpoint_id"
]
return await saver.aput(config, cp, md, nv)
async def put_test_checkpoints(
saver: Any,
*,
n_threads: int = 1,
n_checkpoints: int = 1,
namespaces: list[str] | None = None,
channel_values: dict[str, Any] | None = None,
) -> list[RunnableConfig]:
"""Convenience: put multiple checkpoints across threads/namespaces.
Returns the stored configs in insertion order.
"""
nss = namespaces or [""]
stored: list[RunnableConfig] = []
for t in range(n_threads):
tid = f"thread-{t}"
for ns in nss:
parent: RunnableConfig | None = None
for _c in range(n_checkpoints):
cfg = await put_test_checkpoint(
saver,
thread_id=tid,
checkpoint_ns=ns,
parent_config=parent,
channel_values=channel_values,
)
parent = cfg
stored.append(cfg)
return stored
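# Usage sketch (assumes an initialized async saver): two threads, two
# namespaces, and three checkpoints per (thread, namespace) pair yield
# 2 * 2 * 3 = 12 stored configs, returned in insertion order.
#
#     stored = await put_test_checkpoints(
#         saver, n_threads=2, n_checkpoints=3, namespaces=["", "child:1"]
#     )
#     assert len(stored) == 12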
def assert_checkpoint_equal(
actual: Checkpoint,
expected: Checkpoint,
*,
check_channel_values: bool = True,
) -> None:
"""Assert two checkpoints are semantically equal."""
assert actual["v"] == expected["v"], f"v mismatch: {actual['v']} != {expected['v']}"
assert actual["id"] == expected["id"], (
f"id mismatch: {actual['id']} != {expected['id']}"
)
assert actual["channel_versions"] == expected["channel_versions"], (
"channel_versions mismatch"
)
assert actual["versions_seen"] == expected["versions_seen"], (
"versions_seen mismatch"
)
if check_channel_values:
assert actual["channel_values"] == expected["channel_values"], (
"channel_values mismatch"
)
def assert_tuple_equal(
actual: CheckpointTuple,
expected: CheckpointTuple,
*,
check_writes: bool = True,
check_channel_values: bool = True,
) -> None:
"""Assert two CheckpointTuples are semantically equal."""
# Config
a_conf = actual.config["configurable"]
e_conf = expected.config["configurable"]
assert a_conf["thread_id"] == e_conf["thread_id"], (
f"thread_id mismatch: {a_conf['thread_id']} != {e_conf['thread_id']}"
)
assert a_conf.get("checkpoint_ns", "") == e_conf.get("checkpoint_ns", ""), (
"checkpoint_ns mismatch"
)
assert a_conf["checkpoint_id"] == e_conf["checkpoint_id"], "checkpoint_id mismatch"
# Checkpoint
assert_checkpoint_equal(
actual.checkpoint,
expected.checkpoint,
check_channel_values=check_channel_values,
)
# Metadata
for k, v in expected.metadata.items():
assert actual.metadata.get(k) == v, (
f"metadata[{k}] mismatch: {actual.metadata.get(k)} != {v}"
)
# Parent config
if expected.parent_config is not None:
assert actual.parent_config is not None, "expected parent_config, got None"
assert (
actual.parent_config["configurable"]["checkpoint_id"]
== expected.parent_config["configurable"]["checkpoint_id"]
), "parent checkpoint_id mismatch"
else:
assert actual.parent_config is None, (
f"expected no parent_config, got {actual.parent_config}"
)
# Pending writes
if check_writes and expected.pending_writes is not None:
assert actual.pending_writes is not None
assert len(actual.pending_writes) == len(expected.pending_writes), (
f"pending_writes length mismatch: {len(actual.pending_writes)} != {len(expected.pending_writes)}"
)
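# Usage sketch (assumes an initialized async saver): write a checkpoint with
# the helpers above, read it back twice, and compare the resulting tuples.
#
#     stored = await put_test_checkpoint(saver, channel_values={"ch": "v"})
#     first = await saver.aget_tuple(stored)
#     second = await saver.aget_tuple(stored)
#     assert_tuple_equal(second, first)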
| {
"repo_id": "langchain-ai/langgraph",
"file_path": "libs/checkpoint-conformance/langgraph/checkpoint/conformance/test_utils.py",
"license": "MIT License",
"lines": 185,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langgraph:libs/checkpoint-conformance/langgraph/checkpoint/conformance/validate.py | """Core conformance runner — detects capabilities, runs test suites, builds report."""
from __future__ import annotations
from langgraph.checkpoint.conformance.capabilities import (
Capability,
DetectedCapabilities,
)
from langgraph.checkpoint.conformance.initializer import RegisteredCheckpointer
from langgraph.checkpoint.conformance.report import (
CapabilityReport,
CapabilityResult,
ProgressCallbacks,
)
from langgraph.checkpoint.conformance.spec.test_copy_thread import run_copy_thread_tests
from langgraph.checkpoint.conformance.spec.test_delete_for_runs import (
run_delete_for_runs_tests,
)
from langgraph.checkpoint.conformance.spec.test_delete_thread import (
run_delete_thread_tests,
)
from langgraph.checkpoint.conformance.spec.test_get_tuple import run_get_tuple_tests
from langgraph.checkpoint.conformance.spec.test_list import run_list_tests
from langgraph.checkpoint.conformance.spec.test_prune import run_prune_tests
from langgraph.checkpoint.conformance.spec.test_put import run_put_tests
from langgraph.checkpoint.conformance.spec.test_put_writes import run_put_writes_tests
# Maps capability to its runner function.
_RUNNERS = {
Capability.PUT: run_put_tests,
Capability.PUT_WRITES: run_put_writes_tests,
Capability.GET_TUPLE: run_get_tuple_tests,
Capability.LIST: run_list_tests,
Capability.DELETE_THREAD: run_delete_thread_tests,
Capability.DELETE_FOR_RUNS: run_delete_for_runs_tests,
Capability.COPY_THREAD: run_copy_thread_tests,
Capability.PRUNE: run_prune_tests,
}
async def validate(
registered: RegisteredCheckpointer,
*,
capabilities: set[str] | None = None,
progress: ProgressCallbacks | None = None,
) -> CapabilityReport:
"""Run the validation suite against a registered checkpointer.
Args:
registered: A RegisteredCheckpointer (from @checkpointer_test decorator).
capabilities: If given, only run tests for these capability names.
Otherwise, auto-detect and run all applicable tests.
progress: Optional progress callbacks for incremental output.
Use ``ProgressCallbacks.default()`` for dot-style,
``ProgressCallbacks.verbose()`` for per-test output, or
``None`` / ``ProgressCallbacks.quiet()`` for silent mode.
Returns:
A CapabilityReport with per-capability results.
"""
report = CapabilityReport(checkpointer_name=registered.name)
# Determine which capabilities to test.
caps_to_test: set[Capability]
if capabilities is not None:
caps_to_test = {Capability(c) for c in capabilities}
else:
caps_to_test = set(Capability)
async with registered.enter_lifespan():
for cap in Capability:
if cap in caps_to_test and cap.value not in registered.skip_capabilities:
# Create a fresh checkpointer for each capability suite.
async with registered.create() as saver:
detected = DetectedCapabilities.from_instance(saver)
is_detected = cap in detected.detected
if not is_detected:
if progress and progress.on_capability_start:
progress.on_capability_start(cap.value, False)
report.results[cap.value] = CapabilityResult(
detected=False,
passed=None,
tests_skipped=1,
)
continue
runner = _RUNNERS.get(cap)
if runner is None:
report.results[cap.value] = CapabilityResult(
detected=True,
passed=None,
tests_skipped=1,
)
continue
if progress and progress.on_capability_start:
progress.on_capability_start(cap.value, True)
passed, failed, failures = await runner(
saver,
on_test_result=progress.on_test_result if progress else None,
)
if progress and progress.on_capability_end:
progress.on_capability_end(cap.value)
report.results[cap.value] = CapabilityResult(
detected=True,
passed=failed == 0,
tests_passed=passed,
tests_failed=failed,
failures=failures,
)
else:
if progress and progress.on_capability_start:
progress.on_capability_start(cap.value, False)
report.results[cap.value] = CapabilityResult(
detected=False,
passed=None,
tests_skipped=1,
)
return report
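# Usage sketch (mirrors the docstring above): register a checkpointer with the
# ``@checkpointer_test`` decorator from this package, then validate a subset of
# capabilities with per-test progress output.
#
#     from langgraph.checkpoint.conformance import checkpointer_test
#     from langgraph.checkpoint.memory import InMemorySaver
#
#     @checkpointer_test(name="InMemorySaver")
#     async def memory_checkpointer():
#         yield InMemorySaver()
#
#     report = await validate(
#         memory_checkpointer,
#         capabilities={"put", "put_writes"},
#         progress=ProgressCallbacks.verbose(),
#     )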
| {
"repo_id": "langchain-ai/langgraph",
"file_path": "libs/checkpoint-conformance/langgraph/checkpoint/conformance/validate.py",
"license": "MIT License",
"lines": 108,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langgraph:libs/checkpoint-conformance/tests/test_validate_memory.py | """Self-tests: run the conformance suite against InMemorySaver."""
from __future__ import annotations
import pytest
from langgraph.checkpoint.memory import InMemorySaver
from langgraph.checkpoint.conformance import checkpointer_test, validate
@checkpointer_test(name="InMemorySaver")
async def memory_checkpointer():
yield InMemorySaver()
@pytest.mark.asyncio
async def test_validate_memory_base():
"""InMemorySaver passes all base capability tests."""
report = await validate(memory_checkpointer)
report.print_report()
assert report.passed_all_base(), f"Base tests failed: {report.to_dict()}"
| {
"repo_id": "langchain-ai/langgraph",
"file_path": "libs/checkpoint-conformance/tests/test_validate_memory.py",
"license": "MIT License",
"lines": 14,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langgraph:libs/sdk-py/langgraph_sdk/_async/assistants.py | """Async client for managing assistants in LangGraph."""
from __future__ import annotations
from collections.abc import Mapping
from typing import Any, Literal, cast, overload
import httpx
from langgraph_sdk._async.http import HttpClient
from langgraph_sdk.schema import (
Assistant,
AssistantSelectField,
AssistantSortBy,
AssistantsSearchResponse,
AssistantVersion,
Config,
Context,
GraphSchema,
Json,
OnConflictBehavior,
QueryParamTypes,
SortOrder,
Subgraphs,
)
class AssistantsClient:
"""Client for managing assistants in LangGraph.
This class provides methods to interact with assistants,
which are versioned configurations of your graph.
???+ example "Example"
```python
client = get_client(url="http://localhost:2024")
assistant = await client.assistants.get("assistant_id_123")
```
"""
def __init__(self, http: HttpClient) -> None:
self.http = http
async def get(
self,
assistant_id: str,
*,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> Assistant:
"""Get an assistant by ID.
Args:
assistant_id: The ID of the assistant to get.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
Assistant: Assistant Object.
???+ example "Example Usage"
```python
assistant = await client.assistants.get(
assistant_id="my_assistant_id"
)
print(assistant)
```
```shell
----------------------------------------------------
{
'assistant_id': 'my_assistant_id',
'graph_id': 'agent',
'created_at': '2024-06-25T17:10:33.109781+00:00',
'updated_at': '2024-06-25T17:10:33.109781+00:00',
'config': {},
'metadata': {'created_by': 'system'},
'version': 1,
'name': 'my_assistant'
}
```
"""
return await self.http.get(
f"/assistants/{assistant_id}", headers=headers, params=params
)
async def get_graph(
self,
assistant_id: str,
*,
xray: int | bool = False,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> dict[str, list[dict[str, Any]]]:
"""Get the graph of an assistant by ID.
Args:
assistant_id: The ID of the assistant to get the graph of.
xray: Include graph representation of subgraphs. If an integer value is provided, only subgraphs with a depth less than or equal to the value will be included.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
Graph: The graph information for the assistant in JSON format.
???+ example "Example Usage"
```python
client = get_client(url="http://localhost:2024")
graph_info = await client.assistants.get_graph(
assistant_id="my_assistant_id"
)
print(graph_info)
```
```shell
--------------------------------------------------------------------------------------------------------------------------
{
'nodes':
[
{'id': '__start__', 'type': 'schema', 'data': '__start__'},
{'id': '__end__', 'type': 'schema', 'data': '__end__'},
{'id': 'agent','type': 'runnable','data': {'id': ['langgraph', 'utils', 'RunnableCallable'],'name': 'agent'}},
],
'edges':
[
{'source': '__start__', 'target': 'agent'},
{'source': 'agent','target': '__end__'}
]
}
```
"""
query_params = {"xray": xray}
if params:
query_params.update(params)
return await self.http.get(
f"/assistants/{assistant_id}/graph", params=query_params, headers=headers
)
async def get_schemas(
self,
assistant_id: str,
*,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> GraphSchema:
"""Get the schemas of an assistant by ID.
Args:
assistant_id: The ID of the assistant to get the schema of.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
GraphSchema: The graph schema for the assistant.
???+ example "Example Usage"
```python
client = get_client(url="http://localhost:2024")
schema = await client.assistants.get_schemas(
assistant_id="my_assistant_id"
)
print(schema)
```
```shell
----------------------------------------------------------------------------------------------------------------------------
{
'graph_id': 'agent',
'state_schema':
{
'title': 'LangGraphInput',
'$ref': '#/definitions/AgentState',
'definitions':
{
'BaseMessage':
{
'title': 'BaseMessage',
'description': 'Base abstract Message class. Messages are the inputs and outputs of ChatModels.',
'type': 'object',
'properties':
{
'content':
{
'title': 'Content',
'anyOf': [
{'type': 'string'},
{'type': 'array','items': {'anyOf': [{'type': 'string'}, {'type': 'object'}]}}
]
},
'additional_kwargs':
{
'title': 'Additional Kwargs',
'type': 'object'
},
'response_metadata':
{
'title': 'Response Metadata',
'type': 'object'
},
'type':
{
'title': 'Type',
'type': 'string'
},
'name':
{
'title': 'Name',
'type': 'string'
},
'id':
{
'title': 'Id',
'type': 'string'
}
},
'required': ['content', 'type']
},
'AgentState':
{
'title': 'AgentState',
'type': 'object',
'properties':
{
'messages':
{
'title': 'Messages',
'type': 'array',
'items': {'$ref': '#/definitions/BaseMessage'}
}
},
'required': ['messages']
}
}
},
'context_schema':
{
'title': 'Context',
'type': 'object',
'properties':
{
'model_name':
{
'title': 'Model Name',
'enum': ['anthropic', 'openai'],
'type': 'string'
}
}
}
}
```
"""
return await self.http.get(
f"/assistants/{assistant_id}/schemas", headers=headers, params=params
)
async def get_subgraphs(
self,
assistant_id: str,
namespace: str | None = None,
recurse: bool = False,
*,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> Subgraphs:
"""Get the schemas of an assistant by ID.
Args:
            assistant_id: The ID of the assistant to get subgraphs of.
namespace: Optional namespace to filter by.
recurse: Whether to recursively get subgraphs.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
            Subgraphs: The subgraphs of the assistant.
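        ???+ example "Example Usage"
            ```python
            client = get_client(url="http://localhost:2024")
            subgraphs = await client.assistants.get_subgraphs(
                assistant_id="my_assistant_id",
                recurse=True,
            )
            ```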
"""
get_params = {"recurse": recurse}
if params:
get_params = {**get_params, **dict(params)}
if namespace is not None:
return await self.http.get(
f"/assistants/{assistant_id}/subgraphs/{namespace}",
params=get_params,
headers=headers,
)
else:
return await self.http.get(
f"/assistants/{assistant_id}/subgraphs",
params=get_params,
headers=headers,
)
async def create(
self,
graph_id: str | None,
config: Config | None = None,
*,
context: Context | None = None,
metadata: Json = None,
assistant_id: str | None = None,
if_exists: OnConflictBehavior | None = None,
name: str | None = None,
headers: Mapping[str, str] | None = None,
description: str | None = None,
params: QueryParamTypes | None = None,
) -> Assistant:
"""Create a new assistant.
Useful when graph is configurable and you want to create different assistants based on different configurations.
Args:
graph_id: The ID of the graph the assistant should use. The graph ID is normally set in your langgraph.json configuration.
config: Configuration to use for the graph.
metadata: Metadata to add to assistant.
context: Static context to add to the assistant.
!!! version-added "Added in version 0.6.0"
assistant_id: Assistant ID to use, will default to a random UUID if not provided.
if_exists: How to handle duplicate creation. Defaults to 'raise' under the hood.
Must be either 'raise' (raise error if duplicate), or 'do_nothing' (return existing assistant).
name: The name of the assistant. Defaults to 'Untitled' under the hood.
headers: Optional custom headers to include with the request.
description: Optional description of the assistant.
The description field is available for langgraph-api server version>=0.0.45
params: Optional query parameters to include with the request.
Returns:
Assistant: The created assistant.
???+ example "Example Usage"
```python
client = get_client(url="http://localhost:2024")
assistant = await client.assistants.create(
graph_id="agent",
context={"model_name": "openai"},
metadata={"number":1},
assistant_id="my-assistant-id",
if_exists="do_nothing",
name="my_name"
)
```
"""
payload: dict[str, Any] = {
"graph_id": graph_id,
}
if config:
payload["config"] = config
if context:
payload["context"] = context
if metadata:
payload["metadata"] = metadata
if assistant_id:
payload["assistant_id"] = assistant_id
if if_exists:
payload["if_exists"] = if_exists
if name:
payload["name"] = name
if description:
payload["description"] = description
return await self.http.post(
"/assistants", json=payload, headers=headers, params=params
)
async def update(
self,
assistant_id: str,
*,
graph_id: str | None = None,
config: Config | None = None,
context: Context | None = None,
metadata: Json = None,
name: str | None = None,
headers: Mapping[str, str] | None = None,
description: str | None = None,
params: QueryParamTypes | None = None,
) -> Assistant:
"""Update an assistant.
Use this to point to a different graph, update the configuration, or change the metadata of an assistant.
Args:
assistant_id: Assistant to update.
graph_id: The ID of the graph the assistant should use.
The graph ID is normally set in your langgraph.json configuration. If `None`, assistant will keep pointing to same graph.
config: Configuration to use for the graph.
context: Static context to add to the assistant.
!!! version-added "Added in version 0.6.0"
metadata: Metadata to merge with existing assistant metadata.
name: The new name for the assistant.
headers: Optional custom headers to include with the request.
description: Optional description of the assistant.
The description field is available for langgraph-api server version>=0.0.45
params: Optional query parameters to include with the request.
Returns:
The updated assistant.
???+ example "Example Usage"
```python
client = get_client(url="http://localhost:2024")
assistant = await client.assistants.update(
assistant_id='e280dad7-8618-443f-87f1-8e41841c180f',
graph_id="other-graph",
context={"model_name": "anthropic"},
metadata={"number":2}
)
```
"""
payload: dict[str, Any] = {}
if graph_id:
payload["graph_id"] = graph_id
if config is not None:
payload["config"] = config
if context is not None:
payload["context"] = context
if metadata:
payload["metadata"] = metadata
if name:
payload["name"] = name
if description:
payload["description"] = description
return await self.http.patch(
f"/assistants/{assistant_id}",
json=payload,
headers=headers,
params=params,
)
async def delete(
self,
assistant_id: str,
*,
delete_threads: bool = False,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> None:
"""Delete an assistant.
Args:
assistant_id: The assistant ID to delete.
delete_threads: If true, delete all threads with `metadata.assistant_id`
matching this assistant, along with runs and checkpoints belonging to
those threads.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
`None`
???+ example "Example Usage"
```python
client = get_client(url="http://localhost:2024")
await client.assistants.delete(
assistant_id="my_assistant_id"
)
```
"""
query_params: dict[str, Any] = {}
if delete_threads:
query_params["delete_threads"] = True
if params:
query_params.update(params)
await self.http.delete(
f"/assistants/{assistant_id}",
headers=headers,
params=query_params or None,
)
@overload
async def search(
self,
*,
metadata: Json = None,
graph_id: str | None = None,
name: str | None = None,
limit: int = 10,
offset: int = 0,
sort_by: AssistantSortBy | None = None,
sort_order: SortOrder | None = None,
select: list[AssistantSelectField] | None = None,
response_format: Literal["object"],
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> AssistantsSearchResponse: ...
@overload
async def search(
self,
*,
metadata: Json = None,
graph_id: str | None = None,
name: str | None = None,
limit: int = 10,
offset: int = 0,
sort_by: AssistantSortBy | None = None,
sort_order: SortOrder | None = None,
select: list[AssistantSelectField] | None = None,
response_format: Literal["array"] = "array",
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> list[Assistant]: ...
async def search(
self,
*,
metadata: Json = None,
graph_id: str | None = None,
name: str | None = None,
limit: int = 10,
offset: int = 0,
sort_by: AssistantSortBy | None = None,
sort_order: SortOrder | None = None,
select: list[AssistantSelectField] | None = None,
response_format: Literal["array", "object"] = "array",
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> AssistantsSearchResponse | list[Assistant]:
"""Search for assistants.
Args:
metadata: Metadata to filter by. Exact match filter for each KV pair.
graph_id: The ID of the graph to filter by.
The graph ID is normally set in your langgraph.json configuration.
name: The name of the assistant to filter by.
The filtering logic will match assistants where 'name' is a substring (case insensitive) of the assistant name.
limit: The maximum number of results to return.
offset: The number of results to skip.
sort_by: The field to sort by.
sort_order: The order to sort by.
select: Specific assistant fields to include in the response.
response_format: Controls the response shape. Use `"array"` (default)
to return a bare list of assistants, or `"object"` to return
a mapping containing assistants plus pagination metadata.
Defaults to "array", though this default will be changed to "object" in a future release.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
A list of assistants (when `response_format="array"`) or a mapping
with the assistants and the next pagination cursor (when
`response_format="object"`).
???+ example "Example Usage"
```python
client = get_client(url="http://localhost:2024")
response = await client.assistants.search(
metadata = {"name":"my_name"},
graph_id="my_graph_id",
limit=5,
offset=5,
response_format="object"
)
next_cursor = response["next"]
assistants = response["assistants"]
```
"""
if response_format not in ("array", "object"):
raise ValueError(
f"response_format must be 'array' or 'object', got {response_format!r}"
)
payload: dict[str, Any] = {
"limit": limit,
"offset": offset,
}
if metadata:
payload["metadata"] = metadata
if graph_id:
payload["graph_id"] = graph_id
if name:
payload["name"] = name
if sort_by:
payload["sort_by"] = sort_by
if sort_order:
payload["sort_order"] = sort_order
if select:
payload["select"] = select
next_cursor: str | None = None
def capture_pagination(response: httpx.Response) -> None:
nonlocal next_cursor
next_cursor = response.headers.get("X-Pagination-Next")
assistants = cast(
list[Assistant],
await self.http.post(
"/assistants/search",
json=payload,
headers=headers,
params=params,
on_response=capture_pagination if response_format == "object" else None,
),
)
if response_format == "object":
return {"assistants": assistants, "next": next_cursor}
return assistants
async def count(
self,
*,
metadata: Json = None,
graph_id: str | None = None,
name: str | None = None,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> int:
"""Count assistants matching filters.
Args:
metadata: Metadata to filter by. Exact match for each key/value.
graph_id: Optional graph id to filter by.
name: Optional name to filter by.
The filtering logic will match assistants where 'name' is a substring (case insensitive) of the assistant name.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
int: Number of assistants matching the criteria.
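        ???+ example "Example Usage"
            ```python
            client = get_client(url="http://localhost:2024")
            total = await client.assistants.count(
                graph_id="my_graph_id"
            )
            print(total)
            ```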
"""
payload: dict[str, Any] = {}
if metadata:
payload["metadata"] = metadata
if graph_id:
payload["graph_id"] = graph_id
if name:
payload["name"] = name
return await self.http.post(
"/assistants/count", json=payload, headers=headers, params=params
)
async def get_versions(
self,
assistant_id: str,
metadata: Json = None,
limit: int = 10,
offset: int = 0,
*,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> list[AssistantVersion]:
"""List all versions of an assistant.
Args:
assistant_id: The assistant ID to get versions for.
metadata: Metadata to filter versions by. Exact match filter for each KV pair.
limit: The maximum number of versions to return.
offset: The number of versions to skip.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
A list of assistant versions.
???+ example "Example Usage"
```python
client = get_client(url="http://localhost:2024")
assistant_versions = await client.assistants.get_versions(
assistant_id="my_assistant_id"
)
```
"""
payload: dict[str, Any] = {
"limit": limit,
"offset": offset,
}
if metadata:
payload["metadata"] = metadata
return await self.http.post(
f"/assistants/{assistant_id}/versions",
json=payload,
headers=headers,
params=params,
)
async def set_latest(
self,
assistant_id: str,
version: int,
*,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> Assistant:
"""Change the version of an assistant.
Args:
            assistant_id: The assistant ID to update the version of.
version: The version to change to.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
Assistant Object.
???+ example "Example Usage"
```python
client = get_client(url="http://localhost:2024")
new_version_assistant = await client.assistants.set_latest(
assistant_id="my_assistant_id",
version=3
)
```
"""
payload: dict[str, Any] = {"version": version}
return await self.http.post(
f"/assistants/{assistant_id}/latest",
json=payload,
headers=headers,
params=params,
)
| {
"repo_id": "langchain-ai/langgraph",
"file_path": "libs/sdk-py/langgraph_sdk/_async/assistants.py",
"license": "MIT License",
"lines": 647,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
langchain-ai/langgraph:libs/sdk-py/langgraph_sdk/_async/client.py | """Async LangGraph client."""
from __future__ import annotations
import logging
import os
from collections.abc import Mapping
from types import TracebackType
import httpx
from langgraph_sdk._async.assistants import AssistantsClient
from langgraph_sdk._async.cron import CronClient
from langgraph_sdk._async.http import HttpClient
from langgraph_sdk._async.runs import RunsClient
from langgraph_sdk._async.store import StoreClient
from langgraph_sdk._async.threads import ThreadsClient
from langgraph_sdk._shared.types import TimeoutTypes
from langgraph_sdk._shared.utilities import (
NOT_PROVIDED,
_get_headers,
_registered_transports,
get_asgi_transport,
)
logger = logging.getLogger(__name__)
def get_client(
*,
url: str | None = None,
api_key: str | None = NOT_PROVIDED,
headers: Mapping[str, str] | None = None,
timeout: TimeoutTypes | None = None,
) -> LangGraphClient:
"""Create and configure a LangGraphClient.
The client provides programmatic access to LangSmith Deployment. It supports
both remote servers and local in-process connections (when running inside a LangGraph server).
Args:
url:
Base URL of the LangGraph API.
- If `None`, the client first attempts an in-process connection via ASGI transport.
If that fails, it defers registration until after app initialization. This
only works if the client is used from within the Agent server.
api_key:
API key for authentication. Can be:
- A string: use this exact API key
- `None`: explicitly skip loading from environment variables
- Not provided (default): auto-load from environment in this order:
1. `LANGGRAPH_API_KEY`
2. `LANGSMITH_API_KEY`
3. `LANGCHAIN_API_KEY`
headers:
Additional HTTP headers to include in requests. Merged with authentication headers.
timeout:
HTTP timeout configuration. May be:
- `httpx.Timeout` instance
- float (total seconds)
- tuple `(connect, read, write, pool)` in seconds
Defaults: connect=5, read=300, write=300, pool=5.
Returns:
LangGraphClient:
A top-level client exposing sub-clients for assistants, threads,
runs, and cron operations.
???+ example "Connect to a remote server:"
```python
from langgraph_sdk import get_client
# get top-level LangGraphClient
client = get_client(url="http://localhost:8123")
# example usage: client.<model>.<method_name>()
assistants = await client.assistants.get(assistant_id="some_uuid")
```
???+ example "Connect in-process to a running LangGraph server:"
```python
from langgraph_sdk import get_client
client = get_client(url=None)
async def my_node(...):
subagent_result = await client.runs.wait(
thread_id=None,
assistant_id="agent",
input={"messages": [{"role": "user", "content": "Foo"}]},
)
```
???+ example "Skip auto-loading API key from environment:"
```python
from langgraph_sdk import get_client
# Don't load API key from environment variables
client = get_client(
url="http://localhost:8123",
api_key=None
)
```
"""
transport: httpx.AsyncBaseTransport | None = None
if url is None:
url = "http://api"
if os.environ.get("__LANGGRAPH_DEFER_LOOPBACK_TRANSPORT") == "true":
transport = get_asgi_transport()(app=None, root_path="/noauth") # type: ignore[invalid-argument-type]
_registered_transports.append(transport)
else:
try:
from langgraph_api.server import app # type: ignore
transport = get_asgi_transport()(app, root_path="/noauth")
except Exception:
logger.debug(
"Failed to connect to in-process LangGraph server. Deferring configuration.",
exc_info=True,
)
transport = get_asgi_transport()(app=None, root_path="/noauth") # type: ignore[invalid-argument-type]
_registered_transports.append(transport)
if transport is None:
transport = httpx.AsyncHTTPTransport(retries=5)
client = httpx.AsyncClient(
base_url=url,
transport=transport,
timeout=(
httpx.Timeout(timeout) # type: ignore[arg-type]
if timeout is not None
else httpx.Timeout(connect=5, read=300, write=300, pool=5)
),
headers=_get_headers(api_key, headers),
)
return LangGraphClient(client)
class LangGraphClient:
"""Top-level client for LangGraph API.
Attributes:
assistants: Manages versioned configuration for your graphs.
threads: Handles (potentially) multi-turn interactions, such as conversational threads.
runs: Controls individual invocations of the graph.
crons: Manages scheduled operations.
store: Interfaces with persistent, shared data storage.
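    ???+ example "Example"
        ```python
        # Used as an async context manager, the client closes its underlying
        # HTTP client on exit (see ``aclose`` below).
        async with get_client(url="http://localhost:2024") as client:
            assistants = await client.assistants.search()
        ```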
"""
def __init__(self, client: httpx.AsyncClient) -> None:
self.http = HttpClient(client)
self.assistants = AssistantsClient(self.http)
self.threads = ThreadsClient(self.http)
self.runs = RunsClient(self.http)
self.crons = CronClient(self.http)
self.store = StoreClient(self.http)
async def __aenter__(self) -> LangGraphClient:
"""Enter the async context manager."""
return self
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> None:
"""Exit the async context manager."""
await self.aclose()
async def aclose(self) -> None:
"""Close the underlying HTTP client."""
if hasattr(self, "http"):
await self.http.client.aclose()
| {
"repo_id": "langchain-ai/langgraph",
"file_path": "libs/sdk-py/langgraph_sdk/_async/client.py",
"license": "MIT License",
"lines": 147,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
langchain-ai/langgraph:libs/sdk-py/langgraph_sdk/_async/cron.py | """Async client for managing recurrent runs (cron jobs) in LangGraph."""
from __future__ import annotations
import warnings
from collections.abc import Mapping, Sequence
from datetime import datetime
from typing import Any
from langgraph_sdk._async.http import HttpClient
from langgraph_sdk.schema import (
All,
Config,
Context,
Cron,
CronSelectField,
CronSortBy,
Durability,
Input,
OnCompletionBehavior,
QueryParamTypes,
Run,
SortOrder,
StreamMode,
)
class CronClient:
"""Client for managing recurrent runs (cron jobs) in LangGraph.
A run is a single invocation of an assistant with optional input, config, and context.
This client allows scheduling recurring runs to occur automatically.
???+ example "Example Usage"
```python
        client = get_client(url="http://localhost:2024")
cron_job = await client.crons.create_for_thread(
thread_id="thread_123",
assistant_id="asst_456",
schedule="0 9 * * *",
input={"message": "Daily update"}
)
```
!!! note "Feature Availability"
The crons client functionality is not supported on all licenses.
Please check the relevant license documentation for the most up-to-date
details on feature availability.
"""
def __init__(self, http_client: HttpClient) -> None:
self.http = http_client
async def create_for_thread(
self,
thread_id: str,
assistant_id: str,
*,
schedule: str,
input: Input | None = None,
metadata: Mapping[str, Any] | None = None,
config: Config | None = None,
context: Context | None = None,
checkpoint_during: bool | None = None, # deprecated
interrupt_before: All | list[str] | None = None,
interrupt_after: All | list[str] | None = None,
webhook: str | None = None,
multitask_strategy: str | None = None,
end_time: datetime | None = None,
enabled: bool | None = None,
stream_mode: StreamMode | Sequence[StreamMode] | None = None,
stream_subgraphs: bool | None = None,
stream_resumable: bool | None = None,
durability: Durability | None = None,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> Run:
"""Create a cron job for a thread.
Args:
thread_id: the thread ID to run the cron job on.
assistant_id: The assistant ID or graph name to use for the cron job.
If using graph name, will default to first assistant created from that graph.
schedule: The cron schedule to execute this job on.
Schedules are interpreted in UTC.
input: The input to the graph.
metadata: Metadata to assign to the cron job runs.
config: The configuration for the assistant.
context: Static context to add to the assistant.
!!! version-added "Added in version 0.6.0"
checkpoint_during: (deprecated) Whether to checkpoint during the run (or only at the end/interruption).
interrupt_before: Nodes to interrupt immediately before they get executed.
            interrupt_after: Nodes to interrupt immediately after they get executed.
webhook: Webhook to call after LangGraph API call is done.
multitask_strategy: Multitask strategy to use.
Must be one of 'reject', 'interrupt', 'rollback', or 'enqueue'.
end_time: The time to stop running the cron job. If not provided, the cron job will run indefinitely.
enabled: Whether the cron job is enabled or not.
stream_mode: The stream mode(s) to use.
stream_subgraphs: Whether to stream output from subgraphs.
stream_resumable: Whether to persist the stream chunks in order to resume the stream later.
durability: Durability level for the run. Must be one of 'sync', 'async', or 'exit'.
"async" means checkpoints are persisted async while next graph step executes, replaces checkpoint_during=True
"sync" means checkpoints are persisted sync after graph step executes, replaces checkpoint_during=False
"exit" means checkpoints are only persisted when the run exits, does not save intermediate steps
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
The cron run.
???+ example "Example Usage"
```python
client = get_client(url="http://localhost:2024")
cron_run = await client.crons.create_for_thread(
thread_id="my-thread-id",
assistant_id="agent",
schedule="27 15 * * *",
input={"messages": [{"role": "user", "content": "hello!"}]},
metadata={"name":"my_run"},
context={"model_name": "openai"},
interrupt_before=["node_to_stop_before_1","node_to_stop_before_2"],
interrupt_after=["node_to_stop_after_1","node_to_stop_after_2"],
webhook="https://my.fake.webhook.com",
multitask_strategy="interrupt",
enabled=True,
)
```
"""
if checkpoint_during is not None:
warnings.warn(
"`checkpoint_during` is deprecated and will be removed in a future version. Use `durability` instead.",
DeprecationWarning,
stacklevel=2,
)
payload = {
"schedule": schedule,
"input": input,
"config": config,
"metadata": metadata,
"context": context,
"assistant_id": assistant_id,
"checkpoint_during": checkpoint_during,
"interrupt_before": interrupt_before,
"interrupt_after": interrupt_after,
"webhook": webhook,
"end_time": end_time.isoformat() if end_time else None,
"enabled": enabled,
"stream_mode": stream_mode,
"stream_subgraphs": stream_subgraphs,
"stream_resumable": stream_resumable,
"durability": durability,
}
if multitask_strategy:
payload["multitask_strategy"] = multitask_strategy
payload = {k: v for k, v in payload.items() if v is not None}
return await self.http.post(
f"/threads/{thread_id}/runs/crons",
json=payload,
headers=headers,
params=params,
)
async def create(
self,
assistant_id: str,
*,
schedule: str,
input: Input | None = None,
metadata: Mapping[str, Any] | None = None,
config: Config | None = None,
context: Context | None = None,
checkpoint_during: bool | None = None, # deprecated
interrupt_before: All | list[str] | None = None,
interrupt_after: All | list[str] | None = None,
webhook: str | None = None,
on_run_completed: OnCompletionBehavior | None = None,
multitask_strategy: str | None = None,
end_time: datetime | None = None,
enabled: bool | None = None,
stream_mode: StreamMode | Sequence[StreamMode] | None = None,
stream_subgraphs: bool | None = None,
stream_resumable: bool | None = None,
durability: Durability | None = None,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> Run:
"""Create a cron run.
Args:
assistant_id: The assistant ID or graph name to use for the cron job.
If using graph name, will default to first assistant created from that graph.
schedule: The cron schedule to execute this job on.
Schedules are interpreted in UTC.
input: The input to the graph.
metadata: Metadata to assign to the cron job runs.
config: The configuration for the assistant.
context: Static context to add to the assistant.
!!! version-added "Added in version 0.6.0"
checkpoint_during: (deprecated) Whether to checkpoint during the run (or only at the end/interruption).
interrupt_before: Nodes to interrupt immediately before they get executed.
            interrupt_after: Nodes to interrupt immediately after they get executed.
webhook: Webhook to call after LangGraph API call is done.
on_run_completed: What to do with the thread after the run completes.
Must be one of 'delete' (default) or 'keep'. 'delete' removes the thread
after execution. 'keep' creates a new thread for each execution but does not
clean them up. Clients are responsible for cleaning up kept threads.
multitask_strategy: Multitask strategy to use.
Must be one of 'reject', 'interrupt', 'rollback', or 'enqueue'.
end_time: The time to stop running the cron job. If not provided, the cron job will run indefinitely.
enabled: Whether the cron job is enabled or not.
stream_mode: The stream mode(s) to use.
stream_subgraphs: Whether to stream output from subgraphs.
stream_resumable: Whether to persist the stream chunks in order to resume the stream later.
durability: Durability level for the run. Must be one of 'sync', 'async', or 'exit'.
"async" means checkpoints are persisted async while next graph step executes, replaces checkpoint_during=True
"sync" means checkpoints are persisted sync after graph step executes, replaces checkpoint_during=False
"exit" means checkpoints are only persisted when the run exits, does not save intermediate steps
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
The cron run.
???+ example "Example Usage"
```python
client = get_client(url="http://localhost:2024")
            cron_run = await client.crons.create(
assistant_id="agent",
schedule="27 15 * * *",
input={"messages": [{"role": "user", "content": "hello!"}]},
metadata={"name":"my_run"},
context={"model_name": "openai"},
interrupt_before=["node_to_stop_before_1","node_to_stop_before_2"],
interrupt_after=["node_to_stop_after_1","node_to_stop_after_2"],
webhook="https://my.fake.webhook.com",
multitask_strategy="interrupt",
enabled=True,
)
```
"""
if checkpoint_during is not None:
warnings.warn(
"`checkpoint_during` is deprecated and will be removed in a future version. Use `durability` instead.",
DeprecationWarning,
stacklevel=2,
)
payload = {
"schedule": schedule,
"input": input,
"config": config,
"metadata": metadata,
"context": context,
"assistant_id": assistant_id,
"checkpoint_during": checkpoint_during,
"interrupt_before": interrupt_before,
"interrupt_after": interrupt_after,
"webhook": webhook,
"on_run_completed": on_run_completed,
"end_time": end_time.isoformat() if end_time else None,
"enabled": enabled,
"stream_mode": stream_mode,
"stream_subgraphs": stream_subgraphs,
"stream_resumable": stream_resumable,
"durability": durability,
}
if multitask_strategy:
payload["multitask_strategy"] = multitask_strategy
payload = {k: v for k, v in payload.items() if v is not None}
return await self.http.post(
"/runs/crons", json=payload, headers=headers, params=params
)
async def delete(
self,
cron_id: str,
*,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> None:
"""Delete a cron.
Args:
cron_id: The cron ID to delete.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
`None`
???+ example "Example Usage"
```python
client = get_client(url="http://localhost:2024")
await client.crons.delete(
cron_id="cron_to_delete"
)
```
"""
await self.http.delete(f"/runs/crons/{cron_id}", headers=headers, params=params)
async def update(
self,
cron_id: str,
*,
schedule: str | None = None,
end_time: datetime | None = None,
input: Input | None = None,
metadata: Mapping[str, Any] | None = None,
config: Config | None = None,
context: Context | None = None,
webhook: str | None = None,
interrupt_before: All | list[str] | None = None,
interrupt_after: All | list[str] | None = None,
on_run_completed: OnCompletionBehavior | None = None,
enabled: bool | None = None,
stream_mode: StreamMode | Sequence[StreamMode] | None = None,
stream_subgraphs: bool | None = None,
stream_resumable: bool | None = None,
durability: Durability | None = None,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> Cron:
"""Update a cron job by ID.
Args:
cron_id: The cron ID to update.
schedule: The cron schedule to execute this job on.
Schedules are interpreted in UTC.
end_time: The end date to stop running the cron.
input: The input to the graph.
metadata: Metadata to assign to the cron job runs.
config: The configuration for the assistant.
context: Static context added to the assistant.
webhook: Webhook to call after LangGraph API call is done.
interrupt_before: Nodes to interrupt immediately before they get executed.
interrupt_after: Nodes to interrupt immediately after they get executed.
on_run_completed: What to do with the thread after the run completes.
Must be one of 'delete' or 'keep'. 'delete' removes the thread
after execution. 'keep' creates a new thread for each execution but does not
clean them up.
enabled: Enable or disable the cron job.
stream_mode: The stream mode(s) to use.
stream_subgraphs: Whether to stream output from subgraphs.
stream_resumable: Whether to persist the stream chunks in order to resume the stream later.
durability: Durability level for the run. Must be one of 'sync', 'async', or 'exit'.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
The updated cron job.
???+ example "Example Usage"
```python
client = get_client(url="http://localhost:2024")
updated_cron = await client.crons.update(
cron_id="1ef3cefa-4c09-6926-96d0-3dc97fd5e39b",
schedule="0 10 * * *",
enabled=False,
)
```
"""
payload = {
"schedule": schedule,
"end_time": end_time.isoformat() if end_time else None,
"input": input,
"metadata": metadata,
"config": config,
"context": context,
"webhook": webhook,
"interrupt_before": interrupt_before,
"interrupt_after": interrupt_after,
"on_run_completed": on_run_completed,
"enabled": enabled,
"stream_mode": stream_mode,
"stream_subgraphs": stream_subgraphs,
"stream_resumable": stream_resumable,
"durability": durability,
}
payload = {k: v for k, v in payload.items() if v is not None}
return await self.http.patch(
f"/runs/crons/{cron_id}",
json=payload,
headers=headers,
params=params,
)
async def search(
self,
*,
assistant_id: str | None = None,
thread_id: str | None = None,
enabled: bool | None = None,
limit: int = 10,
offset: int = 0,
sort_by: CronSortBy | None = None,
sort_order: SortOrder | None = None,
select: list[CronSelectField] | None = None,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> list[Cron]:
"""Get a list of cron jobs.
Args:
assistant_id: The assistant ID or graph name to search for.
thread_id: The thread ID to search for.
enabled: The enabled status to search for.
limit: The maximum number of results to return.
offset: The number of results to skip.
sort_by: Sort by field.
sort_order: Sort order.
select: List of fields to include in the response.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
The list of cron jobs returned by the search.
???+ example "Example Usage"
```python
client = get_client(url="http://localhost:2024")
cron_jobs = await client.crons.search(
assistant_id="my_assistant_id",
thread_id="my_thread_id",
enabled=True,
limit=5,
offset=5,
)
print(cron_jobs)
```
```shell
----------------------------------------------------------
[
{
'cron_id': '1ef3cefa-4c09-6926-96d0-3dc97fd5e39b',
'assistant_id': 'my_assistant_id',
'thread_id': 'my_thread_id',
'user_id': None,
'payload':
{
'input': {'start_time': ''},
'schedule': '4 * * * *',
'assistant_id': 'my_assistant_id'
},
'schedule': '4 * * * *',
'next_run_date': '2024-07-25T17:04:00+00:00',
'end_time': None,
'created_at': '2024-07-08T06:02:23.073257+00:00',
'updated_at': '2024-07-08T06:02:23.073257+00:00'
}
]
```
"""
payload = {
"assistant_id": assistant_id,
"thread_id": thread_id,
"enabled": enabled,
"limit": limit,
"offset": offset,
}
if sort_by:
payload["sort_by"] = sort_by
if sort_order:
payload["sort_order"] = sort_order
if select:
payload["select"] = select
payload = {k: v for k, v in payload.items() if v is not None}
return await self.http.post(
"/runs/crons/search", json=payload, headers=headers, params=params
)
async def count(
self,
*,
assistant_id: str | None = None,
thread_id: str | None = None,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> int:
"""Count cron jobs matching filters.
Args:
assistant_id: Assistant ID to filter by.
thread_id: Thread ID to filter by.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
int: Number of crons matching the criteria.
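???+ example "Example Usage"
A minimal sketch; the assistant ID is illustrative.
```python
client = get_client(url="http://localhost:2024")
count = await client.crons.count(
assistant_id="my_assistant_id",
)
print(count)
```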
"""
payload: dict[str, Any] = {}
if assistant_id:
payload["assistant_id"] = assistant_id
if thread_id:
payload["thread_id"] = thread_id
return await self.http.post(
"/runs/crons/count", json=payload, headers=headers, params=params
)
| {
"repo_id": "langchain-ai/langgraph",
"file_path": "libs/sdk-py/langgraph_sdk/_async/cron.py",
"license": "MIT License",
"lines": 462,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
langchain-ai/langgraph:libs/sdk-py/langgraph_sdk/_async/http.py | """HTTP client for async operations."""
from __future__ import annotations
import asyncio
import logging
import sys
import warnings
from collections.abc import AsyncIterator, Callable, Mapping
from typing import Any, cast
import httpx
import orjson
from langgraph_sdk._shared.utilities import _orjson_default
from langgraph_sdk.errors import _araise_for_status_typed
from langgraph_sdk.schema import QueryParamTypes, StreamPart
from langgraph_sdk.sse import SSEDecoder, aiter_lines_raw
logger = logging.getLogger(__name__)
class HttpClient:
"""Handle async requests to the LangGraph API.
Adds additional error messaging & content handling above the
provided httpx client.
Attributes:
client (httpx.AsyncClient): Underlying HTTPX async client.
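???+ example "Example"
A minimal sketch; the base URL and request path are illustrative.
```python
import httpx
http = HttpClient(httpx.AsyncClient(base_url="http://localhost:2024"))
thread = await http.get("/threads/my_thread_id")
```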
"""
def __init__(self, client: httpx.AsyncClient) -> None:
self.client = client
async def get(
self,
path: str,
*,
params: QueryParamTypes | None = None,
headers: Mapping[str, str] | None = None,
on_response: Callable[[httpx.Response], None] | None = None,
) -> Any:
"""Send a `GET` request."""
r = await self.client.get(path, params=params, headers=headers)
if on_response:
on_response(r)
await _araise_for_status_typed(r)
return await _adecode_json(r)
async def post(
self,
path: str,
*,
json: dict[str, Any] | list | None,
params: QueryParamTypes | None = None,
headers: Mapping[str, str] | None = None,
on_response: Callable[[httpx.Response], None] | None = None,
) -> Any:
"""Send a `POST` request."""
if json is not None:
request_headers, content = await _aencode_json(json)
else:
request_headers, content = {}, b""
# Merge headers, with runtime headers taking precedence
if headers:
request_headers.update(headers)
r = await self.client.post(
path, headers=request_headers, content=content, params=params
)
if on_response:
on_response(r)
await _araise_for_status_typed(r)
return await _adecode_json(r)
async def put(
self,
path: str,
*,
json: dict,
params: QueryParamTypes | None = None,
headers: Mapping[str, str] | None = None,
on_response: Callable[[httpx.Response], None] | None = None,
) -> Any:
"""Send a `PUT` request."""
request_headers, content = await _aencode_json(json)
if headers:
request_headers.update(headers)
r = await self.client.put(
path, headers=request_headers, content=content, params=params
)
if on_response:
on_response(r)
await _araise_for_status_typed(r)
return await _adecode_json(r)
async def patch(
self,
path: str,
*,
json: dict,
params: QueryParamTypes | None = None,
headers: Mapping[str, str] | None = None,
on_response: Callable[[httpx.Response], None] | None = None,
) -> Any:
"""Send a `PATCH` request."""
request_headers, content = await _aencode_json(json)
if headers:
request_headers.update(headers)
r = await self.client.patch(
path, headers=request_headers, content=content, params=params
)
if on_response:
on_response(r)
await _araise_for_status_typed(r)
return await _adecode_json(r)
async def delete(
self,
path: str,
*,
json: Any | None = None,
params: QueryParamTypes | None = None,
headers: Mapping[str, str] | None = None,
on_response: Callable[[httpx.Response], None] | None = None,
) -> None:
"""Send a `DELETE` request."""
r = await self.client.request(
"DELETE", path, json=json, params=params, headers=headers
)
if on_response:
on_response(r)
await _araise_for_status_typed(r)
async def request_reconnect(
self,
path: str,
method: str,
*,
json: dict[str, Any] | None = None,
params: QueryParamTypes | None = None,
headers: Mapping[str, str] | None = None,
on_response: Callable[[httpx.Response], None] | None = None,
reconnect_limit: int = 5,
) -> Any:
"""Send a request that automatically reconnects to Location header."""
request_headers, content = await _aencode_json(json)
if headers:
request_headers.update(headers)
async with self.client.stream(
method, path, headers=request_headers, content=content, params=params
) as r:
if on_response:
on_response(r)
try:
r.raise_for_status()
except httpx.HTTPStatusError as e:
body = (await r.aread()).decode()
if sys.version_info >= (3, 11):
e.add_note(body)
else:
logger.error(f"Error from langgraph-api: {body}", exc_info=e)
raise e
loc = r.headers.get("location")
if reconnect_limit <= 0 or not loc:
return await _adecode_json(r)
try:
return await _adecode_json(r)
except httpx.HTTPError:
warnings.warn(
f"Request failed, attempting reconnect to Location: {loc}",
stacklevel=2,
)
await r.aclose()
return await self.request_reconnect(
loc,
"GET",
headers=request_headers,
# don't pass on_response so it's only called once
reconnect_limit=reconnect_limit - 1,
)
async def stream(
self,
path: str,
method: str,
*,
json: dict[str, Any] | None = None,
params: QueryParamTypes | None = None,
headers: Mapping[str, str] | None = None,
on_response: Callable[[httpx.Response], None] | None = None,
) -> AsyncIterator[StreamPart]:
"""Stream results using SSE."""
request_headers, content = await _aencode_json(json)
request_headers["Accept"] = "text/event-stream"
request_headers["Cache-Control"] = "no-store"
# Add runtime headers with precedence
if headers:
request_headers.update(headers)
reconnect_headers = {
key: value
for key, value in request_headers.items()
if key.lower() not in {"content-length", "content-type"}
}
last_event_id: str | None = None
reconnect_path: str | None = None
reconnect_attempts = 0
max_reconnect_attempts = 5
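# Each loop iteration is one connection attempt: the first replays the
# original request; reconnects switch to a GET against the Location path
# and resume from the last seen event via the Last-Event-ID header.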
while True:
current_headers = dict(
request_headers if reconnect_path is None else reconnect_headers
)
if last_event_id is not None:
current_headers["Last-Event-ID"] = last_event_id
current_method = method if reconnect_path is None else "GET"
current_content = content if reconnect_path is None else None
current_params = params if reconnect_path is None else None
retry = False
async with self.client.stream(
current_method,
reconnect_path or path,
headers=current_headers,
content=current_content,
params=current_params,
) as res:
if reconnect_path is None and on_response:
on_response(res)
# check status
await _araise_for_status_typed(res)
# check content type
content_type = res.headers.get("content-type", "").partition(";")[0]
if "text/event-stream" not in content_type:
raise httpx.TransportError(
"Expected response header Content-Type to contain 'text/event-stream', "
f"got {content_type!r}"
)
reconnect_location = res.headers.get("location")
if reconnect_location:
reconnect_path = reconnect_location
# parse SSE
decoder = SSEDecoder()
try:
async for line in aiter_lines_raw(res):
sse = decoder.decode(line=cast("bytes", line).rstrip(b"\n"))
if sse is not None:
if decoder.last_event_id is not None:
last_event_id = decoder.last_event_id
if sse.event or sse.data is not None:
yield sse
except httpx.HTTPError:
# httpx.TransportError inherits from HTTPError, so transient
# disconnects during streaming land here.
if reconnect_path is None:
raise
retry = True
else:
if sse := decoder.decode(b""):
if decoder.last_event_id is not None:
last_event_id = decoder.last_event_id
if sse.event or sse.data is not None:
# decoder.decode(b"") flushes the in-flight event and may
# return an empty placeholder when there is no pending
# message. Skip these no-op events so the stream doesn't
# emit a trailing blank item after reconnects.
yield sse
if retry:
reconnect_attempts += 1
if reconnect_attempts > max_reconnect_attempts:
raise httpx.TransportError(
"Exceeded maximum SSE reconnection attempts"
)
continue
break
async def _aencode_json(json: Any) -> tuple[dict[str, str], bytes | None]:
if json is None:
return {}, None
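# Serialize in a worker thread so large payloads don't block the event loop.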
body = await asyncio.get_running_loop().run_in_executor(
None,
orjson.dumps,
json,
_orjson_default,
orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_NON_STR_KEYS,
)
content_length = str(len(body))
content_type = "application/json"
headers = {"Content-Length": content_length, "Content-Type": content_type}
return headers, body
async def _adecode_json(r: httpx.Response) -> Any:
body = await r.aread()
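# Decode in a worker thread as well, mirroring _aencode_json.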
return (
await asyncio.get_running_loop().run_in_executor(None, orjson.loads, body)
if body
else None
)
| {
"repo_id": "langchain-ai/langgraph",
"file_path": "libs/sdk-py/langgraph_sdk/_async/http.py",
"license": "MIT License",
"lines": 277,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langgraph:libs/sdk-py/langgraph_sdk/_async/runs.py | """Async client for managing runs in LangGraph."""
from __future__ import annotations
import builtins
import warnings
from collections.abc import AsyncIterator, Callable, Mapping, Sequence
from typing import Any, overload
import httpx
from langgraph_sdk._async.http import HttpClient
from langgraph_sdk._shared.utilities import _get_run_metadata_from_response
from langgraph_sdk.schema import (
All,
BulkCancelRunsStatus,
CancelAction,
Checkpoint,
Command,
Config,
Context,
DisconnectMode,
Durability,
IfNotExists,
Input,
MultitaskStrategy,
OnCompletionBehavior,
QueryParamTypes,
Run,
RunCreate,
RunCreateMetadata,
RunSelectField,
RunStatus,
StreamMode,
StreamPart,
)
class RunsClient:
"""Client for managing runs in LangGraph.
A run is a single assistant invocation with optional input, config, context, and metadata.
This client manages runs, which can be stateful (on threads) or stateless.
???+ example "Example"
```python
client = get_client(url="http://localhost:2024")
run = await client.runs.create(assistant_id="asst_123", thread_id="thread_456", input={"query": "Hello"})
```
"""
def __init__(self, http: HttpClient) -> None:
self.http = http
@overload
def stream(
self,
thread_id: str,
assistant_id: str,
*,
input: Input | None = None,
command: Command | None = None,
stream_mode: StreamMode | Sequence[StreamMode] = "values",
stream_subgraphs: bool = False,
stream_resumable: bool = False,
metadata: Mapping[str, Any] | None = None,
config: Config | None = None,
context: Context | None = None,
checkpoint: Checkpoint | None = None,
checkpoint_id: str | None = None,
checkpoint_during: bool | None = None,
interrupt_before: All | Sequence[str] | None = None,
interrupt_after: All | Sequence[str] | None = None,
feedback_keys: Sequence[str] | None = None,
on_disconnect: DisconnectMode | None = None,
webhook: str | None = None,
multitask_strategy: MultitaskStrategy | None = None,
if_not_exists: IfNotExists | None = None,
after_seconds: int | None = None,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
on_run_created: Callable[[RunCreateMetadata], None] | None = None,
) -> AsyncIterator[StreamPart]: ...
@overload
def stream(
self,
thread_id: None,
assistant_id: str,
*,
input: Input | None = None,
command: Command | None = None,
stream_mode: StreamMode | Sequence[StreamMode] = "values",
stream_subgraphs: bool = False,
stream_resumable: bool = False,
metadata: Mapping[str, Any] | None = None,
config: Config | None = None,
checkpoint_during: bool | None = None,
interrupt_before: All | Sequence[str] | None = None,
interrupt_after: All | Sequence[str] | None = None,
feedback_keys: Sequence[str] | None = None,
on_disconnect: DisconnectMode | None = None,
on_completion: OnCompletionBehavior | None = None,
if_not_exists: IfNotExists | None = None,
webhook: str | None = None,
after_seconds: int | None = None,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
on_run_created: Callable[[RunCreateMetadata], None] | None = None,
) -> AsyncIterator[StreamPart]: ...
def stream(
self,
thread_id: str | None,
assistant_id: str,
*,
input: Input | None = None,
command: Command | None = None,
stream_mode: StreamMode | Sequence[StreamMode] = "values",
stream_subgraphs: bool = False,
stream_resumable: bool = False,
metadata: Mapping[str, Any] | None = None,
config: Config | None = None,
context: Context | None = None,
checkpoint: Checkpoint | None = None,
checkpoint_id: str | None = None,
checkpoint_during: bool | None = None, # deprecated
interrupt_before: All | Sequence[str] | None = None,
interrupt_after: All | Sequence[str] | None = None,
feedback_keys: Sequence[str] | None = None,
on_disconnect: DisconnectMode | None = None,
on_completion: OnCompletionBehavior | None = None,
webhook: str | None = None,
multitask_strategy: MultitaskStrategy | None = None,
if_not_exists: IfNotExists | None = None,
after_seconds: int | None = None,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
on_run_created: Callable[[RunCreateMetadata], None] | None = None,
durability: Durability | None = None,
) -> AsyncIterator[StreamPart]:
"""Create a run and stream the results.
Args:
thread_id: the thread ID to assign to the thread.
If `None` will create a stateless run.
assistant_id: The assistant ID or graph name to stream from.
If using graph name, will default to first assistant created from that graph.
input: The input to the graph.
command: A command to execute. Cannot be combined with input.
stream_mode: The stream mode(s) to use.
stream_subgraphs: Whether to stream output from subgraphs.
stream_resumable: Whether the stream is considered resumable.
If true, the stream can be resumed and replayed in its entirety even after disconnection.
metadata: Metadata to assign to the run.
config: The configuration for the assistant.
context: Static context to add to the assistant.
!!! version-added "Added in version 0.6.0"
checkpoint: The checkpoint to resume from.
checkpoint_during: (deprecated) Whether to checkpoint during the run (or only at the end/interruption).
interrupt_before: Nodes to interrupt immediately before they get executed.
interrupt_after: Nodes to interrupt immediately after they get executed.
feedback_keys: Feedback keys to assign to run.
on_disconnect: The disconnect mode to use.
Must be one of 'cancel' or 'continue'.
on_completion: Whether to delete or keep the thread created for a stateless run.
Must be one of 'delete' or 'keep'.
webhook: Webhook to call after LangGraph API call is done.
multitask_strategy: Multitask strategy to use.
Must be one of 'reject', 'interrupt', 'rollback', or 'enqueue'.
if_not_exists: How to handle missing thread. Defaults to 'reject'.
Must be either 'reject' (raise error if missing), or 'create' (create new thread).
after_seconds: The number of seconds to wait before starting the run.
Use to schedule future runs.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
on_run_created: Callback when a run is created.
durability: The durability to use for the run. Values are "sync", "async", or "exit".
"async" means checkpoints are persisted async while next graph step executes, replaces checkpoint_during=True
"sync" means checkpoints are persisted sync after graph step executes, replaces checkpoint_during=False
"exit" means checkpoints are only persisted when the run exits, does not save intermediate steps
Returns:
Asynchronous iterator of stream results.
???+ example "Example Usage"
```python
client = get_client(url="http://localhost:2024)
async for chunk in client.runs.stream(
thread_id=None,
assistant_id="agent",
input={"messages": [{"role": "user", "content": "how are you?"}]},
stream_mode=["values","debug"],
metadata={"name":"my_run"},
context={"model_name": "anthropic"},
interrupt_before=["node_to_stop_before_1","node_to_stop_before_2"],
interrupt_after=["node_to_stop_after_1","node_to_stop_after_2"],
feedback_keys=["my_feedback_key_1","my_feedback_key_2"],
webhook="https://my.fake.webhook.com",
multitask_strategy="interrupt"
):
print(chunk)
```
```shell
------------------------------------------------------------------------------------------
StreamPart(event='metadata', data={'run_id': '1ef4a9b8-d7da-679a-a45a-872054341df2'})
StreamPart(event='values', data={'messages': [{'content': 'how are you?', 'additional_kwargs': {}, 'response_metadata': {}, 'type': 'human', 'name': None, 'id': 'fe0a5778-cfe9-42ee-b807-0adaa1873c10', 'example': False}]})
StreamPart(event='values', data={'messages': [{'content': 'how are you?', 'additional_kwargs': {}, 'response_metadata': {}, 'type': 'human', 'name': None, 'id': 'fe0a5778-cfe9-42ee-b807-0adaa1873c10', 'example': False}, {'content': "I'm doing well, thanks for asking! I'm an AI assistant created by Anthropic to be helpful, honest, and harmless.", 'additional_kwargs': {}, 'response_metadata': {}, 'type': 'ai', 'name': None, 'id': 'run-159b782c-b679-4830-83c6-cef87798fe8b', 'example': False, 'tool_calls': [], 'invalid_tool_calls': [], 'usage_metadata': None}]})
StreamPart(event='end', data=None)
```
"""
if checkpoint_during is not None:
warnings.warn(
"`checkpoint_during` is deprecated and will be removed in a future version. Use `durability` instead.",
DeprecationWarning,
stacklevel=2,
)
payload = {
"input": input,
"command": (
{k: v for k, v in command.items() if v is not None} if command else None
),
"config": config,
"context": context,
"metadata": metadata,
"stream_mode": stream_mode,
"stream_subgraphs": stream_subgraphs,
"stream_resumable": stream_resumable,
"assistant_id": assistant_id,
"interrupt_before": interrupt_before,
"interrupt_after": interrupt_after,
"feedback_keys": feedback_keys,
"webhook": webhook,
"checkpoint": checkpoint,
"checkpoint_id": checkpoint_id,
"checkpoint_during": checkpoint_during,
"multitask_strategy": multitask_strategy,
"if_not_exists": if_not_exists,
"on_disconnect": on_disconnect,
"on_completion": on_completion,
"after_seconds": after_seconds,
"durability": durability,
}
endpoint = (
f"/threads/{thread_id}/runs/stream"
if thread_id is not None
else "/runs/stream"
)
def on_response(res: httpx.Response):
"""Callback function to handle the response."""
if on_run_created and (metadata := _get_run_metadata_from_response(res)):
on_run_created(metadata)
return self.http.stream(
endpoint,
"POST",
json={k: v for k, v in payload.items() if v is not None},
params=params,
headers=headers,
on_response=on_response if on_run_created else None,
)
@overload
async def create(
self,
thread_id: None,
assistant_id: str,
*,
input: Input | None = None,
command: Command | None = None,
stream_mode: StreamMode | Sequence[StreamMode] = "values",
stream_subgraphs: bool = False,
stream_resumable: bool = False,
metadata: Mapping[str, Any] | None = None,
checkpoint_during: bool | None = None,
config: Config | None = None,
context: Context | None = None,
interrupt_before: All | Sequence[str] | None = None,
interrupt_after: All | Sequence[str] | None = None,
webhook: str | None = None,
on_completion: OnCompletionBehavior | None = None,
if_not_exists: IfNotExists | None = None,
after_seconds: int | None = None,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
on_run_created: Callable[[RunCreateMetadata], None] | None = None,
) -> Run: ...
@overload
async def create(
self,
thread_id: str,
assistant_id: str,
*,
input: Input | None = None,
command: Command | None = None,
stream_mode: StreamMode | Sequence[StreamMode] = "values",
stream_subgraphs: bool = False,
stream_resumable: bool = False,
metadata: Mapping[str, Any] | None = None,
config: Config | None = None,
context: Context | None = None,
checkpoint: Checkpoint | None = None,
checkpoint_id: str | None = None,
checkpoint_during: bool | None = None,
interrupt_before: All | Sequence[str] | None = None,
interrupt_after: All | Sequence[str] | None = None,
webhook: str | None = None,
multitask_strategy: MultitaskStrategy | None = None,
if_not_exists: IfNotExists | None = None,
after_seconds: int | None = None,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
on_run_created: Callable[[RunCreateMetadata], None] | None = None,
) -> Run: ...
async def create(
self,
thread_id: str | None,
assistant_id: str,
*,
input: Input | None = None,
command: Command | None = None,
stream_mode: StreamMode | Sequence[StreamMode] = "values",
stream_subgraphs: bool = False,
stream_resumable: bool = False,
metadata: Mapping[str, Any] | None = None,
config: Config | None = None,
context: Context | None = None,
checkpoint: Checkpoint | None = None,
checkpoint_id: str | None = None,
checkpoint_during: bool | None = None, # deprecated
interrupt_before: All | Sequence[str] | None = None,
interrupt_after: All | Sequence[str] | None = None,
webhook: str | None = None,
multitask_strategy: MultitaskStrategy | None = None,
if_not_exists: IfNotExists | None = None,
on_completion: OnCompletionBehavior | None = None,
after_seconds: int | None = None,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
on_run_created: Callable[[RunCreateMetadata], None] | None = None,
durability: Durability | None = None,
) -> Run:
"""Create a background run.
Args:
thread_id: the thread ID to assign to the thread.
If `None` will create a stateless run.
assistant_id: The assistant ID or graph name to stream from.
If using graph name, will default to first assistant created from that graph.
input: The input to the graph.
command: A command to execute. Cannot be combined with input.
stream_mode: The stream mode(s) to use.
stream_subgraphs: Whether to stream output from subgraphs.
stream_resumable: Whether the stream is considered resumable.
If true, the stream can be resumed and replayed in its entirety even after disconnection.
metadata: Metadata to assign to the run.
config: The configuration for the assistant.
context: Static context to add to the assistant.
!!! version-added "Added in version 0.6.0"
checkpoint: The checkpoint to resume from.
checkpoint_during: (deprecated) Whether to checkpoint during the run (or only at the end/interruption).
interrupt_before: Nodes to interrupt immediately before they get executed.
interrupt_after: Nodes to interrupt immediately after they get executed.
webhook: Webhook to call after LangGraph API call is done.
multitask_strategy: Multitask strategy to use.
Must be one of 'reject', 'interrupt', 'rollback', or 'enqueue'.
on_completion: Whether to delete or keep the thread created for a stateless run.
Must be one of 'delete' or 'keep'.
if_not_exists: How to handle missing thread. Defaults to 'reject'.
Must be either 'reject' (raise error if missing), or 'create' (create new thread).
after_seconds: The number of seconds to wait before starting the run.
Use to schedule future runs.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
on_run_created: Optional callback to call when a run is created.
durability: The durability to use for the run. Values are "sync", "async", or "exit".
"async" means checkpoints are persisted async while next graph step executes, replaces checkpoint_during=True
"sync" means checkpoints are persisted sync after graph step executes, replaces checkpoint_during=False
"exit" means checkpoints are only persisted when the run exits, does not save intermediate steps
Returns:
The created background run.
???+ example "Example Usage"
```python
background_run = await client.runs.create(
thread_id="my_thread_id",
assistant_id="my_assistant_id",
input={"messages": [{"role": "user", "content": "hello!"}]},
metadata={"name":"my_run"},
context={"model_name": "openai"},
interrupt_before=["node_to_stop_before_1","node_to_stop_before_2"],
interrupt_after=["node_to_stop_after_1","node_to_stop_after_2"],
webhook="https://my.fake.webhook.com",
multitask_strategy="interrupt"
)
print(background_run)
```
```shell
--------------------------------------------------------------------------------
{
'run_id': 'my_run_id',
'thread_id': 'my_thread_id',
'assistant_id': 'my_assistant_id',
'created_at': '2024-07-25T15:35:42.598503+00:00',
'updated_at': '2024-07-25T15:35:42.598503+00:00',
'metadata': {},
'status': 'pending',
'kwargs':
{
'input':
{
'messages': [
{
'role': 'user',
'content': 'how are you?'
}
]
},
'config':
{
'metadata':
{
'created_by': 'system'
},
'configurable':
{
'run_id': 'my_run_id',
'user_id': None,
'graph_id': 'agent',
'thread_id': 'my_thread_id',
'checkpoint_id': None,
'assistant_id': 'my_assistant_id'
},
},
'context':
{
'model_name': 'openai'
},
'webhook': "https://my.fake.webhook.com",
'temporary': False,
'stream_mode': ['values'],
'feedback_keys': None,
'interrupt_after': ["node_to_stop_after_1","node_to_stop_after_2"],
'interrupt_before': ["node_to_stop_before_1","node_to_stop_before_2"]
},
'multitask_strategy': 'interrupt'
}
```
"""
if checkpoint_during is not None:
warnings.warn(
"`checkpoint_during` is deprecated and will be removed in a future version. Use `durability` instead.",
DeprecationWarning,
stacklevel=2,
)
payload = {
"input": input,
"command": (
{k: v for k, v in command.items() if v is not None} if command else None
),
"stream_mode": stream_mode,
"stream_subgraphs": stream_subgraphs,
"stream_resumable": stream_resumable,
"config": config,
"context": context,
"metadata": metadata,
"assistant_id": assistant_id,
"interrupt_before": interrupt_before,
"interrupt_after": interrupt_after,
"webhook": webhook,
"checkpoint": checkpoint,
"checkpoint_id": checkpoint_id,
"checkpoint_during": checkpoint_during,
"multitask_strategy": multitask_strategy,
"if_not_exists": if_not_exists,
"on_completion": on_completion,
"after_seconds": after_seconds,
"durability": durability,
}
payload = {k: v for k, v in payload.items() if v is not None}
def on_response(res: httpx.Response):
"""Callback function to handle the response."""
if on_run_created and (metadata := _get_run_metadata_from_response(res)):
on_run_created(metadata)
return await self.http.post(
f"/threads/{thread_id}/runs" if thread_id else "/runs",
json=payload,
params=params,
headers=headers,
on_response=on_response if on_run_created else None,
)
async def create_batch(
self,
payloads: builtins.list[RunCreate],
*,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> builtins.list[Run]:
"""Create a batch of stateless background runs."""
def filter_payload(payload: RunCreate):
return {k: v for k, v in payload.items() if v is not None}
filtered = [filter_payload(payload) for payload in payloads]
return await self.http.post(
"/runs/batch", json=filtered, headers=headers, params=params
)
@overload
async def wait(
self,
thread_id: str,
assistant_id: str,
*,
input: Input | None = None,
command: Command | None = None,
metadata: Mapping[str, Any] | None = None,
config: Config | None = None,
context: Context | None = None,
checkpoint: Checkpoint | None = None,
checkpoint_id: str | None = None,
checkpoint_during: bool | None = None,
interrupt_before: All | Sequence[str] | None = None,
interrupt_after: All | Sequence[str] | None = None,
webhook: str | None = None,
on_disconnect: DisconnectMode | None = None,
multitask_strategy: MultitaskStrategy | None = None,
if_not_exists: IfNotExists | None = None,
after_seconds: int | None = None,
raise_error: bool = True,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
on_run_created: Callable[[RunCreateMetadata], None] | None = None,
) -> builtins.list[dict] | dict[str, Any]: ...
@overload
async def wait(
self,
thread_id: None,
assistant_id: str,
*,
input: Input | None = None,
command: Command | None = None,
metadata: Mapping[str, Any] | None = None,
config: Config | None = None,
context: Context | None = None,
checkpoint_during: bool | None = None,
interrupt_before: All | Sequence[str] | None = None,
interrupt_after: All | Sequence[str] | None = None,
webhook: str | None = None,
on_disconnect: DisconnectMode | None = None,
on_completion: OnCompletionBehavior | None = None,
if_not_exists: IfNotExists | None = None,
after_seconds: int | None = None,
raise_error: bool = True,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
on_run_created: Callable[[RunCreateMetadata], None] | None = None,
) -> builtins.list[dict] | dict[str, Any]: ...
async def wait(
self,
thread_id: str | None,
assistant_id: str,
*,
input: Input | None = None,
command: Command | None = None,
metadata: Mapping[str, Any] | None = None,
config: Config | None = None,
context: Context | None = None,
checkpoint: Checkpoint | None = None,
checkpoint_id: str | None = None,
checkpoint_during: bool | None = None, # deprecated
interrupt_before: All | Sequence[str] | None = None,
interrupt_after: All | Sequence[str] | None = None,
webhook: str | None = None,
on_disconnect: DisconnectMode | None = None,
on_completion: OnCompletionBehavior | None = None,
multitask_strategy: MultitaskStrategy | None = None,
if_not_exists: IfNotExists | None = None,
after_seconds: int | None = None,
raise_error: bool = True,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
on_run_created: Callable[[RunCreateMetadata], None] | None = None,
durability: Durability | None = None,
) -> builtins.list[dict] | dict[str, Any]:
"""Create a run, wait until it finishes and return the final state.
Args:
thread_id: the thread ID to create the run on.
If `None` will create a stateless run.
assistant_id: The assistant ID or graph name to run.
If using graph name, will default to first assistant created from that graph.
input: The input to the graph.
command: A command to execute. Cannot be combined with input.
metadata: Metadata to assign to the run.
config: The configuration for the assistant.
context: Static context to add to the assistant.
!!! version-added "Added in version 0.6.0"
checkpoint: The checkpoint to resume from.
checkpoint_during: (deprecated) Whether to checkpoint during the run (or only at the end/interruption).
interrupt_before: Nodes to interrupt immediately before they get executed.
interrupt_after: Nodes to interrupt immediately after they get executed.
webhook: Webhook to call after LangGraph API call is done.
on_disconnect: The disconnect mode to use.
Must be one of 'cancel' or 'continue'.
on_completion: Whether to delete or keep the thread created for a stateless run.
Must be one of 'delete' or 'keep'.
multitask_strategy: Multitask strategy to use.
Must be one of 'reject', 'interrupt', 'rollback', or 'enqueue'.
if_not_exists: How to handle missing thread. Defaults to 'reject'.
Must be either 'reject' (raise error if missing), or 'create' (create new thread).
after_seconds: The number of seconds to wait before starting the run.
Use to schedule future runs.
raise_error: Whether to raise an exception if the run reports an error.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
on_run_created: Optional callback to call when a run is created.
durability: The durability to use for the run. Values are "sync", "async", or "exit".
"async" means checkpoints are persisted async while next graph step executes, replaces checkpoint_during=True
"sync" means checkpoints are persisted sync after graph step executes, replaces checkpoint_during=False
"exit" means checkpoints are only persisted when the run exits, does not save intermediate steps
Returns:
The output of the run.
???+ example "Example Usage"
```python
client = get_client(url="http://localhost:2024")
final_state_of_run = await client.runs.wait(
thread_id=None,
assistant_id="agent",
input={"messages": [{"role": "user", "content": "how are you?"}]},
metadata={"name":"my_run"},
context={"model_name": "anthropic"},
interrupt_before=["node_to_stop_before_1","node_to_stop_before_2"],
interrupt_after=["node_to_stop_after_1","node_to_stop_after_2"],
webhook="https://my.fake.webhook.com",
multitask_strategy="interrupt"
)
print(final_state_of_run)
```
```shell
-------------------------------------------------------------------------------------------------------------------------------------------
{
'messages': [
{
'content': 'how are you?',
'additional_kwargs': {},
'response_metadata': {},
'type': 'human',
'name': None,
'id': 'f51a862c-62fe-4866-863b-b0863e8ad78a',
'example': False
},
{
'content': "I'm doing well, thanks for asking! I'm an AI assistant created by Anthropic to be helpful, honest, and harmless.",
'additional_kwargs': {},
'response_metadata': {},
'type': 'ai',
'name': None,
'id': 'run-bf1cd3c6-768f-4c16-b62d-ba6f17ad8b36',
'example': False,
'tool_calls': [],
'invalid_tool_calls': [],
'usage_metadata': None
}
]
}
```
"""
if checkpoint_during is not None:
warnings.warn(
"`checkpoint_during` is deprecated and will be removed in a future version. Use `durability` instead.",
DeprecationWarning,
stacklevel=2,
)
payload = {
"input": input,
"command": (
{k: v for k, v in command.items() if v is not None} if command else None
),
"config": config,
"context": context,
"metadata": metadata,
"assistant_id": assistant_id,
"interrupt_before": interrupt_before,
"interrupt_after": interrupt_after,
"webhook": webhook,
"checkpoint": checkpoint,
"checkpoint_id": checkpoint_id,
"multitask_strategy": multitask_strategy,
"checkpoint_during": checkpoint_during,
"if_not_exists": if_not_exists,
"on_disconnect": on_disconnect,
"on_completion": on_completion,
"after_seconds": after_seconds,
"durability": durability,
}
endpoint = (
f"/threads/{thread_id}/runs/wait" if thread_id is not None else "/runs/wait"
)
def on_response(res: httpx.Response):
"""Callback function to handle the response."""
if on_run_created and (metadata := _get_run_metadata_from_response(res)):
on_run_created(metadata)
response = await self.http.request_reconnect(
endpoint,
"POST",
json={k: v for k, v in payload.items() if v is not None},
params=params,
headers=headers,
on_response=on_response if on_run_created else None,
)
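# The server reports run failures inline via an "__error__" key; raise
# unless the caller disabled this with raise_error=False.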
if (
raise_error
and isinstance(response, dict)
and "__error__" in response
and isinstance(response["__error__"], dict)
):
raise Exception(
f"{response['__error__'].get('error')}: {response['__error__'].get('message')}"
)
return response
async def list(
self,
thread_id: str,
*,
limit: int = 10,
offset: int = 0,
status: RunStatus | None = None,
select: builtins.list[RunSelectField] | None = None,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> builtins.list[Run]:
"""List runs.
Args:
thread_id: The thread ID to list runs for.
limit: The maximum number of results to return.
offset: The number of results to skip.
status: The status of the run to filter by.
select: List of fields to include in the response.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
The runs for the thread.
???+ example "Example Usage"
```python
client = get_client(url="http://localhost:2024")
await client.runs.list(
thread_id="thread_id",
limit=5,
offset=5,
)
```
"""
query_params: dict[str, Any] = {
"limit": limit,
"offset": offset,
}
if status is not None:
query_params["status"] = status
if select:
query_params["select"] = select
if params:
query_params.update(params)
return await self.http.get(
f"/threads/{thread_id}/runs", params=query_params, headers=headers
)
async def get(
self,
thread_id: str,
run_id: str,
*,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> Run:
"""Get a run.
Args:
thread_id: The thread ID to get.
run_id: The run ID to get.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
`Run` object.
???+ example "Example Usage"
```python
client = get_client(url="http://localhost:2024")
run = await client.runs.get(
thread_id="thread_id_to_delete",
run_id="run_id_to_delete",
)
```
"""
return await self.http.get(
f"/threads/{thread_id}/runs/{run_id}", headers=headers, params=params
)
async def cancel(
self,
thread_id: str,
run_id: str,
*,
wait: bool = False,
action: CancelAction = "interrupt",
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> None:
"""Get a run.
Args:
thread_id: The thread ID to cancel.
run_id: The run ID to cancel.
wait: Whether to wait until run has completed.
action: Action to take when cancelling the run. Possible values
are `interrupt` or `rollback`. Default is `interrupt`.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
`None`
???+ example "Example Usage"
```python
client = get_client(url="http://localhost:2024")
await client.runs.cancel(
thread_id="thread_id_to_cancel",
run_id="run_id_to_cancel",
wait=True,
action="interrupt"
)
```
"""
query_params = {
"wait": 1 if wait else 0,
"action": action,
}
if params:
query_params.update(params)
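# When wait=True the call blocks until the run has completed, so it uses
# the reconnecting request helper; fire-and-forget cancels use a plain POST.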
if wait:
return await self.http.request_reconnect(
f"/threads/{thread_id}/runs/{run_id}/cancel",
"POST",
params=query_params,
headers=headers,
)
else:
return await self.http.post(
f"/threads/{thread_id}/runs/{run_id}/cancel",
json=None,
params=query_params,
headers=headers,
)
async def cancel_many(
self,
*,
thread_id: str | None = None,
run_ids: Sequence[str] | None = None,
status: BulkCancelRunsStatus | None = None,
action: CancelAction = "interrupt",
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> None:
"""Cancel one or more runs.
Can cancel runs by thread ID and run IDs, or by status filter.
Args:
thread_id: The ID of the thread containing runs to cancel.
run_ids: List of run IDs to cancel.
status: Filter runs by status to cancel. Must be one of
`"pending"`, `"running"`, or `"all"`.
action: Action to take when cancelling the run. Possible values
are `"interrupt"` or `"rollback"`. Default is `"interrupt"`.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
`None`
???+ example "Example Usage"
```python
client = get_client(url="http://localhost:2024")
# Cancel all pending runs
await client.runs.cancel_many(status="pending")
# Cancel specific runs on a thread
await client.runs.cancel_many(
thread_id="my_thread_id",
run_ids=["run_1", "run_2"],
action="rollback",
)
```
"""
payload: dict[str, Any] = {}
if thread_id:
payload["thread_id"] = thread_id
if run_ids:
payload["run_ids"] = run_ids
if status:
payload["status"] = status
query_params: dict[str, Any] = {"action": action}
if params:
query_params.update(params)
await self.http.post(
"/runs/cancel",
json=payload,
headers=headers,
params=query_params,
)
async def join(
self,
thread_id: str,
run_id: str,
*,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> dict:
"""Block until a run is done. Returns the final state of the thread.
Args:
thread_id: The thread ID to join.
run_id: The run ID to join.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
The final state of the thread.
???+ example "Example Usage"
```python
client = get_client(url="http://localhost:2024")
result = await client.runs.join(
thread_id="thread_id_to_join",
run_id="run_id_to_join"
)
```
"""
return await self.http.request_reconnect(
f"/threads/{thread_id}/runs/{run_id}/join",
"GET",
headers=headers,
params=params,
)
def join_stream(
self,
thread_id: str,
run_id: str,
*,
cancel_on_disconnect: bool = False,
stream_mode: StreamMode | Sequence[StreamMode] | None = None,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
last_event_id: str | None = None,
) -> AsyncIterator[StreamPart]:
"""Stream output from a run in real-time, until the run is done.
Output is not buffered, so any output produced before this call will
not be received here.
Args:
thread_id: The thread ID to join.
run_id: The run ID to join.
cancel_on_disconnect: Whether to cancel the run when the stream is disconnected.
stream_mode: The stream mode(s) to use. Must be a subset of the stream modes passed
when creating the run. Background runs default to having the union of all
stream modes.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
last_event_id: The last event ID to use for the stream.
Returns:
The stream of parts.
???+ example "Example Usage"
```python
client = get_client(url="http://localhost:2024")
async for part in client.runs.join_stream(
thread_id="thread_id_to_join",
run_id="run_id_to_join",
stream_mode=["values", "debug"]
):
print(part)
```
"""
query_params = {
"cancel_on_disconnect": cancel_on_disconnect,
"stream_mode": stream_mode,
}
if params:
query_params.update(params)
return self.http.stream(
f"/threads/{thread_id}/runs/{run_id}/stream",
"GET",
params=query_params,
headers={
**({"Last-Event-ID": last_event_id} if last_event_id else {}),
**(headers or {}),
}
or None,
)
async def delete(
self,
thread_id: str,
run_id: str,
*,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> None:
"""Delete a run.
Args:
thread_id: The thread ID to delete.
run_id: The run ID to delete.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
`None`
???+ example "Example Usage"
```python
client = get_client(url="http://localhost:2024")
await client.runs.delete(
thread_id="thread_id_to_delete",
run_id="run_id_to_delete"
)
```
"""
await self.http.delete(
f"/threads/{thread_id}/runs/{run_id}", headers=headers, params=params
)
| {
"repo_id": "langchain-ai/langgraph",
"file_path": "libs/sdk-py/langgraph_sdk/_async/runs.py",
"license": "MIT License",
"lines": 983,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
langchain-ai/langgraph:libs/sdk-py/langgraph_sdk/_async/store.py | """Async Store client for LangGraph SDK."""
from __future__ import annotations
from collections.abc import Mapping, Sequence
from typing import Any, Literal
from langgraph_sdk._async.http import HttpClient
from langgraph_sdk._shared.utilities import _provided_vals
from langgraph_sdk.schema import (
Item,
ListNamespaceResponse,
QueryParamTypes,
SearchItemsResponse,
)
class StoreClient:
"""Client for interacting with the graph's shared storage.
The Store provides a key-value storage system for persisting data across graph executions,
allowing for stateful operations and data sharing across threads.
???+ example "Example"
```python
client = get_client(url="http://localhost:2024")
await client.store.put_item(["users", "user123"], "mem-123451342", {"name": "Alice", "score": 100})
```
"""
def __init__(self, http: HttpClient) -> None:
self.http = http
async def put_item(
self,
namespace: Sequence[str],
/,
key: str,
value: Mapping[str, Any],
index: Literal[False] | list[str] | None = None,
ttl: int | None = None,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> None:
"""Store or update an item.
Args:
namespace: A list of strings representing the namespace path.
key: The unique identifier for the item within the namespace.
value: A dictionary containing the item's data.
index: Controls search indexing - None (use defaults), False (disable), or list of field paths to index.
ttl: Optional time-to-live in minutes for the item, or None for no expiration.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
`None`
???+ example "Example Usage"
```python
client = get_client(url="http://localhost:2024")
await client.store.put_item(
["documents", "user123"],
key="item456",
value={"title": "My Document", "content": "Hello World"}
)
```
"""
for label in namespace:
if "." in label:
raise ValueError(
f"Invalid namespace label '{label}'. Namespace labels cannot contain periods ('.')."
)
payload = {
"namespace": namespace,
"key": key,
"value": value,
"index": index,
"ttl": ttl,
}
await self.http.put(
"/store/items", json=_provided_vals(payload), headers=headers, params=params
)
async def get_item(
self,
namespace: Sequence[str],
/,
key: str,
*,
refresh_ttl: bool | None = None,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> Item:
"""Retrieve a single item.
Args:
namespace: A list of strings representing the namespace path.
key: The unique identifier for the item.
refresh_ttl: Whether to refresh the TTL on this read operation. If `None`, uses the store's default behavior.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
Item: The retrieved item.
???+ example "Example Usage"
```python
client = get_client(url="http://localhost:2024")
item = await client.store.get_item(
["documents", "user123"],
key="item456",
)
print(item)
```
```shell
----------------------------------------------------------------
{
'namespace': ['documents', 'user123'],
'key': 'item456',
'value': {'title': 'My Document', 'content': 'Hello World'},
'created_at': '2024-07-30T12:00:00Z',
'updated_at': '2024-07-30T12:00:00Z'
}
```
"""
for label in namespace:
if "." in label:
raise ValueError(
f"Invalid namespace label '{label}'. Namespace labels cannot contain periods ('.')."
)
get_params = {"namespace": ".".join(namespace), "key": key}
if refresh_ttl is not None:
get_params["refresh_ttl"] = refresh_ttl
if params:
get_params = {**get_params, **dict(params)}
return await self.http.get("/store/items", params=get_params, headers=headers)
async def delete_item(
self,
namespace: Sequence[str],
/,
key: str,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> None:
"""Delete an item.
Args:
namespace: A list of strings representing the namespace path.
key: The unique identifier for the item.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
`None`
???+ example "Example Usage"
```python
client = get_client(url="http://localhost:2024")
await client.store.delete_item(
["documents", "user123"],
key="item456",
)
```
"""
await self.http.delete(
"/store/items",
json={"namespace": namespace, "key": key},
headers=headers,
params=params,
)
async def search_items(
self,
namespace_prefix: Sequence[str],
/,
filter: Mapping[str, Any] | None = None,
limit: int = 10,
offset: int = 0,
query: str | None = None,
refresh_ttl: bool | None = None,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> SearchItemsResponse:
"""Search for items within a namespace prefix.
Args:
namespace_prefix: List of strings representing the namespace prefix.
filter: Optional dictionary of key-value pairs to filter results.
limit: Maximum number of items to return (default is 10).
offset: Number of items to skip before returning results (default is 0).
query: Optional query for natural language search.
refresh_ttl: Whether to refresh the TTL on items returned by this search. If `None`, uses the store's default behavior.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
A list of items matching the search criteria.
???+ example "Example Usage"
```python
client = get_client(url="http://localhost:2024")
items = await client.store.search_items(
["documents"],
filter={"author": "John Doe"},
limit=5,
offset=0
)
print(items)
```
```shell
----------------------------------------------------------------
{
"items": [
{
"namespace": ["documents", "user123"],
"key": "item789",
"value": {
"title": "Another Document",
"author": "John Doe"
},
"created_at": "2024-07-30T12:00:00Z",
"updated_at": "2024-07-30T12:00:00Z"
},
# ... additional items ...
]
}
```
"""
payload = {
"namespace_prefix": namespace_prefix,
"filter": filter,
"limit": limit,
"offset": offset,
"query": query,
"refresh_ttl": refresh_ttl,
}
return await self.http.post(
"/store/items/search",
json=_provided_vals(payload),
headers=headers,
params=params,
)
async def list_namespaces(
self,
prefix: list[str] | None = None,
suffix: list[str] | None = None,
max_depth: int | None = None,
limit: int = 100,
offset: int = 0,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> ListNamespaceResponse:
"""List namespaces with optional match conditions.
Args:
prefix: Optional list of strings representing the prefix to filter namespaces.
suffix: Optional list of strings representing the suffix to filter namespaces.
max_depth: Optional integer specifying the maximum depth of namespaces to return.
limit: Maximum number of namespaces to return (default is 100).
offset: Number of namespaces to skip before returning results (default is 0).
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
A list of namespaces matching the criteria.
???+ example "Example Usage"
```python
client = get_client(url="http://localhost:2024")
namespaces = await client.store.list_namespaces(
prefix=["documents"],
max_depth=3,
limit=10,
offset=0
)
print(namespaces)
```
```shell
----------------------------------------------------------------
[
["documents", "user123", "reports"],
["documents", "user456", "invoices"],
...
]
```
"""
payload = {
"prefix": prefix,
"suffix": suffix,
"max_depth": max_depth,
"limit": limit,
"offset": offset,
}
return await self.http.post(
"/store/namespaces",
json=_provided_vals(payload),
headers=headers,
params=params,
)
| {
"repo_id": "langchain-ai/langgraph",
"file_path": "libs/sdk-py/langgraph_sdk/_async/store.py",
"license": "MIT License",
"lines": 272,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
langchain-ai/langgraph:libs/sdk-py/langgraph_sdk/_async/threads.py | """Async client for managing threads in LangGraph."""
from __future__ import annotations
from collections.abc import AsyncIterator, Mapping, Sequence
from typing import Any
from langgraph_sdk._async.http import HttpClient
from langgraph_sdk.schema import (
Checkpoint,
Json,
OnConflictBehavior,
PruneStrategy,
QueryParamTypes,
SortOrder,
StreamPart,
Thread,
ThreadSelectField,
ThreadSortBy,
ThreadState,
ThreadStatus,
ThreadStreamMode,
ThreadUpdateStateResponse,
)
class ThreadsClient:
"""Client for managing threads in LangGraph.
A thread maintains the state of a graph across multiple interactions/invocations (aka runs).
It accumulates and persists the graph's state, allowing for continuity between separate
invocations of the graph.
???+ example "Example"
```python
client = get_client(url="http://localhost:2024"))
new_thread = await client.threads.create(metadata={"user_id": "123"})
```
"""
def __init__(self, http: HttpClient) -> None:
self.http = http
async def get(
self,
thread_id: str,
*,
include: Sequence[str] | None = None,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> Thread:
"""Get a thread by ID.
Args:
thread_id: The ID of the thread to get.
include: Additional fields to include in the response.
Supported values: `"ttl"`.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
Thread object.
???+ example "Example Usage"
```python
client = get_client(url="http://localhost:2024")
thread = await client.threads.get(
thread_id="my_thread_id"
)
print(thread)
```
```shell
-----------------------------------------------------
{
'thread_id': 'my_thread_id',
'created_at': '2024-07-18T18:35:15.540834+00:00',
'updated_at': '2024-07-18T18:35:15.540834+00:00',
'metadata': {'graph_id': 'agent'}
}
```
"""
query_params: dict[str, Any] = {}
if include:
query_params["include"] = ",".join(include)
if params:
query_params.update(params)
return await self.http.get(
f"/threads/{thread_id}",
headers=headers,
params=query_params or None,
)
async def create(
self,
*,
metadata: Json = None,
thread_id: str | None = None,
if_exists: OnConflictBehavior | None = None,
supersteps: Sequence[dict[str, Sequence[dict[str, Any]]]] | None = None,
graph_id: str | None = None,
ttl: int | Mapping[str, Any] | None = None,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> Thread:
"""Create a new thread.
Args:
metadata: Metadata to add to thread.
thread_id: ID of thread.
If `None`, ID will be a randomly generated UUID.
if_exists: How to handle duplicate creation. Defaults to 'raise' under the hood.
Must be either 'raise' (raise error if duplicate), or 'do_nothing' (return existing thread).
supersteps: Apply a list of supersteps when creating a thread, each containing a sequence of updates.
Each update has `values` or `command` and `as_node`. Used for copying a thread between deployments.
graph_id: Optional graph ID to associate with the thread.
ttl: Optional time-to-live in minutes for the thread. You can pass an
integer (minutes) or a mapping with keys `ttl` and optional
`strategy` (defaults to "delete").
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
The created thread.
???+ example "Example Usage"
```python
client = get_client(url="http://localhost:2024")
thread = await client.threads.create(
metadata={"number":1},
thread_id="my-thread-id",
if_exists="raise"
)
```
"""
payload: dict[str, Any] = {}
if thread_id:
payload["thread_id"] = thread_id
if metadata or graph_id:
payload["metadata"] = {
**(metadata or {}),
**({"graph_id": graph_id} if graph_id else {}),
}
if if_exists:
payload["if_exists"] = if_exists
if supersteps:
payload["supersteps"] = [
{
"updates": [
{
"values": u["values"],
"command": u.get("command"),
"as_node": u["as_node"],
}
for u in s["updates"]
]
}
for s in supersteps
]
if ttl is not None:
if isinstance(ttl, (int, float)):
payload["ttl"] = {"ttl": ttl, "strategy": "delete"}
else:
payload["ttl"] = ttl
return await self.http.post(
"/threads", json=payload, headers=headers, params=params
)
async def update(
self,
thread_id: str,
*,
metadata: Mapping[str, Any],
ttl: int | Mapping[str, Any] | None = None,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> Thread:
"""Update a thread.
Args:
thread_id: ID of thread to update.
metadata: Metadata to merge with existing thread metadata.
ttl: Optional time-to-live in minutes for the thread. You can pass an
integer (minutes) or a mapping with keys `ttl` and optional
`strategy` (defaults to "delete").
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
The updated thread.
???+ example "Example Usage"
```python
client = get_client(url="http://localhost:2024")
thread = await client.threads.update(
thread_id="my-thread-id",
metadata={"number":1},
ttl=43_200,
)
```
"""
payload: dict[str, Any] = {"metadata": metadata}
if ttl is not None:
if isinstance(ttl, (int, float)):
payload["ttl"] = {"ttl": ttl, "strategy": "delete"}
else:
payload["ttl"] = ttl
return await self.http.patch(
f"/threads/{thread_id}",
json=payload,
headers=headers,
params=params,
)
async def delete(
self,
thread_id: str,
*,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> None:
"""Delete a thread.
Args:
thread_id: The ID of the thread to delete.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
`None`
???+ example "Example Usage"
```python
client = get_client(url="http://localhost:2024")
await client.threads.delete(
thread_id="my_thread_id"
)
```
"""
await self.http.delete(f"/threads/{thread_id}", headers=headers, params=params)
async def search(
self,
*,
metadata: Json = None,
values: Json = None,
ids: Sequence[str] | None = None,
status: ThreadStatus | None = None,
limit: int = 10,
offset: int = 0,
sort_by: ThreadSortBy | None = None,
sort_order: SortOrder | None = None,
select: list[ThreadSelectField] | None = None,
extract: dict[str, str] | None = None,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> list[Thread]:
"""Search for threads.
Args:
metadata: Thread metadata to filter on.
values: State values to filter on.
ids: List of thread IDs to filter by.
status: Thread status to filter on.
Must be one of 'idle', 'busy', 'interrupted' or 'error'.
limit: Limit on number of threads to return.
offset: Offset in threads table to start search from.
sort_by: Sort by field.
sort_order: Sort order.
select: List of fields to include in the response.
extract: Dictionary mapping aliases to JSONB paths to extract
from thread data. Paths use dot notation for nested keys and
bracket notation for array indices (e.g.,
`{"last_msg": "values.messages[-1]"}`). Extracted values are
returned in an `extracted` field on each thread. Maximum 10
paths per request.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
List of the threads matching the search parameters.
???+ example "Example Usage"
```python
client = get_client(url="http://localhost:2024")
threads = await client.threads.search(
metadata={"number":1},
status="interrupted",
limit=15,
offset=5
)
```
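The `extract` option returns the requested JSONB paths in an `extracted` field on each thread; a sketch (alias and path are illustrative):
```python
threads = await client.threads.search(
extract={"last_msg": "values.messages[-1]"},
)
for thread in threads:
    print(thread["extracted"]["last_msg"])
```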
"""
payload: dict[str, Any] = {
"limit": limit,
"offset": offset,
}
if metadata:
payload["metadata"] = metadata
if values:
payload["values"] = values
if ids:
payload["ids"] = ids
if status:
payload["status"] = status
if sort_by:
payload["sort_by"] = sort_by
if sort_order:
payload["sort_order"] = sort_order
if select:
payload["select"] = select
if extract:
payload["extract"] = extract
return await self.http.post(
"/threads/search",
json=payload,
headers=headers,
params=params,
)
async def count(
self,
*,
metadata: Json = None,
values: Json = None,
status: ThreadStatus | None = None,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> int:
"""Count threads matching filters.
Args:
metadata: Thread metadata to filter on.
values: State values to filter on.
status: Thread status to filter on.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
int: Number of threads matching the criteria.
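???+ example "Example Usage"
A minimal sketch; the filter values are illustrative:
```python
client = get_client(url="http://localhost:2024")
count = await client.threads.count(
metadata={"user_id": "123"},
status="idle",
)
print(count)
```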
"""
payload: dict[str, Any] = {}
if metadata:
payload["metadata"] = metadata
if values:
payload["values"] = values
if status:
payload["status"] = status
return await self.http.post(
"/threads/count", json=payload, headers=headers, params=params
)
async def copy(
self,
thread_id: str,
*,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> None:
"""Copy a thread.
Args:
thread_id: The ID of the thread to copy.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
`None`
???+ example "Example Usage"
```python
client = get_client(url="http://localhost:2024")
await client.threads.copy(
thread_id="my_thread_id"
)
```
"""
return await self.http.post(
f"/threads/{thread_id}/copy", json=None, headers=headers, params=params
)
async def prune(
self,
thread_ids: Sequence[str],
*,
strategy: PruneStrategy = "delete",
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> dict[str, Any]:
"""Prune threads by ID.
Args:
thread_ids: List of thread IDs to prune.
strategy: The prune strategy. `"delete"` removes threads entirely.
`"keep_latest"` prunes old checkpoints but keeps threads and their
latest state. Defaults to `"delete"`.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
A dict containing `pruned_count` (number of threads pruned).
???+ example "Example Usage"
```python
client = get_client(url="http://localhost:2024")
result = await client.threads.prune(
thread_ids=["thread_1", "thread_2"],
)
print(result) # {'pruned_count': 2}
```
"""
payload: dict[str, Any] = {
"thread_ids": thread_ids,
}
if strategy != "delete":
payload["strategy"] = strategy
return await self.http.post(
"/threads/prune", json=payload, headers=headers, params=params
)
async def get_state(
self,
thread_id: str,
checkpoint: Checkpoint | None = None,
checkpoint_id: str | None = None, # deprecated
*,
subgraphs: bool = False,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> ThreadState:
"""Get the state of a thread.
Args:
thread_id: The ID of the thread to get the state of.
checkpoint: The checkpoint to get the state of.
checkpoint_id: (deprecated) The checkpoint ID to get the state of.
subgraphs: Include subgraphs states.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
The state of the thread.
???+ example "Example Usage"
```python
client = get_client(url="http://localhost:2024")
thread_state = await client.threads.get_state(
thread_id="my_thread_id",
checkpoint_id="my_checkpoint_id"
)
print(thread_state)
```
```shell
----------------------------------------------------------------------------------------------------------------------------------------------------------------------
{
'values': {
'messages': [
{
'content': 'how are you?',
'additional_kwargs': {},
'response_metadata': {},
'type': 'human',
'name': None,
'id': 'fe0a5778-cfe9-42ee-b807-0adaa1873c10',
'example': False
},
{
'content': "I'm doing well, thanks for asking! I'm an AI assistant created by Anthropic to be helpful, honest, and harmless.",
'additional_kwargs': {},
'response_metadata': {},
'type': 'ai',
'name': None,
'id': 'run-159b782c-b679-4830-83c6-cef87798fe8b',
'example': False,
'tool_calls': [],
'invalid_tool_calls': [],
'usage_metadata': None
}
]
},
'next': [],
'checkpoint':
{
'thread_id': 'e2496803-ecd5-4e0c-a779-3226296181c2',
'checkpoint_ns': '',
'checkpoint_id': '1ef4a9b8-e6fb-67b1-8001-abd5184439d1'
},
'metadata':
{
'step': 1,
'run_id': '1ef4a9b8-d7da-679a-a45a-872054341df2',
'source': 'loop',
'writes':
{
'agent':
{
'messages': [
{
'id': 'run-159b782c-b679-4830-83c6-cef87798fe8b',
'name': None,
'type': 'ai',
'content': "I'm doing well, thanks for asking! I'm an AI assistant created by Anthropic to be helpful, honest, and harmless.",
'example': False,
'tool_calls': [],
'usage_metadata': None,
'additional_kwargs': {},
'response_metadata': {},
'invalid_tool_calls': []
}
]
}
},
'user_id': None,
'graph_id': 'agent',
'thread_id': 'e2496803-ecd5-4e0c-a779-3226296181c2',
'created_by': 'system',
'assistant_id': 'fe096781-5601-53d2-b2f6-0d3403f7e9ca'},
'created_at': '2024-07-25T15:35:44.184703+00:00',
'parent_config':
{
'thread_id': 'e2496803-ecd5-4e0c-a779-3226296181c2',
'checkpoint_ns': '',
'checkpoint_id': '1ef4a9b8-d80d-6fa7-8000-9300467fad0f'
}
}
```
"""
if checkpoint:
return await self.http.post(
f"/threads/{thread_id}/state/checkpoint",
json={"checkpoint": checkpoint, "subgraphs": subgraphs},
headers=headers,
params=params,
)
elif checkpoint_id:
get_params = {"subgraphs": subgraphs}
if params:
get_params = {**get_params, **dict(params)}
return await self.http.get(
f"/threads/{thread_id}/state/{checkpoint_id}",
params=get_params,
headers=headers,
)
else:
get_params = {"subgraphs": subgraphs}
if params:
get_params = {**get_params, **dict(params)}
return await self.http.get(
f"/threads/{thread_id}/state",
params=get_params,
headers=headers,
)
async def update_state(
self,
thread_id: str,
values: dict[str, Any] | Sequence[dict] | None,
*,
as_node: str | None = None,
checkpoint: Checkpoint | None = None,
checkpoint_id: str | None = None, # deprecated
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> ThreadUpdateStateResponse:
"""Update the state of a thread.
Args:
thread_id: The ID of the thread to update.
values: The values to update the state with.
as_node: Update the state as if this node had just executed.
checkpoint: The checkpoint to update the state of.
checkpoint_id: (deprecated) The checkpoint ID to update the state of.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
Response after updating a thread's state.
???+ example "Example Usage"
```python
client = get_client(url="http://localhost:2024")
response = await client.threads.update_state(
thread_id="my_thread_id",
values={"messages":[{"role": "user", "content": "hello!"}]},
as_node="my_node",
)
print(response)
```
```shell
----------------------------------------------------------------------------------------------------------------------------------------------------------------------
{
'checkpoint': {
'thread_id': 'e2496803-ecd5-4e0c-a779-3226296181c2',
'checkpoint_ns': '',
'checkpoint_id': '1ef4a9b8-e6fb-67b1-8001-abd5184439d1',
'checkpoint_map': {}
}
}
```
"""
payload: dict[str, Any] = {
"values": values,
}
if checkpoint_id:
payload["checkpoint_id"] = checkpoint_id
if checkpoint:
payload["checkpoint"] = checkpoint
if as_node:
payload["as_node"] = as_node
return await self.http.post(
f"/threads/{thread_id}/state", json=payload, headers=headers, params=params
)
async def get_history(
self,
thread_id: str,
*,
limit: int = 10,
before: str | Checkpoint | None = None,
metadata: Mapping[str, Any] | None = None,
checkpoint: Checkpoint | None = None,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> list[ThreadState]:
"""Get the state history of a thread.
Args:
thread_id: The ID of the thread to get the state history for.
checkpoint: Return states for this subgraph. If empty defaults to root.
limit: The maximum number of states to return.
before: Return states before this checkpoint.
metadata: Filter states by metadata key-value pairs.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
The state history of the thread.
???+ example "Example Usage"
```python
client = get_client(url="http://localhost:2024")
thread_state = await client.threads.get_history(
thread_id="my_thread_id",
limit=5,
)
```
"""
payload: dict[str, Any] = {
"limit": limit,
}
if before:
payload["before"] = before
if metadata:
payload["metadata"] = metadata
if checkpoint:
payload["checkpoint"] = checkpoint
return await self.http.post(
f"/threads/{thread_id}/history",
json=payload,
headers=headers,
params=params,
)
def join_stream(
self,
thread_id: str,
*,
last_event_id: str | None = None,
stream_mode: ThreadStreamMode | Sequence[ThreadStreamMode] = "run_modes",
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> AsyncIterator[StreamPart]:
"""Get a stream of events for a thread.
Args:
thread_id: The ID of the thread to get the stream for.
last_event_id: The ID of the last event received; the stream resumes from the event after it (sent as the `Last-Event-ID` header).
stream_mode: The stream mode(s) to subscribe to. Defaults to `"run_modes"`.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
An iterator of stream parts.
???+ example "Example Usage"
```python
client = get_client(url="http://localhost:2024")
async for chunk in client.threads.join_stream(
thread_id="my_thread_id",
last_event_id="my_event_id",
):
print(chunk)
```
"""
query_params = {
"stream_mode": stream_mode,
}
if params:
query_params.update(params)
return self.http.stream(
f"/threads/{thread_id}/stream",
"GET",
headers={
**({"Last-Event-ID": last_event_id} if last_event_id else {}),
**(headers or {}),
},
params=query_params,
)
| {
"repo_id": "langchain-ai/langgraph",
"file_path": "libs/sdk-py/langgraph_sdk/_async/threads.py",
"license": "MIT License",
"lines": 650,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
langchain-ai/langgraph:libs/sdk-py/langgraph_sdk/_shared/types.py | """Type aliases and constants."""
from __future__ import annotations
TimeoutTypes = (
None
| float
| tuple[float | None, float | None]
| tuple[float | None, float | None, float | None, float | None]
)
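# Illustrative only: the client factories expand TimeoutTypes via httpx.Timeout,
# so a 4-tuple maps to (connect, read, write, pool) seconds, e.g.
#   get_sync_client(url="http://localhost:2024", timeout=(5.0, 300.0, 300.0, 5.0))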
| {
"repo_id": "langchain-ai/langgraph",
"file_path": "libs/sdk-py/langgraph_sdk/_shared/types.py",
"license": "MIT License",
"lines": 8,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_simple |
langchain-ai/langgraph:libs/sdk-py/langgraph_sdk/_shared/utilities.py | """Shared utility functions for async and sync clients."""
from __future__ import annotations
import functools
import os
import re
from collections.abc import Mapping
from typing import Any, cast
import httpx
import langgraph_sdk
from langgraph_sdk.schema import RunCreateMetadata
RESERVED_HEADERS = ("x-api-key",)
NOT_PROVIDED = cast(None, object())
def _get_api_key(api_key: str | None = NOT_PROVIDED) -> str | None:
"""Get the API key from the environment.
Precedence:
1. explicit string argument
2. LANGGRAPH_API_KEY (if api_key not provided)
3. LANGSMITH_API_KEY (if api_key not provided)
4. LANGCHAIN_API_KEY (if api_key not provided)
Args:
api_key: The API key to use. Can be:
- A string: use this exact API key
- None: explicitly skip loading from environment
- NOT_PROVIDED (default): auto-load from environment variables
"""
if isinstance(api_key, str):
return api_key
if api_key is NOT_PROVIDED:
# api_key is not explicitly provided, try to load from environment
for prefix in ["LANGGRAPH", "LANGSMITH", "LANGCHAIN"]:
if env := os.getenv(f"{prefix}_API_KEY"):
return env.strip().strip('"').strip("'")
# api_key is explicitly None, don't load from environment
return None
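# Illustrative (not executed) -- the precedence above in practice:
#   _get_api_key("explicit")  -> "explicit"  (an explicit string always wins)
#   _get_api_key()            -> first of LANGGRAPH/LANGSMITH/LANGCHAIN_API_KEY set in the env
#   _get_api_key(None)        -> None        (explicit None skips the env lookup)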
def _get_headers(
api_key: str | None,
custom_headers: Mapping[str, str] | None,
) -> dict[str, str]:
"""Combine api_key and custom user-provided headers."""
custom_headers = custom_headers or {}
for header in RESERVED_HEADERS:
if header in custom_headers:
raise ValueError(f"Cannot set reserved header '{header}'")
headers = {
"User-Agent": f"langgraph-sdk-py/{langgraph_sdk.__version__}",
**custom_headers,
}
resolved_api_key = _get_api_key(api_key)
if resolved_api_key:
headers["x-api-key"] = resolved_api_key
return headers
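# Illustrative (not executed): custom headers merge over the SDK defaults and the
# resolved API key is attached, while reserved headers are rejected up front:
#   _get_headers("my-key", {"X-Trace-Id": "abc"})
#     -> {"User-Agent": "langgraph-sdk-py/<version>", "X-Trace-Id": "abc", "x-api-key": "my-key"}
#   _get_headers(None, {"x-api-key": "spoof"})  # raises ValueError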
def _orjson_default(obj: Any) -> Any:
is_class = isinstance(obj, type)
if hasattr(obj, "model_dump") and callable(obj.model_dump):
if is_class:
raise TypeError(
f"Cannot JSON-serialize type object: {obj!r}. Did you mean to pass an instance of the object instead?"
f"\nReceived type: {obj!r}"
)
return obj.model_dump()
elif hasattr(obj, "dict") and callable(obj.dict):
if is_class:
raise TypeError(
f"Cannot JSON-serialize type object: {obj!r}. Did you mean to pass an instance of the object instead?"
f"\nReceived type: {obj!r}"
)
return obj.dict()
elif isinstance(obj, (set, frozenset)):
return list(obj)
else:
raise TypeError(f"Object of type {type(obj)} is not JSON serializable")
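# Illustrative (not executed): passed to orjson as the fallback encoder, e.g.
#   orjson.dumps({"ids": {"a", "b"}}, default=_orjson_default)
# serializes the set as a JSON array; pydantic-style instances go through
# model_dump()/dict(), and bare type objects raise a TypeError.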
# Compiled regex pattern for extracting run metadata from Content-Location header
_RUN_METADATA_PATTERN = re.compile(
r"(\/threads\/(?P<thread_id>.+))?\/runs\/(?P<run_id>.+)"
)
def _get_run_metadata_from_response(
response: httpx.Response,
) -> RunCreateMetadata | None:
"""Extract run metadata from the response headers."""
if (content_location := response.headers.get("Content-Location")) and (
match := _RUN_METADATA_PATTERN.search(content_location)
):
return RunCreateMetadata(
run_id=match.group("run_id"),
thread_id=match.group("thread_id") or None,
)
return None
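# Illustrative matches for the pattern above (not executed):
#   "/threads/t1/runs/r1" -> thread_id="t1", run_id="r1"
#   "/runs/r2"            -> thread_id=None, run_id="r2" (stateless run)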
def _provided_vals(d: Mapping[str, Any]) -> dict[str, Any]:
return {k: v for k, v in d.items() if v is not None}
_registered_transports: list[httpx.ASGITransport] = []
# Do not move; this is used in the server.
def configure_loopback_transports(app: Any) -> None:
for transport in _registered_transports:
transport.app = app
@functools.lru_cache(maxsize=1)
def get_asgi_transport() -> type[httpx.ASGITransport]:
try:
from langgraph_api import asgi_transport # type: ignore[unresolved-import]
return asgi_transport.ASGITransport
except ImportError:
# Older versions of the server
return httpx.ASGITransport
| {
"repo_id": "langchain-ai/langgraph",
"file_path": "libs/sdk-py/langgraph_sdk/_shared/utilities.py",
"license": "MIT License",
"lines": 102,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | function_complex |
langchain-ai/langgraph:libs/sdk-py/langgraph_sdk/_sync/assistants.py | """Synchronous client for managing assistants in LangGraph."""
from __future__ import annotations
from collections.abc import Mapping
from typing import Any, Literal, cast, overload
import httpx
from langgraph_sdk._sync.http import SyncHttpClient
from langgraph_sdk.schema import (
Assistant,
AssistantSelectField,
AssistantSortBy,
AssistantsSearchResponse,
AssistantVersion,
Config,
Context,
GraphSchema,
Json,
OnConflictBehavior,
QueryParamTypes,
SortOrder,
Subgraphs,
)
class SyncAssistantsClient:
"""Client for managing assistants in LangGraph synchronously.
This class provides methods to interact with assistants, which are versioned configurations of your graph.
???+ example "Example"
```python
client = get_sync_client(url="http://localhost:2024")
assistant = client.assistants.get("assistant_id_123")
```
"""
def __init__(self, http: SyncHttpClient) -> None:
self.http = http
def get(
self,
assistant_id: str,
*,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> Assistant:
"""Get an assistant by ID.
Args:
assistant_id: The ID of the assistant to get OR the name of the graph (to use the default assistant).
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
`Assistant` Object.
???+ example "Example Usage"
```python
assistant = client.assistants.get(
assistant_id="my_assistant_id"
)
print(assistant)
```
```shell
----------------------------------------------------
{
'assistant_id': 'my_assistant_id',
'graph_id': 'agent',
'created_at': '2024-06-25T17:10:33.109781+00:00',
'updated_at': '2024-06-25T17:10:33.109781+00:00',
'config': {},
'context': {},
'metadata': {'created_by': 'system'}
}
```
"""
return self.http.get(
f"/assistants/{assistant_id}", headers=headers, params=params
)
def get_graph(
self,
assistant_id: str,
*,
xray: int | bool = False,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> dict[str, list[dict[str, Any]]]:
"""Get the graph of an assistant by ID.
Args:
assistant_id: The ID of the assistant to get the graph of.
xray: Include graph representation of subgraphs. If an integer value is provided, only subgraphs with a depth less than or equal to the value will be included.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
The graph information for the assistant in JSON format.
???+ example "Example Usage"
```python
client = get_sync_client(url="http://localhost:2024")
graph_info = client.assistants.get_graph(
assistant_id="my_assistant_id"
)
print(graph_info)
--------------------------------------------------------------------------------------------------------------------------
{
'nodes':
[
{'id': '__start__', 'type': 'schema', 'data': '__start__'},
{'id': '__end__', 'type': 'schema', 'data': '__end__'},
{'id': 'agent','type': 'runnable','data': {'id': ['langgraph', 'utils', 'RunnableCallable'],'name': 'agent'}},
],
'edges':
[
{'source': '__start__', 'target': 'agent'},
{'source': 'agent','target': '__end__'}
]
}
```
"""
query_params = {"xray": xray}
if params:
query_params.update(params)
return self.http.get(
f"/assistants/{assistant_id}/graph", params=query_params, headers=headers
)
def get_schemas(
self,
assistant_id: str,
*,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> GraphSchema:
"""Get the schemas of an assistant by ID.
Args:
assistant_id: The ID of the assistant to get the schema of.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
GraphSchema: The graph schema for the assistant.
???+ example "Example Usage"
```python
client = get_sync_client(url="http://localhost:2024")
schema = client.assistants.get_schemas(
assistant_id="my_assistant_id"
)
print(schema)
```
```shell
----------------------------------------------------------------------------------------------------------------------------
{
'graph_id': 'agent',
'state_schema':
{
'title': 'LangGraphInput',
'$ref': '#/definitions/AgentState',
'definitions':
{
'BaseMessage':
{
'title': 'BaseMessage',
'description': 'Base abstract Message class. Messages are the inputs and outputs of ChatModels.',
'type': 'object',
'properties':
{
'content':
{
'title': 'Content',
'anyOf': [
{'type': 'string'},
{'type': 'array','items': {'anyOf': [{'type': 'string'}, {'type': 'object'}]}}
]
},
'additional_kwargs':
{
'title': 'Additional Kwargs',
'type': 'object'
},
'response_metadata':
{
'title': 'Response Metadata',
'type': 'object'
},
'type':
{
'title': 'Type',
'type': 'string'
},
'name':
{
'title': 'Name',
'type': 'string'
},
'id':
{
'title': 'Id',
'type': 'string'
}
},
'required': ['content', 'type']
},
'AgentState':
{
'title': 'AgentState',
'type': 'object',
'properties':
{
'messages':
{
'title': 'Messages',
'type': 'array',
'items': {'$ref': '#/definitions/BaseMessage'}
}
},
'required': ['messages']
}
}
},
'config_schema':
{
'title': 'Configurable',
'type': 'object',
'properties':
{
'model_name':
{
'title': 'Model Name',
'enum': ['anthropic', 'openai'],
'type': 'string'
}
}
},
'context_schema':
{
'title': 'Context',
'type': 'object',
'properties':
{
'model_name':
{
'title': 'Model Name',
'enum': ['anthropic', 'openai'],
'type': 'string'
}
}
}
}
```
"""
return self.http.get(
f"/assistants/{assistant_id}/schemas", headers=headers, params=params
)
def get_subgraphs(
self,
assistant_id: str,
namespace: str | None = None,
recurse: bool = False,
*,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> Subgraphs:
"""Get the schemas of an assistant by ID.
Args:
assistant_id: The ID of the assistant to get the schema of.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
Subgraphs: The graph schema for the assistant.
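???+ example "Example Usage"
A minimal sketch (assistant ID illustrative):
```python
client = get_sync_client(url="http://localhost:2024")
subgraphs = client.assistants.get_subgraphs(
assistant_id="my_assistant_id",
recurse=True,
)
```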
"""
get_params = {"recurse": recurse}
if params:
get_params = {**get_params, **dict(params)}
if namespace is not None:
return self.http.get(
f"/assistants/{assistant_id}/subgraphs/{namespace}",
params=get_params,
headers=headers,
)
else:
return self.http.get(
f"/assistants/{assistant_id}/subgraphs",
params=get_params,
headers=headers,
)
def create(
self,
graph_id: str | None,
config: Config | None = None,
*,
context: Context | None = None,
metadata: Json = None,
assistant_id: str | None = None,
if_exists: OnConflictBehavior | None = None,
name: str | None = None,
headers: Mapping[str, str] | None = None,
description: str | None = None,
params: QueryParamTypes | None = None,
) -> Assistant:
"""Create a new assistant.
Useful when graph is configurable and you want to create different assistants based on different configurations.
Args:
graph_id: The ID of the graph the assistant should use. The graph ID is normally set in your langgraph.json configuration.
config: Configuration to use for the graph.
context: Static context to add to the assistant.
!!! version-added "Added in version 0.6.0"
metadata: Metadata to add to assistant.
assistant_id: Assistant ID to use, will default to a random UUID if not provided.
if_exists: How to handle duplicate creation. Defaults to 'raise' under the hood.
Must be either 'raise' (raise error if duplicate), or 'do_nothing' (return existing assistant).
name: The name of the assistant. Defaults to 'Untitled' under the hood.
headers: Optional custom headers to include with the request.
description: Optional description of the assistant.
The description field is available for langgraph-api server version >= 0.0.45.
params: Optional query parameters to include with the request.
Returns:
The created assistant.
???+ example "Example Usage"
```python
client = get_sync_client(url="http://localhost:2024")
assistant = client.assistants.create(
graph_id="agent",
context={"model_name": "openai"},
metadata={"number":1},
assistant_id="my-assistant-id",
if_exists="do_nothing",
name="my_name"
)
```
"""
payload: dict[str, Any] = {
"graph_id": graph_id,
}
if config:
payload["config"] = config
if context:
payload["context"] = context
if metadata:
payload["metadata"] = metadata
if assistant_id:
payload["assistant_id"] = assistant_id
if if_exists:
payload["if_exists"] = if_exists
if name:
payload["name"] = name
if description:
payload["description"] = description
return self.http.post(
"/assistants", json=payload, headers=headers, params=params
)
def update(
self,
assistant_id: str,
*,
graph_id: str | None = None,
config: Config | None = None,
context: Context | None = None,
metadata: Json = None,
name: str | None = None,
headers: Mapping[str, str] | None = None,
description: str | None = None,
params: QueryParamTypes | None = None,
) -> Assistant:
"""Update an assistant.
Use this to point to a different graph, update the configuration, or change the metadata of an assistant.
Args:
assistant_id: Assistant to update.
graph_id: The ID of the graph the assistant should use.
The graph ID is normally set in your langgraph.json configuration. If `None`, assistant will keep pointing to same graph.
config: Configuration to use for the graph.
context: Static context to add to the assistant.
!!! version-added "Added in version 0.6.0"
metadata: Metadata to merge with existing assistant metadata.
name: The new name for the assistant.
headers: Optional custom headers to include with the request.
description: Optional description of the assistant.
The description field is available for langgraph-api server version >= 0.0.45.
Returns:
The updated assistant.
???+ example "Example Usage"
```python
client = get_sync_client(url="http://localhost:2024")
assistant = client.assistants.update(
assistant_id='e280dad7-8618-443f-87f1-8e41841c180f',
graph_id="other-graph",
context={"model_name": "anthropic"},
metadata={"number":2}
)
```
"""
payload: dict[str, Any] = {}
if graph_id:
payload["graph_id"] = graph_id
if config is not None:
payload["config"] = config
if context is not None:
payload["context"] = context
if metadata:
payload["metadata"] = metadata
if name:
payload["name"] = name
if description:
payload["description"] = description
return self.http.patch(
f"/assistants/{assistant_id}",
json=payload,
headers=headers,
params=params,
)
def delete(
self,
assistant_id: str,
*,
delete_threads: bool = False,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> None:
"""Delete an assistant.
Args:
assistant_id: The assistant ID to delete.
delete_threads: If true, delete all threads with `metadata.assistant_id`
matching this assistant, along with runs and checkpoints belonging to
those threads.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
`None`
???+ example "Example Usage"
```python
client = get_sync_client(url="http://localhost:2024")
client.assistants.delete(
assistant_id="my_assistant_id"
)
```
"""
query_params: dict[str, Any] = {}
if delete_threads:
query_params["delete_threads"] = True
if params:
query_params.update(params)
self.http.delete(
f"/assistants/{assistant_id}",
headers=headers,
params=query_params or None,
)
@overload
def search(
self,
*,
metadata: Json = None,
graph_id: str | None = None,
name: str | None = None,
limit: int = 10,
offset: int = 0,
sort_by: AssistantSortBy | None = None,
sort_order: SortOrder | None = None,
select: list[AssistantSelectField] | None = None,
response_format: Literal["object"],
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> AssistantsSearchResponse: ...
@overload
def search(
self,
*,
metadata: Json = None,
graph_id: str | None = None,
name: str | None = None,
limit: int = 10,
offset: int = 0,
sort_by: AssistantSortBy | None = None,
sort_order: SortOrder | None = None,
select: list[AssistantSelectField] | None = None,
response_format: Literal["array"] = "array",
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> list[Assistant]: ...
def search(
self,
*,
metadata: Json = None,
graph_id: str | None = None,
name: str | None = None,
limit: int = 10,
offset: int = 0,
sort_by: AssistantSortBy | None = None,
sort_order: SortOrder | None = None,
select: list[AssistantSelectField] | None = None,
response_format: Literal["array", "object"] = "array",
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> AssistantsSearchResponse | list[Assistant]:
"""Search for assistants.
Args:
metadata: Metadata to filter by. Exact match filter for each KV pair.
graph_id: The ID of the graph to filter by.
The graph ID is normally set in your langgraph.json configuration.
name: The name of the assistant to filter by.
The filtering logic will match assistants where 'name' is a substring (case insensitive) of the assistant name.
limit: The maximum number of results to return.
offset: The number of results to skip.
sort_by: The field to sort by.
sort_order: The order to sort by.
select: Specific assistant fields to include in the response.
response_format: Controls the response shape. Use `"array"` (default)
to return a bare list of assistants, or `"object"` to return
a mapping containing assistants plus pagination metadata.
Defaults to "array", though this default will be changed to "object" in a future release.
headers: Optional custom headers to include with the request.
Returns:
A list of assistants (when `response_format="array"`) or a mapping
with the assistants and the next pagination cursor (when
`response_format="object"`).
???+ example "Example Usage"
```python
client = get_sync_client(url="http://localhost:2024")
response = client.assistants.search(
metadata = {"name":"my_name"},
graph_id="my_graph_id",
limit=5,
offset=5,
response_format="object",
)
assistants = response["assistants"]
next_cursor = response["next"]
```
"""
if response_format not in ("array", "object"):
raise ValueError("response_format must be 'array' or 'object'")
payload: dict[str, Any] = {
"limit": limit,
"offset": offset,
}
if metadata:
payload["metadata"] = metadata
if graph_id:
payload["graph_id"] = graph_id
if name:
payload["name"] = name
if sort_by:
payload["sort_by"] = sort_by
if sort_order:
payload["sort_order"] = sort_order
if select:
payload["select"] = select
next_cursor: str | None = None
def capture_pagination(response: httpx.Response) -> None:
nonlocal next_cursor
next_cursor = response.headers.get("X-Pagination-Next")
assistants = cast(
list[Assistant],
self.http.post(
"/assistants/search",
json=payload,
headers=headers,
params=params,
on_response=capture_pagination if response_format == "object" else None,
),
)
if response_format == "object":
return {"assistants": assistants, "next": next_cursor}
return assistants
def count(
self,
*,
metadata: Json = None,
graph_id: str | None = None,
name: str | None = None,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> int:
"""Count assistants matching filters.
Args:
metadata: Metadata to filter by. Exact match for each key/value.
graph_id: Optional graph id to filter by.
name: Optional name to filter by.
The filtering logic will match assistants where 'name' is a substring (case insensitive) of the assistant name.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
int: Number of assistants matching the criteria.
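???+ example "Example Usage"
A minimal sketch; the filter values are illustrative:
```python
client = get_sync_client(url="http://localhost:2024")
count = client.assistants.count(
graph_id="agent",
metadata={"created_by": "system"},
)
print(count)
```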
"""
payload: dict[str, Any] = {}
if metadata:
payload["metadata"] = metadata
if graph_id:
payload["graph_id"] = graph_id
if name:
payload["name"] = name
return self.http.post(
"/assistants/count", json=payload, headers=headers, params=params
)
def get_versions(
self,
assistant_id: str,
metadata: Json = None,
limit: int = 10,
offset: int = 0,
*,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> list[AssistantVersion]:
"""List all versions of an assistant.
Args:
assistant_id: The assistant ID to get versions for.
metadata: Metadata to filter versions by. Exact match filter for each KV pair.
limit: The maximum number of versions to return.
offset: The number of versions to skip.
headers: Optional custom headers to include with the request.
Returns:
A list of assistants.
???+ example "Example Usage"
```python
client = get_sync_client(url="http://localhost:2024")
assistant_versions = client.assistants.get_versions(
assistant_id="my_assistant_id"
)
```
"""
payload: dict[str, Any] = {
"limit": limit,
"offset": offset,
}
if metadata:
payload["metadata"] = metadata
return self.http.post(
f"/assistants/{assistant_id}/versions",
json=payload,
headers=headers,
params=params,
)
def set_latest(
self,
assistant_id: str,
version: int,
*,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> Assistant:
"""Change the version of an assistant.
Args:
assistant_id: The assistant ID to update.
version: The version to change to.
headers: Optional custom headers to include with the request.
Returns:
`Assistant` Object.
???+ example "Example Usage"
```python
client = get_sync_client(url="http://localhost:2024")
new_version_assistant = client.assistants.set_latest(
assistant_id="my_assistant_id",
version=3
)
```
"""
payload: dict[str, Any] = {"version": version}
return self.http.post(
f"/assistants/{assistant_id}/latest",
json=payload,
headers=headers,
params=params,
)
| {
"repo_id": "langchain-ai/langgraph",
"file_path": "libs/sdk-py/langgraph_sdk/_sync/assistants.py",
"license": "MIT License",
"lines": 649,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
langchain-ai/langgraph:libs/sdk-py/langgraph_sdk/_sync/client.py | """Sync LangGraph client."""
from __future__ import annotations
from collections.abc import Mapping
from types import TracebackType
import httpx
from langgraph_sdk._shared.types import TimeoutTypes
from langgraph_sdk._shared.utilities import NOT_PROVIDED, _get_headers
from langgraph_sdk._sync.assistants import SyncAssistantsClient
from langgraph_sdk._sync.cron import SyncCronClient
from langgraph_sdk._sync.http import SyncHttpClient
from langgraph_sdk._sync.runs import SyncRunsClient
from langgraph_sdk._sync.store import SyncStoreClient
from langgraph_sdk._sync.threads import SyncThreadsClient
def get_sync_client(
*,
url: str | None = None,
api_key: str | None = NOT_PROVIDED,
headers: Mapping[str, str] | None = None,
timeout: TimeoutTypes | None = None,
) -> SyncLangGraphClient:
"""Get a synchronous LangGraphClient instance.
Args:
url: The URL of the LangGraph API.
api_key: API key for authentication. Can be:
- A string: use this exact API key
- `None`: explicitly skip loading from environment variables
- Not provided (default): auto-load from environment in this order:
1. `LANGGRAPH_API_KEY`
2. `LANGSMITH_API_KEY`
3. `LANGCHAIN_API_KEY`
headers: Optional custom headers
timeout: Optional timeout configuration for the HTTP client.
Accepts an httpx.Timeout instance, a float (seconds), or a tuple of timeouts.
Tuple format is (connect, read, write, pool)
If not provided, defaults to connect=5s, read=300s, write=300s, and pool=5s.
Returns:
SyncLangGraphClient: The top-level synchronous client for accessing AssistantsClient,
ThreadsClient, RunsClient, and CronClient.
???+ example "Example"
```python
from langgraph_sdk import get_sync_client
# get top-level synchronous LangGraphClient
client = get_sync_client(url="http://localhost:8123")
# example usage: client.<model>.<method_name>()
assistant = client.assistants.get(assistant_id="some_uuid")
```
???+ example "Skip auto-loading API key from environment:"
```python
from langgraph_sdk import get_sync_client
# Don't load API key from environment variables
client = get_sync_client(
url="http://localhost:8123",
api_key=None
)
```
"""
if url is None:
url = "http://localhost:8123"
transport = httpx.HTTPTransport(retries=5)
client = httpx.Client(
base_url=url,
transport=transport,
timeout=(
httpx.Timeout(timeout) # type: ignore[arg-type]
if timeout is not None
else httpx.Timeout(connect=5, read=300, write=300, pool=5)
),
headers=_get_headers(api_key, headers),
)
return SyncLangGraphClient(client)
class SyncLangGraphClient:
"""Synchronous client for interacting with the LangGraph API.
This class provides synchronous access to LangGraph API endpoints for managing
assistants, threads, runs, cron jobs, and data storage.
???+ example "Example"
```python
client = get_sync_client(url="http://localhost:2024")
assistant = client.assistants.get("asst_123")
```
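The client can also be used as a context manager, which closes the underlying httpx client on exit:
```python
with get_sync_client(url="http://localhost:2024") as client:
    assistant = client.assistants.get("asst_123")
```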
"""
def __init__(self, client: httpx.Client) -> None:
self.http = SyncHttpClient(client)
self.assistants = SyncAssistantsClient(self.http)
self.threads = SyncThreadsClient(self.http)
self.runs = SyncRunsClient(self.http)
self.crons = SyncCronClient(self.http)
self.store = SyncStoreClient(self.http)
def __enter__(self) -> SyncLangGraphClient:
"""Enter the sync context manager."""
return self
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> None:
"""Exit the sync context manager."""
self.close()
def close(self) -> None:
"""Close the underlying HTTP client."""
if hasattr(self, "http"):
self.http.client.close()
| {
"repo_id": "langchain-ai/langgraph",
"file_path": "libs/sdk-py/langgraph_sdk/_sync/client.py",
"license": "MIT License",
"lines": 102,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
langchain-ai/langgraph:libs/sdk-py/langgraph_sdk/_sync/cron.py | """Synchronous cron client for LangGraph SDK."""
from __future__ import annotations
import warnings
from collections.abc import Mapping, Sequence
from datetime import datetime
from typing import Any
from langgraph_sdk._sync.http import SyncHttpClient
from langgraph_sdk.schema import (
All,
Config,
Context,
Cron,
CronSelectField,
CronSortBy,
Durability,
Input,
OnCompletionBehavior,
QueryParamTypes,
Run,
SortOrder,
StreamMode,
)
class SyncCronClient:
"""Synchronous client for managing cron jobs in LangGraph.
This class provides methods to create and manage scheduled tasks (cron jobs) for automated graph executions.
???+ example "Example"
```python
client = get_sync_client(url="http://localhost:8123")
cron_job = client.crons.create_for_thread(thread_id="thread_123", assistant_id="asst_456", schedule="0 * * * *")
```
!!! note "Feature Availability"
The crons client functionality is not supported on all licenses.
Please check the relevant license documentation for the most up-to-date
details on feature availability.
"""
def __init__(self, http_client: SyncHttpClient) -> None:
self.http = http_client
def create_for_thread(
self,
thread_id: str,
assistant_id: str,
*,
schedule: str,
input: Input | None = None,
metadata: Mapping[str, Any] | None = None,
config: Config | None = None,
context: Context | None = None,
checkpoint_during: bool | None = None, # deprecated
interrupt_before: All | list[str] | None = None,
interrupt_after: All | list[str] | None = None,
webhook: str | None = None,
multitask_strategy: str | None = None,
end_time: datetime | None = None,
enabled: bool | None = None,
stream_mode: StreamMode | Sequence[StreamMode] | None = None,
stream_subgraphs: bool | None = None,
stream_resumable: bool | None = None,
durability: Durability | None = None,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> Run:
"""Create a cron job for a thread.
Args:
thread_id: the thread ID to run the cron job on.
assistant_id: The assistant ID or graph name to use for the cron job.
If using graph name, will default to first assistant created from that graph.
schedule: The cron schedule to execute this job on.
Schedules are interpreted in UTC.
input: The input to the graph.
metadata: Metadata to assign to the cron job runs.
config: The configuration for the assistant.
context: Static context to add to the assistant.
!!! version-added "Added in version 0.6.0"
checkpoint_during: (deprecated) Whether to checkpoint during the run (or only at the end/interruption).
interrupt_before: Nodes to interrupt immediately before they get executed.
interrupt_after: Nodes to interrupt immediately after they get executed.
webhook: Webhook to call after LangGraph API call is done.
multitask_strategy: Multitask strategy to use.
Must be one of 'reject', 'interrupt', 'rollback', or 'enqueue'.
end_time: The time to stop running the cron job. If not provided, the cron job will run indefinitely.
enabled: Whether the cron job is enabled. By default, it is considered enabled.
stream_mode: The stream mode(s) to use.
stream_subgraphs: Whether to stream output from subgraphs.
stream_resumable: Whether to persist the stream chunks in order to resume the stream later.
durability: Durability level for the run. Must be one of 'sync', 'async', or 'exit'.
"async" means checkpoints are persisted async while next graph step executes, replaces checkpoint_during=True
"sync" means checkpoints are persisted sync after graph step executes, replaces checkpoint_during=False
"exit" means checkpoints are only persisted when the run exits, does not save intermediate steps
headers: Optional custom headers to include with the request.
Returns:
The cron `Run`.
???+ example "Example Usage"
```python
client = get_sync_client(url="http://localhost:8123")
cron_run = client.crons.create_for_thread(
thread_id="my-thread-id",
assistant_id="agent",
schedule="27 15 * * *",
input={"messages": [{"role": "user", "content": "hello!"}]},
metadata={"name":"my_run"},
context={"model_name": "openai"},
interrupt_before=["node_to_stop_before_1","node_to_stop_before_2"],
interrupt_after=["node_to_stop_after_1","node_to_stop_after_2"],
webhook="https://my.fake.webhook.com",
multitask_strategy="interrupt",
enabled=True
)
```
"""
if checkpoint_during is not None:
warnings.warn(
"`checkpoint_during` is deprecated and will be removed in a future version. Use `durability` instead.",
DeprecationWarning,
stacklevel=2,
)
payload = {
"schedule": schedule,
"input": input,
"config": config,
"metadata": metadata,
"context": context,
"assistant_id": assistant_id,
"interrupt_before": interrupt_before,
"interrupt_after": interrupt_after,
"checkpoint_during": checkpoint_during,
"webhook": webhook,
"multitask_strategy": multitask_strategy,
"end_time": end_time.isoformat() if end_time else None,
"enabled": enabled,
"stream_mode": stream_mode,
"stream_subgraphs": stream_subgraphs,
"stream_resumable": stream_resumable,
"durability": durability,
}
payload = {k: v for k, v in payload.items() if v is not None}
return self.http.post(
f"/threads/{thread_id}/runs/crons",
json=payload,
headers=headers,
params=params,
)
def create(
self,
assistant_id: str,
*,
schedule: str,
input: Input | None = None,
metadata: Mapping[str, Any] | None = None,
config: Config | None = None,
context: Context | None = None,
checkpoint_during: bool | None = None, # deprecated
interrupt_before: All | list[str] | None = None,
interrupt_after: All | list[str] | None = None,
webhook: str | None = None,
on_run_completed: OnCompletionBehavior | None = None,
multitask_strategy: str | None = None,
end_time: datetime | None = None,
enabled: bool | None = None,
stream_mode: StreamMode | Sequence[StreamMode] | None = None,
stream_subgraphs: bool | None = None,
stream_resumable: bool | None = None,
durability: Durability | None = None,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> Run:
"""Create a cron run.
Args:
assistant_id: The assistant ID or graph name to use for the cron job.
If using graph name, will default to first assistant created from that graph.
schedule: The cron schedule to execute this job on.
Schedules are interpreted in UTC.
input: The input to the graph.
metadata: Metadata to assign to the cron job runs.
config: The configuration for the assistant.
context: Static context to add to the assistant.
!!! version-added "Added in version 0.6.0"
checkpoint_during: (deprecated) Whether to checkpoint during the run (or only at the end/interruption).
interrupt_before: Nodes to interrupt immediately before they get executed.
interrupt_after: Nodes to interrupt immediately after they get executed.
webhook: Webhook to call after LangGraph API call is done.
on_run_completed: What to do with the thread after the run completes.
Must be one of 'delete' (default) or 'keep'. 'delete' removes the thread
after execution. 'keep' creates a new thread for each execution but does not
clean them up. Clients are responsible for cleaning up kept threads.
multitask_strategy: Multitask strategy to use.
Must be one of 'reject', 'interrupt', 'rollback', or 'enqueue'.
end_time: The time to stop running the cron job. If not provided, the cron job will run indefinitely.
enabled: Whether the cron job is enabled. By default, it is considered enabled.
stream_mode: The stream mode(s) to use.
stream_subgraphs: Whether to stream output from subgraphs.
stream_resumable: Whether to persist the stream chunks in order to resume the stream later.
durability: Durability level for the run. Must be one of 'sync', 'async', or 'exit'.
"async" means checkpoints are persisted async while next graph step executes, replaces checkpoint_during=True
"sync" means checkpoints are persisted sync after graph step executes, replaces checkpoint_during=False
"exit" means checkpoints are only persisted when the run exits, does not save intermediate steps
headers: Optional custom headers to include with the request.
Returns:
The cron `Run`.
???+ example "Example Usage"
```python
client = get_sync_client(url="http://localhost:8123")
cron_run = client.crons.create(
assistant_id="agent",
schedule="27 15 * * *",
input={"messages": [{"role": "user", "content": "hello!"}]},
metadata={"name":"my_run"},
context={"model_name": "openai"},
checkpoint_during=True,
interrupt_before=["node_to_stop_before_1","node_to_stop_before_2"],
interrupt_after=["node_to_stop_after_1","node_to_stop_after_2"],
webhook="https://my.fake.webhook.com",
multitask_strategy="interrupt",
enabled=True
)
```
"""
if checkpoint_during is not None:
warnings.warn(
"`checkpoint_during` is deprecated and will be removed in a future version. Use `durability` instead.",
DeprecationWarning,
stacklevel=2,
)
payload = {
"schedule": schedule,
"input": input,
"config": config,
"metadata": metadata,
"context": context,
"assistant_id": assistant_id,
"interrupt_before": interrupt_before,
"interrupt_after": interrupt_after,
"webhook": webhook,
"checkpoint_during": checkpoint_during,
"on_run_completed": on_run_completed,
"multitask_strategy": multitask_strategy,
"end_time": end_time.isoformat() if end_time else None,
"enabled": enabled,
"stream_mode": stream_mode,
"stream_subgraphs": stream_subgraphs,
"stream_resumable": stream_resumable,
"durability": durability,
}
payload = {k: v for k, v in payload.items() if v is not None}
return self.http.post(
"/runs/crons", json=payload, headers=headers, params=params
)
def delete(
self,
cron_id: str,
*,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> None:
"""Delete a cron.
Args:
cron_id: The cron ID to delete.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
`None`
???+ example "Example Usage"
```python
client = get_sync_client(url="http://localhost:8123")
client.crons.delete(
cron_id="cron_to_delete"
)
```
"""
self.http.delete(f"/runs/crons/{cron_id}", headers=headers, params=params)
def update(
self,
cron_id: str,
*,
schedule: str | None = None,
end_time: datetime | None = None,
input: Input | None = None,
metadata: Mapping[str, Any] | None = None,
config: Config | None = None,
context: Context | None = None,
webhook: str | None = None,
interrupt_before: All | list[str] | None = None,
interrupt_after: All | list[str] | None = None,
on_run_completed: OnCompletionBehavior | None = None,
enabled: bool | None = None,
stream_mode: StreamMode | Sequence[StreamMode] | None = None,
stream_subgraphs: bool | None = None,
stream_resumable: bool | None = None,
durability: Durability | None = None,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> Cron:
"""Update a cron job by ID.
Args:
cron_id: The cron ID to update.
schedule: The cron schedule to execute this job on.
Schedules are interpreted in UTC.
end_time: The end date to stop running the cron.
input: The input to the graph.
metadata: Metadata to assign to the cron job runs.
config: The configuration for the assistant.
context: Static context added to the assistant.
webhook: Webhook to call after LangGraph API call is done.
interrupt_before: Nodes to interrupt immediately before they get executed.
interrupt_after: Nodes to interrupt immediately after they get executed.
on_run_completed: What to do with the thread after the run completes.
Must be one of 'delete' or 'keep'. 'delete' removes the thread
after execution. 'keep' creates a new thread for each execution but does not
clean them up.
enabled: Enable or disable the cron job.
stream_mode: The stream mode(s) to use.
stream_subgraphs: Whether to stream output from subgraphs.
stream_resumable: Whether to persist the stream chunks in order to resume the stream later.
durability: Durability level for the run. Must be one of 'sync', 'async', or 'exit'.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
The updated cron job.
???+ example "Example Usage"
```python
client = get_sync_client(url="http://localhost:8123")
updated_cron = client.crons.update(
cron_id="1ef3cefa-4c09-6926-96d0-3dc97fd5e39b",
schedule="0 10 * * *",
enabled=False,
)
```
"""
payload = {
"schedule": schedule,
"end_time": end_time.isoformat() if end_time else None,
"input": input,
"metadata": metadata,
"config": config,
"context": context,
"webhook": webhook,
"interrupt_before": interrupt_before,
"interrupt_after": interrupt_after,
"on_run_completed": on_run_completed,
"enabled": enabled,
"stream_mode": stream_mode,
"stream_subgraphs": stream_subgraphs,
"stream_resumable": stream_resumable,
"durability": durability,
}
payload = {k: v for k, v in payload.items() if v is not None}
return self.http.patch(
f"/runs/crons/{cron_id}",
json=payload,
headers=headers,
params=params,
)
def search(
self,
*,
assistant_id: str | None = None,
thread_id: str | None = None,
enabled: bool | None = None,
limit: int = 10,
offset: int = 0,
sort_by: CronSortBy | None = None,
sort_order: SortOrder | None = None,
select: list[CronSelectField] | None = None,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> list[Cron]:
"""Get a list of cron jobs.
Args:
assistant_id: The assistant ID or graph name to search for.
thread_id: the thread ID to search for.
enabled: Whether the cron job is enabled.
limit: The maximum number of results to return.
offset: The number of results to skip.
headers: Optional custom headers to include with the request.
Returns:
The list of cron jobs returned by the search.
???+ example "Example Usage"
```python
client = get_sync_client(url="http://localhost:8123")
cron_jobs = client.crons.search(
assistant_id="my_assistant_id",
thread_id="my_thread_id",
enabled=True,
limit=5,
offset=5,
)
print(cron_jobs)
```
```shell
----------------------------------------------------------
[
{
'cron_id': '1ef3cefa-4c09-6926-96d0-3dc97fd5e39b',
'assistant_id': 'my_assistant_id',
'thread_id': 'my_thread_id',
'user_id': None,
'payload':
{
'input': {'start_time': ''},
'schedule': '4 * * * *',
'assistant_id': 'my_assistant_id'
},
'schedule': '4 * * * *',
'next_run_date': '2024-07-25T17:04:00+00:00',
'end_time': None,
'created_at': '2024-07-08T06:02:23.073257+00:00',
'updated_at': '2024-07-08T06:02:23.073257+00:00'
}
]
```
"""
payload = {
"assistant_id": assistant_id,
"thread_id": thread_id,
"enabled": enabled,
"limit": limit,
"offset": offset,
}
if sort_by:
payload["sort_by"] = sort_by
if sort_order:
payload["sort_order"] = sort_order
if select:
payload["select"] = select
payload = {k: v for k, v in payload.items() if v is not None}
return self.http.post(
"/runs/crons/search", json=payload, headers=headers, params=params
)
def count(
self,
*,
assistant_id: str | None = None,
thread_id: str | None = None,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> int:
"""Count cron jobs matching filters.
Args:
assistant_id: Assistant ID to filter by.
thread_id: Thread ID to filter by.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
int: Number of crons matching the criteria.
"""
"""
payload: dict[str, Any] = {}
if assistant_id:
payload["assistant_id"] = assistant_id
if thread_id:
payload["thread_id"] = thread_id
return self.http.post(
"/runs/crons/count", json=payload, headers=headers, params=params
)
| {
"repo_id": "langchain-ai/langgraph",
"file_path": "libs/sdk-py/langgraph_sdk/_sync/cron.py",
"license": "MIT License",
"lines": 452,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
langchain-ai/langgraph:libs/sdk-py/langgraph_sdk/_sync/http.py | """Synchronous HTTP client for LangGraph API."""
from __future__ import annotations
import logging
import sys
import warnings
from collections.abc import Callable, Iterator, Mapping
from typing import Any, cast
import httpx
import orjson
from langgraph_sdk._shared.utilities import _orjson_default
from langgraph_sdk.errors import _raise_for_status_typed
from langgraph_sdk.schema import QueryParamTypes, StreamPart
from langgraph_sdk.sse import SSEDecoder, iter_lines_raw
logger = logging.getLogger(__name__)
class SyncHttpClient:
"""Handle synchronous requests to the LangGraph API.
Provides error messaging and content handling enhancements above the
underlying httpx client, mirroring the interface of [HttpClient](#HttpClient)
but for sync usage.
Attributes:
client (httpx.Client): Underlying HTTPX sync client.
"""
def __init__(self, client: httpx.Client) -> None:
self.client = client
def get(
self,
path: str,
*,
params: QueryParamTypes | None = None,
headers: Mapping[str, str] | None = None,
on_response: Callable[[httpx.Response], None] | None = None,
) -> Any:
"""Send a `GET` request."""
r = self.client.get(path, params=params, headers=headers)
if on_response:
on_response(r)
_raise_for_status_typed(r)
return _decode_json(r)
def post(
self,
path: str,
*,
json: dict[str, Any] | list | None,
params: QueryParamTypes | None = None,
headers: Mapping[str, str] | None = None,
on_response: Callable[[httpx.Response], None] | None = None,
) -> Any:
"""Send a `POST` request."""
if json is not None:
request_headers, content = _encode_json(json)
else:
request_headers, content = {}, b""
if headers:
request_headers.update(headers)
r = self.client.post(
path, headers=request_headers, content=content, params=params
)
if on_response:
on_response(r)
_raise_for_status_typed(r)
return _decode_json(r)
def put(
self,
path: str,
*,
json: dict,
params: QueryParamTypes | None = None,
headers: Mapping[str, str] | None = None,
on_response: Callable[[httpx.Response], None] | None = None,
) -> Any:
"""Send a `PUT` request."""
request_headers, content = _encode_json(json)
if headers:
request_headers.update(headers)
r = self.client.put(
path, headers=request_headers, content=content, params=params
)
if on_response:
on_response(r)
_raise_for_status_typed(r)
return _decode_json(r)
def patch(
self,
path: str,
*,
json: dict,
params: QueryParamTypes | None = None,
headers: Mapping[str, str] | None = None,
on_response: Callable[[httpx.Response], None] | None = None,
) -> Any:
"""Send a `PATCH` request."""
request_headers, content = _encode_json(json)
if headers:
request_headers.update(headers)
r = self.client.patch(
path, headers=request_headers, content=content, params=params
)
if on_response:
on_response(r)
_raise_for_status_typed(r)
return _decode_json(r)
def delete(
self,
path: str,
*,
json: Any | None = None,
params: QueryParamTypes | None = None,
headers: Mapping[str, str] | None = None,
on_response: Callable[[httpx.Response], None] | None = None,
) -> None:
"""Send a `DELETE` request."""
r = self.client.request(
"DELETE", path, json=json, params=params, headers=headers
)
if on_response:
on_response(r)
_raise_for_status_typed(r)
def request_reconnect(
self,
path: str,
method: str,
*,
json: dict[str, Any] | None = None,
params: QueryParamTypes | None = None,
headers: Mapping[str, str] | None = None,
on_response: Callable[[httpx.Response], None] | None = None,
reconnect_limit: int = 5,
) -> Any:
"""Send a request that automatically reconnects to Location header."""
request_headers, content = _encode_json(json)
if headers:
request_headers.update(headers)
with self.client.stream(
method, path, headers=request_headers, content=content, params=params
) as r:
if on_response:
on_response(r)
try:
r.raise_for_status()
except httpx.HTTPStatusError as e:
body = r.read().decode()
if sys.version_info >= (3, 11):
e.add_note(body)
else:
logger.error(f"Error from langgraph-api: {body}", exc_info=e)
raise e
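# If the server advertised a Location header, a failed body read below is
# retried against that URL with a plain GET, bounded by reconnect_limit.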
loc = r.headers.get("location")
if reconnect_limit <= 0 or not loc:
return _decode_json(r)
try:
return _decode_json(r)
except httpx.HTTPError:
warnings.warn(
f"Request failed, attempting reconnect to Location: {loc}",
stacklevel=2,
)
r.close()
return self.request_reconnect(
loc,
"GET",
headers=request_headers,
# don't pass on_response so it's only called once
reconnect_limit=reconnect_limit - 1,
)
def stream(
self,
path: str,
method: str,
*,
json: dict[str, Any] | None = None,
params: QueryParamTypes | None = None,
headers: Mapping[str, str] | None = None,
on_response: Callable[[httpx.Response], None] | None = None,
) -> Iterator[StreamPart]:
"""Stream the results of a request using SSE."""
if json is not None:
request_headers, content = _encode_json(json)
else:
request_headers, content = {}, None
request_headers["Accept"] = "text/event-stream"
request_headers["Cache-Control"] = "no-store"
if headers:
request_headers.update(headers)
reconnect_headers = {
key: value
for key, value in request_headers.items()
if key.lower() not in {"content-length", "content-type"}
}
last_event_id: str | None = None
reconnect_path: str | None = None
reconnect_attempts = 0
max_reconnect_attempts = 5
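# Reconnection loop: once the server advertises a Location header, transient
# transport errors mid-stream trigger a GET retry against that location with
# the Last-Event-ID header set, so the stream resumes where it left off.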
while True:
current_headers = dict(
request_headers if reconnect_path is None else reconnect_headers
)
if last_event_id is not None:
current_headers["Last-Event-ID"] = last_event_id
current_method = method if reconnect_path is None else "GET"
current_content = content if reconnect_path is None else None
current_params = params if reconnect_path is None else None
retry = False
with self.client.stream(
current_method,
reconnect_path or path,
headers=current_headers,
content=current_content,
params=current_params,
) as res:
if reconnect_path is None and on_response:
on_response(res)
# check status
_raise_for_status_typed(res)
# check content type
content_type = res.headers.get("content-type", "").partition(";")[0]
if "text/event-stream" not in content_type:
raise httpx.TransportError(
"Expected response header Content-Type to contain 'text/event-stream', "
f"got {content_type!r}"
)
reconnect_location = res.headers.get("location")
if reconnect_location:
reconnect_path = reconnect_location
decoder = SSEDecoder()
try:
for line in iter_lines_raw(res):
sse = decoder.decode(cast(bytes, line).rstrip(b"\n"))
if sse is not None:
if decoder.last_event_id is not None:
last_event_id = decoder.last_event_id
if sse.event or sse.data is not None:
yield sse
except httpx.HTTPError:
# httpx.TransportError inherits from HTTPError, so transient
# disconnects during streaming land here.
if reconnect_path is None:
raise
retry = True
else:
if sse := decoder.decode(b""):
if decoder.last_event_id is not None:
last_event_id = decoder.last_event_id
if sse.event or sse.data is not None:
# See async stream implementation for rationale on
# skipping empty flush events.
yield sse
if retry:
reconnect_attempts += 1
if reconnect_attempts > max_reconnect_attempts:
raise httpx.TransportError(
"Exceeded maximum SSE reconnection attempts"
)
continue
break
def _encode_json(json: Any) -> tuple[dict[str, str], bytes]:
body = orjson.dumps(
json,
_orjson_default,
orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_NON_STR_KEYS,
)
content_length = str(len(body))
content_type = "application/json"
headers = {"Content-Length": content_length, "Content-Type": content_type}
return headers, body
def _decode_json(r: httpx.Response) -> Any:
body = r.read()
return orjson.loads(body) if body else None
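# A minimal round-trip sketch of the two helpers above (illustrative only):
#
#   headers, body = _encode_json({"limit": 5})
#   assert headers["Content-Type"] == "application/json"
#   assert orjson.loads(body) == {"limit": 5}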
langchain-ai/langgraph:libs/sdk-py/langgraph_sdk/_sync/runs.py
"""Synchronous client for managing runs in LangGraph."""
from __future__ import annotations
import builtins
import warnings
from collections.abc import Callable, Iterator, Mapping, Sequence
from typing import Any, overload
import httpx
from langgraph_sdk._shared.utilities import _get_run_metadata_from_response
from langgraph_sdk._sync.http import SyncHttpClient
from langgraph_sdk.schema import (
All,
BulkCancelRunsStatus,
CancelAction,
Checkpoint,
Command,
Config,
Context,
DisconnectMode,
Durability,
IfNotExists,
Input,
MultitaskStrategy,
OnCompletionBehavior,
QueryParamTypes,
Run,
RunCreate,
RunCreateMetadata,
RunSelectField,
RunStatus,
StreamMode,
StreamPart,
)
class SyncRunsClient:
"""Synchronous client for managing runs in LangGraph.
This class provides methods to create, retrieve, and manage runs, which represent
individual executions of graphs.
???+ example "Example"
```python
client = get_sync_client(url="http://localhost:2024")
run = client.runs.create(thread_id="thread_123", assistant_id="asst_456")
```
"""
def __init__(self, http: SyncHttpClient) -> None:
self.http = http
@overload
def stream(
self,
thread_id: str,
assistant_id: str,
*,
input: Input | None = None,
command: Command | None = None,
stream_mode: StreamMode | Sequence[StreamMode] = "values",
stream_subgraphs: bool = False,
metadata: Mapping[str, Any] | None = None,
config: Config | None = None,
context: Context | None = None,
checkpoint: Checkpoint | None = None,
checkpoint_id: str | None = None,
checkpoint_during: bool | None = None,
interrupt_before: All | Sequence[str] | None = None,
interrupt_after: All | Sequence[str] | None = None,
feedback_keys: Sequence[str] | None = None,
on_disconnect: DisconnectMode | None = None,
webhook: str | None = None,
multitask_strategy: MultitaskStrategy | None = None,
if_not_exists: IfNotExists | None = None,
after_seconds: int | None = None,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
on_run_created: Callable[[RunCreateMetadata], None] | None = None,
) -> Iterator[StreamPart]: ...
@overload
def stream(
self,
thread_id: None,
assistant_id: str,
*,
input: Input | None = None,
command: Command | None = None,
stream_mode: StreamMode | Sequence[StreamMode] = "values",
stream_subgraphs: bool = False,
stream_resumable: bool = False,
metadata: Mapping[str, Any] | None = None,
config: Config | None = None,
context: Context | None = None,
checkpoint_during: bool | None = None,
interrupt_before: All | Sequence[str] | None = None,
interrupt_after: All | Sequence[str] | None = None,
feedback_keys: Sequence[str] | None = None,
on_disconnect: DisconnectMode | None = None,
on_completion: OnCompletionBehavior | None = None,
if_not_exists: IfNotExists | None = None,
webhook: str | None = None,
after_seconds: int | None = None,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
on_run_created: Callable[[RunCreateMetadata], None] | None = None,
) -> Iterator[StreamPart]: ...
def stream(
self,
thread_id: str | None,
assistant_id: str,
*,
input: Input | None = None,
command: Command | None = None,
stream_mode: StreamMode | Sequence[StreamMode] = "values",
stream_subgraphs: bool = False,
stream_resumable: bool = False,
metadata: Mapping[str, Any] | None = None,
config: Config | None = None,
context: Context | None = None,
checkpoint: Checkpoint | None = None,
checkpoint_id: str | None = None,
checkpoint_during: bool | None = None, # deprecated
interrupt_before: All | Sequence[str] | None = None,
interrupt_after: All | Sequence[str] | None = None,
feedback_keys: Sequence[str] | None = None,
on_disconnect: DisconnectMode | None = None,
on_completion: OnCompletionBehavior | None = None,
webhook: str | None = None,
multitask_strategy: MultitaskStrategy | None = None,
if_not_exists: IfNotExists | None = None,
after_seconds: int | None = None,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
on_run_created: Callable[[RunCreateMetadata], None] | None = None,
durability: Durability | None = None,
) -> Iterator[StreamPart]:
"""Create a run and stream the results.
Args:
thread_id: the thread ID to assign to the thread.
If `None` will create a stateless run.
assistant_id: The assistant ID or graph name to stream from.
If using graph name, will default to first assistant created from that graph.
input: The input to the graph.
command: The command to execute.
stream_mode: The stream mode(s) to use.
stream_subgraphs: Whether to stream output from subgraphs.
stream_resumable: Whether the stream is considered resumable.
If true, the stream can be resumed and replayed in its entirety even after disconnection.
metadata: Metadata to assign to the run.
config: The configuration for the assistant.
context: Static context to add to the assistant.
!!! version-added "Added in version 0.6.0"
checkpoint: The checkpoint to resume from.
checkpoint_during: (deprecated) Whether to checkpoint during the run (or only at the end/interruption).
interrupt_before: Nodes to interrupt immediately before they get executed.
interrupt_after: Nodes to interrupt immediately after they get executed.
feedback_keys: Feedback keys to assign to run.
on_disconnect: The disconnect mode to use.
Must be one of 'cancel' or 'continue'.
on_completion: Whether to delete or keep the thread created for a stateless run.
Must be one of 'delete' or 'keep'.
webhook: Webhook to call after LangGraph API call is done.
multitask_strategy: Multitask strategy to use.
Must be one of 'reject', 'interrupt', 'rollback', or 'enqueue'.
if_not_exists: How to handle missing thread. Defaults to 'reject'.
Must be either 'reject' (raise error if missing), or 'create' (create new thread).
after_seconds: The number of seconds to wait before starting the run.
Use to schedule future runs.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
on_run_created: Optional callback to call when a run is created.
durability: The durability to use for the run. Values are "sync", "async", or "exit".
"async" persists checkpoints asynchronously while the next graph step executes (replaces checkpoint_during=True).
"sync" persists checkpoints synchronously after each graph step (replaces checkpoint_during=False).
"exit" persists checkpoints only when the run exits, without saving intermediate steps.
Returns:
Iterator of stream results.
???+ example "Example Usage"
```python
client = get_sync_client(url="http://localhost:2024")
for chunk in client.runs.stream(
thread_id=None,
assistant_id="agent",
input={"messages": [{"role": "user", "content": "how are you?"}]},
stream_mode=["values","debug"],
metadata={"name":"my_run"},
context={"model_name": "anthropic"},
interrupt_before=["node_to_stop_before_1","node_to_stop_before_2"],
interrupt_after=["node_to_stop_after_1","node_to_stop_after_2"],
feedback_keys=["my_feedback_key_1","my_feedback_key_2"],
webhook="https://my.fake.webhook.com",
multitask_strategy="interrupt"
):
print(chunk)
```
```shell
------------------------------------------------------------------------------------------
StreamPart(event='metadata', data={'run_id': '1ef4a9b8-d7da-679a-a45a-872054341df2'})
StreamPart(event='values', data={'messages': [{'content': 'how are you?', 'additional_kwargs': {}, 'response_metadata': {}, 'type': 'human', 'name': None, 'id': 'fe0a5778-cfe9-42ee-b807-0adaa1873c10', 'example': False}]})
StreamPart(event='values', data={'messages': [{'content': 'how are you?', 'additional_kwargs': {}, 'response_metadata': {}, 'type': 'human', 'name': None, 'id': 'fe0a5778-cfe9-42ee-b807-0adaa1873c10', 'example': False}, {'content': "I'm doing well, thanks for asking! I'm an AI assistant created by Anthropic to be helpful, honest, and harmless.", 'additional_kwargs': {}, 'response_metadata': {}, 'type': 'ai', 'name': None, 'id': 'run-159b782c-b679-4830-83c6-cef87798fe8b', 'example': False, 'tool_calls': [], 'invalid_tool_calls': [], 'usage_metadata': None}]})
StreamPart(event='end', data=None)
```
"""
if checkpoint_during is not None:
warnings.warn(
"`checkpoint_during` is deprecated and will be removed in a future version. Use `durability` instead.",
DeprecationWarning,
stacklevel=2,
)
payload = {
"input": input,
"command": (
{k: v for k, v in command.items() if v is not None} if command else None
),
"config": config,
"context": context,
"metadata": metadata,
"stream_mode": stream_mode,
"stream_subgraphs": stream_subgraphs,
"stream_resumable": stream_resumable,
"assistant_id": assistant_id,
"interrupt_before": interrupt_before,
"interrupt_after": interrupt_after,
"feedback_keys": feedback_keys,
"webhook": webhook,
"checkpoint": checkpoint,
"checkpoint_id": checkpoint_id,
"checkpoint_during": checkpoint_during,
"multitask_strategy": multitask_strategy,
"if_not_exists": if_not_exists,
"on_disconnect": on_disconnect,
"on_completion": on_completion,
"after_seconds": after_seconds,
"durability": durability,
}
endpoint = (
f"/threads/{thread_id}/runs/stream"
if thread_id is not None
else "/runs/stream"
)
def on_response(res: httpx.Response):
"""Callback function to handle the response."""
if on_run_created and (metadata := _get_run_metadata_from_response(res)):
on_run_created(metadata)
return self.http.stream(
endpoint,
"POST",
json={k: v for k, v in payload.items() if v is not None},
params=params,
headers=headers,
on_response=on_response if on_run_created else None,
)
@overload
def create(
self,
thread_id: None,
assistant_id: str,
*,
input: Input | None = None,
command: Command | None = None,
stream_mode: StreamMode | Sequence[StreamMode] = "values",
stream_subgraphs: bool = False,
stream_resumable: bool = False,
metadata: Mapping[str, Any] | None = None,
config: Config | None = None,
context: Context | None = None,
checkpoint_during: bool | None = None,
interrupt_before: All | Sequence[str] | None = None,
interrupt_after: All | Sequence[str] | None = None,
webhook: str | None = None,
on_completion: OnCompletionBehavior | None = None,
if_not_exists: IfNotExists | None = None,
after_seconds: int | None = None,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
on_run_created: Callable[[RunCreateMetadata], None] | None = None,
) -> Run: ...
@overload
def create(
self,
thread_id: str,
assistant_id: str,
*,
input: Input | None = None,
command: Command | None = None,
stream_mode: StreamMode | Sequence[StreamMode] = "values",
stream_subgraphs: bool = False,
stream_resumable: bool = False,
metadata: Mapping[str, Any] | None = None,
config: Config | None = None,
context: Context | None = None,
checkpoint: Checkpoint | None = None,
checkpoint_id: str | None = None,
checkpoint_during: bool | None = None,
interrupt_before: All | Sequence[str] | None = None,
interrupt_after: All | Sequence[str] | None = None,
webhook: str | None = None,
multitask_strategy: MultitaskStrategy | None = None,
if_not_exists: IfNotExists | None = None,
after_seconds: int | None = None,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
on_run_created: Callable[[RunCreateMetadata], None] | None = None,
) -> Run: ...
def create(
self,
thread_id: str | None,
assistant_id: str,
*,
input: Input | None = None,
command: Command | None = None,
stream_mode: StreamMode | Sequence[StreamMode] = "values",
stream_subgraphs: bool = False,
stream_resumable: bool = False,
metadata: Mapping[str, Any] | None = None,
config: Config | None = None,
context: Context | None = None,
checkpoint: Checkpoint | None = None,
checkpoint_id: str | None = None,
checkpoint_during: bool | None = None, # deprecated
interrupt_before: All | Sequence[str] | None = None,
interrupt_after: All | Sequence[str] | None = None,
webhook: str | None = None,
multitask_strategy: MultitaskStrategy | None = None,
if_not_exists: IfNotExists | None = None,
on_completion: OnCompletionBehavior | None = None,
after_seconds: int | None = None,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
on_run_created: Callable[[RunCreateMetadata], None] | None = None,
durability: Durability | None = None,
) -> Run:
"""Create a background run.
Args:
thread_id: the thread ID to assign to the thread.
If `None` will create a stateless run.
assistant_id: The assistant ID or graph name to stream from.
If using graph name, will default to first assistant created from that graph.
input: The input to the graph.
command: The command to execute.
stream_mode: The stream mode(s) to use.
stream_subgraphs: Whether to stream output from subgraphs.
stream_resumable: Whether the stream is considered resumable.
If true, the stream can be resumed and replayed in its entirety even after disconnection.
metadata: Metadata to assign to the run.
config: The configuration for the assistant.
context: Static context to add to the assistant.
!!! version-added "Added in version 0.6.0"
checkpoint: The checkpoint to resume from.
checkpoint_during: (deprecated) Whether to checkpoint during the run (or only at the end/interruption).
interrupt_before: Nodes to interrupt immediately before they get executed.
interrupt_after: Nodes to interrupt immediately after they get executed.
webhook: Webhook to call after LangGraph API call is done.
multitask_strategy: Multitask strategy to use.
Must be one of 'reject', 'interrupt', 'rollback', or 'enqueue'.
on_completion: Whether to delete or keep the thread created for a stateless run.
Must be one of 'delete' or 'keep'.
if_not_exists: How to handle missing thread. Defaults to 'reject'.
Must be either 'reject' (raise error if missing), or 'create' (create new thread).
after_seconds: The number of seconds to wait before starting the run.
Use to schedule future runs.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
on_run_created: Optional callback to call when a run is created.
durability: The durability to use for the run. Values are "sync", "async", or "exit".
"async" persists checkpoints asynchronously while the next graph step executes (replaces checkpoint_during=True).
"sync" persists checkpoints synchronously after each graph step (replaces checkpoint_during=False).
"exit" persists checkpoints only when the run exits, without saving intermediate steps.
Returns:
The created background `Run`.
???+ example "Example Usage"
```python
client = get_sync_client(url="http://localhost:2024")
background_run = client.runs.create(
thread_id="my_thread_id",
assistant_id="my_assistant_id",
input={"messages": [{"role": "user", "content": "hello!"}]},
metadata={"name":"my_run"},
context={"model_name": "openai"},
interrupt_before=["node_to_stop_before_1","node_to_stop_before_2"],
interrupt_after=["node_to_stop_after_1","node_to_stop_after_2"],
webhook="https://my.fake.webhook.com",
multitask_strategy="interrupt"
)
print(background_run)
```
```shell
--------------------------------------------------------------------------------
{
'run_id': 'my_run_id',
'thread_id': 'my_thread_id',
'assistant_id': 'my_assistant_id',
'created_at': '2024-07-25T15:35:42.598503+00:00',
'updated_at': '2024-07-25T15:35:42.598503+00:00',
'metadata': {},
'status': 'pending',
'kwargs':
{
'input':
{
'messages': [
{
'role': 'user',
'content': 'how are you?'
}
]
},
'config':
{
'metadata':
{
'created_by': 'system'
},
'configurable':
{
'run_id': 'my_run_id',
'user_id': None,
'graph_id': 'agent',
'thread_id': 'my_thread_id',
'checkpoint_id': None,
'assistant_id': 'my_assistant_id'
}
},
'context':
{
'model_name': 'openai'
},
'webhook': "https://my.fake.webhook.com",
'temporary': False,
'stream_mode': ['values'],
'feedback_keys': None,
'interrupt_after': ["node_to_stop_after_1","node_to_stop_after_2"],
'interrupt_before': ["node_to_stop_before_1","node_to_stop_before_2"]
},
'multitask_strategy': 'interrupt'
}
```
"""
if checkpoint_during is not None:
warnings.warn(
"`checkpoint_during` is deprecated and will be removed in a future version. Use `durability` instead.",
DeprecationWarning,
stacklevel=2,
)
payload = {
"input": input,
"command": (
{k: v for k, v in command.items() if v is not None} if command else None
),
"stream_mode": stream_mode,
"stream_subgraphs": stream_subgraphs,
"stream_resumable": stream_resumable,
"config": config,
"context": context,
"metadata": metadata,
"assistant_id": assistant_id,
"interrupt_before": interrupt_before,
"interrupt_after": interrupt_after,
"webhook": webhook,
"checkpoint": checkpoint,
"checkpoint_id": checkpoint_id,
"checkpoint_during": checkpoint_during,
"multitask_strategy": multitask_strategy,
"if_not_exists": if_not_exists,
"on_completion": on_completion,
"after_seconds": after_seconds,
"durability": durability,
}
payload = {k: v for k, v in payload.items() if v is not None}
def on_response(res: httpx.Response):
"""Callback function to handle the response."""
if on_run_created and (metadata := _get_run_metadata_from_response(res)):
on_run_created(metadata)
return self.http.post(
f"/threads/{thread_id}/runs" if thread_id else "/runs",
json=payload,
params=params,
headers=headers,
on_response=on_response if on_run_created else None,
)
def create_batch(
self,
payloads: builtins.list[RunCreate],
*,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> builtins.list[Run]:
"""Create a batch of stateless background runs."""
def filter_payload(payload: RunCreate):
return {k: v for k, v in payload.items() if v is not None}
filtered = [filter_payload(payload) for payload in payloads]
return self.http.post(
"/runs/batch", json=filtered, headers=headers, params=params
)
@overload
def wait(
self,
thread_id: str,
assistant_id: str,
*,
input: Input | None = None,
command: Command | None = None,
metadata: Mapping[str, Any] | None = None,
config: Config | None = None,
context: Context | None = None,
checkpoint: Checkpoint | None = None,
checkpoint_id: str | None = None,
checkpoint_during: bool | None = None,
interrupt_before: All | Sequence[str] | None = None,
interrupt_after: All | Sequence[str] | None = None,
webhook: str | None = None,
on_disconnect: DisconnectMode | None = None,
multitask_strategy: MultitaskStrategy | None = None,
if_not_exists: IfNotExists | None = None,
after_seconds: int | None = None,
raise_error: bool = True,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
on_run_created: Callable[[RunCreateMetadata], None] | None = None,
) -> builtins.list[dict] | dict[str, Any]: ...
@overload
def wait(
self,
thread_id: None,
assistant_id: str,
*,
input: Input | None = None,
command: Command | None = None,
metadata: Mapping[str, Any] | None = None,
config: Config | None = None,
context: Context | None = None,
checkpoint_during: bool | None = None,
interrupt_before: All | Sequence[str] | None = None,
interrupt_after: All | Sequence[str] | None = None,
webhook: str | None = None,
on_disconnect: DisconnectMode | None = None,
on_completion: OnCompletionBehavior | None = None,
if_not_exists: IfNotExists | None = None,
after_seconds: int | None = None,
raise_error: bool = True,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
on_run_created: Callable[[RunCreateMetadata], None] | None = None,
) -> builtins.list[dict] | dict[str, Any]: ...
def wait(
self,
thread_id: str | None,
assistant_id: str,
*,
input: Input | None = None,
command: Command | None = None,
metadata: Mapping[str, Any] | None = None,
config: Config | None = None,
context: Context | None = None,
checkpoint_during: bool | None = None, # deprecated
checkpoint: Checkpoint | None = None,
checkpoint_id: str | None = None,
interrupt_before: All | Sequence[str] | None = None,
interrupt_after: All | Sequence[str] | None = None,
webhook: str | None = None,
on_disconnect: DisconnectMode | None = None,
on_completion: OnCompletionBehavior | None = None,
multitask_strategy: MultitaskStrategy | None = None,
if_not_exists: IfNotExists | None = None,
after_seconds: int | None = None,
raise_error: bool = True,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
on_run_created: Callable[[RunCreateMetadata], None] | None = None,
durability: Durability | None = None,
) -> builtins.list[dict] | dict[str, Any]:
"""Create a run, wait until it finishes and return the final state.
Args:
thread_id: the thread ID to create the run on.
If `None` will create a stateless run.
assistant_id: The assistant ID or graph name to run.
If using graph name, will default to first assistant created from that graph.
input: The input to the graph.
command: The command to execute.
metadata: Metadata to assign to the run.
config: The configuration for the assistant.
context: Static context to add to the assistant.
!!! version-added "Added in version 0.6.0"
checkpoint: The checkpoint to resume from.
checkpoint_during: (deprecated) Whether to checkpoint during the run (or only at the end/interruption).
interrupt_before: Nodes to interrupt immediately before they get executed.
interrupt_after: Nodes to interrupt immediately after they get executed.
webhook: Webhook to call after LangGraph API call is done.
on_disconnect: The disconnect mode to use.
Must be one of 'cancel' or 'continue'.
on_completion: Whether to delete or keep the thread created for a stateless run.
Must be one of 'delete' or 'keep'.
multitask_strategy: Multitask strategy to use.
Must be one of 'reject', 'interrupt', 'rollback', or 'enqueue'.
if_not_exists: How to handle missing thread. Defaults to 'reject'.
Must be either 'reject' (raise error if missing), or 'create' (create new thread).
after_seconds: The number of seconds to wait before starting the run.
Use to schedule future runs.
raise_error: Whether to raise an error if the run fails.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
on_run_created: Optional callback to call when a run is created.
durability: The durability to use for the run. Values are "sync", "async", or "exit".
"async" persists checkpoints asynchronously while the next graph step executes (replaces checkpoint_during=True).
"sync" persists checkpoints synchronously after each graph step (replaces checkpoint_during=False).
"exit" persists checkpoints only when the run exits, without saving intermediate steps.
Returns:
The output of the `Run`.
???+ example "Example Usage"
```python
final_state_of_run = client.runs.wait(
thread_id=None,
assistant_id="agent",
input={"messages": [{"role": "user", "content": "how are you?"}]},
metadata={"name":"my_run"},
context={"model_name": "anthropic"},
interrupt_before=["node_to_stop_before_1","node_to_stop_before_2"],
interrupt_after=["node_to_stop_after_1","node_to_stop_after_2"],
webhook="https://my.fake.webhook.com",
multitask_strategy="interrupt"
)
print(final_state_of_run)
```
```shell
-------------------------------------------------------------------------------------------------------------------------------------------
{
'messages': [
{
'content': 'how are you?',
'additional_kwargs': {},
'response_metadata': {},
'type': 'human',
'name': None,
'id': 'f51a862c-62fe-4866-863b-b0863e8ad78a',
'example': False
},
{
'content': "I'm doing well, thanks for asking! I'm an AI assistant created by Anthropic to be helpful, honest, and harmless.",
'additional_kwargs': {},
'response_metadata': {},
'type': 'ai',
'name': None,
'id': 'run-bf1cd3c6-768f-4c16-b62d-ba6f17ad8b36',
'example': False,
'tool_calls': [],
'invalid_tool_calls': [],
'usage_metadata': None
}
]
}
```
"""
if checkpoint_during is not None:
warnings.warn(
"`checkpoint_during` is deprecated and will be removed in a future version. Use `durability` instead.",
DeprecationWarning,
stacklevel=2,
)
payload = {
"input": input,
"command": (
{k: v for k, v in command.items() if v is not None} if command else None
),
"config": config,
"context": context,
"metadata": metadata,
"assistant_id": assistant_id,
"interrupt_before": interrupt_before,
"interrupt_after": interrupt_after,
"webhook": webhook,
"checkpoint": checkpoint,
"checkpoint_id": checkpoint_id,
"multitask_strategy": multitask_strategy,
"if_not_exists": if_not_exists,
"on_disconnect": on_disconnect,
"checkpoint_during": checkpoint_during,
"on_completion": on_completion,
"after_seconds": after_seconds,
"raise_error": raise_error,
"durability": durability,
}
def on_response(res: httpx.Response):
"""Callback function to handle the response."""
if on_run_created and (metadata := _get_run_metadata_from_response(res)):
on_run_created(metadata)
endpoint = (
f"/threads/{thread_id}/runs/wait" if thread_id is not None else "/runs/wait"
)
return self.http.request_reconnect(
endpoint,
"POST",
json={k: v for k, v in payload.items() if v is not None},
params=params,
headers=headers,
on_response=on_response if on_run_created else None,
)
def list(
self,
thread_id: str,
*,
limit: int = 10,
offset: int = 0,
status: RunStatus | None = None,
select: builtins.list[RunSelectField] | None = None,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> builtins.list[Run]:
"""List runs.
Args:
thread_id: The thread ID to list runs for.
limit: The maximum number of results to return.
offset: The number of results to skip.
status: Filter runs by status.
select: List of run fields to include in the response.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
The runs for the thread.
???+ example "Example Usage"
```python
client = get_sync_client(url="http://localhost:2024")
client.runs.list(
thread_id="thread_id",
limit=5,
offset=5,
)
```
"""
query_params: dict[str, Any] = {"limit": limit, "offset": offset}
if status is not None:
query_params["status"] = status
if select:
query_params["select"] = select
if params:
query_params.update(params)
return self.http.get(
f"/threads/{thread_id}/runs", params=query_params, headers=headers
)
def get(
self,
thread_id: str,
run_id: str,
*,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> Run:
"""Get a run.
Args:
thread_id: The thread ID the run belongs to.
run_id: The run ID to get.
headers: Optional custom headers to include with the request.
Returns:
`Run` object.
???+ example "Example Usage"
```python
run = client.runs.get(
thread_id="thread_id_to_delete",
run_id="run_id_to_delete",
)
```
"""
return self.http.get(
f"/threads/{thread_id}/runs/{run_id}", headers=headers, params=params
)
def cancel(
self,
thread_id: str,
run_id: str,
*,
wait: bool = False,
action: CancelAction = "interrupt",
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> None:
"""Get a run.
Args:
thread_id: The thread ID to cancel.
run_id: The run ID to cancel.
wait: Whether to wait until run has completed.
action: Action to take when cancelling the run. Possible values
are `interrupt` or `rollback`. Default is `interrupt`.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
`None`
???+ example "Example Usage"
```python
client = get_sync_client(url="http://localhost:2024")
client.runs.cancel(
thread_id="thread_id_to_cancel",
run_id="run_id_to_cancel",
wait=True,
action="interrupt"
)
```
"""
query_params = {
"wait": 1 if wait else 0,
"action": action,
}
if params:
query_params.update(params)
if wait:
return self.http.request_reconnect(
f"/threads/{thread_id}/runs/{run_id}/cancel",
"POST",
json=None,
params=query_params,
headers=headers,
)
return self.http.post(
f"/threads/{thread_id}/runs/{run_id}/cancel",
json=None,
params=query_params,
headers=headers,
)
def cancel_many(
self,
*,
thread_id: str | None = None,
run_ids: Sequence[str] | None = None,
status: BulkCancelRunsStatus | None = None,
action: CancelAction = "interrupt",
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> None:
"""Cancel one or more runs.
Can cancel runs by thread ID and run IDs, or by status filter.
Args:
thread_id: The ID of the thread containing runs to cancel.
run_ids: List of run IDs to cancel.
status: Filter runs by status to cancel. Must be one of
`"pending"`, `"running"`, or `"all"`.
action: Action to take when cancelling the run. Possible values
are `"interrupt"` or `"rollback"`. Default is `"interrupt"`.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
`None`
???+ example "Example Usage"
```python
client = get_sync_client(url="http://localhost:2024")
# Cancel all pending runs
client.runs.cancel_many(status="pending")
# Cancel specific runs on a thread
client.runs.cancel_many(
thread_id="my_thread_id",
run_ids=["run_1", "run_2"],
action="rollback",
)
```
"""
payload: dict[str, Any] = {}
if thread_id:
payload["thread_id"] = thread_id
if run_ids:
payload["run_ids"] = run_ids
if status:
payload["status"] = status
query_params: dict[str, Any] = {"action": action}
if params:
query_params.update(params)
self.http.post(
"/runs/cancel",
json=payload,
headers=headers,
params=query_params,
)
def join(
self,
thread_id: str,
run_id: str,
*,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> dict:
"""Block until a run is done. Returns the final state of the thread.
Args:
thread_id: The thread ID to join.
run_id: The run ID to join.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
The final state of the thread once the run completes.
???+ example "Example Usage"
```python
client = get_sync_client(url="http://localhost:2024")
client.runs.join(
thread_id="thread_id_to_join",
run_id="run_id_to_join"
)
```
"""
return self.http.request_reconnect(
f"/threads/{thread_id}/runs/{run_id}/join",
"GET",
headers=headers,
params=params,
)
def join_stream(
self,
thread_id: str,
run_id: str,
*,
cancel_on_disconnect: bool = False,
stream_mode: StreamMode | Sequence[StreamMode] | None = None,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
last_event_id: str | None = None,
) -> Iterator[StreamPart]:
"""Stream output from a run in real-time, until the run is done.
Output is not buffered, so any output produced before this call will
not be received here.
Args:
thread_id: The thread ID to join.
run_id: The run ID to join.
stream_mode: The stream mode(s) to use. Must be a subset of the stream modes passed
when creating the run. Background runs default to having the union of all
stream modes.
cancel_on_disconnect: Whether to cancel the run when the stream is disconnected.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
last_event_id: Resume the stream after this event ID (sent as the `Last-Event-ID` header).
Returns:
An iterator of `StreamPart` objects emitted by the run.
???+ example "Example Usage"
```python
client = get_sync_client(url="http://localhost:2024")
for part in client.runs.join_stream(
    thread_id="thread_id_to_join",
    run_id="run_id_to_join",
    stream_mode=["values", "debug"]
):
    print(part)
```
"""
query_params = {
"stream_mode": stream_mode,
"cancel_on_disconnect": cancel_on_disconnect,
}
if params:
query_params.update(params)
return self.http.stream(
f"/threads/{thread_id}/runs/{run_id}/stream",
"GET",
params=query_params,
headers={
**({"Last-Event-ID": last_event_id} if last_event_id else {}),
**(headers or {}),
}
or None,
)
def delete(
self,
thread_id: str,
run_id: str,
*,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> None:
"""Delete a run.
Args:
thread_id: The thread ID the run belongs to.
run_id: The run ID to delete.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
`None`
???+ example "Example Usage"
```python
client = get_sync_client(url="http://localhost:2024")
client.runs.delete(
thread_id="thread_id_to_delete",
run_id="run_id_to_delete"
)
```
"""
self.http.delete(
f"/threads/{thread_id}/runs/{run_id}", headers=headers, params=params
)
langchain-ai/langgraph:libs/sdk-py/langgraph_sdk/_sync/store.py
"""Synchronous store client for LangGraph SDK."""
from __future__ import annotations
from collections.abc import Mapping, Sequence
from typing import Any, Literal
from langgraph_sdk._shared.utilities import _provided_vals
from langgraph_sdk._sync.http import SyncHttpClient
from langgraph_sdk.schema import (
Item,
ListNamespaceResponse,
QueryParamTypes,
SearchItemsResponse,
)
class SyncStoreClient:
"""A client for synchronous operations on a key-value store.
Provides methods to interact with a remote key-value store, allowing
storage and retrieval of items within namespaced hierarchies.
???+ example "Example"
```python
client = get_sync_client(url="http://localhost:2024")
client.store.put_item(["users", "profiles"], "user123", {"name": "Alice", "age": 30})
```
"""
def __init__(self, http: SyncHttpClient) -> None:
self.http = http
def put_item(
self,
namespace: Sequence[str],
/,
key: str,
value: Mapping[str, Any],
index: Literal[False] | list[str] | None = None,
ttl: int | None = None,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> None:
"""Store or update an item.
Args:
namespace: A list of strings representing the namespace path.
key: The unique identifier for the item within the namespace.
value: A dictionary containing the item's data.
index: Controls search indexing - None (use defaults), False (disable), or list of field paths to index.
ttl: Optional time-to-live in minutes for the item, or None for no expiration.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
`None`
???+ example "Example Usage"
```python
client = get_sync_client(url="http://localhost:8123")
client.store.put_item(
["documents", "user123"],
key="item456",
value={"title": "My Document", "content": "Hello World"}
)
```
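???+ example "Disabling indexing and setting a TTL"
A sketch of the `index` and `ttl` parameters documented above:
```python
client.store.put_item(
    ["documents", "user123"],
    key="item789",
    value={"title": "Draft"},
    index=False,  # skip search indexing for this item
    ttl=60,  # expire after 60 minutes
)
```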
"""
for label in namespace:
if "." in label:
raise ValueError(
f"Invalid namespace label '{label}'. Namespace labels cannot contain periods ('.')."
)
payload = {
"namespace": namespace,
"key": key,
"value": value,
"index": index,
"ttl": ttl,
}
self.http.put(
"/store/items", json=_provided_vals(payload), headers=headers, params=params
)
def get_item(
self,
namespace: Sequence[str],
/,
key: str,
*,
refresh_ttl: bool | None = None,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> Item:
"""Retrieve a single item.
Args:
namespace: A list of strings representing the namespace path.
key: The unique identifier for the item within the namespace.
refresh_ttl: Whether to refresh the TTL on this read operation. If `None`, uses the store's default behavior.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
The retrieved item.
???+ example "Example Usage"
```python
client = get_sync_client(url="http://localhost:8123")
item = client.store.get_item(
["documents", "user123"],
key="item456",
)
print(item)
```
```shell
----------------------------------------------------------------
{
'namespace': ['documents', 'user123'],
'key': 'item456',
'value': {'title': 'My Document', 'content': 'Hello World'},
'created_at': '2024-07-30T12:00:00Z',
'updated_at': '2024-07-30T12:00:00Z'
}
```
"""
for label in namespace:
if "." in label:
raise ValueError(
f"Invalid namespace label '{label}'. Namespace labels cannot contain periods ('.')."
)
query_params = {"key": key, "namespace": ".".join(namespace)}
if refresh_ttl is not None:
query_params["refresh_ttl"] = refresh_ttl
if params:
query_params.update(params)
return self.http.get("/store/items", params=query_params, headers=headers)
def delete_item(
self,
namespace: Sequence[str],
/,
key: str,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> None:
"""Delete an item.
Args:
namespace: A list of strings representing the namespace path.
key: The unique identifier for the item.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
`None`
???+ example "Example Usage"
```python
client = get_sync_client(url="http://localhost:8123")
client.store.delete_item(
["documents", "user123"],
key="item456",
)
```
"""
self.http.delete(
"/store/items",
json={"key": key, "namespace": namespace},
headers=headers,
params=params,
)
def search_items(
self,
namespace_prefix: Sequence[str],
/,
filter: Mapping[str, Any] | None = None,
limit: int = 10,
offset: int = 0,
query: str | None = None,
refresh_ttl: bool | None = None,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> SearchItemsResponse:
"""Search for items within a namespace prefix.
Args:
namespace_prefix: List of strings representing the namespace prefix.
filter: Optional dictionary of key-value pairs to filter results.
limit: Maximum number of items to return (default is 10).
offset: Number of items to skip before returning results (default is 0).
query: Optional query for natural language search.
refresh_ttl: Whether to refresh the TTL on items returned by this search. If `None`, uses the store's default behavior.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
A list of items matching the search criteria.
???+ example "Example Usage"
```python
client = get_sync_client(url="http://localhost:8123")
items = client.store.search_items(
["documents"],
filter={"author": "John Doe"},
limit=5,
offset=0
)
print(items)
```
```shell
----------------------------------------------------------------
{
"items": [
{
"namespace": ["documents", "user123"],
"key": "item789",
"value": {
"title": "Another Document",
"author": "John Doe"
},
"created_at": "2024-07-30T12:00:00Z",
"updated_at": "2024-07-30T12:00:00Z"
},
# ... additional items ...
]
}
```
"""
payload = {
"namespace_prefix": namespace_prefix,
"filter": filter,
"limit": limit,
"offset": offset,
"query": query,
"refresh_ttl": refresh_ttl,
}
return self.http.post(
"/store/items/search",
json=_provided_vals(payload),
headers=headers,
params=params,
)
def list_namespaces(
self,
prefix: list[str] | None = None,
suffix: list[str] | None = None,
max_depth: int | None = None,
limit: int = 100,
offset: int = 0,
*,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> ListNamespaceResponse:
"""List namespaces with optional match conditions.
Args:
prefix: Optional list of strings representing the prefix to filter namespaces.
suffix: Optional list of strings representing the suffix to filter namespaces.
max_depth: Optional integer specifying the maximum depth of namespaces to return.
limit: Maximum number of namespaces to return (default is 100).
offset: Number of namespaces to skip before returning results (default is 0).
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
A list of namespaces matching the criteria.
???+ example "Example Usage"
```python
client = get_sync_client(url="http://localhost:8123")
namespaces = client.store.list_namespaces(
prefix=["documents"],
max_depth=3,
limit=10,
offset=0
)
print(namespaces)
```
```shell
----------------------------------------------------------------
[
["documents", "user123", "reports"],
["documents", "user456", "invoices"],
...
]
```
"""
payload = {
"prefix": prefix,
"suffix": suffix,
"max_depth": max_depth,
"limit": limit,
"offset": offset,
}
return self.http.post(
"/store/namespaces",
json=_provided_vals(payload),
headers=headers,
params=params,
)
langchain-ai/langgraph:libs/sdk-py/langgraph_sdk/_sync/threads.py
"""Synchronous client for managing threads in LangGraph."""
from __future__ import annotations
from collections.abc import Iterator, Mapping, Sequence
from typing import Any
from langgraph_sdk._sync.http import SyncHttpClient
from langgraph_sdk.schema import (
Checkpoint,
Json,
OnConflictBehavior,
PruneStrategy,
QueryParamTypes,
SortOrder,
StreamPart,
Thread,
ThreadSelectField,
ThreadSortBy,
ThreadState,
ThreadStatus,
ThreadStreamMode,
ThreadUpdateStateResponse,
)
class SyncThreadsClient:
"""Synchronous client for managing threads in LangGraph.
This class provides methods to create, retrieve, and manage threads,
which represent conversations or stateful interactions.
???+ example "Example"
```python
client = get_sync_client(url="http://localhost:2024")
thread = client.threads.create(metadata={"user_id": "123"})
```
"""
def __init__(self, http: SyncHttpClient) -> None:
self.http = http
def get(
self,
thread_id: str,
*,
include: Sequence[str] | None = None,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> Thread:
"""Get a thread by ID.
Args:
thread_id: The ID of the thread to get.
include: Additional fields to include in the response.
Supported values: `"ttl"`.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
`Thread` object.
???+ example "Example Usage"
```python
client = get_sync_client(url="http://localhost:2024")
thread = client.threads.get(
thread_id="my_thread_id"
)
print(thread)
```
```shell
-----------------------------------------------------
{
'thread_id': 'my_thread_id',
'created_at': '2024-07-18T18:35:15.540834+00:00',
'updated_at': '2024-07-18T18:35:15.540834+00:00',
'metadata': {'graph_id': 'agent'}
}
```
"""
query_params: dict[str, Any] = {}
if include:
query_params["include"] = ",".join(include)
if params:
query_params.update(params)
return self.http.get(
f"/threads/{thread_id}",
headers=headers,
params=query_params or None,
)
def create(
self,
*,
metadata: Json = None,
thread_id: str | None = None,
if_exists: OnConflictBehavior | None = None,
supersteps: Sequence[dict[str, Sequence[dict[str, Any]]]] | None = None,
graph_id: str | None = None,
ttl: int | Mapping[str, Any] | None = None,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> Thread:
"""Create a new thread.
Args:
metadata: Metadata to add to thread.
thread_id: ID of thread.
If `None`, ID will be a randomly generated UUID.
if_exists: How to handle duplicate creation. Defaults to 'raise' under the hood.
Must be either 'raise' (raise error if duplicate), or 'do_nothing' (return existing thread).
supersteps: Apply a list of supersteps when creating a thread, each containing a sequence of updates.
Each update has `values` or `command` and `as_node`. Used for copying a thread between deployments.
graph_id: Optional graph ID to associate with the thread.
ttl: Optional time-to-live in minutes for the thread. You can pass an
integer (minutes) or a mapping with keys `ttl` and optional
`strategy` (defaults to "delete").
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
The created `Thread`.
???+ example "Example Usage"
```python
client = get_sync_client(url="http://localhost:2024")
thread = client.threads.create(
metadata={"number":1},
thread_id="my-thread-id",
if_exists="raise"
)
```
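???+ example "Copying a thread with supersteps"
A minimal sketch of the `supersteps` parameter; the node name and values are illustrative assumptions:
```python
copied = client.threads.create(
    supersteps=[
        # hypothetical update replayed as if written by the "__start__" node
        {"updates": [{"values": {"messages": []}, "as_node": "__start__"}]},
    ],
)
```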
"""
payload: dict[str, Any] = {}
if thread_id:
payload["thread_id"] = thread_id
if metadata or graph_id:
payload["metadata"] = {
**(metadata or {}),
**({"graph_id": graph_id} if graph_id else {}),
}
if if_exists:
payload["if_exists"] = if_exists
if supersteps:
payload["supersteps"] = [
{
"updates": [
{
"values": u["values"],
"command": u.get("command"),
"as_node": u["as_node"],
}
for u in s["updates"]
]
}
for s in supersteps
]
if ttl is not None:
if isinstance(ttl, (int, float)):
payload["ttl"] = {"ttl": ttl, "strategy": "delete"}
else:
payload["ttl"] = ttl
return self.http.post("/threads", json=payload, headers=headers, params=params)
def update(
self,
thread_id: str,
*,
metadata: Mapping[str, Any],
ttl: int | Mapping[str, Any] | None = None,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> Thread:
"""Update a thread.
Args:
thread_id: ID of thread to update.
metadata: Metadata to merge with existing thread metadata.
ttl: Optional time-to-live in minutes for the thread. You can pass an
integer (minutes) or a mapping with keys `ttl` and optional
`strategy` (defaults to "delete").
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
The created `Thread`.
???+ example "Example Usage"
```python
client = get_sync_client(url="http://localhost:2024")
thread = client.threads.update(
thread_id="my-thread-id",
metadata={"number":1},
ttl=43_200,
)
```
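???+ example "Mapping form of `ttl`"
The same update using the mapping form described above, with the default strategy spelled out explicitly:
```python
thread = client.threads.update(
    thread_id="my-thread-id",
    metadata={"number": 1},
    ttl={"ttl": 43_200, "strategy": "delete"},
)
```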
"""
payload: dict[str, Any] = {"metadata": metadata}
if ttl is not None:
if isinstance(ttl, (int, float)):
payload["ttl"] = {"ttl": ttl, "strategy": "delete"}
else:
payload["ttl"] = ttl
return self.http.patch(
f"/threads/{thread_id}",
json=payload,
headers=headers,
params=params,
)
def delete(
self,
thread_id: str,
*,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> None:
"""Delete a thread.
Args:
thread_id: The ID of the thread to delete.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
`None`
???+ example "Example Usage"
```python
client.threads.delete(
thread_id="my_thread_id"
)
```
"""
self.http.delete(f"/threads/{thread_id}", headers=headers, params=params)
def search(
self,
*,
metadata: Json = None,
values: Json = None,
ids: Sequence[str] | None = None,
status: ThreadStatus | None = None,
limit: int = 10,
offset: int = 0,
sort_by: ThreadSortBy | None = None,
sort_order: SortOrder | None = None,
select: list[ThreadSelectField] | None = None,
extract: dict[str, str] | None = None,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> list[Thread]:
"""Search for threads.
Args:
metadata: Thread metadata to filter on.
values: State values to filter on.
ids: List of thread IDs to filter by.
status: Thread status to filter on.
Must be one of 'idle', 'busy', 'interrupted' or 'error'.
limit: Limit on number of threads to return.
offset: Offset in threads table to start search from.
sort_by: Sort by field.
sort_order: Sort order.
select: List of fields to include in the response.
extract: Dictionary mapping aliases to JSONB paths to extract
from thread data. Paths use dot notation for nested keys and
bracket notation for array indices (e.g.,
`{"last_msg": "values.messages[-1]"}`). Extracted values are
returned in an `extracted` field on each thread. Maximum 10
paths per request.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
List of the threads matching the search parameters.
???+ example "Example Usage"
```python
client = get_sync_client(url="http://localhost:2024")
threads = client.threads.search(
metadata={"number":1},
status="interrupted",
limit=15,
offset=5
)
```
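???+ example "Extracting values"
A minimal sketch of the `extract` parameter documented above; the alias name is arbitrary:
```python
threads = client.threads.search(
    metadata={"number": 1},
    extract={"last_msg": "values.messages[-1]"},
)
for thread in threads:
    print(thread["extracted"]["last_msg"])
```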
"""
payload: dict[str, Any] = {
"limit": limit,
"offset": offset,
}
if metadata:
payload["metadata"] = metadata
if values:
payload["values"] = values
if ids:
payload["ids"] = ids
if status:
payload["status"] = status
if sort_by:
payload["sort_by"] = sort_by
if sort_order:
payload["sort_order"] = sort_order
if select:
payload["select"] = select
if extract:
payload["extract"] = extract
return self.http.post(
"/threads/search", json=payload, headers=headers, params=params
)
def count(
self,
*,
metadata: Json = None,
values: Json = None,
status: ThreadStatus | None = None,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> int:
"""Count threads matching filters.
Args:
metadata: Thread metadata to filter on.
values: State values to filter on.
status: Thread status to filter on.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
int: Number of threads matching the criteria.
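???+ example "Example Usage"
A minimal sketch, assuming a local deployment:
```python
client = get_sync_client(url="http://localhost:2024")
num_idle = client.threads.count(status="idle")
print(num_idle)
```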
"""
payload: dict[str, Any] = {}
if metadata:
payload["metadata"] = metadata
if values:
payload["values"] = values
if status:
payload["status"] = status
return self.http.post(
"/threads/count", json=payload, headers=headers, params=params
)
def copy(
self,
thread_id: str,
*,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> None:
"""Copy a thread.
Args:
thread_id: The ID of the thread to copy.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
`None`
???+ example "Example Usage"
```python
client = get_sync_client(url="http://localhost:2024")
client.threads.copy(
thread_id="my_thread_id"
)
```
"""
return self.http.post(
f"/threads/{thread_id}/copy", json=None, headers=headers, params=params
)
def prune(
self,
thread_ids: Sequence[str],
*,
strategy: PruneStrategy = "delete",
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> dict[str, Any]:
"""Prune threads by ID.
Args:
thread_ids: List of thread IDs to prune.
strategy: The prune strategy. `"delete"` removes threads entirely.
`"keep_latest"` prunes old checkpoints but keeps threads and their
latest state. Defaults to `"delete"`.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
A dict containing `pruned_count` (number of threads pruned).
???+ example "Example Usage"
```python
client = get_sync_client(url="http://localhost:2024")
result = client.threads.prune(
thread_ids=["thread_1", "thread_2"],
)
print(result) # {'pruned_count': 2}
```
"""
payload: dict[str, Any] = {
"thread_ids": thread_ids,
}
if strategy != "delete":
payload["strategy"] = strategy
return self.http.post(
"/threads/prune", json=payload, headers=headers, params=params
)
def get_state(
self,
thread_id: str,
checkpoint: Checkpoint | None = None,
checkpoint_id: str | None = None, # deprecated
*,
subgraphs: bool = False,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> ThreadState:
"""Get the state of a thread.
Args:
thread_id: The ID of the thread to get the state of.
            checkpoint: The checkpoint to get the state of.
            checkpoint_id: (Deprecated) The checkpoint ID to get the state of.
                Use `checkpoint` instead.
            subgraphs: Whether to include the states of subgraphs.
            headers: Optional custom headers to include with the request.
            params: Optional query parameters to include with the request.
        Returns:
            The state of the thread.
???+ example "Example Usage"
```python
client = get_sync_client(url="http://localhost:2024")
thread_state = client.threads.get_state(
thread_id="my_thread_id",
checkpoint_id="my_checkpoint_id"
)
print(thread_state)
```
```shell
----------------------------------------------------------------------------------------------------------------------------------------------------------------------
{
'values': {
'messages': [
{
'content': 'how are you?',
'additional_kwargs': {},
'response_metadata': {},
'type': 'human',
'name': None,
'id': 'fe0a5778-cfe9-42ee-b807-0adaa1873c10',
'example': False
},
{
'content': "I'm doing well, thanks for asking! I'm an AI assistant created by Anthropic to be helpful, honest, and harmless.",
'additional_kwargs': {},
'response_metadata': {},
'type': 'ai',
'name': None,
'id': 'run-159b782c-b679-4830-83c6-cef87798fe8b',
'example': False,
'tool_calls': [],
'invalid_tool_calls': [],
'usage_metadata': None
}
]
},
'next': [],
'checkpoint':
{
'thread_id': 'e2496803-ecd5-4e0c-a779-3226296181c2',
'checkpoint_ns': '',
'checkpoint_id': '1ef4a9b8-e6fb-67b1-8001-abd5184439d1'
            },
'metadata':
{
'step': 1,
'run_id': '1ef4a9b8-d7da-679a-a45a-872054341df2',
'source': 'loop',
'writes':
{
'agent':
{
'messages': [
{
'id': 'run-159b782c-b679-4830-83c6-cef87798fe8b',
'name': None,
'type': 'ai',
'content': "I'm doing well, thanks for asking! I'm an AI assistant created by Anthropic to be helpful, honest, and harmless.",
'example': False,
'tool_calls': [],
'usage_metadata': None,
'additional_kwargs': {},
'response_metadata': {},
'invalid_tool_calls': []
}
]
}
},
'user_id': None,
'graph_id': 'agent',
'thread_id': 'e2496803-ecd5-4e0c-a779-3226296181c2',
'created_by': 'system',
'assistant_id': 'fe096781-5601-53d2-b2f6-0d3403f7e9ca'},
'created_at': '2024-07-25T15:35:44.184703+00:00',
'parent_config':
{
'thread_id': 'e2496803-ecd5-4e0c-a779-3226296181c2',
'checkpoint_ns': '',
'checkpoint_id': '1ef4a9b8-d80d-6fa7-8000-9300467fad0f'
}
}
```
"""
if checkpoint:
return self.http.post(
f"/threads/{thread_id}/state/checkpoint",
json={"checkpoint": checkpoint, "subgraphs": subgraphs},
headers=headers,
params=params,
)
elif checkpoint_id:
get_params = {"subgraphs": subgraphs}
if params:
get_params = {**get_params, **dict(params)}
return self.http.get(
f"/threads/{thread_id}/state/{checkpoint_id}",
params=get_params,
headers=headers,
)
else:
get_params = {"subgraphs": subgraphs}
if params:
get_params = {**get_params, **dict(params)}
return self.http.get(
f"/threads/{thread_id}/state",
params=get_params,
headers=headers,
)
def update_state(
self,
thread_id: str,
values: dict[str, Any] | Sequence[dict] | None,
*,
as_node: str | None = None,
checkpoint: Checkpoint | None = None,
checkpoint_id: str | None = None, # deprecated
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> ThreadUpdateStateResponse:
"""Update the state of a thread.
Args:
thread_id: The ID of the thread to update.
values: The values to update the state with.
as_node: Update the state as if this node had just executed.
            checkpoint: The checkpoint to update the state of.
            checkpoint_id: (Deprecated) The checkpoint ID to update the state of.
                Use `checkpoint` instead.
            headers: Optional custom headers to include with the request.
            params: Optional query parameters to include with the request.
        Returns:
            Response after updating a thread's state.
???+ example "Example Usage"
```python
            client = get_sync_client(url="http://localhost:2024")
            response = client.threads.update_state(
thread_id="my_thread_id",
values={"messages":[{"role": "user", "content": "hello!"}]},
as_node="my_node",
)
print(response)
----------------------------------------------------------------------------------------------------------------------------------------------------------------------
{
'checkpoint': {
'thread_id': 'e2496803-ecd5-4e0c-a779-3226296181c2',
'checkpoint_ns': '',
'checkpoint_id': '1ef4a9b8-e6fb-67b1-8001-abd5184439d1',
'checkpoint_map': {}
}
}
```
"""
payload: dict[str, Any] = {
"values": values,
}
if checkpoint_id:
payload["checkpoint_id"] = checkpoint_id
if checkpoint:
payload["checkpoint"] = checkpoint
if as_node:
payload["as_node"] = as_node
return self.http.post(
f"/threads/{thread_id}/state", json=payload, headers=headers, params=params
)
def get_history(
self,
thread_id: str,
*,
limit: int = 10,
before: str | Checkpoint | None = None,
metadata: Mapping[str, Any] | None = None,
checkpoint: Checkpoint | None = None,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> list[ThreadState]:
"""Get the state history of a thread.
Args:
thread_id: The ID of the thread to get the state history for.
            checkpoint: Return states for this subgraph. If not provided,
                defaults to the root graph.
limit: The maximum number of states to return.
before: Return states before this checkpoint.
metadata: Filter states by metadata key-value pairs.
            headers: Optional custom headers to include with the request.
            params: Optional query parameters to include with the request.
Returns:
The state history of the `Thread`.
???+ example "Example Usage"
```python
            client = get_sync_client(url="http://localhost:2024")
            thread_states = client.threads.get_history(
                thread_id="my_thread_id",
                limit=5,
                before="my_checkpoint_id",
                metadata={"name": "my_name"}
)
```
"""
payload: dict[str, Any] = {
"limit": limit,
}
if before:
payload["before"] = before
if metadata:
payload["metadata"] = metadata
if checkpoint:
payload["checkpoint"] = checkpoint
return self.http.post(
f"/threads/{thread_id}/history",
json=payload,
headers=headers,
params=params,
)
def join_stream(
self,
thread_id: str,
*,
stream_mode: ThreadStreamMode | Sequence[ThreadStreamMode] = "run_modes",
last_event_id: str | None = None,
headers: Mapping[str, str] | None = None,
params: QueryParamTypes | None = None,
) -> Iterator[StreamPart]:
"""Get a stream of events for a thread.
Args:
thread_id: The ID of the thread to get the stream for.
            stream_mode: The stream mode(s) to subscribe to.
            last_event_id: The ID of the last event received, used to resume
                the stream from that point.
headers: Optional custom headers to include with the request.
params: Optional query parameters to include with the request.
Returns:
An iterator of stream parts.
???+ example "Example Usage"
```python
for chunk in client.threads.join_stream(
thread_id="my_thread_id",
last_event_id="my_event_id",
stream_mode="run_modes",
):
print(chunk)
```
"""
query_params = {
"stream_mode": stream_mode,
}
if params:
query_params.update(params)
return self.http.stream(
f"/threads/{thread_id}/stream",
"GET",
headers={
**({"Last-Event-ID": last_event_id} if last_event_id else {}),
**(headers or {}),
},
params=query_params,
)
| {
"repo_id": "langchain-ai/langgraph",
"file_path": "libs/sdk-py/langgraph_sdk/_sync/threads.py",
"license": "MIT License",
"lines": 637,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
langchain-ai/langgraph:libs/sdk-py/tests/test_client_exports.py | """Test that all expected symbols are exported from langgraph_sdk.client.
This test ensures backwards compatibility during refactoring.
"""
import httpx
from langgraph_sdk import get_client as public_get_client
from langgraph_sdk import get_sync_client as public_get_sync_client
from langgraph_sdk.client import (
AssistantsClient,
CronClient,
HttpClient,
LangGraphClient,
RunsClient,
StoreClient,
SyncAssistantsClient,
SyncCronClient,
SyncHttpClient,
SyncLangGraphClient,
SyncRunsClient,
SyncStoreClient,
SyncThreadsClient,
ThreadsClient,
_adecode_json,
_aencode_json,
_decode_json,
_encode_json,
configure_loopback_transports,
get_client,
get_sync_client,
)
def test_client_exports():
"""Verify all expected symbols can be imported from langgraph_sdk.client."""
# Factory functions (public API)
assert callable(get_client)
assert callable(get_sync_client)
# Top-level client classes
assert LangGraphClient is not None
assert SyncLangGraphClient is not None
# HTTP client classes
assert HttpClient is not None
assert SyncHttpClient is not None
# Resource client classes - Async
assert AssistantsClient is not None
assert ThreadsClient is not None
assert RunsClient is not None
assert CronClient is not None
assert StoreClient is not None
# Resource client classes - Sync
assert SyncAssistantsClient is not None
assert SyncThreadsClient is not None
assert SyncRunsClient is not None
assert SyncCronClient is not None
assert SyncStoreClient is not None
# Internal utilities (used by tests)
assert callable(_aencode_json)
assert callable(_adecode_json)
# Sync JSON utilities (might be used internally)
assert callable(_encode_json)
assert callable(_decode_json)
# Loopback transport configuration (used by langgraph-api)
assert callable(configure_loopback_transports)
def test_public_api_exports():
"""Verify public API exports from langgraph_sdk package."""
assert callable(public_get_client)
assert callable(public_get_sync_client)
def test_client_instantiation():
"""Verify that we can instantiate clients."""
# Test async client instantiation
async_http = httpx.AsyncClient(base_url="http://test.example.com")
async_client = HttpClient(async_http)
assert async_client is not None
# Test sync client instantiation
sync_http = httpx.Client(base_url="http://test.example.com")
sync_client = SyncHttpClient(sync_http)
assert sync_client is not None
| {
"repo_id": "langchain-ai/langgraph",
"file_path": "libs/sdk-py/tests/test_client_exports.py",
"license": "MIT License",
"lines": 74,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langgraph:libs/sdk-py/langgraph_sdk/runtime.py | from __future__ import annotations
import sys
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Generic, Literal, TypeVar
if sys.version_info >= (3, 13):
ContextT = TypeVar("ContextT", default=None)
else:
ContextT = TypeVar("ContextT")
if sys.version_info >= (3, 12):
from typing import TypeAliasType
else:
from typing_extensions import TypeAliasType
from langgraph_sdk.auth.types import BaseUser
if TYPE_CHECKING:
from langgraph.store.base import BaseStore
__all__ = [
"AccessContext",
"ServerRuntime",
]
AccessContext = Literal[
"threads.create_run",
"threads.update",
"threads.read",
"assistants.read",
]
@dataclass(kw_only=True, slots=True, frozen=True)
class _ServerRuntimeBase(Generic[ContextT]):
"""Base for server runtime variants.
!!! warning "Beta"
This API is in beta and may change in future releases.
"""
access_context: AccessContext
"""Why the graph factory is being called.
The server accesses graphs in several contexts beyond just executing runs.
For example, it calls the graph factory to retrieve schemas, render the
graph structure, or read state history. This field tells you which
operation triggered the current call.
In all contexts, the returned graph must have the same topology (nodes,
edges, state schema) as the graph used for execution. Use
`.execution_runtime` to conditionally set up expensive *resources*
(MCP servers, DB connections) without changing the graph structure.
Write contexts (graph is used to write state):
- `threads.create_run` (`graph.astream`) — full graph execution
(nodes + edges). `context` is available (use `.execution_runtime`
to narrow).
- `threads.update` (`graph.aupdate_state`) — does NOT execute node
functions or evaluate edges. Only runs the node's channel writers
to apply the provided values to state channels as if the specified
node had returned them. Reducers are applied and channel triggers
are set, so the next `invoke`/`stream` call will evaluate edges
from that node to determine the next step. Does not need access to
external resources, but a different graph topology will apply
writes to the wrong channels.
Read state contexts (graph used to format the returned
`StateSnapshot`). A different topology may cause `get_state` to
report incorrect pending tasks. Note that `useStream` uses the state
history endpoint to render interrupts and support branching:
- `threads.read` (`graph.aget_state`, `graph.aget_state_history`) —
the graph structure informs which tasks to include in the prepared
view of the latest checkpoint and how to process subgraphs.
Introspection contexts (graph structure only, no execution).
A different topology may cause schemas and visualizations to not
match actual execution:
- `assistants.read` (`graph.aget_graph`, `graph.aget_subgraphs`,
`graph.aget_schemas`) — return the graph definition, subgraph
definitions, and input/output/config schemas. Used for
visualization in the studio UI and to populate schemas for MCP,
A2A, and other protocol integrations.
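    A sketch of branching on the access context directly (prefer
    `.execution_runtime` for execution-only resources; `build_graph` is an
    illustrative helper, not part of this module):
    ```python
    async def my_factory(runtime: ServerRuntime):
        if runtime.access_context == "assistants.read":
            # Introspection only: no need for execution resources.
            return build_graph(lazy=True)
        return build_graph(lazy=False)
    ```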
"""
user: BaseUser | None = field(default=None)
"""The authenticated user, or `None` if no custom auth is configured."""
store: BaseStore
"""Store for the graph run, enabling persistence and memory."""
@property
def execution_runtime(self) -> _ExecutionRuntime[ContextT] | None:
"""Narrow to the execution runtime, or `None` if not in an execution context.
When the server calls the graph factory for `threads.create_run`, the returned
object provides access to `context` (typed by the graph's
`context_schema`). For all other access contexts (introspection, state
reads, state updates), this returns `None`.
Use this to conditionally set up expensive resources (MCP tool servers,
database connections, etc.) that are only needed during execution:
```python
import contextlib
from langgraph_sdk.runtime import ServerRuntime
@contextlib.asynccontextmanager
async def my_factory(runtime: ServerRuntime[MyCtx]):
if ert := runtime.execution_runtime:
# Only connect to MCP servers when actually executing a run.
# Introspection calls (get_schema, get_graph, ...) skip this.
mcp_tools = await connect_mcp(ert.context.mcp_endpoint)
yield create_agent(model, tools=mcp_tools)
await disconnect_mcp()
else:
yield create_agent(model, tools=[])
```
"""
if isinstance(self, _ExecutionRuntime):
return self
return None
def ensure_user(self) -> BaseUser:
"""Return the authenticated user, or raise if not available.
When custom auth is configured, `user` is set for all access contexts
(the factory is only called from HTTP handlers where the auth
middleware has already run). This method raises only when no custom
auth is configured.
Raises:
PermissionError: If no user is authenticated.
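        Example:
            A sketch inside a graph factory (`build_graph_for` is an
            illustrative helper):
            ```python
            async def my_factory(runtime: ServerRuntime):
                user = runtime.ensure_user()  # raises if no custom auth
                return build_graph_for(user.identity)
            ```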
"""
if self.user is None:
raise PermissionError(
f"No authenticated user available in access_context='{self.access_context}'. "
"Ensure custom auth is configured for the server."
)
return self.user
@dataclass(kw_only=True, slots=True, frozen=True)
class _ExecutionRuntime(_ServerRuntimeBase[ContextT], Generic[ContextT]):
"""Runtime for `threads.create_run` — the graph will be fully executed.
Access this via `.execution_runtime` on `ServerRuntime`. Do not
construct directly.
!!! warning "Beta"
This API is in beta and may change in future releases.
"""
context: ContextT = field(default=None) # type: ignore[assignment]
"""The graph run context, typed by the graph's `context_schema`.
Only available during `threads.create_run`.
"""
@dataclass(kw_only=True, slots=True, frozen=True)
class _ReadRuntime(_ServerRuntimeBase[ContextT], Generic[ContextT]):
"""Runtime for non-execution access contexts.
Used for introspection (`assistants.read`), state operations
(`threads.read`), and state updates (`threads.update`).
No `context` is available.
!!! warning "Beta"
This API is in beta and may change in future releases.
"""
ServerRuntime = TypeAliasType(
"ServerRuntime",
_ExecutionRuntime[ContextT] | _ReadRuntime[ContextT],
type_params=(ContextT,),
)
"""Runtime context passed to graph builder factories within the Agent Server.
Requires version 0.7.30 or later of the agent server.
The server calls your graph factory in multiple contexts: executing runs,
reading state, fetching schemas, and more. `ServerRuntime` provides
the authenticated user, store, and access context for every call. Use
`.execution_runtime` to narrow to the execution variant and access
`context`.
Example — conditionally initialize MCP tools only during execution:
```python
import contextlib
from dataclasses import dataclass
from langchain.agents import create_agent
from langgraph_sdk.runtime import ServerRuntime
from my_agent import connect_mcp, disconnect_mcp
@dataclass
class MyCtx:
mcp_endpoint: str
_readonly_agent = create_agent("anthropic:claude-3-5-haiku", tools=[])
@contextlib.asynccontextmanager
async def my_factory(runtime: ServerRuntime[MyCtx]):
if ert := runtime.execution_runtime:
# Only connect to MCP servers for actual runs.
# Schema / graph introspection calls skip this.
user_id = runtime.ensure_user().identity
mcp_tools = await connect_mcp(ert.context.mcp_endpoint, user_id)
yield create_agent("anthropic:claude-3-5-haiku", tools=mcp_tools)
await disconnect_mcp()
else:
yield _readonly_agent
```
Example — simple factory that ignores context:
```python
from langgraph_sdk.runtime import ServerRuntime
def build_graph(user: BaseUser) -> CompiledGraph:
...
async def my_factory(runtime: ServerRuntime) -> CompiledGraph:
# No generic needed if you don't use context.
return build_graph(runtime.ensure_user())
```
!!! warning "Beta"
This API is in beta and may change in future releases.
"""
| {
"repo_id": "langchain-ai/langgraph",
"file_path": "libs/sdk-py/langgraph_sdk/runtime.py",
"license": "MIT License",
"lines": 183,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
langchain-ai/langgraph:libs/sdk-py/tests/test_crons_client.py | """Tests for the crons client."""
from __future__ import annotations
import json
from datetime import datetime, timezone
import httpx
import pytest
from langgraph_sdk.client import (
CronClient,
HttpClient,
SyncCronClient,
SyncHttpClient,
)
def _cron_payload() -> dict[str, object]:
"""Return a mock cron response payload."""
return {
"run_id": "run_123",
"thread_id": "thread_123",
"assistant_id": "asst_123",
"created_at": "2024-01-01T00:00:00Z",
"updated_at": "2024-01-02T00:00:00Z",
"status": "success",
"metadata": {},
"multitask_strategy": "reject",
}
@pytest.mark.asyncio
async def test_async_create_for_thread():
"""Test that CronClient.create_for_thread works without end_time."""
cron = _cron_payload()
async def handler(request: httpx.Request) -> httpx.Response:
assert request.method == "POST"
assert request.url.path == "/threads/thread_123/runs/crons"
# Parse the request body
body = json.loads(request.content)
assert body["schedule"] == "0 0 * * *"
assert body["assistant_id"] == "asst_123"
assert "end_time" not in body # Should be filtered out by the None check
return httpx.Response(200, json=cron)
transport = httpx.MockTransport(handler)
async with httpx.AsyncClient(
transport=transport, base_url="https://example.com"
) as client:
http_client = HttpClient(client)
cron_client = CronClient(http_client)
result = await cron_client.create_for_thread(
thread_id="thread_123",
assistant_id="asst_123",
schedule="0 0 * * *",
)
assert result == cron
@pytest.mark.asyncio
async def test_async_create_for_thread_with_end_time():
"""Test that CronClient.create_for_thread includes end_time in the payload."""
cron = _cron_payload()
end_time = datetime(2025, 12, 31, 23, 59, 59, tzinfo=timezone.utc)
async def handler(request: httpx.Request) -> httpx.Response:
assert request.method == "POST"
assert request.url.path == "/threads/thread_123/runs/crons"
# Parse the request body
body = json.loads(request.content)
assert body["schedule"] == "0 0 * * *"
assert body["assistant_id"] == "asst_123"
assert body["end_time"] == "2025-12-31T23:59:59+00:00"
return httpx.Response(200, json=cron)
transport = httpx.MockTransport(handler)
async with httpx.AsyncClient(
transport=transport, base_url="https://example.com"
) as client:
http_client = HttpClient(client)
cron_client = CronClient(http_client)
result = await cron_client.create_for_thread(
thread_id="thread_123",
assistant_id="asst_123",
schedule="0 0 * * *",
end_time=end_time,
)
assert result == cron
@pytest.mark.asyncio
async def test_async_create():
"""Test that CronClient.create works without end_time."""
cron = _cron_payload()
async def handler(request: httpx.Request) -> httpx.Response:
assert request.method == "POST"
assert request.url.path == "/runs/crons"
# Parse the request body
body = json.loads(request.content)
assert body["schedule"] == "0 12 * * *"
assert body["assistant_id"] == "asst_456"
assert "end_time" not in body # Should be filtered out by the None check
return httpx.Response(200, json=cron)
transport = httpx.MockTransport(handler)
async with httpx.AsyncClient(
transport=transport, base_url="https://example.com"
) as client:
http_client = HttpClient(client)
cron_client = CronClient(http_client)
result = await cron_client.create(
assistant_id="asst_456",
schedule="0 12 * * *",
)
assert result == cron
@pytest.mark.asyncio
async def test_async_create_with_end_time():
"""Test that CronClient.create includes end_time in the payload."""
cron = _cron_payload()
end_time = datetime(2025, 6, 15, 12, 0, 0, tzinfo=timezone.utc)
async def handler(request: httpx.Request) -> httpx.Response:
assert request.method == "POST"
assert request.url.path == "/runs/crons"
# Parse the request body
body = json.loads(request.content)
assert body["schedule"] == "0 12 * * *"
assert body["assistant_id"] == "asst_456"
assert body["end_time"] == "2025-06-15T12:00:00+00:00"
return httpx.Response(200, json=cron)
transport = httpx.MockTransport(handler)
async with httpx.AsyncClient(
transport=transport, base_url="https://example.com"
) as client:
http_client = HttpClient(client)
cron_client = CronClient(http_client)
result = await cron_client.create(
assistant_id="asst_456",
schedule="0 12 * * *",
end_time=end_time,
)
assert result == cron
def test_sync_create_for_thread():
"""Test that SyncCronClient.create_for_thread works without end_time."""
cron = _cron_payload()
def handler(request: httpx.Request) -> httpx.Response:
assert request.method == "POST"
assert request.url.path == "/threads/thread_123/runs/crons"
# Parse the request body
body = json.loads(request.content)
assert body["schedule"] == "0 0 * * *"
assert body["assistant_id"] == "asst_123"
assert "end_time" not in body # Should be filtered out by the None check
return httpx.Response(200, json=cron)
transport = httpx.MockTransport(handler)
with httpx.Client(transport=transport, base_url="https://example.com") as client:
http_client = SyncHttpClient(client)
cron_client = SyncCronClient(http_client)
result = cron_client.create_for_thread(
thread_id="thread_123",
assistant_id="asst_123",
schedule="0 0 * * *",
)
assert result == cron
def test_sync_create_for_thread_with_end_time():
"""Test that SyncCronClient.create_for_thread includes end_time in the payload."""
cron = _cron_payload()
end_time = datetime(2025, 12, 31, 23, 59, 59, tzinfo=timezone.utc)
def handler(request: httpx.Request) -> httpx.Response:
assert request.method == "POST"
assert request.url.path == "/threads/thread_123/runs/crons"
# Parse the request body
body = json.loads(request.content)
assert body["schedule"] == "0 0 * * *"
assert body["assistant_id"] == "asst_123"
assert body["end_time"] == "2025-12-31T23:59:59+00:00"
return httpx.Response(200, json=cron)
transport = httpx.MockTransport(handler)
with httpx.Client(transport=transport, base_url="https://example.com") as client:
http_client = SyncHttpClient(client)
cron_client = SyncCronClient(http_client)
result = cron_client.create_for_thread(
thread_id="thread_123",
assistant_id="asst_123",
schedule="0 0 * * *",
end_time=end_time,
)
assert result == cron
def test_sync_create():
"""Test that SyncCronClient.create works without end_time."""
cron = _cron_payload()
def handler(request: httpx.Request) -> httpx.Response:
assert request.method == "POST"
assert request.url.path == "/runs/crons"
# Parse the request body
body = json.loads(request.content)
assert body["schedule"] == "0 12 * * *"
assert body["assistant_id"] == "asst_456"
assert "end_time" not in body # Should be filtered out by the None check
return httpx.Response(200, json=cron)
transport = httpx.MockTransport(handler)
with httpx.Client(transport=transport, base_url="https://example.com") as client:
http_client = SyncHttpClient(client)
cron_client = SyncCronClient(http_client)
result = cron_client.create(
assistant_id="asst_456",
schedule="0 12 * * *",
)
assert result == cron
def test_sync_create_with_end_time():
"""Test that SyncCronClient.create includes end_time in the payload."""
cron = _cron_payload()
end_time = datetime(2025, 6, 15, 12, 0, 0, tzinfo=timezone.utc)
def handler(request: httpx.Request) -> httpx.Response:
assert request.method == "POST"
assert request.url.path == "/runs/crons"
# Parse the request body
body = json.loads(request.content)
assert body["schedule"] == "0 12 * * *"
assert body["assistant_id"] == "asst_456"
assert body["end_time"] == "2025-06-15T12:00:00+00:00"
return httpx.Response(200, json=cron)
transport = httpx.MockTransport(handler)
with httpx.Client(transport=transport, base_url="https://example.com") as client:
http_client = SyncHttpClient(client)
cron_client = SyncCronClient(http_client)
result = cron_client.create(
assistant_id="asst_456",
schedule="0 12 * * *",
end_time=end_time,
)
assert result == cron
@pytest.mark.parametrize(
"enabled_value",
[True, False],
ids=["enabled", "disabled"],
)
def test_sync_create_with_enabled_parameter(enabled_value):
"""Test that SyncCronClient.create includes enabled parameter in the payload."""
cron = _cron_payload()
def handler(request: httpx.Request) -> httpx.Response:
assert request.method == "POST"
assert request.url.path == "/runs/crons"
body = json.loads(request.content)
assert body["schedule"] == "0 12 * * *"
assert body["assistant_id"] == "asst_456"
assert body["enabled"] == enabled_value
return httpx.Response(200, json=cron)
transport = httpx.MockTransport(handler)
with httpx.Client(transport=transport, base_url="https://example.com") as client:
http_client = SyncHttpClient(client)
cron_client = SyncCronClient(http_client)
result = cron_client.create(
assistant_id="asst_456",
schedule="0 12 * * *",
enabled=enabled_value,
)
assert result == cron
def _cron_response() -> dict[str, object]:
"""Return a mock Cron object response."""
return {
"cron_id": "cron_123",
"assistant_id": "asst_123",
"thread_id": "thread_123",
"on_run_completed": None,
"end_time": "2025-12-31T23:59:59+00:00",
"schedule": "0 10 * * *",
"created_at": "2024-01-01T00:00:00Z",
"updated_at": "2024-01-02T00:00:00Z",
"payload": {},
"user_id": None,
"next_run_date": "2024-01-03T10:00:00Z",
"metadata": {},
"enabled": True,
}
@pytest.mark.asyncio
async def test_async_update():
"""Test that CronClient.update works with schedule and enabled parameters."""
cron = _cron_response()
async def handler(request: httpx.Request) -> httpx.Response:
assert request.method == "PATCH"
assert request.url.path == "/runs/crons/cron_123"
# Parse the request body
body = json.loads(request.content)
assert body["schedule"] == "0 10 * * *"
assert body["enabled"] is False
assert "end_time" not in body # Should be filtered out by the None check
return httpx.Response(200, json=cron)
transport = httpx.MockTransport(handler)
async with httpx.AsyncClient(
transport=transport, base_url="https://example.com"
) as client:
http_client = HttpClient(client)
cron_client = CronClient(http_client)
result = await cron_client.update(
cron_id="cron_123",
schedule="0 10 * * *",
enabled=False,
)
assert result == cron
@pytest.mark.asyncio
async def test_async_update_with_end_time():
"""Test that CronClient.update includes end_time in the payload."""
cron = _cron_response()
end_time = datetime(2025, 12, 31, 23, 59, 59, tzinfo=timezone.utc)
async def handler(request: httpx.Request) -> httpx.Response:
assert request.method == "PATCH"
assert request.url.path == "/runs/crons/cron_123"
# Parse the request body
body = json.loads(request.content)
assert body["schedule"] == "0 10 * * *"
assert body["end_time"] == "2025-12-31T23:59:59+00:00"
assert body["enabled"] is True
return httpx.Response(200, json=cron)
transport = httpx.MockTransport(handler)
async with httpx.AsyncClient(
transport=transport, base_url="https://example.com"
) as client:
http_client = HttpClient(client)
cron_client = CronClient(http_client)
result = await cron_client.update(
cron_id="cron_123",
schedule="0 10 * * *",
end_time=end_time,
enabled=True,
)
assert result == cron
def test_sync_update():
"""Test that SyncCronClient.update works with schedule and enabled parameters."""
cron = _cron_response()
def handler(request: httpx.Request) -> httpx.Response:
assert request.method == "PATCH"
assert request.url.path == "/runs/crons/cron_123"
# Parse the request body
body = json.loads(request.content)
assert body["schedule"] == "0 10 * * *"
assert body["enabled"] is False
assert "end_time" not in body # Should be filtered out by the None check
return httpx.Response(200, json=cron)
transport = httpx.MockTransport(handler)
with httpx.Client(transport=transport, base_url="https://example.com") as client:
http_client = SyncHttpClient(client)
cron_client = SyncCronClient(http_client)
result = cron_client.update(
cron_id="cron_123",
schedule="0 10 * * *",
enabled=False,
)
assert result == cron
def test_sync_update_with_end_time():
"""Test that SyncCronClient.update includes end_time in the payload."""
cron = _cron_response()
end_time = datetime(2025, 12, 31, 23, 59, 59, tzinfo=timezone.utc)
def handler(request: httpx.Request) -> httpx.Response:
assert request.method == "PATCH"
assert request.url.path == "/runs/crons/cron_123"
# Parse the request body
body = json.loads(request.content)
assert body["schedule"] == "0 10 * * *"
assert body["end_time"] == "2025-12-31T23:59:59+00:00"
assert body["enabled"] is True
return httpx.Response(200, json=cron)
transport = httpx.MockTransport(handler)
with httpx.Client(transport=transport, base_url="https://example.com") as client:
http_client = SyncHttpClient(client)
cron_client = SyncCronClient(http_client)
result = cron_client.update(
cron_id="cron_123",
schedule="0 10 * * *",
end_time=end_time,
enabled=True,
)
assert result == cron
@pytest.mark.parametrize(
"enabled_value",
[True, False],
ids=["enabled", "disabled"],
)
def test_sync_update_with_enabled_parameter(enabled_value):
"""Test that SyncCronClient.update includes enabled parameter in the payload."""
cron = _cron_response()
def handler(request: httpx.Request) -> httpx.Response:
assert request.method == "PATCH"
assert request.url.path == "/runs/crons/cron_456"
body = json.loads(request.content)
assert body["enabled"] == enabled_value
assert "schedule" not in body # Only enabled is set
return httpx.Response(200, json=cron)
transport = httpx.MockTransport(handler)
with httpx.Client(transport=transport, base_url="https://example.com") as client:
http_client = SyncHttpClient(client)
cron_client = SyncCronClient(http_client)
result = cron_client.update(
cron_id="cron_456",
enabled=enabled_value,
)
assert result == cron
| {
"repo_id": "langchain-ai/langgraph",
"file_path": "libs/sdk-py/tests/test_crons_client.py",
"license": "MIT License",
"lines": 381,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langgraph:libs/sdk-py/langgraph_sdk/encryption/types.py | """Encryption and decryption types for LangGraph.
This module defines the core types used for custom at-rest encryption
in LangGraph. It includes context types and typed dictionaries for
encryption operations.
"""
from __future__ import annotations
import typing
from collections.abc import Awaitable, Callable
Json = dict[str, typing.Any]
"""JSON-serializable dictionary type for structured data encryption."""
class EncryptionContext:
"""Context passed to encryption/decryption handlers.
Contains arbitrary non-secret key-values that will be stored on encrypt.
These key-values are intended to be sent to an external service that
manages keys and handles the actual encryption and decryption of data.
Attributes:
model: The model type being encrypted (e.g., "assistant", "thread", "run", "checkpoint")
field: The specific field being encrypted (e.g., "metadata", "context", "kwargs", "values")
metadata: Additional context metadata that can be used for encryption decisions
"""
__slots__ = ("field", "metadata", "model")
def __init__(
self,
model: str | None = None,
metadata: dict[str, typing.Any] | None = None,
field: str | None = None,
):
self.model = model
self.field = field
self.metadata = metadata or {}
def __repr__(self) -> str:
return f"EncryptionContext(model={self.model!r}, field={self.field!r}, metadata={self.metadata!r})"
BlobEncryptor = Callable[[EncryptionContext, bytes], Awaitable[bytes]]
"""Handler for encrypting opaque blob data like checkpoints.
Note: Must be an async function. Encryption typically involves I/O operations
(calling external KMS services), which should be async.
Args:
ctx: Encryption context with model type and metadata
blob: The raw bytes to encrypt
Returns:
Awaitable that resolves to encrypted bytes
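Example:
    A minimal sketch (`kms` stands in for an async key-management client;
    it is not part of this module):
    ```python
    async def encrypt_blob(ctx: EncryptionContext, blob: bytes) -> bytes:
        # Non-secret context values can drive key selection.
        key_id = ctx.metadata.get("key_id", "default")
        return await kms.encrypt(key_id, blob)
    ```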
"""
BlobDecryptor = Callable[[EncryptionContext, bytes], Awaitable[bytes]]
"""Handler for decrypting opaque blob data like checkpoints.
Note: Must be an async function. Decryption typically involves I/O operations
(calling external KMS services), which should be async.
Args:
ctx: Encryption context with model type and metadata
blob: The encrypted bytes to decrypt
Returns:
Awaitable that resolves to decrypted bytes
"""
JsonEncryptor = Callable[[EncryptionContext, Json], Awaitable[Json]]
"""Handler for encrypting structured JSON data.
Note: Must be an async function. Encryption typically involves I/O operations
(calling external KMS services), which should be async.
Used for encrypting structured data like metadata, context, kwargs, values,
and other JSON-serializable fields across different model types.
Maps plaintext fields to encrypted fields. A practical approach:
- Keep "owner" field unencrypted for search/filtering
- Encrypt VALUES (not keys) for fields with specific prefix (e.g., "my.customer.org/")
- Pass through all other fields unencrypted
Example:
Input: {"owner": "user123", "my.customer.org/email": "john@example.com", "tenant_id": "t-456"}
Output: {"owner": "user123", "my.customer.org/email": "ENCRYPTED", "tenant_id": "t-456"}
Note: Encrypted field VALUES cannot be reliably searched, as most real-world
encryption implementations use nonces (non-deterministic encryption).
Only unencrypted fields can be used in search queries.
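A sketch of the prefix-based approach above (`encrypt_value` is a
hypothetical call into your key-management service):
```python
async def encrypt_json(ctx: EncryptionContext, data: Json) -> Json:
    out: Json = {}
    for key, value in data.items():
        if key.startswith("my.customer.org/"):
            # Encrypt the value only; the key stays searchable.
            out[key] = await encrypt_value(ctx, value)
        else:
            out[key] = value
    return out
```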
Args:
ctx: Encryption context with model type, field name, and metadata
data: The plaintext JSON dictionary
Returns:
Awaitable that resolves to encrypted JSON dictionary
"""
JsonDecryptor = Callable[[EncryptionContext, Json], Awaitable[Json]]
"""Handler for decrypting structured JSON data.
Note: Must be an async function. Decryption typically involves I/O operations
(calling external KMS services), which should be async.
Inverse of JsonEncryptor. Must be able to decrypt data that
was encrypted by the corresponding encryptor.
Args:
ctx: Encryption context with model type, field name, and metadata
data: The encrypted JSON dictionary
Returns:
Awaitable that resolves to decrypted JSON dictionary
"""
if typing.TYPE_CHECKING:
from starlette.authentication import BaseUser
ContextHandler = Callable[
["BaseUser", EncryptionContext], Awaitable[dict[str, typing.Any]]
]
"""Handler for deriving encryption context from authenticated user info.
Note: Must be an async function as it may involve I/O operations.
The context handler is called once per request in middleware (after auth),
allowing encryption context to be derived from JWT claims, user properties,
or other auth-derived data instead of requiring a separate X-Encryption-Context header.
The return value becomes ctx.metadata for subsequent encrypt/decrypt operations
and is persisted with encrypted data for later decryption.
Note: ctx.model and ctx.field will be None in context handlers since
the handler runs once per request before any specific model/field is known.
Args:
user: The authenticated user (from Starlette's AuthenticationMiddleware)
ctx: Current encryption context with metadata from X-Encryption-Context header
Returns:
Awaitable that resolves to dict that becomes the new ctx.metadata
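Example:
    A sketch deriving tenant-scoped context from the authenticated user
    (assumes the user object carries a `tenant_id` attribute):
    ```python
    async def context_handler(user: BaseUser, ctx: EncryptionContext) -> dict:
        return {**ctx.metadata, "tenant_id": getattr(user, "tenant_id", None)}
    ```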
"""
| {
"repo_id": "langchain-ai/langgraph",
"file_path": "libs/sdk-py/langgraph_sdk/encryption/types.py",
"license": "MIT License",
"lines": 106,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | documentation |
langchain-ai/langgraph:libs/sdk-py/tests/test_encryption.py | import pytest
from langgraph_sdk.encryption import DuplicateHandlerError, Encryption
class TestHandlerValidation:
"""Test duplicate handler and signature validation."""
def test_duplicate_handlers_raise_error(self):
"""Registering the same handler type twice raises DuplicateHandlerError."""
encryption = Encryption()
@encryption.encrypt.blob
async def blob_enc(_ctx, data):
return data
@encryption.decrypt.blob
async def blob_dec(_ctx, data):
return data
@encryption.encrypt.json
async def json_enc(_ctx, data):
return data
@encryption.decrypt.json
async def json_dec(_ctx, data):
return data
# All duplicates should raise
with pytest.raises(DuplicateHandlerError):
@encryption.encrypt.blob
async def dup(_ctx, data):
return data
with pytest.raises(DuplicateHandlerError):
@encryption.decrypt.blob
async def dup(_ctx, data):
return data
with pytest.raises(DuplicateHandlerError):
@encryption.encrypt.json
async def dup(_ctx, data):
return data
with pytest.raises(DuplicateHandlerError):
@encryption.decrypt.json
async def dup(_ctx, data):
return data
def test_handlers_must_be_async(self):
"""Sync functions raise TypeError."""
encryption = Encryption()
with pytest.raises(TypeError, match="must be an async function"):
@encryption.encrypt.blob
def sync_handler(_ctx, data):
return data
def test_handlers_must_have_two_params(self):
"""Wrong parameter count raises TypeError."""
encryption = Encryption()
with pytest.raises(TypeError, match="must accept exactly 2 parameters"):
@encryption.encrypt.blob # type: ignore[arg-type]
async def wrong_params(ctx):
return ctx
| {
"repo_id": "langchain-ai/langgraph",
"file_path": "libs/sdk-py/tests/test_encryption.py",
"license": "MIT License",
"lines": 50,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langgraph:libs/sdk-py/tests/test_assistants_client.py | from __future__ import annotations
import httpx
import pytest
from langgraph_sdk.client import (
AssistantsClient,
HttpClient,
SyncAssistantsClient,
SyncHttpClient,
)
def _assistant_payload() -> dict[str, object]:
return {
"assistant_id": "asst_123",
"graph_id": "graph_123",
"config": {"configurable": {"foo": "bar"}},
"context": {"foo": "bar"},
"created_at": "2024-01-01T00:00:00Z",
"metadata": {"env": "test"},
"version": 1,
"name": "My Assistant",
"description": "Example",
"updated_at": "2024-01-02T00:00:00Z",
}
@pytest.mark.asyncio
async def test_assistants_search_returns_list_by_default():
assistant = _assistant_payload()
async def handler(request: httpx.Request) -> httpx.Response:
assert request.method == "POST"
assert request.url.path == "/assistants/search"
return httpx.Response(200, json=[assistant])
transport = httpx.MockTransport(handler)
async with httpx.AsyncClient(
transport=transport, base_url="https://example.com"
) as client:
http_client = HttpClient(client)
assistants_client = AssistantsClient(http_client)
result = await assistants_client.search(limit=3)
assert result == [assistant]
@pytest.mark.asyncio
async def test_assistants_search_can_return_object_with_pagination_metadata():
assistant = _assistant_payload()
async def handler(request: httpx.Request) -> httpx.Response:
assert request.method == "POST"
assert request.url.path == "/assistants/search"
return httpx.Response(
200,
headers={"X-Pagination-Next": "42"},
json=[assistant],
)
transport = httpx.MockTransport(handler)
async with httpx.AsyncClient(
transport=transport, base_url="https://example.com"
) as client:
http_client = HttpClient(client)
assistants_client = AssistantsClient(http_client)
result = await assistants_client.search(response_format="object")
assert result == {"assistants": [assistant], "next": "42"}
def test_sync_assistants_search_can_return_object_with_pagination_metadata():
assistant = _assistant_payload()
def handler(request: httpx.Request) -> httpx.Response:
assert request.method == "POST"
assert request.url.path == "/assistants/search"
return httpx.Response(
200,
headers={"X-Pagination-Next": "84"},
json=[assistant],
)
transport = httpx.MockTransport(handler)
with httpx.Client(transport=transport, base_url="https://example.com") as client:
http_client = SyncHttpClient(client)
assistants_client = SyncAssistantsClient(http_client)
result = assistants_client.search(response_format="object")
assert result == {"assistants": [assistant], "next": "84"}
| {
"repo_id": "langchain-ai/langgraph",
"file_path": "libs/sdk-py/tests/test_assistants_client.py",
"license": "MIT License",
"lines": 72,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |
langchain-ai/langgraph:libs/sdk-py/tests/test_skip_auto_load_api_key.py | """Tests for api_key parameter behavior."""
import pytest
from langgraph_sdk import get_client, get_sync_client
class TestSkipAutoLoadApiKey:
"""Test the api_key parameter's auto-loading behavior."""
@pytest.mark.asyncio
async def test_get_client_loads_from_env_by_default(self, monkeypatch):
"""Test that API key is loaded from environment by default."""
monkeypatch.setenv("LANGGRAPH_API_KEY", "test-key-from-env")
client = get_client(url="http://localhost:8123")
assert "x-api-key" in client.http.client.headers
assert client.http.client.headers["x-api-key"] == "test-key-from-env"
await client.aclose()
@pytest.mark.asyncio
async def test_get_client_skips_env_when_sentinel_used(self, monkeypatch):
"""Test that API key is not loaded from environment when None is explicitly passed."""
monkeypatch.setenv("LANGGRAPH_API_KEY", "test-key-from-env")
client = get_client(url="http://localhost:8123", api_key=None)
assert "x-api-key" not in client.http.client.headers
await client.aclose()
@pytest.mark.asyncio
async def test_get_client_uses_explicit_key_when_provided(self, monkeypatch):
"""Test that explicit API key takes precedence over environment."""
monkeypatch.setenv("LANGGRAPH_API_KEY", "test-key-from-env")
client = get_client(
url="http://localhost:8123",
api_key="explicit-key",
)
assert "x-api-key" in client.http.client.headers
assert client.http.client.headers["x-api-key"] == "explicit-key"
await client.aclose()
def test_get_sync_client_loads_from_env_by_default(self, monkeypatch):
"""Test that sync client loads API key from environment by default."""
monkeypatch.setenv("LANGGRAPH_API_KEY", "test-key-from-env")
client = get_sync_client(url="http://localhost:8123")
assert "x-api-key" in client.http.client.headers
assert client.http.client.headers["x-api-key"] == "test-key-from-env"
client.close()
def test_get_sync_client_skips_env_when_sentinel_used(self, monkeypatch):
"""Test that sync client doesn't load from environment when None is explicitly passed."""
monkeypatch.setenv("LANGGRAPH_API_KEY", "test-key-from-env")
client = get_sync_client(url="http://localhost:8123", api_key=None)
assert "x-api-key" not in client.http.client.headers
client.close()
def test_get_sync_client_uses_explicit_key_when_provided(self, monkeypatch):
"""Test that sync client uses explicit API key when provided."""
monkeypatch.setenv("LANGGRAPH_API_KEY", "test-key-from-env")
client = get_sync_client(
url="http://localhost:8123",
api_key="explicit-key",
)
assert "x-api-key" in client.http.client.headers
assert client.http.client.headers["x-api-key"] == "explicit-key"
client.close()
| {
"repo_id": "langchain-ai/langgraph",
"file_path": "libs/sdk-py/tests/test_skip_auto_load_api_key.py",
"license": "MIT License",
"lines": 54,
"canary_id": -1,
"canary_value": "",
"pii_type": "",
"provider": "",
"regex_pattern": "",
"repetition": -1,
"template": ""
} | test |