index int64 0 0 | repo_id stringclasses 596 values | file_path stringlengths 31 168 | content stringlengths 1 6.2M |
|---|---|---|---|
0 | lc_public_repos/langgraph/libs/langgraph | lc_public_repos/langgraph/libs/langgraph/tests/test_tracing_interops.py | import json
import sys
import time
from typing import Any, Callable, Tuple, TypedDict, TypeVar
from unittest.mock import MagicMock
import langsmith as ls
import pytest
from langchain_core.runnables import RunnableConfig
from langchain_core.tracers import LangChainTracer
from langgraph.graph import StateGraph
pytestmark = pytest.mark.anyio
def _get_mock_client(**kwargs: Any) -> ls.Client:
    """Build a LangSmith client whose underlying HTTP session is a MagicMock.

    Extra keyword arguments are forwarded to ``ls.Client`` unchanged.
    """
    return ls.Client(session=MagicMock(), api_key="test", **kwargs)
def _get_calls(
mock_client: Any,
verbs: set[str] = {"POST"},
) -> list:
return [
c
for c in mock_client.session.request.mock_calls
if c.args and c.args[0] in verbs
]
T = TypeVar("T")


def wait_for(
    condition: Callable[[], Tuple[T, bool]],
    max_sleep_time: float = 10,
    sleep_time: float = 3,
) -> T:
    """Poll *condition* until it signals success or the deadline elapses.

    Args:
        condition: Zero-argument callable returning ``(result, done)``.
            Exceptions it raises are swallowed during polling and the most
            recent one is re-raised on timeout.
        max_sleep_time: Overall deadline in seconds.
        sleep_time: Pause between polls in seconds.

    Returns:
        The first ``result`` for which ``done`` was true.

    Raises:
        Exception: The last exception raised by *condition*, if any.
        ValueError: If *condition* never succeeded and never raised.
    """
    start_time = time.time()
    last_e = None
    while time.time() - start_time < max_sleep_time:
        try:
            res, cond = condition()
            if cond:
                return res
        except Exception as e:
            last_e = e
        # Fix: the original always slept a full sleep_time here, so the loop
        # could overshoot max_sleep_time by almost a whole interval. Clamp
        # the final sleep to the time remaining, and stop once it hits zero.
        remaining = max_sleep_time - (time.time() - start_time)
        if remaining <= 0:
            break
        time.sleep(min(sleep_time, remaining))
    total_time = time.time() - start_time
    if last_e is not None:
        raise last_e
    raise ValueError(f"Callable did not return within {total_time}")
@pytest.mark.skip("This test times out in CI")
async def test_nested_tracing():
    """End-to-end check that LangSmith run trees nest correctly when a graph
    node calls a ``@traceable`` function that itself invokes a subgraph.

    The test runs parent_graph -> parent_node -> some_traceable -> child_graph
    and then inspects the POSTed run payloads to verify parent/child linkage.
    """
    # On <3.11 asyncio does not propagate contextvars into tasks the same
    # way, so the tracing config must be passed explicitly via langsmith_extra.
    lt_py_311 = sys.version_info < (3, 11)
    mock_client = _get_mock_client()

    class State(TypedDict):
        value: str

    @ls.traceable
    async def some_traceable(content: State):
        # Invokes the child graph from inside a traceable span, creating the
        # middle layer of the expected run hierarchy.
        return await child_graph.ainvoke(content)

    async def parent_node(state: State, config: RunnableConfig) -> State:
        if lt_py_311:
            result = await some_traceable(state, langsmith_extra={"config": config})
        else:
            result = await some_traceable(state)
        return {"value": f"parent_{result['value']}"}

    async def child_node(state: State) -> State:
        return {"value": f"child_{state['value']}"}

    child_builder = StateGraph(State)
    child_builder.add_node(child_node)
    child_builder.add_edge("__start__", "child_node")
    child_graph = child_builder.compile().with_config(run_name="child_graph")

    parent_builder = StateGraph(State)
    parent_builder.add_node(parent_node)
    parent_builder.add_edge("__start__", "parent_node")
    parent_graph = parent_builder.compile()

    tracer = LangChainTracer(client=mock_client)
    result = await parent_graph.ainvoke({"value": "input"}, {"callbacks": [tracer]})
    assert result == {"value": "parent_child_input"}

    def get_posts():
        # Tracing uploads happen on a background thread, so poll the mock's
        # recorded POSTs until the deepest run ("child_node") has been sent.
        post_calls = _get_calls(mock_client, verbs={"POST"})
        posts = [p for c in post_calls for p in json.loads(c.kwargs["data"])["post"]]
        names = [p.get("name") for p in posts]
        if "child_node" in names:
            return posts, True
        return None, False

    posts = wait_for(get_posts)
    # If the callbacks weren't propagated correctly, we'd
    # end up with broken dotted_orders
    parent_run = next(data for data in posts if data["name"] == "parent_node")
    child_run = next(data for data in posts if data["name"] == "child_graph")
    traceable_run = next(data for data in posts if data["name"] == "some_traceable")

    # dotted_order prefixes encode ancestry: each child's order must extend
    # its parent's, and all three runs must share one trace id.
    assert child_run["dotted_order"].startswith(traceable_run["dotted_order"])
    assert traceable_run["dotted_order"].startswith(parent_run["dotted_order"])
    assert child_run["parent_run_id"] == traceable_run["id"]
    assert traceable_run["parent_run_id"] == parent_run["id"]
    assert parent_run["trace_id"] == child_run["trace_id"] == traceable_run["trace_id"]
|
0 | lc_public_repos/langgraph/libs/langgraph | lc_public_repos/langgraph/libs/langgraph/tests/test_utils.py | import functools
import sys
import uuid
from typing import (
Any,
Callable,
Dict,
ForwardRef,
List,
Literal,
Optional,
TypedDict,
TypeVar,
Union,
)
from unittest.mock import patch
import langsmith
import pytest
from typing_extensions import Annotated, NotRequired, Required
from langgraph.graph import END, StateGraph
from langgraph.graph.graph import CompiledGraph
from langgraph.utils.fields import _is_optional_type, get_field_default
from langgraph.utils.runnable import is_async_callable, is_async_generator
pytestmark = pytest.mark.anyio
def test_is_async() -> None:
    """is_async_callable recognizes coroutine functions and objects whose
    ``__call__`` is async, including functools-wrapped variants."""

    async def func() -> None:
        pass

    def sync_func() -> None:
        pass

    class AsyncFuncCallable:
        async def __call__(self) -> None:
            pass

    class SyncFuncCallable:
        def __call__(self) -> None:
            pass

    # Plain functions, bare and functools.wraps-decorated.
    assert is_async_callable(func)
    assert is_async_callable(functools.wraps(func)(func))
    assert not is_async_callable(sync_func)
    assert not is_async_callable(functools.wraps(sync_func)(sync_func))

    # Callable instances, bare and functools.wraps-decorated.
    async_obj = AsyncFuncCallable()
    assert is_async_callable(async_obj)
    assert is_async_callable(functools.wraps(async_obj)(async_obj))

    sync_obj = SyncFuncCallable()
    assert not is_async_callable(sync_obj)
    assert not is_async_callable(functools.wraps(sync_obj)(sync_obj))
def test_is_generator() -> None:
    """is_async_generator recognizes async generator functions and objects
    whose ``__call__`` is an async generator, including wrapped variants."""

    async def gen():
        yield

    def sync_gen():
        yield

    class AsyncGenCallable:
        async def __call__(self):
            yield

    class SyncGenCallable:
        def __call__(self):
            yield

    # Plain generator functions, bare and functools.wraps-decorated.
    assert is_async_generator(gen)
    assert is_async_generator(functools.wraps(gen)(gen))
    assert not is_async_generator(sync_gen)
    assert not is_async_generator(functools.wraps(sync_gen)(sync_gen))

    # Callable instances, bare and functools.wraps-decorated.
    async_obj = AsyncGenCallable()
    assert is_async_generator(async_obj)
    assert is_async_generator(functools.wraps(async_obj)(async_obj))

    sync_obj = SyncGenCallable()
    assert not is_async_generator(sync_obj)
    assert not is_async_generator(functools.wraps(sync_obj)(sync_obj))
@pytest.fixture
def rt_graph() -> CompiledGraph:
    """Compile a single-node graph whose node reports its own LangSmith run id.

    Used by the tracing tests below to verify that the run tree is visible
    from inside node code.
    """

    class State(TypedDict):
        foo: int
        node_run_id: int

    def node(_: State):
        # NOTE(review): imported inside the node rather than at module level —
        # presumably so the lookup happens after the tests patch langsmith;
        # confirm before hoisting.
        from langsmith import get_current_run_tree  # type: ignore

        return {"node_run_id": get_current_run_tree().id}  # type: ignore

    graph = StateGraph(State)
    graph.add_node(node)
    graph.set_entry_point("node")
    graph.add_edge("node", END)
    return graph.compile()
def test_runnable_callable_tracing_nested(rt_graph: CompiledGraph) -> None:
    """Node code can read its LangSmith run tree during a traced sync invoke."""
    with patch("langsmith.client.Client", spec=langsmith.Client) as mock_client, patch(
        "langchain_core.tracers.langchain.get_client"
    ) as mock_get_client:
        mock_get_client.return_value = mock_client
        with langsmith.tracing_context(enabled=True):
            res = rt_graph.invoke({"foo": 1})
    # A real run tree was available inside the node: its id is a UUID.
    assert isinstance(res["node_run_id"], uuid.UUID)
@pytest.mark.skipif(
    sys.version_info < (3, 11),
    reason="Python 3.11+ is required for async contextvars support",
)
async def test_runnable_callable_tracing_nested_async(rt_graph: CompiledGraph) -> None:
    """Node code can read its LangSmith run tree during a traced async invoke."""
    with patch("langsmith.client.Client", spec=langsmith.Client) as mock_client, patch(
        "langchain_core.tracers.langchain.get_client"
    ) as mock_get_client:
        mock_get_client.return_value = mock_client
        with langsmith.tracing_context(enabled=True):
            res = await rt_graph.ainvoke({"foo": 1})
    # A real run tree was available inside the node: its id is a UUID.
    assert isinstance(res["node_run_id"], uuid.UUID)
def test_is_optional_type():
    """_is_optional_type is true exactly when the annotation admits None at
    the top level of a (possibly nested) Union."""
    # NOTE(review): bare None is reported as optional — presumably treated as
    # an implicit type(None) annotation; confirm that asymmetry with
    # type(None) below is intended.
    assert _is_optional_type(None)
    assert not _is_optional_type(type(None))
    assert _is_optional_type(Optional[list])
    assert not _is_optional_type(int)
    # Optional wrapping of special forms such as Literal.
    assert _is_optional_type(Optional[Literal[1, 2, 3]])
    assert not _is_optional_type(Literal[1, 2, 3])
    # Optional container vs. container of optionals: only the former counts.
    assert _is_optional_type(Optional[List[int]])
    assert _is_optional_type(Optional[Dict[str, int]])
    assert not _is_optional_type(List[Optional[int]])
    # Nested unions flatten, so a None member anywhere at the top level counts.
    assert _is_optional_type(Union[Optional[str], Optional[int]])
    assert _is_optional_type(
        Union[
            Union[Optional[str], Optional[int]], Union[Optional[float], Optional[dict]]
        ]
    )
    assert not _is_optional_type(Union[Union[str, int], Union[float, dict]])
    # The position of None inside the Union is irrelevant.
    assert _is_optional_type(Union[int, None])
    assert _is_optional_type(Union[str, None, int])
    assert _is_optional_type(Union[None, str, int])
    assert not _is_optional_type(Union[int, str])
    assert not _is_optional_type(Any)  # Do we actually want this?
    assert _is_optional_type(Optional[Any])

    class MyClass:
        pass

    # User-defined classes and forward references behave like any other type.
    assert _is_optional_type(Optional[MyClass])
    assert not _is_optional_type(MyClass)
    assert _is_optional_type(Optional[ForwardRef("MyClass")])
    assert not _is_optional_type(ForwardRef("MyClass"))
    assert _is_optional_type(Optional[Union[List[int], Dict[str, Optional[int]]]])
    assert not _is_optional_type(Union[List[int], Dict[str, Optional[int]]])
    assert _is_optional_type(Optional[Callable[[int], str]])
    assert not _is_optional_type(Callable[[int], Optional[str]])
    T = TypeVar("T")
    # TypeVars count only when Optional-wrapped or bound to an Optional type.
    assert _is_optional_type(Optional[T])
    assert not _is_optional_type(T)
    U = TypeVar("U", bound=Optional[T])  # type: ignore
    assert _is_optional_type(U)
def test_is_required():
    """get_field_default yields Ellipsis for required TypedDict fields and
    None for not-required ones, honoring Required/NotRequired, Annotated
    nesting, inheritance, and total=False."""

    class MyBaseTypedDict(TypedDict):
        val_1: Required[Optional[str]]
        val_2: Required[str]
        val_3: NotRequired[str]
        val_4: NotRequired[Optional[str]]
        val_5: Annotated[NotRequired[int], "foo"]
        val_6: NotRequired[Annotated[int, "foo"]]
        val_7: Annotated[Required[int], "foo"]
        val_8: Required[Annotated[int, "foo"]]
        val_9: Optional[str]
        val_10: str

    annos = MyBaseTypedDict.__annotations__
    # Required fields have no default — Ellipsis is the "required" sentinel —
    # even when the value type itself is Optional (val_1).
    assert get_field_default("val_1", annos["val_1"], MyBaseTypedDict) == ...
    assert get_field_default("val_2", annos["val_2"], MyBaseTypedDict) == ...
    assert get_field_default("val_3", annos["val_3"], MyBaseTypedDict) is None
    assert get_field_default("val_4", annos["val_4"], MyBaseTypedDict) is None
    # See https://peps.python.org/pep-0655/#interaction-with-annotated
    assert get_field_default("val_5", annos["val_5"], MyBaseTypedDict) is None
    assert get_field_default("val_6", annos["val_6"], MyBaseTypedDict) is None
    assert get_field_default("val_7", annos["val_7"], MyBaseTypedDict) == ...
    assert get_field_default("val_8", annos["val_8"], MyBaseTypedDict) == ...
    # NOTE(review): val_9 is Optional without NotRequired yet gets a None
    # default — looks like Optional alone is treated as not-required here.
    assert get_field_default("val_9", annos["val_9"], MyBaseTypedDict) is None
    assert get_field_default("val_10", annos["val_10"], MyBaseTypedDict) == ...

    class MyChildDict(MyBaseTypedDict):
        val_11: int
        val_11b: Optional[int]
        val_11c: Union[int, None, str]

    # total=False flips the default requiredness for fields declared here.
    class MyGrandChildDict(MyChildDict, total=False):
        val_12: int
        val_13: Required[str]

    cannos = MyChildDict.__annotations__
    gcannos = MyGrandChildDict.__annotations__
    assert get_field_default("val_11", cannos["val_11"], MyChildDict) == ...
    assert get_field_default("val_11b", cannos["val_11b"], MyChildDict) is None
    assert get_field_default("val_11c", cannos["val_11c"], MyChildDict) is None
    assert get_field_default("val_12", gcannos["val_12"], MyGrandChildDict) is None
    # Inherited val_9 keeps its None default in the grandchild.
    assert get_field_default("val_9", gcannos["val_9"], MyGrandChildDict) is None
    # Required[] overrides total=False.
    assert get_field_default("val_13", gcannos["val_13"], MyGrandChildDict) == ...
|
0 | lc_public_repos/langgraph/libs/langgraph/tests | lc_public_repos/langgraph/libs/langgraph/tests/__snapshots__/test_pregel_async.ambr | # serializer version: 1
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class
'''
+-----------+
| __start__ |
+-----------+
*
*
*
+---------------+
| rewrite_query |
+---------------+
*** ...
* .
** ...
+--------------+ .
| analyzer_one | .
+--------------+ .
* .
* .
* .
+---------------+ +---------------+
| retriever_one | | retriever_two |
+---------------+ +---------------+
*** ***
* *
** **
+----+
| qa |
+----+
*
*
*
+---------+
| __end__ |
+---------+
'''
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class[memory]
'''
+-----------+
| __start__ |
+-----------+
*
*
*
+---------------+
| rewrite_query |
+---------------+
*** ...
* .
** ...
+--------------+ .
| analyzer_one | .
+--------------+ .
* .
* .
* .
+---------------+ +---------------+
| retriever_one | | retriever_two |
+---------------+ +---------------+
*** ***
* *
** **
+----+
| qa |
+----+
*
*
*
+---------+
| __end__ |
+---------+
'''
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class[postgres_aio]
'''
+-----------+
| __start__ |
+-----------+
*
*
*
+---------------+
| rewrite_query |
+---------------+
*** ...
* .
** ...
+--------------+ .
| analyzer_one | .
+--------------+ .
* .
* .
* .
+---------------+ +---------------+
| retriever_one | | retriever_two |
+---------------+ +---------------+
*** ***
* *
** **
+----+
| qa |
+----+
*
*
*
+---------+
| __end__ |
+---------+
'''
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class[postgres_aio_pipe]
'''
+-----------+
| __start__ |
+-----------+
*
*
*
+---------------+
| rewrite_query |
+---------------+
*** ...
* .
** ...
+--------------+ .
| analyzer_one | .
+--------------+ .
* .
* .
* .
+---------------+ +---------------+
| retriever_one | | retriever_two |
+---------------+ +---------------+
*** ***
* *
** **
+----+
| qa |
+----+
*
*
*
+---------+
| __end__ |
+---------+
'''
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class[postgres_aio_pool]
'''
+-----------+
| __start__ |
+-----------+
*
*
*
+---------------+
| rewrite_query |
+---------------+
*** ...
* .
** ...
+--------------+ .
| analyzer_one | .
+--------------+ .
* .
* .
* .
+---------------+ +---------------+
| retriever_one | | retriever_two |
+---------------+ +---------------+
*** ***
* *
** **
+----+
| qa |
+----+
*
*
*
+---------+
| __end__ |
+---------+
'''
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class[sqlite_aio]
'''
+-----------+
| __start__ |
+-----------+
*
*
*
+---------------+
| rewrite_query |
+---------------+
*** ...
* .
** ...
+--------------+ .
| analyzer_one | .
+--------------+ .
* .
* .
* .
+---------------+ +---------------+
| retriever_one | | retriever_two |
+---------------+ +---------------+
*** ***
* *
** **
+----+
| qa |
+----+
*
*
*
+---------+
| __end__ |
+---------+
'''
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2.1
dict({
'$defs': dict({
'InnerObject': dict({
'properties': dict({
'yo': dict({
'title': 'Yo',
'type': 'integer',
}),
}),
'required': list([
'yo',
]),
'title': 'InnerObject',
'type': 'object',
}),
}),
'properties': dict({
'answer': dict({
'anyOf': list([
dict({
'type': 'string',
}),
dict({
'type': 'null',
}),
]),
'default': None,
'title': 'Answer',
}),
'docs': dict({
'items': dict({
'type': 'string',
}),
'title': 'Docs',
'type': 'array',
}),
'inner': dict({
'$ref': '#/$defs/InnerObject',
}),
'query': dict({
'title': 'Query',
'type': 'string',
}),
}),
'required': list([
'query',
'inner',
'docs',
]),
'title': 'State',
'type': 'object',
})
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2.2
dict({
'$defs': dict({
'InnerObject': dict({
'properties': dict({
'yo': dict({
'title': 'Yo',
'type': 'integer',
}),
}),
'required': list([
'yo',
]),
'title': 'InnerObject',
'type': 'object',
}),
}),
'properties': dict({
'answer': dict({
'anyOf': list([
dict({
'type': 'string',
}),
dict({
'type': 'null',
}),
]),
'default': None,
'title': 'Answer',
}),
'docs': dict({
'items': dict({
'type': 'string',
}),
'title': 'Docs',
'type': 'array',
}),
'inner': dict({
'$ref': '#/$defs/InnerObject',
}),
'query': dict({
'title': 'Query',
'type': 'string',
}),
}),
'required': list([
'query',
'inner',
'docs',
]),
'title': 'State',
'type': 'object',
})
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[duckdb_aio]
'''
graph TD;
__start__ --> rewrite_query;
analyzer_one --> retriever_one;
qa --> __end__;
retriever_one --> qa;
retriever_two --> qa;
rewrite_query --> analyzer_one;
rewrite_query -.-> retriever_two;
'''
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[duckdb_aio].1
dict({
'$defs': dict({
'InnerObject': dict({
'properties': dict({
'yo': dict({
'title': 'Yo',
'type': 'integer',
}),
}),
'required': list([
'yo',
]),
'title': 'InnerObject',
'type': 'object',
}),
}),
'properties': dict({
'answer': dict({
'anyOf': list([
dict({
'type': 'string',
}),
dict({
'type': 'null',
}),
]),
'default': None,
'title': 'Answer',
}),
'docs': dict({
'items': dict({
'type': 'string',
}),
'title': 'Docs',
'type': 'array',
}),
'inner': dict({
'$ref': '#/$defs/InnerObject',
}),
'query': dict({
'title': 'Query',
'type': 'string',
}),
}),
'required': list([
'query',
'inner',
'docs',
]),
'title': 'State',
'type': 'object',
})
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[duckdb_aio].2
dict({
'$defs': dict({
'InnerObject': dict({
'properties': dict({
'yo': dict({
'title': 'Yo',
'type': 'integer',
}),
}),
'required': list([
'yo',
]),
'title': 'InnerObject',
'type': 'object',
}),
}),
'properties': dict({
'answer': dict({
'anyOf': list([
dict({
'type': 'string',
}),
dict({
'type': 'null',
}),
]),
'default': None,
'title': 'Answer',
}),
'docs': dict({
'items': dict({
'type': 'string',
}),
'title': 'Docs',
'type': 'array',
}),
'inner': dict({
'$ref': '#/$defs/InnerObject',
}),
'query': dict({
'title': 'Query',
'type': 'string',
}),
}),
'required': list([
'query',
'inner',
'docs',
]),
'title': 'State',
'type': 'object',
})
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[memory]
'''
graph TD;
__start__ --> rewrite_query;
analyzer_one --> retriever_one;
qa --> __end__;
retriever_one --> qa;
retriever_two --> qa;
rewrite_query --> analyzer_one;
rewrite_query -.-> retriever_two;
'''
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[memory].1
dict({
'$defs': dict({
'InnerObject': dict({
'properties': dict({
'yo': dict({
'title': 'Yo',
'type': 'integer',
}),
}),
'required': list([
'yo',
]),
'title': 'InnerObject',
'type': 'object',
}),
}),
'properties': dict({
'answer': dict({
'anyOf': list([
dict({
'type': 'string',
}),
dict({
'type': 'null',
}),
]),
'default': None,
'title': 'Answer',
}),
'docs': dict({
'items': dict({
'type': 'string',
}),
'title': 'Docs',
'type': 'array',
}),
'inner': dict({
'$ref': '#/$defs/InnerObject',
}),
'query': dict({
'title': 'Query',
'type': 'string',
}),
}),
'required': list([
'query',
'inner',
'docs',
]),
'title': 'State',
'type': 'object',
})
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[memory].2
dict({
'$defs': dict({
'InnerObject': dict({
'properties': dict({
'yo': dict({
'title': 'Yo',
'type': 'integer',
}),
}),
'required': list([
'yo',
]),
'title': 'InnerObject',
'type': 'object',
}),
}),
'properties': dict({
'answer': dict({
'anyOf': list([
dict({
'type': 'string',
}),
dict({
'type': 'null',
}),
]),
'default': None,
'title': 'Answer',
}),
'docs': dict({
'items': dict({
'type': 'string',
}),
'title': 'Docs',
'type': 'array',
}),
'inner': dict({
'$ref': '#/$defs/InnerObject',
}),
'query': dict({
'title': 'Query',
'type': 'string',
}),
}),
'required': list([
'query',
'inner',
'docs',
]),
'title': 'State',
'type': 'object',
})
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[postgres_aio]
'''
graph TD;
__start__ --> rewrite_query;
analyzer_one --> retriever_one;
qa --> __end__;
retriever_one --> qa;
retriever_two --> qa;
rewrite_query --> analyzer_one;
rewrite_query -.-> retriever_two;
'''
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[postgres_aio].1
dict({
'$defs': dict({
'InnerObject': dict({
'properties': dict({
'yo': dict({
'title': 'Yo',
'type': 'integer',
}),
}),
'required': list([
'yo',
]),
'title': 'InnerObject',
'type': 'object',
}),
}),
'properties': dict({
'answer': dict({
'anyOf': list([
dict({
'type': 'string',
}),
dict({
'type': 'null',
}),
]),
'default': None,
'title': 'Answer',
}),
'docs': dict({
'items': dict({
'type': 'string',
}),
'title': 'Docs',
'type': 'array',
}),
'inner': dict({
'$ref': '#/$defs/InnerObject',
}),
'query': dict({
'title': 'Query',
'type': 'string',
}),
}),
'required': list([
'query',
'inner',
'docs',
]),
'title': 'State',
'type': 'object',
})
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[postgres_aio].2
dict({
'$defs': dict({
'InnerObject': dict({
'properties': dict({
'yo': dict({
'title': 'Yo',
'type': 'integer',
}),
}),
'required': list([
'yo',
]),
'title': 'InnerObject',
'type': 'object',
}),
}),
'properties': dict({
'answer': dict({
'anyOf': list([
dict({
'type': 'string',
}),
dict({
'type': 'null',
}),
]),
'default': None,
'title': 'Answer',
}),
'docs': dict({
'items': dict({
'type': 'string',
}),
'title': 'Docs',
'type': 'array',
}),
'inner': dict({
'$ref': '#/$defs/InnerObject',
}),
'query': dict({
'title': 'Query',
'type': 'string',
}),
}),
'required': list([
'query',
'inner',
'docs',
]),
'title': 'State',
'type': 'object',
})
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[postgres_aio_pipe]
'''
graph TD;
__start__ --> rewrite_query;
analyzer_one --> retriever_one;
qa --> __end__;
retriever_one --> qa;
retriever_two --> qa;
rewrite_query --> analyzer_one;
rewrite_query -.-> retriever_two;
'''
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[postgres_aio_pipe].1
dict({
'$defs': dict({
'InnerObject': dict({
'properties': dict({
'yo': dict({
'title': 'Yo',
'type': 'integer',
}),
}),
'required': list([
'yo',
]),
'title': 'InnerObject',
'type': 'object',
}),
}),
'properties': dict({
'answer': dict({
'anyOf': list([
dict({
'type': 'string',
}),
dict({
'type': 'null',
}),
]),
'default': None,
'title': 'Answer',
}),
'docs': dict({
'items': dict({
'type': 'string',
}),
'title': 'Docs',
'type': 'array',
}),
'inner': dict({
'$ref': '#/$defs/InnerObject',
}),
'query': dict({
'title': 'Query',
'type': 'string',
}),
}),
'required': list([
'query',
'inner',
'docs',
]),
'title': 'State',
'type': 'object',
})
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[postgres_aio_pipe].2
dict({
'$defs': dict({
'InnerObject': dict({
'properties': dict({
'yo': dict({
'title': 'Yo',
'type': 'integer',
}),
}),
'required': list([
'yo',
]),
'title': 'InnerObject',
'type': 'object',
}),
}),
'properties': dict({
'answer': dict({
'anyOf': list([
dict({
'type': 'string',
}),
dict({
'type': 'null',
}),
]),
'default': None,
'title': 'Answer',
}),
'docs': dict({
'items': dict({
'type': 'string',
}),
'title': 'Docs',
'type': 'array',
}),
'inner': dict({
'$ref': '#/$defs/InnerObject',
}),
'query': dict({
'title': 'Query',
'type': 'string',
}),
}),
'required': list([
'query',
'inner',
'docs',
]),
'title': 'State',
'type': 'object',
})
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[postgres_aio_pool]
'''
graph TD;
__start__ --> rewrite_query;
analyzer_one --> retriever_one;
qa --> __end__;
retriever_one --> qa;
retriever_two --> qa;
rewrite_query --> analyzer_one;
rewrite_query -.-> retriever_two;
'''
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[postgres_aio_pool].1
dict({
'$defs': dict({
'InnerObject': dict({
'properties': dict({
'yo': dict({
'title': 'Yo',
'type': 'integer',
}),
}),
'required': list([
'yo',
]),
'title': 'InnerObject',
'type': 'object',
}),
}),
'properties': dict({
'answer': dict({
'anyOf': list([
dict({
'type': 'string',
}),
dict({
'type': 'null',
}),
]),
'default': None,
'title': 'Answer',
}),
'docs': dict({
'items': dict({
'type': 'string',
}),
'title': 'Docs',
'type': 'array',
}),
'inner': dict({
'$ref': '#/$defs/InnerObject',
}),
'query': dict({
'title': 'Query',
'type': 'string',
}),
}),
'required': list([
'query',
'inner',
'docs',
]),
'title': 'State',
'type': 'object',
})
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[postgres_aio_pool].2
dict({
'$defs': dict({
'InnerObject': dict({
'properties': dict({
'yo': dict({
'title': 'Yo',
'type': 'integer',
}),
}),
'required': list([
'yo',
]),
'title': 'InnerObject',
'type': 'object',
}),
}),
'properties': dict({
'answer': dict({
'anyOf': list([
dict({
'type': 'string',
}),
dict({
'type': 'null',
}),
]),
'default': None,
'title': 'Answer',
}),
'docs': dict({
'items': dict({
'type': 'string',
}),
'title': 'Docs',
'type': 'array',
}),
'inner': dict({
'$ref': '#/$defs/InnerObject',
}),
'query': dict({
'title': 'Query',
'type': 'string',
}),
}),
'required': list([
'query',
'inner',
'docs',
]),
'title': 'State',
'type': 'object',
})
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[sqlite_aio]
'''
graph TD;
__start__ --> rewrite_query;
analyzer_one --> retriever_one;
qa --> __end__;
retriever_one --> qa;
retriever_two --> qa;
rewrite_query --> analyzer_one;
rewrite_query -.-> retriever_two;
'''
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[sqlite_aio].1
dict({
'$defs': dict({
'InnerObject': dict({
'properties': dict({
'yo': dict({
'title': 'Yo',
'type': 'integer',
}),
}),
'required': list([
'yo',
]),
'title': 'InnerObject',
'type': 'object',
}),
}),
'properties': dict({
'answer': dict({
'anyOf': list([
dict({
'type': 'string',
}),
dict({
'type': 'null',
}),
]),
'default': None,
'title': 'Answer',
}),
'docs': dict({
'items': dict({
'type': 'string',
}),
'title': 'Docs',
'type': 'array',
}),
'inner': dict({
'$ref': '#/$defs/InnerObject',
}),
'query': dict({
'title': 'Query',
'type': 'string',
}),
}),
'required': list([
'query',
'inner',
'docs',
]),
'title': 'State',
'type': 'object',
})
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[sqlite_aio].2
dict({
'$defs': dict({
'InnerObject': dict({
'properties': dict({
'yo': dict({
'title': 'Yo',
'type': 'integer',
}),
}),
'required': list([
'yo',
]),
'title': 'InnerObject',
'type': 'object',
}),
}),
'properties': dict({
'answer': dict({
'anyOf': list([
dict({
'type': 'string',
}),
dict({
'type': 'null',
}),
]),
'default': None,
'title': 'Answer',
}),
'docs': dict({
'items': dict({
'type': 'string',
}),
'title': 'Docs',
'type': 'array',
}),
'inner': dict({
'$ref': '#/$defs/InnerObject',
}),
'query': dict({
'title': 'Query',
'type': 'string',
}),
}),
'required': list([
'query',
'inner',
'docs',
]),
'title': 'State',
'type': 'object',
})
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_via_branch
'''
+-----------+
| __start__ |
+-----------+
*
*
*
+---------------+
| rewrite_query |
+---------------+
*** ...
* .
** ...
+--------------+ .
| analyzer_one | .
+--------------+ .
* .
* .
* .
+---------------+ +---------------+
| retriever_one | | retriever_two |
+---------------+ +---------------+
*** ***
* *
** **
+----+
| qa |
+----+
*
*
*
+---------+
| __end__ |
+---------+
'''
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_via_branch[memory]
'''
+-----------+
| __start__ |
+-----------+
*
*
*
+---------------+
| rewrite_query |
+---------------+
*** ...
* .
** ...
+--------------+ .
| analyzer_one | .
+--------------+ .
* .
* .
* .
+---------------+ +---------------+
| retriever_one | | retriever_two |
+---------------+ +---------------+
*** ***
* *
** **
+----+
| qa |
+----+
*
*
*
+---------+
| __end__ |
+---------+
'''
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_via_branch[postgres_aio]
'''
+-----------+
| __start__ |
+-----------+
*
*
*
+---------------+
| rewrite_query |
+---------------+
*** ...
* .
** ...
+--------------+ .
| analyzer_one | .
+--------------+ .
* .
* .
* .
+---------------+ +---------------+
| retriever_one | | retriever_two |
+---------------+ +---------------+
*** ***
* *
** **
+----+
| qa |
+----+
*
*
*
+---------+
| __end__ |
+---------+
'''
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_via_branch[postgres_aio_pipe]
'''
+-----------+
| __start__ |
+-----------+
*
*
*
+---------------+
| rewrite_query |
+---------------+
*** ...
* .
** ...
+--------------+ .
| analyzer_one | .
+--------------+ .
* .
* .
* .
+---------------+ +---------------+
| retriever_one | | retriever_two |
+---------------+ +---------------+
*** ***
* *
** **
+----+
| qa |
+----+
*
*
*
+---------+
| __end__ |
+---------+
'''
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_via_branch[postgres_aio_pool]
'''
+-----------+
| __start__ |
+-----------+
*
*
*
+---------------+
| rewrite_query |
+---------------+
*** ...
* .
** ...
+--------------+ .
| analyzer_one | .
+--------------+ .
* .
* .
* .
+---------------+ +---------------+
| retriever_one | | retriever_two |
+---------------+ +---------------+
*** ***
* *
** **
+----+
| qa |
+----+
*
*
*
+---------+
| __end__ |
+---------+
'''
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_via_branch[sqlite_aio]
'''
+-----------+
| __start__ |
+-----------+
*
*
*
+---------------+
| rewrite_query |
+---------------+
*** ...
* .
** ...
+--------------+ .
| analyzer_one | .
+--------------+ .
* .
* .
* .
+---------------+ +---------------+
| retriever_one | | retriever_two |
+---------------+ +---------------+
*** ***
* *
** **
+----+
| qa |
+----+
*
*
*
+---------+
| __end__ |
+---------+
'''
# ---
# name: test_nested_graph
'''
+-----------+
| __start__ |
+-----------+
*
*
*
+-------+
| inner |
+-------+
*
*
*
+------+
| side |
+------+
*
*
*
+---------+
| __end__ |
+---------+
'''
# ---
# name: test_send_react_interrupt_control[memory]
'''
%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
__start__([<p>__start__</p>]):::first
agent(agent)
foo([foo]):::last
__start__ --> agent;
agent -.-> foo;
classDef default fill:#f2f0ff,line-height:1.2
classDef first fill-opacity:0
classDef last fill:#bfb6fc
'''
# ---
# name: test_send_react_interrupt_control[postgres_aio]
'''
%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
__start__([<p>__start__</p>]):::first
agent(agent)
foo([foo]):::last
__start__ --> agent;
agent -.-> foo;
classDef default fill:#f2f0ff,line-height:1.2
classDef first fill-opacity:0
classDef last fill:#bfb6fc
'''
# ---
# name: test_send_react_interrupt_control[postgres_aio_pipe]
'''
%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
__start__([<p>__start__</p>]):::first
agent(agent)
foo([foo]):::last
__start__ --> agent;
agent -.-> foo;
classDef default fill:#f2f0ff,line-height:1.2
classDef first fill-opacity:0
classDef last fill:#bfb6fc
'''
# ---
# name: test_send_react_interrupt_control[postgres_aio_pool]
'''
%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
__start__([<p>__start__</p>]):::first
agent(agent)
foo([foo]):::last
__start__ --> agent;
agent -.-> foo;
classDef default fill:#f2f0ff,line-height:1.2
classDef first fill-opacity:0
classDef last fill:#bfb6fc
'''
# ---
# name: test_send_react_interrupt_control[sqlite_aio]
'''
%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
__start__([<p>__start__</p>]):::first
agent(agent)
foo([foo]):::last
__start__ --> agent;
agent -.-> foo;
classDef default fill:#f2f0ff,line-height:1.2
classDef first fill-opacity:0
classDef last fill:#bfb6fc
'''
# ---
# name: test_weather_subgraph[duckdb_aio]
'''
%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
__start__([<p>__start__</p>]):::first
router_node(router_node)
normal_llm_node(normal_llm_node)
weather_graph_model_node(model_node)
weather_graph_weather_node(weather_node<hr/><small><em>__interrupt = before</em></small>)
__end__([<p>__end__</p>]):::last
__start__ --> router_node;
normal_llm_node --> __end__;
weather_graph_weather_node --> __end__;
router_node -.-> normal_llm_node;
router_node -.-> weather_graph_model_node;
router_node -.-> __end__;
subgraph weather_graph
weather_graph_model_node --> weather_graph_weather_node;
end
classDef default fill:#f2f0ff,line-height:1.2
classDef first fill-opacity:0
classDef last fill:#bfb6fc
'''
# ---
# name: test_weather_subgraph[memory]
'''
%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
__start__([<p>__start__</p>]):::first
router_node(router_node)
normal_llm_node(normal_llm_node)
weather_graph_model_node(model_node)
weather_graph_weather_node(weather_node<hr/><small><em>__interrupt = before</em></small>)
__end__([<p>__end__</p>]):::last
__start__ --> router_node;
normal_llm_node --> __end__;
weather_graph_weather_node --> __end__;
router_node -.-> normal_llm_node;
router_node -.-> weather_graph_model_node;
router_node -.-> __end__;
subgraph weather_graph
weather_graph_model_node --> weather_graph_weather_node;
end
classDef default fill:#f2f0ff,line-height:1.2
classDef first fill-opacity:0
classDef last fill:#bfb6fc
'''
# ---
# name: test_weather_subgraph[postgres_aio]
'''
%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
__start__([<p>__start__</p>]):::first
router_node(router_node)
normal_llm_node(normal_llm_node)
weather_graph_model_node(model_node)
weather_graph_weather_node(weather_node<hr/><small><em>__interrupt = before</em></small>)
__end__([<p>__end__</p>]):::last
__start__ --> router_node;
normal_llm_node --> __end__;
weather_graph_weather_node --> __end__;
router_node -.-> normal_llm_node;
router_node -.-> weather_graph_model_node;
router_node -.-> __end__;
subgraph weather_graph
weather_graph_model_node --> weather_graph_weather_node;
end
classDef default fill:#f2f0ff,line-height:1.2
classDef first fill-opacity:0
classDef last fill:#bfb6fc
'''
# ---
# name: test_weather_subgraph[postgres_aio_pipe]
'''
%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
__start__([<p>__start__</p>]):::first
router_node(router_node)
normal_llm_node(normal_llm_node)
weather_graph_model_node(model_node)
weather_graph_weather_node(weather_node<hr/><small><em>__interrupt = before</em></small>)
__end__([<p>__end__</p>]):::last
__start__ --> router_node;
normal_llm_node --> __end__;
weather_graph_weather_node --> __end__;
router_node -.-> normal_llm_node;
router_node -.-> weather_graph_model_node;
router_node -.-> __end__;
subgraph weather_graph
weather_graph_model_node --> weather_graph_weather_node;
end
classDef default fill:#f2f0ff,line-height:1.2
classDef first fill-opacity:0
classDef last fill:#bfb6fc
'''
# ---
# name: test_weather_subgraph[postgres_aio_pool]
'''
%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
__start__([<p>__start__</p>]):::first
router_node(router_node)
normal_llm_node(normal_llm_node)
weather_graph_model_node(model_node)
weather_graph_weather_node(weather_node<hr/><small><em>__interrupt = before</em></small>)
__end__([<p>__end__</p>]):::last
__start__ --> router_node;
normal_llm_node --> __end__;
weather_graph_weather_node --> __end__;
router_node -.-> normal_llm_node;
router_node -.-> weather_graph_model_node;
router_node -.-> __end__;
subgraph weather_graph
weather_graph_model_node --> weather_graph_weather_node;
end
classDef default fill:#f2f0ff,line-height:1.2
classDef first fill-opacity:0
classDef last fill:#bfb6fc
'''
# ---
# name: test_weather_subgraph[sqlite_aio]
'''
%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
__start__([<p>__start__</p>]):::first
router_node(router_node)
normal_llm_node(normal_llm_node)
weather_graph_model_node(model_node)
weather_graph_weather_node(weather_node<hr/><small><em>__interrupt = before</em></small>)
__end__([<p>__end__</p>]):::last
__start__ --> router_node;
normal_llm_node --> __end__;
weather_graph_weather_node --> __end__;
router_node -.-> normal_llm_node;
router_node -.-> weather_graph_model_node;
router_node -.-> __end__;
subgraph weather_graph
weather_graph_model_node --> weather_graph_weather_node;
end
classDef default fill:#f2f0ff,line-height:1.2
classDef first fill-opacity:0
classDef last fill:#bfb6fc
'''
# ---
|
0 | lc_public_repos/langgraph/libs/langgraph/tests | lc_public_repos/langgraph/libs/langgraph/tests/__snapshots__/test_pregel.ambr | # serializer version: 1
# name: test_branch_then.1
'''
%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
__start__([__start__]):::first
prepare(prepare)
tool_two_slow(tool_two_slow)
tool_two_fast(tool_two_fast)
finish(finish)
__end__([__end__]):::last
__start__ --> prepare;
finish --> __end__;
prepare -.-> tool_two_slow;
tool_two_slow --> finish;
prepare -.-> tool_two_fast;
tool_two_fast --> finish;
classDef default fill:#f2f0ff,line-height:1.2
classDef first fill-opacity:0
classDef last fill:#bfb6fc
'''
# ---
# name: test_branch_then[duckdb]
'''
graph TD;
__start__ --> prepare;
finish --> __end__;
prepare -.-> tool_two_slow;
tool_two_slow --> finish;
prepare -.-> tool_two_fast;
tool_two_fast --> finish;
'''
# ---
# name: test_branch_then[duckdb].1
'''
%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
__start__([<p>__start__</p>]):::first
prepare(prepare)
tool_two_slow(tool_two_slow)
tool_two_fast(tool_two_fast)
finish(finish)
__end__([<p>__end__</p>]):::last
__start__ --> prepare;
finish --> __end__;
prepare -.-> tool_two_slow;
tool_two_slow --> finish;
prepare -.-> tool_two_fast;
tool_two_fast --> finish;
classDef default fill:#f2f0ff,line-height:1.2
classDef first fill-opacity:0
classDef last fill:#bfb6fc
'''
# ---
# name: test_branch_then[memory]
'''
graph TD;
__start__ --> prepare;
finish --> __end__;
prepare -.-> tool_two_slow;
tool_two_slow --> finish;
prepare -.-> tool_two_fast;
tool_two_fast --> finish;
'''
# ---
# name: test_branch_then[memory].1
'''
%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
__start__([<p>__start__</p>]):::first
prepare(prepare)
tool_two_slow(tool_two_slow)
tool_two_fast(tool_two_fast)
finish(finish)
__end__([<p>__end__</p>]):::last
__start__ --> prepare;
finish --> __end__;
prepare -.-> tool_two_slow;
tool_two_slow --> finish;
prepare -.-> tool_two_fast;
tool_two_fast --> finish;
classDef default fill:#f2f0ff,line-height:1.2
classDef first fill-opacity:0
classDef last fill:#bfb6fc
'''
# ---
# name: test_branch_then[postgres]
'''
graph TD;
__start__ --> prepare;
finish --> __end__;
prepare -.-> tool_two_slow;
tool_two_slow --> finish;
prepare -.-> tool_two_fast;
tool_two_fast --> finish;
'''
# ---
# name: test_branch_then[postgres].1
'''
%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
__start__([<p>__start__</p>]):::first
prepare(prepare)
tool_two_slow(tool_two_slow)
tool_two_fast(tool_two_fast)
finish(finish)
__end__([<p>__end__</p>]):::last
__start__ --> prepare;
finish --> __end__;
prepare -.-> tool_two_slow;
tool_two_slow --> finish;
prepare -.-> tool_two_fast;
tool_two_fast --> finish;
classDef default fill:#f2f0ff,line-height:1.2
classDef first fill-opacity:0
classDef last fill:#bfb6fc
'''
# ---
# name: test_branch_then[postgres_pipe]
'''
graph TD;
__start__ --> prepare;
finish --> __end__;
prepare -.-> tool_two_slow;
tool_two_slow --> finish;
prepare -.-> tool_two_fast;
tool_two_fast --> finish;
'''
# ---
# name: test_branch_then[postgres_pipe].1
'''
%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
__start__([<p>__start__</p>]):::first
prepare(prepare)
tool_two_slow(tool_two_slow)
tool_two_fast(tool_two_fast)
finish(finish)
__end__([<p>__end__</p>]):::last
__start__ --> prepare;
finish --> __end__;
prepare -.-> tool_two_slow;
tool_two_slow --> finish;
prepare -.-> tool_two_fast;
tool_two_fast --> finish;
classDef default fill:#f2f0ff,line-height:1.2
classDef first fill-opacity:0
classDef last fill:#bfb6fc
'''
# ---
# name: test_branch_then[postgres_pool]
'''
graph TD;
__start__ --> prepare;
finish --> __end__;
prepare -.-> tool_two_slow;
tool_two_slow --> finish;
prepare -.-> tool_two_fast;
tool_two_fast --> finish;
'''
# ---
# name: test_branch_then[postgres_pool].1
'''
%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
__start__([<p>__start__</p>]):::first
prepare(prepare)
tool_two_slow(tool_two_slow)
tool_two_fast(tool_two_fast)
finish(finish)
__end__([<p>__end__</p>]):::last
__start__ --> prepare;
finish --> __end__;
prepare -.-> tool_two_slow;
tool_two_slow --> finish;
prepare -.-> tool_two_fast;
tool_two_fast --> finish;
classDef default fill:#f2f0ff,line-height:1.2
classDef first fill-opacity:0
classDef last fill:#bfb6fc
'''
# ---
# name: test_branch_then[sqlite]
'''
graph TD;
__start__ --> prepare;
finish --> __end__;
prepare -.-> tool_two_slow;
tool_two_slow --> finish;
prepare -.-> tool_two_fast;
tool_two_fast --> finish;
'''
# ---
# name: test_branch_then[sqlite].1
'''
%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
__start__([<p>__start__</p>]):::first
prepare(prepare)
tool_two_slow(tool_two_slow)
tool_two_fast(tool_two_fast)
finish(finish)
__end__([<p>__end__</p>]):::last
__start__ --> prepare;
finish --> __end__;
prepare -.-> tool_two_slow;
tool_two_slow --> finish;
prepare -.-> tool_two_fast;
tool_two_fast --> finish;
classDef default fill:#f2f0ff,line-height:1.2
classDef first fill-opacity:0
classDef last fill:#bfb6fc
'''
# ---
# name: test_conditional_entrypoint_graph
'{"title": "LangGraphInput"}'
# ---
# name: test_conditional_entrypoint_graph.1
'{"title": "LangGraphOutput"}'
# ---
# name: test_conditional_entrypoint_graph.2
'''
{
"nodes": [
{
"id": "__start__",
"type": "schema",
"data": "__start__"
},
{
"id": "left",
"type": "runnable",
"data": {
"id": [
"langgraph",
"utils",
"runnable",
"RunnableCallable"
],
"name": "left"
}
},
{
"id": "right",
"type": "runnable",
"data": {
"id": [
"langgraph",
"utils",
"runnable",
"RunnableCallable"
],
"name": "right"
}
},
{
"id": "__end__",
"type": "schema",
"data": "__end__"
}
],
"edges": [
{
"source": "right",
"target": "__end__"
},
{
"source": "__start__",
"target": "left",
"data": "go-left",
"conditional": true
},
{
"source": "__start__",
"target": "right",
"data": "go-right",
"conditional": true
},
{
"source": "left",
"target": "__end__",
"conditional": true
}
]
}
'''
# ---
# name: test_conditional_entrypoint_graph.3
'''
graph TD;
right --> __end__;
__start__ -. go-left .-> left;
__start__ -. go-right .-> right;
left -.-> __end__;
'''
# ---
# name: test_conditional_entrypoint_graph_state
'{"properties": {"input": {"default": null, "title": "Input", "type": "string"}, "output": {"default": null, "title": "Output", "type": "string"}, "steps": {"default": null, "items": {"type": "string"}, "title": "Steps", "type": "array"}}, "title": "LangGraphInput", "type": "object"}'
# ---
# name: test_conditional_entrypoint_graph_state.1
'{"properties": {"input": {"default": null, "title": "Input", "type": "string"}, "output": {"default": null, "title": "Output", "type": "string"}, "steps": {"default": null, "items": {"type": "string"}, "title": "Steps", "type": "array"}}, "title": "LangGraphOutput", "type": "object"}'
# ---
# name: test_conditional_entrypoint_graph_state.2
'''
{
"nodes": [
{
"id": "__start__",
"type": "schema",
"data": "__start__"
},
{
"id": "left",
"type": "runnable",
"data": {
"id": [
"langgraph",
"utils",
"runnable",
"RunnableCallable"
],
"name": "left"
}
},
{
"id": "right",
"type": "runnable",
"data": {
"id": [
"langgraph",
"utils",
"runnable",
"RunnableCallable"
],
"name": "right"
}
},
{
"id": "__end__",
"type": "schema",
"data": "__end__"
}
],
"edges": [
{
"source": "right",
"target": "__end__"
},
{
"source": "__start__",
"target": "left",
"data": "go-left",
"conditional": true
},
{
"source": "__start__",
"target": "right",
"data": "go-right",
"conditional": true
},
{
"source": "left",
"target": "__end__",
"conditional": true
}
]
}
'''
# ---
# name: test_conditional_entrypoint_graph_state.3
'''
graph TD;
right --> __end__;
__start__ -. go-left .-> left;
__start__ -. go-right .-> right;
left -.-> __end__;
'''
# ---
# name: test_conditional_entrypoint_to_multiple_state_graph
'{"properties": {"locations": {"items": {"type": "string"}, "title": "Locations", "type": "array"}, "results": {"items": {"type": "string"}, "title": "Results", "type": "array"}}, "required": ["locations", "results"], "title": "LangGraphInput", "type": "object"}'
# ---
# name: test_conditional_entrypoint_to_multiple_state_graph.1
'{"properties": {"locations": {"items": {"type": "string"}, "title": "Locations", "type": "array"}, "results": {"items": {"type": "string"}, "title": "Results", "type": "array"}}, "required": ["locations", "results"], "title": "LangGraphOutput", "type": "object"}'
# ---
# name: test_conditional_entrypoint_to_multiple_state_graph.2
'''
{
"nodes": [
{
"id": "__start__",
"type": "schema",
"data": "__start__"
},
{
"id": "get_weather",
"type": "runnable",
"data": {
"id": [
"langgraph",
"utils",
"runnable",
"RunnableCallable"
],
"name": "get_weather"
}
},
{
"id": "__end__",
"type": "schema",
"data": "__end__"
}
],
"edges": [
{
"source": "get_weather",
"target": "__end__"
},
{
"source": "__start__",
"target": "get_weather",
"conditional": true
},
{
"source": "__start__",
"target": "__end__",
"conditional": true
}
]
}
'''
# ---
# name: test_conditional_entrypoint_to_multiple_state_graph.3
'''
graph TD;
get_weather --> __end__;
__start__ -.-> get_weather;
__start__ -.-> __end__;
'''
# ---
# name: test_conditional_graph.1
'''
graph TD;
__start__ --> agent;
tools --> agent;
agent -.  continue  .-> tools;
agent -.  exit  .-> __end__;
'''
# ---
# name: test_conditional_graph.2
'''
%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
__start__([__start__]):::first
agent(agent)
tools(tools<hr/><small><em>version = 2
variant = b</em></small>)
__end__([__end__]):::last
__start__ --> agent;
tools --> agent;
agent -.  continue  .-> tools;
agent -.  exit  .-> __end__;
classDef default fill:#f2f0ff,line-height:1.2
classDef first fill-opacity:0
classDef last fill:#bfb6fc
'''
# ---
# name: test_conditional_graph.3
'''
{
"nodes": [
{
"id": "__start__",
"type": "schema",
"data": "__start__"
},
{
"id": 1,
"type": "schema",
"data": "Parallel<agent_outcome>Input"
},
{
"id": 2,
"type": "schema",
"data": "Parallel<agent_outcome>Output"
},
{
"id": 3,
"type": "runnable",
"data": {
"id": [
"langchain",
"prompts",
"prompt",
"PromptTemplate"
],
"name": "PromptTemplate"
}
},
{
"id": 4,
"type": "runnable",
"data": {
"id": [
"langchain_core",
"language_models",
"fake",
"FakeStreamingListLLM"
],
"name": "FakeStreamingListLLM"
}
},
{
"id": 5,
"type": "runnable",
"data": {
"id": [
"langchain_core",
"runnables",
"base",
"RunnableLambda"
],
"name": "agent_parser"
}
},
{
"id": 6,
"type": "runnable",
"data": {
"id": [
"langchain",
"schema",
"runnable",
"RunnablePassthrough"
],
"name": "Passthrough"
}
},
{
"id": "tools",
"type": "runnable",
"data": {
"id": [
"langgraph",
"utils",
"RunnableCallable"
],
"name": "tools"
},
"metadata": {
"version": 2,
"variant": "b"
}
},
{
"id": "__end__",
"type": "schema",
"data": "__end__"
}
],
"edges": [
{
"source": 3,
"target": 4
},
{
"source": 4,
"target": 5
},
{
"source": 1,
"target": 3
},
{
"source": 5,
"target": 2
},
{
"source": 1,
"target": 6
},
{
"source": 6,
"target": 2
},
{
"source": "__start__",
"target": 1
},
{
"source": "tools",
"target": 1
},
{
"source": 2,
"target": "tools",
"data": "continue",
"conditional": true
},
{
"source": 2,
"target": "__end__",
"data": "exit",
"conditional": true
}
]
}
'''
# ---
# name: test_conditional_graph.4
'''
graph TD;
PromptTemplate --> FakeStreamingListLLM;
FakeStreamingListLLM --> agent_parser;
Parallel_agent_outcome_Input --> PromptTemplate;
agent_parser --> Parallel_agent_outcome_Output;
Parallel_agent_outcome_Input --> Passthrough;
Passthrough --> Parallel_agent_outcome_Output;
__start__ --> Parallel_agent_outcome_Input;
tools --> Parallel_agent_outcome_Input;
Parallel_agent_outcome_Output -.  continue  .-> tools;
Parallel_agent_outcome_Output -.  exit  .-> __end__;
'''
# ---
# name: test_conditional_graph.5
dict({
'edges': list([
dict({
'source': '__start__',
'target': 'agent',
}),
dict({
'source': 'tools',
'target': 'agent',
}),
dict({
'conditional': True,
'data': 'continue',
'source': 'agent',
'target': 'tools',
}),
dict({
'conditional': True,
'data': 'exit',
'source': 'agent',
'target': '__end__',
}),
]),
'nodes': list([
dict({
'data': '__start__',
'id': '__start__',
'type': 'schema',
}),
dict({
'data': dict({
'id': list([
'langchain',
'schema',
'runnable',
'RunnableAssign',
]),
'name': 'agent',
}),
'id': 'agent',
'metadata': dict({
'__interrupt': 'after',
}),
'type': 'runnable',
}),
dict({
'data': dict({
'id': list([
'langgraph',
'utils',
'RunnableCallable',
]),
'name': 'tools',
}),
'id': 'tools',
'metadata': dict({
'variant': 'b',
'version': 2,
}),
'type': 'runnable',
}),
dict({
'data': '__end__',
'id': '__end__',
'type': 'schema',
}),
]),
})
# ---
# name: test_conditional_graph.6
'''
%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
__start__([__start__]):::first
agent(agent<hr/><small><em>__interrupt = after</em></small>)
tools(tools<hr/><small><em>version = 2
variant = b</em></small>)
__end__([__end__]):::last
__start__ --> agent;
tools --> agent;
agent -.  continue  .-> tools;
agent -.  exit  .-> __end__;
classDef default fill:#f2f0ff,line-height:1.2
classDef first fill-opacity:0
classDef last fill:#bfb6fc
'''
# ---
# name: test_conditional_graph[duckdb]
'''
{
"nodes": [
{
"id": "__start__",
"type": "schema",
"data": "__start__"
},
{
"id": "agent",
"type": "runnable",
"data": {
"id": [
"langchain",
"schema",
"runnable",
"RunnableAssign"
],
"name": "agent"
}
},
{
"id": "tools",
"type": "runnable",
"data": {
"id": [
"langgraph",
"utils",
"runnable",
"RunnableCallable"
],
"name": "tools"
},
"metadata": {
"parents": {},
"version": 2,
"variant": "b"
}
},
{
"id": "__end__",
"type": "schema",
"data": "__end__"
}
],
"edges": [
{
"source": "__start__",
"target": "agent"
},
{
"source": "tools",
"target": "agent"
},
{
"source": "agent",
"target": "tools",
"data": "continue",
"conditional": true
},
{
"source": "agent",
"target": "__end__",
"data": "exit",
"conditional": true
}
]
}
'''
# ---
# name: test_conditional_graph[duckdb].1
'''
graph TD;
__start__ --> agent;
tools --> agent;
agent -. continue .-> tools;
agent -. exit .-> __end__;
'''
# ---
# name: test_conditional_graph[duckdb].2
'''
%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
__start__([<p>__start__</p>]):::first
agent(agent)
tools(tools<hr/><small><em>parents = {}
version = 2
variant = b</em></small>)
__end__([<p>__end__</p>]):::last
__start__ --> agent;
tools --> agent;
agent -. continue .-> tools;
agent -. exit .-> __end__;
classDef default fill:#f2f0ff,line-height:1.2
classDef first fill-opacity:0
classDef last fill:#bfb6fc
'''
# ---
# name: test_conditional_graph[duckdb].3
'''
{
"nodes": [
{
"id": "__start__",
"type": "schema",
"data": "__start__"
},
{
"id": "agent",
"type": "runnable",
"data": {
"id": [
"langchain",
"schema",
"runnable",
"RunnableAssign"
],
"name": "agent"
}
},
{
"id": "tools",
"type": "runnable",
"data": {
"id": [
"langgraph",
"utils",
"runnable",
"RunnableCallable"
],
"name": "tools"
},
"metadata": {
"parents": {},
"version": 2,
"variant": "b"
}
},
{
"id": "__end__",
"type": "schema",
"data": "__end__"
}
],
"edges": [
{
"source": "__start__",
"target": "agent"
},
{
"source": "tools",
"target": "agent"
},
{
"source": "agent",
"target": "tools",
"data": "continue",
"conditional": true
},
{
"source": "agent",
"target": "__end__",
"data": "exit",
"conditional": true
}
]
}
'''
# ---
# name: test_conditional_graph[duckdb].4
'''
graph TD;
__start__ --> agent;
tools --> agent;
agent -. continue .-> tools;
agent -. exit .-> __end__;
'''
# ---
# name: test_conditional_graph[duckdb].5
dict({
'edges': list([
dict({
'source': '__start__',
'target': 'agent',
}),
dict({
'source': 'tools',
'target': 'agent',
}),
dict({
'conditional': True,
'data': 'continue',
'source': 'agent',
'target': 'tools',
}),
dict({
'conditional': True,
'data': 'exit',
'source': 'agent',
'target': '__end__',
}),
]),
'nodes': list([
dict({
'data': '__start__',
'id': '__start__',
'type': 'schema',
}),
dict({
'data': dict({
'id': list([
'langchain',
'schema',
'runnable',
'RunnableAssign',
]),
'name': 'agent',
}),
'id': 'agent',
'metadata': dict({
'__interrupt': 'after',
}),
'type': 'runnable',
}),
dict({
'data': dict({
'id': list([
'langgraph',
'utils',
'runnable',
'RunnableCallable',
]),
'name': 'tools',
}),
'id': 'tools',
'metadata': dict({
'parents': dict({
}),
'variant': 'b',
'version': 2,
}),
'type': 'runnable',
}),
dict({
'data': '__end__',
'id': '__end__',
'type': 'schema',
}),
]),
})
# ---
# name: test_conditional_graph[duckdb].6
'''
%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
__start__([<p>__start__</p>]):::first
agent(agent<hr/><small><em>__interrupt = after</em></small>)
tools(tools<hr/><small><em>parents = {}
version = 2
variant = b</em></small>)
__end__([<p>__end__</p>]):::last
__start__ --> agent;
tools --> agent;
agent -. continue .-> tools;
agent -. exit .-> __end__;
classDef default fill:#f2f0ff,line-height:1.2
classDef first fill-opacity:0
classDef last fill:#bfb6fc
'''
# ---
# name: test_conditional_graph[memory]
'''
{
"nodes": [
{
"id": "__start__",
"type": "schema",
"data": "__start__"
},
{
"id": "agent",
"type": "runnable",
"data": {
"id": [
"langchain",
"schema",
"runnable",
"RunnableAssign"
],
"name": "agent"
}
},
{
"id": "tools",
"type": "runnable",
"data": {
"id": [
"langgraph",
"utils",
"runnable",
"RunnableCallable"
],
"name": "tools"
},
"metadata": {
"parents": {},
"version": 2,
"variant": "b"
}
},
{
"id": "__end__",
"type": "schema",
"data": "__end__"
}
],
"edges": [
{
"source": "__start__",
"target": "agent"
},
{
"source": "tools",
"target": "agent"
},
{
"source": "agent",
"target": "tools",
"data": "continue",
"conditional": true
},
{
"source": "agent",
"target": "__end__",
"data": "exit",
"conditional": true
}
]
}
'''
# ---
# name: test_conditional_graph[memory].1
'''
graph TD;
__start__ --> agent;
tools --> agent;
agent -. continue .-> tools;
agent -. exit .-> __end__;
'''
# ---
# name: test_conditional_graph[memory].2
'''
%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
__start__([<p>__start__</p>]):::first
agent(agent)
tools(tools<hr/><small><em>parents = {}
version = 2
variant = b</em></small>)
__end__([<p>__end__</p>]):::last
__start__ --> agent;
tools --> agent;
agent -. continue .-> tools;
agent -. exit .-> __end__;
classDef default fill:#f2f0ff,line-height:1.2
classDef first fill-opacity:0
classDef last fill:#bfb6fc
'''
# ---
# name: test_conditional_graph[memory].3
'''
{
"nodes": [
{
"id": "__start__",
"type": "schema",
"data": "__start__"
},
{
"id": "agent",
"type": "runnable",
"data": {
"id": [
"langchain",
"schema",
"runnable",
"RunnableAssign"
],
"name": "agent"
}
},
{
"id": "tools",
"type": "runnable",
"data": {
"id": [
"langgraph",
"utils",
"runnable",
"RunnableCallable"
],
"name": "tools"
},
"metadata": {
"parents": {},
"version": 2,
"variant": "b"
}
},
{
"id": "__end__",
"type": "schema",
"data": "__end__"
}
],
"edges": [
{
"source": "__start__",
"target": "agent"
},
{
"source": "tools",
"target": "agent"
},
{
"source": "agent",
"target": "tools",
"data": "continue",
"conditional": true
},
{
"source": "agent",
"target": "__end__",
"data": "exit",
"conditional": true
}
]
}
'''
# ---
# name: test_conditional_graph[memory].4
'''
graph TD;
__start__ --> agent;
tools --> agent;
agent -. continue .-> tools;
agent -. exit .-> __end__;
'''
# ---
# name: test_conditional_graph[memory].5
dict({
'edges': list([
dict({
'source': '__start__',
'target': 'agent',
}),
dict({
'source': 'tools',
'target': 'agent',
}),
dict({
'conditional': True,
'data': 'continue',
'source': 'agent',
'target': 'tools',
}),
dict({
'conditional': True,
'data': 'exit',
'source': 'agent',
'target': '__end__',
}),
]),
'nodes': list([
dict({
'data': '__start__',
'id': '__start__',
'type': 'schema',
}),
dict({
'data': dict({
'id': list([
'langchain',
'schema',
'runnable',
'RunnableAssign',
]),
'name': 'agent',
}),
'id': 'agent',
'metadata': dict({
'__interrupt': 'after',
}),
'type': 'runnable',
}),
dict({
'data': dict({
'id': list([
'langgraph',
'utils',
'runnable',
'RunnableCallable',
]),
'name': 'tools',
}),
'id': 'tools',
'metadata': dict({
'parents': dict({
}),
'variant': 'b',
'version': 2,
}),
'type': 'runnable',
}),
dict({
'data': '__end__',
'id': '__end__',
'type': 'schema',
}),
]),
})
# ---
# name: test_conditional_graph[memory].6
'''
%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
__start__([<p>__start__</p>]):::first
agent(agent<hr/><small><em>__interrupt = after</em></small>)
tools(tools<hr/><small><em>parents = {}
version = 2
variant = b</em></small>)
__end__([<p>__end__</p>]):::last
__start__ --> agent;
tools --> agent;
agent -. continue .-> tools;
agent -. exit .-> __end__;
classDef default fill:#f2f0ff,line-height:1.2
classDef first fill-opacity:0
classDef last fill:#bfb6fc
'''
# ---
# name: test_conditional_graph[postgres]
'''
{
"nodes": [
{
"id": "__start__",
"type": "schema",
"data": "__start__"
},
{
"id": "agent",
"type": "runnable",
"data": {
"id": [
"langchain",
"schema",
"runnable",
"RunnableAssign"
],
"name": "agent"
}
},
{
"id": "tools",
"type": "runnable",
"data": {
"id": [
"langgraph",
"utils",
"runnable",
"RunnableCallable"
],
"name": "tools"
},
"metadata": {
"parents": {},
"version": 2,
"variant": "b"
}
},
{
"id": "__end__",
"type": "schema",
"data": "__end__"
}
],
"edges": [
{
"source": "__start__",
"target": "agent"
},
{
"source": "tools",
"target": "agent"
},
{
"source": "agent",
"target": "tools",
"data": "continue",
"conditional": true
},
{
"source": "agent",
"target": "__end__",
"data": "exit",
"conditional": true
}
]
}
'''
# ---
# name: test_conditional_graph[postgres].1
'''
graph TD;
__start__ --> agent;
tools --> agent;
agent -. continue .-> tools;
agent -. exit .-> __end__;
'''
# ---
# name: test_conditional_graph[postgres].2
'''
%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
__start__([<p>__start__</p>]):::first
agent(agent)
tools(tools<hr/><small><em>parents = {}
version = 2
variant = b</em></small>)
__end__([<p>__end__</p>]):::last
__start__ --> agent;
tools --> agent;
agent -. continue .-> tools;
agent -. exit .-> __end__;
classDef default fill:#f2f0ff,line-height:1.2
classDef first fill-opacity:0
classDef last fill:#bfb6fc
'''
# ---
# name: test_conditional_graph[postgres].3
'''
{
"nodes": [
{
"id": "__start__",
"type": "schema",
"data": "__start__"
},
{
"id": "agent",
"type": "runnable",
"data": {
"id": [
"langchain",
"schema",
"runnable",
"RunnableAssign"
],
"name": "agent"
}
},
{
"id": "tools",
"type": "runnable",
"data": {
"id": [
"langgraph",
"utils",
"runnable",
"RunnableCallable"
],
"name": "tools"
},
"metadata": {
"parents": {},
"version": 2,
"variant": "b"
}
},
{
"id": "__end__",
"type": "schema",
"data": "__end__"
}
],
"edges": [
{
"source": "__start__",
"target": "agent"
},
{
"source": "tools",
"target": "agent"
},
{
"source": "agent",
"target": "tools",
"data": "continue",
"conditional": true
},
{
"source": "agent",
"target": "__end__",
"data": "exit",
"conditional": true
}
]
}
'''
# ---
# name: test_conditional_graph[postgres].4
'''
graph TD;
__start__ --> agent;
tools --> agent;
agent -. continue .-> tools;
agent -. exit .-> __end__;
'''
# ---
# name: test_conditional_graph[postgres].5
dict({
'edges': list([
dict({
'source': '__start__',
'target': 'agent',
}),
dict({
'source': 'tools',
'target': 'agent',
}),
dict({
'conditional': True,
'data': 'continue',
'source': 'agent',
'target': 'tools',
}),
dict({
'conditional': True,
'data': 'exit',
'source': 'agent',
'target': '__end__',
}),
]),
'nodes': list([
dict({
'data': '__start__',
'id': '__start__',
'type': 'schema',
}),
dict({
'data': dict({
'id': list([
'langchain',
'schema',
'runnable',
'RunnableAssign',
]),
'name': 'agent',
}),
'id': 'agent',
'metadata': dict({
'__interrupt': 'after',
}),
'type': 'runnable',
}),
dict({
'data': dict({
'id': list([
'langgraph',
'utils',
'runnable',
'RunnableCallable',
]),
'name': 'tools',
}),
'id': 'tools',
'metadata': dict({
'parents': dict({
}),
'variant': 'b',
'version': 2,
}),
'type': 'runnable',
}),
dict({
'data': '__end__',
'id': '__end__',
'type': 'schema',
}),
]),
})
# ---
# name: test_conditional_graph[postgres].6
'''
%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
__start__([<p>__start__</p>]):::first
agent(agent<hr/><small><em>__interrupt = after</em></small>)
tools(tools<hr/><small><em>parents = {}
version = 2
variant = b</em></small>)
__end__([<p>__end__</p>]):::last
__start__ --> agent;
tools --> agent;
agent -. continue .-> tools;
agent -. exit .-> __end__;
classDef default fill:#f2f0ff,line-height:1.2
classDef first fill-opacity:0
classDef last fill:#bfb6fc
'''
# ---
# name: test_conditional_graph[postgres_pipe]
'''
{
"nodes": [
{
"id": "__start__",
"type": "schema",
"data": "__start__"
},
{
"id": "agent",
"type": "runnable",
"data": {
"id": [
"langchain",
"schema",
"runnable",
"RunnableAssign"
],
"name": "agent"
}
},
{
"id": "tools",
"type": "runnable",
"data": {
"id": [
"langgraph",
"utils",
"runnable",
"RunnableCallable"
],
"name": "tools"
},
"metadata": {
"parents": {},
"version": 2,
"variant": "b"
}
},
{
"id": "__end__",
"type": "schema",
"data": "__end__"
}
],
"edges": [
{
"source": "__start__",
"target": "agent"
},
{
"source": "tools",
"target": "agent"
},
{
"source": "agent",
"target": "tools",
"data": "continue",
"conditional": true
},
{
"source": "agent",
"target": "__end__",
"data": "exit",
"conditional": true
}
]
}
'''
# ---
# name: test_conditional_graph[postgres_pipe].1
'''
graph TD;
__start__ --> agent;
tools --> agent;
agent -. continue .-> tools;
agent -. exit .-> __end__;
'''
# ---
# name: test_conditional_graph[postgres_pipe].2
'''
%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
__start__([<p>__start__</p>]):::first
agent(agent)
tools(tools<hr/><small><em>parents = {}
version = 2
variant = b</em></small>)
__end__([<p>__end__</p>]):::last
__start__ --> agent;
tools --> agent;
agent -. continue .-> tools;
agent -. exit .-> __end__;
classDef default fill:#f2f0ff,line-height:1.2
classDef first fill-opacity:0
classDef last fill:#bfb6fc
'''
# ---
# name: test_conditional_graph[postgres_pipe].3
'''
{
"nodes": [
{
"id": "__start__",
"type": "schema",
"data": "__start__"
},
{
"id": "agent",
"type": "runnable",
"data": {
"id": [
"langchain",
"schema",
"runnable",
"RunnableAssign"
],
"name": "agent"
}
},
{
"id": "tools",
"type": "runnable",
"data": {
"id": [
"langgraph",
"utils",
"runnable",
"RunnableCallable"
],
"name": "tools"
},
"metadata": {
"parents": {},
"version": 2,
"variant": "b"
}
},
{
"id": "__end__",
"type": "schema",
"data": "__end__"
}
],
"edges": [
{
"source": "__start__",
"target": "agent"
},
{
"source": "tools",
"target": "agent"
},
{
"source": "agent",
"target": "tools",
"data": "continue",
"conditional": true
},
{
"source": "agent",
"target": "__end__",
"data": "exit",
"conditional": true
}
]
}
'''
# ---
# name: test_conditional_graph[postgres_pipe].4
'''
graph TD;
__start__ --> agent;
tools --> agent;
agent -. continue .-> tools;
agent -. exit .-> __end__;
'''
# ---
# name: test_conditional_graph[postgres_pipe].5
dict({
'edges': list([
dict({
'source': '__start__',
'target': 'agent',
}),
dict({
'source': 'tools',
'target': 'agent',
}),
dict({
'conditional': True,
'data': 'continue',
'source': 'agent',
'target': 'tools',
}),
dict({
'conditional': True,
'data': 'exit',
'source': 'agent',
'target': '__end__',
}),
]),
'nodes': list([
dict({
'data': '__start__',
'id': '__start__',
'type': 'schema',
}),
dict({
'data': dict({
'id': list([
'langchain',
'schema',
'runnable',
'RunnableAssign',
]),
'name': 'agent',
}),
'id': 'agent',
'metadata': dict({
'__interrupt': 'after',
}),
'type': 'runnable',
}),
dict({
'data': dict({
'id': list([
'langgraph',
'utils',
'runnable',
'RunnableCallable',
]),
'name': 'tools',
}),
'id': 'tools',
'metadata': dict({
'parents': dict({
}),
'variant': 'b',
'version': 2,
}),
'type': 'runnable',
}),
dict({
'data': '__end__',
'id': '__end__',
'type': 'schema',
}),
]),
})
# ---
# name: test_conditional_graph[postgres_pipe].6
'''
%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
__start__([<p>__start__</p>]):::first
agent(agent<hr/><small><em>__interrupt = after</em></small>)
tools(tools<hr/><small><em>parents = {}
version = 2
variant = b</em></small>)
__end__([<p>__end__</p>]):::last
__start__ --> agent;
tools --> agent;
agent -. continue .-> tools;
agent -. exit .-> __end__;
classDef default fill:#f2f0ff,line-height:1.2
classDef first fill-opacity:0
classDef last fill:#bfb6fc
'''
# ---
# name: test_conditional_graph[postgres_pool]
'''
{
"nodes": [
{
"id": "__start__",
"type": "schema",
"data": "__start__"
},
{
"id": "agent",
"type": "runnable",
"data": {
"id": [
"langchain",
"schema",
"runnable",
"RunnableAssign"
],
"name": "agent"
}
},
{
"id": "tools",
"type": "runnable",
"data": {
"id": [
"langgraph",
"utils",
"runnable",
"RunnableCallable"
],
"name": "tools"
},
"metadata": {
"parents": {},
"version": 2,
"variant": "b"
}
},
{
"id": "__end__",
"type": "schema",
"data": "__end__"
}
],
"edges": [
{
"source": "__start__",
"target": "agent"
},
{
"source": "tools",
"target": "agent"
},
{
"source": "agent",
"target": "tools",
"data": "continue",
"conditional": true
},
{
"source": "agent",
"target": "__end__",
"data": "exit",
"conditional": true
}
]
}
'''
# ---
# name: test_conditional_graph[postgres_pool].1
'''
graph TD;
__start__ --> agent;
tools --> agent;
agent -. continue .-> tools;
agent -. exit .-> __end__;
'''
# ---
# name: test_conditional_graph[postgres_pool].2
'''
%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
__start__([<p>__start__</p>]):::first
agent(agent)
tools(tools<hr/><small><em>parents = {}
version = 2
variant = b</em></small>)
__end__([<p>__end__</p>]):::last
__start__ --> agent;
tools --> agent;
agent -. continue .-> tools;
agent -. exit .-> __end__;
classDef default fill:#f2f0ff,line-height:1.2
classDef first fill-opacity:0
classDef last fill:#bfb6fc
'''
# ---
# name: test_conditional_graph[postgres_pool].3
'''
{
"nodes": [
{
"id": "__start__",
"type": "schema",
"data": "__start__"
},
{
"id": "agent",
"type": "runnable",
"data": {
"id": [
"langchain",
"schema",
"runnable",
"RunnableAssign"
],
"name": "agent"
}
},
{
"id": "tools",
"type": "runnable",
"data": {
"id": [
"langgraph",
"utils",
"runnable",
"RunnableCallable"
],
"name": "tools"
},
"metadata": {
"parents": {},
"version": 2,
"variant": "b"
}
},
{
"id": "__end__",
"type": "schema",
"data": "__end__"
}
],
"edges": [
{
"source": "__start__",
"target": "agent"
},
{
"source": "tools",
"target": "agent"
},
{
"source": "agent",
"target": "tools",
"data": "continue",
"conditional": true
},
{
"source": "agent",
"target": "__end__",
"data": "exit",
"conditional": true
}
]
}
'''
# ---
# name: test_conditional_graph[postgres_pool].4
'''
graph TD;
__start__ --> agent;
tools --> agent;
agent -. continue .-> tools;
agent -. exit .-> __end__;
'''
# ---
# name: test_conditional_graph[postgres_pool].5
dict({
'edges': list([
dict({
'source': '__start__',
'target': 'agent',
}),
dict({
'source': 'tools',
'target': 'agent',
}),
dict({
'conditional': True,
'data': 'continue',
'source': 'agent',
'target': 'tools',
}),
dict({
'conditional': True,
'data': 'exit',
'source': 'agent',
'target': '__end__',
}),
]),
'nodes': list([
dict({
'data': '__start__',
'id': '__start__',
'type': 'schema',
}),
dict({
'data': dict({
'id': list([
'langchain',
'schema',
'runnable',
'RunnableAssign',
]),
'name': 'agent',
}),
'id': 'agent',
'metadata': dict({
'__interrupt': 'after',
}),
'type': 'runnable',
}),
dict({
'data': dict({
'id': list([
'langgraph',
'utils',
'runnable',
'RunnableCallable',
]),
'name': 'tools',
}),
'id': 'tools',
'metadata': dict({
'parents': dict({
}),
'variant': 'b',
'version': 2,
}),
'type': 'runnable',
}),
dict({
'data': '__end__',
'id': '__end__',
'type': 'schema',
}),
]),
})
# ---
# name: test_conditional_graph[postgres_pool].6
'''
%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
__start__([<p>__start__</p>]):::first
agent(agent<hr/><small><em>__interrupt = after</em></small>)
tools(tools<hr/><small><em>parents = {}
version = 2
variant = b</em></small>)
__end__([<p>__end__</p>]):::last
__start__ --> agent;
tools --> agent;
agent -. continue .-> tools;
agent -. exit .-> __end__;
classDef default fill:#f2f0ff,line-height:1.2
classDef first fill-opacity:0
classDef last fill:#bfb6fc
'''
# ---
# name: test_conditional_graph[sqlite]
'''
{
"nodes": [
{
"id": "__start__",
"type": "schema",
"data": "__start__"
},
{
"id": "agent",
"type": "runnable",
"data": {
"id": [
"langchain",
"schema",
"runnable",
"RunnableAssign"
],
"name": "agent"
}
},
{
"id": "tools",
"type": "runnable",
"data": {
"id": [
"langgraph",
"utils",
"runnable",
"RunnableCallable"
],
"name": "tools"
},
"metadata": {
"parents": {},
"version": 2,
"variant": "b"
}
},
{
"id": "__end__",
"type": "schema",
"data": "__end__"
}
],
"edges": [
{
"source": "__start__",
"target": "agent"
},
{
"source": "tools",
"target": "agent"
},
{
"source": "agent",
"target": "tools",
"data": "continue",
"conditional": true
},
{
"source": "agent",
"target": "__end__",
"data": "exit",
"conditional": true
}
]
}
'''
# ---
# name: test_conditional_graph[sqlite].1
'''
graph TD;
__start__ --> agent;
tools --> agent;
agent -. continue .-> tools;
agent -. exit .-> __end__;
'''
# ---
# name: test_conditional_graph[sqlite].2
'''
%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
__start__([<p>__start__</p>]):::first
agent(agent)
tools(tools<hr/><small><em>parents = {}
version = 2
variant = b</em></small>)
__end__([<p>__end__</p>]):::last
__start__ --> agent;
tools --> agent;
agent -. continue .-> tools;
agent -. exit .-> __end__;
classDef default fill:#f2f0ff,line-height:1.2
classDef first fill-opacity:0
classDef last fill:#bfb6fc
'''
# ---
# name: test_conditional_graph[sqlite].3
'''
{
"nodes": [
{
"id": "__start__",
"type": "schema",
"data": "__start__"
},
{
"id": "agent",
"type": "runnable",
"data": {
"id": [
"langchain",
"schema",
"runnable",
"RunnableAssign"
],
"name": "agent"
}
},
{
"id": "tools",
"type": "runnable",
"data": {
"id": [
"langgraph",
"utils",
"runnable",
"RunnableCallable"
],
"name": "tools"
},
"metadata": {
"parents": {},
"version": 2,
"variant": "b"
}
},
{
"id": "__end__",
"type": "schema",
"data": "__end__"
}
],
"edges": [
{
"source": "__start__",
"target": "agent"
},
{
"source": "tools",
"target": "agent"
},
{
"source": "agent",
"target": "tools",
"data": "continue",
"conditional": true
},
{
"source": "agent",
"target": "__end__",
"data": "exit",
"conditional": true
}
]
}
'''
# ---
# name: test_conditional_graph[sqlite].4
'''
graph TD;
__start__ --> agent;
tools --> agent;
agent -. continue .-> tools;
agent -. exit .-> __end__;
'''
# ---
# name: test_conditional_graph[sqlite].5
dict({
'edges': list([
dict({
'source': '__start__',
'target': 'agent',
}),
dict({
'source': 'tools',
'target': 'agent',
}),
dict({
'conditional': True,
'data': 'continue',
'source': 'agent',
'target': 'tools',
}),
dict({
'conditional': True,
'data': 'exit',
'source': 'agent',
'target': '__end__',
}),
]),
'nodes': list([
dict({
'data': '__start__',
'id': '__start__',
'type': 'schema',
}),
dict({
'data': dict({
'id': list([
'langchain',
'schema',
'runnable',
'RunnableAssign',
]),
'name': 'agent',
}),
'id': 'agent',
'metadata': dict({
'__interrupt': 'after',
}),
'type': 'runnable',
}),
dict({
'data': dict({
'id': list([
'langgraph',
'utils',
'runnable',
'RunnableCallable',
]),
'name': 'tools',
}),
'id': 'tools',
'metadata': dict({
'parents': dict({
}),
'variant': 'b',
'version': 2,
}),
'type': 'runnable',
}),
dict({
'data': '__end__',
'id': '__end__',
'type': 'schema',
}),
]),
})
# ---
# name: test_conditional_graph[sqlite].6
'''
%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
__start__([<p>__start__</p>]):::first
agent(agent<hr/><small><em>__interrupt = after</em></small>)
tools(tools<hr/><small><em>parents = {}
version = 2
variant = b</em></small>)
__end__([<p>__end__</p>]):::last
__start__ --> agent;
tools --> agent;
agent -. continue .-> tools;
agent -. exit .-> __end__;
classDef default fill:#f2f0ff,line-height:1.2
classDef first fill-opacity:0
classDef last fill:#bfb6fc
'''
# ---
# name: test_conditional_state_graph.1
'{"title": "LangGraphOutput", "type": "object", "properties": {"input": {"title": "Input", "type": "string"}, "agent_outcome": {"title": "Agent Outcome", "anyOf": [{"$ref": "#/definitions/AgentAction"}, {"$ref": "#/definitions/AgentFinish"}]}, "intermediate_steps": {"title": "Intermediate Steps", "type": "array", "items": {"type": "array", "minItems": 2, "maxItems": 2, "items": [{"$ref": "#/definitions/AgentAction"}, {"type": "string"}]}}}, "definitions": {"AgentAction": {"title": "AgentAction", "description": "Represents a request to execute an action by an agent.\\n\\nThe action consists of the name of the tool to execute and the input to pass\\nto the tool. The log is used to pass along extra information about the action.", "type": "object", "properties": {"tool": {"title": "Tool", "type": "string"}, "tool_input": {"title": "Tool Input", "anyOf": [{"type": "string"}, {"type": "object"}]}, "log": {"title": "Log", "type": "string"}, "type": {"title": "Type", "default": "AgentAction", "enum": ["AgentAction"], "type": "string"}}, "required": ["tool", "tool_input", "log"]}, "AgentFinish": {"title": "AgentFinish", "description": "Final return value of an ActionAgent.\\n\\nAgents return an AgentFinish when they have reached a stopping condition.", "type": "object", "properties": {"return_values": {"title": "Return Values", "type": "object"}, "log": {"title": "Log", "type": "string"}, "type": {"title": "Type", "default": "AgentFinish", "enum": ["AgentFinish"], "type": "string"}}, "required": ["return_values", "log"]}}}'
# ---
# name: test_conditional_state_graph.2
'''
{
"nodes": [
{
"id": "__start__",
"type": "schema",
"data": "__start__"
},
{
"id": "agent",
"type": "runnable",
"data": {
"id": [
"langchain",
"schema",
"runnable",
"RunnableSequence"
],
"name": "agent"
}
},
{
"id": "tools",
"type": "runnable",
"data": {
"id": [
"langgraph",
"utils",
"RunnableCallable"
],
"name": "tools"
}
},
{
"id": "__end__",
"type": "schema",
"data": "__end__"
}
],
"edges": [
{
"source": "__start__",
"target": "agent"
},
{
"source": "tools",
"target": "agent"
},
{
"source": "agent",
"target": "tools",
"data": "continue",
"conditional": true
},
{
"source": "agent",
"target": "__end__",
"data": "exit",
"conditional": true
}
]
}
'''
# ---
# name: test_conditional_state_graph.3
'''
graph TD;
__start__ --> agent;
tools --> agent;
agent -.  continue  .-> tools;
agent -.  exit  .-> __end__;
'''
# ---
# name: test_conditional_state_graph[duckdb]
'{"$defs": {"AgentAction": {"description": "Represents a request to execute an action by an agent.\\n\\nThe action consists of the name of the tool to execute and the input to pass\\nto the tool. The log is used to pass along extra information about the action.", "properties": {"tool": {"title": "Tool", "type": "string"}, "tool_input": {"anyOf": [{"type": "string"}, {"type": "object"}], "title": "Tool Input"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentAction", "default": "AgentAction", "enum": ["AgentAction"], "title": "Type", "type": "string"}}, "required": ["tool", "tool_input", "log"], "title": "AgentAction", "type": "object"}, "AgentFinish": {"description": "Final return value of an ActionAgent.\\n\\nAgents return an AgentFinish when they have reached a stopping condition.", "properties": {"return_values": {"title": "Return Values", "type": "object"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentFinish", "default": "AgentFinish", "enum": ["AgentFinish"], "title": "Type", "type": "string"}}, "required": ["return_values", "log"], "title": "AgentFinish", "type": "object"}}, "properties": {"input": {"default": null, "title": "Input", "type": "string"}, "agent_outcome": {"anyOf": [{"$ref": "#/$defs/AgentAction"}, {"$ref": "#/$defs/AgentFinish"}, {"type": "null"}], "default": null, "title": "Agent Outcome"}, "intermediate_steps": {"default": null, "items": {"maxItems": 2, "minItems": 2, "prefixItems": [{"$ref": "#/$defs/AgentAction"}, {"type": "string"}], "type": "array"}, "title": "Intermediate Steps", "type": "array"}}, "title": "LangGraphInput", "type": "object"}'
# ---
# name: test_conditional_state_graph[duckdb].1
'{"$defs": {"AgentAction": {"description": "Represents a request to execute an action by an agent.\\n\\nThe action consists of the name of the tool to execute and the input to pass\\nto the tool. The log is used to pass along extra information about the action.", "properties": {"tool": {"title": "Tool", "type": "string"}, "tool_input": {"anyOf": [{"type": "string"}, {"type": "object"}], "title": "Tool Input"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentAction", "default": "AgentAction", "enum": ["AgentAction"], "title": "Type", "type": "string"}}, "required": ["tool", "tool_input", "log"], "title": "AgentAction", "type": "object"}, "AgentFinish": {"description": "Final return value of an ActionAgent.\\n\\nAgents return an AgentFinish when they have reached a stopping condition.", "properties": {"return_values": {"title": "Return Values", "type": "object"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentFinish", "default": "AgentFinish", "enum": ["AgentFinish"], "title": "Type", "type": "string"}}, "required": ["return_values", "log"], "title": "AgentFinish", "type": "object"}}, "properties": {"input": {"default": null, "title": "Input", "type": "string"}, "agent_outcome": {"anyOf": [{"$ref": "#/$defs/AgentAction"}, {"$ref": "#/$defs/AgentFinish"}, {"type": "null"}], "default": null, "title": "Agent Outcome"}, "intermediate_steps": {"default": null, "items": {"maxItems": 2, "minItems": 2, "prefixItems": [{"$ref": "#/$defs/AgentAction"}, {"type": "string"}], "type": "array"}, "title": "Intermediate Steps", "type": "array"}}, "title": "LangGraphOutput", "type": "object"}'
# ---
# name: test_conditional_state_graph[duckdb].2
'''
{
"nodes": [
{
"id": "__start__",
"type": "schema",
"data": "__start__"
},
{
"id": "agent",
"type": "runnable",
"data": {
"id": [
"langchain",
"schema",
"runnable",
"RunnableSequence"
],
"name": "agent"
}
},
{
"id": "tools",
"type": "runnable",
"data": {
"id": [
"langgraph",
"utils",
"runnable",
"RunnableCallable"
],
"name": "tools"
}
},
{
"id": "__end__",
"type": "schema",
"data": "__end__"
}
],
"edges": [
{
"source": "__start__",
"target": "agent"
},
{
"source": "tools",
"target": "agent"
},
{
"source": "agent",
"target": "tools",
"data": "continue",
"conditional": true
},
{
"source": "agent",
"target": "__end__",
"data": "exit",
"conditional": true
}
]
}
'''
# ---
# name: test_conditional_state_graph[duckdb].3
'''
graph TD;
__start__ --> agent;
tools --> agent;
agent -. continue .-> tools;
agent -. exit .-> __end__;
'''
# ---
# name: test_conditional_state_graph[memory]
'{"$defs": {"AgentAction": {"description": "Represents a request to execute an action by an agent.\\n\\nThe action consists of the name of the tool to execute and the input to pass\\nto the tool. The log is used to pass along extra information about the action.", "properties": {"tool": {"title": "Tool", "type": "string"}, "tool_input": {"anyOf": [{"type": "string"}, {"type": "object"}], "title": "Tool Input"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentAction", "default": "AgentAction", "enum": ["AgentAction"], "title": "Type", "type": "string"}}, "required": ["tool", "tool_input", "log"], "title": "AgentAction", "type": "object"}, "AgentFinish": {"description": "Final return value of an ActionAgent.\\n\\nAgents return an AgentFinish when they have reached a stopping condition.", "properties": {"return_values": {"title": "Return Values", "type": "object"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentFinish", "default": "AgentFinish", "enum": ["AgentFinish"], "title": "Type", "type": "string"}}, "required": ["return_values", "log"], "title": "AgentFinish", "type": "object"}}, "properties": {"input": {"default": null, "title": "Input", "type": "string"}, "agent_outcome": {"anyOf": [{"$ref": "#/$defs/AgentAction"}, {"$ref": "#/$defs/AgentFinish"}, {"type": "null"}], "default": null, "title": "Agent Outcome"}, "intermediate_steps": {"default": null, "items": {"maxItems": 2, "minItems": 2, "prefixItems": [{"$ref": "#/$defs/AgentAction"}, {"type": "string"}], "type": "array"}, "title": "Intermediate Steps", "type": "array"}}, "title": "LangGraphInput", "type": "object"}'
# ---
# name: test_conditional_state_graph[memory].1
'{"$defs": {"AgentAction": {"description": "Represents a request to execute an action by an agent.\\n\\nThe action consists of the name of the tool to execute and the input to pass\\nto the tool. The log is used to pass along extra information about the action.", "properties": {"tool": {"title": "Tool", "type": "string"}, "tool_input": {"anyOf": [{"type": "string"}, {"type": "object"}], "title": "Tool Input"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentAction", "default": "AgentAction", "enum": ["AgentAction"], "title": "Type", "type": "string"}}, "required": ["tool", "tool_input", "log"], "title": "AgentAction", "type": "object"}, "AgentFinish": {"description": "Final return value of an ActionAgent.\\n\\nAgents return an AgentFinish when they have reached a stopping condition.", "properties": {"return_values": {"title": "Return Values", "type": "object"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentFinish", "default": "AgentFinish", "enum": ["AgentFinish"], "title": "Type", "type": "string"}}, "required": ["return_values", "log"], "title": "AgentFinish", "type": "object"}}, "properties": {"input": {"default": null, "title": "Input", "type": "string"}, "agent_outcome": {"anyOf": [{"$ref": "#/$defs/AgentAction"}, {"$ref": "#/$defs/AgentFinish"}, {"type": "null"}], "default": null, "title": "Agent Outcome"}, "intermediate_steps": {"default": null, "items": {"maxItems": 2, "minItems": 2, "prefixItems": [{"$ref": "#/$defs/AgentAction"}, {"type": "string"}], "type": "array"}, "title": "Intermediate Steps", "type": "array"}}, "title": "LangGraphOutput", "type": "object"}'
# ---
# name: test_conditional_state_graph[memory].2
'''
{
"nodes": [
{
"id": "__start__",
"type": "schema",
"data": "__start__"
},
{
"id": "agent",
"type": "runnable",
"data": {
"id": [
"langchain",
"schema",
"runnable",
"RunnableSequence"
],
"name": "agent"
}
},
{
"id": "tools",
"type": "runnable",
"data": {
"id": [
"langgraph",
"utils",
"runnable",
"RunnableCallable"
],
"name": "tools"
}
},
{
"id": "__end__",
"type": "schema",
"data": "__end__"
}
],
"edges": [
{
"source": "__start__",
"target": "agent"
},
{
"source": "tools",
"target": "agent"
},
{
"source": "agent",
"target": "tools",
"data": "continue",
"conditional": true
},
{
"source": "agent",
"target": "__end__",
"data": "exit",
"conditional": true
}
]
}
'''
# ---
# name: test_conditional_state_graph[memory].3
'''
graph TD;
__start__ --> agent;
tools --> agent;
agent -. continue .-> tools;
agent -. exit .-> __end__;
'''
# ---
# name: test_conditional_state_graph[postgres]
'{"$defs": {"AgentAction": {"description": "Represents a request to execute an action by an agent.\\n\\nThe action consists of the name of the tool to execute and the input to pass\\nto the tool. The log is used to pass along extra information about the action.", "properties": {"tool": {"title": "Tool", "type": "string"}, "tool_input": {"anyOf": [{"type": "string"}, {"type": "object"}], "title": "Tool Input"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentAction", "default": "AgentAction", "enum": ["AgentAction"], "title": "Type", "type": "string"}}, "required": ["tool", "tool_input", "log"], "title": "AgentAction", "type": "object"}, "AgentFinish": {"description": "Final return value of an ActionAgent.\\n\\nAgents return an AgentFinish when they have reached a stopping condition.", "properties": {"return_values": {"title": "Return Values", "type": "object"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentFinish", "default": "AgentFinish", "enum": ["AgentFinish"], "title": "Type", "type": "string"}}, "required": ["return_values", "log"], "title": "AgentFinish", "type": "object"}}, "properties": {"input": {"default": null, "title": "Input", "type": "string"}, "agent_outcome": {"anyOf": [{"$ref": "#/$defs/AgentAction"}, {"$ref": "#/$defs/AgentFinish"}, {"type": "null"}], "default": null, "title": "Agent Outcome"}, "intermediate_steps": {"default": null, "items": {"maxItems": 2, "minItems": 2, "prefixItems": [{"$ref": "#/$defs/AgentAction"}, {"type": "string"}], "type": "array"}, "title": "Intermediate Steps", "type": "array"}}, "title": "LangGraphInput", "type": "object"}'
# ---
# name: test_conditional_state_graph[postgres].1
'{"$defs": {"AgentAction": {"description": "Represents a request to execute an action by an agent.\\n\\nThe action consists of the name of the tool to execute and the input to pass\\nto the tool. The log is used to pass along extra information about the action.", "properties": {"tool": {"title": "Tool", "type": "string"}, "tool_input": {"anyOf": [{"type": "string"}, {"type": "object"}], "title": "Tool Input"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentAction", "default": "AgentAction", "enum": ["AgentAction"], "title": "Type", "type": "string"}}, "required": ["tool", "tool_input", "log"], "title": "AgentAction", "type": "object"}, "AgentFinish": {"description": "Final return value of an ActionAgent.\\n\\nAgents return an AgentFinish when they have reached a stopping condition.", "properties": {"return_values": {"title": "Return Values", "type": "object"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentFinish", "default": "AgentFinish", "enum": ["AgentFinish"], "title": "Type", "type": "string"}}, "required": ["return_values", "log"], "title": "AgentFinish", "type": "object"}}, "properties": {"input": {"default": null, "title": "Input", "type": "string"}, "agent_outcome": {"anyOf": [{"$ref": "#/$defs/AgentAction"}, {"$ref": "#/$defs/AgentFinish"}, {"type": "null"}], "default": null, "title": "Agent Outcome"}, "intermediate_steps": {"default": null, "items": {"maxItems": 2, "minItems": 2, "prefixItems": [{"$ref": "#/$defs/AgentAction"}, {"type": "string"}], "type": "array"}, "title": "Intermediate Steps", "type": "array"}}, "title": "LangGraphOutput", "type": "object"}'
# ---
# name: test_conditional_state_graph[postgres].2
'''
{
"nodes": [
{
"id": "__start__",
"type": "schema",
"data": "__start__"
},
{
"id": "agent",
"type": "runnable",
"data": {
"id": [
"langchain",
"schema",
"runnable",
"RunnableSequence"
],
"name": "agent"
}
},
{
"id": "tools",
"type": "runnable",
"data": {
"id": [
"langgraph",
"utils",
"runnable",
"RunnableCallable"
],
"name": "tools"
}
},
{
"id": "__end__",
"type": "schema",
"data": "__end__"
}
],
"edges": [
{
"source": "__start__",
"target": "agent"
},
{
"source": "tools",
"target": "agent"
},
{
"source": "agent",
"target": "tools",
"data": "continue",
"conditional": true
},
{
"source": "agent",
"target": "__end__",
"data": "exit",
"conditional": true
}
]
}
'''
# ---
# name: test_conditional_state_graph[postgres].3
'''
graph TD;
__start__ --> agent;
tools --> agent;
agent -. continue .-> tools;
agent -. exit .-> __end__;
'''
# ---
# name: test_conditional_state_graph[postgres_pipe]
'{"$defs": {"AgentAction": {"description": "Represents a request to execute an action by an agent.\\n\\nThe action consists of the name of the tool to execute and the input to pass\\nto the tool. The log is used to pass along extra information about the action.", "properties": {"tool": {"title": "Tool", "type": "string"}, "tool_input": {"anyOf": [{"type": "string"}, {"type": "object"}], "title": "Tool Input"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentAction", "default": "AgentAction", "enum": ["AgentAction"], "title": "Type", "type": "string"}}, "required": ["tool", "tool_input", "log"], "title": "AgentAction", "type": "object"}, "AgentFinish": {"description": "Final return value of an ActionAgent.\\n\\nAgents return an AgentFinish when they have reached a stopping condition.", "properties": {"return_values": {"title": "Return Values", "type": "object"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentFinish", "default": "AgentFinish", "enum": ["AgentFinish"], "title": "Type", "type": "string"}}, "required": ["return_values", "log"], "title": "AgentFinish", "type": "object"}}, "properties": {"input": {"default": null, "title": "Input", "type": "string"}, "agent_outcome": {"anyOf": [{"$ref": "#/$defs/AgentAction"}, {"$ref": "#/$defs/AgentFinish"}, {"type": "null"}], "default": null, "title": "Agent Outcome"}, "intermediate_steps": {"default": null, "items": {"maxItems": 2, "minItems": 2, "prefixItems": [{"$ref": "#/$defs/AgentAction"}, {"type": "string"}], "type": "array"}, "title": "Intermediate Steps", "type": "array"}}, "title": "LangGraphInput", "type": "object"}'
# ---
# name: test_conditional_state_graph[postgres_pipe].1
'{"$defs": {"AgentAction": {"description": "Represents a request to execute an action by an agent.\\n\\nThe action consists of the name of the tool to execute and the input to pass\\nto the tool. The log is used to pass along extra information about the action.", "properties": {"tool": {"title": "Tool", "type": "string"}, "tool_input": {"anyOf": [{"type": "string"}, {"type": "object"}], "title": "Tool Input"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentAction", "default": "AgentAction", "enum": ["AgentAction"], "title": "Type", "type": "string"}}, "required": ["tool", "tool_input", "log"], "title": "AgentAction", "type": "object"}, "AgentFinish": {"description": "Final return value of an ActionAgent.\\n\\nAgents return an AgentFinish when they have reached a stopping condition.", "properties": {"return_values": {"title": "Return Values", "type": "object"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentFinish", "default": "AgentFinish", "enum": ["AgentFinish"], "title": "Type", "type": "string"}}, "required": ["return_values", "log"], "title": "AgentFinish", "type": "object"}}, "properties": {"input": {"default": null, "title": "Input", "type": "string"}, "agent_outcome": {"anyOf": [{"$ref": "#/$defs/AgentAction"}, {"$ref": "#/$defs/AgentFinish"}, {"type": "null"}], "default": null, "title": "Agent Outcome"}, "intermediate_steps": {"default": null, "items": {"maxItems": 2, "minItems": 2, "prefixItems": [{"$ref": "#/$defs/AgentAction"}, {"type": "string"}], "type": "array"}, "title": "Intermediate Steps", "type": "array"}}, "title": "LangGraphOutput", "type": "object"}'
# ---
# name: test_conditional_state_graph[postgres_pipe].2
'''
{
"nodes": [
{
"id": "__start__",
"type": "schema",
"data": "__start__"
},
{
"id": "agent",
"type": "runnable",
"data": {
"id": [
"langchain",
"schema",
"runnable",
"RunnableSequence"
],
"name": "agent"
}
},
{
"id": "tools",
"type": "runnable",
"data": {
"id": [
"langgraph",
"utils",
"runnable",
"RunnableCallable"
],
"name": "tools"
}
},
{
"id": "__end__",
"type": "schema",
"data": "__end__"
}
],
"edges": [
{
"source": "__start__",
"target": "agent"
},
{
"source": "tools",
"target": "agent"
},
{
"source": "agent",
"target": "tools",
"data": "continue",
"conditional": true
},
{
"source": "agent",
"target": "__end__",
"data": "exit",
"conditional": true
}
]
}
'''
# ---
# name: test_conditional_state_graph[postgres_pipe].3
'''
graph TD;
__start__ --> agent;
tools --> agent;
agent -. continue .-> tools;
agent -. exit .-> __end__;
'''
# ---
# name: test_conditional_state_graph[postgres_pool]
'{"$defs": {"AgentAction": {"description": "Represents a request to execute an action by an agent.\\n\\nThe action consists of the name of the tool to execute and the input to pass\\nto the tool. The log is used to pass along extra information about the action.", "properties": {"tool": {"title": "Tool", "type": "string"}, "tool_input": {"anyOf": [{"type": "string"}, {"type": "object"}], "title": "Tool Input"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentAction", "default": "AgentAction", "enum": ["AgentAction"], "title": "Type", "type": "string"}}, "required": ["tool", "tool_input", "log"], "title": "AgentAction", "type": "object"}, "AgentFinish": {"description": "Final return value of an ActionAgent.\\n\\nAgents return an AgentFinish when they have reached a stopping condition.", "properties": {"return_values": {"title": "Return Values", "type": "object"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentFinish", "default": "AgentFinish", "enum": ["AgentFinish"], "title": "Type", "type": "string"}}, "required": ["return_values", "log"], "title": "AgentFinish", "type": "object"}}, "properties": {"input": {"default": null, "title": "Input", "type": "string"}, "agent_outcome": {"anyOf": [{"$ref": "#/$defs/AgentAction"}, {"$ref": "#/$defs/AgentFinish"}, {"type": "null"}], "default": null, "title": "Agent Outcome"}, "intermediate_steps": {"default": null, "items": {"maxItems": 2, "minItems": 2, "prefixItems": [{"$ref": "#/$defs/AgentAction"}, {"type": "string"}], "type": "array"}, "title": "Intermediate Steps", "type": "array"}}, "title": "LangGraphInput", "type": "object"}'
# ---
# name: test_conditional_state_graph[postgres_pool].1
'{"$defs": {"AgentAction": {"description": "Represents a request to execute an action by an agent.\\n\\nThe action consists of the name of the tool to execute and the input to pass\\nto the tool. The log is used to pass along extra information about the action.", "properties": {"tool": {"title": "Tool", "type": "string"}, "tool_input": {"anyOf": [{"type": "string"}, {"type": "object"}], "title": "Tool Input"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentAction", "default": "AgentAction", "enum": ["AgentAction"], "title": "Type", "type": "string"}}, "required": ["tool", "tool_input", "log"], "title": "AgentAction", "type": "object"}, "AgentFinish": {"description": "Final return value of an ActionAgent.\\n\\nAgents return an AgentFinish when they have reached a stopping condition.", "properties": {"return_values": {"title": "Return Values", "type": "object"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentFinish", "default": "AgentFinish", "enum": ["AgentFinish"], "title": "Type", "type": "string"}}, "required": ["return_values", "log"], "title": "AgentFinish", "type": "object"}}, "properties": {"input": {"default": null, "title": "Input", "type": "string"}, "agent_outcome": {"anyOf": [{"$ref": "#/$defs/AgentAction"}, {"$ref": "#/$defs/AgentFinish"}, {"type": "null"}], "default": null, "title": "Agent Outcome"}, "intermediate_steps": {"default": null, "items": {"maxItems": 2, "minItems": 2, "prefixItems": [{"$ref": "#/$defs/AgentAction"}, {"type": "string"}], "type": "array"}, "title": "Intermediate Steps", "type": "array"}}, "title": "LangGraphOutput", "type": "object"}'
# ---
# name: test_conditional_state_graph[postgres_pool].2
'''
{
"nodes": [
{
"id": "__start__",
"type": "schema",
"data": "__start__"
},
{
"id": "agent",
"type": "runnable",
"data": {
"id": [
"langchain",
"schema",
"runnable",
"RunnableSequence"
],
"name": "agent"
}
},
{
"id": "tools",
"type": "runnable",
"data": {
"id": [
"langgraph",
"utils",
"runnable",
"RunnableCallable"
],
"name": "tools"
}
},
{
"id": "__end__",
"type": "schema",
"data": "__end__"
}
],
"edges": [
{
"source": "__start__",
"target": "agent"
},
{
"source": "tools",
"target": "agent"
},
{
"source": "agent",
"target": "tools",
"data": "continue",
"conditional": true
},
{
"source": "agent",
"target": "__end__",
"data": "exit",
"conditional": true
}
]
}
'''
# ---
# name: test_conditional_state_graph[postgres_pool].3
'''
graph TD;
__start__ --> agent;
tools --> agent;
agent -. continue .-> tools;
agent -. exit .-> __end__;
'''
# ---
# name: test_conditional_state_graph[sqlite]
'{"$defs": {"AgentAction": {"description": "Represents a request to execute an action by an agent.\\n\\nThe action consists of the name of the tool to execute and the input to pass\\nto the tool. The log is used to pass along extra information about the action.", "properties": {"tool": {"title": "Tool", "type": "string"}, "tool_input": {"anyOf": [{"type": "string"}, {"type": "object"}], "title": "Tool Input"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentAction", "default": "AgentAction", "enum": ["AgentAction"], "title": "Type", "type": "string"}}, "required": ["tool", "tool_input", "log"], "title": "AgentAction", "type": "object"}, "AgentFinish": {"description": "Final return value of an ActionAgent.\\n\\nAgents return an AgentFinish when they have reached a stopping condition.", "properties": {"return_values": {"title": "Return Values", "type": "object"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentFinish", "default": "AgentFinish", "enum": ["AgentFinish"], "title": "Type", "type": "string"}}, "required": ["return_values", "log"], "title": "AgentFinish", "type": "object"}}, "properties": {"input": {"default": null, "title": "Input", "type": "string"}, "agent_outcome": {"anyOf": [{"$ref": "#/$defs/AgentAction"}, {"$ref": "#/$defs/AgentFinish"}, {"type": "null"}], "default": null, "title": "Agent Outcome"}, "intermediate_steps": {"default": null, "items": {"maxItems": 2, "minItems": 2, "prefixItems": [{"$ref": "#/$defs/AgentAction"}, {"type": "string"}], "type": "array"}, "title": "Intermediate Steps", "type": "array"}}, "title": "LangGraphInput", "type": "object"}'
# ---
# name: test_conditional_state_graph[sqlite].1
'{"$defs": {"AgentAction": {"description": "Represents a request to execute an action by an agent.\\n\\nThe action consists of the name of the tool to execute and the input to pass\\nto the tool. The log is used to pass along extra information about the action.", "properties": {"tool": {"title": "Tool", "type": "string"}, "tool_input": {"anyOf": [{"type": "string"}, {"type": "object"}], "title": "Tool Input"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentAction", "default": "AgentAction", "enum": ["AgentAction"], "title": "Type", "type": "string"}}, "required": ["tool", "tool_input", "log"], "title": "AgentAction", "type": "object"}, "AgentFinish": {"description": "Final return value of an ActionAgent.\\n\\nAgents return an AgentFinish when they have reached a stopping condition.", "properties": {"return_values": {"title": "Return Values", "type": "object"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentFinish", "default": "AgentFinish", "enum": ["AgentFinish"], "title": "Type", "type": "string"}}, "required": ["return_values", "log"], "title": "AgentFinish", "type": "object"}}, "properties": {"input": {"default": null, "title": "Input", "type": "string"}, "agent_outcome": {"anyOf": [{"$ref": "#/$defs/AgentAction"}, {"$ref": "#/$defs/AgentFinish"}, {"type": "null"}], "default": null, "title": "Agent Outcome"}, "intermediate_steps": {"default": null, "items": {"maxItems": 2, "minItems": 2, "prefixItems": [{"$ref": "#/$defs/AgentAction"}, {"type": "string"}], "type": "array"}, "title": "Intermediate Steps", "type": "array"}}, "title": "LangGraphOutput", "type": "object"}'
# ---
# name: test_conditional_state_graph[sqlite].2
'''
{
"nodes": [
{
"id": "__start__",
"type": "schema",
"data": "__start__"
},
{
"id": "agent",
"type": "runnable",
"data": {
"id": [
"langchain",
"schema",
"runnable",
"RunnableSequence"
],
"name": "agent"
}
},
{
"id": "tools",
"type": "runnable",
"data": {
"id": [
"langgraph",
"utils",
"runnable",
"RunnableCallable"
],
"name": "tools"
}
},
{
"id": "__end__",
"type": "schema",
"data": "__end__"
}
],
"edges": [
{
"source": "__start__",
"target": "agent"
},
{
"source": "tools",
"target": "agent"
},
{
"source": "agent",
"target": "tools",
"data": "continue",
"conditional": true
},
{
"source": "agent",
"target": "__end__",
"data": "exit",
"conditional": true
}
]
}
'''
# ---
# name: test_conditional_state_graph[sqlite].3
'''
graph TD;
__start__ --> agent;
tools --> agent;
agent -. continue .-> tools;
agent -. exit .-> __end__;
'''
# ---
# name: test_conditional_state_graph_with_list_edge_inputs
'''
{
"nodes": [
{
"id": "__start__",
"type": "schema",
"data": "__start__"
},
{
"id": "A",
"type": "runnable",
"data": {
"id": [
"langgraph",
"utils",
"runnable",
"RunnableCallable"
],
"name": "A"
}
},
{
"id": "B",
"type": "runnable",
"data": {
"id": [
"langgraph",
"utils",
"runnable",
"RunnableCallable"
],
"name": "B"
}
},
{
"id": "__end__",
"type": "schema",
"data": "__end__"
}
],
"edges": [
{
"source": "A",
"target": "__end__"
},
{
"source": "B",
"target": "__end__"
},
{
"source": "__start__",
"target": "A"
},
{
"source": "__start__",
"target": "B"
}
]
}
'''
# ---
# name: test_conditional_state_graph_with_list_edge_inputs.1
'''
graph TD;
A --> __end__;
B --> __end__;
__start__ --> A;
__start__ --> B;
'''
# ---
# name: test_dynamic_interrupt
'''
%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
__start__([__start__]):::first
tool_two_slow(tool_two_slow)
tool_two_fast(tool_two_fast)
__end__([__end__]):::last
__start__ -.-> tool_two_slow;
tool_two_slow --> __end__;
__start__ -.-> tool_two_fast;
tool_two_fast --> __end__;
classDef default fill:#f2f0ff,line-height:1.2
classDef first fill-opacity:0
classDef last fill:#bfb6fc
'''
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge[duckdb]
'''
graph TD;
__start__ --> rewrite_query;
analyzer_one --> retriever_one;
qa --> __end__;
retriever_one --> qa;
retriever_two --> qa;
rewrite_query --> analyzer_one;
rewrite_query --> retriever_two;
'''
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge[memory]
'''
graph TD;
__start__ --> rewrite_query;
analyzer_one --> retriever_one;
qa --> __end__;
retriever_one --> qa;
retriever_two --> qa;
rewrite_query --> analyzer_one;
rewrite_query --> retriever_two;
'''
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge[postgres]
'''
graph TD;
__start__ --> rewrite_query;
analyzer_one --> retriever_one;
qa --> __end__;
retriever_one --> qa;
retriever_two --> qa;
rewrite_query --> analyzer_one;
rewrite_query --> retriever_two;
'''
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge[postgres_pipe]
'''
graph TD;
__start__ --> rewrite_query;
analyzer_one --> retriever_one;
qa --> __end__;
retriever_one --> qa;
retriever_two --> qa;
rewrite_query --> analyzer_one;
rewrite_query --> retriever_two;
'''
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge[postgres_pool]
'''
graph TD;
__start__ --> rewrite_query;
analyzer_one --> retriever_one;
qa --> __end__;
retriever_one --> qa;
retriever_two --> qa;
rewrite_query --> analyzer_one;
rewrite_query --> retriever_two;
'''
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge[sqlite]
'''
graph TD;
__start__ --> rewrite_query;
analyzer_one --> retriever_one;
qa --> __end__;
retriever_one --> qa;
retriever_two --> qa;
rewrite_query --> analyzer_one;
rewrite_query --> retriever_two;
'''
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class
'''
graph TD;
__start__ --> rewrite_query;
analyzer_one --> retriever_one;
qa --> __end__;
retriever_one --> qa;
retriever_two --> qa;
rewrite_query --> analyzer_one;
rewrite_query -.-> retriever_two;
'''
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1.1
dict({
'definitions': dict({
'InnerObject': dict({
'properties': dict({
'yo': dict({
'title': 'Yo',
'type': 'integer',
}),
}),
'required': list([
'yo',
]),
'title': 'InnerObject',
'type': 'object',
}),
}),
'properties': dict({
'inner': dict({
'$ref': '#/definitions/InnerObject',
}),
'query': dict({
'title': 'Query',
'type': 'string',
}),
}),
'required': list([
'query',
'inner',
]),
'title': 'Input',
'type': 'object',
})
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1.2
dict({
'properties': dict({
'answer': dict({
'title': 'Answer',
'type': 'string',
}),
'docs': dict({
'items': dict({
'type': 'string',
}),
'title': 'Docs',
'type': 'array',
}),
}),
'required': list([
'answer',
'docs',
]),
'title': 'Output',
'type': 'object',
})
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[duckdb]
'''
graph TD;
__start__ --> rewrite_query;
analyzer_one --> retriever_one;
qa --> __end__;
retriever_one --> qa;
retriever_two --> qa;
rewrite_query --> analyzer_one;
rewrite_query -.-> retriever_two;
'''
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[duckdb].1
dict({
'definitions': dict({
'InnerObject': dict({
'properties': dict({
'yo': dict({
'title': 'Yo',
'type': 'integer',
}),
}),
'required': list([
'yo',
]),
'title': 'InnerObject',
'type': 'object',
}),
}),
'properties': dict({
'inner': dict({
'$ref': '#/definitions/InnerObject',
}),
'query': dict({
'title': 'Query',
'type': 'string',
}),
}),
'required': list([
'query',
'inner',
]),
'title': 'Input',
'type': 'object',
})
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[duckdb].2
dict({
'properties': dict({
'answer': dict({
'title': 'Answer',
'type': 'string',
}),
'docs': dict({
'items': dict({
'type': 'string',
}),
'title': 'Docs',
'type': 'array',
}),
}),
'required': list([
'answer',
'docs',
]),
'title': 'Output',
'type': 'object',
})
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[memory]
'''
graph TD;
__start__ --> rewrite_query;
analyzer_one --> retriever_one;
qa --> __end__;
retriever_one --> qa;
retriever_two --> qa;
rewrite_query --> analyzer_one;
rewrite_query -.-> retriever_two;
'''
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[memory].1
dict({
'definitions': dict({
'InnerObject': dict({
'properties': dict({
'yo': dict({
'title': 'Yo',
'type': 'integer',
}),
}),
'required': list([
'yo',
]),
'title': 'InnerObject',
'type': 'object',
}),
}),
'properties': dict({
'inner': dict({
'$ref': '#/definitions/InnerObject',
}),
'query': dict({
'title': 'Query',
'type': 'string',
}),
}),
'required': list([
'query',
'inner',
]),
'title': 'Input',
'type': 'object',
})
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[memory].2
dict({
'properties': dict({
'answer': dict({
'title': 'Answer',
'type': 'string',
}),
'docs': dict({
'items': dict({
'type': 'string',
}),
'title': 'Docs',
'type': 'array',
}),
}),
'required': list([
'answer',
'docs',
]),
'title': 'Output',
'type': 'object',
})
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[postgres]
'''
graph TD;
__start__ --> rewrite_query;
analyzer_one --> retriever_one;
qa --> __end__;
retriever_one --> qa;
retriever_two --> qa;
rewrite_query --> analyzer_one;
rewrite_query -.-> retriever_two;
'''
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[postgres].1
dict({
'definitions': dict({
'InnerObject': dict({
'properties': dict({
'yo': dict({
'title': 'Yo',
'type': 'integer',
}),
}),
'required': list([
'yo',
]),
'title': 'InnerObject',
'type': 'object',
}),
}),
'properties': dict({
'inner': dict({
'$ref': '#/definitions/InnerObject',
}),
'query': dict({
'title': 'Query',
'type': 'string',
}),
}),
'required': list([
'query',
'inner',
]),
'title': 'Input',
'type': 'object',
})
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[postgres].2
dict({
'properties': dict({
'answer': dict({
'title': 'Answer',
'type': 'string',
}),
'docs': dict({
'items': dict({
'type': 'string',
}),
'title': 'Docs',
'type': 'array',
}),
}),
'required': list([
'answer',
'docs',
]),
'title': 'Output',
'type': 'object',
})
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[postgres_pipe]
'''
graph TD;
__start__ --> rewrite_query;
analyzer_one --> retriever_one;
qa --> __end__;
retriever_one --> qa;
retriever_two --> qa;
rewrite_query --> analyzer_one;
rewrite_query -.-> retriever_two;
'''
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[postgres_pipe].1
dict({
'definitions': dict({
'InnerObject': dict({
'properties': dict({
'yo': dict({
'title': 'Yo',
'type': 'integer',
}),
}),
'required': list([
'yo',
]),
'title': 'InnerObject',
'type': 'object',
}),
}),
'properties': dict({
'inner': dict({
'$ref': '#/definitions/InnerObject',
}),
'query': dict({
'title': 'Query',
'type': 'string',
}),
}),
'required': list([
'query',
'inner',
]),
'title': 'Input',
'type': 'object',
})
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[postgres_pipe].2
dict({
'properties': dict({
'answer': dict({
'title': 'Answer',
'type': 'string',
}),
'docs': dict({
'items': dict({
'type': 'string',
}),
'title': 'Docs',
'type': 'array',
}),
}),
'required': list([
'answer',
'docs',
]),
'title': 'Output',
'type': 'object',
})
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[postgres_pool]
'''
graph TD;
__start__ --> rewrite_query;
analyzer_one --> retriever_one;
qa --> __end__;
retriever_one --> qa;
retriever_two --> qa;
rewrite_query --> analyzer_one;
rewrite_query -.-> retriever_two;
'''
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[postgres_pool].1
dict({
'definitions': dict({
'InnerObject': dict({
'properties': dict({
'yo': dict({
'title': 'Yo',
'type': 'integer',
}),
}),
'required': list([
'yo',
]),
'title': 'InnerObject',
'type': 'object',
}),
}),
'properties': dict({
'inner': dict({
'$ref': '#/definitions/InnerObject',
}),
'query': dict({
'title': 'Query',
'type': 'string',
}),
}),
'required': list([
'query',
'inner',
]),
'title': 'Input',
'type': 'object',
})
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[postgres_pool].2
dict({
'properties': dict({
'answer': dict({
'title': 'Answer',
'type': 'string',
}),
'docs': dict({
'items': dict({
'type': 'string',
}),
'title': 'Docs',
'type': 'array',
}),
}),
'required': list([
'answer',
'docs',
]),
'title': 'Output',
'type': 'object',
})
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[sqlite]
'''
graph TD;
__start__ --> rewrite_query;
analyzer_one --> retriever_one;
qa --> __end__;
retriever_one --> qa;
retriever_two --> qa;
rewrite_query --> analyzer_one;
rewrite_query -.-> retriever_two;
'''
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[sqlite].1
dict({
'definitions': dict({
'InnerObject': dict({
'properties': dict({
'yo': dict({
'title': 'Yo',
'type': 'integer',
}),
}),
'required': list([
'yo',
]),
'title': 'InnerObject',
'type': 'object',
}),
}),
'properties': dict({
'inner': dict({
'$ref': '#/definitions/InnerObject',
}),
'query': dict({
'title': 'Query',
'type': 'string',
}),
}),
'required': list([
'query',
'inner',
]),
'title': 'Input',
'type': 'object',
})
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic1[sqlite].2
dict({
'properties': dict({
'answer': dict({
'title': 'Answer',
'type': 'string',
}),
'docs': dict({
'items': dict({
'type': 'string',
}),
'title': 'Docs',
'type': 'array',
}),
}),
'required': list([
'answer',
'docs',
]),
'title': 'Output',
'type': 'object',
})
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2.1
dict({
'$defs': dict({
'InnerObject': dict({
'properties': dict({
'yo': dict({
'title': 'Yo',
'type': 'integer',
}),
}),
'required': list([
'yo',
]),
'title': 'InnerObject',
'type': 'object',
}),
}),
'properties': dict({
'inner': dict({
'$ref': '#/$defs/InnerObject',
}),
'query': dict({
'title': 'Query',
'type': 'string',
}),
}),
'required': list([
'query',
'inner',
]),
'title': 'Input',
'type': 'object',
})
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2.2
dict({
'properties': dict({
'answer': dict({
'title': 'Answer',
'type': 'string',
}),
'docs': dict({
'items': dict({
'type': 'string',
}),
'title': 'Docs',
'type': 'array',
}),
}),
'required': list([
'answer',
'docs',
]),
'title': 'Output',
'type': 'object',
})
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[duckdb]
'''
graph TD;
__start__ --> rewrite_query;
analyzer_one --> retriever_one;
qa --> __end__;
retriever_one --> qa;
retriever_two --> qa;
rewrite_query --> analyzer_one;
rewrite_query -.-> retriever_two;
'''
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[duckdb].1
dict({
'$defs': dict({
'InnerObject': dict({
'properties': dict({
'yo': dict({
'title': 'Yo',
'type': 'integer',
}),
}),
'required': list([
'yo',
]),
'title': 'InnerObject',
'type': 'object',
}),
}),
'properties': dict({
'inner': dict({
'$ref': '#/$defs/InnerObject',
}),
'query': dict({
'title': 'Query',
'type': 'string',
}),
}),
'required': list([
'query',
'inner',
]),
'title': 'Input',
'type': 'object',
})
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[duckdb].2
dict({
'properties': dict({
'answer': dict({
'title': 'Answer',
'type': 'string',
}),
'docs': dict({
'items': dict({
'type': 'string',
}),
'title': 'Docs',
'type': 'array',
}),
}),
'required': list([
'answer',
'docs',
]),
'title': 'Output',
'type': 'object',
})
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[memory]
'''
graph TD;
__start__ --> rewrite_query;
analyzer_one --> retriever_one;
qa --> __end__;
retriever_one --> qa;
retriever_two --> qa;
rewrite_query --> analyzer_one;
rewrite_query -.-> retriever_two;
'''
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[memory].1
dict({
'$defs': dict({
'InnerObject': dict({
'properties': dict({
'yo': dict({
'title': 'Yo',
'type': 'integer',
}),
}),
'required': list([
'yo',
]),
'title': 'InnerObject',
'type': 'object',
}),
}),
'properties': dict({
'inner': dict({
'$ref': '#/$defs/InnerObject',
}),
'query': dict({
'title': 'Query',
'type': 'string',
}),
}),
'required': list([
'query',
'inner',
]),
'title': 'Input',
'type': 'object',
})
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[memory].2
dict({
'properties': dict({
'answer': dict({
'title': 'Answer',
'type': 'string',
}),
'docs': dict({
'items': dict({
'type': 'string',
}),
'title': 'Docs',
'type': 'array',
}),
}),
'required': list([
'answer',
'docs',
]),
'title': 'Output',
'type': 'object',
})
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[postgres]
'''
graph TD;
__start__ --> rewrite_query;
analyzer_one --> retriever_one;
qa --> __end__;
retriever_one --> qa;
retriever_two --> qa;
rewrite_query --> analyzer_one;
rewrite_query -.-> retriever_two;
'''
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[postgres].1
dict({
'$defs': dict({
'InnerObject': dict({
'properties': dict({
'yo': dict({
'title': 'Yo',
'type': 'integer',
}),
}),
'required': list([
'yo',
]),
'title': 'InnerObject',
'type': 'object',
}),
}),
'properties': dict({
'inner': dict({
'$ref': '#/$defs/InnerObject',
}),
'query': dict({
'title': 'Query',
'type': 'string',
}),
}),
'required': list([
'query',
'inner',
]),
'title': 'Input',
'type': 'object',
})
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[postgres].2
dict({
'properties': dict({
'answer': dict({
'title': 'Answer',
'type': 'string',
}),
'docs': dict({
'items': dict({
'type': 'string',
}),
'title': 'Docs',
'type': 'array',
}),
}),
'required': list([
'answer',
'docs',
]),
'title': 'Output',
'type': 'object',
})
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[postgres_pipe]
'''
graph TD;
__start__ --> rewrite_query;
analyzer_one --> retriever_one;
qa --> __end__;
retriever_one --> qa;
retriever_two --> qa;
rewrite_query --> analyzer_one;
rewrite_query -.-> retriever_two;
'''
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[postgres_pipe].1
dict({
'$defs': dict({
'InnerObject': dict({
'properties': dict({
'yo': dict({
'title': 'Yo',
'type': 'integer',
}),
}),
'required': list([
'yo',
]),
'title': 'InnerObject',
'type': 'object',
}),
}),
'properties': dict({
'inner': dict({
'$ref': '#/$defs/InnerObject',
}),
'query': dict({
'title': 'Query',
'type': 'string',
}),
}),
'required': list([
'query',
'inner',
]),
'title': 'Input',
'type': 'object',
})
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[postgres_pipe].2
dict({
'properties': dict({
'answer': dict({
'title': 'Answer',
'type': 'string',
}),
'docs': dict({
'items': dict({
'type': 'string',
}),
'title': 'Docs',
'type': 'array',
}),
}),
'required': list([
'answer',
'docs',
]),
'title': 'Output',
'type': 'object',
})
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[postgres_pool]
'''
graph TD;
__start__ --> rewrite_query;
analyzer_one --> retriever_one;
qa --> __end__;
retriever_one --> qa;
retriever_two --> qa;
rewrite_query --> analyzer_one;
rewrite_query -.-> retriever_two;
'''
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[postgres_pool].1
dict({
'$defs': dict({
'InnerObject': dict({
'properties': dict({
'yo': dict({
'title': 'Yo',
'type': 'integer',
}),
}),
'required': list([
'yo',
]),
'title': 'InnerObject',
'type': 'object',
}),
}),
'properties': dict({
'inner': dict({
'$ref': '#/$defs/InnerObject',
}),
'query': dict({
'title': 'Query',
'type': 'string',
}),
}),
'required': list([
'query',
'inner',
]),
'title': 'Input',
'type': 'object',
})
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[postgres_pool].2
dict({
'properties': dict({
'answer': dict({
'title': 'Answer',
'type': 'string',
}),
'docs': dict({
'items': dict({
'type': 'string',
}),
'title': 'Docs',
'type': 'array',
}),
}),
'required': list([
'answer',
'docs',
]),
'title': 'Output',
'type': 'object',
})
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[sqlite]
'''
graph TD;
__start__ --> rewrite_query;
analyzer_one --> retriever_one;
qa --> __end__;
retriever_one --> qa;
retriever_two --> qa;
rewrite_query --> analyzer_one;
rewrite_query -.-> retriever_two;
'''
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[sqlite].1
dict({
'$defs': dict({
'InnerObject': dict({
'properties': dict({
'yo': dict({
'title': 'Yo',
'type': 'integer',
}),
}),
'required': list([
'yo',
]),
'title': 'InnerObject',
'type': 'object',
}),
}),
'properties': dict({
'inner': dict({
'$ref': '#/$defs/InnerObject',
}),
'query': dict({
'title': 'Query',
'type': 'string',
}),
}),
'required': list([
'query',
'inner',
]),
'title': 'Input',
'type': 'object',
})
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_custom_state_class_pydantic2[sqlite].2
dict({
'properties': dict({
'answer': dict({
'title': 'Answer',
'type': 'string',
}),
'docs': dict({
'items': dict({
'type': 'string',
}),
'title': 'Docs',
'type': 'array',
}),
}),
'required': list([
'answer',
'docs',
]),
'title': 'Output',
'type': 'object',
})
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_via_branch[duckdb]
'''
graph TD;
__start__ --> rewrite_query;
analyzer_one --> retriever_one;
qa --> __end__;
retriever_one --> qa;
retriever_two --> qa;
rewrite_query --> analyzer_one;
rewrite_query -.-> retriever_two;
'''
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_via_branch[memory]
'''
graph TD;
__start__ --> rewrite_query;
analyzer_one --> retriever_one;
qa --> __end__;
retriever_one --> qa;
retriever_two --> qa;
rewrite_query --> analyzer_one;
rewrite_query -.-> retriever_two;
'''
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_via_branch[postgres]
'''
graph TD;
__start__ --> rewrite_query;
analyzer_one --> retriever_one;
qa --> __end__;
retriever_one --> qa;
retriever_two --> qa;
rewrite_query --> analyzer_one;
rewrite_query -.-> retriever_two;
'''
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_via_branch[postgres_pipe]
'''
graph TD;
__start__ --> rewrite_query;
analyzer_one --> retriever_one;
qa --> __end__;
retriever_one --> qa;
retriever_two --> qa;
rewrite_query --> analyzer_one;
rewrite_query -.-> retriever_two;
'''
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_via_branch[postgres_pool]
'''
graph TD;
__start__ --> rewrite_query;
analyzer_one --> retriever_one;
qa --> __end__;
retriever_one --> qa;
retriever_two --> qa;
rewrite_query --> analyzer_one;
rewrite_query -.-> retriever_two;
'''
# ---
# name: test_in_one_fan_out_state_graph_waiting_edge_via_branch[sqlite]
'''
graph TD;
__start__ --> rewrite_query;
analyzer_one --> retriever_one;
qa --> __end__;
retriever_one --> qa;
retriever_two --> qa;
rewrite_query --> analyzer_one;
rewrite_query -.-> retriever_two;
'''
# ---
# name: test_message_graph.1
'{"title": "LangGraphOutput", "type": "array", "items": {"anyOf": [{"$ref": "#/definitions/AIMessage"}, {"$ref": "#/definitions/HumanMessage"}, {"$ref": "#/definitions/ChatMessage"}, {"$ref": "#/definitions/SystemMessage"}, {"$ref": "#/definitions/FunctionMessage"}, {"$ref": "#/definitions/ToolMessage"}]}, "definitions": {"ToolCall": {"title": "ToolCall", "type": "object", "properties": {"name": {"title": "Name", "type": "string"}, "args": {"title": "Args", "type": "object"}, "id": {"title": "Id", "type": "string"}, "type": {"title": "Type", "enum": ["tool_call"], "type": "string"}}, "required": ["name", "args", "id"]}, "InvalidToolCall": {"title": "InvalidToolCall", "type": "object", "properties": {"name": {"title": "Name", "type": "string"}, "args": {"title": "Args", "type": "string"}, "id": {"title": "Id", "type": "string"}, "error": {"title": "Error", "type": "string"}, "type": {"title": "Type", "enum": ["invalid_tool_call"], "type": "string"}}, "required": ["name", "args", "id", "error"]}, "UsageMetadata": {"title": "UsageMetadata", "type": "object", "properties": {"input_tokens": {"title": "Input Tokens", "type": "integer"}, "output_tokens": {"title": "Output Tokens", "type": "integer"}, "total_tokens": {"title": "Total Tokens", "type": "integer"}}, "required": ["input_tokens", "output_tokens", "total_tokens"]}, "AIMessage": {"title": "AIMessage", "description": "Message from an AI.\\n\\nAIMessage is returned from a chat model as a response to a prompt.\\n\\nThis message represents the output of the model and consists of both\\nthe raw output as returned by the model together standardized fields\\n(e.g., tool calls, usage metadata) added by the LangChain framework.", "type": "object", "properties": {"content": {"title": "Content", "anyOf": [{"type": "string"}, {"type": "array", "items": {"anyOf": [{"type": "string"}, {"type": "object"}]}}]}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response 
Metadata", "type": "object"}, "type": {"title": "Type", "default": "ai", "enum": ["ai"], "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"title": "Id", "type": "string"}, "example": {"title": "Example", "default": false, "type": "boolean"}, "tool_calls": {"title": "Tool Calls", "default": [], "type": "array", "items": {"$ref": "#/definitions/ToolCall"}}, "invalid_tool_calls": {"title": "Invalid Tool Calls", "default": [], "type": "array", "items": {"$ref": "#/definitions/InvalidToolCall"}}, "usage_metadata": {"$ref": "#/definitions/UsageMetadata"}}, "required": ["content"]}, "HumanMessage": {"title": "HumanMessage", "description": "Message from a human.\\n\\nHumanMessages are messages that are passed in from a human to the model.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Instantiate a chat model and invoke it with the messages\\n model = ...\\n print(model.invoke(messages))", "type": "object", "properties": {"content": {"title": "Content", "anyOf": [{"type": "string"}, {"type": "array", "items": {"anyOf": [{"type": "string"}, {"type": "object"}]}}]}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"title": "Type", "default": "human", "enum": ["human"], "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"title": "Id", "type": "string"}, "example": {"title": "Example", "default": false, "type": "boolean"}}, "required": ["content"]}, "ChatMessage": {"title": "ChatMessage", "description": "Message that can be assigned an arbitrary speaker (i.e. 
role).", "type": "object", "properties": {"content": {"title": "Content", "anyOf": [{"type": "string"}, {"type": "array", "items": {"anyOf": [{"type": "string"}, {"type": "object"}]}}]}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"title": "Type", "default": "chat", "enum": ["chat"], "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"title": "Id", "type": "string"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"]}, "SystemMessage": {"title": "SystemMessage", "description": "Message for priming AI behavior.\\n\\nThe system message is usually passed in as the first of a sequence\\nof input messages.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Define a chat model and invoke it with the messages\\n print(model.invoke(messages))", "type": "object", "properties": {"content": {"title": "Content", "anyOf": [{"type": "string"}, {"type": "array", "items": {"anyOf": [{"type": "string"}, {"type": "object"}]}}]}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"title": "Type", "default": "system", "enum": ["system"], "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"title": "Id", "type": "string"}}, "required": ["content"]}, "FunctionMessage": {"title": "FunctionMessage", "description": "Message for passing the result of executing a tool back to a model.\\n\\nFunctionMessage are an older version of the ToolMessage schema, and\\ndo not contain the tool_call_id field.\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call 
response. This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "type": "object", "properties": {"content": {"title": "Content", "anyOf": [{"type": "string"}, {"type": "array", "items": {"anyOf": [{"type": "string"}, {"type": "object"}]}}]}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"title": "Type", "default": "function", "enum": ["function"], "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"title": "Id", "type": "string"}}, "required": ["content", "name"]}, "ToolMessage": {"title": "ToolMessage", "description": "Message for passing the result of executing a tool back to a model.\\n\\nToolMessages contain the result of a tool invocation. Typically, the result\\nis encoded inside the `content` field.\\n\\nExample: A ToolMessage representing a result of 42 from a tool call with id\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n ToolMessage(content=\'42\', tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\')\\n\\n\\nExample: A ToolMessage where only part of the tool output is sent to the model\\n and the full output is passed in to artifact.\\n\\n .. versionadded:: 0.2.17\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n tool_output = {\\n \\"stdout\\": \\"From the graph we can see that the correlation between x and y is ...\\",\\n \\"stderr\\": None,\\n \\"artifacts\\": {\\"type\\": \\"image\\", \\"base64_data\\": \\"/9j/4gIcSU...\\"},\\n }\\n\\n ToolMessage(\\n content=tool_output[\\"stdout\\"],\\n artifact=tool_output,\\n tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\',\\n )\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. 
This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "type": "object", "properties": {"content": {"title": "Content", "anyOf": [{"type": "string"}, {"type": "array", "items": {"anyOf": [{"type": "string"}, {"type": "object"}]}}]}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"title": "Type", "default": "tool", "enum": ["tool"], "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"title": "Id", "type": "string"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"title": "Artifact"}, "status": {"title": "Status", "default": "success", "enum": ["success", "error"], "type": "string"}}, "required": ["content", "tool_call_id"]}}}'
# ---
# name: test_message_graph.2
'''
{
"nodes": [
{
"id": "__start__",
"type": "schema",
"data": "__start__"
},
{
"id": "agent",
"type": "runnable",
"data": {
"id": [
"tests",
"test_pregel",
"FakeFuntionChatModel"
],
"name": "agent"
}
},
{
"id": "tools",
"type": "runnable",
"data": {
"id": [
"langgraph",
"prebuilt",
"tool_node",
"ToolNode"
],
"name": "tools"
}
},
{
"id": "__end__",
"type": "schema",
"data": "__end__"
}
],
"edges": [
{
"source": "__start__",
"target": "agent"
},
{
"source": "tools",
"target": "agent"
},
{
"source": "agent",
"target": "tools",
"data": "continue",
"conditional": true
},
{
"source": "agent",
"target": "__end__",
"data": "end",
"conditional": true
}
]
}
'''
# ---
# name: test_message_graph.3
'''
graph TD;
__start__ --> agent;
tools --> agent;
agent -.  continue  .-> tools;
agent -.  end  .-> __end__;
'''
# ---
# name: test_message_graph[duckdb]
'{"$defs": {"AIMessage": {"additionalProperties": true, "description": "Message from an AI.\\n\\nAIMessage is returned from a chat model as a response to a prompt.\\n\\nThis message represents the output of the model and consists of both\\nthe raw output as returned by the model together standardized fields\\n(e.g., tool calls, usage metadata) added by the LangChain framework.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ai", "default": "ai", "enum": ["ai"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": "#/$defs/InvalidToolCall"}, "title": "Invalid Tool Calls", "type": "array"}, "usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, {"type": "null"}], "default": null}}, "required": ["content"], "title": "AIMessage", "type": "object"}, "AIMessageChunk": {"additionalProperties": true, "description": "Message chunk from an AI.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "AIMessageChunk", "default": "AIMessageChunk", "enum": ["AIMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": 
"string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": "#/$defs/InvalidToolCall"}, "title": "Invalid Tool Calls", "type": "array"}, "usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, {"type": "null"}], "default": null}, "tool_call_chunks": {"default": [], "items": {"$ref": "#/$defs/ToolCallChunk"}, "title": "Tool Call Chunks", "type": "array"}}, "required": ["content"], "title": "AIMessageChunk", "type": "object"}, "ChatMessage": {"additionalProperties": true, "description": "Message that can be assigned an arbitrary speaker (i.e. role).", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "chat", "default": "chat", "enum": ["chat"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], "title": "ChatMessage", "type": "object"}, "ChatMessageChunk": {"additionalProperties": true, "description": "Chat Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": 
{"const": "ChatMessageChunk", "default": "ChatMessageChunk", "enum": ["ChatMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], "title": "ChatMessageChunk", "type": "object"}, "FunctionMessage": {"additionalProperties": true, "description": "Message for passing the result of executing a tool back to a model.\\n\\nFunctionMessage are an older version of the ToolMessage schema, and\\ndo not contain the tool_call_id field.\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "function", "default": "function", "enum": ["function"], "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessage", "type": "object"}, "FunctionMessageChunk": {"additionalProperties": true, "description": "Function Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "FunctionMessageChunk", "default": "FunctionMessageChunk", 
"enum": ["FunctionMessageChunk"], "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessageChunk", "type": "object"}, "HumanMessage": {"additionalProperties": true, "description": "Message from a human.\\n\\nHumanMessages are messages that are passed in from a human to the model.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Instantiate a chat model and invoke it with the messages\\n model = ...\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "human", "default": "human", "enum": ["human"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], "title": "HumanMessage", "type": "object"}, "HumanMessageChunk": {"additionalProperties": true, "description": "Human Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": 
"HumanMessageChunk", "default": "HumanMessageChunk", "enum": ["HumanMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], "title": "HumanMessageChunk", "type": "object"}, "InputTokenDetails": {"description": "Breakdown of input token counts.\\n\\nDoes *not* need to sum to full input token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n }\\n\\n.. versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "cache_creation": {"title": "Cache Creation", "type": "integer"}, "cache_read": {"title": "Cache Read", "type": "integer"}}, "title": "InputTokenDetails", "type": "object"}, "InvalidToolCall": {"description": "Allowance for errors made by LLM.\\n\\nHere we add an `error` key to surface errors made during generation\\n(e.g., invalid JSON arguments.)", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "error": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Error"}, "type": {"const": "invalid_tool_call", "enum": ["invalid_tool_call"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "error"], "title": "InvalidToolCall", "type": "object"}, "OutputTokenDetails": {"description": "Breakdown of output token counts.\\n\\nDoes *not* need to sum to full output token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n\\n.. 
versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "reasoning": {"title": "Reasoning", "type": "integer"}}, "title": "OutputTokenDetails", "type": "object"}, "SystemMessage": {"additionalProperties": true, "description": "Message for priming AI behavior.\\n\\nThe system message is usually passed in as the first of a sequence\\nof input messages.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Define a chat model and invoke it with the messages\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "system", "default": "system", "enum": ["system"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessage", "type": "object"}, "SystemMessageChunk": {"additionalProperties": true, "description": "System Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "SystemMessageChunk", "default": "SystemMessageChunk", "enum": ["SystemMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, 
{"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessageChunk", "type": "object"}, "ToolCall": {"description": "Represents a request to call a tool.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"name\\": \\"foo\\",\\n \\"args\\": {\\"a\\": 1},\\n \\"id\\": \\"123\\"\\n }\\n\\n This represents a request to call the tool named \\"foo\\" with arguments {\\"a\\": 1}\\n and an identifier of \\"123\\".", "properties": {"name": {"title": "Name", "type": "string"}, "args": {"title": "Args", "type": "object"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "type": {"const": "tool_call", "enum": ["tool_call"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id"], "title": "ToolCall", "type": "object"}, "ToolCallChunk": {"description": "A chunk of a tool call (e.g., as part of a stream).\\n\\nWhen merging ToolCallChunks (e.g., via AIMessageChunk.__add__),\\nall string attributes are concatenated. Chunks are only merged if their\\nvalues of `index` are equal and not None.\\n\\nExample:\\n\\n.. 
code-block:: python\\n\\n left_chunks = [ToolCallChunk(name=\\"foo\\", args=\'{\\"a\\":\', index=0)]\\n right_chunks = [ToolCallChunk(name=None, args=\'1}\', index=0)]\\n\\n (\\n AIMessageChunk(content=\\"\\", tool_call_chunks=left_chunks)\\n + AIMessageChunk(content=\\"\\", tool_call_chunks=right_chunks)\\n ).tool_call_chunks == [ToolCallChunk(name=\'foo\', args=\'{\\"a\\":1}\', index=0)]", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "index": {"anyOf": [{"type": "integer"}, {"type": "null"}], "title": "Index"}, "type": {"const": "tool_call_chunk", "enum": ["tool_call_chunk"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "index"], "title": "ToolCallChunk", "type": "object"}, "ToolMessage": {"additionalProperties": true, "description": "Message for passing the result of executing a tool back to a model.\\n\\nToolMessages contain the result of a tool invocation. Typically, the result\\nis encoded inside the `content` field.\\n\\nExample: A ToolMessage representing a result of 42 from a tool call with id\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n ToolMessage(content=\'42\', tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\')\\n\\n\\nExample: A ToolMessage where only part of the tool output is sent to the model\\n and the full output is passed in to artifact.\\n\\n .. versionadded:: 0.2.17\\n\\n .. 
code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n tool_output = {\\n \\"stdout\\": \\"From the graph we can see that the correlation between x and y is ...\\",\\n \\"stderr\\": None,\\n \\"artifacts\\": {\\"type\\": \\"image\\", \\"base64_data\\": \\"/9j/4gIcSU...\\"},\\n }\\n\\n ToolMessage(\\n content=tool_output[\\"stdout\\"],\\n artifact=tool_output,\\n tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\',\\n )\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "tool", "default": "tool", "enum": ["tool"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessage", "type": "object"}, "ToolMessageChunk": {"additionalProperties": true, "description": "Tool Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ToolMessageChunk", "default": "ToolMessageChunk", "enum": 
["ToolMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessageChunk", "type": "object"}, "UsageMetadata": {"description": "Usage metadata for a message, such as token counts.\\n\\nThis is a standard representation of token usage that is consistent across models.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"input_tokens\\": 350,\\n \\"output_tokens\\": 240,\\n \\"total_tokens\\": 590,\\n \\"input_token_details\\": {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n },\\n \\"output_token_details\\": {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n }\\n\\n.. 
versionchanged:: 0.3.9\\n\\n Added ``input_token_details`` and ``output_token_details``.", "properties": {"input_tokens": {"title": "Input Tokens", "type": "integer"}, "output_tokens": {"title": "Output Tokens", "type": "integer"}, "total_tokens": {"title": "Total Tokens", "type": "integer"}, "input_token_details": {"$ref": "#/$defs/InputTokenDetails"}, "output_token_details": {"$ref": "#/$defs/OutputTokenDetails"}}, "required": ["input_tokens", "output_tokens", "total_tokens"], "title": "UsageMetadata", "type": "object"}}, "default": null, "items": {"oneOf": [{"$ref": "#/$defs/AIMessage"}, {"$ref": "#/$defs/HumanMessage"}, {"$ref": "#/$defs/ChatMessage"}, {"$ref": "#/$defs/SystemMessage"}, {"$ref": "#/$defs/FunctionMessage"}, {"$ref": "#/$defs/ToolMessage"}, {"$ref": "#/$defs/AIMessageChunk"}, {"$ref": "#/$defs/HumanMessageChunk"}, {"$ref": "#/$defs/ChatMessageChunk"}, {"$ref": "#/$defs/SystemMessageChunk"}, {"$ref": "#/$defs/FunctionMessageChunk"}, {"$ref": "#/$defs/ToolMessageChunk"}]}, "title": "LangGraphInput", "type": "array"}'
# ---
# name: test_message_graph[duckdb].1
'{"$defs": {"AIMessage": {"additionalProperties": true, "description": "Message from an AI.\\n\\nAIMessage is returned from a chat model as a response to a prompt.\\n\\nThis message represents the output of the model and consists of both\\nthe raw output as returned by the model together standardized fields\\n(e.g., tool calls, usage metadata) added by the LangChain framework.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ai", "default": "ai", "enum": ["ai"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": "#/$defs/InvalidToolCall"}, "title": "Invalid Tool Calls", "type": "array"}, "usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, {"type": "null"}], "default": null}}, "required": ["content"], "title": "AIMessage", "type": "object"}, "AIMessageChunk": {"additionalProperties": true, "description": "Message chunk from an AI.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "AIMessageChunk", "default": "AIMessageChunk", "enum": ["AIMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": 
"string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": "#/$defs/InvalidToolCall"}, "title": "Invalid Tool Calls", "type": "array"}, "usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, {"type": "null"}], "default": null}, "tool_call_chunks": {"default": [], "items": {"$ref": "#/$defs/ToolCallChunk"}, "title": "Tool Call Chunks", "type": "array"}}, "required": ["content"], "title": "AIMessageChunk", "type": "object"}, "ChatMessage": {"additionalProperties": true, "description": "Message that can be assigned an arbitrary speaker (i.e. role).", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "chat", "default": "chat", "enum": ["chat"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], "title": "ChatMessage", "type": "object"}, "ChatMessageChunk": {"additionalProperties": true, "description": "Chat Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": 
{"const": "ChatMessageChunk", "default": "ChatMessageChunk", "enum": ["ChatMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], "title": "ChatMessageChunk", "type": "object"}, "FunctionMessage": {"additionalProperties": true, "description": "Message for passing the result of executing a tool back to a model.\\n\\nFunctionMessage are an older version of the ToolMessage schema, and\\ndo not contain the tool_call_id field.\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "function", "default": "function", "enum": ["function"], "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessage", "type": "object"}, "FunctionMessageChunk": {"additionalProperties": true, "description": "Function Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "FunctionMessageChunk", "default": "FunctionMessageChunk", 
"enum": ["FunctionMessageChunk"], "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessageChunk", "type": "object"}, "HumanMessage": {"additionalProperties": true, "description": "Message from a human.\\n\\nHumanMessages are messages that are passed in from a human to the model.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Instantiate a chat model and invoke it with the messages\\n model = ...\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "human", "default": "human", "enum": ["human"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], "title": "HumanMessage", "type": "object"}, "HumanMessageChunk": {"additionalProperties": true, "description": "Human Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": 
"HumanMessageChunk", "default": "HumanMessageChunk", "enum": ["HumanMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], "title": "HumanMessageChunk", "type": "object"}, "InputTokenDetails": {"description": "Breakdown of input token counts.\\n\\nDoes *not* need to sum to full input token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n }\\n\\n.. versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "cache_creation": {"title": "Cache Creation", "type": "integer"}, "cache_read": {"title": "Cache Read", "type": "integer"}}, "title": "InputTokenDetails", "type": "object"}, "InvalidToolCall": {"description": "Allowance for errors made by LLM.\\n\\nHere we add an `error` key to surface errors made during generation\\n(e.g., invalid JSON arguments.)", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "error": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Error"}, "type": {"const": "invalid_tool_call", "enum": ["invalid_tool_call"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "error"], "title": "InvalidToolCall", "type": "object"}, "OutputTokenDetails": {"description": "Breakdown of output token counts.\\n\\nDoes *not* need to sum to full output token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n\\n.. 
versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "reasoning": {"title": "Reasoning", "type": "integer"}}, "title": "OutputTokenDetails", "type": "object"}, "SystemMessage": {"additionalProperties": true, "description": "Message for priming AI behavior.\\n\\nThe system message is usually passed in as the first of a sequence\\nof input messages.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Define a chat model and invoke it with the messages\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "system", "default": "system", "enum": ["system"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessage", "type": "object"}, "SystemMessageChunk": {"additionalProperties": true, "description": "System Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "SystemMessageChunk", "default": "SystemMessageChunk", "enum": ["SystemMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, 
{"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessageChunk", "type": "object"}, "ToolCall": {"description": "Represents a request to call a tool.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"name\\": \\"foo\\",\\n \\"args\\": {\\"a\\": 1},\\n \\"id\\": \\"123\\"\\n }\\n\\n This represents a request to call the tool named \\"foo\\" with arguments {\\"a\\": 1}\\n and an identifier of \\"123\\".", "properties": {"name": {"title": "Name", "type": "string"}, "args": {"title": "Args", "type": "object"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "type": {"const": "tool_call", "enum": ["tool_call"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id"], "title": "ToolCall", "type": "object"}, "ToolCallChunk": {"description": "A chunk of a tool call (e.g., as part of a stream).\\n\\nWhen merging ToolCallChunks (e.g., via AIMessageChunk.__add__),\\nall string attributes are concatenated. Chunks are only merged if their\\nvalues of `index` are equal and not None.\\n\\nExample:\\n\\n.. 
code-block:: python\\n\\n left_chunks = [ToolCallChunk(name=\\"foo\\", args=\'{\\"a\\":\', index=0)]\\n right_chunks = [ToolCallChunk(name=None, args=\'1}\', index=0)]\\n\\n (\\n AIMessageChunk(content=\\"\\", tool_call_chunks=left_chunks)\\n + AIMessageChunk(content=\\"\\", tool_call_chunks=right_chunks)\\n ).tool_call_chunks == [ToolCallChunk(name=\'foo\', args=\'{\\"a\\":1}\', index=0)]", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "index": {"anyOf": [{"type": "integer"}, {"type": "null"}], "title": "Index"}, "type": {"const": "tool_call_chunk", "enum": ["tool_call_chunk"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "index"], "title": "ToolCallChunk", "type": "object"}, "ToolMessage": {"additionalProperties": true, "description": "Message for passing the result of executing a tool back to a model.\\n\\nToolMessages contain the result of a tool invocation. Typically, the result\\nis encoded inside the `content` field.\\n\\nExample: A ToolMessage representing a result of 42 from a tool call with id\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n ToolMessage(content=\'42\', tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\')\\n\\n\\nExample: A ToolMessage where only part of the tool output is sent to the model\\n and the full output is passed in to artifact.\\n\\n .. versionadded:: 0.2.17\\n\\n .. 
code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n tool_output = {\\n \\"stdout\\": \\"From the graph we can see that the correlation between x and y is ...\\",\\n \\"stderr\\": None,\\n \\"artifacts\\": {\\"type\\": \\"image\\", \\"base64_data\\": \\"/9j/4gIcSU...\\"},\\n }\\n\\n ToolMessage(\\n content=tool_output[\\"stdout\\"],\\n artifact=tool_output,\\n tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\',\\n )\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "tool", "default": "tool", "enum": ["tool"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessage", "type": "object"}, "ToolMessageChunk": {"additionalProperties": true, "description": "Tool Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ToolMessageChunk", "default": "ToolMessageChunk", "enum": 
["ToolMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessageChunk", "type": "object"}, "UsageMetadata": {"description": "Usage metadata for a message, such as token counts.\\n\\nThis is a standard representation of token usage that is consistent across models.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"input_tokens\\": 350,\\n \\"output_tokens\\": 240,\\n \\"total_tokens\\": 590,\\n \\"input_token_details\\": {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n },\\n \\"output_token_details\\": {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n }\\n\\n.. 
versionchanged:: 0.3.9\\n\\n Added ``input_token_details`` and ``output_token_details``.", "properties": {"input_tokens": {"title": "Input Tokens", "type": "integer"}, "output_tokens": {"title": "Output Tokens", "type": "integer"}, "total_tokens": {"title": "Total Tokens", "type": "integer"}, "input_token_details": {"$ref": "#/$defs/InputTokenDetails"}, "output_token_details": {"$ref": "#/$defs/OutputTokenDetails"}}, "required": ["input_tokens", "output_tokens", "total_tokens"], "title": "UsageMetadata", "type": "object"}}, "default": null, "items": {"oneOf": [{"$ref": "#/$defs/AIMessage"}, {"$ref": "#/$defs/HumanMessage"}, {"$ref": "#/$defs/ChatMessage"}, {"$ref": "#/$defs/SystemMessage"}, {"$ref": "#/$defs/FunctionMessage"}, {"$ref": "#/$defs/ToolMessage"}, {"$ref": "#/$defs/AIMessageChunk"}, {"$ref": "#/$defs/HumanMessageChunk"}, {"$ref": "#/$defs/ChatMessageChunk"}, {"$ref": "#/$defs/SystemMessageChunk"}, {"$ref": "#/$defs/FunctionMessageChunk"}, {"$ref": "#/$defs/ToolMessageChunk"}]}, "title": "LangGraphOutput", "type": "array"}'
# ---
# name: test_message_graph[duckdb].2
'''
{
"nodes": [
{
"id": "__start__",
"type": "schema",
"data": "__start__"
},
{
"id": "agent",
"type": "runnable",
"data": {
"id": [
"tests",
"test_pregel",
"FakeFuntionChatModel"
],
"name": "agent"
}
},
{
"id": "tools",
"type": "runnable",
"data": {
"id": [
"langgraph",
"prebuilt",
"tool_node",
"ToolNode"
],
"name": "tools"
}
},
{
"id": "__end__",
"type": "schema",
"data": "__end__"
}
],
"edges": [
{
"source": "__start__",
"target": "agent"
},
{
"source": "tools",
"target": "agent"
},
{
"source": "agent",
"target": "tools",
"data": "continue",
"conditional": true
},
{
"source": "agent",
"target": "__end__",
"data": "end",
"conditional": true
}
]
}
'''
# ---
# name: test_message_graph[duckdb].3
'''
graph TD;
__start__ --> agent;
tools --> agent;
agent -. continue .-> tools;
agent -. end .-> __end__;
'''
# ---
# name: test_message_graph[memory]
'{"$defs": {"AIMessage": {"additionalProperties": true, "description": "Message from an AI.\\n\\nAIMessage is returned from a chat model as a response to a prompt.\\n\\nThis message represents the output of the model and consists of both\\nthe raw output as returned by the model together standardized fields\\n(e.g., tool calls, usage metadata) added by the LangChain framework.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ai", "default": "ai", "enum": ["ai"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": "#/$defs/InvalidToolCall"}, "title": "Invalid Tool Calls", "type": "array"}, "usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, {"type": "null"}], "default": null}}, "required": ["content"], "title": "AIMessage", "type": "object"}, "AIMessageChunk": {"additionalProperties": true, "description": "Message chunk from an AI.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "AIMessageChunk", "default": "AIMessageChunk", "enum": ["AIMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": 
"string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": "#/$defs/InvalidToolCall"}, "title": "Invalid Tool Calls", "type": "array"}, "usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, {"type": "null"}], "default": null}, "tool_call_chunks": {"default": [], "items": {"$ref": "#/$defs/ToolCallChunk"}, "title": "Tool Call Chunks", "type": "array"}}, "required": ["content"], "title": "AIMessageChunk", "type": "object"}, "ChatMessage": {"additionalProperties": true, "description": "Message that can be assigned an arbitrary speaker (i.e. role).", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "chat", "default": "chat", "enum": ["chat"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], "title": "ChatMessage", "type": "object"}, "ChatMessageChunk": {"additionalProperties": true, "description": "Chat Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": 
{"const": "ChatMessageChunk", "default": "ChatMessageChunk", "enum": ["ChatMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], "title": "ChatMessageChunk", "type": "object"}, "FunctionMessage": {"additionalProperties": true, "description": "Message for passing the result of executing a tool back to a model.\\n\\nFunctionMessage are an older version of the ToolMessage schema, and\\ndo not contain the tool_call_id field.\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "function", "default": "function", "enum": ["function"], "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessage", "type": "object"}, "FunctionMessageChunk": {"additionalProperties": true, "description": "Function Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "FunctionMessageChunk", "default": "FunctionMessageChunk", 
"enum": ["FunctionMessageChunk"], "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessageChunk", "type": "object"}, "HumanMessage": {"additionalProperties": true, "description": "Message from a human.\\n\\nHumanMessages are messages that are passed in from a human to the model.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Instantiate a chat model and invoke it with the messages\\n model = ...\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "human", "default": "human", "enum": ["human"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], "title": "HumanMessage", "type": "object"}, "HumanMessageChunk": {"additionalProperties": true, "description": "Human Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": 
"HumanMessageChunk", "default": "HumanMessageChunk", "enum": ["HumanMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], "title": "HumanMessageChunk", "type": "object"}, "InputTokenDetails": {"description": "Breakdown of input token counts.\\n\\nDoes *not* need to sum to full input token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n }\\n\\n.. versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "cache_creation": {"title": "Cache Creation", "type": "integer"}, "cache_read": {"title": "Cache Read", "type": "integer"}}, "title": "InputTokenDetails", "type": "object"}, "InvalidToolCall": {"description": "Allowance for errors made by LLM.\\n\\nHere we add an `error` key to surface errors made during generation\\n(e.g., invalid JSON arguments.)", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "error": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Error"}, "type": {"const": "invalid_tool_call", "enum": ["invalid_tool_call"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "error"], "title": "InvalidToolCall", "type": "object"}, "OutputTokenDetails": {"description": "Breakdown of output token counts.\\n\\nDoes *not* need to sum to full output token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n\\n.. 
versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "reasoning": {"title": "Reasoning", "type": "integer"}}, "title": "OutputTokenDetails", "type": "object"}, "SystemMessage": {"additionalProperties": true, "description": "Message for priming AI behavior.\\n\\nThe system message is usually passed in as the first of a sequence\\nof input messages.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Define a chat model and invoke it with the messages\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "system", "default": "system", "enum": ["system"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessage", "type": "object"}, "SystemMessageChunk": {"additionalProperties": true, "description": "System Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "SystemMessageChunk", "default": "SystemMessageChunk", "enum": ["SystemMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, 
{"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessageChunk", "type": "object"}, "ToolCall": {"description": "Represents a request to call a tool.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"name\\": \\"foo\\",\\n \\"args\\": {\\"a\\": 1},\\n \\"id\\": \\"123\\"\\n }\\n\\n This represents a request to call the tool named \\"foo\\" with arguments {\\"a\\": 1}\\n and an identifier of \\"123\\".", "properties": {"name": {"title": "Name", "type": "string"}, "args": {"title": "Args", "type": "object"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "type": {"const": "tool_call", "enum": ["tool_call"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id"], "title": "ToolCall", "type": "object"}, "ToolCallChunk": {"description": "A chunk of a tool call (e.g., as part of a stream).\\n\\nWhen merging ToolCallChunks (e.g., via AIMessageChunk.__add__),\\nall string attributes are concatenated. Chunks are only merged if their\\nvalues of `index` are equal and not None.\\n\\nExample:\\n\\n.. 
code-block:: python\\n\\n left_chunks = [ToolCallChunk(name=\\"foo\\", args=\'{\\"a\\":\', index=0)]\\n right_chunks = [ToolCallChunk(name=None, args=\'1}\', index=0)]\\n\\n (\\n AIMessageChunk(content=\\"\\", tool_call_chunks=left_chunks)\\n + AIMessageChunk(content=\\"\\", tool_call_chunks=right_chunks)\\n ).tool_call_chunks == [ToolCallChunk(name=\'foo\', args=\'{\\"a\\":1}\', index=0)]", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "index": {"anyOf": [{"type": "integer"}, {"type": "null"}], "title": "Index"}, "type": {"const": "tool_call_chunk", "enum": ["tool_call_chunk"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "index"], "title": "ToolCallChunk", "type": "object"}, "ToolMessage": {"additionalProperties": true, "description": "Message for passing the result of executing a tool back to a model.\\n\\nToolMessages contain the result of a tool invocation. Typically, the result\\nis encoded inside the `content` field.\\n\\nExample: A ToolMessage representing a result of 42 from a tool call with id\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n ToolMessage(content=\'42\', tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\')\\n\\n\\nExample: A ToolMessage where only part of the tool output is sent to the model\\n and the full output is passed in to artifact.\\n\\n .. versionadded:: 0.2.17\\n\\n .. 
code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n tool_output = {\\n \\"stdout\\": \\"From the graph we can see that the correlation between x and y is ...\\",\\n \\"stderr\\": None,\\n \\"artifacts\\": {\\"type\\": \\"image\\", \\"base64_data\\": \\"/9j/4gIcSU...\\"},\\n }\\n\\n ToolMessage(\\n content=tool_output[\\"stdout\\"],\\n artifact=tool_output,\\n tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\',\\n )\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "tool", "default": "tool", "enum": ["tool"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessage", "type": "object"}, "ToolMessageChunk": {"additionalProperties": true, "description": "Tool Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ToolMessageChunk", "default": "ToolMessageChunk", "enum": 
["ToolMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessageChunk", "type": "object"}, "UsageMetadata": {"description": "Usage metadata for a message, such as token counts.\\n\\nThis is a standard representation of token usage that is consistent across models.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"input_tokens\\": 350,\\n \\"output_tokens\\": 240,\\n \\"total_tokens\\": 590,\\n \\"input_token_details\\": {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n },\\n \\"output_token_details\\": {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n }\\n\\n.. 
versionchanged:: 0.3.9\\n\\n Added ``input_token_details`` and ``output_token_details``.", "properties": {"input_tokens": {"title": "Input Tokens", "type": "integer"}, "output_tokens": {"title": "Output Tokens", "type": "integer"}, "total_tokens": {"title": "Total Tokens", "type": "integer"}, "input_token_details": {"$ref": "#/$defs/InputTokenDetails"}, "output_token_details": {"$ref": "#/$defs/OutputTokenDetails"}}, "required": ["input_tokens", "output_tokens", "total_tokens"], "title": "UsageMetadata", "type": "object"}}, "default": null, "items": {"oneOf": [{"$ref": "#/$defs/AIMessage"}, {"$ref": "#/$defs/HumanMessage"}, {"$ref": "#/$defs/ChatMessage"}, {"$ref": "#/$defs/SystemMessage"}, {"$ref": "#/$defs/FunctionMessage"}, {"$ref": "#/$defs/ToolMessage"}, {"$ref": "#/$defs/AIMessageChunk"}, {"$ref": "#/$defs/HumanMessageChunk"}, {"$ref": "#/$defs/ChatMessageChunk"}, {"$ref": "#/$defs/SystemMessageChunk"}, {"$ref": "#/$defs/FunctionMessageChunk"}, {"$ref": "#/$defs/ToolMessageChunk"}]}, "title": "LangGraphInput", "type": "array"}'
# ---
# name: test_message_graph[memory].1
'{"$defs": {"AIMessage": {"additionalProperties": true, "description": "Message from an AI.\\n\\nAIMessage is returned from a chat model as a response to a prompt.\\n\\nThis message represents the output of the model and consists of both\\nthe raw output as returned by the model together standardized fields\\n(e.g., tool calls, usage metadata) added by the LangChain framework.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ai", "default": "ai", "enum": ["ai"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": "#/$defs/InvalidToolCall"}, "title": "Invalid Tool Calls", "type": "array"}, "usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, {"type": "null"}], "default": null}}, "required": ["content"], "title": "AIMessage", "type": "object"}, "AIMessageChunk": {"additionalProperties": true, "description": "Message chunk from an AI.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "AIMessageChunk", "default": "AIMessageChunk", "enum": ["AIMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": 
"string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": "#/$defs/InvalidToolCall"}, "title": "Invalid Tool Calls", "type": "array"}, "usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, {"type": "null"}], "default": null}, "tool_call_chunks": {"default": [], "items": {"$ref": "#/$defs/ToolCallChunk"}, "title": "Tool Call Chunks", "type": "array"}}, "required": ["content"], "title": "AIMessageChunk", "type": "object"}, "ChatMessage": {"additionalProperties": true, "description": "Message that can be assigned an arbitrary speaker (i.e. role).", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "chat", "default": "chat", "enum": ["chat"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], "title": "ChatMessage", "type": "object"}, "ChatMessageChunk": {"additionalProperties": true, "description": "Chat Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": 
{"const": "ChatMessageChunk", "default": "ChatMessageChunk", "enum": ["ChatMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], "title": "ChatMessageChunk", "type": "object"}, "FunctionMessage": {"additionalProperties": true, "description": "Message for passing the result of executing a tool back to a model.\\n\\nFunctionMessage are an older version of the ToolMessage schema, and\\ndo not contain the tool_call_id field.\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "function", "default": "function", "enum": ["function"], "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessage", "type": "object"}, "FunctionMessageChunk": {"additionalProperties": true, "description": "Function Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "FunctionMessageChunk", "default": "FunctionMessageChunk", 
"enum": ["FunctionMessageChunk"], "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessageChunk", "type": "object"}, "HumanMessage": {"additionalProperties": true, "description": "Message from a human.\\n\\nHumanMessages are messages that are passed in from a human to the model.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Instantiate a chat model and invoke it with the messages\\n model = ...\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "human", "default": "human", "enum": ["human"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], "title": "HumanMessage", "type": "object"}, "HumanMessageChunk": {"additionalProperties": true, "description": "Human Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": 
"HumanMessageChunk", "default": "HumanMessageChunk", "enum": ["HumanMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], "title": "HumanMessageChunk", "type": "object"}, "InputTokenDetails": {"description": "Breakdown of input token counts.\\n\\nDoes *not* need to sum to full input token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n }\\n\\n.. versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "cache_creation": {"title": "Cache Creation", "type": "integer"}, "cache_read": {"title": "Cache Read", "type": "integer"}}, "title": "InputTokenDetails", "type": "object"}, "InvalidToolCall": {"description": "Allowance for errors made by LLM.\\n\\nHere we add an `error` key to surface errors made during generation\\n(e.g., invalid JSON arguments.)", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "error": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Error"}, "type": {"const": "invalid_tool_call", "enum": ["invalid_tool_call"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "error"], "title": "InvalidToolCall", "type": "object"}, "OutputTokenDetails": {"description": "Breakdown of output token counts.\\n\\nDoes *not* need to sum to full output token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n\\n.. 
versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "reasoning": {"title": "Reasoning", "type": "integer"}}, "title": "OutputTokenDetails", "type": "object"}, "SystemMessage": {"additionalProperties": true, "description": "Message for priming AI behavior.\\n\\nThe system message is usually passed in as the first of a sequence\\nof input messages.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Define a chat model and invoke it with the messages\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "system", "default": "system", "enum": ["system"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessage", "type": "object"}, "SystemMessageChunk": {"additionalProperties": true, "description": "System Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "SystemMessageChunk", "default": "SystemMessageChunk", "enum": ["SystemMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, 
{"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessageChunk", "type": "object"}, "ToolCall": {"description": "Represents a request to call a tool.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"name\\": \\"foo\\",\\n \\"args\\": {\\"a\\": 1},\\n \\"id\\": \\"123\\"\\n }\\n\\n This represents a request to call the tool named \\"foo\\" with arguments {\\"a\\": 1}\\n and an identifier of \\"123\\".", "properties": {"name": {"title": "Name", "type": "string"}, "args": {"title": "Args", "type": "object"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "type": {"const": "tool_call", "enum": ["tool_call"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id"], "title": "ToolCall", "type": "object"}, "ToolCallChunk": {"description": "A chunk of a tool call (e.g., as part of a stream).\\n\\nWhen merging ToolCallChunks (e.g., via AIMessageChunk.__add__),\\nall string attributes are concatenated. Chunks are only merged if their\\nvalues of `index` are equal and not None.\\n\\nExample:\\n\\n.. 
code-block:: python\\n\\n left_chunks = [ToolCallChunk(name=\\"foo\\", args=\'{\\"a\\":\', index=0)]\\n right_chunks = [ToolCallChunk(name=None, args=\'1}\', index=0)]\\n\\n (\\n AIMessageChunk(content=\\"\\", tool_call_chunks=left_chunks)\\n + AIMessageChunk(content=\\"\\", tool_call_chunks=right_chunks)\\n ).tool_call_chunks == [ToolCallChunk(name=\'foo\', args=\'{\\"a\\":1}\', index=0)]", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "index": {"anyOf": [{"type": "integer"}, {"type": "null"}], "title": "Index"}, "type": {"const": "tool_call_chunk", "enum": ["tool_call_chunk"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "index"], "title": "ToolCallChunk", "type": "object"}, "ToolMessage": {"additionalProperties": true, "description": "Message for passing the result of executing a tool back to a model.\\n\\nToolMessages contain the result of a tool invocation. Typically, the result\\nis encoded inside the `content` field.\\n\\nExample: A ToolMessage representing a result of 42 from a tool call with id\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n ToolMessage(content=\'42\', tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\')\\n\\n\\nExample: A ToolMessage where only part of the tool output is sent to the model\\n and the full output is passed in to artifact.\\n\\n .. versionadded:: 0.2.17\\n\\n .. 
code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n tool_output = {\\n \\"stdout\\": \\"From the graph we can see that the correlation between x and y is ...\\",\\n \\"stderr\\": None,\\n \\"artifacts\\": {\\"type\\": \\"image\\", \\"base64_data\\": \\"/9j/4gIcSU...\\"},\\n }\\n\\n ToolMessage(\\n content=tool_output[\\"stdout\\"],\\n artifact=tool_output,\\n tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\',\\n )\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "tool", "default": "tool", "enum": ["tool"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessage", "type": "object"}, "ToolMessageChunk": {"additionalProperties": true, "description": "Tool Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ToolMessageChunk", "default": "ToolMessageChunk", "enum": 
["ToolMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessageChunk", "type": "object"}, "UsageMetadata": {"description": "Usage metadata for a message, such as token counts.\\n\\nThis is a standard representation of token usage that is consistent across models.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"input_tokens\\": 350,\\n \\"output_tokens\\": 240,\\n \\"total_tokens\\": 590,\\n \\"input_token_details\\": {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n },\\n \\"output_token_details\\": {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n }\\n\\n.. 
versionchanged:: 0.3.9\\n\\n Added ``input_token_details`` and ``output_token_details``.", "properties": {"input_tokens": {"title": "Input Tokens", "type": "integer"}, "output_tokens": {"title": "Output Tokens", "type": "integer"}, "total_tokens": {"title": "Total Tokens", "type": "integer"}, "input_token_details": {"$ref": "#/$defs/InputTokenDetails"}, "output_token_details": {"$ref": "#/$defs/OutputTokenDetails"}}, "required": ["input_tokens", "output_tokens", "total_tokens"], "title": "UsageMetadata", "type": "object"}}, "default": null, "items": {"oneOf": [{"$ref": "#/$defs/AIMessage"}, {"$ref": "#/$defs/HumanMessage"}, {"$ref": "#/$defs/ChatMessage"}, {"$ref": "#/$defs/SystemMessage"}, {"$ref": "#/$defs/FunctionMessage"}, {"$ref": "#/$defs/ToolMessage"}, {"$ref": "#/$defs/AIMessageChunk"}, {"$ref": "#/$defs/HumanMessageChunk"}, {"$ref": "#/$defs/ChatMessageChunk"}, {"$ref": "#/$defs/SystemMessageChunk"}, {"$ref": "#/$defs/FunctionMessageChunk"}, {"$ref": "#/$defs/ToolMessageChunk"}]}, "title": "LangGraphOutput", "type": "array"}'
# ---
# name: test_message_graph[memory].2
'''
{
"nodes": [
{
"id": "__start__",
"type": "schema",
"data": "__start__"
},
{
"id": "agent",
"type": "runnable",
"data": {
"id": [
"tests",
"test_pregel",
"FakeFuntionChatModel"
],
"name": "agent"
}
},
{
"id": "tools",
"type": "runnable",
"data": {
"id": [
"langgraph",
"prebuilt",
"tool_node",
"ToolNode"
],
"name": "tools"
}
},
{
"id": "__end__",
"type": "schema",
"data": "__end__"
}
],
"edges": [
{
"source": "__start__",
"target": "agent"
},
{
"source": "tools",
"target": "agent"
},
{
"source": "agent",
"target": "tools",
"data": "continue",
"conditional": true
},
{
"source": "agent",
"target": "__end__",
"data": "end",
"conditional": true
}
]
}
'''
# ---
# name: test_message_graph[memory].3
'''
graph TD;
__start__ --> agent;
tools --> agent;
agent -. continue .-> tools;
agent -. end .-> __end__;
'''
# ---
# name: test_message_graph[postgres]
'{"$defs": {"AIMessage": {"additionalProperties": true, "description": "Message from an AI.\\n\\nAIMessage is returned from a chat model as a response to a prompt.\\n\\nThis message represents the output of the model and consists of both\\nthe raw output as returned by the model together standardized fields\\n(e.g., tool calls, usage metadata) added by the LangChain framework.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ai", "default": "ai", "enum": ["ai"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": "#/$defs/InvalidToolCall"}, "title": "Invalid Tool Calls", "type": "array"}, "usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, {"type": "null"}], "default": null}}, "required": ["content"], "title": "AIMessage", "type": "object"}, "AIMessageChunk": {"additionalProperties": true, "description": "Message chunk from an AI.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "AIMessageChunk", "default": "AIMessageChunk", "enum": ["AIMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": 
"string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": "#/$defs/InvalidToolCall"}, "title": "Invalid Tool Calls", "type": "array"}, "usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, {"type": "null"}], "default": null}, "tool_call_chunks": {"default": [], "items": {"$ref": "#/$defs/ToolCallChunk"}, "title": "Tool Call Chunks", "type": "array"}}, "required": ["content"], "title": "AIMessageChunk", "type": "object"}, "ChatMessage": {"additionalProperties": true, "description": "Message that can be assigned an arbitrary speaker (i.e. role).", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "chat", "default": "chat", "enum": ["chat"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], "title": "ChatMessage", "type": "object"}, "ChatMessageChunk": {"additionalProperties": true, "description": "Chat Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": 
{"const": "ChatMessageChunk", "default": "ChatMessageChunk", "enum": ["ChatMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], "title": "ChatMessageChunk", "type": "object"}, "FunctionMessage": {"additionalProperties": true, "description": "Message for passing the result of executing a tool back to a model.\\n\\nFunctionMessage are an older version of the ToolMessage schema, and\\ndo not contain the tool_call_id field.\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "function", "default": "function", "enum": ["function"], "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessage", "type": "object"}, "FunctionMessageChunk": {"additionalProperties": true, "description": "Function Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "FunctionMessageChunk", "default": "FunctionMessageChunk", 
"enum": ["FunctionMessageChunk"], "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessageChunk", "type": "object"}, "HumanMessage": {"additionalProperties": true, "description": "Message from a human.\\n\\nHumanMessages are messages that are passed in from a human to the model.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Instantiate a chat model and invoke it with the messages\\n model = ...\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "human", "default": "human", "enum": ["human"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], "title": "HumanMessage", "type": "object"}, "HumanMessageChunk": {"additionalProperties": true, "description": "Human Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": 
"HumanMessageChunk", "default": "HumanMessageChunk", "enum": ["HumanMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], "title": "HumanMessageChunk", "type": "object"}, "InputTokenDetails": {"description": "Breakdown of input token counts.\\n\\nDoes *not* need to sum to full input token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n }\\n\\n.. versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "cache_creation": {"title": "Cache Creation", "type": "integer"}, "cache_read": {"title": "Cache Read", "type": "integer"}}, "title": "InputTokenDetails", "type": "object"}, "InvalidToolCall": {"description": "Allowance for errors made by LLM.\\n\\nHere we add an `error` key to surface errors made during generation\\n(e.g., invalid JSON arguments.)", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "error": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Error"}, "type": {"const": "invalid_tool_call", "enum": ["invalid_tool_call"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "error"], "title": "InvalidToolCall", "type": "object"}, "OutputTokenDetails": {"description": "Breakdown of output token counts.\\n\\nDoes *not* need to sum to full output token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n\\n.. 
versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "reasoning": {"title": "Reasoning", "type": "integer"}}, "title": "OutputTokenDetails", "type": "object"}, "SystemMessage": {"additionalProperties": true, "description": "Message for priming AI behavior.\\n\\nThe system message is usually passed in as the first of a sequence\\nof input messages.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Define a chat model and invoke it with the messages\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "system", "default": "system", "enum": ["system"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessage", "type": "object"}, "SystemMessageChunk": {"additionalProperties": true, "description": "System Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "SystemMessageChunk", "default": "SystemMessageChunk", "enum": ["SystemMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, 
{"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessageChunk", "type": "object"}, "ToolCall": {"description": "Represents a request to call a tool.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"name\\": \\"foo\\",\\n \\"args\\": {\\"a\\": 1},\\n \\"id\\": \\"123\\"\\n }\\n\\n This represents a request to call the tool named \\"foo\\" with arguments {\\"a\\": 1}\\n and an identifier of \\"123\\".", "properties": {"name": {"title": "Name", "type": "string"}, "args": {"title": "Args", "type": "object"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "type": {"const": "tool_call", "enum": ["tool_call"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id"], "title": "ToolCall", "type": "object"}, "ToolCallChunk": {"description": "A chunk of a tool call (e.g., as part of a stream).\\n\\nWhen merging ToolCallChunks (e.g., via AIMessageChunk.__add__),\\nall string attributes are concatenated. Chunks are only merged if their\\nvalues of `index` are equal and not None.\\n\\nExample:\\n\\n.. 
code-block:: python\\n\\n left_chunks = [ToolCallChunk(name=\\"foo\\", args=\'{\\"a\\":\', index=0)]\\n right_chunks = [ToolCallChunk(name=None, args=\'1}\', index=0)]\\n\\n (\\n AIMessageChunk(content=\\"\\", tool_call_chunks=left_chunks)\\n + AIMessageChunk(content=\\"\\", tool_call_chunks=right_chunks)\\n ).tool_call_chunks == [ToolCallChunk(name=\'foo\', args=\'{\\"a\\":1}\', index=0)]", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "index": {"anyOf": [{"type": "integer"}, {"type": "null"}], "title": "Index"}, "type": {"const": "tool_call_chunk", "enum": ["tool_call_chunk"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "index"], "title": "ToolCallChunk", "type": "object"}, "ToolMessage": {"additionalProperties": true, "description": "Message for passing the result of executing a tool back to a model.\\n\\nToolMessages contain the result of a tool invocation. Typically, the result\\nis encoded inside the `content` field.\\n\\nExample: A ToolMessage representing a result of 42 from a tool call with id\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n ToolMessage(content=\'42\', tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\')\\n\\n\\nExample: A ToolMessage where only part of the tool output is sent to the model\\n and the full output is passed in to artifact.\\n\\n .. versionadded:: 0.2.17\\n\\n .. 
code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n tool_output = {\\n \\"stdout\\": \\"From the graph we can see that the correlation between x and y is ...\\",\\n \\"stderr\\": None,\\n \\"artifacts\\": {\\"type\\": \\"image\\", \\"base64_data\\": \\"/9j/4gIcSU...\\"},\\n }\\n\\n ToolMessage(\\n content=tool_output[\\"stdout\\"],\\n artifact=tool_output,\\n tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\',\\n )\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "tool", "default": "tool", "enum": ["tool"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessage", "type": "object"}, "ToolMessageChunk": {"additionalProperties": true, "description": "Tool Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ToolMessageChunk", "default": "ToolMessageChunk", "enum": 
["ToolMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessageChunk", "type": "object"}, "UsageMetadata": {"description": "Usage metadata for a message, such as token counts.\\n\\nThis is a standard representation of token usage that is consistent across models.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"input_tokens\\": 350,\\n \\"output_tokens\\": 240,\\n \\"total_tokens\\": 590,\\n \\"input_token_details\\": {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n },\\n \\"output_token_details\\": {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n }\\n\\n.. 
versionchanged:: 0.3.9\\n\\n Added ``input_token_details`` and ``output_token_details``.", "properties": {"input_tokens": {"title": "Input Tokens", "type": "integer"}, "output_tokens": {"title": "Output Tokens", "type": "integer"}, "total_tokens": {"title": "Total Tokens", "type": "integer"}, "input_token_details": {"$ref": "#/$defs/InputTokenDetails"}, "output_token_details": {"$ref": "#/$defs/OutputTokenDetails"}}, "required": ["input_tokens", "output_tokens", "total_tokens"], "title": "UsageMetadata", "type": "object"}}, "default": null, "items": {"oneOf": [{"$ref": "#/$defs/AIMessage"}, {"$ref": "#/$defs/HumanMessage"}, {"$ref": "#/$defs/ChatMessage"}, {"$ref": "#/$defs/SystemMessage"}, {"$ref": "#/$defs/FunctionMessage"}, {"$ref": "#/$defs/ToolMessage"}, {"$ref": "#/$defs/AIMessageChunk"}, {"$ref": "#/$defs/HumanMessageChunk"}, {"$ref": "#/$defs/ChatMessageChunk"}, {"$ref": "#/$defs/SystemMessageChunk"}, {"$ref": "#/$defs/FunctionMessageChunk"}, {"$ref": "#/$defs/ToolMessageChunk"}]}, "title": "LangGraphInput", "type": "array"}'
# ---
# name: test_message_graph[postgres].1
'{"$defs": {"AIMessage": {"additionalProperties": true, "description": "Message from an AI.\\n\\nAIMessage is returned from a chat model as a response to a prompt.\\n\\nThis message represents the output of the model and consists of both\\nthe raw output as returned by the model together standardized fields\\n(e.g., tool calls, usage metadata) added by the LangChain framework.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ai", "default": "ai", "enum": ["ai"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": "#/$defs/InvalidToolCall"}, "title": "Invalid Tool Calls", "type": "array"}, "usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, {"type": "null"}], "default": null}}, "required": ["content"], "title": "AIMessage", "type": "object"}, "AIMessageChunk": {"additionalProperties": true, "description": "Message chunk from an AI.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "AIMessageChunk", "default": "AIMessageChunk", "enum": ["AIMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": 
"string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": "#/$defs/InvalidToolCall"}, "title": "Invalid Tool Calls", "type": "array"}, "usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, {"type": "null"}], "default": null}, "tool_call_chunks": {"default": [], "items": {"$ref": "#/$defs/ToolCallChunk"}, "title": "Tool Call Chunks", "type": "array"}}, "required": ["content"], "title": "AIMessageChunk", "type": "object"}, "ChatMessage": {"additionalProperties": true, "description": "Message that can be assigned an arbitrary speaker (i.e. role).", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "chat", "default": "chat", "enum": ["chat"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], "title": "ChatMessage", "type": "object"}, "ChatMessageChunk": {"additionalProperties": true, "description": "Chat Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": 
{"const": "ChatMessageChunk", "default": "ChatMessageChunk", "enum": ["ChatMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], "title": "ChatMessageChunk", "type": "object"}, "FunctionMessage": {"additionalProperties": true, "description": "Message for passing the result of executing a tool back to a model.\\n\\nFunctionMessage are an older version of the ToolMessage schema, and\\ndo not contain the tool_call_id field.\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "function", "default": "function", "enum": ["function"], "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessage", "type": "object"}, "FunctionMessageChunk": {"additionalProperties": true, "description": "Function Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "FunctionMessageChunk", "default": "FunctionMessageChunk", 
"enum": ["FunctionMessageChunk"], "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessageChunk", "type": "object"}, "HumanMessage": {"additionalProperties": true, "description": "Message from a human.\\n\\nHumanMessages are messages that are passed in from a human to the model.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Instantiate a chat model and invoke it with the messages\\n model = ...\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "human", "default": "human", "enum": ["human"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], "title": "HumanMessage", "type": "object"}, "HumanMessageChunk": {"additionalProperties": true, "description": "Human Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": 
"HumanMessageChunk", "default": "HumanMessageChunk", "enum": ["HumanMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], "title": "HumanMessageChunk", "type": "object"}, "InputTokenDetails": {"description": "Breakdown of input token counts.\\n\\nDoes *not* need to sum to full input token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n }\\n\\n.. versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "cache_creation": {"title": "Cache Creation", "type": "integer"}, "cache_read": {"title": "Cache Read", "type": "integer"}}, "title": "InputTokenDetails", "type": "object"}, "InvalidToolCall": {"description": "Allowance for errors made by LLM.\\n\\nHere we add an `error` key to surface errors made during generation\\n(e.g., invalid JSON arguments.)", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "error": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Error"}, "type": {"const": "invalid_tool_call", "enum": ["invalid_tool_call"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "error"], "title": "InvalidToolCall", "type": "object"}, "OutputTokenDetails": {"description": "Breakdown of output token counts.\\n\\nDoes *not* need to sum to full output token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n\\n.. 
versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "reasoning": {"title": "Reasoning", "type": "integer"}}, "title": "OutputTokenDetails", "type": "object"}, "SystemMessage": {"additionalProperties": true, "description": "Message for priming AI behavior.\\n\\nThe system message is usually passed in as the first of a sequence\\nof input messages.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Define a chat model and invoke it with the messages\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "system", "default": "system", "enum": ["system"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessage", "type": "object"}, "SystemMessageChunk": {"additionalProperties": true, "description": "System Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "SystemMessageChunk", "default": "SystemMessageChunk", "enum": ["SystemMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, 
{"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessageChunk", "type": "object"}, "ToolCall": {"description": "Represents a request to call a tool.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"name\\": \\"foo\\",\\n \\"args\\": {\\"a\\": 1},\\n \\"id\\": \\"123\\"\\n }\\n\\n This represents a request to call the tool named \\"foo\\" with arguments {\\"a\\": 1}\\n and an identifier of \\"123\\".", "properties": {"name": {"title": "Name", "type": "string"}, "args": {"title": "Args", "type": "object"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "type": {"const": "tool_call", "enum": ["tool_call"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id"], "title": "ToolCall", "type": "object"}, "ToolCallChunk": {"description": "A chunk of a tool call (e.g., as part of a stream).\\n\\nWhen merging ToolCallChunks (e.g., via AIMessageChunk.__add__),\\nall string attributes are concatenated. Chunks are only merged if their\\nvalues of `index` are equal and not None.\\n\\nExample:\\n\\n.. 
code-block:: python\\n\\n left_chunks = [ToolCallChunk(name=\\"foo\\", args=\'{\\"a\\":\', index=0)]\\n right_chunks = [ToolCallChunk(name=None, args=\'1}\', index=0)]\\n\\n (\\n AIMessageChunk(content=\\"\\", tool_call_chunks=left_chunks)\\n + AIMessageChunk(content=\\"\\", tool_call_chunks=right_chunks)\\n ).tool_call_chunks == [ToolCallChunk(name=\'foo\', args=\'{\\"a\\":1}\', index=0)]", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "index": {"anyOf": [{"type": "integer"}, {"type": "null"}], "title": "Index"}, "type": {"const": "tool_call_chunk", "enum": ["tool_call_chunk"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "index"], "title": "ToolCallChunk", "type": "object"}, "ToolMessage": {"additionalProperties": true, "description": "Message for passing the result of executing a tool back to a model.\\n\\nToolMessages contain the result of a tool invocation. Typically, the result\\nis encoded inside the `content` field.\\n\\nExample: A ToolMessage representing a result of 42 from a tool call with id\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n ToolMessage(content=\'42\', tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\')\\n\\n\\nExample: A ToolMessage where only part of the tool output is sent to the model\\n and the full output is passed in to artifact.\\n\\n .. versionadded:: 0.2.17\\n\\n .. 
code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n tool_output = {\\n \\"stdout\\": \\"From the graph we can see that the correlation between x and y is ...\\",\\n \\"stderr\\": None,\\n \\"artifacts\\": {\\"type\\": \\"image\\", \\"base64_data\\": \\"/9j/4gIcSU...\\"},\\n }\\n\\n ToolMessage(\\n content=tool_output[\\"stdout\\"],\\n artifact=tool_output,\\n tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\',\\n )\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "tool", "default": "tool", "enum": ["tool"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessage", "type": "object"}, "ToolMessageChunk": {"additionalProperties": true, "description": "Tool Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ToolMessageChunk", "default": "ToolMessageChunk", "enum": 
["ToolMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessageChunk", "type": "object"}, "UsageMetadata": {"description": "Usage metadata for a message, such as token counts.\\n\\nThis is a standard representation of token usage that is consistent across models.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"input_tokens\\": 350,\\n \\"output_tokens\\": 240,\\n \\"total_tokens\\": 590,\\n \\"input_token_details\\": {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n },\\n \\"output_token_details\\": {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n }\\n\\n.. 
versionchanged:: 0.3.9\\n\\n Added ``input_token_details`` and ``output_token_details``.", "properties": {"input_tokens": {"title": "Input Tokens", "type": "integer"}, "output_tokens": {"title": "Output Tokens", "type": "integer"}, "total_tokens": {"title": "Total Tokens", "type": "integer"}, "input_token_details": {"$ref": "#/$defs/InputTokenDetails"}, "output_token_details": {"$ref": "#/$defs/OutputTokenDetails"}}, "required": ["input_tokens", "output_tokens", "total_tokens"], "title": "UsageMetadata", "type": "object"}}, "default": null, "items": {"oneOf": [{"$ref": "#/$defs/AIMessage"}, {"$ref": "#/$defs/HumanMessage"}, {"$ref": "#/$defs/ChatMessage"}, {"$ref": "#/$defs/SystemMessage"}, {"$ref": "#/$defs/FunctionMessage"}, {"$ref": "#/$defs/ToolMessage"}, {"$ref": "#/$defs/AIMessageChunk"}, {"$ref": "#/$defs/HumanMessageChunk"}, {"$ref": "#/$defs/ChatMessageChunk"}, {"$ref": "#/$defs/SystemMessageChunk"}, {"$ref": "#/$defs/FunctionMessageChunk"}, {"$ref": "#/$defs/ToolMessageChunk"}]}, "title": "LangGraphOutput", "type": "array"}'
# ---
# name: test_message_graph[postgres].2
'''
{
"nodes": [
{
"id": "__start__",
"type": "schema",
"data": "__start__"
},
{
"id": "agent",
"type": "runnable",
"data": {
"id": [
"tests",
"test_pregel",
"FakeFuntionChatModel"
],
"name": "agent"
}
},
{
"id": "tools",
"type": "runnable",
"data": {
"id": [
"langgraph",
"prebuilt",
"tool_node",
"ToolNode"
],
"name": "tools"
}
},
{
"id": "__end__",
"type": "schema",
"data": "__end__"
}
],
"edges": [
{
"source": "__start__",
"target": "agent"
},
{
"source": "tools",
"target": "agent"
},
{
"source": "agent",
"target": "tools",
"data": "continue",
"conditional": true
},
{
"source": "agent",
"target": "__end__",
"data": "end",
"conditional": true
}
]
}
'''
# ---
# name: test_message_graph[postgres].3
'''
graph TD;
__start__ --> agent;
tools --> agent;
agent -. continue .-> tools;
agent -. end .-> __end__;
'''
# ---
# name: test_message_graph[postgres_pipe]
'{"$defs": {"AIMessage": {"additionalProperties": true, "description": "Message from an AI.\\n\\nAIMessage is returned from a chat model as a response to a prompt.\\n\\nThis message represents the output of the model and consists of both\\nthe raw output as returned by the model together standardized fields\\n(e.g., tool calls, usage metadata) added by the LangChain framework.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ai", "default": "ai", "enum": ["ai"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": "#/$defs/InvalidToolCall"}, "title": "Invalid Tool Calls", "type": "array"}, "usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, {"type": "null"}], "default": null}}, "required": ["content"], "title": "AIMessage", "type": "object"}, "AIMessageChunk": {"additionalProperties": true, "description": "Message chunk from an AI.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "AIMessageChunk", "default": "AIMessageChunk", "enum": ["AIMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": 
"string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": "#/$defs/InvalidToolCall"}, "title": "Invalid Tool Calls", "type": "array"}, "usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, {"type": "null"}], "default": null}, "tool_call_chunks": {"default": [], "items": {"$ref": "#/$defs/ToolCallChunk"}, "title": "Tool Call Chunks", "type": "array"}}, "required": ["content"], "title": "AIMessageChunk", "type": "object"}, "ChatMessage": {"additionalProperties": true, "description": "Message that can be assigned an arbitrary speaker (i.e. role).", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "chat", "default": "chat", "enum": ["chat"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], "title": "ChatMessage", "type": "object"}, "ChatMessageChunk": {"additionalProperties": true, "description": "Chat Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": 
{"const": "ChatMessageChunk", "default": "ChatMessageChunk", "enum": ["ChatMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], "title": "ChatMessageChunk", "type": "object"}, "FunctionMessage": {"additionalProperties": true, "description": "Message for passing the result of executing a tool back to a model.\\n\\nFunctionMessage are an older version of the ToolMessage schema, and\\ndo not contain the tool_call_id field.\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "function", "default": "function", "enum": ["function"], "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessage", "type": "object"}, "FunctionMessageChunk": {"additionalProperties": true, "description": "Function Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "FunctionMessageChunk", "default": "FunctionMessageChunk", 
"enum": ["FunctionMessageChunk"], "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessageChunk", "type": "object"}, "HumanMessage": {"additionalProperties": true, "description": "Message from a human.\\n\\nHumanMessages are messages that are passed in from a human to the model.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Instantiate a chat model and invoke it with the messages\\n model = ...\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "human", "default": "human", "enum": ["human"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], "title": "HumanMessage", "type": "object"}, "HumanMessageChunk": {"additionalProperties": true, "description": "Human Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": 
"HumanMessageChunk", "default": "HumanMessageChunk", "enum": ["HumanMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], "title": "HumanMessageChunk", "type": "object"}, "InputTokenDetails": {"description": "Breakdown of input token counts.\\n\\nDoes *not* need to sum to full input token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n }\\n\\n.. versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "cache_creation": {"title": "Cache Creation", "type": "integer"}, "cache_read": {"title": "Cache Read", "type": "integer"}}, "title": "InputTokenDetails", "type": "object"}, "InvalidToolCall": {"description": "Allowance for errors made by LLM.\\n\\nHere we add an `error` key to surface errors made during generation\\n(e.g., invalid JSON arguments.)", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "error": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Error"}, "type": {"const": "invalid_tool_call", "enum": ["invalid_tool_call"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "error"], "title": "InvalidToolCall", "type": "object"}, "OutputTokenDetails": {"description": "Breakdown of output token counts.\\n\\nDoes *not* need to sum to full output token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n\\n.. 
versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "reasoning": {"title": "Reasoning", "type": "integer"}}, "title": "OutputTokenDetails", "type": "object"}, "SystemMessage": {"additionalProperties": true, "description": "Message for priming AI behavior.\\n\\nThe system message is usually passed in as the first of a sequence\\nof input messages.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Define a chat model and invoke it with the messages\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "system", "default": "system", "enum": ["system"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessage", "type": "object"}, "SystemMessageChunk": {"additionalProperties": true, "description": "System Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "SystemMessageChunk", "default": "SystemMessageChunk", "enum": ["SystemMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, 
{"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessageChunk", "type": "object"}, "ToolCall": {"description": "Represents a request to call a tool.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"name\\": \\"foo\\",\\n \\"args\\": {\\"a\\": 1},\\n \\"id\\": \\"123\\"\\n }\\n\\n This represents a request to call the tool named \\"foo\\" with arguments {\\"a\\": 1}\\n and an identifier of \\"123\\".", "properties": {"name": {"title": "Name", "type": "string"}, "args": {"title": "Args", "type": "object"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "type": {"const": "tool_call", "enum": ["tool_call"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id"], "title": "ToolCall", "type": "object"}, "ToolCallChunk": {"description": "A chunk of a tool call (e.g., as part of a stream).\\n\\nWhen merging ToolCallChunks (e.g., via AIMessageChunk.__add__),\\nall string attributes are concatenated. Chunks are only merged if their\\nvalues of `index` are equal and not None.\\n\\nExample:\\n\\n.. 
code-block:: python\\n\\n left_chunks = [ToolCallChunk(name=\\"foo\\", args=\'{\\"a\\":\', index=0)]\\n right_chunks = [ToolCallChunk(name=None, args=\'1}\', index=0)]\\n\\n (\\n AIMessageChunk(content=\\"\\", tool_call_chunks=left_chunks)\\n + AIMessageChunk(content=\\"\\", tool_call_chunks=right_chunks)\\n ).tool_call_chunks == [ToolCallChunk(name=\'foo\', args=\'{\\"a\\":1}\', index=0)]", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "index": {"anyOf": [{"type": "integer"}, {"type": "null"}], "title": "Index"}, "type": {"const": "tool_call_chunk", "enum": ["tool_call_chunk"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "index"], "title": "ToolCallChunk", "type": "object"}, "ToolMessage": {"additionalProperties": true, "description": "Message for passing the result of executing a tool back to a model.\\n\\nToolMessages contain the result of a tool invocation. Typically, the result\\nis encoded inside the `content` field.\\n\\nExample: A ToolMessage representing a result of 42 from a tool call with id\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n ToolMessage(content=\'42\', tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\')\\n\\n\\nExample: A ToolMessage where only part of the tool output is sent to the model\\n and the full output is passed in to artifact.\\n\\n .. versionadded:: 0.2.17\\n\\n .. 
code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n tool_output = {\\n \\"stdout\\": \\"From the graph we can see that the correlation between x and y is ...\\",\\n \\"stderr\\": None,\\n \\"artifacts\\": {\\"type\\": \\"image\\", \\"base64_data\\": \\"/9j/4gIcSU...\\"},\\n }\\n\\n ToolMessage(\\n content=tool_output[\\"stdout\\"],\\n artifact=tool_output,\\n tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\',\\n )\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "tool", "default": "tool", "enum": ["tool"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessage", "type": "object"}, "ToolMessageChunk": {"additionalProperties": true, "description": "Tool Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ToolMessageChunk", "default": "ToolMessageChunk", "enum": 
["ToolMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessageChunk", "type": "object"}, "UsageMetadata": {"description": "Usage metadata for a message, such as token counts.\\n\\nThis is a standard representation of token usage that is consistent across models.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"input_tokens\\": 350,\\n \\"output_tokens\\": 240,\\n \\"total_tokens\\": 590,\\n \\"input_token_details\\": {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n },\\n \\"output_token_details\\": {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n }\\n\\n.. 
versionchanged:: 0.3.9\\n\\n Added ``input_token_details`` and ``output_token_details``.", "properties": {"input_tokens": {"title": "Input Tokens", "type": "integer"}, "output_tokens": {"title": "Output Tokens", "type": "integer"}, "total_tokens": {"title": "Total Tokens", "type": "integer"}, "input_token_details": {"$ref": "#/$defs/InputTokenDetails"}, "output_token_details": {"$ref": "#/$defs/OutputTokenDetails"}}, "required": ["input_tokens", "output_tokens", "total_tokens"], "title": "UsageMetadata", "type": "object"}}, "default": null, "items": {"oneOf": [{"$ref": "#/$defs/AIMessage"}, {"$ref": "#/$defs/HumanMessage"}, {"$ref": "#/$defs/ChatMessage"}, {"$ref": "#/$defs/SystemMessage"}, {"$ref": "#/$defs/FunctionMessage"}, {"$ref": "#/$defs/ToolMessage"}, {"$ref": "#/$defs/AIMessageChunk"}, {"$ref": "#/$defs/HumanMessageChunk"}, {"$ref": "#/$defs/ChatMessageChunk"}, {"$ref": "#/$defs/SystemMessageChunk"}, {"$ref": "#/$defs/FunctionMessageChunk"}, {"$ref": "#/$defs/ToolMessageChunk"}]}, "title": "LangGraphInput", "type": "array"}'
# ---
# name: test_message_graph[postgres_pipe].1
'{"$defs": {"AIMessage": {"additionalProperties": true, "description": "Message from an AI.\\n\\nAIMessage is returned from a chat model as a response to a prompt.\\n\\nThis message represents the output of the model and consists of both\\nthe raw output as returned by the model together standardized fields\\n(e.g., tool calls, usage metadata) added by the LangChain framework.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ai", "default": "ai", "enum": ["ai"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": "#/$defs/InvalidToolCall"}, "title": "Invalid Tool Calls", "type": "array"}, "usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, {"type": "null"}], "default": null}}, "required": ["content"], "title": "AIMessage", "type": "object"}, "AIMessageChunk": {"additionalProperties": true, "description": "Message chunk from an AI.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "AIMessageChunk", "default": "AIMessageChunk", "enum": ["AIMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": 
"string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": "#/$defs/InvalidToolCall"}, "title": "Invalid Tool Calls", "type": "array"}, "usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, {"type": "null"}], "default": null}, "tool_call_chunks": {"default": [], "items": {"$ref": "#/$defs/ToolCallChunk"}, "title": "Tool Call Chunks", "type": "array"}}, "required": ["content"], "title": "AIMessageChunk", "type": "object"}, "ChatMessage": {"additionalProperties": true, "description": "Message that can be assigned an arbitrary speaker (i.e. role).", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "chat", "default": "chat", "enum": ["chat"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], "title": "ChatMessage", "type": "object"}, "ChatMessageChunk": {"additionalProperties": true, "description": "Chat Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": 
{"const": "ChatMessageChunk", "default": "ChatMessageChunk", "enum": ["ChatMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], "title": "ChatMessageChunk", "type": "object"}, "FunctionMessage": {"additionalProperties": true, "description": "Message for passing the result of executing a tool back to a model.\\n\\nFunctionMessage are an older version of the ToolMessage schema, and\\ndo not contain the tool_call_id field.\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "function", "default": "function", "enum": ["function"], "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessage", "type": "object"}, "FunctionMessageChunk": {"additionalProperties": true, "description": "Function Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "FunctionMessageChunk", "default": "FunctionMessageChunk", 
"enum": ["FunctionMessageChunk"], "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessageChunk", "type": "object"}, "HumanMessage": {"additionalProperties": true, "description": "Message from a human.\\n\\nHumanMessages are messages that are passed in from a human to the model.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Instantiate a chat model and invoke it with the messages\\n model = ...\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "human", "default": "human", "enum": ["human"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], "title": "HumanMessage", "type": "object"}, "HumanMessageChunk": {"additionalProperties": true, "description": "Human Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": 
"HumanMessageChunk", "default": "HumanMessageChunk", "enum": ["HumanMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], "title": "HumanMessageChunk", "type": "object"}, "InputTokenDetails": {"description": "Breakdown of input token counts.\\n\\nDoes *not* need to sum to full input token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n }\\n\\n.. versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "cache_creation": {"title": "Cache Creation", "type": "integer"}, "cache_read": {"title": "Cache Read", "type": "integer"}}, "title": "InputTokenDetails", "type": "object"}, "InvalidToolCall": {"description": "Allowance for errors made by LLM.\\n\\nHere we add an `error` key to surface errors made during generation\\n(e.g., invalid JSON arguments.)", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "error": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Error"}, "type": {"const": "invalid_tool_call", "enum": ["invalid_tool_call"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "error"], "title": "InvalidToolCall", "type": "object"}, "OutputTokenDetails": {"description": "Breakdown of output token counts.\\n\\nDoes *not* need to sum to full output token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n\\n.. 
versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "reasoning": {"title": "Reasoning", "type": "integer"}}, "title": "OutputTokenDetails", "type": "object"}, "SystemMessage": {"additionalProperties": true, "description": "Message for priming AI behavior.\\n\\nThe system message is usually passed in as the first of a sequence\\nof input messages.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Define a chat model and invoke it with the messages\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "system", "default": "system", "enum": ["system"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessage", "type": "object"}, "SystemMessageChunk": {"additionalProperties": true, "description": "System Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "SystemMessageChunk", "default": "SystemMessageChunk", "enum": ["SystemMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, 
{"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessageChunk", "type": "object"}, "ToolCall": {"description": "Represents a request to call a tool.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"name\\": \\"foo\\",\\n \\"args\\": {\\"a\\": 1},\\n \\"id\\": \\"123\\"\\n }\\n\\n This represents a request to call the tool named \\"foo\\" with arguments {\\"a\\": 1}\\n and an identifier of \\"123\\".", "properties": {"name": {"title": "Name", "type": "string"}, "args": {"title": "Args", "type": "object"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "type": {"const": "tool_call", "enum": ["tool_call"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id"], "title": "ToolCall", "type": "object"}, "ToolCallChunk": {"description": "A chunk of a tool call (e.g., as part of a stream).\\n\\nWhen merging ToolCallChunks (e.g., via AIMessageChunk.__add__),\\nall string attributes are concatenated. Chunks are only merged if their\\nvalues of `index` are equal and not None.\\n\\nExample:\\n\\n.. 
code-block:: python\\n\\n left_chunks = [ToolCallChunk(name=\\"foo\\", args=\'{\\"a\\":\', index=0)]\\n right_chunks = [ToolCallChunk(name=None, args=\'1}\', index=0)]\\n\\n (\\n AIMessageChunk(content=\\"\\", tool_call_chunks=left_chunks)\\n + AIMessageChunk(content=\\"\\", tool_call_chunks=right_chunks)\\n ).tool_call_chunks == [ToolCallChunk(name=\'foo\', args=\'{\\"a\\":1}\', index=0)]", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "index": {"anyOf": [{"type": "integer"}, {"type": "null"}], "title": "Index"}, "type": {"const": "tool_call_chunk", "enum": ["tool_call_chunk"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "index"], "title": "ToolCallChunk", "type": "object"}, "ToolMessage": {"additionalProperties": true, "description": "Message for passing the result of executing a tool back to a model.\\n\\nToolMessages contain the result of a tool invocation. Typically, the result\\nis encoded inside the `content` field.\\n\\nExample: A ToolMessage representing a result of 42 from a tool call with id\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n ToolMessage(content=\'42\', tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\')\\n\\n\\nExample: A ToolMessage where only part of the tool output is sent to the model\\n and the full output is passed in to artifact.\\n\\n .. versionadded:: 0.2.17\\n\\n .. 
code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n tool_output = {\\n \\"stdout\\": \\"From the graph we can see that the correlation between x and y is ...\\",\\n \\"stderr\\": None,\\n \\"artifacts\\": {\\"type\\": \\"image\\", \\"base64_data\\": \\"/9j/4gIcSU...\\"},\\n }\\n\\n ToolMessage(\\n content=tool_output[\\"stdout\\"],\\n artifact=tool_output,\\n tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\',\\n )\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "tool", "default": "tool", "enum": ["tool"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessage", "type": "object"}, "ToolMessageChunk": {"additionalProperties": true, "description": "Tool Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ToolMessageChunk", "default": "ToolMessageChunk", "enum": 
["ToolMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessageChunk", "type": "object"}, "UsageMetadata": {"description": "Usage metadata for a message, such as token counts.\\n\\nThis is a standard representation of token usage that is consistent across models.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"input_tokens\\": 350,\\n \\"output_tokens\\": 240,\\n \\"total_tokens\\": 590,\\n \\"input_token_details\\": {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n },\\n \\"output_token_details\\": {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n }\\n\\n.. 
versionchanged:: 0.3.9\\n\\n Added ``input_token_details`` and ``output_token_details``.", "properties": {"input_tokens": {"title": "Input Tokens", "type": "integer"}, "output_tokens": {"title": "Output Tokens", "type": "integer"}, "total_tokens": {"title": "Total Tokens", "type": "integer"}, "input_token_details": {"$ref": "#/$defs/InputTokenDetails"}, "output_token_details": {"$ref": "#/$defs/OutputTokenDetails"}}, "required": ["input_tokens", "output_tokens", "total_tokens"], "title": "UsageMetadata", "type": "object"}}, "default": null, "items": {"oneOf": [{"$ref": "#/$defs/AIMessage"}, {"$ref": "#/$defs/HumanMessage"}, {"$ref": "#/$defs/ChatMessage"}, {"$ref": "#/$defs/SystemMessage"}, {"$ref": "#/$defs/FunctionMessage"}, {"$ref": "#/$defs/ToolMessage"}, {"$ref": "#/$defs/AIMessageChunk"}, {"$ref": "#/$defs/HumanMessageChunk"}, {"$ref": "#/$defs/ChatMessageChunk"}, {"$ref": "#/$defs/SystemMessageChunk"}, {"$ref": "#/$defs/FunctionMessageChunk"}, {"$ref": "#/$defs/ToolMessageChunk"}]}, "title": "LangGraphOutput", "type": "array"}'
# ---
# name: test_message_graph[postgres_pipe].2
'''
{
"nodes": [
{
"id": "__start__",
"type": "schema",
"data": "__start__"
},
{
"id": "agent",
"type": "runnable",
"data": {
"id": [
"tests",
"test_pregel",
"FakeFuntionChatModel"
],
"name": "agent"
}
},
{
"id": "tools",
"type": "runnable",
"data": {
"id": [
"langgraph",
"prebuilt",
"tool_node",
"ToolNode"
],
"name": "tools"
}
},
{
"id": "__end__",
"type": "schema",
"data": "__end__"
}
],
"edges": [
{
"source": "__start__",
"target": "agent"
},
{
"source": "tools",
"target": "agent"
},
{
"source": "agent",
"target": "tools",
"data": "continue",
"conditional": true
},
{
"source": "agent",
"target": "__end__",
"data": "end",
"conditional": true
}
]
}
'''
# ---
# name: test_message_graph[postgres_pipe].3
'''
graph TD;
__start__ --> agent;
tools --> agent;
agent -. continue .-> tools;
agent -. end .-> __end__;
'''
# ---
# name: test_message_graph[postgres_pool]
'{"$defs": {"AIMessage": {"additionalProperties": true, "description": "Message from an AI.\\n\\nAIMessage is returned from a chat model as a response to a prompt.\\n\\nThis message represents the output of the model and consists of both\\nthe raw output as returned by the model together standardized fields\\n(e.g., tool calls, usage metadata) added by the LangChain framework.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ai", "default": "ai", "enum": ["ai"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": "#/$defs/InvalidToolCall"}, "title": "Invalid Tool Calls", "type": "array"}, "usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, {"type": "null"}], "default": null}}, "required": ["content"], "title": "AIMessage", "type": "object"}, "AIMessageChunk": {"additionalProperties": true, "description": "Message chunk from an AI.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "AIMessageChunk", "default": "AIMessageChunk", "enum": ["AIMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": 
"string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": "#/$defs/InvalidToolCall"}, "title": "Invalid Tool Calls", "type": "array"}, "usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, {"type": "null"}], "default": null}, "tool_call_chunks": {"default": [], "items": {"$ref": "#/$defs/ToolCallChunk"}, "title": "Tool Call Chunks", "type": "array"}}, "required": ["content"], "title": "AIMessageChunk", "type": "object"}, "ChatMessage": {"additionalProperties": true, "description": "Message that can be assigned an arbitrary speaker (i.e. role).", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "chat", "default": "chat", "enum": ["chat"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], "title": "ChatMessage", "type": "object"}, "ChatMessageChunk": {"additionalProperties": true, "description": "Chat Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": 
{"const": "ChatMessageChunk", "default": "ChatMessageChunk", "enum": ["ChatMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], "title": "ChatMessageChunk", "type": "object"}, "FunctionMessage": {"additionalProperties": true, "description": "Message for passing the result of executing a tool back to a model.\\n\\nFunctionMessage are an older version of the ToolMessage schema, and\\ndo not contain the tool_call_id field.\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "function", "default": "function", "enum": ["function"], "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessage", "type": "object"}, "FunctionMessageChunk": {"additionalProperties": true, "description": "Function Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "FunctionMessageChunk", "default": "FunctionMessageChunk", 
"enum": ["FunctionMessageChunk"], "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessageChunk", "type": "object"}, "HumanMessage": {"additionalProperties": true, "description": "Message from a human.\\n\\nHumanMessages are messages that are passed in from a human to the model.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Instantiate a chat model and invoke it with the messages\\n model = ...\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "human", "default": "human", "enum": ["human"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], "title": "HumanMessage", "type": "object"}, "HumanMessageChunk": {"additionalProperties": true, "description": "Human Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": 
"HumanMessageChunk", "default": "HumanMessageChunk", "enum": ["HumanMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], "title": "HumanMessageChunk", "type": "object"}, "InputTokenDetails": {"description": "Breakdown of input token counts.\\n\\nDoes *not* need to sum to full input token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n }\\n\\n.. versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "cache_creation": {"title": "Cache Creation", "type": "integer"}, "cache_read": {"title": "Cache Read", "type": "integer"}}, "title": "InputTokenDetails", "type": "object"}, "InvalidToolCall": {"description": "Allowance for errors made by LLM.\\n\\nHere we add an `error` key to surface errors made during generation\\n(e.g., invalid JSON arguments.)", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "error": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Error"}, "type": {"const": "invalid_tool_call", "enum": ["invalid_tool_call"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "error"], "title": "InvalidToolCall", "type": "object"}, "OutputTokenDetails": {"description": "Breakdown of output token counts.\\n\\nDoes *not* need to sum to full output token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n\\n.. 
versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "reasoning": {"title": "Reasoning", "type": "integer"}}, "title": "OutputTokenDetails", "type": "object"}, "SystemMessage": {"additionalProperties": true, "description": "Message for priming AI behavior.\\n\\nThe system message is usually passed in as the first of a sequence\\nof input messages.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Define a chat model and invoke it with the messages\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "system", "default": "system", "enum": ["system"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessage", "type": "object"}, "SystemMessageChunk": {"additionalProperties": true, "description": "System Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "SystemMessageChunk", "default": "SystemMessageChunk", "enum": ["SystemMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, 
{"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessageChunk", "type": "object"}, "ToolCall": {"description": "Represents a request to call a tool.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"name\\": \\"foo\\",\\n \\"args\\": {\\"a\\": 1},\\n \\"id\\": \\"123\\"\\n }\\n\\n This represents a request to call the tool named \\"foo\\" with arguments {\\"a\\": 1}\\n and an identifier of \\"123\\".", "properties": {"name": {"title": "Name", "type": "string"}, "args": {"title": "Args", "type": "object"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "type": {"const": "tool_call", "enum": ["tool_call"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id"], "title": "ToolCall", "type": "object"}, "ToolCallChunk": {"description": "A chunk of a tool call (e.g., as part of a stream).\\n\\nWhen merging ToolCallChunks (e.g., via AIMessageChunk.__add__),\\nall string attributes are concatenated. Chunks are only merged if their\\nvalues of `index` are equal and not None.\\n\\nExample:\\n\\n.. 
code-block:: python\\n\\n left_chunks = [ToolCallChunk(name=\\"foo\\", args=\'{\\"a\\":\', index=0)]\\n right_chunks = [ToolCallChunk(name=None, args=\'1}\', index=0)]\\n\\n (\\n AIMessageChunk(content=\\"\\", tool_call_chunks=left_chunks)\\n + AIMessageChunk(content=\\"\\", tool_call_chunks=right_chunks)\\n ).tool_call_chunks == [ToolCallChunk(name=\'foo\', args=\'{\\"a\\":1}\', index=0)]", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "index": {"anyOf": [{"type": "integer"}, {"type": "null"}], "title": "Index"}, "type": {"const": "tool_call_chunk", "enum": ["tool_call_chunk"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "index"], "title": "ToolCallChunk", "type": "object"}, "ToolMessage": {"additionalProperties": true, "description": "Message for passing the result of executing a tool back to a model.\\n\\nToolMessages contain the result of a tool invocation. Typically, the result\\nis encoded inside the `content` field.\\n\\nExample: A ToolMessage representing a result of 42 from a tool call with id\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n ToolMessage(content=\'42\', tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\')\\n\\n\\nExample: A ToolMessage where only part of the tool output is sent to the model\\n and the full output is passed in to artifact.\\n\\n .. versionadded:: 0.2.17\\n\\n .. 
code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n tool_output = {\\n \\"stdout\\": \\"From the graph we can see that the correlation between x and y is ...\\",\\n \\"stderr\\": None,\\n \\"artifacts\\": {\\"type\\": \\"image\\", \\"base64_data\\": \\"/9j/4gIcSU...\\"},\\n }\\n\\n ToolMessage(\\n content=tool_output[\\"stdout\\"],\\n artifact=tool_output,\\n tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\',\\n )\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "tool", "default": "tool", "enum": ["tool"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessage", "type": "object"}, "ToolMessageChunk": {"additionalProperties": true, "description": "Tool Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ToolMessageChunk", "default": "ToolMessageChunk", "enum": 
["ToolMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessageChunk", "type": "object"}, "UsageMetadata": {"description": "Usage metadata for a message, such as token counts.\\n\\nThis is a standard representation of token usage that is consistent across models.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"input_tokens\\": 350,\\n \\"output_tokens\\": 240,\\n \\"total_tokens\\": 590,\\n \\"input_token_details\\": {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n },\\n \\"output_token_details\\": {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n }\\n\\n.. 
versionchanged:: 0.3.9\\n\\n Added ``input_token_details`` and ``output_token_details``.", "properties": {"input_tokens": {"title": "Input Tokens", "type": "integer"}, "output_tokens": {"title": "Output Tokens", "type": "integer"}, "total_tokens": {"title": "Total Tokens", "type": "integer"}, "input_token_details": {"$ref": "#/$defs/InputTokenDetails"}, "output_token_details": {"$ref": "#/$defs/OutputTokenDetails"}}, "required": ["input_tokens", "output_tokens", "total_tokens"], "title": "UsageMetadata", "type": "object"}}, "default": null, "items": {"oneOf": [{"$ref": "#/$defs/AIMessage"}, {"$ref": "#/$defs/HumanMessage"}, {"$ref": "#/$defs/ChatMessage"}, {"$ref": "#/$defs/SystemMessage"}, {"$ref": "#/$defs/FunctionMessage"}, {"$ref": "#/$defs/ToolMessage"}, {"$ref": "#/$defs/AIMessageChunk"}, {"$ref": "#/$defs/HumanMessageChunk"}, {"$ref": "#/$defs/ChatMessageChunk"}, {"$ref": "#/$defs/SystemMessageChunk"}, {"$ref": "#/$defs/FunctionMessageChunk"}, {"$ref": "#/$defs/ToolMessageChunk"}]}, "title": "LangGraphInput", "type": "array"}'
# ---
# name: test_message_graph[postgres_pool].1
'{"$defs": {"AIMessage": {"additionalProperties": true, "description": "Message from an AI.\\n\\nAIMessage is returned from a chat model as a response to a prompt.\\n\\nThis message represents the output of the model and consists of both\\nthe raw output as returned by the model together standardized fields\\n(e.g., tool calls, usage metadata) added by the LangChain framework.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ai", "default": "ai", "enum": ["ai"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": "#/$defs/InvalidToolCall"}, "title": "Invalid Tool Calls", "type": "array"}, "usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, {"type": "null"}], "default": null}}, "required": ["content"], "title": "AIMessage", "type": "object"}, "AIMessageChunk": {"additionalProperties": true, "description": "Message chunk from an AI.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "AIMessageChunk", "default": "AIMessageChunk", "enum": ["AIMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": 
"string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": "#/$defs/InvalidToolCall"}, "title": "Invalid Tool Calls", "type": "array"}, "usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, {"type": "null"}], "default": null}, "tool_call_chunks": {"default": [], "items": {"$ref": "#/$defs/ToolCallChunk"}, "title": "Tool Call Chunks", "type": "array"}}, "required": ["content"], "title": "AIMessageChunk", "type": "object"}, "ChatMessage": {"additionalProperties": true, "description": "Message that can be assigned an arbitrary speaker (i.e. role).", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "chat", "default": "chat", "enum": ["chat"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], "title": "ChatMessage", "type": "object"}, "ChatMessageChunk": {"additionalProperties": true, "description": "Chat Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": 
{"const": "ChatMessageChunk", "default": "ChatMessageChunk", "enum": ["ChatMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], "title": "ChatMessageChunk", "type": "object"}, "FunctionMessage": {"additionalProperties": true, "description": "Message for passing the result of executing a tool back to a model.\\n\\nFunctionMessage are an older version of the ToolMessage schema, and\\ndo not contain the tool_call_id field.\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "function", "default": "function", "enum": ["function"], "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessage", "type": "object"}, "FunctionMessageChunk": {"additionalProperties": true, "description": "Function Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "FunctionMessageChunk", "default": "FunctionMessageChunk", 
"enum": ["FunctionMessageChunk"], "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessageChunk", "type": "object"}, "HumanMessage": {"additionalProperties": true, "description": "Message from a human.\\n\\nHumanMessages are messages that are passed in from a human to the model.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Instantiate a chat model and invoke it with the messages\\n model = ...\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "human", "default": "human", "enum": ["human"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], "title": "HumanMessage", "type": "object"}, "HumanMessageChunk": {"additionalProperties": true, "description": "Human Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": 
"HumanMessageChunk", "default": "HumanMessageChunk", "enum": ["HumanMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], "title": "HumanMessageChunk", "type": "object"}, "InputTokenDetails": {"description": "Breakdown of input token counts.\\n\\nDoes *not* need to sum to full input token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n }\\n\\n.. versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "cache_creation": {"title": "Cache Creation", "type": "integer"}, "cache_read": {"title": "Cache Read", "type": "integer"}}, "title": "InputTokenDetails", "type": "object"}, "InvalidToolCall": {"description": "Allowance for errors made by LLM.\\n\\nHere we add an `error` key to surface errors made during generation\\n(e.g., invalid JSON arguments.)", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "error": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Error"}, "type": {"const": "invalid_tool_call", "enum": ["invalid_tool_call"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "error"], "title": "InvalidToolCall", "type": "object"}, "OutputTokenDetails": {"description": "Breakdown of output token counts.\\n\\nDoes *not* need to sum to full output token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n\\n.. 
versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "reasoning": {"title": "Reasoning", "type": "integer"}}, "title": "OutputTokenDetails", "type": "object"}, "SystemMessage": {"additionalProperties": true, "description": "Message for priming AI behavior.\\n\\nThe system message is usually passed in as the first of a sequence\\nof input messages.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Define a chat model and invoke it with the messages\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "system", "default": "system", "enum": ["system"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessage", "type": "object"}, "SystemMessageChunk": {"additionalProperties": true, "description": "System Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "SystemMessageChunk", "default": "SystemMessageChunk", "enum": ["SystemMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, 
{"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessageChunk", "type": "object"}, "ToolCall": {"description": "Represents a request to call a tool.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"name\\": \\"foo\\",\\n \\"args\\": {\\"a\\": 1},\\n \\"id\\": \\"123\\"\\n }\\n\\n This represents a request to call the tool named \\"foo\\" with arguments {\\"a\\": 1}\\n and an identifier of \\"123\\".", "properties": {"name": {"title": "Name", "type": "string"}, "args": {"title": "Args", "type": "object"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "type": {"const": "tool_call", "enum": ["tool_call"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id"], "title": "ToolCall", "type": "object"}, "ToolCallChunk": {"description": "A chunk of a tool call (e.g., as part of a stream).\\n\\nWhen merging ToolCallChunks (e.g., via AIMessageChunk.__add__),\\nall string attributes are concatenated. Chunks are only merged if their\\nvalues of `index` are equal and not None.\\n\\nExample:\\n\\n.. 
code-block:: python\\n\\n left_chunks = [ToolCallChunk(name=\\"foo\\", args=\'{\\"a\\":\', index=0)]\\n right_chunks = [ToolCallChunk(name=None, args=\'1}\', index=0)]\\n\\n (\\n AIMessageChunk(content=\\"\\", tool_call_chunks=left_chunks)\\n + AIMessageChunk(content=\\"\\", tool_call_chunks=right_chunks)\\n ).tool_call_chunks == [ToolCallChunk(name=\'foo\', args=\'{\\"a\\":1}\', index=0)]", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "index": {"anyOf": [{"type": "integer"}, {"type": "null"}], "title": "Index"}, "type": {"const": "tool_call_chunk", "enum": ["tool_call_chunk"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "index"], "title": "ToolCallChunk", "type": "object"}, "ToolMessage": {"additionalProperties": true, "description": "Message for passing the result of executing a tool back to a model.\\n\\nToolMessages contain the result of a tool invocation. Typically, the result\\nis encoded inside the `content` field.\\n\\nExample: A ToolMessage representing a result of 42 from a tool call with id\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n ToolMessage(content=\'42\', tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\')\\n\\n\\nExample: A ToolMessage where only part of the tool output is sent to the model\\n and the full output is passed in to artifact.\\n\\n .. versionadded:: 0.2.17\\n\\n .. 
code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n tool_output = {\\n \\"stdout\\": \\"From the graph we can see that the correlation between x and y is ...\\",\\n \\"stderr\\": None,\\n \\"artifacts\\": {\\"type\\": \\"image\\", \\"base64_data\\": \\"/9j/4gIcSU...\\"},\\n }\\n\\n ToolMessage(\\n content=tool_output[\\"stdout\\"],\\n artifact=tool_output,\\n tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\',\\n )\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "tool", "default": "tool", "enum": ["tool"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessage", "type": "object"}, "ToolMessageChunk": {"additionalProperties": true, "description": "Tool Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ToolMessageChunk", "default": "ToolMessageChunk", "enum": 
["ToolMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessageChunk", "type": "object"}, "UsageMetadata": {"description": "Usage metadata for a message, such as token counts.\\n\\nThis is a standard representation of token usage that is consistent across models.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"input_tokens\\": 350,\\n \\"output_tokens\\": 240,\\n \\"total_tokens\\": 590,\\n \\"input_token_details\\": {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n },\\n \\"output_token_details\\": {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n }\\n\\n.. 
versionchanged:: 0.3.9\\n\\n Added ``input_token_details`` and ``output_token_details``.", "properties": {"input_tokens": {"title": "Input Tokens", "type": "integer"}, "output_tokens": {"title": "Output Tokens", "type": "integer"}, "total_tokens": {"title": "Total Tokens", "type": "integer"}, "input_token_details": {"$ref": "#/$defs/InputTokenDetails"}, "output_token_details": {"$ref": "#/$defs/OutputTokenDetails"}}, "required": ["input_tokens", "output_tokens", "total_tokens"], "title": "UsageMetadata", "type": "object"}}, "default": null, "items": {"oneOf": [{"$ref": "#/$defs/AIMessage"}, {"$ref": "#/$defs/HumanMessage"}, {"$ref": "#/$defs/ChatMessage"}, {"$ref": "#/$defs/SystemMessage"}, {"$ref": "#/$defs/FunctionMessage"}, {"$ref": "#/$defs/ToolMessage"}, {"$ref": "#/$defs/AIMessageChunk"}, {"$ref": "#/$defs/HumanMessageChunk"}, {"$ref": "#/$defs/ChatMessageChunk"}, {"$ref": "#/$defs/SystemMessageChunk"}, {"$ref": "#/$defs/FunctionMessageChunk"}, {"$ref": "#/$defs/ToolMessageChunk"}]}, "title": "LangGraphOutput", "type": "array"}'
# ---
# name: test_message_graph[postgres_pool].2
'''
{
"nodes": [
{
"id": "__start__",
"type": "schema",
"data": "__start__"
},
{
"id": "agent",
"type": "runnable",
"data": {
"id": [
"tests",
"test_pregel",
"FakeFuntionChatModel"
],
"name": "agent"
}
},
{
"id": "tools",
"type": "runnable",
"data": {
"id": [
"langgraph",
"prebuilt",
"tool_node",
"ToolNode"
],
"name": "tools"
}
},
{
"id": "__end__",
"type": "schema",
"data": "__end__"
}
],
"edges": [
{
"source": "__start__",
"target": "agent"
},
{
"source": "tools",
"target": "agent"
},
{
"source": "agent",
"target": "tools",
"data": "continue",
"conditional": true
},
{
"source": "agent",
"target": "__end__",
"data": "end",
"conditional": true
}
]
}
'''
# ---
# name: test_message_graph[postgres_pool].3
'''
graph TD;
__start__ --> agent;
tools --> agent;
agent -. continue .-> tools;
agent -. end .-> __end__;
'''
# ---
# name: test_message_graph[sqlite]
'{"$defs": {"AIMessage": {"additionalProperties": true, "description": "Message from an AI.\\n\\nAIMessage is returned from a chat model as a response to a prompt.\\n\\nThis message represents the output of the model and consists of both\\nthe raw output as returned by the model together standardized fields\\n(e.g., tool calls, usage metadata) added by the LangChain framework.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ai", "default": "ai", "enum": ["ai"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": "#/$defs/InvalidToolCall"}, "title": "Invalid Tool Calls", "type": "array"}, "usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, {"type": "null"}], "default": null}}, "required": ["content"], "title": "AIMessage", "type": "object"}, "AIMessageChunk": {"additionalProperties": true, "description": "Message chunk from an AI.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "AIMessageChunk", "default": "AIMessageChunk", "enum": ["AIMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": 
"string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": "#/$defs/InvalidToolCall"}, "title": "Invalid Tool Calls", "type": "array"}, "usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, {"type": "null"}], "default": null}, "tool_call_chunks": {"default": [], "items": {"$ref": "#/$defs/ToolCallChunk"}, "title": "Tool Call Chunks", "type": "array"}}, "required": ["content"], "title": "AIMessageChunk", "type": "object"}, "ChatMessage": {"additionalProperties": true, "description": "Message that can be assigned an arbitrary speaker (i.e. role).", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "chat", "default": "chat", "enum": ["chat"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], "title": "ChatMessage", "type": "object"}, "ChatMessageChunk": {"additionalProperties": true, "description": "Chat Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": 
{"const": "ChatMessageChunk", "default": "ChatMessageChunk", "enum": ["ChatMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], "title": "ChatMessageChunk", "type": "object"}, "FunctionMessage": {"additionalProperties": true, "description": "Message for passing the result of executing a tool back to a model.\\n\\nFunctionMessage are an older version of the ToolMessage schema, and\\ndo not contain the tool_call_id field.\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "function", "default": "function", "enum": ["function"], "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessage", "type": "object"}, "FunctionMessageChunk": {"additionalProperties": true, "description": "Function Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "FunctionMessageChunk", "default": "FunctionMessageChunk", 
"enum": ["FunctionMessageChunk"], "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessageChunk", "type": "object"}, "HumanMessage": {"additionalProperties": true, "description": "Message from a human.\\n\\nHumanMessages are messages that are passed in from a human to the model.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Instantiate a chat model and invoke it with the messages\\n model = ...\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "human", "default": "human", "enum": ["human"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], "title": "HumanMessage", "type": "object"}, "HumanMessageChunk": {"additionalProperties": true, "description": "Human Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": 
"HumanMessageChunk", "default": "HumanMessageChunk", "enum": ["HumanMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], "title": "HumanMessageChunk", "type": "object"}, "InputTokenDetails": {"description": "Breakdown of input token counts.\\n\\nDoes *not* need to sum to full input token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n }\\n\\n.. versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "cache_creation": {"title": "Cache Creation", "type": "integer"}, "cache_read": {"title": "Cache Read", "type": "integer"}}, "title": "InputTokenDetails", "type": "object"}, "InvalidToolCall": {"description": "Allowance for errors made by LLM.\\n\\nHere we add an `error` key to surface errors made during generation\\n(e.g., invalid JSON arguments.)", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "error": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Error"}, "type": {"const": "invalid_tool_call", "enum": ["invalid_tool_call"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "error"], "title": "InvalidToolCall", "type": "object"}, "OutputTokenDetails": {"description": "Breakdown of output token counts.\\n\\nDoes *not* need to sum to full output token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n\\n.. 
versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "reasoning": {"title": "Reasoning", "type": "integer"}}, "title": "OutputTokenDetails", "type": "object"}, "SystemMessage": {"additionalProperties": true, "description": "Message for priming AI behavior.\\n\\nThe system message is usually passed in as the first of a sequence\\nof input messages.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Define a chat model and invoke it with the messages\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "system", "default": "system", "enum": ["system"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessage", "type": "object"}, "SystemMessageChunk": {"additionalProperties": true, "description": "System Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "SystemMessageChunk", "default": "SystemMessageChunk", "enum": ["SystemMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, 
{"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessageChunk", "type": "object"}, "ToolCall": {"description": "Represents a request to call a tool.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"name\\": \\"foo\\",\\n \\"args\\": {\\"a\\": 1},\\n \\"id\\": \\"123\\"\\n }\\n\\n This represents a request to call the tool named \\"foo\\" with arguments {\\"a\\": 1}\\n and an identifier of \\"123\\".", "properties": {"name": {"title": "Name", "type": "string"}, "args": {"title": "Args", "type": "object"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "type": {"const": "tool_call", "enum": ["tool_call"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id"], "title": "ToolCall", "type": "object"}, "ToolCallChunk": {"description": "A chunk of a tool call (e.g., as part of a stream).\\n\\nWhen merging ToolCallChunks (e.g., via AIMessageChunk.__add__),\\nall string attributes are concatenated. Chunks are only merged if their\\nvalues of `index` are equal and not None.\\n\\nExample:\\n\\n.. 
code-block:: python\\n\\n left_chunks = [ToolCallChunk(name=\\"foo\\", args=\'{\\"a\\":\', index=0)]\\n right_chunks = [ToolCallChunk(name=None, args=\'1}\', index=0)]\\n\\n (\\n AIMessageChunk(content=\\"\\", tool_call_chunks=left_chunks)\\n + AIMessageChunk(content=\\"\\", tool_call_chunks=right_chunks)\\n ).tool_call_chunks == [ToolCallChunk(name=\'foo\', args=\'{\\"a\\":1}\', index=0)]", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "index": {"anyOf": [{"type": "integer"}, {"type": "null"}], "title": "Index"}, "type": {"const": "tool_call_chunk", "enum": ["tool_call_chunk"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "index"], "title": "ToolCallChunk", "type": "object"}, "ToolMessage": {"additionalProperties": true, "description": "Message for passing the result of executing a tool back to a model.\\n\\nToolMessages contain the result of a tool invocation. Typically, the result\\nis encoded inside the `content` field.\\n\\nExample: A ToolMessage representing a result of 42 from a tool call with id\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n ToolMessage(content=\'42\', tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\')\\n\\n\\nExample: A ToolMessage where only part of the tool output is sent to the model\\n and the full output is passed in to artifact.\\n\\n .. versionadded:: 0.2.17\\n\\n .. 
code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n tool_output = {\\n \\"stdout\\": \\"From the graph we can see that the correlation between x and y is ...\\",\\n \\"stderr\\": None,\\n \\"artifacts\\": {\\"type\\": \\"image\\", \\"base64_data\\": \\"/9j/4gIcSU...\\"},\\n }\\n\\n ToolMessage(\\n content=tool_output[\\"stdout\\"],\\n artifact=tool_output,\\n tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\',\\n )\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "tool", "default": "tool", "enum": ["tool"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessage", "type": "object"}, "ToolMessageChunk": {"additionalProperties": true, "description": "Tool Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ToolMessageChunk", "default": "ToolMessageChunk", "enum": 
["ToolMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessageChunk", "type": "object"}, "UsageMetadata": {"description": "Usage metadata for a message, such as token counts.\\n\\nThis is a standard representation of token usage that is consistent across models.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"input_tokens\\": 350,\\n \\"output_tokens\\": 240,\\n \\"total_tokens\\": 590,\\n \\"input_token_details\\": {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n },\\n \\"output_token_details\\": {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n }\\n\\n.. 
versionchanged:: 0.3.9\\n\\n Added ``input_token_details`` and ``output_token_details``.", "properties": {"input_tokens": {"title": "Input Tokens", "type": "integer"}, "output_tokens": {"title": "Output Tokens", "type": "integer"}, "total_tokens": {"title": "Total Tokens", "type": "integer"}, "input_token_details": {"$ref": "#/$defs/InputTokenDetails"}, "output_token_details": {"$ref": "#/$defs/OutputTokenDetails"}}, "required": ["input_tokens", "output_tokens", "total_tokens"], "title": "UsageMetadata", "type": "object"}}, "default": null, "items": {"oneOf": [{"$ref": "#/$defs/AIMessage"}, {"$ref": "#/$defs/HumanMessage"}, {"$ref": "#/$defs/ChatMessage"}, {"$ref": "#/$defs/SystemMessage"}, {"$ref": "#/$defs/FunctionMessage"}, {"$ref": "#/$defs/ToolMessage"}, {"$ref": "#/$defs/AIMessageChunk"}, {"$ref": "#/$defs/HumanMessageChunk"}, {"$ref": "#/$defs/ChatMessageChunk"}, {"$ref": "#/$defs/SystemMessageChunk"}, {"$ref": "#/$defs/FunctionMessageChunk"}, {"$ref": "#/$defs/ToolMessageChunk"}]}, "title": "LangGraphInput", "type": "array"}'
# ---
# name: test_message_graph[sqlite].1
'{"$defs": {"AIMessage": {"additionalProperties": true, "description": "Message from an AI.\\n\\nAIMessage is returned from a chat model as a response to a prompt.\\n\\nThis message represents the output of the model and consists of both\\nthe raw output as returned by the model together standardized fields\\n(e.g., tool calls, usage metadata) added by the LangChain framework.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ai", "default": "ai", "enum": ["ai"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": "#/$defs/InvalidToolCall"}, "title": "Invalid Tool Calls", "type": "array"}, "usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, {"type": "null"}], "default": null}}, "required": ["content"], "title": "AIMessage", "type": "object"}, "AIMessageChunk": {"additionalProperties": true, "description": "Message chunk from an AI.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "AIMessageChunk", "default": "AIMessageChunk", "enum": ["AIMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": 
"string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}, "tool_calls": {"default": [], "items": {"$ref": "#/$defs/ToolCall"}, "title": "Tool Calls", "type": "array"}, "invalid_tool_calls": {"default": [], "items": {"$ref": "#/$defs/InvalidToolCall"}, "title": "Invalid Tool Calls", "type": "array"}, "usage_metadata": {"anyOf": [{"$ref": "#/$defs/UsageMetadata"}, {"type": "null"}], "default": null}, "tool_call_chunks": {"default": [], "items": {"$ref": "#/$defs/ToolCallChunk"}, "title": "Tool Call Chunks", "type": "array"}}, "required": ["content"], "title": "AIMessageChunk", "type": "object"}, "ChatMessage": {"additionalProperties": true, "description": "Message that can be assigned an arbitrary speaker (i.e. role).", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "chat", "default": "chat", "enum": ["chat"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], "title": "ChatMessage", "type": "object"}, "ChatMessageChunk": {"additionalProperties": true, "description": "Chat Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": 
{"const": "ChatMessageChunk", "default": "ChatMessageChunk", "enum": ["ChatMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "role": {"title": "Role", "type": "string"}}, "required": ["content", "role"], "title": "ChatMessageChunk", "type": "object"}, "FunctionMessage": {"additionalProperties": true, "description": "Message for passing the result of executing a tool back to a model.\\n\\nFunctionMessage are an older version of the ToolMessage schema, and\\ndo not contain the tool_call_id field.\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "function", "default": "function", "enum": ["function"], "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessage", "type": "object"}, "FunctionMessageChunk": {"additionalProperties": true, "description": "Function Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "FunctionMessageChunk", "default": "FunctionMessageChunk", 
"enum": ["FunctionMessageChunk"], "title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "name"], "title": "FunctionMessageChunk", "type": "object"}, "HumanMessage": {"additionalProperties": true, "description": "Message from a human.\\n\\nHumanMessages are messages that are passed in from a human to the model.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Instantiate a chat model and invoke it with the messages\\n model = ...\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "human", "default": "human", "enum": ["human"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], "title": "HumanMessage", "type": "object"}, "HumanMessageChunk": {"additionalProperties": true, "description": "Human Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": 
"HumanMessageChunk", "default": "HumanMessageChunk", "enum": ["HumanMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "example": {"default": false, "title": "Example", "type": "boolean"}}, "required": ["content"], "title": "HumanMessageChunk", "type": "object"}, "InputTokenDetails": {"description": "Breakdown of input token counts.\\n\\nDoes *not* need to sum to full input token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n }\\n\\n.. versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "cache_creation": {"title": "Cache Creation", "type": "integer"}, "cache_read": {"title": "Cache Read", "type": "integer"}}, "title": "InputTokenDetails", "type": "object"}, "InvalidToolCall": {"description": "Allowance for errors made by LLM.\\n\\nHere we add an `error` key to surface errors made during generation\\n(e.g., invalid JSON arguments.)", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "error": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Error"}, "type": {"const": "invalid_tool_call", "enum": ["invalid_tool_call"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "error"], "title": "InvalidToolCall", "type": "object"}, "OutputTokenDetails": {"description": "Breakdown of output token counts.\\n\\nDoes *not* need to sum to full output token count. Does *not* need to have all keys.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n\\n.. 
versionadded:: 0.3.9", "properties": {"audio": {"title": "Audio", "type": "integer"}, "reasoning": {"title": "Reasoning", "type": "integer"}}, "title": "OutputTokenDetails", "type": "object"}, "SystemMessage": {"additionalProperties": true, "description": "Message for priming AI behavior.\\n\\nThe system message is usually passed in as the first of a sequence\\nof input messages.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import HumanMessage, SystemMessage\\n\\n messages = [\\n SystemMessage(\\n content=\\"You are a helpful assistant! Your name is Bob.\\"\\n ),\\n HumanMessage(\\n content=\\"What is your name?\\"\\n )\\n ]\\n\\n # Define a chat model and invoke it with the messages\\n print(model.invoke(messages))", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "system", "default": "system", "enum": ["system"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessage", "type": "object"}, "SystemMessageChunk": {"additionalProperties": true, "description": "System Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "SystemMessageChunk", "default": "SystemMessageChunk", "enum": ["SystemMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, 
{"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content"], "title": "SystemMessageChunk", "type": "object"}, "ToolCall": {"description": "Represents a request to call a tool.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"name\\": \\"foo\\",\\n \\"args\\": {\\"a\\": 1},\\n \\"id\\": \\"123\\"\\n }\\n\\n This represents a request to call the tool named \\"foo\\" with arguments {\\"a\\": 1}\\n and an identifier of \\"123\\".", "properties": {"name": {"title": "Name", "type": "string"}, "args": {"title": "Args", "type": "object"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "type": {"const": "tool_call", "enum": ["tool_call"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id"], "title": "ToolCall", "type": "object"}, "ToolCallChunk": {"description": "A chunk of a tool call (e.g., as part of a stream).\\n\\nWhen merging ToolCallChunks (e.g., via AIMessageChunk.__add__),\\nall string attributes are concatenated. Chunks are only merged if their\\nvalues of `index` are equal and not None.\\n\\nExample:\\n\\n.. 
code-block:: python\\n\\n left_chunks = [ToolCallChunk(name=\\"foo\\", args=\'{\\"a\\":\', index=0)]\\n right_chunks = [ToolCallChunk(name=None, args=\'1}\', index=0)]\\n\\n (\\n AIMessageChunk(content=\\"\\", tool_call_chunks=left_chunks)\\n + AIMessageChunk(content=\\"\\", tool_call_chunks=right_chunks)\\n ).tool_call_chunks == [ToolCallChunk(name=\'foo\', args=\'{\\"a\\":1}\', index=0)]", "properties": {"name": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Name"}, "args": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Args"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Id"}, "index": {"anyOf": [{"type": "integer"}, {"type": "null"}], "title": "Index"}, "type": {"const": "tool_call_chunk", "enum": ["tool_call_chunk"], "title": "Type", "type": "string"}}, "required": ["name", "args", "id", "index"], "title": "ToolCallChunk", "type": "object"}, "ToolMessage": {"additionalProperties": true, "description": "Message for passing the result of executing a tool back to a model.\\n\\nToolMessages contain the result of a tool invocation. Typically, the result\\nis encoded inside the `content` field.\\n\\nExample: A ToolMessage representing a result of 42 from a tool call with id\\n\\n .. code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n ToolMessage(content=\'42\', tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\')\\n\\n\\nExample: A ToolMessage where only part of the tool output is sent to the model\\n and the full output is passed in to artifact.\\n\\n .. versionadded:: 0.2.17\\n\\n .. 
code-block:: python\\n\\n from langchain_core.messages import ToolMessage\\n\\n tool_output = {\\n \\"stdout\\": \\"From the graph we can see that the correlation between x and y is ...\\",\\n \\"stderr\\": None,\\n \\"artifacts\\": {\\"type\\": \\"image\\", \\"base64_data\\": \\"/9j/4gIcSU...\\"},\\n }\\n\\n ToolMessage(\\n content=tool_output[\\"stdout\\"],\\n artifact=tool_output,\\n tool_call_id=\'call_Jja7J89XsjrOLA5r!MEOW!SL\',\\n )\\n\\nThe tool_call_id field is used to associate the tool call request with the\\ntool call response. This is useful in situations where a chat model is able\\nto request multiple tool calls in parallel.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "tool", "default": "tool", "enum": ["tool"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessage", "type": "object"}, "ToolMessageChunk": {"additionalProperties": true, "description": "Tool Message chunk.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"const": "ToolMessageChunk", "default": "ToolMessageChunk", "enum": 
["ToolMessageChunk"], "title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}, "tool_call_id": {"title": "Tool Call Id", "type": "string"}, "artifact": {"default": null, "title": "Artifact"}, "status": {"default": "success", "enum": ["success", "error"], "title": "Status", "type": "string"}}, "required": ["content", "tool_call_id"], "title": "ToolMessageChunk", "type": "object"}, "UsageMetadata": {"description": "Usage metadata for a message, such as token counts.\\n\\nThis is a standard representation of token usage that is consistent across models.\\n\\nExample:\\n\\n .. code-block:: python\\n\\n {\\n \\"input_tokens\\": 350,\\n \\"output_tokens\\": 240,\\n \\"total_tokens\\": 590,\\n \\"input_token_details\\": {\\n \\"audio\\": 10,\\n \\"cache_creation\\": 200,\\n \\"cache_read\\": 100,\\n },\\n \\"output_token_details\\": {\\n \\"audio\\": 10,\\n \\"reasoning\\": 200,\\n }\\n }\\n\\n.. 
versionchanged:: 0.3.9\\n\\n Added ``input_token_details`` and ``output_token_details``.", "properties": {"input_tokens": {"title": "Input Tokens", "type": "integer"}, "output_tokens": {"title": "Output Tokens", "type": "integer"}, "total_tokens": {"title": "Total Tokens", "type": "integer"}, "input_token_details": {"$ref": "#/$defs/InputTokenDetails"}, "output_token_details": {"$ref": "#/$defs/OutputTokenDetails"}}, "required": ["input_tokens", "output_tokens", "total_tokens"], "title": "UsageMetadata", "type": "object"}}, "default": null, "items": {"oneOf": [{"$ref": "#/$defs/AIMessage"}, {"$ref": "#/$defs/HumanMessage"}, {"$ref": "#/$defs/ChatMessage"}, {"$ref": "#/$defs/SystemMessage"}, {"$ref": "#/$defs/FunctionMessage"}, {"$ref": "#/$defs/ToolMessage"}, {"$ref": "#/$defs/AIMessageChunk"}, {"$ref": "#/$defs/HumanMessageChunk"}, {"$ref": "#/$defs/ChatMessageChunk"}, {"$ref": "#/$defs/SystemMessageChunk"}, {"$ref": "#/$defs/FunctionMessageChunk"}, {"$ref": "#/$defs/ToolMessageChunk"}]}, "title": "LangGraphOutput", "type": "array"}'
# ---
# name: test_message_graph[sqlite].2
'''
{
"nodes": [
{
"id": "__start__",
"type": "schema",
"data": "__start__"
},
{
"id": "agent",
"type": "runnable",
"data": {
"id": [
"tests",
"test_pregel",
"FakeFuntionChatModel"
],
"name": "agent"
}
},
{
"id": "tools",
"type": "runnable",
"data": {
"id": [
"langgraph",
"prebuilt",
"tool_node",
"ToolNode"
],
"name": "tools"
}
},
{
"id": "__end__",
"type": "schema",
"data": "__end__"
}
],
"edges": [
{
"source": "__start__",
"target": "agent"
},
{
"source": "tools",
"target": "agent"
},
{
"source": "agent",
"target": "tools",
"data": "continue",
"conditional": true
},
{
"source": "agent",
"target": "__end__",
"data": "end",
"conditional": true
}
]
}
'''
# ---
# name: test_message_graph[sqlite].3
'''
graph TD;
__start__ --> agent;
tools --> agent;
agent -. continue .-> tools;
agent -. end .-> __end__;
'''
# ---
# name: test_multiple_sinks_subgraphs
'''
%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
__start__([<p>__start__</p>]):::first
uno(uno)
dos(dos)
subgraph_one(one)
subgraph_two(two)
subgraph_three(three)
__start__ --> uno;
uno -.-> dos;
uno -.-> subgraph_one;
subgraph subgraph
subgraph_one -.-> subgraph_two;
subgraph_one -.-> subgraph_three;
end
classDef default fill:#f2f0ff,line-height:1.2
classDef first fill-opacity:0
classDef last fill:#bfb6fc
'''
# ---
# name: test_nested_graph
'''
graph TD;
__start__ --> inner;
inner --> side;
side --> __end__;
'''
# ---
# name: test_nested_graph.1
'''
%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
__start__([<p>__start__</p>]):::first
inner(inner)
side(side)
__end__([<p>__end__</p>]):::last
__start__ --> inner;
inner --> side;
side --> __end__;
classDef default fill:#f2f0ff,line-height:1.2
classDef first fill-opacity:0
classDef last fill:#bfb6fc
'''
# ---
# name: test_nested_graph_xray
dict({
'edges': list([
dict({
'conditional': True,
'source': 'tool_two:__start__',
'target': 'tool_two:tool_two_slow',
}),
dict({
'source': 'tool_two:tool_two_slow',
'target': 'tool_two:__end__',
}),
dict({
'conditional': True,
'source': 'tool_two:__start__',
'target': 'tool_two:tool_two_fast',
}),
dict({
'source': 'tool_two:tool_two_fast',
'target': 'tool_two:__end__',
}),
dict({
'conditional': True,
'source': '__start__',
'target': 'tool_one',
}),
dict({
'source': 'tool_one',
'target': '__end__',
}),
dict({
'conditional': True,
'source': '__start__',
'target': 'tool_two:__start__',
}),
dict({
'source': 'tool_two:__end__',
'target': '__end__',
}),
dict({
'conditional': True,
'source': '__start__',
'target': 'tool_three',
}),
dict({
'source': 'tool_three',
'target': '__end__',
}),
]),
'nodes': list([
dict({
'data': '__start__',
'id': '__start__',
'type': 'schema',
}),
dict({
'data': dict({
'id': list([
'langgraph',
'utils',
'runnable',
'RunnableCallable',
]),
'name': 'tool_one',
}),
'id': 'tool_one',
'type': 'runnable',
}),
dict({
'data': 'tool_two:__start__',
'id': 'tool_two:__start__',
'type': 'schema',
}),
dict({
'data': dict({
'id': list([
'langgraph',
'utils',
'runnable',
'RunnableCallable',
]),
'name': 'tool_two:tool_two_slow',
}),
'id': 'tool_two:tool_two_slow',
'type': 'runnable',
}),
dict({
'data': dict({
'id': list([
'langgraph',
'utils',
'runnable',
'RunnableCallable',
]),
'name': 'tool_two:tool_two_fast',
}),
'id': 'tool_two:tool_two_fast',
'type': 'runnable',
}),
dict({
'data': 'tool_two:__end__',
'id': 'tool_two:__end__',
'type': 'schema',
}),
dict({
'data': dict({
'id': list([
'langgraph',
'utils',
'runnable',
'RunnableCallable',
]),
'name': 'tool_three',
}),
'id': 'tool_three',
'type': 'runnable',
}),
dict({
'data': '__end__',
'id': '__end__',
'type': 'schema',
}),
]),
})
# ---
# name: test_nested_graph_xray.1
'''
%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
__start__([<p>__start__</p>]):::first
tool_one(tool_one)
tool_two___start__(<p>__start__</p>)
tool_two_tool_two_slow(tool_two_slow)
tool_two_tool_two_fast(tool_two_fast)
tool_two___end__(<p>__end__</p>)
tool_three(tool_three)
__end__([<p>__end__</p>]):::last
__start__ -.-> tool_one;
tool_one --> __end__;
__start__ -.-> tool_two___start__;
tool_two___end__ --> __end__;
__start__ -.-> tool_three;
tool_three --> __end__;
subgraph tool_two
tool_two___start__ -.-> tool_two_tool_two_slow;
tool_two_tool_two_slow --> tool_two___end__;
tool_two___start__ -.-> tool_two_tool_two_fast;
tool_two_tool_two_fast --> tool_two___end__;
end
classDef default fill:#f2f0ff,line-height:1.2
classDef first fill-opacity:0
classDef last fill:#bfb6fc
'''
# ---
# name: test_prebuilt_chat
'{"title": "LangGraphInput", "type": "object", "properties": {"messages": {"title": "Messages", "type": "array", "items": {"$ref": "#/definitions/BaseMessage"}}}, "definitions": {"BaseMessage": {"title": "BaseMessage", "description": "Base abstract message class.\\n\\nMessages are the inputs and outputs of ChatModels.", "type": "object", "properties": {"content": {"title": "Content", "anyOf": [{"type": "string"}, {"type": "array", "items": {"anyOf": [{"type": "string"}, {"type": "object"}]}}]}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"title": "Id", "type": "string"}}, "required": ["content", "type"]}}}'
# ---
# name: test_prebuilt_chat.1
'{"title": "LangGraphOutput", "type": "object", "properties": {"messages": {"title": "Messages", "type": "array", "items": {"$ref": "#/definitions/BaseMessage"}}}, "definitions": {"BaseMessage": {"title": "BaseMessage", "description": "Base abstract message class.\\n\\nMessages are the inputs and outputs of ChatModels.", "type": "object", "properties": {"content": {"title": "Content", "anyOf": [{"type": "string"}, {"type": "array", "items": {"anyOf": [{"type": "string"}, {"type": "object"}]}}]}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"title": "Type", "type": "string"}, "name": {"title": "Name", "type": "string"}, "id": {"title": "Id", "type": "string"}}, "required": ["content", "type"]}}}'
# ---
# name: test_prebuilt_chat.2
'''
{
"nodes": [
{
"id": "__start__",
"type": "schema",
"data": "__start__"
},
{
"id": "agent",
"type": "runnable",
"data": {
"id": [
"langchain_core",
"runnables",
"base",
"RunnableLambda"
],
"name": "agent"
}
},
{
"id": "tools",
"type": "runnable",
"data": {
"id": [
"langchain_core",
"runnables",
"base",
"RunnableLambda"
],
"name": "tools"
}
},
{
"id": "__end__",
"type": "schema",
"data": "__end__"
}
],
"edges": [
{
"source": "__start__",
"target": "agent"
},
{
"source": "tools",
"target": "agent"
},
{
"source": "agent",
"target": "tools",
"data": "continue",
"conditional": true
},
{
"source": "agent",
"target": "__end__",
"data": "end",
"conditional": true
}
]
}
'''
# ---
# name: test_prebuilt_chat.3
'''
graph TD;
__start__ --> agent;
tools --> agent;
agent -.  continue  .-> tools;
agent -.  end  .-> __end__;
'''
# ---
# name: test_prebuilt_tool_chat
'{"$defs": {"BaseMessage": {"additionalProperties": true, "description": "Base abstract message class.\\n\\nMessages are the inputs and outputs of ChatModels.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "type"], "title": "BaseMessage", "type": "object"}}, "properties": {"messages": {"items": {"$ref": "#/$defs/BaseMessage"}, "title": "Messages", "type": "array"}}, "required": ["messages"], "title": "LangGraphInput", "type": "object"}'
# ---
# name: test_prebuilt_tool_chat.1
'{"$defs": {"BaseMessage": {"additionalProperties": true, "description": "Base abstract message class.\\n\\nMessages are the inputs and outputs of ChatModels.", "properties": {"content": {"anyOf": [{"type": "string"}, {"items": {"anyOf": [{"type": "string"}, {"type": "object"}]}, "type": "array"}], "title": "Content"}, "additional_kwargs": {"title": "Additional Kwargs", "type": "object"}, "response_metadata": {"title": "Response Metadata", "type": "object"}, "type": {"title": "Type", "type": "string"}, "name": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Name"}, "id": {"anyOf": [{"type": "string"}, {"type": "null"}], "default": null, "title": "Id"}}, "required": ["content", "type"], "title": "BaseMessage", "type": "object"}}, "properties": {"messages": {"items": {"$ref": "#/$defs/BaseMessage"}, "title": "Messages", "type": "array"}}, "required": ["messages"], "title": "LangGraphOutput", "type": "object"}'
# ---
# name: test_prebuilt_tool_chat.2
'''
{
"nodes": [
{
"id": "__start__",
"type": "schema",
"data": "__start__"
},
{
"id": "agent",
"type": "runnable",
"data": {
"id": [
"langgraph",
"utils",
"runnable",
"RunnableCallable"
],
"name": "agent"
}
},
{
"id": "tools",
"type": "runnable",
"data": {
"id": [
"langgraph",
"prebuilt",
"tool_node",
"ToolNode"
],
"name": "tools"
}
},
{
"id": "__end__",
"type": "schema",
"data": "__end__"
}
],
"edges": [
{
"source": "__start__",
"target": "agent"
},
{
"source": "tools",
"target": "agent"
},
{
"source": "agent",
"target": "tools",
"conditional": true
},
{
"source": "agent",
"target": "__end__",
"conditional": true
}
]
}
'''
# ---
# name: test_prebuilt_tool_chat.3
'''
graph TD;
__start__ --> agent;
tools --> agent;
agent -.-> tools;
agent -.-> __end__;
'''
# ---
# name: test_repeat_condition
'''
graph TD;
__start__ --> Researcher;
Researcher -. continue .-> Chart_Generator;
Researcher -. call_tool .-> Call_Tool;
Researcher -. end .-> __end__;
Chart_Generator -. continue .-> Researcher;
Chart_Generator -. call_tool .-> Call_Tool;
Chart_Generator -. end .-> __end__;
Call_Tool -.-> Researcher;
Call_Tool -.-> Chart_Generator;
Researcher -. redo .-> Researcher;
'''
# ---
# name: test_send_react_interrupt_control[memory]
'''
%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
__start__([<p>__start__</p>]):::first
agent(agent)
foo([foo]):::last
__start__ --> agent;
agent -.-> foo;
classDef default fill:#f2f0ff,line-height:1.2
classDef first fill-opacity:0
classDef last fill:#bfb6fc
'''
# ---
# name: test_send_react_interrupt_control[postgres]
'''
%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
__start__([<p>__start__</p>]):::first
agent(agent)
foo([foo]):::last
__start__ --> agent;
agent -.-> foo;
classDef default fill:#f2f0ff,line-height:1.2
classDef first fill-opacity:0
classDef last fill:#bfb6fc
'''
# ---
# name: test_send_react_interrupt_control[postgres_pipe]
'''
%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
__start__([<p>__start__</p>]):::first
agent(agent)
foo([foo]):::last
__start__ --> agent;
agent -.-> foo;
classDef default fill:#f2f0ff,line-height:1.2
classDef first fill-opacity:0
classDef last fill:#bfb6fc
'''
# ---
# name: test_send_react_interrupt_control[postgres_pool]
'''
%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
__start__([<p>__start__</p>]):::first
agent(agent)
foo([foo]):::last
__start__ --> agent;
agent -.-> foo;
classDef default fill:#f2f0ff,line-height:1.2
classDef first fill-opacity:0
classDef last fill:#bfb6fc
'''
# ---
# name: test_send_react_interrupt_control[sqlite]
'''
%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
__start__([<p>__start__</p>]):::first
agent(agent)
foo([foo]):::last
__start__ --> agent;
agent -.-> foo;
classDef default fill:#f2f0ff,line-height:1.2
classDef first fill-opacity:0
classDef last fill:#bfb6fc
'''
# ---
# name: test_simple_multi_edge
'''
graph TD;
__start__ --> up;
down --> __end__;
side --> down;
up --> down;
up --> other;
up --> side;
'''
# ---
# name: test_start_branch_then[duckdb]
'''
%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
__start__([<p>__start__</p>]):::first
tool_two_slow(tool_two_slow)
tool_two_fast(tool_two_fast)
__end__([<p>__end__</p>]):::last
__start__ -.-> tool_two_slow;
tool_two_slow --> __end__;
__start__ -.-> tool_two_fast;
tool_two_fast --> __end__;
classDef default fill:#f2f0ff,line-height:1.2
classDef first fill-opacity:0
classDef last fill:#bfb6fc
'''
# ---
# name: test_start_branch_then[memory]
'''
%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
__start__([<p>__start__</p>]):::first
tool_two_slow(tool_two_slow)
tool_two_fast(tool_two_fast)
__end__([<p>__end__</p>]):::last
__start__ -.-> tool_two_slow;
tool_two_slow --> __end__;
__start__ -.-> tool_two_fast;
tool_two_fast --> __end__;
classDef default fill:#f2f0ff,line-height:1.2
classDef first fill-opacity:0
classDef last fill:#bfb6fc
'''
# ---
# name: test_start_branch_then[postgres]
'''
%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
__start__([<p>__start__</p>]):::first
tool_two_slow(tool_two_slow)
tool_two_fast(tool_two_fast)
__end__([<p>__end__</p>]):::last
__start__ -.-> tool_two_slow;
tool_two_slow --> __end__;
__start__ -.-> tool_two_fast;
tool_two_fast --> __end__;
classDef default fill:#f2f0ff,line-height:1.2
classDef first fill-opacity:0
classDef last fill:#bfb6fc
'''
# ---
# name: test_start_branch_then[postgres_pipe]
'''
%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
__start__([<p>__start__</p>]):::first
tool_two_slow(tool_two_slow)
tool_two_fast(tool_two_fast)
__end__([<p>__end__</p>]):::last
__start__ -.-> tool_two_slow;
tool_two_slow --> __end__;
__start__ -.-> tool_two_fast;
tool_two_fast --> __end__;
classDef default fill:#f2f0ff,line-height:1.2
classDef first fill-opacity:0
classDef last fill:#bfb6fc
'''
# ---
# name: test_start_branch_then[postgres_pool]
'''
%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
__start__([<p>__start__</p>]):::first
tool_two_slow(tool_two_slow)
tool_two_fast(tool_two_fast)
__end__([<p>__end__</p>]):::last
__start__ -.-> tool_two_slow;
tool_two_slow --> __end__;
__start__ -.-> tool_two_fast;
tool_two_fast --> __end__;
classDef default fill:#f2f0ff,line-height:1.2
classDef first fill-opacity:0
classDef last fill:#bfb6fc
'''
# ---
# name: test_start_branch_then[sqlite]
'''
%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
__start__([<p>__start__</p>]):::first
tool_two_slow(tool_two_slow)
tool_two_fast(tool_two_fast)
__end__([<p>__end__</p>]):::last
__start__ -.-> tool_two_slow;
tool_two_slow --> __end__;
__start__ -.-> tool_two_fast;
tool_two_fast --> __end__;
classDef default fill:#f2f0ff,line-height:1.2
classDef first fill-opacity:0
classDef last fill:#bfb6fc
'''
# ---
# name: test_state_graph_w_config
'{"title": "LangGraphConfig", "type": "object", "properties": {"configurable": {"$ref": "#/definitions/Configurable"}}, "definitions": {"Configurable": {"title": "Configurable", "type": "object", "properties": {"tools": {"title": "Tools", "type": "array", "items": {"type": "string"}}}}}}'
# ---
# name: test_state_graph_w_config.1
'{"title": "LangGraphInput", "type": "object", "properties": {"input": {"title": "Input", "type": "string"}, "agent_outcome": {"title": "Agent Outcome", "anyOf": [{"$ref": "#/definitions/AgentAction"}, {"$ref": "#/definitions/AgentFinish"}]}, "intermediate_steps": {"title": "Intermediate Steps", "type": "array", "items": {"type": "array", "minItems": 2, "maxItems": 2, "items": [{"$ref": "#/definitions/AgentAction"}, {"type": "string"}]}}}, "definitions": {"AgentAction": {"title": "AgentAction", "description": "Represents a request to execute an action by an agent.\\n\\nThe action consists of the name of the tool to execute and the input to pass\\nto the tool. The log is used to pass along extra information about the action.", "type": "object", "properties": {"tool": {"title": "Tool", "type": "string"}, "tool_input": {"title": "Tool Input", "anyOf": [{"type": "string"}, {"type": "object"}]}, "log": {"title": "Log", "type": "string"}, "type": {"title": "Type", "default": "AgentAction", "enum": ["AgentAction"], "type": "string"}}, "required": ["tool", "tool_input", "log"]}, "AgentFinish": {"title": "AgentFinish", "description": "Final return value of an ActionAgent.\\n\\nAgents return an AgentFinish when they have reached a stopping condition.", "type": "object", "properties": {"return_values": {"title": "Return Values", "type": "object"}, "log": {"title": "Log", "type": "string"}, "type": {"title": "Type", "default": "AgentFinish", "enum": ["AgentFinish"], "type": "string"}}, "required": ["return_values", "log"]}}}'
# ---
# name: test_state_graph_w_config.2
'{"title": "LangGraphOutput", "type": "object", "properties": {"input": {"title": "Input", "type": "string"}, "agent_outcome": {"title": "Agent Outcome", "anyOf": [{"$ref": "#/definitions/AgentAction"}, {"$ref": "#/definitions/AgentFinish"}]}, "intermediate_steps": {"title": "Intermediate Steps", "type": "array", "items": {"type": "array", "minItems": 2, "maxItems": 2, "items": [{"$ref": "#/definitions/AgentAction"}, {"type": "string"}]}}}, "definitions": {"AgentAction": {"title": "AgentAction", "description": "Represents a request to execute an action by an agent.\\n\\nThe action consists of the name of the tool to execute and the input to pass\\nto the tool. The log is used to pass along extra information about the action.", "type": "object", "properties": {"tool": {"title": "Tool", "type": "string"}, "tool_input": {"title": "Tool Input", "anyOf": [{"type": "string"}, {"type": "object"}]}, "log": {"title": "Log", "type": "string"}, "type": {"title": "Type", "default": "AgentAction", "enum": ["AgentAction"], "type": "string"}}, "required": ["tool", "tool_input", "log"]}, "AgentFinish": {"title": "AgentFinish", "description": "Final return value of an ActionAgent.\\n\\nAgents return an AgentFinish when they have reached a stopping condition.", "type": "object", "properties": {"return_values": {"title": "Return Values", "type": "object"}, "log": {"title": "Log", "type": "string"}, "type": {"title": "Type", "default": "AgentFinish", "enum": ["AgentFinish"], "type": "string"}}, "required": ["return_values", "log"]}}}'
# ---
# name: test_state_graph_w_config_inherited_state
'{"title": "LangGraphConfig", "type": "object", "properties": {"configurable": {"$ref": "#/definitions/Configurable"}}, "definitions": {"Configurable": {"title": "Configurable", "type": "object", "properties": {"tools": {"title": "Tools", "type": "array", "items": {"type": "string"}}}}}}'
# ---
# name: test_state_graph_w_config_inherited_state.1
'{"title": "LangGraphInput", "type": "object", "properties": {"input": {"title": "Input", "type": "string"}, "agent_outcome": {"title": "Agent Outcome", "anyOf": [{"$ref": "#/definitions/AgentAction"}, {"$ref": "#/definitions/AgentFinish"}]}, "intermediate_steps": {"title": "Intermediate Steps", "type": "array", "items": {"type": "array", "minItems": 2, "maxItems": 2, "items": [{"$ref": "#/definitions/AgentAction"}, {"type": "string"}]}}}, "definitions": {"AgentAction": {"title": "AgentAction", "description": "Represents a request to execute an action by an agent.\\n\\nThe action consists of the name of the tool to execute and the input to pass\\nto the tool. The log is used to pass along extra information about the action.", "type": "object", "properties": {"tool": {"title": "Tool", "type": "string"}, "tool_input": {"title": "Tool Input", "anyOf": [{"type": "string"}, {"type": "object"}]}, "log": {"title": "Log", "type": "string"}, "type": {"title": "Type", "default": "AgentAction", "enum": ["AgentAction"], "type": "string"}}, "required": ["tool", "tool_input", "log"]}, "AgentFinish": {"title": "AgentFinish", "description": "Final return value of an ActionAgent.\\n\\nAgents return an AgentFinish when they have reached a stopping condition.", "type": "object", "properties": {"return_values": {"title": "Return Values", "type": "object"}, "log": {"title": "Log", "type": "string"}, "type": {"title": "Type", "default": "AgentFinish", "enum": ["AgentFinish"], "type": "string"}}, "required": ["return_values", "log"]}}}'
# ---
# name: test_state_graph_w_config_inherited_state.2
'{"title": "LangGraphOutput", "type": "object", "properties": {"input": {"title": "Input", "type": "string"}, "agent_outcome": {"title": "Agent Outcome", "anyOf": [{"$ref": "#/definitions/AgentAction"}, {"$ref": "#/definitions/AgentFinish"}]}, "intermediate_steps": {"title": "Intermediate Steps", "type": "array", "items": {"type": "array", "minItems": 2, "maxItems": 2, "items": [{"$ref": "#/definitions/AgentAction"}, {"type": "string"}]}}}, "definitions": {"AgentAction": {"title": "AgentAction", "description": "Represents a request to execute an action by an agent.\\n\\nThe action consists of the name of the tool to execute and the input to pass\\nto the tool. The log is used to pass along extra information about the action.", "type": "object", "properties": {"tool": {"title": "Tool", "type": "string"}, "tool_input": {"title": "Tool Input", "anyOf": [{"type": "string"}, {"type": "object"}]}, "log": {"title": "Log", "type": "string"}, "type": {"title": "Type", "default": "AgentAction", "enum": ["AgentAction"], "type": "string"}}, "required": ["tool", "tool_input", "log"]}, "AgentFinish": {"title": "AgentFinish", "description": "Final return value of an ActionAgent.\\n\\nAgents return an AgentFinish when they have reached a stopping condition.", "type": "object", "properties": {"return_values": {"title": "Return Values", "type": "object"}, "log": {"title": "Log", "type": "string"}, "type": {"title": "Type", "default": "AgentFinish", "enum": ["AgentFinish"], "type": "string"}}, "required": ["return_values", "log"]}}}'
# ---
# name: test_state_graph_w_config_inherited_state_keys
'{"$defs": {"Configurable": {"properties": {"tools": {"default": null, "items": {"type": "string"}, "title": "Tools", "type": "array"}}, "title": "Configurable", "type": "object"}}, "properties": {"configurable": {"$ref": "#/$defs/Configurable", "default": null}}, "title": "LangGraphConfig", "type": "object"}'
# ---
# name: test_state_graph_w_config_inherited_state_keys.1
'{"$defs": {"AgentAction": {"description": "Represents a request to execute an action by an agent.\\n\\nThe action consists of the name of the tool to execute and the input to pass\\nto the tool. The log is used to pass along extra information about the action.", "properties": {"tool": {"title": "Tool", "type": "string"}, "tool_input": {"anyOf": [{"type": "string"}, {"type": "object"}], "title": "Tool Input"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentAction", "default": "AgentAction", "enum": ["AgentAction"], "title": "Type", "type": "string"}}, "required": ["tool", "tool_input", "log"], "title": "AgentAction", "type": "object"}, "AgentFinish": {"description": "Final return value of an ActionAgent.\\n\\nAgents return an AgentFinish when they have reached a stopping condition.", "properties": {"return_values": {"title": "Return Values", "type": "object"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentFinish", "default": "AgentFinish", "enum": ["AgentFinish"], "title": "Type", "type": "string"}}, "required": ["return_values", "log"], "title": "AgentFinish", "type": "object"}}, "properties": {"input": {"title": "Input", "type": "string"}, "agent_outcome": {"anyOf": [{"$ref": "#/$defs/AgentAction"}, {"$ref": "#/$defs/AgentFinish"}, {"type": "null"}], "default": null, "title": "Agent Outcome"}, "intermediate_steps": {"default": null, "items": {"maxItems": 2, "minItems": 2, "prefixItems": [{"$ref": "#/$defs/AgentAction"}, {"type": "string"}], "type": "array"}, "title": "Intermediate Steps", "type": "array"}}, "required": ["input"], "title": "LangGraphInput", "type": "object"}'
# ---
# name: test_state_graph_w_config_inherited_state_keys.2
'{"$defs": {"AgentAction": {"description": "Represents a request to execute an action by an agent.\\n\\nThe action consists of the name of the tool to execute and the input to pass\\nto the tool. The log is used to pass along extra information about the action.", "properties": {"tool": {"title": "Tool", "type": "string"}, "tool_input": {"anyOf": [{"type": "string"}, {"type": "object"}], "title": "Tool Input"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentAction", "default": "AgentAction", "enum": ["AgentAction"], "title": "Type", "type": "string"}}, "required": ["tool", "tool_input", "log"], "title": "AgentAction", "type": "object"}, "AgentFinish": {"description": "Final return value of an ActionAgent.\\n\\nAgents return an AgentFinish when they have reached a stopping condition.", "properties": {"return_values": {"title": "Return Values", "type": "object"}, "log": {"title": "Log", "type": "string"}, "type": {"const": "AgentFinish", "default": "AgentFinish", "enum": ["AgentFinish"], "title": "Type", "type": "string"}}, "required": ["return_values", "log"], "title": "AgentFinish", "type": "object"}}, "properties": {"input": {"title": "Input", "type": "string"}, "agent_outcome": {"anyOf": [{"$ref": "#/$defs/AgentAction"}, {"$ref": "#/$defs/AgentFinish"}, {"type": "null"}], "default": null, "title": "Agent Outcome"}, "intermediate_steps": {"default": null, "items": {"maxItems": 2, "minItems": 2, "prefixItems": [{"$ref": "#/$defs/AgentAction"}, {"type": "string"}], "type": "array"}, "title": "Intermediate Steps", "type": "array"}}, "required": ["input"], "title": "LangGraphOutput", "type": "object"}'
# ---
# name: test_weather_subgraph[duckdb]
'''
%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
__start__([<p>__start__</p>]):::first
router_node(router_node)
normal_llm_node(normal_llm_node)
weather_graph_model_node(model_node)
weather_graph_weather_node(weather_node<hr/><small><em>__interrupt = before</em></small>)
__end__([<p>__end__</p>]):::last
__start__ --> router_node;
normal_llm_node --> __end__;
weather_graph_weather_node --> __end__;
router_node -.-> normal_llm_node;
router_node -.-> weather_graph_model_node;
router_node -.-> __end__;
subgraph weather_graph
weather_graph_model_node --> weather_graph_weather_node;
end
classDef default fill:#f2f0ff,line-height:1.2
classDef first fill-opacity:0
classDef last fill:#bfb6fc
'''
# ---
# name: test_weather_subgraph[memory]
'''
%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
__start__([<p>__start__</p>]):::first
router_node(router_node)
normal_llm_node(normal_llm_node)
weather_graph_model_node(model_node)
weather_graph_weather_node(weather_node<hr/><small><em>__interrupt = before</em></small>)
__end__([<p>__end__</p>]):::last
__start__ --> router_node;
normal_llm_node --> __end__;
weather_graph_weather_node --> __end__;
router_node -.-> normal_llm_node;
router_node -.-> weather_graph_model_node;
router_node -.-> __end__;
subgraph weather_graph
weather_graph_model_node --> weather_graph_weather_node;
end
classDef default fill:#f2f0ff,line-height:1.2
classDef first fill-opacity:0
classDef last fill:#bfb6fc
'''
# ---
# name: test_weather_subgraph[postgres]
'''
%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
__start__([<p>__start__</p>]):::first
router_node(router_node)
normal_llm_node(normal_llm_node)
weather_graph_model_node(model_node)
weather_graph_weather_node(weather_node<hr/><small><em>__interrupt = before</em></small>)
__end__([<p>__end__</p>]):::last
__start__ --> router_node;
normal_llm_node --> __end__;
weather_graph_weather_node --> __end__;
router_node -.-> normal_llm_node;
router_node -.-> weather_graph_model_node;
router_node -.-> __end__;
subgraph weather_graph
weather_graph_model_node --> weather_graph_weather_node;
end
classDef default fill:#f2f0ff,line-height:1.2
classDef first fill-opacity:0
classDef last fill:#bfb6fc
'''
# ---
# name: test_weather_subgraph[postgres_pipe]
'''
%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
__start__([<p>__start__</p>]):::first
router_node(router_node)
normal_llm_node(normal_llm_node)
weather_graph_model_node(model_node)
weather_graph_weather_node(weather_node<hr/><small><em>__interrupt = before</em></small>)
__end__([<p>__end__</p>]):::last
__start__ --> router_node;
normal_llm_node --> __end__;
weather_graph_weather_node --> __end__;
router_node -.-> normal_llm_node;
router_node -.-> weather_graph_model_node;
router_node -.-> __end__;
subgraph weather_graph
weather_graph_model_node --> weather_graph_weather_node;
end
classDef default fill:#f2f0ff,line-height:1.2
classDef first fill-opacity:0
classDef last fill:#bfb6fc
'''
# ---
# name: test_weather_subgraph[postgres_pool]
'''
%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
__start__([<p>__start__</p>]):::first
router_node(router_node)
normal_llm_node(normal_llm_node)
weather_graph_model_node(model_node)
weather_graph_weather_node(weather_node<hr/><small><em>__interrupt = before</em></small>)
__end__([<p>__end__</p>]):::last
__start__ --> router_node;
normal_llm_node --> __end__;
weather_graph_weather_node --> __end__;
router_node -.-> normal_llm_node;
router_node -.-> weather_graph_model_node;
router_node -.-> __end__;
subgraph weather_graph
weather_graph_model_node --> weather_graph_weather_node;
end
classDef default fill:#f2f0ff,line-height:1.2
classDef first fill-opacity:0
classDef last fill:#bfb6fc
'''
# ---
# name: test_weather_subgraph[sqlite]
'''
%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
__start__([<p>__start__</p>]):::first
router_node(router_node)
normal_llm_node(normal_llm_node)
weather_graph_model_node(model_node)
weather_graph_weather_node(weather_node<hr/><small><em>__interrupt = before</em></small>)
__end__([<p>__end__</p>]):::last
__start__ --> router_node;
normal_llm_node --> __end__;
weather_graph_weather_node --> __end__;
router_node -.-> normal_llm_node;
router_node -.-> weather_graph_model_node;
router_node -.-> __end__;
subgraph weather_graph
weather_graph_model_node --> weather_graph_weather_node;
end
classDef default fill:#f2f0ff,line-height:1.2
classDef first fill-opacity:0
classDef last fill:#bfb6fc
'''
# ---
# name: test_xray_bool
'''
%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
__start__([<p>__start__</p>]):::first
gp_one(gp_one)
gp_two___start__(<p>__start__</p>)
gp_two_p_one(p_one)
gp_two_p_two___start__(<p>__start__</p>)
gp_two_p_two_c_one(c_one)
gp_two_p_two_c_two(c_two)
gp_two_p_two___end__(<p>__end__</p>)
gp_two___end__(<p>__end__</p>)
__end__([<p>__end__</p>]):::last
__start__ --> gp_one;
gp_two___end__ --> gp_one;
gp_one -. 0 .-> gp_two___start__;
gp_one -. 1 .-> __end__;
subgraph gp_two
gp_two___start__ --> gp_two_p_one;
gp_two_p_two___end__ --> gp_two_p_one;
gp_two_p_one -. 0 .-> gp_two_p_two___start__;
gp_two_p_one -. 1 .-> gp_two___end__;
subgraph p_two
gp_two_p_two___start__ --> gp_two_p_two_c_one;
gp_two_p_two_c_two --> gp_two_p_two_c_one;
gp_two_p_two_c_one -. 0 .-> gp_two_p_two_c_two;
gp_two_p_two_c_one -. 1 .-> gp_two_p_two___end__;
end
end
classDef default fill:#f2f0ff,line-height:1.2
classDef first fill-opacity:0
classDef last fill:#bfb6fc
'''
# ---
# name: test_xray_issue
'''
%%{init: {'flowchart': {'curve': 'linear'}}}%%
graph TD;
__start__([<p>__start__</p>]):::first
p_one(p_one)
p_two___start__(<p>__start__</p>)
p_two_c_one(c_one)
p_two_c_two(c_two)
p_two___end__(<p>__end__</p>)
__end__([<p>__end__</p>]):::last
__start__ --> p_one;
p_two___end__ --> p_one;
p_one -. 0 .-> p_two___start__;
p_one -. 1 .-> __end__;
subgraph p_two
p_two___start__ --> p_two_c_one;
p_two_c_two --> p_two_c_one;
p_two_c_one -. 0 .-> p_two_c_two;
p_two_c_one -. 1 .-> p_two___end__;
end
classDef default fill:#f2f0ff,line-height:1.2
classDef first fill-opacity:0
classDef last fill:#bfb6fc
'''
# ---
# name: test_xray_lance
dict({
'edges': list([
dict({
'source': '__start__',
'target': 'ask_question',
}),
dict({
'source': 'ask_question',
'target': 'answer_question',
}),
dict({
'conditional': True,
'source': 'answer_question',
'target': 'ask_question',
}),
dict({
'conditional': True,
'source': 'answer_question',
'target': '__end__',
}),
]),
'nodes': list([
dict({
'data': '__start__',
'id': '__start__',
'type': 'schema',
}),
dict({
'data': dict({
'id': list([
'langgraph',
'utils',
'runnable',
'RunnableCallable',
]),
'name': 'ask_question',
}),
'id': 'ask_question',
'type': 'runnable',
}),
dict({
'data': dict({
'id': list([
'langgraph',
'utils',
'runnable',
'RunnableCallable',
]),
'name': 'answer_question',
}),
'id': 'answer_question',
'type': 'runnable',
}),
dict({
'data': '__end__',
'id': '__end__',
'type': 'schema',
}),
]),
})
# ---
# name: test_xray_lance.1
dict({
'edges': list([
dict({
'source': '__start__',
'target': 'generate_analysts',
}),
dict({
'source': 'conduct_interview',
'target': 'generate_sections',
}),
dict({
'source': 'generate_sections',
'target': '__end__',
}),
dict({
'conditional': True,
'source': 'generate_analysts',
'target': 'conduct_interview',
}),
]),
'nodes': list([
dict({
'data': '__start__',
'id': '__start__',
'type': 'schema',
}),
dict({
'data': dict({
'id': list([
'langgraph',
'utils',
'runnable',
'RunnableCallable',
]),
'name': 'generate_analysts',
}),
'id': 'generate_analysts',
'type': 'runnable',
}),
dict({
'data': dict({
'id': list([
'langgraph',
'graph',
'state',
'CompiledStateGraph',
]),
'name': 'conduct_interview',
}),
'id': 'conduct_interview',
'type': 'runnable',
}),
dict({
'data': dict({
'id': list([
'langgraph',
'utils',
'runnable',
'RunnableCallable',
]),
'name': 'generate_sections',
}),
'id': 'generate_sections',
'type': 'runnable',
}),
dict({
'data': '__end__',
'id': '__end__',
'type': 'schema',
}),
]),
})
# ---
# name: test_xray_lance.2
dict({
'edges': list([
dict({
'source': 'conduct_interview:__start__',
'target': 'conduct_interview:ask_question',
}),
dict({
'source': 'conduct_interview:ask_question',
'target': 'conduct_interview:answer_question',
}),
dict({
'conditional': True,
'source': 'conduct_interview:answer_question',
'target': 'conduct_interview:ask_question',
}),
dict({
'conditional': True,
'source': 'conduct_interview:answer_question',
'target': 'conduct_interview:__end__',
}),
dict({
'source': '__start__',
'target': 'generate_analysts',
}),
dict({
'source': 'conduct_interview:__end__',
'target': 'generate_sections',
}),
dict({
'source': 'generate_sections',
'target': '__end__',
}),
dict({
'conditional': True,
'source': 'generate_analysts',
'target': 'conduct_interview:__start__',
}),
]),
'nodes': list([
dict({
'data': '__start__',
'id': '__start__',
'type': 'schema',
}),
dict({
'data': dict({
'id': list([
'langgraph',
'utils',
'runnable',
'RunnableCallable',
]),
'name': 'generate_analysts',
}),
'id': 'generate_analysts',
'type': 'runnable',
}),
dict({
'data': 'conduct_interview:__start__',
'id': 'conduct_interview:__start__',
'type': 'schema',
}),
dict({
'data': dict({
'id': list([
'langgraph',
'utils',
'runnable',
'RunnableCallable',
]),
'name': 'conduct_interview:ask_question',
}),
'id': 'conduct_interview:ask_question',
'type': 'runnable',
}),
dict({
'data': dict({
'id': list([
'langgraph',
'utils',
'runnable',
'RunnableCallable',
]),
'name': 'conduct_interview:answer_question',
}),
'id': 'conduct_interview:answer_question',
'type': 'runnable',
}),
dict({
'data': 'conduct_interview:__end__',
'id': 'conduct_interview:__end__',
'type': 'schema',
}),
dict({
'data': dict({
'id': list([
'langgraph',
'utils',
'runnable',
'RunnableCallable',
]),
'name': 'generate_sections',
}),
'id': 'generate_sections',
'type': 'runnable',
}),
dict({
'data': '__end__',
'id': '__end__',
'type': 'schema',
}),
]),
})
# ---
|
0 | lc_public_repos/langgraph/libs/langgraph | lc_public_repos/langgraph/libs/langgraph/langgraph/version.py | """Exports package version."""
from importlib import metadata
try:
    # Resolve the installed distribution's version from package metadata.
    __version__ = metadata.version(__package__)
except metadata.PackageNotFoundError:
    # Case where package metadata is not available (e.g. running from a
    # source checkout that was never installed).
    __version__ = ""
del metadata  # optional, avoids polluting the results of dir(__package__)
|
0 | lc_public_repos/langgraph/libs/langgraph | lc_public_repos/langgraph/libs/langgraph/langgraph/errors.py | from enum import Enum
from typing import Any, Sequence
from langgraph.checkpoint.base import EmptyChannelError # noqa: F401
from langgraph.types import Command, Interrupt
# EmptyChannelError re-exported for backwards compatibility
class ErrorCode(Enum):
    """Stable identifiers for LangGraph error categories.

    Each member's value is appended to the troubleshooting URL built by
    `create_error_message`, so these strings must match the slugs used on
    the docs site.
    """

    GRAPH_RECURSION_LIMIT = "GRAPH_RECURSION_LIMIT"
    INVALID_CONCURRENT_GRAPH_UPDATE = "INVALID_CONCURRENT_GRAPH_UPDATE"
    INVALID_GRAPH_NODE_RETURN_VALUE = "INVALID_GRAPH_NODE_RETURN_VALUE"
    MULTIPLE_SUBGRAPHS = "MULTIPLE_SUBGRAPHS"
    INVALID_CHAT_HISTORY = "INVALID_CHAT_HISTORY"
def create_error_message(*, message: str, error_code: ErrorCode) -> str:
    """Append the troubleshooting-guide link for `error_code` to `message`."""
    troubleshooting_link = (
        "For troubleshooting, visit: https://python.langchain.com/docs/"
        f"troubleshooting/errors/{error_code.value}"
    )
    return f"{message}\n" + troubleshooting_link
class GraphRecursionError(RecursionError):
"""Raised when the graph has exhausted the maximum number of steps.
This prevents infinite loops. To increase the maximum number of steps,
run your graph with a config specifying a higher `recursion_limit`.
Troubleshooting Guides:
- [GRAPH_RECURSION_LIMIT](https://python.langchain.com/docs/troubleshooting/errors/GRAPH_RECURSION_LIMIT)
Examples:
graph = builder.compile()
graph.invoke(
{"messages": [("user", "Hello, world!")]},
# The config is the second positional argument
{"recursion_limit": 1000},
)
"""
pass
class InvalidUpdateError(Exception):
"""Raised when attempting to update a channel with an invalid set of updates.
Troubleshooting Guides:
- [INVALID_CONCURRENT_GRAPH_UPDATE](https://python.langchain.com/docs/troubleshooting/errors/INVALID_CONCURRENT_GRAPH_UPDATE)
- [INVALID_GRAPH_NODE_RETURN_VALUE](https://python.langchain.com/docs/troubleshooting/errors/INVALID_GRAPH_NODE_RETURN_VALUE)
"""
pass
class GraphBubbleUp(Exception):
    """Base for internal exceptions that propagate ("bubble up") out of a
    graph to be handled by the caller or a parent graph, rather than being
    surfaced to the user as errors."""

    pass
class GraphInterrupt(GraphBubbleUp):
    """Raised when a subgraph is interrupted, suppressed by the root graph.
    Never raised directly, or surfaced to the user."""

    def __init__(self, interrupts: Sequence[Interrupt] = ()) -> None:
        # Store the pending interrupts as the exception's args so the
        # enclosing graph can collect them when the exception bubbles up.
        super().__init__(interrupts)
class NodeInterrupt(GraphInterrupt):
    """Raised by a node to interrupt execution."""

    def __init__(self, value: Any) -> None:
        # Wrap the user-provided value in a single Interrupt (non-resumable
        # by default, per Interrupt's field defaults).
        super().__init__([Interrupt(value=value)])
class GraphDelegate(GraphBubbleUp):
"""Raised when a graph is delegated (for distributed mode)."""
def __init__(self, *args: dict[str, Any]) -> None:
super().__init__(*args)
class ParentCommand(GraphBubbleUp):
    """Internal exception used to bubble a `Command` up to a parent graph."""

    # The wrapped Command is stored as the single element of `args`.
    args: tuple[Command]

    def __init__(self, command: Command) -> None:
        super().__init__(command)
class EmptyInputError(Exception):
"""Raised when graph receives an empty input."""
pass
class TaskNotFound(Exception):
"""Raised when the executor is unable to find a task (for distributed mode)."""
pass
class CheckpointNotLatest(Exception):
"""Raised when the checkpoint is not the latest version (for distributed mode)."""
pass
class MultipleSubgraphsError(Exception):
"""Raised when multiple subgraphs are called inside the same node.
Troubleshooting guides:
- [MULTIPLE_SUBGRAPHS](https://python.langchain.com/docs/troubleshooting/errors/MULTIPLE_SUBGRAPHS)
"""
pass
_SEEN_CHECKPOINT_NS: set[str] = set()
"""Used for subgraph detection."""
|
0 | lc_public_repos/langgraph/libs/langgraph | lc_public_repos/langgraph/libs/langgraph/langgraph/types.py | import dataclasses
import sys
from collections import deque
from typing import (
TYPE_CHECKING,
Any,
Callable,
ClassVar,
Generic,
Hashable,
Literal,
NamedTuple,
Optional,
Sequence,
Type,
TypedDict,
TypeVar,
Union,
cast,
)
from langchain_core.runnables import Runnable, RunnableConfig
from typing_extensions import Self
from langgraph.checkpoint.base import (
BaseCheckpointSaver,
CheckpointMetadata,
PendingWrite,
)
if TYPE_CHECKING:
from langgraph.store.base import BaseStore
All = Literal["*"]
"""Special value to indicate that graph should interrupt on all nodes."""
Checkpointer = Union[None, Literal[False], BaseCheckpointSaver]
"""Type of the checkpointer to use for a subgraph. False disables checkpointing,
even if the parent graph has a checkpointer. None inherits checkpointer."""
StreamMode = Literal["values", "updates", "debug", "messages", "custom"]
"""How the stream method should emit outputs.
- 'values': Emit all values of the state for each step.
- 'updates': Emit only the node name(s) and updates
that were returned by the node(s) **after** each step.
- 'debug': Emit debug events for each step.
- 'messages': Emit LLM messages token-by-token.
- 'custom': Emit custom output `write: StreamWriter` kwarg of each node.
"""
StreamWriter = Callable[[Any], None]
"""Callable that accepts a single argument and writes it to the output stream.
Always injected into nodes if requested as a keyword argument, but it's a no-op
when not using stream_mode="custom"."""
if sys.version_info >= (3, 10):
_DC_KWARGS = {"kw_only": True, "slots": True, "frozen": True}
else:
_DC_KWARGS = {"frozen": True}
def default_retry_on(exc: Exception) -> bool:
    """Default predicate deciding whether a failed node should be retried.

    Returns True for connection errors and 5xx HTTP responses; False for
    exception types that usually indicate a deterministic bug. Unknown
    exception types default to True (retry).
    """
    # Imported lazily so these third-party modules are only loaded when a
    # retry decision is actually needed.
    import httpx
    import requests

    # ConnectionError is a subclass of OSError, so it must be checked before
    # the blanket OSError entry below for connection failures to stay retryable.
    if isinstance(exc, ConnectionError):
        return True
    if isinstance(
        exc,
        (
            ValueError,
            TypeError,
            ArithmeticError,
            ImportError,
            LookupError,
            NameError,
            SyntaxError,
            RuntimeError,
            ReferenceError,
            StopIteration,
            StopAsyncIteration,
            OSError,
        ),
    ):
        # Likely deterministic programming errors; retrying will not help.
        return False
    # Retry HTTP errors only for server-side (5xx) status codes.
    if isinstance(exc, httpx.HTTPStatusError):
        return 500 <= exc.response.status_code < 600
    if isinstance(exc, requests.HTTPError):
        # requests may raise HTTPError with no response attached; retry then.
        return 500 <= exc.response.status_code < 600 if exc.response else True
    return True
class RetryPolicy(NamedTuple):
"""Configuration for retrying nodes."""
initial_interval: float = 0.5
"""Amount of time that must elapse before the first retry occurs. In seconds."""
backoff_factor: float = 2.0
"""Multiplier by which the interval increases after each retry."""
max_interval: float = 128.0
"""Maximum amount of time that may elapse between retries. In seconds."""
max_attempts: int = 3
"""Maximum number of attempts to make before giving up, including the first."""
jitter: bool = True
"""Whether to add random jitter to the interval between retries."""
retry_on: Union[
Type[Exception], Sequence[Type[Exception]], Callable[[Exception], bool]
] = default_retry_on
"""List of exception classes that should trigger a retry, or a callable that returns True for exceptions that should trigger a retry."""
class CachePolicy(NamedTuple):
    """Configuration for caching nodes."""

    # Placeholder: no cache configuration options are defined yet.
    pass
@dataclasses.dataclass(**_DC_KWARGS)
class Interrupt:
    """Information about an interruption of graph execution.

    Created either by a `NodeInterrupt` or by `interrupt()`; in the latter
    case it is marked resumable and carries the checkpoint namespace path.
    """

    # The payload surfaced to the client for this interrupt.
    value: Any
    # True when created by `interrupt()`, i.e. execution can be resumed.
    resumable: bool = False
    # Checkpoint namespace path (split on NS_SEP) identifying the location.
    ns: Optional[Sequence[str]] = None
    when: Literal["during"] = "during"
class PregelTask(NamedTuple):
    """Description of a task in a step, as surfaced in `StateSnapshot.tasks`."""

    id: str
    name: str
    # Location of the task within the graph (node names / indices / tuples).
    path: tuple[Union[str, int, tuple], ...]
    # Error raised by a previous attempt of this task, if any.
    error: Optional[Exception] = None
    interrupts: tuple[Interrupt, ...] = ()
    # Subgraph state: either a config to fetch it or an inline snapshot.
    state: Union[None, RunnableConfig, "StateSnapshot"] = None
    result: Optional[dict[str, Any]] = None
class PregelExecutableTask(NamedTuple):
    """A fully-prepared task ready for execution within a step."""

    name: str
    input: Any
    # The runnable to execute for this task.
    proc: Runnable
    # Buffer the task's (channel, value) writes are collected into.
    writes: deque[tuple[str, Any]]
    config: RunnableConfig
    # Triggers that caused this task to be scheduled.
    triggers: list[str]
    retry_policy: Optional[RetryPolicy]
    cache_policy: Optional[CachePolicy]
    id: str
    path: tuple[Union[str, int, tuple], ...]
    # True when the task was scheduled externally (distributed mode).
    scheduled: bool = False
    writers: Sequence[Runnable] = ()
class StateSnapshot(NamedTuple):
"""Snapshot of the state of the graph at the beginning of a step."""
values: Union[dict[str, Any], Any]
"""Current values of channels"""
next: tuple[str, ...]
"""The name of the node to execute in each task for this step."""
config: RunnableConfig
"""Config used to fetch this snapshot"""
metadata: Optional[CheckpointMetadata]
"""Metadata associated with this snapshot"""
created_at: Optional[str]
"""Timestamp of snapshot creation"""
parent_config: Optional[RunnableConfig]
"""Config used to fetch the parent snapshot, if any"""
tasks: tuple[PregelTask, ...]
"""Tasks to execute in this step. If already attempted, may contain an error."""
class Send:
    """A message or packet to send to a specific node in the graph.

    The `Send` class is used within a `StateGraph`'s conditional edges to
    dynamically invoke a node with a custom state at the next step.

    Importantly, the sent state can differ from the core graph's state,
    allowing for flexible and dynamic workflow management.

    One such example is a "map-reduce" workflow where your graph invokes
    the same node multiple times in parallel with different states,
    before aggregating the results back into the main graph's state.

    Attributes:
        node (str): The name of the target node to send the message to.
        arg (Any): The state or message to send to the target node.

    Examples:
        >>> from typing import Annotated
        >>> import operator
        >>> class OverallState(TypedDict):
        ...     subjects: list[str]
        ...     jokes: Annotated[list[str], operator.add]
        ...
        >>> from langgraph.types import Send
        >>> from langgraph.graph import END, START
        >>> def continue_to_jokes(state: OverallState):
        ...     return [Send("generate_joke", {"subject": s}) for s in state['subjects']]
        ...
        >>> from langgraph.graph import StateGraph
        >>> builder = StateGraph(OverallState)
        >>> builder.add_node("generate_joke", lambda state: {"jokes": [f"Joke about {state['subject']}"]})
        >>> builder.add_conditional_edges(START, continue_to_jokes)
        >>> builder.add_edge("generate_joke", END)
        >>> graph = builder.compile()
        >>>
        >>> # Invoking with two subjects results in a generated joke for each
        >>> graph.invoke({"subjects": ["cats", "dogs"]})
        {'subjects': ['cats', 'dogs'], 'jokes': ['Joke about cats', 'Joke about dogs']}
    """

    __slots__ = ("node", "arg")

    node: str
    arg: Any

    def __init__(self, /, node: str, arg: Any) -> None:
        """Create a Send packet.

        Args:
            node (str): The name of the target node to send the message to.
            arg (Any): The state or message to send to the target node.
        """
        self.node = node
        self.arg = arg

    def __hash__(self) -> int:
        # Hash on the (node, arg) pair so equal packets hash equal.
        return hash((self.node, self.arg))

    def __repr__(self) -> str:
        return "Send(node=%r, arg=%r)" % (self.node, self.arg)

    def __eq__(self, value: object) -> bool:
        if not isinstance(value, Send):
            return False
        return (self.node, self.arg) == (value.node, value.arg)
N = TypeVar("N", bound=Hashable)
@dataclasses.dataclass(**_DC_KWARGS)
class Command(Generic[N]):
    """One or more commands to update the graph's state and send messages to nodes.

    Args:
        graph: graph to send the command to. Supported values are:
            - None: the current graph (default)
            - Command.PARENT: closest parent graph
        update: update to apply to the graph's state.
        resume: value to resume execution with. To be used together with [`interrupt()`][langgraph.types.interrupt].
        goto: can be one of the following:
            - name of the node to navigate to next (any node that belongs to the specified `graph`)
            - sequence of node names to navigate to next
            - `Send` object (to execute a node with the input provided)
            - sequence of `Send` objects
    """

    graph: Optional[str] = None
    update: Union[dict[str, Any], Sequence[tuple[str, Any]]] = ()
    resume: Optional[Union[Any, dict[str, Any]]] = None
    goto: Union[Send, Sequence[Union[Send, str]], str] = ()

    def __repr__(self) -> str:
        # get all non-None values
        contents = ", ".join(
            f"{key}={value!r}"
            for key, value in dataclasses.asdict(self).items()
            if value
        )
        return f"Command({contents})"

    def _update_as_tuples(self) -> Sequence[tuple[str, Any]]:
        # Normalize `update` to a sequence of (channel, value) pairs.
        if isinstance(self.update, dict):
            return list(self.update.items())
        elif isinstance(self.update, (list, tuple)) and all(
            isinstance(t, tuple) and len(t) == 2 and isinstance(t[0], str)
            for t in self.update
        ):
            # Already a sequence of (channel, value) pairs; pass through.
            return self.update
        else:
            # Anything else is treated as an update to the root channel.
            return [("__root__", self.update)]

    PARENT: ClassVar[Literal["__parent__"]] = "__parent__"
StreamChunk = tuple[tuple[str, ...], str, Any]
class StreamProtocol:
    """Callable sink for stream chunks, tagged with the modes it accepts.

    Note: `__call__` is declared in `__slots__` and assigned per instance in
    `__init__`, so invoking an instance dispatches to the wrapped callable.
    """

    __slots__ = ("modes", "__call__")

    # The set of stream modes this consumer is interested in.
    modes: set[StreamMode]

    __call__: Callable[[Self, StreamChunk], None]

    def __init__(
        self,
        __call__: Callable[[StreamChunk], None],
        modes: set[StreamMode],
    ) -> None:
        self.__call__ = cast(Callable[[Self, StreamChunk], None], __call__)
        self.modes = modes
class LoopProtocol:
    """Bundles the per-run context (config, store, stream, step bounds)
    handed to managed values and other loop collaborators."""

    config: RunnableConfig
    store: Optional["BaseStore"]
    stream: Optional[StreamProtocol]
    step: int
    stop: int

    def __init__(
        self,
        *,
        step: int,
        stop: int,
        config: RunnableConfig,
        store: Optional["BaseStore"] = None,
        stream: Optional[StreamProtocol] = None,
    ) -> None:
        """Record the loop's step bounds, config, and optional store/stream."""
        self.step = step
        self.stop = stop
        self.config = config
        self.store = store
        self.stream = stream
class PregelScratchpad(TypedDict, total=False):
    """Mutable per-task storage used by `interrupt()` (kept under
    CONFIG_KEY_SCRATCHPAD in the config)."""

    # Index of the current interrupt() call within the task (0-based).
    interrupt_counter: int
    # Whether the NULL_TASK_ID resume value has already been consumed.
    used_null_resume: bool
    # Resume values collected so far, indexed by interrupt_counter.
    resume: list[Any]
def interrupt(value: Any) -> Any:
    """Pause graph execution, surfacing `value` to the client.

    On first execution this raises a resumable `GraphInterrupt` carrying
    `value`. When the task is re-executed after the client resumes, the
    matching resume value is returned instead of raising, so each
    `interrupt()` call site in a task resolves to its own resume value
    (matched by call order via the interrupt counter).
    """
    # Imported lazily to avoid circular imports at module load time.
    from langgraph.constants import (
        CONFIG_KEY_CHECKPOINT_NS,
        CONFIG_KEY_SCRATCHPAD,
        CONFIG_KEY_SEND,
        CONFIG_KEY_TASK_ID,
        CONFIG_KEY_WRITES,
        NS_SEP,
        NULL_TASK_ID,
        RESUME,
    )
    from langgraph.errors import GraphInterrupt
    from langgraph.utils.config import get_configurable

    conf = get_configurable()
    # track interrupt index: identifies which interrupt() call within the
    # task this is, since a task body may call interrupt() multiple times
    scratchpad: PregelScratchpad = conf[CONFIG_KEY_SCRATCHPAD]
    if "interrupt_counter" not in scratchpad:
        scratchpad["interrupt_counter"] = 0
    else:
        scratchpad["interrupt_counter"] += 1
    idx = scratchpad["interrupt_counter"]
    # find previous resume values
    # writes are (task_id, channel, value) tuples; the RESUME write for this
    # task holds the list of resume values from earlier executions
    task_id = conf[CONFIG_KEY_TASK_ID]
    writes: list[PendingWrite] = conf[CONFIG_KEY_WRITES]
    scratchpad.setdefault(
        "resume", next((w[2] for w in writes if w[0] == task_id and w[1] == RESUME), [])
    )
    if scratchpad["resume"]:
        if idx < len(scratchpad["resume"]):
            # this interrupt() call was already answered in a prior execution
            return scratchpad["resume"][idx]
    # find current resume value
    # a resume value written under NULL_TASK_ID answers the first
    # not-yet-answered interrupt; consume it at most once per execution
    if not scratchpad.get("used_null_resume"):
        scratchpad["used_null_resume"] = True
        for tid, c, v in sorted(writes, key=lambda x: x[0], reverse=True):
            if tid == NULL_TASK_ID and c == RESUME:
                assert len(scratchpad["resume"]) == idx, (scratchpad["resume"], idx)
                scratchpad["resume"].append(v)
                # persist the accumulated resume list under this task's id
                conf[CONFIG_KEY_SEND]([(RESUME, scratchpad["resume"])])
                return v
    # no resume value found
    raise GraphInterrupt(
        (
            Interrupt(
                value=value,
                resumable=True,
                ns=cast(str, conf[CONFIG_KEY_CHECKPOINT_NS]).split(NS_SEP),
            ),
        )
    )
|
0 | lc_public_repos/langgraph/libs/langgraph | lc_public_repos/langgraph/libs/langgraph/langgraph/constants.py | import sys
from os import getenv
from types import MappingProxyType
from typing import Any, Literal, Mapping, cast
from langgraph.types import Interrupt, Send # noqa: F401
# Interrupt, Send re-exported for backwards compatibility
# --- Empty read-only containers ---
EMPTY_MAP: Mapping[str, Any] = MappingProxyType({})
EMPTY_SEQ: tuple[str, ...] = tuple()
MISSING = object()
# --- Public constants ---
TAG_NOSTREAM = sys.intern("langsmith:nostream")
"""Tag to disable streaming for a chat model."""
TAG_HIDDEN = sys.intern("langsmith:hidden")
"""Tag to hide a node/edge from certain tracing/streaming environments."""
START = sys.intern("__start__")
"""The first (maybe virtual) node in graph-style Pregel."""
END = sys.intern("__end__")
"""The last (maybe virtual) node in graph-style Pregel."""
SELF = sys.intern("__self__")
"""The implicit branch that handles each node's Control values."""
# --- Reserved write keys ---
INPUT = sys.intern("__input__")
# for values passed as input to the graph
INTERRUPT = sys.intern("__interrupt__")
# for dynamic interrupts raised by nodes
RESUME = sys.intern("__resume__")
# for values passed to resume a node after an interrupt
ERROR = sys.intern("__error__")
# for errors raised by nodes
NO_WRITES = sys.intern("__no_writes__")
# marker to signal node didn't write anything
SCHEDULED = sys.intern("__scheduled__")
# marker to signal node was scheduled (in distributed mode)
TASKS = sys.intern("__pregel_tasks")
# for Send objects returned by nodes/edges, corresponds to PUSH below
# --- Reserved config.configurable keys ---
CONFIG_KEY_SEND = sys.intern("__pregel_send")
# holds the `write` function that accepts writes to state/edges/reserved keys
CONFIG_KEY_READ = sys.intern("__pregel_read")
# holds the `read` function that returns a copy of the current state
CONFIG_KEY_CHECKPOINTER = sys.intern("__pregel_checkpointer")
# holds a `BaseCheckpointSaver` passed from parent graph to child graphs
CONFIG_KEY_STREAM = sys.intern("__pregel_stream")
# holds a `StreamProtocol` passed from parent graph to child graphs
CONFIG_KEY_STREAM_WRITER = sys.intern("__pregel_stream_writer")
# holds a `StreamWriter` for stream_mode=custom
CONFIG_KEY_STORE = sys.intern("__pregel_store")
# holds a `BaseStore` made available to managed values
CONFIG_KEY_RESUMING = sys.intern("__pregel_resuming")
# holds a boolean indicating if subgraphs should resume from a previous checkpoint
CONFIG_KEY_TASK_ID = sys.intern("__pregel_task_id")
# holds the task ID for the current task
CONFIG_KEY_DEDUPE_TASKS = sys.intern("__pregel_dedupe_tasks")
# holds a boolean indicating if tasks should be deduplicated (for distributed mode)
CONFIG_KEY_ENSURE_LATEST = sys.intern("__pregel_ensure_latest")
# holds a boolean indicating whether to assert the requested checkpoint is the latest
# (for distributed mode)
CONFIG_KEY_DELEGATE = sys.intern("__pregel_delegate")
# holds a boolean indicating whether to delegate subgraphs (for distributed mode)
CONFIG_KEY_CHECKPOINT_MAP = sys.intern("checkpoint_map")
# holds a mapping of checkpoint_ns -> checkpoint_id for parent graphs
CONFIG_KEY_CHECKPOINT_ID = sys.intern("checkpoint_id")
# holds the current checkpoint_id, if any
CONFIG_KEY_CHECKPOINT_NS = sys.intern("checkpoint_ns")
# holds the current checkpoint_ns, "" for root graph
CONFIG_KEY_NODE_FINISHED = sys.intern("__pregel_node_finished")
# holds the value that "answers" an interrupt() call
CONFIG_KEY_WRITES = sys.intern("__pregel_writes")
# read-only list of existing task writes
CONFIG_KEY_SCRATCHPAD = sys.intern("__pregel_scratchpad")
# holds a mutable dict for temporary storage scoped to the current task
# --- Other constants ---
PUSH = sys.intern("__pregel_push")
# denotes push-style tasks, ie. those created by Send objects
PULL = sys.intern("__pregel_pull")
# denotes pull-style tasks, ie. those triggered by edges
NS_SEP = sys.intern("|")
# for checkpoint_ns, separates each level (ie. graph|subgraph|subsubgraph)
NS_END = sys.intern(":")
# for checkpoint_ns, for each level, separates the namespace from the task_id
CONF = cast(Literal["configurable"], sys.intern("configurable"))
# key for the configurable dict in RunnableConfig
FF_SEND_V2 = getenv("LANGGRAPH_FF_SEND_V2", "false").lower() == "true"
# temporary flag to enable new Send semantics
NULL_TASK_ID = sys.intern("00000000-0000-0000-0000-000000000000")
# the task_id to use for writes that are not associated with a task
# Names reserved for internal use; fixed the duplicate CONFIG_KEY_CHECKPOINT_MAP
# entry that previously appeared twice in this set literal (harmless at runtime
# since sets deduplicate, but misleading to maintainers).
RESERVED = {
    TAG_HIDDEN,
    # reserved write keys
    INPUT,
    INTERRUPT,
    RESUME,
    ERROR,
    NO_WRITES,
    SCHEDULED,
    TASKS,
    # reserved config.configurable keys
    CONFIG_KEY_SEND,
    CONFIG_KEY_READ,
    CONFIG_KEY_CHECKPOINTER,
    CONFIG_KEY_STREAM,
    CONFIG_KEY_STREAM_WRITER,
    CONFIG_KEY_STORE,
    CONFIG_KEY_CHECKPOINT_MAP,
    CONFIG_KEY_RESUMING,
    CONFIG_KEY_TASK_ID,
    CONFIG_KEY_DEDUPE_TASKS,
    CONFIG_KEY_ENSURE_LATEST,
    CONFIG_KEY_DELEGATE,
    CONFIG_KEY_CHECKPOINT_ID,
    CONFIG_KEY_CHECKPOINT_NS,
    # other constants
    PUSH,
    PULL,
    NS_SEP,
    NS_END,
    CONF,
}
|
0 | lc_public_repos/langgraph/libs/langgraph/langgraph | lc_public_repos/langgraph/libs/langgraph/langgraph/managed/base.py | from abc import ABC, abstractmethod
from contextlib import asynccontextmanager, contextmanager
from inspect import isclass
from typing import (
Any,
AsyncIterator,
Generic,
Iterator,
NamedTuple,
Sequence,
Type,
TypeVar,
Union,
)
from typing_extensions import Self, TypeGuard
from langgraph.types import LoopProtocol
V = TypeVar("V")
U = TypeVar("U")
class ManagedValue(ABC, Generic[V]):
    """Base class for values managed by the loop rather than by channels.

    Subclasses implement `__call__` to produce the current value; the
    `enter`/`aenter` context managers scope the value to a single run.
    """

    def __init__(self, loop: LoopProtocol) -> None:
        self.loop = loop

    @classmethod
    @contextmanager
    def enter(cls, loop: LoopProtocol, **kwargs: Any) -> Iterator[Self]:
        """Construct the managed value for the duration of a sync run."""
        try:
            value = cls(loop, **kwargs)
            yield value
        finally:
            # because managed value and Pregel have reference to each other
            # let's make sure to break the reference on exit
            try:
                del value
            except UnboundLocalError:
                # cls(loop, **kwargs) raised before `value` was bound.
                pass

    @classmethod
    @asynccontextmanager
    async def aenter(cls, loop: LoopProtocol, **kwargs: Any) -> AsyncIterator[Self]:
        """Async counterpart of `enter`."""
        try:
            value = cls(loop, **kwargs)
            yield value
        finally:
            # because managed value and Pregel have reference to each other
            # let's make sure to break the reference on exit
            try:
                del value
            except UnboundLocalError:
                # cls(loop, **kwargs) raised before `value` was bound.
                pass

    @abstractmethod
    def __call__(self) -> V: ...
class WritableManagedValue(Generic[V, U], ManagedValue[V], ABC):
    """A managed value that can additionally accept updates of type U."""

    @abstractmethod
    def update(self, writes: Sequence[U]) -> None: ...

    @abstractmethod
    async def aupdate(self, writes: Sequence[U]) -> None: ...
class ConfiguredManagedValue(NamedTuple):
    """A ManagedValue class paired with the kwargs to construct it with."""

    cls: Type[ManagedValue]
    kwargs: dict[str, Any]
ManagedValueSpec = Union[Type[ManagedValue], ConfiguredManagedValue]
def is_managed_value(value: Any) -> TypeGuard[ManagedValueSpec]:
    """True if `value` is a ManagedValue subclass or a ConfiguredManagedValue."""
    return (isclass(value) and issubclass(value, ManagedValue)) or isinstance(
        value, ConfiguredManagedValue
    )
def is_readonly_managed_value(value: Any) -> TypeGuard[Type[ManagedValue]]:
    """True for managed-value specs (class or configured) that are NOT writable."""
    return (
        isclass(value)
        and issubclass(value, ManagedValue)
        and not issubclass(value, WritableManagedValue)
    ) or (
        isinstance(value, ConfiguredManagedValue)
        and not issubclass(value.cls, WritableManagedValue)
    )
def is_writable_managed_value(value: Any) -> TypeGuard[Type[WritableManagedValue]]:
    """True for managed-value specs (class or configured) that accept updates."""
    return (isclass(value) and issubclass(value, WritableManagedValue)) or (
        isinstance(value, ConfiguredManagedValue)
        and issubclass(value.cls, WritableManagedValue)
    )
ChannelKeyPlaceholder = object()
ChannelTypePlaceholder = object()
ManagedValueMapping = dict[str, ManagedValue]
|
0 | lc_public_repos/langgraph/libs/langgraph/langgraph | lc_public_repos/langgraph/libs/langgraph/langgraph/managed/is_last_step.py | from typing import Annotated
from langgraph.managed.base import ManagedValue
class IsLastStepManager(ManagedValue[bool]):
    """Managed value reporting whether the current step is the final one."""

    def __call__(self) -> bool:
        # `stop` is one past the last allowed step, so the last step is stop - 1.
        return self.loop.step == self.loop.stop - 1
IsLastStep = Annotated[bool, IsLastStepManager]
class RemainingStepsManager(ManagedValue[int]):
    """Managed value reporting how many steps remain before the step limit."""

    def __call__(self) -> int:
        return self.loop.stop - self.loop.step
RemainingSteps = Annotated[int, RemainingStepsManager]
|
0 | lc_public_repos/langgraph/libs/langgraph/langgraph | lc_public_repos/langgraph/libs/langgraph/langgraph/managed/shared_value.py | import collections.abc
from contextlib import asynccontextmanager, contextmanager
from typing import (
Any,
AsyncIterator,
Iterator,
Optional,
Sequence,
Type,
)
from typing_extensions import NotRequired, Required, Self
from langgraph.constants import CONF
from langgraph.errors import InvalidUpdateError
from langgraph.managed.base import (
ChannelKeyPlaceholder,
ChannelTypePlaceholder,
ConfiguredManagedValue,
WritableManagedValue,
)
from langgraph.store.base import PutOp
from langgraph.types import LoopProtocol
V = dict[str, Any]
Value = dict[str, V]
Update = dict[str, Optional[V]]
# Adapted from typing_extensions
def _strip_extras(t): # type: ignore[no-untyped-def]
"""Strips Annotated, Required and NotRequired from a given type."""
if hasattr(t, "__origin__"):
return _strip_extras(t.__origin__)
if hasattr(t, "__origin__") and t.__origin__ in (Required, NotRequired):
return _strip_extras(t.__args__[0])
return t
class SharedValue(WritableManagedValue[Value, Update]):
    """Managed dict value shared across runs, optionally persisted via a store.

    The value is namespaced by a configurable `scope` key (e.g. a user id),
    so different scope values see independent dicts. When no store is
    configured the value lives only in memory for the duration of the run.
    """

    @staticmethod
    def on(scope: str) -> ConfiguredManagedValue:
        """Declare a SharedValue scoped by the given config.configurable key."""
        return ConfiguredManagedValue(
            SharedValue,
            {
                "scope": scope,
                # key/typ placeholders are substituted later from the
                # channel declaration this spec is attached to.
                "key": ChannelKeyPlaceholder,
                "typ": ChannelTypePlaceholder,
            },
        )

    @classmethod
    @contextmanager
    def enter(cls, loop: LoopProtocol, **kwargs: Any) -> Iterator[Self]:
        with super().enter(loop, **kwargs) as value:
            if loop.store is not None:
                # Hydrate the in-memory dict from previously persisted items.
                saved = loop.store.search(value.ns)
                value.value = {it.key: it.value for it in saved}
            yield value

    @classmethod
    @asynccontextmanager
    async def aenter(cls, loop: LoopProtocol, **kwargs: Any) -> AsyncIterator[Self]:
        async with super().aenter(loop, **kwargs) as value:
            if loop.store is not None:
                # Hydrate the in-memory dict from previously persisted items.
                saved = await loop.store.asearch(value.ns)
                value.value = {it.key: it.value for it in saved}
            yield value

    def __init__(
        self, loop: LoopProtocol, *, typ: Type[Any], scope: str, key: str
    ) -> None:
        super().__init__(loop)
        if typ := _strip_extras(typ):
            if typ not in (
                dict,
                collections.abc.Mapping,
                collections.abc.MutableMapping,
            ):
                raise ValueError("SharedValue must be a dict")
        self.scope = scope
        self.value: Value = {}
        # Bug fix: always give `ns` a default. Previously it was left unset
        # when no store was configured, so update()/aupdate() crashed with
        # AttributeError inside _process_update. With no store, the PutOps
        # that carry this namespace are discarded anyway.
        self.ns: tuple[str, ...] = ()
        if self.loop.store is None:
            pass
        elif scope_value := self.loop.config[CONF].get(self.scope):
            self.ns = ("scoped", scope, key, scope_value)
        else:
            raise ValueError(
                f"Scope {scope} for shared state key not in config.configurable"
            )

    def __call__(self) -> Value:
        return self.value

    def _process_update(self, values: Sequence[Update]) -> list[PutOp]:
        """Apply updates to the local dict and return mirroring store ops."""
        writes: list[PutOp] = []
        for vv in values:
            for k, v in vv.items():
                if v is None:
                    # None deletes the key (if present) locally and in the store.
                    if k in self.value:
                        del self.value[k]
                    writes.append(PutOp(self.ns, k, None))
                elif not isinstance(v, dict):
                    raise InvalidUpdateError("Received a non-dict value")
                else:
                    self.value[k] = v
                    writes.append(PutOp(self.ns, k, v))
        return writes

    def update(self, values: Sequence[Update]) -> None:
        if self.loop.store is None:
            # No store configured: keep the update in memory only.
            self._process_update(values)
        else:
            return self.loop.store.batch(self._process_update(values))

    async def aupdate(self, writes: Sequence[Update]) -> None:
        if self.loop.store is None:
            # No store configured: keep the update in memory only.
            self._process_update(writes)
        else:
            return await self.loop.store.abatch(self._process_update(writes))
|
0 | lc_public_repos/langgraph/libs/langgraph/langgraph | lc_public_repos/langgraph/libs/langgraph/langgraph/managed/context.py | from contextlib import asynccontextmanager, contextmanager
from inspect import signature
from typing import (
Any,
AsyncContextManager,
AsyncIterator,
Callable,
ContextManager,
Generic,
Iterator,
Optional,
Type,
Union,
)
from typing_extensions import Self
from langgraph.managed.base import ConfiguredManagedValue, ManagedValue, V
from langgraph.types import LoopProtocol
class Context(ManagedValue[V], Generic[V]):
    """Managed value wrapping a user-supplied (a)sync context manager.

    The context manager is entered for the duration of a run and its
    yielded value is what `__call__` returns to requesting nodes.
    """

    # NOTE(review): presumably marks this managed value as needing to be
    # (re-)entered at runtime for each run — confirm against the loop code.
    runtime = True

    value: V

    @staticmethod
    def of(
        ctx: Union[
            None,
            Callable[..., ContextManager[V]],
            Type[ContextManager[V]],
            Callable[..., AsyncContextManager[V]],
            Type[AsyncContextManager[V]],
        ] = None,
        actx: Optional[
            Union[
                Callable[..., AsyncContextManager[V]],
                Type[AsyncContextManager[V]],
            ]
        ] = None,
    ) -> ConfiguredManagedValue:
        """Declare a Context from a sync and/or async context manager factory."""
        if ctx is None and actx is None:
            raise ValueError("Must provide either sync or async context manager.")
        return ConfiguredManagedValue(Context, {"ctx": ctx, "actx": actx})

    @classmethod
    @contextmanager
    def enter(cls, loop: LoopProtocol, **kwargs: Any) -> Iterator[Self]:
        with super().enter(loop, **kwargs) as self:
            if self.ctx is None:
                raise ValueError(
                    "Synchronous context manager not found. Please initialize Context value with a sync context manager, or invoke your graph asynchronously."
                )
            # Pass the run config to the factory only if it declares a
            # `config` parameter.
            ctx = (
                self.ctx(loop.config)  # type: ignore[call-arg]
                if signature(self.ctx).parameters.get("config")
                else self.ctx()
            )
            with ctx as v:  # type: ignore[union-attr]
                self.value = v
                yield self

    @classmethod
    @asynccontextmanager
    async def aenter(cls, loop: LoopProtocol, **kwargs: Any) -> AsyncIterator[Self]:
        async with super().aenter(loop, **kwargs) as self:
            # Prefer the async factory; fall back to the sync one.
            if self.actx is not None:
                ctx = (
                    self.actx(loop.config)  # type: ignore[call-arg]
                    if signature(self.actx).parameters.get("config")
                    else self.actx()
                )
            elif self.ctx is not None:
                ctx = (
                    self.ctx(loop.config)  # type: ignore
                    if signature(self.ctx).parameters.get("config")
                    else self.ctx()
                )
            else:
                raise ValueError(
                    "Asynchronous context manager not found. Please initialize Context value with an async context manager, or invoke your graph synchronously."
                )
            # The resulting object may be an async or a sync context manager;
            # enter it with the matching protocol.
            if hasattr(ctx, "__aenter__"):
                async with ctx as v:
                    self.value = v
                    yield self
            elif hasattr(ctx, "__enter__") and hasattr(ctx, "__exit__"):
                with ctx as v:
                    self.value = v
                    yield self
            else:
                raise ValueError(
                    "Context manager must have either __enter__ or __aenter__ method."
                )

    def __init__(
        self,
        loop: LoopProtocol,
        *,
        ctx: Union[None, Type[ContextManager[V]], Type[AsyncContextManager[V]]] = None,
        actx: Optional[Type[AsyncContextManager[V]]] = None,
    ) -> None:
        self.ctx = ctx
        self.actx = actx

    def __call__(self) -> V:
        return self.value
|
0 | lc_public_repos/langgraph/libs/langgraph/langgraph | lc_public_repos/langgraph/libs/langgraph/langgraph/managed/__init__.py | from langgraph.managed.is_last_step import IsLastStep, RemainingSteps
__all__ = ["IsLastStep", "RemainingSteps"]
|
0 | lc_public_repos/langgraph/libs/langgraph/langgraph | lc_public_repos/langgraph/libs/langgraph/langgraph/_api/deprecation.py | import functools
import warnings
from typing import Any, Callable, Type, TypeVar, Union, cast
class LangGraphDeprecationWarning(DeprecationWarning):
pass
F = TypeVar("F", bound=Callable[..., Any])
C = TypeVar("C", bound=Type[Any])
def deprecated(
since: str, alternative: str, *, removal: str = "", example: str = ""
) -> Callable[[F], F]:
def decorator(obj: Union[F, C]) -> Union[F, C]:
removal_str = removal if removal else "a future version"
message = (
f"{obj.__name__} is deprecated as of version {since} and will be"
f" removed in {removal_str}. Use {alternative} instead.{example}"
)
if isinstance(obj, type):
original_init = obj.__init__ # type: ignore[misc]
@functools.wraps(original_init)
def new_init(self, *args: Any, **kwargs: Any) -> None: # type: ignore[no-untyped-def]
warnings.warn(message, LangGraphDeprecationWarning, stacklevel=2)
original_init(self, *args, **kwargs)
obj.__init__ = new_init # type: ignore[misc]
docstring = (
f"**Deprecated**: This class is deprecated as of version {since}. "
f"Use `{alternative}` instead."
)
if obj.__doc__:
docstring = docstring + f"\n\n{obj.__doc__}"
obj.__doc__ = docstring
return cast(C, obj)
elif callable(obj):
@functools.wraps(obj)
def wrapper(*args: Any, **kwargs: Any) -> Any:
warnings.warn(message, LangGraphDeprecationWarning, stacklevel=2)
return obj(*args, **kwargs)
docstring = (
f"**Deprecated**: This function is deprecated as of version {since}. "
f"Use `{alternative}` instead."
)
if obj.__doc__:
docstring = docstring + f"\n\n{obj.__doc__}"
wrapper.__doc__ = docstring
return cast(F, wrapper)
else:
raise TypeError(
f"Can only add deprecation decorator to classes or callables, got '{type(obj)}' instead."
)
return decorator
def deprecated_parameter(
arg_name: str, since: str, alternative: str, *, removal: str
) -> Callable[[F], F]:
def decorator(func: F) -> F:
@functools.wraps(func)
def wrapper(*args, **kwargs): # type: ignore[no-untyped-def]
if arg_name in kwargs:
warnings.warn(
f"Parameter '{arg_name}' in function '{func.__name__}' is "
f"deprecated as of version {since} and will be removed in version {removal}. "
f"Use '{alternative}' parameter instead.",
category=LangGraphDeprecationWarning,
stacklevel=2,
)
return func(*args, **kwargs)
return cast(F, wrapper)
return decorator
|
0 | lc_public_repos/langgraph/libs/langgraph/langgraph | lc_public_repos/langgraph/libs/langgraph/langgraph/channels/named_barrier_value.py | from typing import Generic, Optional, Sequence, Type
from typing_extensions import Self
from langgraph.channels.base import BaseChannel, Value
from langgraph.errors import EmptyChannelError, InvalidUpdateError
class NamedBarrierValue(Generic[Value], BaseChannel[Value, Value, set[Value]]):
    """A channel that waits until all named values are received before making the value available."""
    __slots__ = ("names", "seen")
    # Full set of values that must all be observed before the barrier opens.
    names: set[Value]
    # Values observed so far in the current round.
    seen: set[Value]
    def __init__(self, typ: Type[Value], names: set[Value]) -> None:
        super().__init__(typ)
        self.names = names
        self.seen: set[Value] = set()
    def __eq__(self, value: object) -> bool:
        # Channels are interchangeable when they wait on the same name set.
        return isinstance(value, NamedBarrierValue) and value.names == self.names
    @property
    def ValueType(self) -> Type[Value]:
        """The type of the value stored in the channel."""
        return self.typ
    @property
    def UpdateType(self) -> Type[Value]:
        """The type of the update received by the channel."""
        return self.typ
    def checkpoint(self) -> set[Value]:
        # Only the progress (seen values) needs to survive a checkpoint;
        # `names` is part of the channel's static configuration.
        return self.seen
    def from_checkpoint(self, checkpoint: Optional[set[Value]]) -> Self:
        empty = self.__class__(self.typ, self.names)
        empty.key = self.key
        if checkpoint is not None:
            empty.seen = checkpoint
        return empty
    def update(self, values: Sequence[Value]) -> bool:
        """Record received names; reject any value outside `names`.

        Returns True if a previously-unseen name was added.
        Raises InvalidUpdateError for values not in the configured name set.
        """
        updated = False
        for value in values:
            if value in self.names:
                if value not in self.seen:
                    self.seen.add(value)
                    updated = True
            else:
                raise InvalidUpdateError(
                    f"At key '{self.key}': Value {value} not in {self.names}"
                )
        return updated
    def get(self) -> Value:
        # The barrier carries no payload: raise until every name is seen,
        # then expose None as the (empty) value.
        if self.seen != self.names:
            raise EmptyChannelError()
        return None
    def consume(self) -> bool:
        # Reset the barrier for the next round once it has fully opened.
        if self.seen == self.names:
            self.seen = set()
            return True
        return False
|
0 | lc_public_repos/langgraph/libs/langgraph/langgraph | lc_public_repos/langgraph/libs/langgraph/langgraph/channels/base.py | from abc import ABC, abstractmethod
from typing import Any, Generic, Optional, Sequence, Type, TypeVar
from typing_extensions import Self
from langgraph.errors import EmptyChannelError, InvalidUpdateError
Value = TypeVar("Value")
Update = TypeVar("Update")
C = TypeVar("C")
class BaseChannel(Generic[Value, Update, C], ABC):
    """Abstract base class for Pregel channels.

    Type parameters: Value is what get() returns, Update is what update()
    receives, and C is the serializable checkpoint representation.
    """
    __slots__ = ("key", "typ")
    def __init__(self, typ: Type[Any], key: str = "") -> None:
        # typ: the declared type of the channel's contents.
        # key: the channel's name; assigned when attached to a graph.
        self.typ = typ
        self.key = key
    @property
    @abstractmethod
    def ValueType(self) -> Any:
        """The type of the value stored in the channel."""
    @property
    @abstractmethod
    def UpdateType(self) -> Any:
        """The type of the update received by the channel."""
    # serialize/deserialize methods
    def checkpoint(self) -> Optional[C]:
        """Return a serializable representation of the channel's current state.
        Raises EmptyChannelError if the channel is empty (never updated yet),
        or doesn't support checkpoints."""
        return self.get()
    @abstractmethod
    def from_checkpoint(self, checkpoint: Optional[C]) -> Self:
        """Return a new identical channel, optionally initialized from a checkpoint.
        If the checkpoint contains complex data structures, they should be copied."""
    # state methods
    @abstractmethod
    def update(self, values: Sequence[Update]) -> bool:
        """Update the channel's value with the given sequence of updates.
        The order of the updates in the sequence is arbitrary.
        This method is called by Pregel for all channels at the end of each step.
        If there are no updates, it is called with an empty sequence.
        Raises InvalidUpdateError if the sequence of updates is invalid.
        Returns True if the channel was updated, False otherwise."""
    @abstractmethod
    def get(self) -> Value:
        """Return the current value of the channel.
        Raises EmptyChannelError if the channel is empty (never updated yet)."""
    def consume(self) -> bool:
        """Mark the current value of the channel as consumed. By default, no-op.
        This is called by Pregel before the start of the next step, for all
        channels that triggered a node. If the channel was updated, return True.
        """
        return False
__all__ = [
"BaseChannel",
"EmptyChannelError",
"InvalidUpdateError",
]
|
0 | lc_public_repos/langgraph/libs/langgraph/langgraph | lc_public_repos/langgraph/libs/langgraph/langgraph/channels/topic.py | from typing import Any, Generic, Iterator, Optional, Sequence, Type, Union
from typing_extensions import Self
from langgraph.channels.base import BaseChannel, Value
from langgraph.errors import EmptyChannelError
def flatten(values: Sequence[Union[Value, list[Value]]]) -> Iterator[Value]:
    """Yield each update in order, expanding list updates into their elements."""
    for item in values:
        if not isinstance(item, list):
            yield item
        else:
            yield from item
class Topic(
    Generic[Value],
    BaseChannel[
        Sequence[Value], Union[Value, list[Value]], tuple[set[Value], list[Value]]
    ],
):
    """A configurable PubSub Topic.
    Args:
        typ: The type of the value stored in the channel.
        accumulate: Whether to accumulate values across steps. If False, the channel will be emptied after each step.
    """
    __slots__ = ("values", "accumulate")
    def __init__(self, typ: Type[Value], accumulate: bool = False) -> None:
        super().__init__(typ)
        # attrs
        self.accumulate = accumulate
        # state
        self.values = list[Value]()
    def __eq__(self, value: object) -> bool:
        return isinstance(value, Topic) and value.accumulate == self.accumulate
    @property
    def ValueType(self) -> Any:
        """The type of the value stored in the channel."""
        return Sequence[self.typ]  # type: ignore[name-defined]
    @property
    def UpdateType(self) -> Any:
        """The type of the update received by the channel."""
        return Union[self.typ, list[self.typ]]  # type: ignore[name-defined]
    def checkpoint(self) -> list[Value]:
        # Current checkpoints store just the list of values; from_checkpoint
        # still accepts the legacy (seen, values) tuple format.
        return self.values
    def from_checkpoint(self, checkpoint: Optional[list[Value]]) -> Self:
        empty = self.__class__(self.typ, self.accumulate)
        empty.key = self.key
        if checkpoint is not None:
            if isinstance(checkpoint, tuple):
                # legacy checkpoint format: (seen, values)
                empty.values = checkpoint[1]
            else:
                empty.values = checkpoint
        return empty
    def update(self, values: Sequence[Union[Value, list[Value]]]) -> bool:
        """Append the flattened updates, replacing prior values unless accumulating.

        Returns True if the stored values changed.
        """
        current = list(self.values)
        if not self.accumulate:
            self.values = list[Value]()
        # NOTE: flatten() returns a generator, which is always truthy, so the
        # previous `if flat_values := flatten(values):` guard was a no-op;
        # extending unconditionally is equivalent (and clearer).
        self.values.extend(flatten(values))
        return self.values != current
    def get(self) -> Sequence[Value]:
        if self.values:
            return list(self.values)
        else:
            raise EmptyChannelError
|
0 | lc_public_repos/langgraph/libs/langgraph/langgraph | lc_public_repos/langgraph/libs/langgraph/langgraph/channels/ephemeral_value.py | from typing import Any, Generic, Optional, Sequence, Type
from typing_extensions import Self
from langgraph.channels.base import BaseChannel, Value
from langgraph.errors import EmptyChannelError, InvalidUpdateError
class EphemeralValue(Generic[Value], BaseChannel[Value, Value, Value]):
    """Stores the value received in the step immediately preceding, clears after.

    With guard=True (the default) at most one writer may update the channel
    per step; with guard=False an arbitrary one of the written values wins.
    """

    __slots__ = ("value", "guard")

    def __init__(self, typ: Any, guard: bool = True) -> None:
        super().__init__(typ)
        self.guard = guard

    def __eq__(self, value: object) -> bool:
        # Channels are interchangeable when they share the guard setting.
        if not isinstance(value, EphemeralValue):
            return False
        return self.guard == value.guard

    @property
    def ValueType(self) -> Type[Value]:
        """The type of the value stored in the channel."""
        return self.typ

    @property
    def UpdateType(self) -> Type[Value]:
        """The type of the update received by the channel."""
        return self.typ

    def from_checkpoint(self, checkpoint: Optional[Value]) -> Self:
        clone = self.__class__(self.typ, self.guard)
        clone.key = self.key
        if checkpoint is not None:
            clone.value = checkpoint
        return clone

    def update(self, values: Sequence[Value]) -> bool:
        if not values:
            # No writes this step: drop any previously stored value.
            if hasattr(self, "value"):
                del self.value
                return True
            return False
        if self.guard and len(values) != 1:
            raise InvalidUpdateError(
                f"At key '{self.key}': EphemeralValue(guard=True) can receive only one value per step. Use guard=False if you want to store any one of multiple values."
            )
        self.value = values[-1]
        return True

    def get(self) -> Value:
        if hasattr(self, "value"):
            return self.value
        raise EmptyChannelError()
|
0 | lc_public_repos/langgraph/libs/langgraph/langgraph | lc_public_repos/langgraph/libs/langgraph/langgraph/channels/untracked_value.py | from typing import Generic, Optional, Sequence, Type
from typing_extensions import Self
from langgraph.channels.base import BaseChannel, Value
from langgraph.errors import EmptyChannelError, InvalidUpdateError
class UntrackedValue(Generic[Value], BaseChannel[Value, Value, Value]):
    """Stores the last value received, never checkpointed."""

    __slots__ = ("value", "guard")

    def __init__(self, typ: Type[Value], guard: bool = True) -> None:
        super().__init__(typ)
        self.guard = guard

    def __eq__(self, value: object) -> bool:
        # Channels are interchangeable when they share the guard setting.
        return isinstance(value, UntrackedValue) and self.guard == value.guard

    @property
    def ValueType(self) -> Type[Value]:
        """The type of the value stored in the channel."""
        return self.typ

    @property
    def UpdateType(self) -> Type[Value]:
        """The type of the update received by the channel."""
        return self.typ

    def checkpoint(self) -> Value:
        # Deliberately excluded from checkpoints: always report empty.
        raise EmptyChannelError()

    def from_checkpoint(self, checkpoint: Optional[Value]) -> Self:
        # The stored value is never restored; only configuration is copied.
        clone = self.__class__(self.typ, self.guard)
        clone.key = self.key
        return clone

    def update(self, values: Sequence[Value]) -> bool:
        if not values:
            return False
        if self.guard and len(values) != 1:
            raise InvalidUpdateError(
                f"At key '{self.key}': UntrackedValue(guard=True) can receive only one value per step. Use guard=False if you want to store any one of multiple values."
            )
        self.value = values[-1]
        return True

    def get(self) -> Value:
        if hasattr(self, "value"):
            return self.value
        raise EmptyChannelError()
|
0 | lc_public_repos/langgraph/libs/langgraph/langgraph | lc_public_repos/langgraph/libs/langgraph/langgraph/channels/last_value.py | from typing import Generic, Optional, Sequence, Type
from typing_extensions import Self
from langgraph.channels.base import BaseChannel, Value
from langgraph.errors import (
EmptyChannelError,
ErrorCode,
InvalidUpdateError,
create_error_message,
)
class LastValue(Generic[Value], BaseChannel[Value, Value, Value]):
    """Stores the last value received, can receive at most one value per step."""

    __slots__ = ("value",)

    def __eq__(self, value: object) -> bool:
        return isinstance(value, LastValue)

    @property
    def ValueType(self) -> Type[Value]:
        """The type of the value stored in the channel."""
        return self.typ

    @property
    def UpdateType(self) -> Type[Value]:
        """The type of the update received by the channel."""
        return self.typ

    def from_checkpoint(self, checkpoint: Optional[Value]) -> Self:
        clone = self.__class__(self.typ)
        clone.key = self.key
        if checkpoint is not None:
            clone.value = checkpoint
        return clone

    def update(self, values: Sequence[Value]) -> bool:
        if not values:
            return False
        if len(values) > 1:
            # Concurrent writes to a LastValue channel are a user error;
            # surface it with the troubleshooting error code attached.
            msg = create_error_message(
                message=f"At key '{self.key}': Can receive only one value per step. Use an Annotated key to handle multiple values.",
                error_code=ErrorCode.INVALID_CONCURRENT_GRAPH_UPDATE,
            )
            raise InvalidUpdateError(msg)
        self.value = values[-1]
        return True

    def get(self) -> Value:
        if hasattr(self, "value"):
            return self.value
        raise EmptyChannelError()
|
0 | lc_public_repos/langgraph/libs/langgraph/langgraph | lc_public_repos/langgraph/libs/langgraph/langgraph/channels/binop.py | import collections.abc
from typing import (
Callable,
Generic,
Optional,
Sequence,
Type,
)
from typing_extensions import NotRequired, Required, Self
from langgraph.channels.base import BaseChannel, Value
from langgraph.errors import EmptyChannelError
# Adapted from typing_extensions
def _strip_extras(t): # type: ignore[no-untyped-def]
"""Strips Annotated, Required and NotRequired from a given type."""
if hasattr(t, "__origin__"):
return _strip_extras(t.__origin__)
if hasattr(t, "__origin__") and t.__origin__ in (Required, NotRequired):
return _strip_extras(t.__args__[0])
return t
class BinaryOperatorAggregate(Generic[Value], BaseChannel[Value, Value, Value]):
    """Stores the result of applying a binary operator to the current value and each new value.
    ```python
    import operator
    total = Channels.BinaryOperatorAggregate(int, operator.add)
    ```
    """
    __slots__ = ("value", "operator")
    def __init__(self, typ: Type[Value], operator: Callable[[Value, Value], Value]):
        super().__init__(typ)
        self.operator = operator
        # special forms from typing or collections.abc are not instantiable
        # so we need to replace them with their concrete counterparts
        typ = _strip_extras(typ)
        if typ in (collections.abc.Sequence, collections.abc.MutableSequence):
            typ = list
        if typ in (collections.abc.Set, collections.abc.MutableSet):
            typ = set
        if typ in (collections.abc.Mapping, collections.abc.MutableMapping):
            typ = dict
        try:
            # seed the aggregate with the type's zero value (e.g. 0, [], {});
            # if the type can't be default-constructed, start empty instead
            # and the first update will become the seed (see update()).
            self.value = typ()
        except Exception:
            pass
    def __eq__(self, value: object) -> bool:
        # Two aggregates compare equal when they use the same operator object;
        # lambdas can't be compared meaningfully, so if either operator is a
        # lambda the channels are treated as equal.
        return isinstance(value, BinaryOperatorAggregate) and (
            value.operator is self.operator
            if value.operator.__name__ != "<lambda>"
            and self.operator.__name__ != "<lambda>"
            else True
        )
    @property
    def ValueType(self) -> Type[Value]:
        """The type of the value stored in the channel."""
        return self.typ
    @property
    def UpdateType(self) -> Type[Value]:
        """The type of the update received by the channel."""
        return self.typ
    def from_checkpoint(self, checkpoint: Optional[Value]) -> Self:
        empty = self.__class__(self.typ, self.operator)
        empty.key = self.key
        if checkpoint is not None:
            empty.value = checkpoint
        return empty
    def update(self, values: Sequence[Value]) -> bool:
        """Fold the incoming values into the aggregate with the operator."""
        if not values:
            return False
        if not hasattr(self, "value"):
            # no seed value yet: use the first update as the starting point
            self.value = values[0]
            values = values[1:]
        for value in values:
            self.value = self.operator(self.value, value)
        return True
    def get(self) -> Value:
        try:
            return self.value
        except AttributeError:
            raise EmptyChannelError()
|
0 | lc_public_repos/langgraph/libs/langgraph/langgraph | lc_public_repos/langgraph/libs/langgraph/langgraph/channels/dynamic_barrier_value.py | from typing import Any, Generic, NamedTuple, Optional, Sequence, Type, Union
from typing_extensions import Self
from langgraph.channels.base import BaseChannel, Value
from langgraph.errors import EmptyChannelError, InvalidUpdateError
class WaitForNames(NamedTuple):
    """Sentinel update that switches a DynamicBarrierValue into the "waiting" state."""

    names: set[Any]


class DynamicBarrierValue(
    Generic[Value], BaseChannel[Value, Union[Value, WaitForNames], set[Value]]
):
    """A channel that switches between two states
    - in the "priming" state it can't be read from.
    - if it receives a WaitForNames update, it switches to the "waiting" state.
    - in the "waiting" state it collects named values until all are received.
    - once all named values are received, it can be read once, and it switches
      back to the "priming" state.
    """

    __slots__ = ("names", "seen")

    # None while priming; the set of names to wait for while waiting.
    names: Optional[set[Value]]
    # Names observed so far in the current round.
    seen: set[Value]

    def __init__(self, typ: Type[Value]) -> None:
        super().__init__(typ)
        self.names = None
        self.seen = set()

    def __eq__(self, value: object) -> bool:
        return isinstance(value, DynamicBarrierValue) and value.names == self.names

    @property
    def ValueType(self) -> Type[Value]:
        """The type of the value stored in the channel."""
        return self.typ

    @property
    def UpdateType(self) -> Type[Value]:
        """The type of the update received by the channel."""
        return self.typ

    def checkpoint(self) -> tuple[Optional[set[Value]], set[Value]]:
        return (self.names, self.seen)

    def from_checkpoint(
        self,
        checkpoint: Optional[tuple[Optional[set[Value]], set[Value]]],
    ) -> Self:
        empty = self.__class__(self.typ)
        empty.key = self.key
        if checkpoint is not None:
            names, seen = checkpoint
            # simplified: the previous `names if names is not None else None`
            # was equivalent to just assigning `names`
            empty.names = names
            empty.seen = seen
        return empty

    def update(self, values: Sequence[Union[Value, WaitForNames]]) -> bool:
        """Process a WaitForNames transition, or record named values.

        Returns True if the state changed; False otherwise (including when
        updates arrive while still priming, which are ignored).
        Raises InvalidUpdateError on multiple WaitForNames in one step, or on
        a value outside the awaited name set.
        """
        if wait_for_names := [v for v in values if isinstance(v, WaitForNames)]:
            if len(wait_for_names) > 1:
                raise InvalidUpdateError(
                    f"At key '{self.key}': Received multiple WaitForNames updates in the same step."
                )
            self.names = wait_for_names[0].names
            return True
        elif self.names is not None:
            updated = False
            for value in values:
                assert not isinstance(value, WaitForNames)
                if value in self.names:
                    if value not in self.seen:
                        self.seen.add(value)
                        updated = True
                else:
                    # now includes the channel key, consistent with the other
                    # channel error messages (e.g. NamedBarrierValue)
                    raise InvalidUpdateError(
                        f"At key '{self.key}': Value {value} not in {self.names}"
                    )
            return updated
        else:
            # still priming and no WaitForNames received: nothing changed
            # (previously an implicit `return None`; False matches the
            # declared bool return type and is falsy-equivalent)
            return False

    def get(self) -> Value:
        # The barrier carries no payload: raise until every name is seen,
        # then expose None as the (empty) value.
        if self.seen != self.names:
            raise EmptyChannelError()
        return None

    def consume(self) -> bool:
        # Reset to the "priming" state once the barrier has fully opened.
        if self.seen == self.names:
            self.seen = set()
            self.names = None
            return True
        return False
|
0 | lc_public_repos/langgraph/libs/langgraph/langgraph | lc_public_repos/langgraph/libs/langgraph/langgraph/channels/context.py | from langgraph.managed.context import Context as ContextManagedValue
Context = ContextManagedValue.of
__all__ = ["Context"]
|
0 | lc_public_repos/langgraph/libs/langgraph/langgraph | lc_public_repos/langgraph/libs/langgraph/langgraph/channels/__init__.py | from langgraph.channels.any_value import AnyValue
from langgraph.channels.binop import BinaryOperatorAggregate
from langgraph.channels.context import Context
from langgraph.channels.ephemeral_value import EphemeralValue
from langgraph.channels.last_value import LastValue
from langgraph.channels.topic import Topic
from langgraph.channels.untracked_value import UntrackedValue
__all__ = [
"LastValue",
"Topic",
"Context",
"BinaryOperatorAggregate",
"UntrackedValue",
"EphemeralValue",
"AnyValue",
]
|
0 | lc_public_repos/langgraph/libs/langgraph/langgraph | lc_public_repos/langgraph/libs/langgraph/langgraph/channels/any_value.py | from typing import Generic, Optional, Sequence, Type
from typing_extensions import Self
from langgraph.channels.base import BaseChannel, Value
from langgraph.errors import EmptyChannelError
class AnyValue(Generic[Value], BaseChannel[Value, Value, Value]):
    """Stores the last value received, assumes that if multiple values are
    received, they are all equal."""

    # NOTE: "typ" is already declared in BaseChannel.__slots__; redeclaring it
    # here created a duplicate slot descriptor that shadowed the base class's
    # slot and wasted per-instance storage, so only "value" is added.
    __slots__ = ("value",)

    def __eq__(self, value: object) -> bool:
        return isinstance(value, AnyValue)

    @property
    def ValueType(self) -> Type[Value]:
        """The type of the value stored in the channel."""
        return self.typ

    @property
    def UpdateType(self) -> Type[Value]:
        """The type of the update received by the channel."""
        return self.typ

    def from_checkpoint(self, checkpoint: Optional[Value]) -> Self:
        empty = self.__class__(self.typ)
        empty.key = self.key
        if checkpoint is not None:
            empty.value = checkpoint
        return empty

    def update(self, values: Sequence[Value]) -> bool:
        """Store the last received value; clear it when no values arrive."""
        if len(values) == 0:
            try:
                del self.value
                return True
            except AttributeError:
                return False
        # all updates in a step are assumed equal, so keeping the last suffices
        self.value = values[-1]
        return True

    def get(self) -> Value:
        try:
            return self.value
        except AttributeError:
            raise EmptyChannelError()
|
0 | lc_public_repos/langgraph/libs/langgraph/langgraph | lc_public_repos/langgraph/libs/langgraph/langgraph/pregel/loop.py | import asyncio
import concurrent.futures
from collections import defaultdict, deque
from contextlib import AsyncExitStack, ExitStack
from types import TracebackType
from typing import (
Any,
AsyncContextManager,
Callable,
ContextManager,
Iterator,
List,
Literal,
Mapping,
Optional,
Sequence,
Type,
TypeVar,
Union,
cast,
)
from langchain_core.callbacks import AsyncParentRunManager, ParentRunManager
from langchain_core.runnables import RunnableConfig
from typing_extensions import ParamSpec, Self
from langgraph.channels.base import BaseChannel
from langgraph.checkpoint.base import (
WRITES_IDX_MAP,
BaseCheckpointSaver,
ChannelVersions,
Checkpoint,
CheckpointMetadata,
CheckpointTuple,
PendingWrite,
copy_checkpoint,
create_checkpoint,
empty_checkpoint,
)
from langgraph.constants import (
CONF,
CONFIG_KEY_CHECKPOINT_ID,
CONFIG_KEY_CHECKPOINT_MAP,
CONFIG_KEY_CHECKPOINT_NS,
CONFIG_KEY_DEDUPE_TASKS,
CONFIG_KEY_DELEGATE,
CONFIG_KEY_ENSURE_LATEST,
CONFIG_KEY_RESUMING,
CONFIG_KEY_STREAM,
CONFIG_KEY_TASK_ID,
EMPTY_SEQ,
ERROR,
INPUT,
INTERRUPT,
NS_SEP,
NULL_TASK_ID,
PUSH,
RESUME,
SCHEDULED,
TAG_HIDDEN,
)
from langgraph.errors import (
_SEEN_CHECKPOINT_NS,
CheckpointNotLatest,
EmptyInputError,
GraphDelegate,
GraphInterrupt,
MultipleSubgraphsError,
)
from langgraph.managed.base import (
ManagedValueMapping,
ManagedValueSpec,
WritableManagedValue,
)
from langgraph.pregel.algo import (
GetNextVersion,
PregelTaskWrites,
apply_writes,
increment,
prepare_next_tasks,
prepare_single_task,
should_interrupt,
)
from langgraph.pregel.debug import (
map_debug_checkpoint,
map_debug_task_results,
map_debug_tasks,
print_step_checkpoint,
print_step_tasks,
print_step_writes,
)
from langgraph.pregel.executor import (
AsyncBackgroundExecutor,
BackgroundExecutor,
Submit,
)
from langgraph.pregel.io import (
map_command,
map_input,
map_output_updates,
map_output_values,
read_channels,
single,
)
from langgraph.pregel.manager import AsyncChannelsManager, ChannelsManager
from langgraph.pregel.read import PregelNode
from langgraph.pregel.utils import get_new_channel_versions
from langgraph.store.base import BaseStore
from langgraph.types import (
All,
Command,
LoopProtocol,
PregelExecutableTask,
StreamChunk,
StreamProtocol,
)
from langgraph.utils.config import patch_configurable
V = TypeVar("V")
P = ParamSpec("P")
INPUT_DONE = object()
INPUT_RESUMING = object()
SPECIAL_CHANNELS = (ERROR, INTERRUPT, SCHEDULED)
def DuplexStream(*streams: StreamProtocol) -> StreamProtocol:
    """Combine several streams into one that fans each chunk out to every
    underlying stream whose modes include the chunk's mode (value[1])."""
    combined_modes = {mode for s in streams for mode in s.modes}

    def dispatch(value: StreamChunk) -> None:
        for target in streams:
            if value[1] in target.modes:
                target(value)

    return StreamProtocol(dispatch, combined_modes)
class PregelLoop(LoopProtocol):
input: Optional[Any]
checkpointer: Optional[BaseCheckpointSaver]
nodes: Mapping[str, PregelNode]
specs: Mapping[str, Union[BaseChannel, ManagedValueSpec]]
output_keys: Union[str, Sequence[str]]
stream_keys: Union[str, Sequence[str]]
skip_done_tasks: bool
is_nested: bool
manager: Union[None, AsyncParentRunManager, ParentRunManager]
interrupt_after: Union[All, Sequence[str]]
interrupt_before: Union[All, Sequence[str]]
checkpointer_get_next_version: GetNextVersion
checkpointer_put_writes: Optional[
Callable[[RunnableConfig, Sequence[tuple[str, Any]], str], Any]
]
_checkpointer_put_after_previous: Optional[
Callable[
[
Optional[concurrent.futures.Future],
RunnableConfig,
Sequence[tuple[str, Any]],
str,
ChannelVersions,
],
Any,
]
]
submit: Submit
channels: Mapping[str, BaseChannel]
managed: ManagedValueMapping
checkpoint: Checkpoint
checkpoint_ns: tuple[str, ...]
checkpoint_config: RunnableConfig
checkpoint_metadata: CheckpointMetadata
checkpoint_pending_writes: List[PendingWrite]
checkpoint_previous_versions: dict[str, Union[str, float, int]]
prev_checkpoint_config: Optional[RunnableConfig]
status: Literal[
"pending", "done", "interrupt_before", "interrupt_after", "out_of_steps"
]
tasks: dict[str, PregelExecutableTask]
to_interrupt: list[PregelExecutableTask]
output: Union[None, dict[str, Any], Any] = None
# public
def __init__(
self,
input: Optional[Any],
*,
stream: Optional[StreamProtocol],
config: RunnableConfig,
store: Optional[BaseStore],
checkpointer: Optional[BaseCheckpointSaver],
nodes: Mapping[str, PregelNode],
specs: Mapping[str, Union[BaseChannel, ManagedValueSpec]],
output_keys: Union[str, Sequence[str]],
stream_keys: Union[str, Sequence[str]],
interrupt_after: Union[All, Sequence[str]] = EMPTY_SEQ,
interrupt_before: Union[All, Sequence[str]] = EMPTY_SEQ,
manager: Union[None, AsyncParentRunManager, ParentRunManager] = None,
check_subgraphs: bool = True,
debug: bool = False,
) -> None:
super().__init__(
step=0,
stop=0,
config=config,
stream=stream,
store=store,
)
self.input = input
self.checkpointer = checkpointer
self.nodes = nodes
self.specs = specs
self.output_keys = output_keys
self.stream_keys = stream_keys
self.interrupt_after = interrupt_after
self.interrupt_before = interrupt_before
self.manager = manager
self.is_nested = CONFIG_KEY_TASK_ID in self.config.get(CONF, {})
self.skip_done_tasks = (
CONFIG_KEY_CHECKPOINT_ID not in config[CONF]
or CONFIG_KEY_DEDUPE_TASKS in config[CONF]
)
self.debug = debug
if self.stream is not None and CONFIG_KEY_STREAM in config[CONF]:
self.stream = DuplexStream(self.stream, config[CONF][CONFIG_KEY_STREAM])
if not self.is_nested and config[CONF].get(CONFIG_KEY_CHECKPOINT_NS):
self.config = patch_configurable(
self.config,
{CONFIG_KEY_CHECKPOINT_NS: "", CONFIG_KEY_CHECKPOINT_ID: None},
)
if check_subgraphs and self.is_nested and self.checkpointer is not None:
if self.config[CONF][CONFIG_KEY_CHECKPOINT_NS] in _SEEN_CHECKPOINT_NS:
raise MultipleSubgraphsError(
"Multiple subgraphs called inside the same node\n\n"
"Troubleshooting URL: https://python.langchain.com/docs"
"/troubleshooting/errors/MULTIPLE_SUBGRAPHS/"
)
else:
_SEEN_CHECKPOINT_NS.add(self.config[CONF][CONFIG_KEY_CHECKPOINT_NS])
if (
CONFIG_KEY_CHECKPOINT_MAP in self.config[CONF]
and self.config[CONF].get(CONFIG_KEY_CHECKPOINT_NS)
in self.config[CONF][CONFIG_KEY_CHECKPOINT_MAP]
):
self.checkpoint_config = patch_configurable(
self.config,
{
CONFIG_KEY_CHECKPOINT_ID: config[CONF][CONFIG_KEY_CHECKPOINT_MAP][
self.config[CONF][CONFIG_KEY_CHECKPOINT_NS]
]
},
)
else:
self.checkpoint_config = config
self.checkpoint_ns = (
tuple(cast(str, self.config[CONF][CONFIG_KEY_CHECKPOINT_NS]).split(NS_SEP))
if self.config[CONF].get(CONFIG_KEY_CHECKPOINT_NS)
else ()
)
self.prev_checkpoint_config = None
def put_writes(self, task_id: str, writes: Sequence[tuple[str, Any]]) -> None:
"""Put writes for a task, to be read by the next tick."""
if not writes:
return
# deduplicate writes to special channels, last write wins
if all(w[0] in WRITES_IDX_MAP for w in writes):
writes = list({w[0]: w for w in writes}.values())
# save writes
for c, v in writes:
if (
c in WRITES_IDX_MAP
and (
idx := next(
(
i
for i, w in enumerate(self.checkpoint_pending_writes)
if w[0] == task_id and w[1] == c
),
None,
)
)
is not None
):
self.checkpoint_pending_writes[idx] = (task_id, c, v)
else:
self.checkpoint_pending_writes.append((task_id, c, v))
if self.checkpointer_put_writes is not None:
self.submit(
self.checkpointer_put_writes,
{
**self.checkpoint_config,
CONF: {
**self.checkpoint_config[CONF],
CONFIG_KEY_CHECKPOINT_NS: self.config[CONF].get(
CONFIG_KEY_CHECKPOINT_NS, ""
),
CONFIG_KEY_CHECKPOINT_ID: self.checkpoint["id"],
},
},
writes,
task_id,
)
# output writes
if hasattr(self, "tasks"):
self._output_writes(task_id, writes)
def accept_push(
self, task: PregelExecutableTask, write_idx: int
) -> Optional[PregelExecutableTask]:
"""Accept a PUSH from a task, potentially returning a new task to start."""
# don't start if an earlier PUSH has already triggered an interrupt
if self.to_interrupt:
return
# don't start if we should interrupt *after* the original task
if should_interrupt(self.checkpoint, self.interrupt_after, [task]):
self.to_interrupt.append(task)
return
if pushed := cast(
Optional[PregelExecutableTask],
prepare_single_task(
(PUSH, task.path, write_idx, task.id),
None,
checkpoint=self.checkpoint,
pending_writes=[(task.id, *w) for w in task.writes],
processes=self.nodes,
channels=self.channels,
managed=self.managed,
config=self.config,
step=self.step,
for_execution=True,
store=self.store,
checkpointer=self.checkpointer,
manager=self.manager,
),
):
# don't start if we should interrupt *before* the new task
if should_interrupt(self.checkpoint, self.interrupt_before, [pushed]):
self.to_interrupt.append(pushed)
return
# produce debug output
self._emit("debug", map_debug_tasks, self.step, [pushed])
# debug flag
if self.debug:
print_step_tasks(self.step, [pushed])
# save the new task
self.tasks[pushed.id] = pushed
# match any pending writes to the new task
if self.skip_done_tasks:
self._match_writes({pushed.id: pushed})
# return the new task, to be started, if not run before
if not pushed.writes:
return pushed
    def tick(
        self,
        *,
        input_keys: Union[str, Sequence[str]],
    ) -> bool:
        """Execute a single iteration of the Pregel loop.

        On the first call the input is mapped to channel writes; on subsequent
        calls finished tasks' writes are applied to the channels, a checkpoint
        is saved, and the next set of tasks is prepared.

        Args:
            input_keys: Channel key(s) that graph input is written to.

        Returns:
            True if more iterations are needed (i.e. there are tasks to run).

        Raises:
            RuntimeError: If called when status is no longer "pending".
            GraphInterrupt: When an interrupt_before/interrupt_after applies.
            GraphDelegate: When execution should be delegated (distributed mode).
        """
        if self.status != "pending":
            raise RuntimeError("Cannot tick when status is no longer 'pending'")
        if self.input not in (INPUT_DONE, INPUT_RESUMING):
            # first tick: process the input
            self._first(input_keys=input_keys)
        elif self.to_interrupt:
            # if we need to interrupt, do so
            self.status = "interrupt_before"
            raise GraphInterrupt()
        elif all(task.writes for task in self.tasks.values()):
            writes = [w for t in self.tasks.values() for w in t.writes]
            # debug flag
            if self.debug:
                print_step_writes(
                    self.step,
                    writes,
                    (
                        [self.stream_keys]
                        if isinstance(self.stream_keys, str)
                        else self.stream_keys
                    ),
                )
            # all tasks have finished
            mv_writes = apply_writes(
                self.checkpoint,
                self.channels,
                self.tasks.values(),
                self.checkpointer_get_next_version,
            )
            # apply writes to managed values
            for key, values in mv_writes.items():
                self._update_mv(key, values)
            # produce values output
            self._emit(
                "values", map_output_values, self.output_keys, writes, self.channels
            )
            # clear pending writes
            self.checkpoint_pending_writes.clear()
            # "not skip_done_tasks" only applies to first tick after resuming
            self.skip_done_tasks = True
            # save checkpoint
            self._put_checkpoint(
                {
                    "source": "loop",
                    "writes": single(
                        map_output_updates(
                            self.output_keys,
                            [(t, t.writes) for t in self.tasks.values()],
                        )
                    ),
                }
            )
            # after execution, check if we should interrupt
            if should_interrupt(
                self.checkpoint, self.interrupt_after, self.tasks.values()
            ):
                self.status = "interrupt_after"
                raise GraphInterrupt()
        else:
            return False
        # check if iteration limit is reached
        if self.step > self.stop:
            self.status = "out_of_steps"
            return False
        # apply NULL writes
        if null_writes := [
            w[1:] for w in self.checkpoint_pending_writes if w[0] == NULL_TASK_ID
        ]:
            mv_writes = apply_writes(
                self.checkpoint,
                self.channels,
                [PregelTaskWrites((), INPUT, null_writes, [])],
                self.checkpointer_get_next_version,
            )
            for key, values in mv_writes.items():
                self._update_mv(key, values)
        # prepare next tasks
        self.tasks = prepare_next_tasks(
            self.checkpoint,
            self.checkpoint_pending_writes,
            self.nodes,
            self.channels,
            self.managed,
            self.config,
            self.step,
            for_execution=True,
            manager=self.manager,
            store=self.store,
            checkpointer=self.checkpointer,
        )
        self.to_interrupt = []
        # produce debug output
        if self._checkpointer_put_after_previous is not None:
            self._emit(
                "debug",
                map_debug_checkpoint,
                self.step - 1,  # printing checkpoint for previous step
                self.checkpoint_config,
                self.channels,
                self.stream_keys,
                self.checkpoint_metadata,
                self.checkpoint,
                self.tasks.values(),
                self.checkpoint_pending_writes,
                self.prev_checkpoint_config,
                self.output_keys,
            )
        # if no more tasks, we're done
        if not self.tasks:
            self.status = "done"
            return False
        # check if we should delegate (used by subgraphs in distributed mode)
        if self.config[CONF].get(CONFIG_KEY_DELEGATE):
            assert self.input is INPUT_RESUMING
            raise GraphDelegate(
                {
                    "config": patch_configurable(
                        self.config, {CONFIG_KEY_DELEGATE: False}
                    ),
                    "input": None,
                }
            )
        # if there are pending writes from a previous loop, apply them
        if self.skip_done_tasks and self.checkpoint_pending_writes:
            self._match_writes(self.tasks)
        # if all tasks have finished, re-tick
        if all(task.writes for task in self.tasks.values()):
            return self.tick(input_keys=input_keys)
        # before execution, check if we should interrupt
        if should_interrupt(
            self.checkpoint, self.interrupt_before, self.tasks.values()
        ):
            self.status = "interrupt_before"
            raise GraphInterrupt()
        # produce debug output
        self._emit("debug", map_debug_tasks, self.step, self.tasks.values())
        # debug flag
        if self.debug:
            print_step_tasks(self.step, list(self.tasks.values()))
        # print output for any tasks we applied previous writes to
        for task in self.tasks.values():
            if task.writes:
                self._output_writes(task.id, task.writes, cached=True)
        return True
# private
def _match_writes(self, tasks: Mapping[str, PregelExecutableTask]) -> None:
for tid, k, v in self.checkpoint_pending_writes:
if k in (ERROR, INTERRUPT, RESUME):
continue
if task := tasks.get(tid):
if k == SCHEDULED:
if v == max(
self.checkpoint["versions_seen"].get(INTERRUPT, {}).values(),
default=None,
):
self.tasks[tid] = task._replace(scheduled=True)
else:
task.writes.append((k, v))
    def _first(self, *, input_keys: Union[str, Sequence[str]]) -> None:
        """Process the loop's input on the first tick.

        Depending on the input, either resumes from a previous checkpoint,
        maps a Command to writes, or maps the input to channel writes and
        saves an "input" checkpoint.

        Raises:
            EmptyInputError: If a Command maps to no writes, or no input was
                received and we are not resuming.
            GraphDelegate: If delegation is requested for this subgraph.
        """
        # resuming from previous checkpoint requires
        # - finding a previous checkpoint
        # - receiving None input (outer graph) or RESUMING flag (subgraph)
        configurable = self.config.get(CONF, {})
        is_resuming = bool(self.checkpoint["channel_versions"]) and bool(
            configurable.get(CONFIG_KEY_RESUMING, self.input is None)
        )
        # proceed past previous checkpoint
        if is_resuming:
            # mark all current channel versions as seen by INTERRUPT,
            # so resuming doesn't re-trigger an interrupt
            self.checkpoint["versions_seen"].setdefault(INTERRUPT, {})
            for k in self.channels:
                if k in self.checkpoint["channel_versions"]:
                    version = self.checkpoint["channel_versions"][k]
                    self.checkpoint["versions_seen"][INTERRUPT][k] = version
            # produce values output
            self._emit(
                "values", map_output_values, self.output_keys, True, self.channels
            )
        # map command to writes
        elif isinstance(self.input, Command):
            writes: defaultdict[str, list[tuple[str, Any]]] = defaultdict(list)
            # group writes by task ID
            for tid, c, v in map_command(self.input, self.checkpoint_pending_writes):
                writes[tid].append((c, v))
            if not writes:
                raise EmptyInputError("Received empty Command input")
            # save writes
            for tid, ws in writes.items():
                self.put_writes(tid, ws)
        # map inputs to channel updates
        elif input_writes := deque(map_input(input_keys, self.input)):
            # TODO shouldn't these writes be passed to put_writes too?
            # check if we should delegate (used by subgraphs in distributed mode)
            if self.config[CONF].get(CONFIG_KEY_DELEGATE):
                raise GraphDelegate(
                    {
                        "config": patch_configurable(
                            self.config, {CONFIG_KEY_DELEGATE: False}
                        ),
                        "input": self.input,
                    }
                )
            # discard any unfinished tasks from previous checkpoint
            discard_tasks = prepare_next_tasks(
                self.checkpoint,
                self.checkpoint_pending_writes,
                self.nodes,
                self.channels,
                self.managed,
                self.config,
                self.step,
                for_execution=True,
                store=None,
                checkpointer=None,
                manager=None,
            )
            # apply input writes
            mv_writes = apply_writes(
                self.checkpoint,
                self.channels,
                [
                    *discard_tasks.values(),
                    PregelTaskWrites((), INPUT, input_writes, []),
                ],
                self.checkpointer_get_next_version,
            )
            assert not mv_writes, "Can't write to SharedValues in graph input"
            # save input checkpoint
            self._put_checkpoint({"source": "input", "writes": dict(input_writes)})
        elif CONFIG_KEY_RESUMING not in configurable:
            raise EmptyInputError(f"Received no input for {input_keys}")
        # done with input
        self.input = INPUT_RESUMING if is_resuming else INPUT_DONE
        # update config
        if not self.is_nested:
            self.config = patch_configurable(
                self.config, {CONFIG_KEY_RESUMING: is_resuming}
            )
    def _put_checkpoint(self, metadata: CheckpointMetadata) -> None:
        """Create a new checkpoint and schedule saving it, without blocking.

        Fills in metadata defaults from the config, creates the checkpoint,
        and, when a checkpointer is configured, submits the save chained after
        any in-flight save so the checkpointer receives checkpoints in order.
        Always increments `self.step`.
        """
        # fill in metadata defaults from config metadata
        for k, v in self.config["metadata"].items():
            metadata.setdefault(k, v) # type: ignore
        # assign step and parents
        metadata["step"] = self.step
        metadata["parents"] = self.config[CONF].get(CONFIG_KEY_CHECKPOINT_MAP, {})
        # debug flag
        if self.debug:
            print_step_checkpoint(
                metadata,
                self.channels,
                (
                    [self.stream_keys]
                    if isinstance(self.stream_keys, str)
                    else self.stream_keys
                ),
            )
        # create new checkpoint
        self.checkpoint = create_checkpoint(self.checkpoint, self.channels, self.step)
        # save checkpoint, but only if a checkpointer is configured
        if self._checkpointer_put_after_previous is not None:
            self.checkpoint_metadata = metadata
            # remember previous checkpoint config, if it had a checkpoint id
            self.prev_checkpoint_config = (
                self.checkpoint_config
                if CONFIG_KEY_CHECKPOINT_ID in self.checkpoint_config[CONF]
                and self.checkpoint_config[CONF][CONFIG_KEY_CHECKPOINT_ID]
                else None
            )
            self.checkpoint_config = {
                **self.checkpoint_config,
                CONF: {
                    **self.checkpoint_config[CONF],
                    CONFIG_KEY_CHECKPOINT_NS: self.config[CONF].get(
                        CONFIG_KEY_CHECKPOINT_NS, ""
                    ),
                },
            }
            # compute which channel versions changed since the last save
            channel_versions = self.checkpoint["channel_versions"].copy()
            new_versions = get_new_channel_versions(
                self.checkpoint_previous_versions, channel_versions
            )
            self.checkpoint_previous_versions = channel_versions
            # save it, without blocking
            # if there's a previous checkpoint save in progress, wait for it
            # ensuring checkpointers receive checkpoints in order
            self._put_checkpoint_fut = self.submit(
                self._checkpointer_put_after_previous,
                getattr(self, "_put_checkpoint_fut", None),
                self.checkpoint_config,
                copy_checkpoint(self.checkpoint),
                self.checkpoint_metadata,
                new_versions,
            )
            self.checkpoint_config = {
                **self.checkpoint_config,
                CONF: {
                    **self.checkpoint_config[CONF],
                    CONFIG_KEY_CHECKPOINT_ID: self.checkpoint["id"],
                },
            }
        # increment step
        self.step += 1
    def _update_mv(self, key: str, values: Sequence[Any]) -> None:
        """Apply writes to the managed value `key`; implemented by sync/async subclasses."""
        raise NotImplementedError
    def _suppress_interrupt(
        self,
        exc_type: Optional[Type[BaseException]],
        exc_value: Optional[BaseException],
        traceback: Optional[TracebackType],
    ) -> Optional[bool]:
        """Exit-stack callback: capture final output, suppress GraphInterrupt.

        For the outermost (non-nested) loop a GraphInterrupt is swallowed
        after saving the final output and emitting a last "values" event plus
        an INTERRUPT "updates" event; nested loops let it propagate so the
        parent can handle it.
        """
        suppress = isinstance(exc_value, GraphInterrupt) and not self.is_nested
        if suppress or exc_type is None:
            # save final output
            self.output = read_channels(self.channels, self.output_keys)
        if suppress:
            # emit one last "values" event, with pending writes applied
            if (
                hasattr(self, "tasks")
                and self.checkpoint_pending_writes
                and any(task.writes for task in self.tasks.values())
            ):
                mv_writes = apply_writes(
                    self.checkpoint,
                    self.channels,
                    self.tasks.values(),
                    self.checkpointer_get_next_version,
                )
                for key, values in mv_writes.items():
                    self._update_mv(key, values)
                self._emit(
                    "values",
                    map_output_values,
                    self.output_keys,
                    [w for t in self.tasks.values() for w in t.writes],
                    self.channels,
                )
            # emit INTERRUPT event
            self._emit(
                "updates",
                lambda: iter([{INTERRUPT: cast(GraphInterrupt, exc_value).args[0]}]),
            )
            # suppress interrupt
            return True
def _emit(
self,
mode: str,
values: Callable[P, Iterator[Any]],
*args: P.args,
**kwargs: P.kwargs,
) -> None:
if self.stream is None:
return
if mode not in self.stream.modes:
return
for v in values(*args, **kwargs):
self.stream((self.checkpoint_ns, mode, v))
def _output_writes(
self, task_id: str, writes: Sequence[tuple[str, Any]], *, cached: bool = False
) -> None:
if task := self.tasks.get(task_id):
if task.config is not None and TAG_HIDDEN in task.config.get(
"tags", EMPTY_SEQ
):
return
if writes[0][0] != ERROR and writes[0][0] != INTERRUPT:
self._emit(
"updates",
map_output_updates,
self.output_keys,
[(task, writes)],
cached,
)
if not cached:
self._emit(
"debug",
map_debug_task_results,
self.step,
(task, writes),
self.stream_keys,
)
class SyncPregelLoop(PregelLoop, ContextManager):
    """Synchronous Pregel loop, used as a context manager.

    Uses an ExitStack for resource cleanup, a BackgroundExecutor (entered in
    __enter__) for `submit`, and the checkpointer's synchronous methods
    (`get_tuple`, `put`, `put_writes`).
    """
    def __init__(
        self,
        input: Optional[Any],
        *,
        stream: Optional[StreamProtocol],
        config: RunnableConfig,
        store: Optional[BaseStore],
        checkpointer: Optional[BaseCheckpointSaver],
        nodes: Mapping[str, PregelNode],
        specs: Mapping[str, Union[BaseChannel, ManagedValueSpec]],
        manager: Union[None, AsyncParentRunManager, ParentRunManager] = None,
        interrupt_after: Union[All, Sequence[str]] = EMPTY_SEQ,
        interrupt_before: Union[All, Sequence[str]] = EMPTY_SEQ,
        output_keys: Union[str, Sequence[str]] = EMPTY_SEQ,
        stream_keys: Union[str, Sequence[str]] = EMPTY_SEQ,
        check_subgraphs: bool = True,
        debug: bool = False,
    ) -> None:
        super().__init__(
            input,
            stream=stream,
            config=config,
            checkpointer=checkpointer,
            store=store,
            nodes=nodes,
            specs=specs,
            output_keys=output_keys,
            stream_keys=stream_keys,
            interrupt_after=interrupt_after,
            interrupt_before=interrupt_before,
            check_subgraphs=check_subgraphs,
            manager=manager,
            debug=debug,
        )
        self.stack = ExitStack()
        if checkpointer:
            self.checkpointer_get_next_version = checkpointer.get_next_version
            self.checkpointer_put_writes = checkpointer.put_writes
        else:
            # without a checkpointer: default versioning, and disable saving
            # by overriding the method attribute with None
            self.checkpointer_get_next_version = increment
            self._checkpointer_put_after_previous = None # type: ignore[assignment]
            self.checkpointer_put_writes = None
    def _checkpointer_put_after_previous(
        self,
        prev: Optional[concurrent.futures.Future],
        config: RunnableConfig,
        checkpoint: Checkpoint,
        metadata: CheckpointMetadata,
        new_versions: ChannelVersions,
    ) -> RunnableConfig:
        """Wait for the previous checkpoint save (if any), then save this one,
        so the checkpointer receives checkpoints in order.

        NOTE(review): annotated as returning RunnableConfig but falls through
        returning None; callers visible here don't use the result — confirm.
        """
        try:
            if prev is not None:
                # propagate any error from the previous save
                prev.result()
        finally:
            cast(BaseCheckpointSaver, self.checkpointer).put(
                config, checkpoint, metadata, new_versions
            )
    def _update_mv(self, key: str, values: Sequence[Any]) -> None:
        """Apply writes to the managed value `key` via the sync `update` method."""
        return self.submit(cast(WritableManagedValue, self.managed[key]).update, values)
    # context manager
    def __enter__(self) -> Self:
        """Load (or initialize) the checkpoint, then set up executor and channels."""
        if self.config.get(CONF, {}).get(
            CONFIG_KEY_ENSURE_LATEST
        ) and self.checkpoint_config[CONF].get(CONFIG_KEY_CHECKPOINT_ID):
            # caller requires that the provided checkpoint id is the latest
            if self.checkpointer is None:
                raise RuntimeError(
                    "Cannot ensure latest checkpoint without checkpointer"
                )
            saved = self.checkpointer.get_tuple(
                patch_configurable(
                    self.checkpoint_config, {CONFIG_KEY_CHECKPOINT_ID: None}
                )
            )
            if (
                saved is None
                or saved.checkpoint["id"]
                != self.checkpoint_config[CONF][CONFIG_KEY_CHECKPOINT_ID]
            ):
                raise CheckpointNotLatest
        elif self.checkpointer:
            saved = self.checkpointer.get_tuple(self.checkpoint_config)
        else:
            saved = None
        if saved is None:
            # no saved checkpoint: start from an empty one at step -2
            saved = CheckpointTuple(
                self.config, empty_checkpoint(), {"step": -2}, None, []
            )
        self.checkpoint_config = {
            **self.config,
            **saved.config,
            CONF: {
                CONFIG_KEY_CHECKPOINT_NS: "",
                **self.config.get(CONF, {}),
                **saved.config.get(CONF, {}),
            },
        }
        self.prev_checkpoint_config = saved.parent_config
        self.checkpoint = saved.checkpoint
        self.checkpoint_metadata = saved.metadata
        self.checkpoint_pending_writes = (
            [(str(tid), k, v) for tid, k, v in saved.pending_writes]
            if saved.pending_writes is not None
            else []
        )
        self.submit = self.stack.enter_context(BackgroundExecutor(self.config))
        self.channels, self.managed = self.stack.enter_context(
            ChannelsManager(self.specs, self.checkpoint, self)
        )
        # register interrupt suppression as the last exit callback
        self.stack.push(self._suppress_interrupt)
        self.status = "pending"
        self.step = self.checkpoint_metadata["step"] + 1
        self.stop = self.step + self.config["recursion_limit"] + 1
        self.checkpoint_previous_versions = self.checkpoint["channel_versions"].copy()
        return self
    def __exit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc_value: Optional[BaseException],
        traceback: Optional[TracebackType],
    ) -> Optional[bool]:
        # unwind stack
        return self.stack.__exit__(exc_type, exc_value, traceback)
class AsyncPregelLoop(PregelLoop, AsyncContextManager):
    """Asynchronous Pregel loop, used as an async context manager.

    Uses an AsyncExitStack for resource cleanup, an AsyncBackgroundExecutor
    (entered in __aenter__) for `submit`, and the checkpointer's async methods
    (`aget_tuple`, `aput`, `aput_writes`).
    """
    def __init__(
        self,
        input: Optional[Any],
        *,
        stream: Optional[StreamProtocol],
        config: RunnableConfig,
        store: Optional[BaseStore],
        checkpointer: Optional[BaseCheckpointSaver],
        nodes: Mapping[str, PregelNode],
        specs: Mapping[str, Union[BaseChannel, ManagedValueSpec]],
        interrupt_after: Union[All, Sequence[str]] = EMPTY_SEQ,
        interrupt_before: Union[All, Sequence[str]] = EMPTY_SEQ,
        manager: Union[None, AsyncParentRunManager, ParentRunManager] = None,
        output_keys: Union[str, Sequence[str]] = EMPTY_SEQ,
        stream_keys: Union[str, Sequence[str]] = EMPTY_SEQ,
        check_subgraphs: bool = True,
        debug: bool = False,
    ) -> None:
        super().__init__(
            input,
            stream=stream,
            config=config,
            checkpointer=checkpointer,
            store=store,
            nodes=nodes,
            specs=specs,
            output_keys=output_keys,
            stream_keys=stream_keys,
            interrupt_after=interrupt_after,
            interrupt_before=interrupt_before,
            check_subgraphs=check_subgraphs,
            manager=manager,
            debug=debug,
        )
        self.stack = AsyncExitStack()
        if checkpointer:
            self.checkpointer_get_next_version = checkpointer.get_next_version
            self.checkpointer_put_writes = checkpointer.aput_writes
        else:
            # without a checkpointer: default versioning, and disable saving
            # by overriding the method attribute with None
            self.checkpointer_get_next_version = increment
            self._checkpointer_put_after_previous = None # type: ignore[assignment]
            self.checkpointer_put_writes = None
    async def _checkpointer_put_after_previous(
        self,
        prev: Optional[asyncio.Task],
        config: RunnableConfig,
        checkpoint: Checkpoint,
        metadata: CheckpointMetadata,
        new_versions: ChannelVersions,
    ) -> RunnableConfig:
        """Await the previous checkpoint save (if any), then save this one,
        so the checkpointer receives checkpoints in order.

        NOTE(review): annotated as returning RunnableConfig but falls through
        returning None; callers visible here don't use the result — confirm.
        """
        try:
            if prev is not None:
                # propagate any error from the previous save
                await prev
        finally:
            await cast(BaseCheckpointSaver, self.checkpointer).aput(
                config, checkpoint, metadata, new_versions
            )
    def _update_mv(self, key: str, values: Sequence[Any]) -> None:
        """Apply writes to the managed value `key` via the async `aupdate` method."""
        return self.submit(
            cast(WritableManagedValue, self.managed[key]).aupdate, values
        )
    # context manager
    async def __aenter__(self) -> Self:
        """Load (or initialize) the checkpoint, then set up executor and channels."""
        if self.config.get(CONF, {}).get(
            CONFIG_KEY_ENSURE_LATEST
        ) and self.checkpoint_config[CONF].get(CONFIG_KEY_CHECKPOINT_ID):
            # caller requires that the provided checkpoint id is the latest
            if self.checkpointer is None:
                raise RuntimeError(
                    "Cannot ensure latest checkpoint without checkpointer"
                )
            saved = await self.checkpointer.aget_tuple(
                patch_configurable(
                    self.checkpoint_config, {CONFIG_KEY_CHECKPOINT_ID: None}
                )
            )
            if (
                saved is None
                or saved.checkpoint["id"]
                != self.checkpoint_config[CONF][CONFIG_KEY_CHECKPOINT_ID]
            ):
                raise CheckpointNotLatest
        elif self.checkpointer:
            saved = await self.checkpointer.aget_tuple(self.checkpoint_config)
        else:
            saved = None
        if saved is None:
            # no saved checkpoint: start from an empty one at step -2
            saved = CheckpointTuple(
                self.config, empty_checkpoint(), {"step": -2}, None, []
            )
        self.checkpoint_config = {
            **self.config,
            **saved.config,
            CONF: {
                CONFIG_KEY_CHECKPOINT_NS: "",
                **self.config.get(CONF, {}),
                **saved.config.get(CONF, {}),
            },
        }
        self.prev_checkpoint_config = saved.parent_config
        self.checkpoint = saved.checkpoint
        self.checkpoint_metadata = saved.metadata
        self.checkpoint_pending_writes = (
            [(str(tid), k, v) for tid, k, v in saved.pending_writes]
            if saved.pending_writes is not None
            else []
        )
        self.submit = await self.stack.enter_async_context(
            AsyncBackgroundExecutor(self.config)
        )
        self.channels, self.managed = await self.stack.enter_async_context(
            AsyncChannelsManager(self.specs, self.checkpoint, self)
        )
        # register interrupt suppression as the last exit callback
        self.stack.push(self._suppress_interrupt)
        self.status = "pending"
        self.step = self.checkpoint_metadata["step"] + 1
        self.stop = self.step + self.config["recursion_limit"] + 1
        self.checkpoint_previous_versions = self.checkpoint["channel_versions"].copy()
        return self
    async def __aexit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc_value: Optional[BaseException],
        traceback: Optional[TracebackType],
    ) -> Optional[bool]:
        # unwind stack; shielded so cleanup completes even if we're cancelled
        return await asyncio.shield(
            self.stack.__aexit__(exc_type, exc_value, traceback)
        )
|
0 | lc_public_repos/langgraph/libs/langgraph/langgraph | lc_public_repos/langgraph/libs/langgraph/langgraph/pregel/read.py | from __future__ import annotations
from functools import cached_property
from typing import (
Any,
AsyncIterator,
Callable,
Iterator,
Mapping,
Optional,
Sequence,
Union,
)
from langchain_core.runnables import (
Runnable,
RunnableConfig,
RunnablePassthrough,
RunnableSerializable,
)
from langchain_core.runnables.base import Input, Other, coerce_to_runnable
from langchain_core.runnables.utils import ConfigurableFieldSpec
from langgraph.constants import CONF, CONFIG_KEY_READ
from langgraph.pregel.retry import RetryPolicy
from langgraph.pregel.write import ChannelWrite
from langgraph.utils.config import merge_configs
from langgraph.utils.runnable import RunnableCallable, RunnableSeq
READ_TYPE = Callable[[Union[str, Sequence[str]], bool], Union[Any, dict[str, Any]]]
class ChannelRead(RunnableCallable):
    """Implements the logic for reading state from CONFIG_KEY_READ.

    Usable both as a runnable as well as a static method (`do_read`) to call
    imperatively.
    """

    channel: Union[str, list[str]]  # channel name(s) to read
    fresh: bool = False  # whether to read a fresh copy of the state
    mapper: Optional[Callable[[Any], Any]] = None  # transform applied to the value read

    @property
    def config_specs(self) -> list[ConfigurableFieldSpec]:
        """Declare the CONFIG_KEY_READ configurable this runnable depends on."""
        return [
            ConfigurableFieldSpec(
                id=CONFIG_KEY_READ,
                name=CONFIG_KEY_READ,
                description=None,
                default=None,
                annotation=None,
            ),
        ]

    def __init__(
        self,
        channel: Union[str, list[str]],
        *,
        fresh: bool = False,
        mapper: Optional[Callable[[Any], Any]] = None,
        tags: Optional[list[str]] = None,
    ) -> None:
        super().__init__(func=self._read, afunc=self._aread, tags=tags, name=None)
        self.fresh = fresh
        self.mapper = mapper
        self.channel = channel

    def get_name(
        self, suffix: Optional[str] = None, *, name: Optional[str] = None
    ) -> str:
        """Return a descriptive name that includes the channel(s) read."""
        if name:
            pass
        elif isinstance(self.channel, str):
            name = f"ChannelRead<{self.channel}>"
        else:
            name = f"ChannelRead<{','.join(self.channel)}>"
        return super().get_name(suffix, name=name)

    def _read(self, _: Any, config: RunnableConfig) -> Any:
        # sync entrypoint: delegate to do_read with this instance's settings
        return self.do_read(
            config, select=self.channel, fresh=self.fresh, mapper=self.mapper
        )

    async def _aread(self, _: Any, config: RunnableConfig) -> Any:
        # async entrypoint: do_read is not IO-bound, so no await needed
        return self.do_read(
            config, select=self.channel, fresh=self.fresh, mapper=self.mapper
        )

    @staticmethod
    def do_read(
        config: RunnableConfig,
        *,
        select: Union[str, list[str]],
        fresh: bool = False,
        mapper: Optional[Callable[[Any], Any]] = None,
    ) -> Any:
        """Read channel value(s) using the read function found in `config`.

        Args:
            config: Runnable config carrying CONFIG_KEY_READ.
            select: Channel name, or list of channel names, to read.
            fresh: Whether to read a fresh copy of the state.
            mapper: Optional transform applied to the value(s) read.

        Raises:
            RuntimeError: If `config` has no CONFIG_KEY_READ entry, i.e. this
                was not called inside a Pregel process.
        """
        try:
            read: READ_TYPE = config[CONF][CONFIG_KEY_READ]
        except KeyError:
            # BUG FIX: the two adjacent string literals previously concatenated
            # without a separator ("...read functionMake sure...")
            raise RuntimeError(
                "Not configured with a read function. "
                "Make sure to call in the context of a Pregel process"
            ) from None
        if mapper:
            return mapper(read(select, fresh))
        else:
            return read(select, fresh)
DEFAULT_BOUND: RunnablePassthrough = RunnablePassthrough()
class PregelNode(Runnable):
    """A node in a Pregel graph. This won't be invoked as a runnable by the graph
    itself, but instead acts as a container for the components necessary to make
    a PregelExecutableTask for a node."""

    channels: Union[list[str], Mapping[str, str]]
    """The channels that will be passed as input to `bound`.
    If a list, the node will be invoked with the first of that isn't empty.
    If a dict, the keys are the names of the channels, and the values are the keys
    to use in the input to `bound`."""

    triggers: list[str]
    """If any of these channels is written to, this node will be triggered in
    the next step."""

    mapper: Optional[Callable[[Any], Any]]
    """A function to transform the input before passing it to `bound`."""

    writers: list[Runnable]
    """A list of writers that will be executed after `bound`, responsible for
    taking the output of `bound` and writing it to the appropriate channels."""

    bound: Runnable[Any, Any]
    """The main logic of the node. This will be invoked with the input from
    `channels`."""

    retry_policy: Optional[RetryPolicy]
    """The retry policy to use when invoking the node."""

    tags: Optional[Sequence[str]]
    """Tags to attach to the node for tracing."""

    metadata: Optional[Mapping[str, Any]]
    """Metadata to attach to the node for tracing."""

    def __init__(
        self,
        *,
        channels: Union[list[str], Mapping[str, str]],
        triggers: Sequence[str],
        mapper: Optional[Callable[[Any], Any]] = None,
        writers: Optional[list[Runnable]] = None,
        tags: Optional[list[str]] = None,
        metadata: Optional[Mapping[str, Any]] = None,
        bound: Optional[Runnable[Any, Any]] = None,
        retry_policy: Optional[RetryPolicy] = None,
    ) -> None:
        self.channels = channels
        self.triggers = list(triggers)
        self.mapper = mapper
        self.writers = writers or []
        # DEFAULT_BOUND is a sentinel: "no bound runnable assigned yet"
        self.bound = bound if bound is not None else DEFAULT_BOUND
        self.retry_policy = retry_policy
        self.tags = tags
        self.metadata = metadata

    def copy(self, update: dict[str, Any]) -> PregelNode:
        """Return a new PregelNode with attributes replaced per `update`.

        BUG FIX: `functools.cached_property` memoizes its result in the
        instance `__dict__` under the property name, so once `flat_writers`
        or `node` had been accessed, spreading `self.__dict__` into
        `PregelNode(**attrs)` raised a TypeError (unexpected keyword
        argument). Drop those memoized entries before constructing the copy;
        the copy recomputes them lazily, which also avoids stale caches.
        """
        attrs = {**self.__dict__, **update}
        attrs.pop("flat_writers", None)
        attrs.pop("node", None)
        return PregelNode(**attrs)

    @cached_property
    def flat_writers(self) -> list[Runnable]:
        """Get writers with optimizations applied. Dedupes consecutive ChannelWrites."""
        writers = self.writers.copy()
        while (
            len(writers) > 1
            and isinstance(writers[-1], ChannelWrite)
            and isinstance(writers[-2], ChannelWrite)
        ):
            # we can combine writes if they are consecutive
            # careful to not modify the original writers list or ChannelWrite
            writers[-2] = ChannelWrite(
                writes=writers[-2].writes + writers[-1].writes,
                tags=writers[-2].tags,
                require_at_least_one_of=writers[-2].require_at_least_one_of,
            )
            writers.pop()
        return writers

    @cached_property
    def node(self) -> Optional[Runnable[Any, Any]]:
        """Get a runnable that combines `bound` and `writers`."""
        writers = self.flat_writers
        if self.bound is DEFAULT_BOUND and not writers:
            return None
        elif self.bound is DEFAULT_BOUND and len(writers) == 1:
            return writers[0]
        elif self.bound is DEFAULT_BOUND:
            return RunnableSeq(*writers)
        elif writers:
            return RunnableSeq(self.bound, *writers)
        else:
            return self.bound

    def join(self, channels: Sequence[str]) -> PregelNode:
        """Return a copy that additionally reads `channels` (name -> same name)."""
        assert isinstance(channels, list) or isinstance(
            channels, tuple
        ), "channels must be a list or tuple"
        assert isinstance(
            self.channels, dict
        ), "all channels must be named when using .join()"
        return self.copy(
            update=dict(
                channels={
                    **self.channels,
                    **{chan: chan for chan in channels},
                }
            ),
        )

    def __or__(
        self,
        other: Union[
            Runnable[Any, Other],
            Callable[[Any], Other],
            Mapping[str, Runnable[Any, Other] | Callable[[Any], Other]],
        ],
    ) -> PregelNode:
        """Compose: writers are appended; anything else extends/sets `bound`."""
        if isinstance(other, Runnable) and ChannelWrite.is_writer(other):
            return self.copy(update=dict(writers=[*self.writers, other]))
        elif self.bound is DEFAULT_BOUND:
            return self.copy(update=dict(bound=coerce_to_runnable(other)))
        else:
            return self.copy(update=dict(bound=RunnableSeq(self.bound, other)))

    def pipe(
        self,
        *others: Runnable[Any, Other] | Callable[[Any], Other],
        name: Optional[str] = None,
    ) -> RunnableSerializable[Any, Other]:
        """Compose with each of `others` in order, via `__or__`."""
        for other in others:
            self = self | other
        return self

    def __ror__(
        self,
        other: Union[
            Runnable[Other, Any],
            Callable[[Any], Other],
            Mapping[str, Union[Runnable[Other, Any], Callable[[Other], Any]]],
        ],
    ) -> RunnableSerializable:
        # composing something *before* a PregelNode is not supported
        raise NotImplementedError()

    def invoke(
        self,
        input: Input,
        config: Optional[RunnableConfig] = None,
        **kwargs: Optional[Any],
    ) -> Any:
        """Invoke `bound` directly, merging this node's tags/metadata into config."""
        return self.bound.invoke(
            input,
            merge_configs({"metadata": self.metadata, "tags": self.tags}, config),
            **kwargs,
        )

    async def ainvoke(
        self,
        input: Input,
        config: Optional[RunnableConfig] = None,
        **kwargs: Optional[Any],
    ) -> Any:
        """Async-invoke `bound` directly, merging this node's tags/metadata into config."""
        return await self.bound.ainvoke(
            input,
            merge_configs({"metadata": self.metadata, "tags": self.tags}, config),
            **kwargs,
        )

    def stream(
        self,
        input: Input,
        config: Optional[RunnableConfig] = None,
        **kwargs: Optional[Any],
    ) -> Iterator[Any]:
        """Stream from `bound` directly, merging this node's tags/metadata into config."""
        yield from self.bound.stream(
            input,
            merge_configs({"metadata": self.metadata, "tags": self.tags}, config),
            **kwargs,
        )

    async def astream(
        self,
        input: Input,
        config: Optional[RunnableConfig] = None,
        **kwargs: Optional[Any],
    ) -> AsyncIterator[Any]:
        """Async-stream from `bound` directly, merging this node's tags/metadata into config."""
        async for item in self.bound.astream(
            input,
            merge_configs({"metadata": self.metadata, "tags": self.tags}, config),
            **kwargs,
        ):
            yield item
|
0 | lc_public_repos/langgraph/libs/langgraph/langgraph | lc_public_repos/langgraph/libs/langgraph/langgraph/pregel/algo.py | import sys
from collections import defaultdict, deque
from functools import partial
from hashlib import sha1
from typing import (
Any,
Callable,
Iterable,
Iterator,
Literal,
Mapping,
NamedTuple,
Optional,
Protocol,
Sequence,
Union,
cast,
overload,
)
from uuid import UUID
from langchain_core.callbacks.manager import AsyncParentRunManager, ParentRunManager
from langchain_core.runnables.config import RunnableConfig
from langgraph.channels.base import BaseChannel
from langgraph.checkpoint.base import (
BaseCheckpointSaver,
Checkpoint,
PendingWrite,
V,
copy_checkpoint,
)
from langgraph.constants import (
CONF,
CONFIG_KEY_CHECKPOINT_ID,
CONFIG_KEY_CHECKPOINT_MAP,
CONFIG_KEY_CHECKPOINT_NS,
CONFIG_KEY_CHECKPOINTER,
CONFIG_KEY_READ,
CONFIG_KEY_SCRATCHPAD,
CONFIG_KEY_SEND,
CONFIG_KEY_STORE,
CONFIG_KEY_TASK_ID,
CONFIG_KEY_WRITES,
EMPTY_SEQ,
INTERRUPT,
NO_WRITES,
NS_END,
NS_SEP,
NULL_TASK_ID,
PULL,
PUSH,
RESERVED,
RESUME,
TAG_HIDDEN,
TASKS,
Send,
)
from langgraph.errors import EmptyChannelError, InvalidUpdateError
from langgraph.managed.base import ManagedValueMapping
from langgraph.pregel.io import read_channel, read_channels
from langgraph.pregel.log import logger
from langgraph.pregel.manager import ChannelsManager
from langgraph.pregel.read import PregelNode
from langgraph.store.base import BaseStore
from langgraph.types import All, LoopProtocol, PregelExecutableTask, PregelTask
from langgraph.utils.config import merge_configs, patch_config
# Callable computing the next version for a channel, given the current max
# version (or None) and the channel instance.
GetNextVersion = Callable[[Optional[V], BaseChannel], V]
# Whether BaseException.add_note / exception notes are available (Python 3.11+).
SUPPORTS_EXC_NOTES = sys.version_info >= (3, 11)
class WritesProtocol(Protocol):
    """Protocol for objects containing writes to be applied to checkpoint.

    Implemented by PregelTaskWrites and PregelExecutableTask.
    """
    @property
    def path(self) -> tuple[Union[str, int, tuple], ...]: ...  # task path within the graph
    @property
    def name(self) -> str: ...  # task name, eg. the node name
    @property
    def writes(self) -> Sequence[tuple[str, Any]]: ...  # (channel, value) pairs
    @property
    def triggers(self) -> Sequence[str]: ...  # channels that triggered the task
class PregelTaskWrites(NamedTuple):
    """Simplest implementation of WritesProtocol, for usage with writes that
    don't originate from a runnable task, eg. graph input, update_state, etc."""
    path: tuple[Union[str, int, tuple], ...]  # task path within the graph
    name: str  # task name, eg. INPUT for graph input
    writes: Sequence[tuple[str, Any]]  # (channel, value) pairs to apply
    triggers: Sequence[str]  # channels that triggered the task, may be empty
def should_interrupt(
    checkpoint: Checkpoint,
    interrupt_nodes: Union[All, Sequence[str]],
    tasks: Iterable[PregelExecutableTask],
) -> list[PregelExecutableTask]:
    """Return the subset of `tasks` at which the graph should interrupt.

    An interrupt is possible only when some channel was updated since the last
    interrupt; the returned tasks are those matching `interrupt_nodes`, where
    "*" matches every task not tagged as hidden.
    """
    seen = checkpoint["versions_seen"].get(INTERRUPT, {})
    # derive a "zero" version of the same type as the stored channel versions
    version_type = type(next(iter(checkpoint["channel_versions"].values()), None))
    null_version = version_type()  # type: ignore[misc]
    # nothing changed since the previous interrupt -> nothing to interrupt
    if not any(
        version > seen.get(chan, null_version)  # type: ignore[operator]
        for chan, version in checkpoint["channel_versions"].items()
    ):
        return []
    if interrupt_nodes == "*":
        # wildcard: every triggered task except hidden ones
        return [
            task
            for task in tasks
            if not task.config
            or TAG_HIDDEN not in task.config.get("tags", EMPTY_SEQ)
        ]
    # explicit list: only tasks whose node name is listed
    return [task for task in tasks if task.name in interrupt_nodes]
def local_read(
    step: int,
    checkpoint: Checkpoint,
    channels: Mapping[str, BaseChannel],
    managed: ManagedValueMapping,
    task: WritesProtocol,
    config: RunnableConfig,
    select: Union[list[str], str],
    fresh: bool = False,
) -> Union[dict[str, Any], Any]:
    """Function injected under CONFIG_KEY_READ in task config, to read current state.

    Used by conditional edges to read a copy of the state, reflecting the writes
    from that node only.

    Args:
        step: Current step number (used to scope the temporary channels).
        checkpoint: Current checkpoint.
        channels: Current channel mapping.
        managed: Managed value mapping.
        task: The task whose own writes should be reflected in the read.
        config: Runnable config for the task.
        select: Channel name, or list of channel names, to read.
        fresh: If True, apply the task's writes to a scratch copy of the
            updated channels before reading, instead of reading as-is.
    """
    if isinstance(select, str):
        managed_keys = []
        # single channel: it is "updated" only if this task wrote to it
        for c, _ in task.writes:
            if c == select:
                updated = {c}
                break
        else:
            updated = set()
    else:
        # split selection into managed values and regular channels
        managed_keys = [k for k in select if k in managed]
        select = [k for k in select if k not in managed]
        updated = set(select).intersection(c for c, _ in task.writes)
    if fresh and updated:
        # apply the task's writes to scratch copies of the updated channels,
        # so the read reflects them without mutating the real channels
        with ChannelsManager(
            {k: v for k, v in channels.items() if k in updated},
            checkpoint,
            LoopProtocol(config=config, step=step, stop=step + 1),
            skip_context=True,
        ) as (local_channels, _):
            apply_writes(copy_checkpoint(checkpoint), local_channels, [task], None)
            values = read_channels({**channels, **local_channels}, select)
    else:
        values = read_channels(channels, select)
    if managed_keys:
        values.update({k: managed[k]() for k in managed_keys})
    return values
def local_write(
    commit: Callable[[Sequence[tuple[str, Any]]], None],
    process_keys: Iterable[str],
    writes: Sequence[tuple[str, Any]],
) -> None:
    """Function injected under CONFIG_KEY_SEND in task config, to write to channels.
    Validates writes and forwards them to `commit` function."""
    for channel, value in writes:
        # only PUSH/TASKS writes need validation; everything else passes through
        if channel not in (PUSH, TASKS):
            continue
        if not isinstance(value, Send):
            raise InvalidUpdateError(f"Expected Send, got {value}")
        if value.node not in process_keys:
            raise InvalidUpdateError(f"Invalid node name {value.node} in packet")
    commit(writes)
def increment(current: Optional[int], channel: BaseChannel) -> int:
    """Default channel versioning function, increments the current int version.

    Starts at 1 when no version exists yet; `channel` is unused here but is
    part of the GetNextVersion signature.
    """
    if current is None:
        return 1
    return current + 1
def apply_writes(
    checkpoint: Checkpoint,
    channels: Mapping[str, BaseChannel],
    tasks: Iterable[WritesProtocol],
    get_next_version: Optional[GetNextVersion],
) -> dict[str, list[Any]]:
    """Apply writes from a set of tasks (usually the tasks from a Pregel step)
    to the checkpoint and channels, and return managed values writes to be applied
    externally.

    Mutates ``checkpoint`` (versions_seen, channel_versions, pending_sends) and
    the ``channels`` in place. When ``get_next_version`` is None, channel
    versions are not bumped (used for scratch applies, see ``local_read``).
    """
    # sort tasks on path, to ensure deterministic order for update application
    # any path parts after the 3rd are ignored for sorting
    # (we use them for eg. task ids which aren't good for sorting)
    tasks = sorted(tasks, key=lambda t: t.path[:3])
    # if no task has triggers this is applying writes from the null task only
    # so we don't do anything other than update the channels written to
    bump_step = any(t.triggers for t in tasks)
    # update seen versions
    for task in tasks:
        checkpoint["versions_seen"].setdefault(task.name, {}).update(
            {
                chan: checkpoint["channel_versions"][chan]
                for chan in task.triggers
                if chan in checkpoint["channel_versions"]
            }
        )
    # Find the highest version of all channels
    if checkpoint["channel_versions"]:
        max_version = max(checkpoint["channel_versions"].values())
    else:
        max_version = None
    # Consume all channels that were read
    for chan in {
        chan
        for task in tasks
        for chan in task.triggers
        if chan not in RESERVED and chan in channels
    }:
        # consume() returns True if the channel changed as a result
        if channels[chan].consume() and get_next_version is not None:
            checkpoint["channel_versions"][chan] = get_next_version(
                max_version,
                channels[chan],
            )
    # clear pending sends
    if checkpoint["pending_sends"] and bump_step:
        checkpoint["pending_sends"].clear()
    # Group writes by channel
    pending_writes_by_channel: dict[str, list[Any]] = defaultdict(list)
    pending_writes_by_managed: dict[str, list[Any]] = defaultdict(list)
    for task in tasks:
        for chan, val in task.writes:
            if chan in (NO_WRITES, PUSH, RESUME, INTERRUPT):
                # control-channel writes are handled elsewhere, not applied here
                pass
            elif chan == TASKS:  # TODO: remove branch in 1.0
                checkpoint["pending_sends"].append(val)
            elif chan in channels:
                pending_writes_by_channel[chan].append(val)
            else:
                # not a known channel: assume it targets a managed value
                pending_writes_by_managed[chan].append(val)
    # Find the highest version of all channels
    # (recomputed, as the consume() loop above may have bumped versions)
    if checkpoint["channel_versions"]:
        max_version = max(checkpoint["channel_versions"].values())
    else:
        max_version = None
    # Apply writes to channels
    updated_channels: set[str] = set()
    for chan, vals in pending_writes_by_channel.items():
        if chan in channels:
            # update() returns True if the channel value changed
            if channels[chan].update(vals) and get_next_version is not None:
                checkpoint["channel_versions"][chan] = get_next_version(
                    max_version,
                    channels[chan],
                )
            updated_channels.add(chan)
    # Channels that weren't updated in this step are notified of a new step
    if bump_step:
        for chan in channels:
            if chan not in updated_channels:
                if channels[chan].update([]) and get_next_version is not None:
                    checkpoint["channel_versions"][chan] = get_next_version(
                        max_version,
                        channels[chan],
                    )
    # Return managed values writes to be applied externally
    return pending_writes_by_managed
# Overload: for_execution=False — planning mode, returns lightweight read-only
# PregelTask descriptions (no store/checkpointer/manager needed).
@overload
def prepare_next_tasks(
    checkpoint: Checkpoint,
    pending_writes: Sequence[PendingWrite],
    processes: Mapping[str, PregelNode],
    channels: Mapping[str, BaseChannel],
    managed: ManagedValueMapping,
    config: RunnableConfig,
    step: int,
    *,
    for_execution: Literal[False],
    store: Literal[None] = None,
    checkpointer: Literal[None] = None,
    manager: Literal[None] = None,
) -> dict[str, PregelTask]: ...
# Overload: for_execution=True — execution mode, returns fully-configured
# PregelExecutableTask objects ready to be invoked.
@overload
def prepare_next_tasks(
    checkpoint: Checkpoint,
    pending_writes: Sequence[PendingWrite],
    processes: Mapping[str, PregelNode],
    channels: Mapping[str, BaseChannel],
    managed: ManagedValueMapping,
    config: RunnableConfig,
    step: int,
    *,
    for_execution: Literal[True],
    store: Optional[BaseStore],
    checkpointer: Optional[BaseCheckpointSaver],
    manager: Union[None, ParentRunManager, AsyncParentRunManager],
) -> dict[str, PregelExecutableTask]: ...
def prepare_next_tasks(
    checkpoint: Checkpoint,
    pending_writes: Sequence[PendingWrite],
    processes: Mapping[str, PregelNode],
    channels: Mapping[str, BaseChannel],
    managed: ManagedValueMapping,
    config: RunnableConfig,
    step: int,
    *,
    for_execution: bool,
    store: Optional[BaseStore] = None,
    checkpointer: Optional[BaseCheckpointSaver] = None,
    manager: Union[None, ParentRunManager, AsyncParentRunManager] = None,
) -> Union[dict[str, PregelTask], dict[str, PregelExecutableTask]]:
    """Prepare the set of tasks that will make up the next Pregel step.
    This is the union of all PUSH tasks (Sends) and PULL tasks (nodes triggered
    by edges).

    Args:
        checkpoint: the current checkpoint (read, not mutated here).
        pending_writes: writes from the current step, scanned for PUSH packets.
        processes: mapping of node name to PregelNode.
        channels: current channel mapping.
        managed: managed-value mapping.
        config: base runnable config, merged into each task's config.
        step: current superstep number.
        for_execution: if True return executable tasks, else descriptions.
        store: forwarded to tasks when for_execution is True.
        checkpointer: forwarded to tasks when for_execution is True.
        manager: callback manager forwarded to tasks when for_execution is True.

    Returns:
        Mapping of task id to task, in deterministic creation order.
    """
    tasks: list[Union[PregelTask, PregelExecutableTask]] = []
    # Consume pending_sends from previous step (legacy version of Send)
    for idx, _ in enumerate(checkpoint["pending_sends"]):  # TODO: remove branch in 1.0
        if task := prepare_single_task(
            (PUSH, idx),
            None,
            checkpoint=checkpoint,
            pending_writes=pending_writes,
            processes=processes,
            channels=channels,
            managed=managed,
            config=config,
            step=step,
            for_execution=for_execution,
            store=store,
            checkpointer=checkpointer,
            manager=manager,
        ):
            tasks.append(task)
    # Check if any processes should be run in next step
    # If so, prepare the values to be passed to them
    for name in processes:
        if task := prepare_single_task(
            (PULL, name),
            None,
            checkpoint=checkpoint,
            pending_writes=pending_writes,
            processes=processes,
            channels=channels,
            managed=managed,
            config=config,
            step=step,
            for_execution=for_execution,
            store=store,
            checkpointer=checkpointer,
            manager=manager,
        ):
            tasks.append(task)
    # Consume pending Sends from this step (new version of Send)
    if any(c == PUSH for _, c, _ in pending_writes):
        # group writes by task id
        grouped_by_task = defaultdict(list)
        for tid, c, _ in pending_writes:
            grouped_by_task[tid].append(c)
        # prepare send tasks from grouped writes
        # 1. start from sends originating from existing tasks
        # note: `tasks` may grow while we iterate, so use an index-based loop,
        # which also picks up sends attributed to tasks appended below
        tidx = 0
        while tidx < len(tasks):
            task = tasks[tidx]
            if twrites := grouped_by_task.pop(task.id, None):
                for idx, c in enumerate(twrites):
                    if c != PUSH:
                        continue
                    if next_task := prepare_single_task(
                        (PUSH, task.path, idx, task.id),
                        None,
                        checkpoint=checkpoint,
                        pending_writes=pending_writes,
                        processes=processes,
                        channels=channels,
                        managed=managed,
                        config=config,
                        step=step,
                        for_execution=for_execution,
                        store=store,
                        checkpointer=checkpointer,
                        manager=manager,
                    ):
                        tasks.append(next_task)
            tidx += 1
        # key tasks by id
        task_map = {t.id: t for t in tasks}
        # 2. create new tasks for remaining sends (eg. from update_state)
        for tid, writes in grouped_by_task.items():
            task = task_map.get(tid)
            for idx, c in enumerate(writes):
                if c != PUSH:
                    continue
                if next_task := prepare_single_task(
                    (PUSH, task.path if task else (), idx, tid),
                    None,
                    checkpoint=checkpoint,
                    pending_writes=pending_writes,
                    processes=processes,
                    channels=channels,
                    managed=managed,
                    config=config,
                    step=step,
                    for_execution=for_execution,
                    store=store,
                    checkpointer=checkpointer,
                    manager=manager,
                ):
                    task_map[next_task.id] = next_task
    else:
        task_map = {t.id: t for t in tasks}
    return task_map
def prepare_single_task(
    task_path: tuple[Union[str, int, tuple], ...],
    task_id_checksum: Optional[str],
    *,
    checkpoint: Checkpoint,
    pending_writes: Sequence[PendingWrite],
    processes: Mapping[str, PregelNode],
    channels: Mapping[str, BaseChannel],
    managed: ManagedValueMapping,
    config: RunnableConfig,
    step: int,
    for_execution: bool,
    store: Optional[BaseStore] = None,
    checkpointer: Optional[BaseCheckpointSaver] = None,
    manager: Union[None, ParentRunManager, AsyncParentRunManager] = None,
) -> Union[None, PregelTask, PregelExecutableTask]:
    """Prepares a single task for the next Pregel step, given a task path, which
    uniquely identifies a PUSH or PULL task within the graph.

    Returns None when the path is invalid or the task should not run (e.g. no
    trigger channels were updated). Task ids are deterministic, derived from the
    checkpoint id, namespace, step and path, so they are stable across
    re-invocations; ``task_id_checksum`` asserts that stability when provided.
    """
    checkpoint_id = UUID(checkpoint["id"]).bytes
    configurable = config.get(CONF, {})
    parent_ns = configurable.get(CONFIG_KEY_CHECKPOINT_NS, "")
    if task_path[0] == PUSH:
        if len(task_path) == 2:  # TODO: remove branch in 1.0
            # legacy SEND tasks, executed in superstep n+1
            # (PUSH, idx of pending send)
            idx = cast(int, task_path[1])
            if idx >= len(checkpoint["pending_sends"]):
                return
            packet = checkpoint["pending_sends"][idx]
            if not isinstance(packet, Send):
                logger.warning(
                    f"Ignoring invalid packet type {type(packet)} in pending sends"
                )
                return
            if packet.node not in processes:
                logger.warning(
                    f"Ignoring unknown node name {packet.node} in pending sends"
                )
                return
            # create task id
            triggers = [PUSH]
            checkpoint_ns = (
                f"{parent_ns}{NS_SEP}{packet.node}" if parent_ns else packet.node
            )
            task_id = _uuid5_str(
                checkpoint_id,
                checkpoint_ns,
                str(step),
                packet.node,
                PUSH,
                str(idx),
            )
        elif len(task_path) == 4:
            # new PUSH tasks, executed in superstep n
            # (PUSH, parent task path, idx of PUSH write, id of parent task)
            task_path_t = cast(tuple[str, tuple, int, str], task_path)
            writes_for_path = [w for w in pending_writes if w[0] == task_path_t[3]]
            if task_path_t[2] >= len(writes_for_path):
                logger.warning(
                    f"Ignoring invalid write index {task_path[2]} in pending writes"
                )
                return
            packet = writes_for_path[task_path_t[2]][2]
            if not isinstance(packet, Send):
                logger.warning(
                    f"Ignoring invalid packet type {type(packet)} in pending writes"
                )
                return
            if packet.node not in processes:
                logger.warning(
                    f"Ignoring unknown node name {packet.node} in pending writes"
                )
                return
            # create task id
            triggers = [PUSH]
            checkpoint_ns = (
                f"{parent_ns}{NS_SEP}{packet.node}" if parent_ns else packet.node
            )
            task_id = _uuid5_str(
                checkpoint_id,
                checkpoint_ns,
                str(step),
                packet.node,
                PUSH,
                _tuple_str(task_path[1]),
                str(task_path[2]),
            )
        else:
            logger.warning(f"Ignoring invalid PUSH task path {task_path}")
            return
        # NOTE(review): the PULL branch below builds this separator with NS_END
        # instead of a literal ":" — confirm whether they are meant to match
        task_checkpoint_ns = f"{checkpoint_ns}:{task_id}"
        # metadata attached to traces/callbacks for this task
        metadata = {
            "langgraph_step": step,
            "langgraph_node": packet.node,
            "langgraph_triggers": triggers,
            "langgraph_path": task_path,
            "langgraph_checkpoint_ns": task_checkpoint_ns,
        }
        if task_id_checksum is not None:
            assert task_id == task_id_checksum, f"{task_id} != {task_id_checksum}"
        if for_execution:
            proc = processes[packet.node]
            if node := proc.node:
                if proc.metadata:
                    metadata.update(proc.metadata)
                writes: deque[tuple[str, Any]] = deque()
                return PregelExecutableTask(
                    packet.node,
                    packet.arg,
                    node,
                    writes,
                    patch_config(
                        merge_configs(
                            config, {"metadata": metadata, "tags": proc.tags}
                        ),
                        run_name=packet.node,
                        callbacks=(
                            manager.get_child(f"graph:step:{step}") if manager else None
                        ),
                        configurable={
                            CONFIG_KEY_TASK_ID: task_id,
                            # deque.extend is thread-safe
                            CONFIG_KEY_SEND: partial(
                                local_write,
                                writes.extend,
                                processes.keys(),
                            ),
                            CONFIG_KEY_READ: partial(
                                local_read,
                                step,
                                checkpoint,
                                channels,
                                managed,
                                PregelTaskWrites(
                                    task_path, packet.node, writes, triggers
                                ),
                                config,
                            ),
                            CONFIG_KEY_STORE: (
                                store or configurable.get(CONFIG_KEY_STORE)
                            ),
                            CONFIG_KEY_CHECKPOINTER: (
                                checkpointer
                                or configurable.get(CONFIG_KEY_CHECKPOINTER)
                            ),
                            CONFIG_KEY_CHECKPOINT_MAP: {
                                **configurable.get(CONFIG_KEY_CHECKPOINT_MAP, {}),
                                parent_ns: checkpoint["id"],
                            },
                            CONFIG_KEY_CHECKPOINT_ID: None,
                            CONFIG_KEY_CHECKPOINT_NS: task_checkpoint_ns,
                            CONFIG_KEY_WRITES: [
                                w
                                for w in pending_writes
                                + configurable.get(CONFIG_KEY_WRITES, [])
                                if w[0] in (NULL_TASK_ID, task_id)
                            ],
                            CONFIG_KEY_SCRATCHPAD: {},
                        },
                    ),
                    triggers,
                    proc.retry_policy,
                    None,
                    task_id,
                    task_path,
                    writers=proc.flat_writers,
                )
        else:
            return PregelTask(task_id, packet.node, task_path)
    elif task_path[0] == PULL:
        # (PULL, node name)
        name = cast(str, task_path[1])
        if name not in processes:
            return
        proc = processes[name]
        # derive the "null" (zero) version from the version type in use
        version_type = type(next(iter(checkpoint["channel_versions"].values()), None))
        null_version = version_type()  # type: ignore[misc]
        if null_version is None:
            # no channel has ever been versioned: nothing can have triggered
            return
        seen = checkpoint["versions_seen"].get(name, {})
        # If any of the channels read by this process were updated
        if triggers := sorted(
            chan
            for chan in proc.triggers
            if not isinstance(
                read_channel(channels, chan, return_exception=True), EmptyChannelError
            )
            and checkpoint["channel_versions"].get(chan, null_version)  # type: ignore[operator]
            > seen.get(chan, null_version)
        ):
            try:
                val = next(
                    _proc_input(proc, managed, channels, for_execution=for_execution)
                )
            except StopIteration:
                # input channels were empty: node should not run this step
                return
            except Exception as exc:
                if SUPPORTS_EXC_NOTES:
                    exc.add_note(
                        f"Before task with name '{name}' and path '{task_path[:3]}'"
                    )
                raise
            # create task id
            checkpoint_ns = f"{parent_ns}{NS_SEP}{name}" if parent_ns else name
            task_id = _uuid5_str(
                checkpoint_id,
                checkpoint_ns,
                str(step),
                name,
                PULL,
                *triggers,
            )
            task_checkpoint_ns = f"{checkpoint_ns}{NS_END}{task_id}"
            # metadata attached to traces/callbacks for this task
            metadata = {
                "langgraph_step": step,
                "langgraph_node": name,
                "langgraph_triggers": triggers,
                "langgraph_path": task_path,
                "langgraph_checkpoint_ns": task_checkpoint_ns,
            }
            if task_id_checksum is not None:
                assert task_id == task_id_checksum
            if for_execution:
                if node := proc.node:
                    if proc.metadata:
                        metadata.update(proc.metadata)
                    writes = deque()
                    return PregelExecutableTask(
                        name,
                        val,
                        node,
                        writes,
                        patch_config(
                            merge_configs(
                                config, {"metadata": metadata, "tags": proc.tags}
                            ),
                            run_name=name,
                            callbacks=(
                                manager.get_child(f"graph:step:{step}")
                                if manager
                                else None
                            ),
                            configurable={
                                CONFIG_KEY_TASK_ID: task_id,
                                # deque.extend is thread-safe
                                CONFIG_KEY_SEND: partial(
                                    local_write,
                                    writes.extend,
                                    processes.keys(),
                                ),
                                CONFIG_KEY_READ: partial(
                                    local_read,
                                    step,
                                    checkpoint,
                                    channels,
                                    managed,
                                    PregelTaskWrites(task_path, name, writes, triggers),
                                    config,
                                ),
                                CONFIG_KEY_STORE: (
                                    store or configurable.get(CONFIG_KEY_STORE)
                                ),
                                CONFIG_KEY_CHECKPOINTER: (
                                    checkpointer
                                    or configurable.get(CONFIG_KEY_CHECKPOINTER)
                                ),
                                CONFIG_KEY_CHECKPOINT_MAP: {
                                    **configurable.get(CONFIG_KEY_CHECKPOINT_MAP, {}),
                                    parent_ns: checkpoint["id"],
                                },
                                CONFIG_KEY_CHECKPOINT_ID: None,
                                CONFIG_KEY_CHECKPOINT_NS: task_checkpoint_ns,
                                CONFIG_KEY_WRITES: [
                                    w
                                    for w in pending_writes
                                    + configurable.get(CONFIG_KEY_WRITES, [])
                                    if w[0] in (NULL_TASK_ID, task_id)
                                ],
                                CONFIG_KEY_SCRATCHPAD: {},
                            },
                        ),
                        triggers,
                        proc.retry_policy,
                        None,
                        task_id,
                        task_path,
                        writers=proc.flat_writers,
                    )
            else:
                return PregelTask(task_id, name, task_path)
def _proc_input(
proc: PregelNode,
managed: ManagedValueMapping,
channels: Mapping[str, BaseChannel],
*,
for_execution: bool,
) -> Iterator[Any]:
"""Prepare input for a PULL task, based on the process's channels and triggers."""
# If all trigger channels subscribed by this process are not empty
# then invoke the process with the values of all non-empty channels
if isinstance(proc.channels, dict):
try:
val: dict[str, Any] = {}
for k, chan in proc.channels.items():
if chan in proc.triggers:
val[k] = read_channel(channels, chan, catch=False)
elif chan in channels:
try:
val[k] = read_channel(channels, chan, catch=False)
except EmptyChannelError:
continue
else:
val[k] = managed[k]()
except EmptyChannelError:
return
elif isinstance(proc.channels, list):
for chan in proc.channels:
try:
val = read_channel(channels, chan, catch=False)
break
except EmptyChannelError:
pass
else:
return
else:
raise RuntimeError(
"Invalid channels type, expected list or dict, got {proc.channels}"
)
# If the process has a mapper, apply it to the value
if for_execution and proc.mapper is not None:
val = proc.mapper(val)
yield val
def _uuid5_str(namespace: bytes, *parts: str) -> str:
"""Generate a UUID from the SHA-1 hash of a namespace UUID and a name."""
sha = sha1(namespace, usedforsecurity=False)
sha.update(b"".join(p.encode() for p in parts))
hex = sha.hexdigest()
return f"{hex[:8]}-{hex[8:12]}-{hex[12:16]}-{hex[16:20]}-{hex[20:32]}"
def _tuple_str(tup: Union[str, int, tuple]) -> str:
"""Generate a string representation of a tuple."""
return (
f"({', '.join(_tuple_str(x) for x in tup)})"
if isinstance(tup, (tuple, list))
else str(tup)
)
|
0 | lc_public_repos/langgraph/libs/langgraph/langgraph | lc_public_repos/langgraph/libs/langgraph/langgraph/pregel/log.py | import logging
logger = logging.getLogger("langgraph")
|
0 | lc_public_repos/langgraph/libs/langgraph/langgraph | lc_public_repos/langgraph/libs/langgraph/langgraph/pregel/write.py | from __future__ import annotations
from typing import (
Any,
Callable,
NamedTuple,
Optional,
Sequence,
TypeVar,
Union,
cast,
)
from langchain_core.runnables import Runnable, RunnableConfig
from langchain_core.runnables.utils import ConfigurableFieldSpec
from langgraph.constants import CONF, CONFIG_KEY_SEND, FF_SEND_V2, PUSH, TASKS, Send
from langgraph.errors import InvalidUpdateError
from langgraph.utils.runnable import RunnableCallable
TYPE_SEND = Callable[[Sequence[tuple[str, Any]]], None]
R = TypeVar("R", bound=Runnable)
SKIP_WRITE = object()
PASSTHROUGH = object()
class ChannelWriteEntry(NamedTuple):
    """A single static write: a target channel plus the value to write to it."""

    channel: str
    """Channel name to write to."""
    value: Any = PASSTHROUGH
    """Value to write, or PASSTHROUGH to use the input."""
    skip_none: bool = False
    """Whether to skip writing if the value is None."""
    mapper: Optional[Callable] = None
    """Function to transform the value before writing."""
class ChannelWriteTupleEntry(NamedTuple):
    """A dynamic write: a mapper that derives (channel, value) tuples from the value."""

    mapper: Callable[[Any], Optional[Sequence[tuple[str, Any]]]]
    """Function to extract tuples from value."""
    value: Any = PASSTHROUGH
    """Value to write, or PASSTHROUGH to use the input."""
class ChannelWrite(RunnableCallable):
    """Implements the logic for sending writes to CONFIG_KEY_SEND.
    Can be used as a runnable or as a static method to call imperatively."""

    writes: list[Union[ChannelWriteEntry, ChannelWriteTupleEntry, Send]]
    """Sequence of write entries or Send objects to write."""
    require_at_least_one_of: Optional[Sequence[str]]
    """If defined, at least one of these channels must be written to."""

    def __init__(
        self,
        writes: Sequence[Union[ChannelWriteEntry, ChannelWriteTupleEntry, Send]],
        *,
        tags: Optional[Sequence[str]] = None,
        require_at_least_one_of: Optional[Sequence[str]] = None,
    ):
        super().__init__(func=self._write, afunc=self._awrite, name=None, tags=tags)
        self.writes = cast(
            list[Union[ChannelWriteEntry, ChannelWriteTupleEntry, Send]], writes
        )
        self.require_at_least_one_of = require_at_least_one_of

    def get_name(
        self, suffix: Optional[str] = None, *, name: Optional[str] = None
    ) -> str:
        """Derive a display name listing the target channels/nodes."""
        if not name:
            name = f"ChannelWrite<{','.join(w.channel if isinstance(w, ChannelWriteEntry) else '...' if isinstance(w, ChannelWriteTupleEntry) else w.node for w in self.writes)}>"
        return super().get_name(suffix, name=name)

    @property
    def config_specs(self) -> list[ConfigurableFieldSpec]:
        # declare the CONFIG_KEY_SEND dependency so it is propagated in configs
        return [
            ConfigurableFieldSpec(
                id=CONFIG_KEY_SEND,
                name=CONFIG_KEY_SEND,
                description=None,
                default=None,
                annotation=None,
            ),
        ]

    def _assemble_writes(
        self, input: Any
    ) -> list[Union[ChannelWriteEntry, ChannelWriteTupleEntry, Send]]:
        """Substitute the runnable's input for any PASSTHROUGH placeholders.

        Shared by the sync and async entry points, which previously duplicated
        this logic inline."""
        return [
            ChannelWriteEntry(write.channel, input, write.skip_none, write.mapper)
            if isinstance(write, ChannelWriteEntry) and write.value is PASSTHROUGH
            else ChannelWriteTupleEntry(write.mapper, input)
            if isinstance(write, ChannelWriteTupleEntry) and write.value is PASSTHROUGH
            else write
            for write in self.writes
        ]

    def _write(self, input: Any, config: RunnableConfig) -> Any:
        """Sync entry point: perform the writes and pass the input through.

        Return annotation fixed from None to Any — this returns the input."""
        self.do_write(
            config,
            self._assemble_writes(input),
            self.require_at_least_one_of if input is not None else None,
        )
        return input

    async def _awrite(self, input: Any, config: RunnableConfig) -> Any:
        """Async entry point: perform the writes and pass the input through.

        Return annotation fixed from None to Any — this returns the input."""
        self.do_write(
            config,
            self._assemble_writes(input),
            self.require_at_least_one_of if input is not None else None,
        )
        return input

    @staticmethod
    def do_write(
        config: RunnableConfig,
        writes: Sequence[Union[ChannelWriteEntry, ChannelWriteTupleEntry, Send]],
        require_at_least_one_of: Optional[Sequence[str]] = None,
    ) -> None:
        """Validate `writes`, convert them to (channel, value) tuples, and send
        them to the CONFIG_KEY_SEND callable found in `config`.

        Raises InvalidUpdateError for reserved-channel writes, unsubstituted
        PASSTHROUGH values, or when `require_at_least_one_of` is unsatisfied."""
        # validate
        for w in writes:
            if isinstance(w, ChannelWriteEntry):
                if w.channel in (TASKS, PUSH):
                    # message fixed to name the offending channel
                    # (previously always said TASKS, even for PUSH)
                    raise InvalidUpdateError(
                        f"Cannot write to the reserved channel {w.channel}"
                    )
                if w.value is PASSTHROUGH:
                    raise InvalidUpdateError("PASSTHROUGH value must be replaced")
            if isinstance(w, ChannelWriteTupleEntry):
                if w.value is PASSTHROUGH:
                    raise InvalidUpdateError("PASSTHROUGH value must be replaced")
        # assemble writes
        tuples: list[tuple[str, Any]] = []
        for w in writes:
            if isinstance(w, Send):
                tuples.append((PUSH if FF_SEND_V2 else TASKS, w))
            elif isinstance(w, ChannelWriteTupleEntry):
                if ww := w.mapper(w.value):
                    tuples.extend(ww)
            elif isinstance(w, ChannelWriteEntry):
                value = w.mapper(w.value) if w.mapper is not None else w.value
                if value is SKIP_WRITE:
                    # mapper opted out of this write
                    continue
                if w.skip_none and value is None:
                    continue
                tuples.append((w.channel, value))
            else:
                raise ValueError(f"Invalid write entry: {w}")
        # assert required channels
        if require_at_least_one_of is not None:
            if not {chan for chan, _ in tuples} & set(require_at_least_one_of):
                raise InvalidUpdateError(
                    f"Must write to at least one of {require_at_least_one_of}"
                )
        write: TYPE_SEND = config[CONF][CONFIG_KEY_SEND]
        write(tuples)

    @staticmethod
    def is_writer(runnable: Runnable) -> bool:
        """Used by PregelNode to distinguish between writers and other runnables."""
        return (
            isinstance(runnable, ChannelWrite)
            or getattr(runnable, "_is_channel_writer", False) is True
        )

    @staticmethod
    def register_writer(runnable: R) -> R:
        """Used to mark a runnable as a writer, so that it can be detected by is_writer.
        Instances of ChannelWrite are automatically marked as writers."""
        # using object.__setattr__ to work around objects that override __setattr__
        # eg. pydantic models and dataclasses
        object.__setattr__(runnable, "_is_channel_writer", True)
        return runnable
|
0 | lc_public_repos/langgraph/libs/langgraph/langgraph | lc_public_repos/langgraph/libs/langgraph/langgraph/pregel/retry.py | import asyncio
import logging
import random
import sys
import time
from dataclasses import replace
from functools import partial
from typing import Any, Callable, Optional, Sequence
from langgraph.constants import (
CONF,
CONFIG_KEY_CHECKPOINT_NS,
CONFIG_KEY_RESUMING,
CONFIG_KEY_SEND,
NS_SEP,
)
from langgraph.errors import _SEEN_CHECKPOINT_NS, GraphBubbleUp, ParentCommand
from langgraph.types import Command, PregelExecutableTask, RetryPolicy
from langgraph.utils.config import patch_configurable
logger = logging.getLogger(__name__)
SUPPORTS_EXC_NOTES = sys.version_info >= (3, 11)
def run_with_retry(
    task: PregelExecutableTask,
    retry_policy: Optional[RetryPolicy],
    writer: Optional[
        Callable[[PregelExecutableTask, Sequence[tuple[str, Any]]], None]
    ] = None,
) -> None:
    """Run a task with retries.

    The task's own retry policy takes precedence over the ``retry_policy``
    argument. Retries sleep with exponential backoff (and optional jitter)
    between attempts; ParentCommand and GraphBubbleUp are control-flow
    signals and are never retried.

    Args:
        task: the executable task to run.
        retry_policy: fallback policy when the task doesn't define one;
            None disables retries.
        writer: optional override for the task's write sink, patched into
            the config under CONFIG_KEY_SEND.
    """
    retry_policy = task.retry_policy or retry_policy
    interval = retry_policy.initial_interval if retry_policy else 0
    attempts = 0
    config = task.config
    if writer is not None:
        config = patch_configurable(config, {CONFIG_KEY_SEND: partial(writer, task)})
    while True:
        try:
            # clear any writes from previous attempts
            task.writes.clear()
            # run the task
            task.proc.invoke(task.input, config)
            # if successful, end
            break
        except ParentCommand as exc:
            ns: str = config[CONF][CONFIG_KEY_CHECKPOINT_NS]
            cmd = exc.args[0]
            if cmd.graph == ns:
                # this command is for the current graph, handle it
                for w in task.writers:
                    w.invoke(cmd, config)
                break
            elif cmd.graph == Command.PARENT:
                # this command is for the parent graph, assign it to the parent
                parent_ns = NS_SEP.join(ns.split(NS_SEP)[:-1])
                exc.args = (replace(cmd, graph=parent_ns),)
            # bubble up
            raise
        except GraphBubbleUp:
            # if interrupted, end
            raise
        except Exception as exc:
            if SUPPORTS_EXC_NOTES:
                exc.add_note(f"During task with name '{task.name}' and id '{task.id}'")
            if retry_policy is None:
                raise
            # increment attempts
            attempts += 1
            # check if we should retry
            if isinstance(retry_policy.retry_on, Sequence):
                if not isinstance(exc, tuple(retry_policy.retry_on)):
                    raise
            elif isinstance(retry_policy.retry_on, type) and issubclass(
                retry_policy.retry_on, Exception
            ):
                if not isinstance(exc, retry_policy.retry_on):
                    raise
            elif callable(retry_policy.retry_on):
                if not retry_policy.retry_on(exc):  # type: ignore[call-arg]
                    raise
            else:
                raise TypeError(
                    "retry_on must be an Exception class, a list or tuple of Exception classes, or a callable"
                )
            # check if we should give up
            if attempts >= retry_policy.max_attempts:
                raise
            # sleep before retrying
            interval = min(
                retry_policy.max_interval,
                interval * retry_policy.backoff_factor,
            )
            time.sleep(
                interval + random.uniform(0, 1) if retry_policy.jitter else interval
            )
            # log the retry
            logger.info(
                f"Retrying task {task.name} after {interval:.2f} seconds (attempt {attempts}) after {exc.__class__.__name__} {exc}",
                exc_info=exc,
            )
            # signal subgraphs to resume (if available)
            config = patch_configurable(config, {CONFIG_KEY_RESUMING: True})
            # clear checkpoint_ns seen (for subgraph detection)
            # NOTE(review): the finally block below performs the same discard;
            # this looks redundant (discard is idempotent) — confirm intent
            if checkpoint_ns := config[CONF].get(CONFIG_KEY_CHECKPOINT_NS):
                _SEEN_CHECKPOINT_NS.discard(checkpoint_ns)
        finally:
            # clear checkpoint_ns seen (for subgraph detection)
            if checkpoint_ns := config[CONF].get(CONFIG_KEY_CHECKPOINT_NS):
                _SEEN_CHECKPOINT_NS.discard(checkpoint_ns)
async def arun_with_retry(
    task: PregelExecutableTask,
    retry_policy: Optional[RetryPolicy],
    stream: bool = False,
    writer: Optional[
        Callable[[PregelExecutableTask, Sequence[tuple[str, Any]]], None]
    ] = None,
) -> None:
    """Run a task asynchronously with retries.

    Async counterpart of ``run_with_retry``; see it for the retry semantics.

    Args:
        task: the executable task to run.
        retry_policy: fallback policy when the task doesn't define one;
            None disables retries.
        stream: if True, drive the task via ``astream`` (consuming all chunks)
            instead of ``ainvoke``.
        writer: optional override for the task's write sink, patched into
            the config under CONFIG_KEY_SEND.
    """
    retry_policy = task.retry_policy or retry_policy
    interval = retry_policy.initial_interval if retry_policy else 0
    attempts = 0
    config = task.config
    if writer is not None:
        config = patch_configurable(config, {CONFIG_KEY_SEND: partial(writer, task)})
    while True:
        try:
            # clear any writes from previous attempts
            task.writes.clear()
            # run the task
            if stream:
                async for _ in task.proc.astream(task.input, config):
                    pass
            else:
                await task.proc.ainvoke(task.input, config)
            # if successful, end
            break
        except ParentCommand as exc:
            ns: str = config[CONF][CONFIG_KEY_CHECKPOINT_NS]
            cmd = exc.args[0]
            if cmd.graph == ns:
                # this command is for the current graph, handle it
                for w in task.writers:
                    w.invoke(cmd, config)
                break
            elif cmd.graph == Command.PARENT:
                # this command is for the parent graph, assign it to the parent
                parent_ns = NS_SEP.join(ns.split(NS_SEP)[:-1])
                exc.args = (replace(cmd, graph=parent_ns),)
            # bubble up
            raise
        except GraphBubbleUp:
            # if interrupted, end
            raise
        except Exception as exc:
            if SUPPORTS_EXC_NOTES:
                exc.add_note(f"During task with name '{task.name}' and id '{task.id}'")
            if retry_policy is None:
                raise
            # increment attempts
            attempts += 1
            # check if we should retry
            if isinstance(retry_policy.retry_on, Sequence):
                if not isinstance(exc, tuple(retry_policy.retry_on)):
                    raise
            elif isinstance(retry_policy.retry_on, type) and issubclass(
                retry_policy.retry_on, Exception
            ):
                if not isinstance(exc, retry_policy.retry_on):
                    raise
            elif callable(retry_policy.retry_on):
                if not retry_policy.retry_on(exc):  # type: ignore[call-arg]
                    raise
            else:
                raise TypeError(
                    "retry_on must be an Exception class, a list or tuple of Exception classes, or a callable"
                )
            # check if we should give up
            if attempts >= retry_policy.max_attempts:
                raise
            # sleep before retrying
            interval = min(
                retry_policy.max_interval,
                interval * retry_policy.backoff_factor,
            )
            await asyncio.sleep(
                interval + random.uniform(0, 1) if retry_policy.jitter else interval
            )
            # log the retry
            logger.info(
                f"Retrying task {task.name} after {interval:.2f} seconds (attempt {attempts}) after {exc.__class__.__name__} {exc}",
                exc_info=exc,
            )
            # signal subgraphs to resume (if available)
            config = patch_configurable(config, {CONFIG_KEY_RESUMING: True})
            # clear checkpoint_ns seen (for subgraph detection)
            # NOTE(review): the finally block below performs the same discard;
            # this looks redundant (discard is idempotent) — confirm intent
            if checkpoint_ns := config[CONF].get(CONFIG_KEY_CHECKPOINT_NS):
                _SEEN_CHECKPOINT_NS.discard(checkpoint_ns)
        finally:
            # clear checkpoint_ns seen (for subgraph detection)
            if checkpoint_ns := config[CONF].get(CONFIG_KEY_CHECKPOINT_NS):
                _SEEN_CHECKPOINT_NS.discard(checkpoint_ns)
|
0 | lc_public_repos/langgraph/libs/langgraph/langgraph | lc_public_repos/langgraph/libs/langgraph/langgraph/pregel/executor.py | import asyncio
import concurrent.futures
import sys
from contextlib import ExitStack
from contextvars import copy_context
from types import TracebackType
from typing import (
AsyncContextManager,
Awaitable,
Callable,
ContextManager,
Coroutine,
Optional,
Protocol,
TypeVar,
cast,
)
from langchain_core.runnables import RunnableConfig
from langchain_core.runnables.config import get_executor_for_config
from typing_extensions import ParamSpec
from langgraph.errors import GraphBubbleUp
P = ParamSpec("P")
T = TypeVar("T")
class Submit(Protocol[P, T]):
    """Callable signature shared by both executors' `submit` methods.

    The dunder-prefixed keyword args control task lifecycle on context exit."""

    def __call__(
        self,
        fn: Callable[P, T],
        *args: P.args,
        __name__: Optional[str] = None,  # task name (used by the async executor)
        __cancel_on_exit__: bool = False,  # cancel the task when the executor exits
        __reraise_on_exit__: bool = True,  # re-raise the task's exception on exit
        **kwargs: P.kwargs,
    ) -> concurrent.futures.Future[T]: ...
class BackgroundExecutor(ContextManager):
    """A context manager that runs sync tasks in the background.
    Uses a thread pool executor to delegate tasks to separate threads.
    On exit,
    - cancels any (not yet started) tasks with `__cancel_on_exit__=True`
    - waits for all tasks to finish
    - re-raises the first exception from tasks with `__reraise_on_exit__=True`"""

    def __init__(self, config: RunnableConfig) -> None:
        self.stack = ExitStack()
        self.executor = self.stack.enter_context(get_executor_for_config(config))
        # maps future -> (__cancel_on_exit__, __reraise_on_exit__) flags
        self.tasks: dict[concurrent.futures.Future, tuple[bool, bool]] = {}

    def submit(  # type: ignore[valid-type]
        self,
        fn: Callable[P, T],
        *args: P.args,
        __name__: Optional[str] = None,  # currently not used in sync version
        __cancel_on_exit__: bool = False,  # for sync, can cancel only if not started
        __reraise_on_exit__: bool = True,
        **kwargs: P.kwargs,
    ) -> concurrent.futures.Future[T]:
        """Schedule `fn(*args, **kwargs)` on the thread pool and track it."""
        task = self.executor.submit(fn, *args, **kwargs)
        self.tasks[task] = (__cancel_on_exit__, __reraise_on_exit__)
        task.add_done_callback(self.done)
        return task

    def done(self, task: concurrent.futures.Future) -> None:
        """Completion callback: drop finished tasks, keep failed ones so their
        exception can be re-raised in __exit__."""
        try:
            task.result()
        except GraphBubbleUp:
            # This exception is an interruption signal, not an error
            # so we don't want to re-raise it on exit
            self.tasks.pop(task)
        except BaseException:
            # keep the task in the dict so __exit__ can re-raise its exception
            pass
        else:
            self.tasks.pop(task)

    def __enter__(self) -> Submit:
        return self.submit

    def __exit__(
        self,
        exc_type: Optional[type[BaseException]],
        exc_value: Optional[BaseException],
        traceback: Optional[TracebackType],
    ) -> Optional[bool]:
        # copy the tasks as done() callback may modify the dict
        tasks = self.tasks.copy()
        # cancel all tasks that should be cancelled
        for task, (cancel, _) in tasks.items():
            if cancel:
                task.cancel()
        # wait for all tasks to finish
        if pending := {t for t in tasks if not t.done()}:
            concurrent.futures.wait(pending)
        # shutdown the executor
        self.stack.__exit__(exc_type, exc_value, traceback)
        # re-raise the first exception that occurred in a task
        if exc_type is None:
            # if there's already an exception being raised, don't raise another one
            for task, (_, reraise) in tasks.items():
                if not reraise:
                    continue
                try:
                    # result() raises the task's exception, propagating it here
                    task.result()
                except concurrent.futures.CancelledError:
                    pass
class AsyncBackgroundExecutor(AsyncContextManager):
    """A context manager that runs async tasks in the background.
    Uses the current event loop to delegate tasks to asyncio tasks.
    On exit,
    - cancels any tasks with `__cancel_on_exit__=True`
    - waits for all tasks to finish
    - re-raises the first exception from tasks with `__reraise_on_exit__=True`
    ignoring CancelledError"""

    def __init__(self, config: RunnableConfig) -> None:
        # asyncio.Task's context= kwarg requires Python 3.11+
        self.context_not_supported = sys.version_info < (3, 11)
        # maps task -> (__cancel_on_exit__, __reraise_on_exit__) flags
        self.tasks: dict[asyncio.Task, tuple[bool, bool]] = {}
        # sentinel passed as the cancel message, identifying our own cancellations
        self.sentinel = object()
        self.loop = asyncio.get_running_loop()
        if max_concurrency := config.get("max_concurrency"):
            self.semaphore: Optional[asyncio.Semaphore] = asyncio.Semaphore(
                max_concurrency
            )
        else:
            self.semaphore = None

    def submit(  # type: ignore[valid-type]
        self,
        fn: Callable[P, Awaitable[T]],
        *args: P.args,
        __name__: Optional[str] = None,
        __cancel_on_exit__: bool = False,
        __reraise_on_exit__: bool = True,
        **kwargs: P.kwargs,
    ) -> asyncio.Task[T]:
        """Schedule `fn(*args, **kwargs)` as an asyncio task and track it."""
        coro = cast(Coroutine[None, None, T], fn(*args, **kwargs))
        if self.semaphore:
            # gate concurrency through the semaphore when max_concurrency is set
            coro = gated(self.semaphore, coro)
        if self.context_not_supported:
            task = self.loop.create_task(coro, name=__name__)
        else:
            task = self.loop.create_task(coro, name=__name__, context=copy_context())
        self.tasks[task] = (__cancel_on_exit__, __reraise_on_exit__)
        task.add_done_callback(self.done)
        return task

    def done(self, task: asyncio.Task) -> None:
        """Completion callback: drop finished tasks, keep failed ones so their
        exception can be re-raised in __aexit__."""
        try:
            if exc := task.exception():
                # This exception is an interruption signal, not an error
                # so we don't want to re-raise it on exit
                if isinstance(exc, GraphBubbleUp):
                    self.tasks.pop(task)
                # BUGFIX: any other exception must keep the task in the dict so
                # __aexit__ can re-raise it; previously both branches popped the
                # task, silently discarding task failures (the sync
                # BackgroundExecutor.done keeps failed tasks for re-raise)
            else:
                self.tasks.pop(task)
        except asyncio.CancelledError:
            self.tasks.pop(task)

    async def __aenter__(self) -> Submit:
        return self.submit

    async def __aexit__(
        self,
        exc_type: Optional[type[BaseException]],
        exc_value: Optional[BaseException],
        traceback: Optional[TracebackType],
    ) -> None:
        # copy the tasks as done() callback may modify the dict
        tasks = self.tasks.copy()
        # cancel all tasks that should be cancelled
        for task, (cancel, _) in tasks.items():
            if cancel:
                task.cancel(self.sentinel)
        # wait for all tasks to finish
        if tasks:
            await asyncio.wait(tasks)
        # if there's already an exception being raised, don't raise another one
        if exc_type is None:
            # re-raise the first exception that occurred in a task
            for task, (_, reraise) in tasks.items():
                if not reraise:
                    continue
                try:
                    if exc := task.exception():
                        raise exc
                except asyncio.CancelledError:
                    pass
async def gated(semaphore: asyncio.Semaphore, coro: Coroutine[None, None, T]) -> T:
    """Acquire `semaphore`, run `coro` to completion, then release the
    semaphore — regardless of whether the coroutine succeeded or raised."""
    await semaphore.acquire()
    try:
        return await coro
    finally:
        semaphore.release()
|
0 | lc_public_repos/langgraph/libs/langgraph/langgraph | lc_public_repos/langgraph/libs/langgraph/langgraph/pregel/io.py | from typing import Any, Iterator, Literal, Mapping, Optional, Sequence, TypeVar, Union
from uuid import UUID
from langchain_core.runnables.utils import AddableDict
from langgraph.channels.base import BaseChannel, EmptyChannelError
from langgraph.checkpoint.base import PendingWrite
from langgraph.constants import (
EMPTY_SEQ,
ERROR,
FF_SEND_V2,
INTERRUPT,
NULL_TASK_ID,
PUSH,
RESUME,
TAG_HIDDEN,
TASKS,
)
from langgraph.errors import InvalidUpdateError
from langgraph.pregel.log import logger
from langgraph.types import Command, PregelExecutableTask, Send
def is_task_id(task_id: str) -> bool:
    """Check if a string is a valid task id (i.e. parses as a UUID)."""
    try:
        UUID(task_id)
        return True
    except ValueError:
        return False
def read_channel(
    channels: Mapping[str, BaseChannel],
    chan: str,
    *,
    catch: bool = True,
    return_exception: bool = False,
) -> Any:
    """Read the current value of channel `chan`.

    When the channel is empty: return the EmptyChannelError itself if
    `return_exception` is set, return None if `catch` is set, otherwise
    let the error propagate.
    """
    try:
        return channels[chan].get()
    except EmptyChannelError as exc:
        if return_exception:
            return exc
        if catch:
            return None
        raise
def read_channels(
    channels: Mapping[str, BaseChannel],
    select: Union[Sequence[str], str],
    *,
    skip_empty: bool = True,
) -> Union[dict[str, Any], Any]:
    """Read one channel (str `select`) or several (sequence `select`).

    For a sequence, returns a dict; with `skip_empty`, currently-empty
    channels are omitted, otherwise they are included with value None.
    """
    if isinstance(select, str):
        return read_channel(channels, select)
    values: dict[str, Any] = {}
    for key in select:
        try:
            values[key] = read_channel(channels, key, catch=not skip_empty)
        except EmptyChannelError:
            # only reachable when skip_empty is set (catch=False above)
            pass
    return values
def map_command(
    cmd: Command, pending_writes: list[PendingWrite]
) -> Iterator[tuple[str, str, Any]]:
    """Map a Command to a sequence of pending writes (task_id, channel, value).

    Raises:
        InvalidUpdateError: if the command targets a parent graph (none here).
        TypeError: if a `goto` entry is not a Send.
    """
    if cmd.graph == Command.PARENT:
        # grammar fix: was "There is not parent graph"
        raise InvalidUpdateError("There is no parent graph")
    if cmd.goto:
        if isinstance(cmd.goto, (tuple, list)):
            sends = cmd.goto
        else:
            sends = [cmd.goto]
        for send in sends:
            if not isinstance(send, Send):
                raise TypeError(
                    f"In Command.goto, expected Send, got {type(send).__name__}"
                )
            yield (NULL_TASK_ID, PUSH if FF_SEND_V2 else TASKS, send)
        # TODO handle goto str for state graph
    # BUGFIX: compare against None rather than truthiness, so falsy resume
    # values such as False, 0 or "" are still delivered to interrupted tasks
    if cmd.resume is not None:
        if isinstance(cmd.resume, dict) and all(is_task_id(k) for k in cmd.resume):
            # per-task resume values: append to any existing RESUME write
            for tid, resume in cmd.resume.items():
                existing: list[Any] = next(
                    (w[2] for w in pending_writes if w[0] == tid and w[1] == RESUME), []
                )
                existing.append(resume)
                yield (tid, RESUME, existing)
        else:
            yield (NULL_TASK_ID, RESUME, cmd.resume)
    if cmd.update:
        for k, v in cmd._update_as_tuples():
            yield (NULL_TASK_ID, k, v)
def map_input(
    input_channels: Union[str, Sequence[str]],
    chunk: Optional[Union[dict[str, Any], Any]],
) -> Iterator[tuple[str, Any]]:
    """Map an input chunk to pending writes in the form (channel, value)."""
    if chunk is None:
        return
    if isinstance(input_channels, str):
        # single input channel: the whole chunk is its value
        yield (input_channels, chunk)
        return
    if not isinstance(chunk, dict):
        raise TypeError(f"Expected chunk to be a dict, got {type(chunk).__name__}")
    for k, v in chunk.items():
        if k in input_channels:
            yield (k, v)
        else:
            logger.warning(f"Input channel {k} not found in {input_channels}")
class AddableValuesDict(AddableDict):
    """Dict whose `+` (from either side) merges mappings via `|`.

    On duplicate keys, the right-hand operand of `|` wins, so `a + b`
    lets `b`'s values override `a`'s.
    """

    def __add__(self, other: dict[str, Any]) -> "AddableValuesDict":
        return self | other

    def __radd__(self, other: dict[str, Any]) -> "AddableValuesDict":
        return other | self
def map_output_values(
    output_channels: Union[str, Sequence[str]],
    pending_writes: Union[Literal[True], Sequence[tuple[str, Any]]],
    channels: Mapping[str, BaseChannel],
) -> Iterator[Union[dict[str, Any], Any]]:
    """Map pending writes (a sequence of tuples (channel, value)) to output chunk."""
    # pending_writes=True forces a read regardless of what was written
    if isinstance(output_channels, str):
        touched = pending_writes is True or any(
            chan == output_channels for chan, _ in pending_writes
        )
        if touched:
            yield read_channel(channels, output_channels)
    else:
        touched = pending_writes is True or any(
            chan in output_channels for chan, _ in pending_writes
        )
        if touched:
            yield AddableValuesDict(read_channels(channels, output_channels))
class AddableUpdatesDict(AddableDict):
    """Dict whose `+` collects operands into a list instead of merging.

    `a + b` yields `[a, b]`, so successive update chunks are kept as
    separate entries rather than overwriting each other. Right-side
    addition is deliberately unsupported.
    """

    def __add__(self, other: dict[str, Any]) -> "AddableUpdatesDict":
        # NOTE: returns a list, not an AddableUpdatesDict, despite the
        # annotation — each step's updates are preserved as separate dicts
        return [self, other]

    def __radd__(self, other: dict[str, Any]) -> "AddableUpdatesDict":
        raise TypeError("AddableUpdatesDict does not support right-side addition")
def map_output_updates(
    output_channels: Union[str, Sequence[str]],
    tasks: list[tuple[PregelExecutableTask, Sequence[tuple[str, Any]]]],
    cached: bool = False,
) -> Iterator[dict[str, Union[Any, dict[str, Any]]]]:
    """Map pending writes (a sequence of tuples (channel, value)) to output chunk."""
    # keep only visible tasks: not tagged hidden, and whose FIRST write is
    # neither an error nor an interrupt
    # NOTE(review): ww[0] assumes every task tuple carries at least one write
    output_tasks = [
        (t, ww)
        for t, ww in tasks
        if (not t.config or TAG_HIDDEN not in t.config.get("tags", EMPTY_SEQ))
        and ww[0][0] != ERROR
        and ww[0][0] != INTERRUPT
    ]
    if not output_tasks:
        return
    if isinstance(output_channels, str):
        # single output channel: one (task name, value) pair per matching write
        updated = (
            (task.name, value)
            for task, writes in output_tasks
            for chan, value in writes
            if chan == output_channels
        )
    else:
        # multiple output channels: one (task name, {channel: value}) per task
        # that wrote to at least one of them
        updated = (
            (
                task.name,
                {chan: value for chan, value in writes if chan in output_channels},
            )
            for task, writes in output_tasks
            if any(chan in output_channels for chan, _ in writes)
        )
    # group collected values by task name, then collapse:
    # no values -> None, exactly one value -> the value itself
    grouped: dict[str, list[Any]] = {t.name: [] for t, _ in output_tasks}
    for node, value in updated:
        grouped[node].append(value)
    for node, value in grouped.items():
        if len(value) == 0:
            grouped[node] = None  # type: ignore[assignment]
        if len(value) == 1:
            grouped[node] = value[0]
    if cached:
        # mark the chunk as coming from cache
        grouped["__metadata__"] = {"cached": cached}  # type: ignore[assignment]
    yield AddableUpdatesDict(grouped)
T = TypeVar("T")
def single(iter: Iterator[T]) -> Optional[T]:
    """Return the first item produced by the iterable, or None when empty."""
    return next((item for item in iter), None)
|
0 | lc_public_repos/langgraph/libs/langgraph/langgraph | lc_public_repos/langgraph/libs/langgraph/langgraph/pregel/types.py | """Re-export types moved to langgraph.types"""
from langgraph.types import (
All,
CachePolicy,
PregelExecutableTask,
PregelTask,
RetryPolicy,
StateSnapshot,
StreamMode,
StreamWriter,
default_retry_on,
)
__all__ = [
"All",
"CachePolicy",
"PregelExecutableTask",
"PregelTask",
"RetryPolicy",
"StateSnapshot",
"StreamMode",
"StreamWriter",
"default_retry_on",
]
|
0 | lc_public_repos/langgraph/libs/langgraph/langgraph | lc_public_repos/langgraph/libs/langgraph/langgraph/pregel/validate.py | from typing import Any, Mapping, Optional, Sequence, Union
from langgraph.channels.base import BaseChannel
from langgraph.constants import RESERVED
from langgraph.pregel.read import PregelNode
from langgraph.types import All
def validate_graph(
    nodes: Mapping[str, PregelNode],
    channels: dict[str, BaseChannel],
    input_channels: Union[str, Sequence[str]],
    output_channels: Union[str, Sequence[str]],
    stream_channels: Optional[Union[str, Sequence[str]]],
    interrupt_after_nodes: Union[All, Sequence[str]],
    interrupt_before_nodes: Union[All, Sequence[str]],
) -> None:
    """Validate the structural consistency of a Pregel graph definition.

    Raises:
        ValueError: for reserved channel/node names, channels referenced but
            not declared, input channels no node subscribes to, or interrupt
            nodes that don't exist.
        TypeError: for nodes that are not PregelNode instances.
    """
    # channel names must not collide with reserved identifiers
    for chan in channels:
        if chan in RESERVED:
            raise ValueError(f"Channel names {chan} are reserved")

    subscribed_channels = set[str]()
    for name, node in nodes.items():
        if name in RESERVED:
            # CONSISTENCY FIX: report the offending node name, as the
            # channel check above does (was: f"Node names {RESERVED} ...")
            raise ValueError(f"Node name {name} is reserved")
        if isinstance(node, PregelNode):
            subscribed_channels.update(node.triggers)
        else:
            raise TypeError(
                f"Invalid node type {type(node)}, expected Channel.subscribe_to()"
            )

    # every trigger must reference a declared channel
    for chan in subscribed_channels:
        if chan not in channels:
            raise ValueError(f"Subscribed channel '{chan}' not in 'channels'")

    if isinstance(input_channels, str):
        if input_channels not in channels:
            raise ValueError(f"Input channel '{input_channels}' not in 'channels'")
        if input_channels not in subscribed_channels:
            raise ValueError(
                f"Input channel {input_channels} is not subscribed to by any node"
            )
    else:
        for chan in input_channels:
            if chan not in channels:
                raise ValueError(f"Input channel '{chan}' not in 'channels'")
        # at least one of the input channels must trigger some node
        if all(chan not in subscribed_channels for chan in input_channels):
            raise ValueError(
                f"None of the input channels {input_channels} are subscribed to by any node"
            )

    # output and stream channels must all be declared
    all_output_channels = set[str]()
    if isinstance(output_channels, str):
        all_output_channels.add(output_channels)
    else:
        all_output_channels.update(output_channels)
    if isinstance(stream_channels, str):
        all_output_channels.add(stream_channels)
    elif stream_channels is not None:
        all_output_channels.update(stream_channels)
    for chan in all_output_channels:
        if chan not in channels:
            raise ValueError(f"Output channel '{chan}' not in 'channels'")

    # "*" means interrupt on every node, so no membership check is needed
    if interrupt_after_nodes != "*":
        for n in interrupt_after_nodes:
            if n not in nodes:
                raise ValueError(f"Node {n} not in nodes")
    if interrupt_before_nodes != "*":
        for n in interrupt_before_nodes:
            if n not in nodes:
                raise ValueError(f"Node {n} not in nodes")
def validate_keys(
    keys: Optional[Union[str, Sequence[str]]],
    channels: Mapping[str, Any],
) -> None:
    """Ensure every requested key names an existing channel.

    Accepts a single key, a sequence of keys, or None (a no-op), and raises
    ValueError for any key missing from `channels`.
    """
    if keys is None:
        return
    candidates = [keys] if isinstance(keys, str) else keys
    for chan in candidates:
        if chan not in channels:
            raise ValueError(f"Key {chan} not in channels")
|
0 | lc_public_repos/langgraph/libs/langgraph/langgraph | lc_public_repos/langgraph/libs/langgraph/langgraph/pregel/debug.py | from collections import defaultdict
from dataclasses import asdict
from datetime import datetime, timezone
from pprint import pformat
from typing import (
Any,
Iterable,
Iterator,
Literal,
Mapping,
Optional,
Sequence,
TypedDict,
Union,
)
from uuid import UUID
from langchain_core.runnables.config import RunnableConfig
from langchain_core.utils.input import get_bolded_text, get_colored_text
from langgraph.channels.base import BaseChannel
from langgraph.checkpoint.base import Checkpoint, CheckpointMetadata, PendingWrite
from langgraph.constants import (
CONF,
CONFIG_KEY_CHECKPOINT_NS,
ERROR,
INTERRUPT,
NS_END,
NS_SEP,
TAG_HIDDEN,
)
from langgraph.pregel.io import read_channels
from langgraph.pregel.utils import find_subgraph_pregel
from langgraph.types import PregelExecutableTask, PregelTask, StateSnapshot
from langgraph.utils.config import patch_checkpoint_map
class TaskPayload(TypedDict):
    """Payload of a "task" debug event, emitted when a task starts."""

    id: str
    name: str
    input: Any
    triggers: list[str]


class TaskResultPayload(TypedDict):
    """Payload of a "task_result" debug event, emitted when a task finishes."""

    id: str
    name: str
    error: Optional[str]
    interrupts: list[dict]
    result: list[tuple[str, Any]]


class CheckpointTask(TypedDict):
    """Summary of one task inside a "checkpoint" debug event."""

    id: str
    name: str
    error: Optional[str]
    interrupts: list[dict]
    state: Optional[RunnableConfig]


class CheckpointPayload(TypedDict):
    """Payload of a "checkpoint" debug event."""

    config: Optional[RunnableConfig]
    metadata: CheckpointMetadata
    values: dict[str, Any]
    next: list[str]
    parent_config: Optional[RunnableConfig]
    tasks: list[CheckpointTask]


class DebugOutputBase(TypedDict):
    """Fields common to every debug stream event."""

    timestamp: str
    step: int


class DebugOutputTask(DebugOutputBase):
    type: Literal["task"]
    payload: TaskPayload


class DebugOutputTaskResult(DebugOutputBase):
    type: Literal["task_result"]
    payload: TaskResultPayload


class DebugOutputCheckpoint(DebugOutputBase):
    type: Literal["checkpoint"]
    payload: CheckpointPayload


# Union of every event shape emitted for stream_mode="debug"
DebugOutput = Union[DebugOutputTask, DebugOutputTaskResult, DebugOutputCheckpoint]

# NOTE(review): appears to be a fixed namespace for deriving deterministic
# task UUIDs (uuid5-style) — usage is outside this file, confirm
TASK_NAMESPACE = UUID("6ba7b831-9dad-11d1-80b4-00c04fd430c8")
def map_debug_tasks(
    step: int, tasks: Iterable[PregelExecutableTask]
) -> Iterator[DebugOutputTask]:
    """Produce "task" events for stream_mode=debug."""
    # one shared timestamp for all tasks of this step
    timestamp = datetime.now(timezone.utc).isoformat()
    for task in tasks:
        # skip tasks explicitly tagged as hidden
        hidden = task.config is not None and TAG_HIDDEN in task.config.get("tags", [])
        if hidden:
            continue
        payload: TaskPayload = {
            "id": task.id,
            "name": task.name,
            "input": task.input,
            "triggers": task.triggers,
        }
        yield {
            "type": "task",
            "timestamp": timestamp,
            "step": step,
            "payload": payload,
        }
def map_debug_task_results(
    step: int,
    task_tup: tuple[PregelExecutableTask, Sequence[tuple[str, Any]]],
    stream_keys: Union[str, Sequence[str]],
) -> Iterator[DebugOutputTaskResult]:
    """Produce "task_result" events for stream_mode=debug."""
    if isinstance(stream_keys, str):
        stream_channels_list: Sequence[str] = [stream_keys]
    else:
        stream_channels_list = stream_keys
    task, writes = task_tup
    # first ERROR write, if any
    error = next((w[1] for w in writes if w[0] == ERROR), None)
    # writes to the requested stream channels only
    result = [w for w in writes if w[0] in stream_channels_list]
    interrupts = [asdict(w[1]) for w in writes if w[0] == INTERRUPT]
    yield {
        "type": "task_result",
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "step": step,
        "payload": {
            "id": task.id,
            "name": task.name,
            "error": error,
            "result": result,
            "interrupts": interrupts,
        },
    }
def map_debug_checkpoint(
    step: int,
    config: RunnableConfig,
    channels: Mapping[str, BaseChannel],
    stream_channels: Union[str, Sequence[str]],
    metadata: CheckpointMetadata,
    checkpoint: Checkpoint,
    tasks: Iterable[PregelExecutableTask],
    pending_writes: list[PendingWrite],
    parent_config: Optional[RunnableConfig],
    output_keys: Union[str, Sequence[str]],
) -> Iterator[DebugOutputCheckpoint]:
    """Produce "checkpoint" events for stream_mode=debug."""
    # NOTE(review): `tasks` is iterated several times below (subgraph scan,
    # "next" names, tasks_w_writes) — callers must pass a reusable iterable
    parent_ns = config[CONF].get(CONFIG_KEY_CHECKPOINT_NS, "")
    task_states: dict[str, Union[RunnableConfig, StateSnapshot]] = {}
    for task in tasks:
        # only tasks wrapping a subgraph get an entry in task_states
        if not find_subgraph_pregel(task.proc):
            continue
        # assemble checkpoint_ns for this task
        task_ns = f"{task.name}{NS_END}{task.id}"
        if parent_ns:
            task_ns = f"{parent_ns}{NS_SEP}{task_ns}"
        # set config as signal that subgraph checkpoints exist
        task_states[task.id] = {
            CONF: {
                "thread_id": config[CONF]["thread_id"],
                CONFIG_KEY_CHECKPOINT_NS: task_ns,
            }
        }
    yield {
        "type": "checkpoint",
        "timestamp": checkpoint["ts"],
        "step": step,
        "payload": {
            "config": patch_checkpoint_map(config, metadata),
            "parent_config": patch_checkpoint_map(parent_config, metadata),
            "values": read_channels(channels, stream_channels),
            "metadata": metadata,
            "next": [t.name for t in tasks],
            # per-task summary: errored tasks report their error, finished
            # tasks their result, otherwise only interrupts/state are shown
            "tasks": [
                {
                    "id": t.id,
                    "name": t.name,
                    "error": t.error,
                    "state": t.state,
                }
                if t.error
                else {
                    "id": t.id,
                    "name": t.name,
                    "result": t.result,
                    "interrupts": tuple(asdict(i) for i in t.interrupts),
                    "state": t.state,
                }
                if t.result
                else {
                    "id": t.id,
                    "name": t.name,
                    "interrupts": tuple(asdict(i) for i in t.interrupts),
                    "state": t.state,
                }
                for t in tasks_w_writes(tasks, pending_writes, task_states, output_keys)
            ],
        },
    }
def print_step_tasks(step: int, next_tasks: list[PregelExecutableTask]) -> None:
    """Pretty-print the tasks about to run in a step, one line per task."""
    n_tasks = len(next_tasks)
    header = f"{get_colored_text(f'[{step}:tasks]', color='blue')} "
    title = get_bolded_text(
        f"Starting {n_tasks} task{'s' if n_tasks != 1 else ''} for step {step}:\n"
    )
    lines = [
        f"- {get_colored_text(task.name, 'green')} -> {pformat(task.input)}"
        for task in next_tasks
    ]
    print(header + title + "\n".join(lines))
def print_step_writes(
    step: int, writes: Sequence[tuple[str, Any]], whitelist: Sequence[str]
) -> None:
    """Pretty-print the channel writes of a step, grouped by channel."""
    # collect values per whitelisted channel
    by_channel: dict[str, list[Any]] = defaultdict(list)
    for channel, value in writes:
        if channel in whitelist:
            by_channel[channel].append(value)
    header = f"{get_colored_text(f'[{step}:writes]', color='blue')} "
    title = get_bolded_text(
        f"Finished step {step} with writes to {len(by_channel)} channel{'s' if len(by_channel) != 1 else ''}:\n"
    )
    body = "\n".join(
        f"- {get_colored_text(name, 'yellow')} -> {', '.join(pformat(v) for v in vals)}"
        for name, vals in by_channel.items()
    )
    print(header + title + body)
def print_step_checkpoint(
    metadata: CheckpointMetadata,
    channels: Mapping[str, BaseChannel],
    whitelist: Sequence[str],
) -> None:
    """Pretty-print the whitelisted channel values at the end of a step."""
    step = metadata["step"]
    state_repr = pformat(read_channels(channels, whitelist), depth=3)
    print(
        f"{get_colored_text(f'[{step}:checkpoint]', color='blue')} "
        + get_bolded_text(f"State at the end of step {step}:\n")
        + state_repr
    )
def tasks_w_writes(
    tasks: Iterable[Union[PregelTask, PregelExecutableTask]],
    pending_writes: Optional[list[PendingWrite]],
    states: Optional[dict[str, Union[RunnableConfig, StateSnapshot]]],
    output_keys: Union[str, Sequence[str]],
) -> tuple[PregelTask, ...]:
    """Apply writes / subgraph states to tasks to be returned in a StateSnapshot.

    For each task, collects its first error write (if any), all interrupt
    writes, its subgraph state (if any), and its result — the latter only
    when the task produced at least one non-error, non-interrupt write.
    """
    pending_writes = pending_writes or []
    out: list[PregelTask] = []
    for task in tasks:
        # first ERROR write for this task, if any
        error = next(
            (exc for tid, n, exc in pending_writes if tid == task.id and n == ERROR),
            None,
        )
        # all INTERRUPT writes for this task
        interrupts = tuple(
            v for tid, n, v in pending_writes if tid == task.id and n == INTERRUPT
        )
        # a task has a result only if it made at least one regular write
        has_result = any(
            w[0] == task.id and w[1] not in (ERROR, INTERRUPT) for w in pending_writes
        )
        if not has_result:
            result = None
        elif isinstance(output_keys, str):
            result = next(
                (
                    val
                    for tid, chan, val in pending_writes
                    if tid == task.id and chan == output_keys
                ),
                None,
            )
        else:
            # NOTE: the original re-tested isinstance(output_keys, str) inside
            # this branch, which is always False here; the dead check is gone
            result = {
                chan: val
                for tid, chan, val in pending_writes
                if tid == task.id and chan in output_keys
            }
        out.append(
            PregelTask(
                task.id,
                task.name,
                task.path,
                error,
                interrupts,
                states.get(task.id) if states else None,
                result,
            )
        )
    return tuple(out)
|
0 | lc_public_repos/langgraph/libs/langgraph/langgraph | lc_public_repos/langgraph/libs/langgraph/langgraph/pregel/remote.py | from dataclasses import asdict
from typing import (
Any,
AsyncIterator,
Iterator,
Literal,
Optional,
Sequence,
Union,
cast,
)
import orjson
from langchain_core.runnables import RunnableConfig
from langchain_core.runnables.graph import (
Edge as DrawableEdge,
)
from langchain_core.runnables.graph import (
Graph as DrawableGraph,
)
from langchain_core.runnables.graph import (
Node as DrawableNode,
)
from langgraph_sdk.client import (
LangGraphClient,
SyncLangGraphClient,
get_client,
get_sync_client,
)
from langgraph_sdk.schema import Checkpoint, ThreadState
from langgraph_sdk.schema import Command as CommandSDK
from langgraph_sdk.schema import StreamMode as StreamModeSDK
from typing_extensions import Self
from langgraph.checkpoint.base import CheckpointMetadata
from langgraph.constants import (
CONF,
CONFIG_KEY_CHECKPOINT_NS,
CONFIG_KEY_STREAM,
INTERRUPT,
NS_SEP,
)
from langgraph.errors import GraphInterrupt
from langgraph.pregel.protocol import PregelProtocol
from langgraph.pregel.types import All, PregelTask, StateSnapshot, StreamMode
from langgraph.types import Command, Interrupt, StreamProtocol
from langgraph.utils.config import merge_configs
class RemoteException(Exception):
    """Exception raised when an error occurs in the remote graph."""

    pass
class RemoteGraph(PregelProtocol):
"""The `RemoteGraph` class is a client implementation for calling remote
APIs that implement the LangGraph Server API specification.
For example, the `RemoteGraph` class can be used to call APIs from deployments
on LangGraph Cloud.
`RemoteGraph` behaves the same way as a `Graph` and can be used directly as
a node in another `Graph`.
"""
name: str
    def __init__(
        self,
        name: str,  # graph_id
        /,
        *,
        url: Optional[str] = None,
        api_key: Optional[str] = None,
        headers: Optional[dict[str, str]] = None,
        client: Optional[LangGraphClient] = None,
        sync_client: Optional[SyncLangGraphClient] = None,
        config: Optional[RunnableConfig] = None,
    ):
        """Specify `url`, `api_key`, and/or `headers` to create default sync and async clients.

        If `client` or `sync_client` are provided, they will be used instead of the default clients.
        See `LangGraphClient` and `SyncLangGraphClient` for details on the default clients. At least
        one of `url`, `client`, or `sync_client` must be provided.

        Args:
            name: The name of the graph.
            url: The URL of the remote API.
            api_key: The API key to use for authentication. If not provided, it will be read from the environment (`LANGGRAPH_API_KEY`, `LANGSMITH_API_KEY`, or `LANGCHAIN_API_KEY`).
            headers: Additional headers to include in the requests.
            client: A `LangGraphClient` instance to use instead of creating a default client.
            sync_client: A `SyncLangGraphClient` instance to use instead of creating a default client.
            config: An optional `RunnableConfig` instance with additional configuration.
        """
        self.name = name
        self.config = config
        # build default clients from url/api_key/headers only when an explicit
        # client instance was not supplied
        if client is None and url is not None:
            client = get_client(url=url, api_key=api_key, headers=headers)
        self.client = client
        if sync_client is None and url is not None:
            sync_client = get_sync_client(url=url, api_key=api_key, headers=headers)
        self.sync_client = sync_client
        # NOTE: both clients may still be None here; validation happens lazily
        # in _validate_client / _validate_sync_client
def _validate_client(self) -> LangGraphClient:
if self.client is None:
raise ValueError(
"Async client is not initialized: please provide `url` or `client` when initializing `RemoteGraph`."
)
return self.client
def _validate_sync_client(self) -> SyncLangGraphClient:
if self.sync_client is None:
raise ValueError(
"Sync client is not initialized: please provide `url` or `sync_client` when initializing `RemoteGraph`."
)
return self.sync_client
    def copy(self, update: dict[str, Any]) -> Self:
        """Return a new instance with `update` applied over this one's attributes."""
        attrs = {**self.__dict__, **update}
        # `name` is positional-only in __init__, so pop it out of the kwargs
        return self.__class__(attrs.pop("name"), **attrs)
    def with_config(
        self, config: Optional[RunnableConfig] = None, **kwargs: Any
    ) -> Self:
        """Return a copy whose config merges this config, `config`, and `kwargs`."""
        return self.copy(
            {"config": merge_configs(self.config, config, cast(RunnableConfig, kwargs))}
        )
def _get_drawable_nodes(
self, graph: dict[str, list[dict[str, Any]]]
) -> dict[str, DrawableNode]:
nodes = {}
for node in graph["nodes"]:
node_id = str(node["id"])
node_data = node.get("data", {})
# Get node name from node_data if available. If not, use node_id.
node_name = node.get("name")
if node_name is None:
if isinstance(node_data, dict):
node_name = node_data.get("name", node_id)
else:
node_name = node_id
nodes[node_id] = DrawableNode(
id=node_id,
name=node_name,
data=node_data,
metadata=node.get("metadata"),
)
return nodes
    def get_graph(
        self,
        config: Optional[RunnableConfig] = None,
        *,
        xray: Union[int, bool] = False,
    ) -> DrawableGraph:
        """Get graph by graph name.

        This method calls `GET /assistants/{assistant_id}/graph`.

        Args:
            config: This parameter is not used.
            xray: Include graph representation of subgraphs. If an integer
                value is provided, only subgraphs with a depth less than or
                equal to the value will be included.

        Returns:
            The graph information for the assistant in JSON format.
        """
        sync_client = self._validate_sync_client()
        graph = sync_client.assistants.get_graph(
            assistant_id=self.name,
            xray=xray,
        )
        # rebuild the server's JSON graph as drawable nodes/edges
        return DrawableGraph(
            nodes=self._get_drawable_nodes(graph),
            edges=[DrawableEdge(**edge) for edge in graph["edges"]],
        )
    async def aget_graph(
        self,
        config: Optional[RunnableConfig] = None,
        *,
        xray: Union[int, bool] = False,
    ) -> DrawableGraph:
        """Get graph by graph name.

        This method calls `GET /assistants/{assistant_id}/graph`.

        Args:
            config: This parameter is not used.
            xray: Include graph representation of subgraphs. If an integer
                value is provided, only subgraphs with a depth less than or
                equal to the value will be included.

        Returns:
            The graph information for the assistant in JSON format.
        """
        client = self._validate_client()
        graph = await client.assistants.get_graph(
            assistant_id=self.name,
            xray=xray,
        )
        # rebuild the server's JSON graph as drawable nodes/edges
        return DrawableGraph(
            nodes=self._get_drawable_nodes(graph),
            edges=[DrawableEdge(**edge) for edge in graph["edges"]],
        )
    def _create_state_snapshot(self, state: ThreadState) -> StateSnapshot:
        """Convert a ThreadState payload from the server into a local StateSnapshot.

        Recurses into task sub-states, so nested (subgraph) thread states are
        converted as well.
        """
        tasks = []
        for task in state["tasks"]:
            interrupts = []
            for interrupt in task["interrupts"]:
                interrupts.append(Interrupt(**interrupt))
            tasks.append(
                PregelTask(
                    id=task["id"],
                    name=task["name"],
                    path=tuple(),
                    error=Exception(task["error"]) if task["error"] else None,
                    interrupts=tuple(interrupts),
                    # prefer a full nested snapshot; fall back to a bare
                    # checkpoint config when only the checkpoint is known
                    state=self._create_state_snapshot(task["state"])
                    if task["state"]
                    else cast(RunnableConfig, {"configurable": task["checkpoint"]})
                    if task["checkpoint"]
                    else None,
                    result=task.get("result"),
                )
            )
        return StateSnapshot(
            values=state["values"],
            next=tuple(state["next"]) if state["next"] else tuple(),
            config={
                "configurable": {
                    "thread_id": state["checkpoint"]["thread_id"],
                    "checkpoint_ns": state["checkpoint"]["checkpoint_ns"],
                    "checkpoint_id": state["checkpoint"]["checkpoint_id"],
                    "checkpoint_map": state["checkpoint"].get("checkpoint_map", {}),
                }
            },
            metadata=CheckpointMetadata(**state["metadata"]),
            created_at=state["created_at"],
            parent_config={
                "configurable": {
                    "thread_id": state["parent_checkpoint"]["thread_id"],
                    "checkpoint_ns": state["parent_checkpoint"]["checkpoint_ns"],
                    "checkpoint_id": state["parent_checkpoint"]["checkpoint_id"],
                    "checkpoint_map": state["parent_checkpoint"].get(
                        "checkpoint_map", {}
                    ),
                }
            }
            if state["parent_checkpoint"]
            else None,
            tasks=tuple(tasks),
        )
def _get_checkpoint(self, config: Optional[RunnableConfig]) -> Optional[Checkpoint]:
if config is None:
return None
checkpoint = {}
if "thread_id" in config["configurable"]:
checkpoint["thread_id"] = config["configurable"]["thread_id"]
if "checkpoint_ns" in config["configurable"]:
checkpoint["checkpoint_ns"] = config["configurable"]["checkpoint_ns"]
if "checkpoint_id" in config["configurable"]:
checkpoint["checkpoint_id"] = config["configurable"]["checkpoint_id"]
if "checkpoint_map" in config["configurable"]:
checkpoint["checkpoint_map"] = config["configurable"]["checkpoint_map"]
return checkpoint if checkpoint else None
    def _get_config(self, checkpoint: Checkpoint) -> RunnableConfig:
        """Build a RunnableConfig pointing at the given checkpoint."""
        return {
            "configurable": {
                "thread_id": checkpoint["thread_id"],
                "checkpoint_ns": checkpoint["checkpoint_ns"],
                "checkpoint_id": checkpoint["checkpoint_id"],
                "checkpoint_map": checkpoint.get("checkpoint_map", {}),
            }
        }
    def _sanitize_config(self, config: RunnableConfig) -> RunnableConfig:
        """Reduce a config to the JSON-serializable subset that is safe to send
        to the remote API, dropping reserved and internal configurable keys."""
        reserved_configurable_keys = frozenset(
            [
                "callbacks",
                "checkpoint_map",
                "checkpoint_id",
                "checkpoint_ns",
            ]
        )

        def _sanitize_obj(obj: Any) -> Any:
            """Remove non-JSON serializable fields from the given object."""
            if isinstance(obj, dict):
                return {k: _sanitize_obj(v) for k, v in obj.items()}
            elif isinstance(obj, list):
                return [_sanitize_obj(v) for v in obj]
            else:
                try:
                    orjson.dumps(obj)
                    return obj
                except orjson.JSONEncodeError:
                    # non-serializable values are replaced by None
                    return None

        # Remove non-JSON serializable fields from the config.
        config = _sanitize_obj(config)

        # Only include configurable keys that are not reserved and
        # not starting with "__pregel_" prefix.
        new_configurable = {
            k: v
            for k, v in config["configurable"].items()
            if k not in reserved_configurable_keys and not k.startswith("__pregel_")
        }

        return {
            "tags": config.get("tags") or [],
            "metadata": config.get("metadata") or {},
            "configurable": new_configurable,
        }
    def get_state(
        self, config: RunnableConfig, *, subgraphs: bool = False
    ) -> StateSnapshot:
        """Get the state of a thread.

        This method calls `POST /threads/{thread_id}/state/checkpoint` if a
        checkpoint is specified in the config or `GET /threads/{thread_id}/state`
        if no checkpoint is specified.

        Args:
            config: A `RunnableConfig` that includes `thread_id` in the
                `configurable` field.
            subgraphs: Include subgraphs in the state.

        Returns:
            The latest state of the thread.
        """
        sync_client = self._validate_sync_client()
        # instance-level config takes effect here, with `config` overriding it
        merged_config = merge_configs(self.config, config)

        state = sync_client.threads.get_state(
            thread_id=merged_config["configurable"]["thread_id"],
            checkpoint=self._get_checkpoint(merged_config),
            subgraphs=subgraphs,
        )
        return self._create_state_snapshot(state)
    async def aget_state(
        self, config: RunnableConfig, *, subgraphs: bool = False
    ) -> StateSnapshot:
        """Get the state of a thread.

        This method calls `POST /threads/{thread_id}/state/checkpoint` if a
        checkpoint is specified in the config or `GET /threads/{thread_id}/state`
        if no checkpoint is specified.

        Args:
            config: A `RunnableConfig` that includes `thread_id` in the
                `configurable` field.
            subgraphs: Include subgraphs in the state.

        Returns:
            The latest state of the thread.
        """
        client = self._validate_client()
        # instance-level config takes effect here, with `config` overriding it
        merged_config = merge_configs(self.config, config)

        state = await client.threads.get_state(
            thread_id=merged_config["configurable"]["thread_id"],
            checkpoint=self._get_checkpoint(merged_config),
            subgraphs=subgraphs,
        )
        return self._create_state_snapshot(state)
    def get_state_history(
        self,
        config: RunnableConfig,
        *,
        filter: Optional[dict[str, Any]] = None,
        before: Optional[RunnableConfig] = None,
        limit: Optional[int] = None,
    ) -> Iterator[StateSnapshot]:
        """Get the state history of a thread.

        This method calls `POST /threads/{thread_id}/history`.

        Args:
            config: A `RunnableConfig` that includes `thread_id` in the
                `configurable` field.
            filter: Metadata to filter on.
            before: A `RunnableConfig` that includes checkpoint metadata.
            limit: Max number of states to return.

        Returns:
            States of the thread.
        """
        sync_client = self._validate_sync_client()
        merged_config = merge_configs(self.config, config)

        states = sync_client.threads.get_history(
            thread_id=merged_config["configurable"]["thread_id"],
            limit=limit if limit else 10,  # fall back to 10 states when unset
            before=self._get_checkpoint(before),
            metadata=filter,
            checkpoint=self._get_checkpoint(merged_config),
        )
        for state in states:
            yield self._create_state_snapshot(state)
    async def aget_state_history(
        self,
        config: RunnableConfig,
        *,
        filter: Optional[dict[str, Any]] = None,
        before: Optional[RunnableConfig] = None,
        limit: Optional[int] = None,
    ) -> AsyncIterator[StateSnapshot]:
        """Get the state history of a thread.

        This method calls `POST /threads/{thread_id}/history`.

        Args:
            config: A `RunnableConfig` that includes `thread_id` in the
                `configurable` field.
            filter: Metadata to filter on.
            before: A `RunnableConfig` that includes checkpoint metadata.
            limit: Max number of states to return.

        Returns:
            States of the thread.
        """
        client = self._validate_client()
        merged_config = merge_configs(self.config, config)

        states = await client.threads.get_history(
            thread_id=merged_config["configurable"]["thread_id"],
            limit=limit if limit else 10,  # fall back to 10 states when unset
            before=self._get_checkpoint(before),
            metadata=filter,
            checkpoint=self._get_checkpoint(merged_config),
        )
        for state in states:
            yield self._create_state_snapshot(state)
    def update_state(
        self,
        config: RunnableConfig,
        values: Optional[Union[dict[str, Any], Any]],
        as_node: Optional[str] = None,
    ) -> RunnableConfig:
        """Update the state of a thread.

        This method calls `POST /threads/{thread_id}/state`.

        Args:
            config: A `RunnableConfig` that includes `thread_id` in the
                `configurable` field.
            values: Values to update to the state.
            as_node: Update the state as if this node had just executed.

        Returns:
            `RunnableConfig` for the updated thread.
        """
        sync_client = self._validate_sync_client()
        merged_config = merge_configs(self.config, config)

        response: dict = sync_client.threads.update_state(  # type: ignore
            thread_id=merged_config["configurable"]["thread_id"],
            values=values,
            as_node=as_node,
            checkpoint=self._get_checkpoint(merged_config),
        )
        # return a config pointing at the newly created checkpoint
        return self._get_config(response["checkpoint"])
    async def aupdate_state(
        self,
        config: RunnableConfig,
        values: Optional[Union[dict[str, Any], Any]],
        as_node: Optional[str] = None,
    ) -> RunnableConfig:
        """Update the state of a thread.

        This method calls `POST /threads/{thread_id}/state`.

        Args:
            config: A `RunnableConfig` that includes `thread_id` in the
                `configurable` field.
            values: Values to update to the state.
            as_node: Update the state as if this node had just executed.

        Returns:
            `RunnableConfig` for the updated thread.
        """
        client = self._validate_client()
        merged_config = merge_configs(self.config, config)

        response: dict = await client.threads.update_state(  # type: ignore
            thread_id=merged_config["configurable"]["thread_id"],
            values=values,
            as_node=as_node,
            checkpoint=self._get_checkpoint(merged_config),
        )
        # return a config pointing at the newly created checkpoint
        return self._get_config(response["checkpoint"])
    def _get_stream_modes(
        self,
        stream_mode: Optional[Union[StreamMode, list[StreamMode]]],
        config: Optional[RunnableConfig],
        default: StreamMode = "updates",
    ) -> tuple[
        list[StreamModeSDK], list[StreamModeSDK], bool, Optional[StreamProtocol]
    ]:
        """Resolve the stream modes to request from the remote graph.

        Returns a 4-tuple of:
        - the final list of stream modes sent to the remote graph,
        - the modes the caller originally requested (with "messages-tuple"
          mapped back to "messages"),
        - whether the caller passed a single mode rather than a list,
        - the parent graph's StreamProtocol, when this graph is being run
          as a node inside another graph.

        'updates' mode is added to the list of stream modes so that interrupts
        can be detected in the remote graph.
        """
        updated_stream_modes: list[StreamModeSDK] = []
        req_single = True
        # coerce to list, or add default stream mode
        if stream_mode:
            if isinstance(stream_mode, str):
                updated_stream_modes.append(stream_mode)
            else:
                req_single = False
                updated_stream_modes.extend(stream_mode)
        else:
            updated_stream_modes.append(default)
        requested_stream_modes = updated_stream_modes.copy()
        # add any from parent graph
        stream: Optional[StreamProtocol] = (
            (config or {}).get(CONF, {}).get(CONFIG_KEY_STREAM)
        )
        if stream:
            updated_stream_modes.extend(stream.modes)
        # map "messages" to "messages-tuple"
        if "messages" in updated_stream_modes:
            updated_stream_modes.remove("messages")
            updated_stream_modes.append("messages-tuple")
        # if requested "messages-tuple",
        # map to "messages" in requested_stream_modes
        if "messages-tuple" in requested_stream_modes:
            requested_stream_modes.remove("messages-tuple")
            requested_stream_modes.append("messages")
        # add 'updates' mode if not present
        if "updates" not in updated_stream_modes:
            updated_stream_modes.append("updates")
        # remove 'events', as it's not supported in Pregel
        if "events" in updated_stream_modes:
            updated_stream_modes.remove("events")
        return (updated_stream_modes, requested_stream_modes, req_single, stream)
    def stream(
        self,
        input: Union[dict[str, Any], Any],
        config: Optional[RunnableConfig] = None,
        *,
        stream_mode: Optional[Union[StreamMode, list[StreamMode]]] = None,
        interrupt_before: Optional[Union[All, Sequence[str]]] = None,
        interrupt_after: Optional[Union[All, Sequence[str]]] = None,
        subgraphs: bool = False,
        **kwargs: Any,
    ) -> Iterator[Union[dict[str, Any], Any]]:
        """Create a run and stream the results.

        This method calls `POST /threads/{thread_id}/runs/stream` if a `thread_id`
        is specified in the `configurable` field of the config or
        `POST /runs/stream` otherwise.

        Args:
            input: Input to the graph.
            config: A `RunnableConfig` for graph invocation.
            stream_mode: Stream mode(s) to use.
            interrupt_before: Interrupt the graph before these nodes.
            interrupt_after: Interrupt the graph after these nodes.
            subgraphs: Stream from subgraphs.
            **kwargs: Additional params to pass to client.runs.stream.

        Yields:
            The output of the graph.

        Raises:
            GraphInterrupt: if the remote run was interrupted.
            RemoteException: if the remote run emitted an error event.
        """
        sync_client = self._validate_sync_client()
        merged_config = merge_configs(self.config, config)
        sanitized_config = self._sanitize_config(merged_config)
        # `stream_modes` may include extra modes on behalf of a parent graph;
        # `requested` records what the caller actually asked for
        stream_modes, requested, req_single, stream = self._get_stream_modes(
            stream_mode, config
        )
        # a Command input is serialized and sent separately from `input`
        if isinstance(input, Command):
            command: Optional[CommandSDK] = cast(CommandSDK, asdict(input))
            input = None
        else:
            command = None
        for chunk in sync_client.runs.stream(
            thread_id=sanitized_config["configurable"].get("thread_id"),
            assistant_id=self.name,
            input=input,
            command=command,
            config=sanitized_config,
            stream_mode=stream_modes,
            interrupt_before=interrupt_before,
            interrupt_after=interrupt_after,
            # subgraph output is required when forwarding to a parent stream
            stream_subgraphs=subgraphs or stream is not None,
            if_not_exists="create",
            **kwargs,
        ):
            # split mode and ns
            if NS_SEP in chunk.event:
                mode, ns_ = chunk.event.split(NS_SEP, 1)
                ns = tuple(ns_.split(NS_SEP))
            else:
                mode, ns = chunk.event, ()
            # prepend caller ns (as it is not passed to remote graph)
            if caller_ns := (config or {}).get(CONF, {}).get(CONFIG_KEY_CHECKPOINT_NS):
                caller_ns = tuple(caller_ns.split(NS_SEP))
                ns = caller_ns + ns
            # stream to parent stream
            if stream is not None and mode in stream.modes:
                stream((ns, mode, chunk.data))
            # raise interrupt or errors
            if chunk.event.startswith("updates"):
                if isinstance(chunk.data, dict) and INTERRUPT in chunk.data:
                    raise GraphInterrupt(chunk.data[INTERRUPT])
            elif chunk.event.startswith("error"):
                raise RemoteException(chunk.data)
            # filter for what was actually requested
            if mode not in requested:
                continue
            # emit chunk
            if subgraphs:
                if NS_SEP in chunk.event:
                    mode, ns_ = chunk.event.split(NS_SEP, 1)
                    ns = tuple(ns_.split(NS_SEP))
                else:
                    mode, ns = chunk.event, ()
                if req_single:
                    yield ns, chunk.data
                else:
                    yield ns, mode, chunk.data
            elif req_single:
                yield chunk.data
            else:
                yield chunk
    async def astream(
        self,
        input: Union[dict[str, Any], Any],
        config: Optional[RunnableConfig] = None,
        *,
        stream_mode: Optional[Union[StreamMode, list[StreamMode]]] = None,
        interrupt_before: Optional[Union[All, Sequence[str]]] = None,
        interrupt_after: Optional[Union[All, Sequence[str]]] = None,
        subgraphs: bool = False,
        **kwargs: Any,
    ) -> AsyncIterator[Union[dict[str, Any], Any]]:
        """Create a run and stream the results asynchronously.

        This method calls `POST /threads/{thread_id}/runs/stream` if a `thread_id`
        is specified in the `configurable` field of the config or
        `POST /runs/stream` otherwise.

        Args:
            input: Input to the graph.
            config: A `RunnableConfig` for graph invocation.
            stream_mode: Stream mode(s) to use.
            interrupt_before: Interrupt the graph before these nodes.
            interrupt_after: Interrupt the graph after these nodes.
            subgraphs: Stream from subgraphs.
            **kwargs: Additional params to pass to client.runs.stream.

        Yields:
            The output of the graph.

        Raises:
            GraphInterrupt: if the remote run was interrupted.
            RemoteException: if the remote run emitted an error event.
        """
        client = self._validate_client()
        merged_config = merge_configs(self.config, config)
        sanitized_config = self._sanitize_config(merged_config)
        # `stream_modes` may include extra modes on behalf of a parent graph;
        # `requested` records what the caller actually asked for
        stream_modes, requested, req_single, stream = self._get_stream_modes(
            stream_mode, config
        )
        # a Command input is serialized and sent separately from `input`
        if isinstance(input, Command):
            command: Optional[CommandSDK] = cast(CommandSDK, asdict(input))
            input = None
        else:
            command = None
        async for chunk in client.runs.stream(
            thread_id=sanitized_config["configurable"].get("thread_id"),
            assistant_id=self.name,
            input=input,
            command=command,
            config=sanitized_config,
            stream_mode=stream_modes,
            interrupt_before=interrupt_before,
            interrupt_after=interrupt_after,
            # subgraph output is required when forwarding to a parent stream
            stream_subgraphs=subgraphs or stream is not None,
            if_not_exists="create",
            **kwargs,
        ):
            # split mode and ns
            if NS_SEP in chunk.event:
                mode, ns_ = chunk.event.split(NS_SEP, 1)
                ns = tuple(ns_.split(NS_SEP))
            else:
                mode, ns = chunk.event, ()
            # prepend caller ns (as it is not passed to remote graph)
            if caller_ns := (config or {}).get(CONF, {}).get(CONFIG_KEY_CHECKPOINT_NS):
                caller_ns = tuple(caller_ns.split(NS_SEP))
                ns = caller_ns + ns
            # stream to parent stream
            if stream is not None and mode in stream.modes:
                stream((ns, mode, chunk.data))
            # raise interrupt or errors
            if chunk.event.startswith("updates"):
                if isinstance(chunk.data, dict) and INTERRUPT in chunk.data:
                    raise GraphInterrupt(chunk.data[INTERRUPT])
            elif chunk.event.startswith("error"):
                raise RemoteException(chunk.data)
            # filter for what was actually requested
            if mode not in requested:
                continue
            # emit chunk
            if subgraphs:
                if NS_SEP in chunk.event:
                    mode, ns_ = chunk.event.split(NS_SEP, 1)
                    ns = tuple(ns_.split(NS_SEP))
                else:
                    mode, ns = chunk.event, ()
                if req_single:
                    yield ns, chunk.data
                else:
                    yield ns, mode, chunk.data
            elif req_single:
                yield chunk.data
            else:
                yield chunk
    async def astream_events(
        self,
        input: Any,
        config: Optional[RunnableConfig] = None,
        *,
        version: Literal["v1", "v2"],
        include_names: Optional[Sequence[All]] = None,
        include_types: Optional[Sequence[All]] = None,
        include_tags: Optional[Sequence[All]] = None,
        exclude_names: Optional[Sequence[All]] = None,
        exclude_types: Optional[Sequence[All]] = None,
        exclude_tags: Optional[Sequence[All]] = None,
        **kwargs: Any,
    ) -> AsyncIterator[dict[str, Any]]:
        """Not supported for remote graphs; use `astream` instead."""
        raise NotImplementedError
def invoke(
self,
input: Union[dict[str, Any], Any],
config: Optional[RunnableConfig] = None,
*,
interrupt_before: Optional[Union[All, Sequence[str]]] = None,
interrupt_after: Optional[Union[All, Sequence[str]]] = None,
**kwargs: Any,
) -> Union[dict[str, Any], Any]:
"""Create a run, wait until it finishes and return the final state.
Args:
input: Input to the graph.
config: A `RunnableConfig` for graph invocation.
interrupt_before: Interrupt the graph before these nodes.
interrupt_after: Interrupt the graph after these nodes.
**kwargs: Additional params to pass to RemoteGraph.stream.
Returns:
The output of the graph.
"""
for chunk in self.stream(
input,
config=config,
interrupt_before=interrupt_before,
interrupt_after=interrupt_after,
stream_mode="values",
**kwargs,
):
pass
try:
return chunk
except UnboundLocalError:
return None
async def ainvoke(
self,
input: Union[dict[str, Any], Any],
config: Optional[RunnableConfig] = None,
*,
interrupt_before: Optional[Union[All, Sequence[str]]] = None,
interrupt_after: Optional[Union[All, Sequence[str]]] = None,
**kwargs: Any,
) -> Union[dict[str, Any], Any]:
"""Create a run, wait until it finishes and return the final state.
Args:
input: Input to the graph.
config: A `RunnableConfig` for graph invocation.
interrupt_before: Interrupt the graph before these nodes.
interrupt_after: Interrupt the graph after these nodes.
**kwargs: Additional params to pass to RemoteGraph.astream.
Returns:
The output of the graph.
"""
async for chunk in self.astream(
input,
config=config,
interrupt_before=interrupt_before,
interrupt_after=interrupt_after,
stream_mode="values",
**kwargs,
):
pass
try:
return chunk
except UnboundLocalError:
return None
|
0 | lc_public_repos/langgraph/libs/langgraph/langgraph | lc_public_repos/langgraph/libs/langgraph/langgraph/pregel/runner.py | import asyncio
import concurrent.futures
import time
from typing import (
Any,
AsyncIterator,
Callable,
Iterable,
Iterator,
Optional,
Sequence,
Type,
Union,
cast,
)
from langgraph.constants import (
CONF,
CONFIG_KEY_SEND,
ERROR,
INTERRUPT,
NO_WRITES,
PUSH,
RESUME,
TAG_HIDDEN,
)
from langgraph.errors import GraphBubbleUp, GraphInterrupt
from langgraph.pregel.executor import Submit
from langgraph.pregel.retry import arun_with_retry, run_with_retry
from langgraph.types import PregelExecutableTask, RetryPolicy
class PregelRunner:
    """Responsible for executing a set of Pregel tasks concurrently, committing
    their writes, yielding control to caller when there is output to emit, and
    interrupting other tasks if appropriate."""

    def __init__(
        self,
        *,
        submit: Submit,
        put_writes: Callable[[str, Sequence[tuple[str, Any]]], None],
        schedule_task: Callable[
            [PregelExecutableTask, int], Optional[PregelExecutableTask]
        ],
        use_astream: bool = False,
        node_finished: Optional[Callable[[str], None]] = None,
    ) -> None:
        """Initialize the runner.

        Args:
            submit: Executor-like callable used to schedule task execution.
            put_writes: Persists a task's writes, keyed by task id.
            schedule_task: Callback that may produce a follow-up task for the
                PUSH write at the given index of a task's writes.
            use_astream: Whether async tasks should run in streaming mode.
            node_finished: Optional hook invoked with a node's name when it
                finishes successfully (tasks tagged hidden are excluded).
        """
        self.submit = submit
        self.put_writes = put_writes
        self.use_astream = use_astream
        self.node_finished = node_finished
        self.schedule_task = schedule_task

    def tick(
        self,
        tasks: Iterable[PregelExecutableTask],
        *,
        reraise: bool = True,
        timeout: Optional[float] = None,
        retry_policy: Optional[RetryPolicy] = None,
        get_waiter: Optional[Callable[[], concurrent.futures.Future[None]]] = None,
    ) -> Iterator[None]:
        """Execute the given tasks synchronously, yielding control back to the
        caller once before starting and after each batch of completed tasks.

        Args:
            tasks: The tasks to execute in this tick.
            reraise: Whether to re-raise a task's exception after committing it.
            timeout: Max seconds to wait for all tasks to finish.
            retry_policy: Retry policy applied to each task.
            get_waiter: Optional factory for a future that completes when new
                output is available, used to wake the caller early.
        """

        def writer(
            task: PregelExecutableTask, writes: Sequence[tuple[str, Any]]
        ) -> None:
            # records the task's writes, then schedules any follow-up tasks
            # produced by PUSH writes
            prev_length = len(task.writes)
            # delegate to the underlying writer
            task.config[CONF][CONFIG_KEY_SEND](writes)
            for idx, w in enumerate(task.writes):
                # find the index for the newly inserted writes
                if idx < prev_length:
                    continue
                assert writes[idx - prev_length] is w
                # bail if not a PUSH write
                if w[0] != PUSH:
                    continue
                # schedule the next task, if the callback returns one
                if next_task := self.schedule_task(task, idx):
                    # if the parent task was retried,
                    # the next task might already be running
                    if any(
                        t == next_task.id for t in futures.values() if t is not None
                    ):
                        continue
                    # schedule the next task
                    futures[
                        self.submit(
                            run_with_retry,
                            next_task,
                            retry_policy,
                            writer=writer,
                            __reraise_on_exit__=reraise,
                        )
                    ] = next_task

        tasks = tuple(tasks)
        futures: dict[concurrent.futures.Future, Optional[PregelExecutableTask]] = {}
        # give control back to the caller
        yield
        # fast path if single task with no timeout and no waiter
        if len(tasks) == 1 and timeout is None and get_waiter is None:
            t = tasks[0]
            try:
                run_with_retry(t, retry_policy, writer=writer)
                self.commit(t, None)
            except Exception as exc:
                self.commit(t, exc)
                if reraise:
                    raise
            if not futures:  # maybe `t` scheduled another task
                return
        # add waiter task if requested
        if get_waiter is not None:
            futures[get_waiter()] = None
        # execute tasks, and wait for one to fail or all to finish.
        # each task is independent from all other concurrent tasks
        # yield updates/debug output as each task finishes
        for t in tasks:
            if not t.writes:
                futures[
                    self.submit(
                        run_with_retry,
                        t,
                        retry_policy,
                        writer=writer,
                        __reraise_on_exit__=reraise,
                    )
                ] = t
        done_futures: set[concurrent.futures.Future] = set()
        end_time = timeout + time.monotonic() if timeout else None
        while len(futures) > (1 if get_waiter is not None else 0):
            done, inflight = concurrent.futures.wait(
                futures,
                return_when=concurrent.futures.FIRST_COMPLETED,
                timeout=(max(0, end_time - time.monotonic()) if end_time else None),
            )
            if not done:
                break  # timed out
            for fut in done:
                task = futures.pop(fut)
                if task is None:
                    # waiter task finished, schedule another
                    if inflight and get_waiter is not None:
                        futures[get_waiter()] = None
                else:
                    # store for panic check
                    done_futures.add(fut)
                    # task finished, commit writes
                    self.commit(task, _exception(fut))
            else:
                # remove references to loop vars
                del fut, task
            # maybe stop other tasks
            if _should_stop_others(done):
                break
            # give control back to the caller
            yield
        # panic on failure or timeout
        _panic_or_proceed(
            done_futures.union(f for f, t in futures.items() if t is not None),
            panic=reraise,
        )

    async def atick(
        self,
        tasks: Iterable[PregelExecutableTask],
        *,
        reraise: bool = True,
        timeout: Optional[float] = None,
        retry_policy: Optional[RetryPolicy] = None,
        get_waiter: Optional[Callable[[], asyncio.Future[None]]] = None,
    ) -> AsyncIterator[None]:
        """Async counterpart of `tick`: execute the given tasks concurrently,
        yielding control back to the caller once before starting and after
        each batch of completed tasks.

        Args:
            tasks: The tasks to execute in this tick.
            reraise: Whether to re-raise a task's exception after committing it.
            timeout: Max seconds to wait for all tasks to finish.
            retry_policy: Retry policy applied to each task.
            get_waiter: Optional factory for a future that completes when new
                output is available, used to wake the caller early.
        """

        def writer(
            task: PregelExecutableTask, writes: Sequence[tuple[str, Any]]
        ) -> None:
            # records the task's writes, then schedules any follow-up tasks
            # produced by PUSH writes
            prev_length = len(task.writes)
            # delegate to the underlying writer
            task.config[CONF][CONFIG_KEY_SEND](writes)
            for idx, w in enumerate(task.writes):
                # find the index for the newly inserted writes
                if idx < prev_length:
                    continue
                assert writes[idx - prev_length] is w
                # bail if not a PUSH write
                if w[0] != PUSH:
                    continue
                # schedule the next task, if the callback returns one
                if next_task := self.schedule_task(task, idx):
                    # if the parent task was retried,
                    # the next task might already be running
                    if any(
                        t == next_task.id for t in futures.values() if t is not None
                    ):
                        continue
                    # schedule the next task
                    futures[
                        cast(
                            asyncio.Future,
                            self.submit(
                                arun_with_retry,
                                next_task,
                                retry_policy,
                                stream=self.use_astream,
                                writer=writer,
                                # name the future after the task it runs:
                                # previously this read `t.name`, leaking the
                                # enclosing loop variable and mislabelling
                                # the scheduled task
                                __name__=next_task.name,
                                __cancel_on_exit__=True,
                                __reraise_on_exit__=reraise,
                            ),
                        )
                    ] = next_task

        loop = asyncio.get_event_loop()
        tasks = tuple(tasks)
        futures: dict[asyncio.Future, Optional[PregelExecutableTask]] = {}
        # give control back to the caller
        yield
        # fast path if single task with no waiter and no timeout
        if len(tasks) == 1 and get_waiter is None and timeout is None:
            t = tasks[0]
            try:
                await arun_with_retry(
                    t, retry_policy, stream=self.use_astream, writer=writer
                )
                self.commit(t, None)
            except Exception as exc:
                self.commit(t, exc)
                if reraise:
                    raise
            if not futures:  # maybe `t` scheduled another task
                return
        # add waiter task if requested
        if get_waiter is not None:
            futures[get_waiter()] = None
        # execute tasks, and wait for one to fail or all to finish.
        # each task is independent from all other concurrent tasks
        # yield updates/debug output as each task finishes
        for t in tasks:
            if not t.writes:
                futures[
                    cast(
                        asyncio.Future,
                        self.submit(
                            arun_with_retry,
                            t,
                            retry_policy,
                            stream=self.use_astream,
                            writer=writer,
                            __name__=t.name,
                            __cancel_on_exit__=True,
                            __reraise_on_exit__=reraise,
                        ),
                    )
                ] = t
        done_futures: set[asyncio.Future] = set()
        end_time = timeout + loop.time() if timeout else None
        while len(futures) > (1 if get_waiter is not None else 0):
            done, inflight = await asyncio.wait(
                futures,
                return_when=asyncio.FIRST_COMPLETED,
                timeout=(max(0, end_time - loop.time()) if end_time else None),
            )
            if not done:
                break  # timed out
            for fut in done:
                task = futures.pop(fut)
                if task is None:
                    # waiter task finished, schedule another
                    if inflight and get_waiter is not None:
                        futures[get_waiter()] = None
                else:
                    # store for panic check
                    done_futures.add(fut)
                    # task finished, commit writes
                    self.commit(task, _exception(fut))
            else:
                # remove references to loop vars
                del fut, task
            # maybe stop other tasks
            if _should_stop_others(done):
                break
            # give control back to the caller
            yield
        # cancel waiter task
        for fut in futures:
            fut.cancel()
        # panic on failure or timeout
        _panic_or_proceed(
            done_futures.union(f for f, t in futures.items() if t is not None),
            timeout_exc_cls=asyncio.TimeoutError,
            panic=reraise,
        )

    def commit(
        self, task: PregelExecutableTask, exception: Optional[BaseException]
    ) -> None:
        """Commit a finished task's writes (or its interrupt/error) to the
        checkpointer via `put_writes`.

        GraphBubbleUp exceptions are re-raised for the caller to handle.
        """
        if exception:
            if isinstance(exception, GraphInterrupt):
                # save interrupt to checkpointer
                if interrupts := [(INTERRUPT, i) for i in exception.args[0]]:
                    if resumes := [w for w in task.writes if w[0] == RESUME]:
                        interrupts.extend(resumes)
                    self.put_writes(task.id, interrupts)
            elif isinstance(exception, GraphBubbleUp):
                raise exception
            else:
                # save error to checkpointer
                self.put_writes(task.id, [(ERROR, exception)])
        else:
            if self.node_finished and (
                task.config is None or TAG_HIDDEN not in task.config.get("tags", [])
            ):
                self.node_finished(task.name)
            if not task.writes:
                # add no writes marker
                task.writes.append((NO_WRITES, None))
            # save task writes to checkpointer
            self.put_writes(task.id, task.writes)
def _should_stop_others(
done: Union[set[concurrent.futures.Future[Any]], set[asyncio.Future[Any]]],
) -> bool:
"""Check if any task failed, if so, cancel all other tasks.
GraphInterrupts are not considered failures."""
for fut in done:
if fut.cancelled():
return True
if exc := fut.exception():
return not isinstance(exc, GraphBubbleUp)
else:
return False
def _exception(
fut: Union[concurrent.futures.Future[Any], asyncio.Future[Any]],
) -> Optional[BaseException]:
"""Return the exception from a future, without raising CancelledError."""
if fut.cancelled():
if isinstance(fut, asyncio.Future):
return asyncio.CancelledError()
else:
return concurrent.futures.CancelledError()
else:
return fut.exception()
def _panic_or_proceed(
    futs: Union[set[concurrent.futures.Future], set[asyncio.Future]],
    *,
    timeout_exc_cls: Type[Exception] = TimeoutError,
    panic: bool = True,
) -> None:
    """Cancel remaining tasks if any failed, re-raise exception if panic is True.

    If no task failed but some are still pending, they are cancelled and a
    timeout error is raised.
    """
    finished = [f for f in futs if f.done()]
    pending = [f for f in futs if not f.done()]
    # look for the first failed future among those that completed
    for fut in finished:
        exc = _exception(fut)
        if exc is None:
            continue
        # a failure: stop everything still in flight
        for leftover in pending:
            leftover.cancel()
        if panic:
            raise exc
        return
    if pending:
        # no failures but tasks remain: we timed out
        for leftover in pending:
            leftover.cancel()
        raise timeout_exc_cls("Timed out")
|
0 | lc_public_repos/langgraph/libs/langgraph/langgraph | lc_public_repos/langgraph/libs/langgraph/langgraph/pregel/messages.py | from typing import (
Any,
AsyncIterator,
Callable,
Dict,
Iterator,
List,
Optional,
Sequence,
Union,
cast,
)
from uuid import UUID, uuid4
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.messages import BaseMessage
from langchain_core.outputs import ChatGenerationChunk, LLMResult
from langchain_core.tracers._streaming import T, _StreamingCallbackHandler
from langgraph.constants import NS_SEP, TAG_HIDDEN, TAG_NOSTREAM
from langgraph.types import StreamChunk
# (namespace path, run metadata) pair tracked per run id
Meta = tuple[tuple[str, ...], dict[str, Any]]


class StreamMessagesHandler(BaseCallbackHandler, _StreamingCallbackHandler):
    """A callback handler that implements stream_mode=messages.
    Collects messages from (1) chat model stream events and (2) node outputs."""

    run_inline = True
    """We want this callback to run in the main thread, to avoid order/locking issues."""

    def __init__(self, stream: Callable[[StreamChunk], None]):
        # callable that receives (namespace, "messages", (message, metadata))
        self.stream = stream
        # run_id -> (namespace, metadata) for runs currently being tracked
        self.metadata: dict[UUID, Meta] = {}
        # ids of messages already emitted, used when deduping node outputs
        self.seen: set[Union[int, str]] = set()

    def _emit(self, meta: Meta, message: BaseMessage, *, dedupe: bool = False) -> None:
        """Send a message to the stream, optionally skipping ids emitted before."""
        if dedupe and message.id in self.seen:
            return
        else:
            if message.id is None:
                # assign an id so the message can be deduped later
                message.id = str(uuid4())
            self.seen.add(message.id)
        self.stream((meta[0], "messages", (message, meta[1])))

    def tap_output_aiter(
        self, run_id: UUID, output: AsyncIterator[T]
    ) -> AsyncIterator[T]:
        # no interception needed; messages are collected via callbacks
        return output

    def tap_output_iter(self, run_id: UUID, output: Iterator[T]) -> Iterator[T]:
        # no interception needed; messages are collected via callbacks
        return output

    def on_chat_model_start(
        self,
        serialized: dict[str, Any],
        messages: list[list[BaseMessage]],
        *,
        run_id: UUID,
        parent_run_id: Optional[UUID] = None,
        tags: Optional[list[str]] = None,
        metadata: Optional[dict[str, Any]] = None,
        **kwargs: Any,
    ) -> Any:
        # start tracking this model run unless streaming is disabled via tag
        if metadata and (not tags or TAG_NOSTREAM not in tags):
            self.metadata[run_id] = (
                tuple(cast(str, metadata["langgraph_checkpoint_ns"]).split(NS_SEP)),
                metadata,
            )

    def on_llm_new_token(
        self,
        token: str,
        *,
        chunk: Optional[ChatGenerationChunk] = None,
        run_id: UUID,
        parent_run_id: Optional[UUID] = None,
        **kwargs: Any,
    ) -> Any:
        # only chat generation chunks carry a message to stream
        if not isinstance(chunk, ChatGenerationChunk):
            return
        if meta := self.metadata.get(run_id):
            self._emit(meta, chunk.message)

    def on_llm_end(
        self,
        response: LLMResult,
        *,
        run_id: UUID,
        parent_run_id: Optional[UUID] = None,
        **kwargs: Any,
    ) -> Any:
        # stop tracking the finished model run
        self.metadata.pop(run_id, None)

    def on_llm_error(
        self,
        error: BaseException,
        *,
        run_id: UUID,
        parent_run_id: Optional[UUID] = None,
        **kwargs: Any,
    ) -> Any:
        # stop tracking the failed model run
        self.metadata.pop(run_id, None)

    def on_chain_start(
        self,
        serialized: Dict[str, Any],
        inputs: Dict[str, Any],
        *,
        run_id: UUID,
        parent_run_id: Optional[UUID] = None,
        tags: Optional[List[str]] = None,
        metadata: Optional[Dict[str, Any]] = None,
        **kwargs: Any,
    ) -> Any:
        # track only the run for the graph node itself (name matches the
        # node recorded in metadata), skipping hidden runs
        if (
            metadata
            and kwargs.get("name") == metadata.get("langgraph_node")
            and (not tags or TAG_HIDDEN not in tags)
        ):
            self.metadata[run_id] = (
                tuple(cast(str, metadata["langgraph_checkpoint_ns"]).split(NS_SEP)),
                metadata,
            )

    def on_chain_end(
        self,
        response: Any,
        *,
        run_id: UUID,
        parent_run_id: Optional[UUID] = None,
        **kwargs: Any,
    ) -> Any:
        # emit any messages found in the node's output, deduping against
        # messages already streamed token-by-token
        if meta := self.metadata.pop(run_id, None):
            if isinstance(response, BaseMessage):
                self._emit(meta, response, dedupe=True)
            elif isinstance(response, Sequence):
                for value in response:
                    if isinstance(value, BaseMessage):
                        self._emit(meta, value, dedupe=True)
            elif isinstance(response, dict):
                for value in response.values():
                    if isinstance(value, BaseMessage):
                        self._emit(meta, value, dedupe=True)
                    elif isinstance(value, Sequence):
                        for item in value:
                            if isinstance(item, BaseMessage):
                                self._emit(meta, item, dedupe=True)
            elif hasattr(response, "__dir__") and callable(response.__dir__):
                # arbitrary objects (e.g. dataclasses/models): scan attributes
                for key in dir(response):
                    try:
                        value = getattr(response, key)
                        if isinstance(value, BaseMessage):
                            self._emit(meta, value, dedupe=True)
                        elif isinstance(value, Sequence):
                            for item in value:
                                if isinstance(item, BaseMessage):
                                    self._emit(meta, item, dedupe=True)
                    except AttributeError:
                        pass

    def on_chain_error(
        self,
        error: BaseException,
        *,
        run_id: UUID,
        parent_run_id: Optional[UUID] = None,
        **kwargs: Any,
    ) -> Any:
        # stop tracking the failed node run
        self.metadata.pop(run_id, None)
|
0 | lc_public_repos/langgraph/libs/langgraph/langgraph | lc_public_repos/langgraph/libs/langgraph/langgraph/pregel/utils.py | from typing import Optional
from langchain_core.runnables import RunnableLambda, RunnableSequence
from langchain_core.runnables.utils import get_function_nonlocals
from langgraph.checkpoint.base import ChannelVersions
from langgraph.pregel.protocol import PregelProtocol
from langgraph.utils.runnable import Runnable, RunnableCallable, RunnableSeq
def get_new_channel_versions(
    previous_versions: ChannelVersions, current_versions: ChannelVersions
) -> ChannelVersions:
    """Get subset of current_versions that are newer than previous_versions."""
    # nothing to compare against: every current version counts as new
    if not previous_versions:
        return current_versions
    # versions are ordered but of a checkpointer-specific type; derive the
    # "zero" version from the type of the values we actually have
    version_type = type(next(iter(current_versions.values()), None))
    null_version = version_type()  # type: ignore[misc]
    return {
        chan: version
        for chan, version in current_versions.items()
        if version > previous_versions.get(chan, null_version)  # type: ignore[operator]
    }
def find_subgraph_pregel(candidate: Runnable) -> Optional[Runnable]:
    """Search a runnable's composition graph breadth-first for the first
    Pregel (subgraph) instance, expanding sequences, lambdas and callables.

    Subgraphs that disabled checkpointing are not considered.
    """
    from langgraph.pregel import Pregel

    queue: list[Runnable] = [candidate]
    for node in queue:
        if isinstance(node, PregelProtocol) and (
            not isinstance(node, Pregel) or node.checkpointer is not False
        ):
            return node
        if isinstance(node, (RunnableSequence, RunnableSeq)):
            queue.extend(node.steps)
        elif isinstance(node, RunnableLambda):
            queue.extend(node.deps)
        elif isinstance(node, RunnableCallable):
            # inspect whichever callable is set, preferring the sync one
            fn = node.func if node.func is not None else node.afunc
            if fn is not None:
                queue.extend(
                    nl.__self__ if hasattr(nl, "__self__") else nl
                    for nl in get_function_nonlocals(fn)
                )
    return None
|
0 | lc_public_repos/langgraph/libs/langgraph/langgraph | lc_public_repos/langgraph/libs/langgraph/langgraph/pregel/__init__.py | from __future__ import annotations
import asyncio
import concurrent
import concurrent.futures
import queue
from collections import deque
from functools import partial
from typing import (
Any,
AsyncIterator,
Callable,
Dict,
Iterator,
Mapping,
Optional,
Sequence,
Type,
Union,
cast,
get_type_hints,
overload,
)
from uuid import UUID, uuid5
from langchain_core.globals import get_debug
from langchain_core.runnables import (
RunnableSequence,
)
from langchain_core.runnables.base import Input, Output
from langchain_core.runnables.config import (
RunnableConfig,
get_async_callback_manager_for_config,
get_callback_manager_for_config,
)
from langchain_core.runnables.graph import Graph
from langchain_core.runnables.utils import (
ConfigurableFieldSpec,
get_unique_config_specs,
)
from langchain_core.tracers._streaming import _StreamingCallbackHandler
from pydantic import BaseModel
from typing_extensions import Self
from langgraph.channels.base import (
BaseChannel,
)
from langgraph.checkpoint.base import (
BaseCheckpointSaver,
CheckpointTuple,
copy_checkpoint,
create_checkpoint,
empty_checkpoint,
)
from langgraph.constants import (
CONF,
CONFIG_KEY_CHECKPOINT_ID,
CONFIG_KEY_CHECKPOINT_NS,
CONFIG_KEY_CHECKPOINTER,
CONFIG_KEY_NODE_FINISHED,
CONFIG_KEY_READ,
CONFIG_KEY_RESUMING,
CONFIG_KEY_SEND,
CONFIG_KEY_STORE,
CONFIG_KEY_STREAM,
CONFIG_KEY_STREAM_WRITER,
CONFIG_KEY_TASK_ID,
END,
ERROR,
INPUT,
INTERRUPT,
NS_END,
NS_SEP,
NULL_TASK_ID,
PUSH,
SCHEDULED,
)
from langgraph.errors import (
ErrorCode,
GraphRecursionError,
InvalidUpdateError,
create_error_message,
)
from langgraph.managed.base import ManagedValueSpec
from langgraph.pregel.algo import (
PregelTaskWrites,
apply_writes,
local_read,
local_write,
prepare_next_tasks,
)
from langgraph.pregel.debug import tasks_w_writes
from langgraph.pregel.io import read_channels
from langgraph.pregel.loop import AsyncPregelLoop, StreamProtocol, SyncPregelLoop
from langgraph.pregel.manager import AsyncChannelsManager, ChannelsManager
from langgraph.pregel.messages import StreamMessagesHandler
from langgraph.pregel.protocol import PregelProtocol
from langgraph.pregel.read import PregelNode
from langgraph.pregel.retry import RetryPolicy
from langgraph.pregel.runner import PregelRunner
from langgraph.pregel.utils import find_subgraph_pregel, get_new_channel_versions
from langgraph.pregel.validate import validate_graph, validate_keys
from langgraph.pregel.write import ChannelWrite, ChannelWriteEntry
from langgraph.store.base import BaseStore
from langgraph.types import (
All,
Checkpointer,
LoopProtocol,
StateSnapshot,
StreamChunk,
StreamMode,
)
from langgraph.utils.config import (
ensure_config,
merge_configs,
patch_checkpoint_map,
patch_config,
patch_configurable,
)
from langgraph.utils.pydantic import create_model
from langgraph.utils.queue import AsyncQueue, SyncQueue # type: ignore[attr-defined]
WriteValue = Union[Callable[[Input], Output], Any]
class Channel:
    """Helpers for constructing PregelNodes and ChannelWrites declaratively."""

    @overload
    @classmethod
    def subscribe_to(
        cls,
        channels: str,
        *,
        key: Optional[str] = None,
        tags: Optional[list[str]] = None,
    ) -> PregelNode: ...

    @overload
    @classmethod
    def subscribe_to(
        cls,
        channels: Sequence[str],
        *,
        key: None = None,
        tags: Optional[list[str]] = None,
    ) -> PregelNode: ...

    @classmethod
    def subscribe_to(
        cls,
        channels: Union[str, Sequence[str]],
        *,
        key: Optional[str] = None,
        tags: Optional[list[str]] = None,
    ) -> PregelNode:
        """Runs process.invoke() each time channels are updated,
        with a dict of the channel values as input.

        Args:
            channels: Channel name(s) to subscribe to; updates to any of them
                trigger the node.
            key: Optional input key to map a single channel's value under
                (only valid when subscribing to a single channel).
            tags: Optional tags attached to the node.
        """
        if not isinstance(channels, str) and key is not None:
            raise ValueError(
                "Can't specify a key when subscribing to multiple channels"
            )
        return PregelNode(
            # single channel with key -> {key: channel}; single channel
            # without key -> [channel]; multiple channels -> identity mapping
            channels=cast(
                Union[list[str], Mapping[str, str]],
                (
                    {key: channels}
                    if isinstance(channels, str) and key is not None
                    else (
                        [channels]
                        if isinstance(channels, str)
                        else {chan: chan for chan in channels}
                    )
                ),
            ),
            triggers=[channels] if isinstance(channels, str) else channels,
            tags=tags,
        )

    @classmethod
    def write_to(
        cls,
        *channels: str,
        **kwargs: WriteValue,
    ) -> ChannelWrite:
        """Writes to channels the result of the lambda, or None to skip writing."""
        return ChannelWrite(
            # positional names are passthrough writes; keyword values are
            # either mappers (if callable) or static values
            [ChannelWriteEntry(c) for c in channels]
            + [
                (
                    ChannelWriteEntry(k, mapper=v)
                    if callable(v)
                    else ChannelWriteEntry(k, value=v)
                )
                for k, v in kwargs.items()
            ]
        )
class Pregel(PregelProtocol):
    """The Pregel runtime: executes a graph of PregelNodes communicating
    over channels, with optional checkpointing, interrupts and streaming."""

    # mapping of node name to the node's definition
    nodes: dict[str, PregelNode]

    # mapping of channel name to channel (or managed value) instances
    channels: dict[str, Union[BaseChannel, ManagedValueSpec]]

    stream_mode: StreamMode = "values"
    """Mode to stream output, defaults to 'values'."""

    # channel(s) read to produce the graph's output
    output_channels: Union[str, Sequence[str]]

    stream_channels: Optional[Union[str, Sequence[str]]] = None
    """Channels to stream, defaults to all channels not in reserved channels"""

    # node names (or All) after/before which execution is interrupted
    interrupt_after_nodes: Union[All, Sequence[str]]
    interrupt_before_nodes: Union[All, Sequence[str]]

    # channel(s) written to from the graph's input
    input_channels: Union[str, Sequence[str]]

    step_timeout: Optional[float] = None
    """Maximum time to wait for a step to complete, in seconds. Defaults to None."""

    debug: bool
    """Whether to print debug information during execution. Defaults to False."""

    checkpointer: Checkpointer = None
    """Checkpointer used to save and load graph state. Defaults to None."""

    store: Optional[BaseStore] = None
    """Memory store to use for SharedValues. Defaults to None."""

    retry_policy: Optional[RetryPolicy] = None
    """Retry policy to use when running tasks. Set to None to disable."""

    # optional type whose hints define the graph's configurable fields
    config_type: Optional[Type[Any]] = None

    # base config merged into configs passed at invocation time
    config: Optional[RunnableConfig] = None

    # display name of the graph
    name: str = "LangGraph"
    def __init__(
        self,
        *,
        nodes: dict[str, PregelNode],
        channels: Optional[dict[str, Union[BaseChannel, ManagedValueSpec]]],
        auto_validate: bool = True,
        stream_mode: StreamMode = "values",
        output_channels: Union[str, Sequence[str]],
        stream_channels: Optional[Union[str, Sequence[str]]] = None,
        interrupt_after_nodes: Union[All, Sequence[str]] = (),
        interrupt_before_nodes: Union[All, Sequence[str]] = (),
        input_channels: Union[str, Sequence[str]],
        step_timeout: Optional[float] = None,
        debug: Optional[bool] = None,
        checkpointer: Optional[BaseCheckpointSaver] = None,
        store: Optional[BaseStore] = None,
        retry_policy: Optional[RetryPolicy] = None,
        config_type: Optional[Type[Any]] = None,
        config: Optional[RunnableConfig] = None,
        name: str = "LangGraph",
    ) -> None:
        """Initialize the graph. See the class attribute docstrings for the
        meaning of each parameter.

        Args:
            auto_validate: Whether to validate the graph on construction.
        """
        self.nodes = nodes
        self.channels = channels or {}
        self.stream_mode = stream_mode
        self.output_channels = output_channels
        self.stream_channels = stream_channels
        self.interrupt_after_nodes = interrupt_after_nodes
        self.interrupt_before_nodes = interrupt_before_nodes
        self.input_channels = input_channels
        self.step_timeout = step_timeout
        # fall back to the global langchain debug flag when not specified
        self.debug = debug if debug is not None else get_debug()
        self.checkpointer = checkpointer
        self.store = store
        self.retry_policy = retry_policy
        self.config_type = config_type
        self.config = config
        self.name = name
        if auto_validate:
            self.validate()
    def get_graph(
        self, config: RunnableConfig | None = None, *, xray: int | bool = False
    ) -> Graph:
        """Return a drawable representation of the graph. Not implemented here;
        subclasses (e.g. CompiledGraph) provide it."""
        raise NotImplementedError
    async def aget_graph(
        self, config: RunnableConfig | None = None, *, xray: int | bool = False
    ) -> Graph:
        """Async version of get_graph. Not implemented here; subclasses
        (e.g. CompiledGraph) provide it."""
        raise NotImplementedError
def copy(self, update: dict[str, Any] | None = None) -> Self:
attrs = {**self.__dict__, **(update or {})}
return self.__class__(**attrs)
def with_config(self, config: RunnableConfig | None = None, **kwargs: Any) -> Self:
return self.copy(
{"config": merge_configs(self.config, config, cast(RunnableConfig, kwargs))}
)
    def validate(self) -> Self:
        """Validate the graph's structure (nodes, channels, interrupts),
        raising on invalid configuration.

        Returns:
            self, to allow chaining.
        """
        validate_graph(
            self.nodes,
            # only real channels are validated; managed values are skipped
            {k: v for k, v in self.channels.items() if isinstance(v, BaseChannel)},
            self.input_channels,
            self.output_channels,
            self.stream_channels,
            self.interrupt_after_nodes,
            self.interrupt_before_nodes,
        )
        return self
    @property
    def config_specs(self) -> list[ConfigurableFieldSpec]:
        """Configurable field specs: the deduplicated union of node specs,
        checkpointer specs and fields derived from config_type, excluding
        keys that Pregel itself supplies at runtime."""
        return [
            spec
            for spec in get_unique_config_specs(
                [spec for node in self.nodes.values() for spec in node.config_specs]
                + (
                    self.checkpointer.config_specs
                    if isinstance(self.checkpointer, BaseCheckpointSaver)
                    else []
                )
                + (
                    [
                        ConfigurableFieldSpec(id=name, annotation=typ)
                        for name, typ in get_type_hints(self.config_type).items()
                    ]
                    if self.config_type is not None
                    else []
                )
            )
            # these are provided by the Pregel class
            if spec.id
            not in [
                CONFIG_KEY_READ,
                CONFIG_KEY_SEND,
                CONFIG_KEY_CHECKPOINTER,
                CONFIG_KEY_RESUMING,
            ]
        ]
@property
def InputType(self) -> Any:
if isinstance(self.input_channels, str):
channel = self.channels[self.input_channels]
if isinstance(channel, BaseChannel):
return channel.UpdateType
def get_input_schema(
self, config: Optional[RunnableConfig] = None
) -> Type[BaseModel]:
config = merge_configs(self.config, config)
if isinstance(self.input_channels, str):
return super().get_input_schema(config)
else:
return create_model(
self.get_name("Input"),
field_definitions={
k: (c.UpdateType, None)
for k in self.input_channels or self.channels.keys()
if (c := self.channels[k]) and isinstance(c, BaseChannel)
},
)
def get_input_jsonschema(
self, config: Optional[RunnableConfig] = None
) -> Dict[All, Any]:
schema = self.get_input_schema(config)
if hasattr(schema, "model_json_schema"):
return schema.model_json_schema()
else:
return schema.schema()
@property
def OutputType(self) -> Any:
if isinstance(self.output_channels, str):
channel = self.channels[self.output_channels]
if isinstance(channel, BaseChannel):
return channel.ValueType
def get_output_schema(
self, config: Optional[RunnableConfig] = None
) -> Type[BaseModel]:
config = merge_configs(self.config, config)
if isinstance(self.output_channels, str):
return super().get_output_schema(config)
else:
return create_model(
self.get_name("Output"),
field_definitions={
k: (c.ValueType, None)
for k in self.output_channels
if (c := self.channels[k]) and isinstance(c, BaseChannel)
},
)
def get_output_jsonschema(
self, config: Optional[RunnableConfig] = None
) -> Dict[All, Any]:
schema = self.get_output_schema(config)
if hasattr(schema, "model_json_schema"):
return schema.model_json_schema()
else:
return schema.schema()
@property
def stream_channels_list(self) -> Sequence[str]:
stream_channels = self.stream_channels_asis
return (
[stream_channels] if isinstance(stream_channels, str) else stream_channels
)
@property
def stream_channels_asis(self) -> Union[str, Sequence[str]]:
return self.stream_channels or [
k for k in self.channels if isinstance(self.channels[k], BaseChannel)
]
    def get_subgraphs(
        self, *, namespace: Optional[str] = None, recurse: bool = False
    ) -> Iterator[tuple[str, Pregel]]:
        """Yield (name, subgraph) pairs for nodes that wrap a Pregel graph.

        If `namespace` is given, only the subgraph whose namespaced path matches
        is yielded; with `recurse=True` nested subgraphs are yielded as well,
        their names joined with NS_SEP.
        """
        for name, node in self.nodes.items():
            # filter by prefix
            if namespace is not None:
                if not namespace.startswith(name):
                    continue
            # find the subgraph, if any
            graph = cast(Optional[Pregel], find_subgraph_pregel(node.bound))
            # if found, yield recursively
            if graph:
                if name == namespace:
                    yield name, graph
                    return  # we found it, stop searching
                if namespace is None:
                    yield name, graph
                if recurse:
                    if namespace is not None:
                        # strip this node's name (plus separator) off the prefix
                        namespace = namespace[len(name) + 1 :]
                    yield from (
                        (f"{name}{NS_SEP}{n}", s)
                        for n, s in graph.get_subgraphs(
                            namespace=namespace, recurse=recurse
                        )
                    )
async def aget_subgraphs(
self, *, namespace: Optional[str] = None, recurse: bool = False
) -> AsyncIterator[tuple[str, Pregel]]:
for name, node in self.get_subgraphs(namespace=namespace, recurse=recurse):
yield name, node
    def _prepare_state_snapshot(
        self,
        config: RunnableConfig,
        saved: Optional[CheckpointTuple],
        recurse: Optional[BaseCheckpointSaver] = None,
        apply_pending_writes: bool = False,
    ) -> StateSnapshot:
        """Build a StateSnapshot from a saved checkpoint tuple.

        Restores channels from the checkpoint, computes the next tasks, resolves
        subgraph task states (recursively when `recurse` is a checkpointer), and
        optionally applies pending writes before reading channel values.
        Returns an empty snapshot when `saved` is None.
        """
        if not saved:
            return StateSnapshot(
                values={},
                next=(),
                config=config,
                metadata=None,
                created_at=None,
                parent_config=None,
                tasks=(),
            )
        with ChannelsManager(
            self.channels,
            saved.checkpoint,
            LoopProtocol(
                config=saved.config,
                step=saved.metadata.get("step", -1) + 1,
                stop=saved.metadata.get("step", -1) + 2,
            ),
            skip_context=True,
        ) as (channels, managed):
            # tasks for this checkpoint
            next_tasks = prepare_next_tasks(
                saved.checkpoint,
                saved.pending_writes or [],
                self.nodes,
                channels,
                managed,
                saved.config,
                saved.metadata.get("step", -1) + 1,
                for_execution=True,
                store=self.store,
                checkpointer=self.checkpointer or None,
                manager=None,
            )
            # get the subgraphs
            subgraphs = dict(self.get_subgraphs())
            parent_ns = saved.config[CONF].get(CONFIG_KEY_CHECKPOINT_NS, "")
            task_states: dict[str, Union[RunnableConfig, StateSnapshot]] = {}
            for task in next_tasks.values():
                if task.name not in subgraphs:
                    continue
                # assemble checkpoint_ns for this task
                task_ns = f"{task.name}{NS_END}{task.id}"
                if parent_ns:
                    task_ns = f"{parent_ns}{NS_SEP}{task_ns}"
                if not recurse:
                    # set config as signal that subgraph checkpoints exist
                    config = {
                        CONF: {
                            "thread_id": saved.config[CONF]["thread_id"],
                            CONFIG_KEY_CHECKPOINT_NS: task_ns,
                        }
                    }
                    task_states[task.id] = config
                else:
                    # get the state of the subgraph
                    config = {
                        CONF: {
                            CONFIG_KEY_CHECKPOINTER: recurse,
                            "thread_id": saved.config[CONF]["thread_id"],
                            CONFIG_KEY_CHECKPOINT_NS: task_ns,
                        }
                    }
                    task_states[task.id] = subgraphs[task.name].get_state(
                        config, subgraphs=True
                    )
            # apply null writes first (pending writes not attributed to any task)
            if null_writes := [
                w[1:] for w in saved.pending_writes or [] if w[0] == NULL_TASK_ID
            ]:
                apply_writes(
                    saved.checkpoint,
                    channels,
                    [PregelTaskWrites((), INPUT, null_writes, [])],
                    None,
                )
            if apply_pending_writes and saved.pending_writes:
                # attach successful writes to their tasks, skipping control markers
                for tid, k, v in saved.pending_writes:
                    if k in (ERROR, INTERRUPT, SCHEDULED):
                        continue
                    if tid not in next_tasks:
                        continue
                    next_tasks[tid].writes.append((k, v))
                if tasks := [t for t in next_tasks.values() if t.writes]:
                    apply_writes(saved.checkpoint, channels, tasks, None)
            # assemble the state snapshot
            return StateSnapshot(
                read_channels(channels, self.stream_channels_asis),
                tuple(t.name for t in next_tasks.values() if not t.writes),
                patch_checkpoint_map(saved.config, saved.metadata),
                saved.metadata,
                saved.checkpoint["ts"],
                patch_checkpoint_map(saved.parent_config, saved.metadata),
                tasks_w_writes(
                    next_tasks.values(),
                    saved.pending_writes,
                    task_states,
                    self.stream_channels_asis,
                ),
            )
    async def _aprepare_state_snapshot(
        self,
        config: RunnableConfig,
        saved: Optional[CheckpointTuple],
        recurse: Optional[BaseCheckpointSaver] = None,
        apply_pending_writes: bool = False,
    ) -> StateSnapshot:
        """Async version of _prepare_state_snapshot; same semantics, using the
        async channels manager and subgraph state accessors."""
        if not saved:
            return StateSnapshot(
                values={},
                next=(),
                config=config,
                metadata=None,
                created_at=None,
                parent_config=None,
                tasks=(),
            )
        async with AsyncChannelsManager(
            self.channels,
            saved.checkpoint,
            LoopProtocol(
                config=saved.config,
                step=saved.metadata.get("step", -1) + 1,
                stop=saved.metadata.get("step", -1) + 2,
            ),
            skip_context=True,
        ) as (
            channels,
            managed,
        ):
            # tasks for this checkpoint
            next_tasks = prepare_next_tasks(
                saved.checkpoint,
                saved.pending_writes or [],
                self.nodes,
                channels,
                managed,
                saved.config,
                saved.metadata.get("step", -1) + 1,
                for_execution=True,
                store=self.store,
                checkpointer=self.checkpointer or None,
                manager=None,
            )
            # get the subgraphs
            subgraphs = {n: g async for n, g in self.aget_subgraphs()}
            parent_ns = saved.config[CONF].get(CONFIG_KEY_CHECKPOINT_NS, "")
            task_states: dict[str, Union[RunnableConfig, StateSnapshot]] = {}
            for task in next_tasks.values():
                if task.name not in subgraphs:
                    continue
                # assemble checkpoint_ns for this task
                task_ns = f"{task.name}{NS_END}{task.id}"
                if parent_ns:
                    task_ns = f"{parent_ns}{NS_SEP}{task_ns}"
                if not recurse:
                    # set config as signal that subgraph checkpoints exist
                    config = {
                        CONF: {
                            "thread_id": saved.config[CONF]["thread_id"],
                            CONFIG_KEY_CHECKPOINT_NS: task_ns,
                        }
                    }
                    task_states[task.id] = config
                else:
                    # get the state of the subgraph
                    config = {
                        CONF: {
                            CONFIG_KEY_CHECKPOINTER: recurse,
                            "thread_id": saved.config[CONF]["thread_id"],
                            CONFIG_KEY_CHECKPOINT_NS: task_ns,
                        }
                    }
                    task_states[task.id] = await subgraphs[task.name].aget_state(
                        config, subgraphs=True
                    )
            # apply null writes first (pending writes not attributed to any task)
            if null_writes := [
                w[1:] for w in saved.pending_writes or [] if w[0] == NULL_TASK_ID
            ]:
                apply_writes(
                    saved.checkpoint,
                    channels,
                    [PregelTaskWrites((), INPUT, null_writes, [])],
                    None,
                )
            if apply_pending_writes and saved.pending_writes:
                # attach successful writes to their tasks, skipping control markers
                for tid, k, v in saved.pending_writes:
                    if k in (ERROR, INTERRUPT, SCHEDULED):
                        continue
                    if tid not in next_tasks:
                        continue
                    next_tasks[tid].writes.append((k, v))
                if tasks := [t for t in next_tasks.values() if t.writes]:
                    apply_writes(saved.checkpoint, channels, tasks, None)
            # assemble the state snapshot
            return StateSnapshot(
                read_channels(channels, self.stream_channels_asis),
                tuple(t.name for t in next_tasks.values() if not t.writes),
                patch_checkpoint_map(saved.config, saved.metadata),
                saved.metadata,
                saved.checkpoint["ts"],
                patch_checkpoint_map(saved.parent_config, saved.metadata),
                tasks_w_writes(
                    next_tasks.values(),
                    saved.pending_writes,
                    task_states,
                    self.stream_channels_asis,
                ),
            )
    def get_state(
        self, config: RunnableConfig, *, subgraphs: bool = False
    ) -> StateSnapshot:
        """Get the current state of the graph.

        When `config` carries a checkpoint namespace (and no injected
        checkpointer), delegates to the matching subgraph. Raises ValueError
        if no checkpointer is available or the subgraph cannot be found.
        """
        checkpointer: Optional[BaseCheckpointSaver] = ensure_config(config)[CONF].get(
            CONFIG_KEY_CHECKPOINTER, self.checkpointer
        )
        if not checkpointer:
            raise ValueError("No checkpointer set")
        if (
            checkpoint_ns := config[CONF].get(CONFIG_KEY_CHECKPOINT_NS, "")
        ) and CONFIG_KEY_CHECKPOINTER not in config[CONF]:
            # remove task_ids from checkpoint_ns
            recast_checkpoint_ns = NS_SEP.join(
                part.split(NS_END)[0] for part in checkpoint_ns.split(NS_SEP)
            )
            # find the subgraph with the matching name
            for _, pregel in self.get_subgraphs(
                namespace=recast_checkpoint_ns, recurse=True
            ):
                return pregel.get_state(
                    patch_configurable(config, {CONFIG_KEY_CHECKPOINTER: checkpointer}),
                    subgraphs=subgraphs,
                )
            else:
                raise ValueError(f"Subgraph {recast_checkpoint_ns} not found")
        config = merge_configs(self.config, config) if self.config else config
        saved = checkpointer.get_tuple(config)
        # pending writes are only applied when reading the latest checkpoint,
        # not when a specific checkpoint id was requested
        return self._prepare_state_snapshot(
            config,
            saved,
            recurse=checkpointer if subgraphs else None,
            apply_pending_writes=CONFIG_KEY_CHECKPOINT_ID not in config[CONF],
        )
    async def aget_state(
        self, config: RunnableConfig, *, subgraphs: bool = False
    ) -> StateSnapshot:
        """Get the current state of the graph (async version of get_state)."""
        checkpointer: Optional[BaseCheckpointSaver] = ensure_config(config)[CONF].get(
            CONFIG_KEY_CHECKPOINTER, self.checkpointer
        )
        if not checkpointer:
            raise ValueError("No checkpointer set")
        if (
            checkpoint_ns := config[CONF].get(CONFIG_KEY_CHECKPOINT_NS, "")
        ) and CONFIG_KEY_CHECKPOINTER not in config[CONF]:
            # remove task_ids from checkpoint_ns
            recast_checkpoint_ns = NS_SEP.join(
                part.split(NS_END)[0] for part in checkpoint_ns.split(NS_SEP)
            )
            # find the subgraph with the matching name
            async for _, pregel in self.aget_subgraphs(
                namespace=recast_checkpoint_ns, recurse=True
            ):
                return await pregel.aget_state(
                    patch_configurable(config, {CONFIG_KEY_CHECKPOINTER: checkpointer}),
                    subgraphs=subgraphs,
                )
            else:
                raise ValueError(f"Subgraph {recast_checkpoint_ns} not found")
        config = merge_configs(self.config, config) if self.config else config
        saved = await checkpointer.aget_tuple(config)
        # pending writes are only applied when reading the latest checkpoint,
        # not when a specific checkpoint id was requested
        return await self._aprepare_state_snapshot(
            config,
            saved,
            recurse=checkpointer if subgraphs else None,
            apply_pending_writes=CONFIG_KEY_CHECKPOINT_ID not in config[CONF],
        )
def get_state_history(
self,
config: RunnableConfig,
*,
filter: Optional[Dict[str, Any]] = None,
before: Optional[RunnableConfig] = None,
limit: Optional[int] = None,
) -> Iterator[StateSnapshot]:
config = ensure_config(config)
"""Get the history of the state of the graph."""
checkpointer: Optional[BaseCheckpointSaver] = ensure_config(config)[CONF].get(
CONFIG_KEY_CHECKPOINTER, self.checkpointer
)
if not checkpointer:
raise ValueError("No checkpointer set")
if (
checkpoint_ns := config[CONF].get(CONFIG_KEY_CHECKPOINT_NS, "")
) and CONFIG_KEY_CHECKPOINTER not in config[CONF]:
# remove task_ids from checkpoint_ns
recast_checkpoint_ns = NS_SEP.join(
part.split(NS_END)[0] for part in checkpoint_ns.split(NS_SEP)
)
# find the subgraph with the matching name
for _, pregel in self.get_subgraphs(
namespace=recast_checkpoint_ns, recurse=True
):
yield from pregel.get_state_history(
patch_configurable(config, {CONFIG_KEY_CHECKPOINTER: checkpointer}),
filter=filter,
before=before,
limit=limit,
)
return
else:
raise ValueError(f"Subgraph {recast_checkpoint_ns} not found")
config = merge_configs(
self.config,
config,
{CONF: {CONFIG_KEY_CHECKPOINT_NS: checkpoint_ns}},
)
# eagerly consume list() to avoid holding up the db cursor
for checkpoint_tuple in list(
checkpointer.list(config, before=before, limit=limit, filter=filter)
):
yield self._prepare_state_snapshot(
checkpoint_tuple.config, checkpoint_tuple
)
async def aget_state_history(
self,
config: RunnableConfig,
*,
filter: Optional[Dict[str, Any]] = None,
before: Optional[RunnableConfig] = None,
limit: Optional[int] = None,
) -> AsyncIterator[StateSnapshot]:
config = ensure_config(config)
"""Get the history of the state of the graph."""
checkpointer: Optional[BaseCheckpointSaver] = ensure_config(config)[CONF].get(
CONFIG_KEY_CHECKPOINTER, self.checkpointer
)
if not checkpointer:
raise ValueError("No checkpointer set")
if (
checkpoint_ns := config[CONF].get(CONFIG_KEY_CHECKPOINT_NS, "")
) and CONFIG_KEY_CHECKPOINTER not in config[CONF]:
# remove task_ids from checkpoint_ns
recast_checkpoint_ns = NS_SEP.join(
part.split(NS_END)[0] for part in checkpoint_ns.split(NS_SEP)
)
# find the subgraph with the matching name
async for _, pregel in self.aget_subgraphs(
namespace=recast_checkpoint_ns, recurse=True
):
async for state in pregel.aget_state_history(
patch_configurable(config, {CONFIG_KEY_CHECKPOINTER: checkpointer}),
filter=filter,
before=before,
limit=limit,
):
yield state
return
else:
raise ValueError(f"Subgraph {recast_checkpoint_ns} not found")
config = merge_configs(
self.config,
config,
{CONF: {CONFIG_KEY_CHECKPOINT_NS: checkpoint_ns}},
)
# eagerly consume list() to avoid holding up the db cursor
for checkpoint_tuple in [
c
async for c in checkpointer.alist(
config, before=before, limit=limit, filter=filter
)
]:
yield await self._aprepare_state_snapshot(
checkpoint_tuple.config, checkpoint_tuple
)
    def update_state(
        self,
        config: RunnableConfig,
        values: Optional[Union[dict[str, Any], Any]],
        as_node: Optional[str] = None,
    ) -> RunnableConfig:
        """Update the state of the graph with the given values, as if they came from
        node `as_node`. If `as_node` is not provided, it will be set to the last node
        that updated the state, if not ambiguous.

        Special cases: `values=None` with `as_node=END` clears all pending tasks;
        `values=None` with `as_node=None` copies the current checkpoint forward;
        `values=None` with `as_node="__copy__"` forks from the parent checkpoint.

        Returns the config of the newly created checkpoint.

        Raises:
            ValueError: If no checkpointer is set, or a namespaced subgraph
                cannot be found.
            InvalidUpdateError: If `as_node` is ambiguous, unknown, or has no writers.
        """
        checkpointer: Optional[BaseCheckpointSaver] = ensure_config(config)[CONF].get(
            CONFIG_KEY_CHECKPOINTER, self.checkpointer
        )
        if not checkpointer:
            raise ValueError("No checkpointer set")
        # delegate to subgraph
        if (
            checkpoint_ns := config[CONF].get(CONFIG_KEY_CHECKPOINT_NS, "")
        ) and CONFIG_KEY_CHECKPOINTER not in config[CONF]:
            # remove task_ids from checkpoint_ns
            recast_checkpoint_ns = NS_SEP.join(
                part.split(NS_END)[0] for part in checkpoint_ns.split(NS_SEP)
            )
            # find the subgraph with the matching name
            for _, pregel in self.get_subgraphs(
                namespace=recast_checkpoint_ns, recurse=True
            ):
                return pregel.update_state(
                    patch_configurable(config, {CONFIG_KEY_CHECKPOINTER: checkpointer}),
                    values,
                    as_node,
                )
            else:
                raise ValueError(f"Subgraph {recast_checkpoint_ns} not found")
        # get last checkpoint
        config = ensure_config(self.config, config)
        saved = checkpointer.get_tuple(config)
        checkpoint = copy_checkpoint(saved.checkpoint) if saved else empty_checkpoint()
        checkpoint_previous_versions = (
            saved.checkpoint["channel_versions"].copy() if saved else {}
        )
        step = saved.metadata.get("step", -1) if saved else -1
        # merge configurable fields with previous checkpoint config
        checkpoint_config = patch_configurable(
            config,
            {CONFIG_KEY_CHECKPOINT_NS: config[CONF].get(CONFIG_KEY_CHECKPOINT_NS, "")},
        )
        checkpoint_metadata = config["metadata"]
        if saved:
            checkpoint_config = patch_configurable(config, saved.config[CONF])
            checkpoint_metadata = {**saved.metadata, **checkpoint_metadata}
        with ChannelsManager(
            self.channels,
            checkpoint,
            LoopProtocol(config=config, step=step + 1, stop=step + 2),
        ) as (channels, managed):
            # no values as END, just clear all tasks
            if values is None and as_node == END:
                if saved is not None:
                    # tasks for this checkpoint
                    next_tasks = prepare_next_tasks(
                        checkpoint,
                        saved.pending_writes or [],
                        self.nodes,
                        channels,
                        managed,
                        saved.config,
                        saved.metadata.get("step", -1) + 1,
                        for_execution=True,
                        store=self.store,
                        checkpointer=self.checkpointer or None,
                        manager=None,
                    )
                    # apply null writes
                    if null_writes := [
                        w[1:]
                        for w in saved.pending_writes or []
                        if w[0] == NULL_TASK_ID
                    ]:
                        apply_writes(
                            saved.checkpoint,
                            channels,
                            [PregelTaskWrites((), INPUT, null_writes, [])],
                            None,
                        )
                    # apply writes from tasks that already ran
                    for tid, k, v in saved.pending_writes or []:
                        if k in (ERROR, INTERRUPT, SCHEDULED):
                            continue
                        if tid not in next_tasks:
                            continue
                        next_tasks[tid].writes.append((k, v))
                    # clear all current tasks
                    apply_writes(checkpoint, channels, next_tasks.values(), None)
                # save checkpoint
                next_config = checkpointer.put(
                    checkpoint_config,
                    create_checkpoint(checkpoint, None, step),
                    {
                        **checkpoint_metadata,
                        "source": "update",
                        "step": step + 1,
                        "writes": {},
                        "parents": saved.metadata.get("parents", {}) if saved else {},
                    },
                    {},
                )
                return patch_checkpoint_map(
                    next_config, saved.metadata if saved else None
                )
            # no values, copy checkpoint
            if values is None and as_node is None:
                next_checkpoint = create_checkpoint(checkpoint, None, step)
                # copy checkpoint
                next_config = checkpointer.put(
                    checkpoint_config,
                    next_checkpoint,
                    {
                        **checkpoint_metadata,
                        "source": "update",
                        "step": step + 1,
                        "writes": {},
                        "parents": saved.metadata.get("parents", {}) if saved else {},
                    },
                    {},
                )
                return patch_checkpoint_map(
                    next_config, saved.metadata if saved else None
                )
            # fork from the parent checkpoint
            if values is None and as_node == "__copy__":
                next_checkpoint = create_checkpoint(checkpoint, None, step)
                # copy checkpoint
                next_config = checkpointer.put(
                    saved.parent_config or saved.config if saved else checkpoint_config,
                    next_checkpoint,
                    {
                        **checkpoint_metadata,
                        "source": "fork",
                        "step": step + 1,
                        "parents": saved.metadata.get("parents", {}) if saved else {},
                    },
                    {},
                )
                return patch_checkpoint_map(
                    next_config, saved.metadata if saved else None
                )
            # apply pending writes, if not on specific checkpoint
            if (
                CONFIG_KEY_CHECKPOINT_ID not in config[CONF]
                and saved is not None
                and saved.pending_writes
            ):
                # tasks for this checkpoint
                next_tasks = prepare_next_tasks(
                    checkpoint,
                    saved.pending_writes,
                    self.nodes,
                    channels,
                    managed,
                    saved.config,
                    saved.metadata.get("step", -1) + 1,
                    for_execution=True,
                    store=self.store,
                    checkpointer=self.checkpointer or None,
                    manager=None,
                )
                # apply null writes
                if null_writes := [
                    w[1:] for w in saved.pending_writes or [] if w[0] == NULL_TASK_ID
                ]:
                    apply_writes(
                        saved.checkpoint,
                        channels,
                        [PregelTaskWrites((), INPUT, null_writes, [])],
                        None,
                    )
                # apply writes
                for tid, k, v in saved.pending_writes:
                    if k in (ERROR, INTERRUPT, SCHEDULED):
                        continue
                    if tid not in next_tasks:
                        continue
                    next_tasks[tid].writes.append((k, v))
                if tasks := [t for t in next_tasks.values() if t.writes]:
                    apply_writes(checkpoint, channels, tasks, None)
            # find last node that updated the state, if not provided
            if as_node is None and not any(
                v for vv in checkpoint["versions_seen"].values() for v in vv.values()
            ):
                if (
                    isinstance(self.input_channels, str)
                    and self.input_channels in self.nodes
                ):
                    as_node = self.input_channels
            elif as_node is None:
                last_seen_by_node = sorted(
                    (v, n)
                    for n, seen in checkpoint["versions_seen"].items()
                    if n in self.nodes
                    for v in seen.values()
                )
                # if two nodes updated the state at the same time, it's ambiguous
                if last_seen_by_node:
                    if len(last_seen_by_node) == 1:
                        as_node = last_seen_by_node[0][1]
                    elif last_seen_by_node[-1][0] != last_seen_by_node[-2][0]:
                        as_node = last_seen_by_node[-1][1]
            if as_node is None:
                raise InvalidUpdateError("Ambiguous update, specify as_node")
            if as_node not in self.nodes:
                raise InvalidUpdateError(f"Node {as_node} does not exist")
            # create task to run all writers of the chosen node
            writers = self.nodes[as_node].flat_writers
            if not writers:
                raise InvalidUpdateError(f"Node {as_node} has no writers")
            writes: deque[tuple[str, Any]] = deque()
            task = PregelTaskWrites((), as_node, writes, [INTERRUPT])
            task_id = str(uuid5(UUID(checkpoint["id"]), INTERRUPT))
            run = RunnableSequence(*writers) if len(writers) > 1 else writers[0]
            # execute task
            run.invoke(
                values,
                patch_config(
                    config,
                    run_name=self.name + "UpdateState",
                    configurable={
                        # deque.extend is thread-safe
                        CONFIG_KEY_SEND: partial(
                            local_write,
                            writes.extend,
                            self.nodes.keys(),
                        ),
                        CONFIG_KEY_READ: partial(
                            local_read,
                            step + 1,
                            checkpoint,
                            channels,
                            managed,
                            task,
                            config,
                        ),
                    },
                ),
            )
            # save task writes
            # channel writes are saved to current checkpoint
            # push writes are saved to next checkpoint
            channel_writes, push_writes = (
                [w for w in task.writes if w[0] != PUSH],
                [w for w in task.writes if w[0] == PUSH],
            )
            if saved and channel_writes:
                checkpointer.put_writes(checkpoint_config, channel_writes, task_id)
            # apply to checkpoint and save
            mv_writes = apply_writes(
                checkpoint, channels, [task], checkpointer.get_next_version
            )
            assert not mv_writes, "Can't write to SharedValues from update_state"
            checkpoint = create_checkpoint(checkpoint, channels, step + 1)
            next_config = checkpointer.put(
                checkpoint_config,
                checkpoint,
                {
                    **checkpoint_metadata,
                    "source": "update",
                    "step": step + 1,
                    "writes": {as_node: values},
                    "parents": saved.metadata.get("parents", {}) if saved else {},
                },
                get_new_channel_versions(
                    checkpoint_previous_versions, checkpoint["channel_versions"]
                ),
            )
            if push_writes:
                checkpointer.put_writes(next_config, push_writes, task_id)
            return patch_checkpoint_map(next_config, saved.metadata if saved else None)
    async def aupdate_state(
        self,
        config: RunnableConfig,
        values: dict[str, Any] | Any,
        as_node: Optional[str] = None,
    ) -> RunnableConfig:
        """Async version of update_state: update the state of the graph with the
        given values, as if they came from node `as_node`. If `as_node` is not
        provided, it is inferred from the last node that updated the state, if
        not ambiguous. See update_state for the special `values=None` cases.
        """
        checkpointer: Optional[BaseCheckpointSaver] = ensure_config(config)[CONF].get(
            CONFIG_KEY_CHECKPOINTER, self.checkpointer
        )
        if not checkpointer:
            raise ValueError("No checkpointer set")
        # delegate to subgraph
        if (
            checkpoint_ns := config[CONF].get(CONFIG_KEY_CHECKPOINT_NS, "")
        ) and CONFIG_KEY_CHECKPOINTER not in config[CONF]:
            # remove task_ids from checkpoint_ns
            recast_checkpoint_ns = NS_SEP.join(
                part.split(NS_END)[0] for part in checkpoint_ns.split(NS_SEP)
            )
            # find the subgraph with the matching name
            async for _, pregel in self.aget_subgraphs(
                namespace=recast_checkpoint_ns, recurse=True
            ):
                return await pregel.aupdate_state(
                    patch_configurable(config, {CONFIG_KEY_CHECKPOINTER: checkpointer}),
                    values,
                    as_node,
                )
            else:
                raise ValueError(f"Subgraph {recast_checkpoint_ns} not found")
        # get last checkpoint
        config = ensure_config(self.config, config)
        saved = await checkpointer.aget_tuple(config)
        checkpoint = copy_checkpoint(saved.checkpoint) if saved else empty_checkpoint()
        checkpoint_previous_versions = (
            saved.checkpoint["channel_versions"].copy() if saved else {}
        )
        step = saved.metadata.get("step", -1) if saved else -1
        # merge configurable fields with previous checkpoint config
        checkpoint_config = patch_configurable(
            config,
            {CONFIG_KEY_CHECKPOINT_NS: config[CONF].get(CONFIG_KEY_CHECKPOINT_NS, "")},
        )
        checkpoint_metadata = config["metadata"]
        if saved:
            checkpoint_config = patch_configurable(config, saved.config[CONF])
            checkpoint_metadata = {**saved.metadata, **checkpoint_metadata}
        async with AsyncChannelsManager(
            self.channels,
            checkpoint,
            LoopProtocol(config=config, step=step + 1, stop=step + 2),
        ) as (
            channels,
            managed,
        ):
            # no values, just clear all tasks
            if values is None and as_node == END:
                if saved is not None:
                    # tasks for this checkpoint
                    next_tasks = prepare_next_tasks(
                        checkpoint,
                        saved.pending_writes or [],
                        self.nodes,
                        channels,
                        managed,
                        saved.config,
                        saved.metadata.get("step", -1) + 1,
                        for_execution=True,
                        store=self.store,
                        checkpointer=self.checkpointer or None,
                        manager=None,
                    )
                    # apply null writes
                    if null_writes := [
                        w[1:]
                        for w in saved.pending_writes or []
                        if w[0] == NULL_TASK_ID
                    ]:
                        apply_writes(
                            saved.checkpoint,
                            channels,
                            [PregelTaskWrites((), INPUT, null_writes, [])],
                            None,
                        )
                    # apply writes from tasks that already ran
                    for tid, k, v in saved.pending_writes or []:
                        if k in (ERROR, INTERRUPT, SCHEDULED):
                            continue
                        if tid not in next_tasks:
                            continue
                        next_tasks[tid].writes.append((k, v))
                    # clear all current tasks
                    apply_writes(checkpoint, channels, next_tasks.values(), None)
                # save checkpoint
                next_config = await checkpointer.aput(
                    checkpoint_config,
                    create_checkpoint(checkpoint, None, step),
                    {
                        **checkpoint_metadata,
                        "source": "update",
                        "step": step + 1,
                        "writes": {},
                        "parents": saved.metadata.get("parents", {}) if saved else {},
                    },
                    {},
                )
                return patch_checkpoint_map(
                    next_config, saved.metadata if saved else None
                )
            # no values, copy checkpoint
            if values is None and as_node is None:
                next_checkpoint = create_checkpoint(checkpoint, None, step)
                # copy checkpoint
                next_config = await checkpointer.aput(
                    checkpoint_config,
                    next_checkpoint,
                    {
                        **checkpoint_metadata,
                        "source": "update",
                        "step": step + 1,
                        "writes": {},
                        "parents": saved.metadata.get("parents", {}) if saved else {},
                    },
                    {},
                )
                return patch_checkpoint_map(
                    next_config, saved.metadata if saved else None
                )
            # fork from the parent checkpoint
            if values is None and as_node == "__copy__":
                next_checkpoint = create_checkpoint(checkpoint, None, step)
                # copy checkpoint
                next_config = await checkpointer.aput(
                    saved.parent_config or saved.config if saved else checkpoint_config,
                    next_checkpoint,
                    {
                        **checkpoint_metadata,
                        "source": "fork",
                        "step": step + 1,
                        "parents": saved.metadata.get("parents", {}) if saved else {},
                    },
                    {},
                )
                return patch_checkpoint_map(
                    next_config, saved.metadata if saved else None
                )
            # apply pending writes, if not on specific checkpoint
            if (
                CONFIG_KEY_CHECKPOINT_ID not in config[CONF]
                and saved is not None
                and saved.pending_writes
            ):
                # tasks for this checkpoint
                next_tasks = prepare_next_tasks(
                    checkpoint,
                    saved.pending_writes,
                    self.nodes,
                    channels,
                    managed,
                    saved.config,
                    saved.metadata.get("step", -1) + 1,
                    for_execution=True,
                    store=self.store,
                    checkpointer=self.checkpointer or None,
                    manager=None,
                )
                # apply null writes
                if null_writes := [
                    w[1:] for w in saved.pending_writes or [] if w[0] == NULL_TASK_ID
                ]:
                    apply_writes(
                        saved.checkpoint,
                        channels,
                        [PregelTaskWrites((), INPUT, null_writes, [])],
                        None,
                    )
                # apply writes from tasks that already ran
                for tid, k, v in saved.pending_writes:
                    if k in (ERROR, INTERRUPT, SCHEDULED):
                        continue
                    if tid not in next_tasks:
                        continue
                    next_tasks[tid].writes.append((k, v))
                if tasks := [t for t in next_tasks.values() if t.writes]:
                    apply_writes(checkpoint, channels, tasks, None)
            # find last node that updated the state, if not provided
            # NOTE(review): the sync update_state checks versions_seen emptiness
            # here instead of `not saved` — confirm this divergence is intended
            if as_node is None and not saved:
                if (
                    isinstance(self.input_channels, str)
                    and self.input_channels in self.nodes
                ):
                    as_node = self.input_channels
            elif as_node is None:
                last_seen_by_node = sorted(
                    (v, n)
                    for n, seen in checkpoint["versions_seen"].items()
                    if n in self.nodes
                    for v in seen.values()
                )
                # if two nodes updated the state at the same time, it's ambiguous
                if last_seen_by_node:
                    if len(last_seen_by_node) == 1:
                        as_node = last_seen_by_node[0][1]
                    elif last_seen_by_node[-1][0] != last_seen_by_node[-2][0]:
                        as_node = last_seen_by_node[-1][1]
            if as_node is None:
                raise InvalidUpdateError("Ambiguous update, specify as_node")
            if as_node not in self.nodes:
                raise InvalidUpdateError(f"Node {as_node} does not exist")
            # create task to run all writers of the chosen node
            writers = self.nodes[as_node].flat_writers
            if not writers:
                raise InvalidUpdateError(f"Node {as_node} has no writers")
            writes: deque[tuple[str, Any]] = deque()
            task = PregelTaskWrites((), as_node, writes, [INTERRUPT])
            task_id = str(uuid5(UUID(checkpoint["id"]), INTERRUPT))
            run = RunnableSequence(*writers) if len(writers) > 1 else writers[0]
            # execute task
            await run.ainvoke(
                values,
                patch_config(
                    config,
                    run_name=self.name + "UpdateState",
                    configurable={
                        # deque.extend is thread-safe
                        CONFIG_KEY_SEND: partial(
                            local_write,
                            writes.extend,
                            self.nodes.keys(),
                        ),
                        CONFIG_KEY_READ: partial(
                            local_read,
                            step + 1,
                            checkpoint,
                            channels,
                            managed,
                            task,
                            config,
                        ),
                    },
                ),
            )
            # save task writes
            # channel writes are saved to current checkpoint
            # push writes are saved to next checkpoint
            channel_writes, push_writes = (
                [w for w in task.writes if w[0] != PUSH],
                [w for w in task.writes if w[0] == PUSH],
            )
            if saved and channel_writes:
                await checkpointer.aput_writes(
                    checkpoint_config, channel_writes, task_id
                )
            # apply to checkpoint and save
            mv_writes = apply_writes(
                checkpoint, channels, [task], checkpointer.get_next_version
            )
            assert not mv_writes, "Can't write to SharedValues from update_state"
            checkpoint = create_checkpoint(checkpoint, channels, step + 1)
            # save checkpoint, after applying writes
            next_config = await checkpointer.aput(
                checkpoint_config,
                checkpoint,
                {
                    **checkpoint_metadata,
                    "source": "update",
                    "step": step + 1,
                    "writes": {as_node: values},
                    "parents": saved.metadata.get("parents", {}) if saved else {},
                },
                get_new_channel_versions(
                    checkpoint_previous_versions, checkpoint["channel_versions"]
                ),
            )
            # save push writes
            if push_writes:
                await checkpointer.aput_writes(next_config, push_writes, task_id)
            return patch_checkpoint_map(next_config, saved.metadata if saved else None)
    def _defaults(
        self,
        config: RunnableConfig,
        *,
        stream_mode: Optional[Union[StreamMode, list[StreamMode]]],
        output_keys: Optional[Union[str, Sequence[str]]],
        interrupt_before: Optional[Union[All, Sequence[str]]],
        interrupt_after: Optional[Union[All, Sequence[str]]],
        debug: Optional[bool],
    ) -> tuple[
        bool,
        set[StreamMode],
        Union[str, Sequence[str]],
        Union[All, Sequence[str]],
        Union[All, Sequence[str]],
        Optional[BaseCheckpointSaver],
        Optional[BaseStore],
    ]:
        """Resolve per-invocation options against instance defaults and config.

        Returns a tuple of (debug, stream_modes, output_keys, interrupt_before,
        interrupt_after, checkpointer, store). Raises ValueError for an invalid
        recursion limit or a checkpointer without required configurable keys.
        """
        if config["recursion_limit"] < 1:
            raise ValueError("recursion_limit must be at least 1")
        debug = debug if debug is not None else self.debug
        if output_keys is None:
            output_keys = self.stream_channels_asis
        else:
            validate_keys(output_keys, self.channels)
        interrupt_before = interrupt_before or self.interrupt_before_nodes
        interrupt_after = interrupt_after or self.interrupt_after_nodes
        stream_mode = stream_mode if stream_mode is not None else self.stream_mode
        if not isinstance(stream_mode, list):
            stream_mode = [stream_mode]
        if CONFIG_KEY_TASK_ID in config.get(CONF, {}):
            # if being called as a node in another graph, always use values mode
            stream_mode = ["values"]
        # checkpointer resolution: explicit False disables; a checkpointer
        # injected via config (by a parent graph) takes precedence over ours
        if self.checkpointer is False:
            checkpointer: Optional[BaseCheckpointSaver] = None
        elif CONFIG_KEY_CHECKPOINTER in config.get(CONF, {}):
            checkpointer = config[CONF][CONFIG_KEY_CHECKPOINTER]
        else:
            checkpointer = self.checkpointer
        if checkpointer and not config.get(CONF):
            raise ValueError(
                f"Checkpointer requires one or more of the following 'configurable' keys: {[s.id for s in checkpointer.config_specs]}"
            )
        # store resolution mirrors the checkpointer: config-injected wins
        if CONFIG_KEY_STORE in config.get(CONF, {}):
            store: Optional[BaseStore] = config[CONF][CONFIG_KEY_STORE]
        else:
            store = self.store
        return (
            debug,
            set(stream_mode),
            output_keys,
            interrupt_before,
            interrupt_after,
            checkpointer,
            store,
        )
def stream(
self,
input: Union[dict[str, Any], Any],
config: Optional[RunnableConfig] = None,
*,
stream_mode: Optional[Union[StreamMode, list[StreamMode]]] = None,
output_keys: Optional[Union[str, Sequence[str]]] = None,
interrupt_before: Optional[Union[All, Sequence[str]]] = None,
interrupt_after: Optional[Union[All, Sequence[str]]] = None,
debug: Optional[bool] = None,
subgraphs: bool = False,
) -> Iterator[Union[dict[str, Any], Any]]:
"""Stream graph steps for a single input.
Args:
input: The input to the graph.
config: The configuration to use for the run.
stream_mode: The mode to stream output, defaults to self.stream_mode.
Options are 'values', 'updates', and 'debug'.
values: Emit the current values of the state for each step.
updates: Emit only the updates to the state for each step.
Output is a dict with the node name as key and the updated values as value.
debug: Emit debug events for each step.
output_keys: The keys to stream, defaults to all non-context channels.
interrupt_before: Nodes to interrupt before, defaults to all nodes in the graph.
interrupt_after: Nodes to interrupt after, defaults to all nodes in the graph.
debug: Whether to print debug information during execution, defaults to False.
subgraphs: Whether to stream subgraphs, defaults to False.
Yields:
The output of each step in the graph. The output shape depends on the stream_mode.
Examples:
Using different stream modes with a graph:
```pycon
>>> import operator
>>> from typing_extensions import Annotated, TypedDict
>>> from langgraph.graph import StateGraph
>>> from langgraph.constants import START
...
>>> class State(TypedDict):
... alist: Annotated[list, operator.add]
... another_list: Annotated[list, operator.add]
...
>>> builder = StateGraph(State)
>>> builder.add_node("a", lambda _state: {"another_list": ["hi"]})
>>> builder.add_node("b", lambda _state: {"alist": ["there"]})
>>> builder.add_edge("a", "b")
>>> builder.add_edge(START, "a")
>>> graph = builder.compile()
```
With stream_mode="values":
```pycon
>>> for event in graph.stream({"alist": ['Ex for stream_mode="values"']}, stream_mode="values"):
... print(event)
{'alist': ['Ex for stream_mode="values"'], 'another_list': []}
{'alist': ['Ex for stream_mode="values"'], 'another_list': ['hi']}
{'alist': ['Ex for stream_mode="values"', 'there'], 'another_list': ['hi']}
```
With stream_mode="updates":
```pycon
>>> for event in graph.stream({"alist": ['Ex for stream_mode="updates"']}, stream_mode="updates"):
... print(event)
{'a': {'another_list': ['hi']}}
{'b': {'alist': ['there']}}
```
With stream_mode="debug":
```pycon
>>> for event in graph.stream({"alist": ['Ex for stream_mode="debug"']}, stream_mode="debug"):
... print(event)
{'type': 'task', 'timestamp': '2024-06-23T...+00:00', 'step': 1, 'payload': {'id': '...', 'name': 'a', 'input': {'alist': ['Ex for stream_mode="debug"'], 'another_list': []}, 'triggers': ['start:a']}}
{'type': 'task_result', 'timestamp': '2024-06-23T...+00:00', 'step': 1, 'payload': {'id': '...', 'name': 'a', 'result': [('another_list', ['hi'])]}}
{'type': 'task', 'timestamp': '2024-06-23T...+00:00', 'step': 2, 'payload': {'id': '...', 'name': 'b', 'input': {'alist': ['Ex for stream_mode="debug"'], 'another_list': ['hi']}, 'triggers': ['a']}}
{'type': 'task_result', 'timestamp': '2024-06-23T...+00:00', 'step': 2, 'payload': {'id': '...', 'name': 'b', 'result': [('alist', ['there'])]}}
```
"""
stream = SyncQueue()
def output() -> Iterator:
while True:
try:
ns, mode, payload = stream.get(block=False)
except queue.Empty:
break
if subgraphs and isinstance(stream_mode, list):
yield (ns, mode, payload)
elif isinstance(stream_mode, list):
yield (mode, payload)
elif subgraphs:
yield (ns, payload)
else:
yield payload
config = ensure_config(self.config, config)
callback_manager = get_callback_manager_for_config(config)
run_manager = callback_manager.on_chain_start(
None,
input,
name=config.get("run_name", self.get_name()),
run_id=config.get("run_id"),
)
try:
# assign defaults
(
debug,
stream_modes,
output_keys,
interrupt_before_,
interrupt_after_,
checkpointer,
store,
) = self._defaults(
config,
stream_mode=stream_mode,
output_keys=output_keys,
interrupt_before=interrupt_before,
interrupt_after=interrupt_after,
debug=debug,
)
# set up messages stream mode
if "messages" in stream_modes:
run_manager.inheritable_handlers.append(
StreamMessagesHandler(stream.put)
)
# set up custom stream mode
if "custom" in stream_modes:
config[CONF][CONFIG_KEY_STREAM_WRITER] = lambda c: stream.put(
((), "custom", c)
)
with SyncPregelLoop(
input,
stream=StreamProtocol(stream.put, stream_modes),
config=config,
store=store,
checkpointer=checkpointer,
nodes=self.nodes,
specs=self.channels,
output_keys=output_keys,
stream_keys=self.stream_channels_asis,
interrupt_before=interrupt_before_,
interrupt_after=interrupt_after_,
manager=run_manager,
debug=debug,
) as loop:
# create runner
runner = PregelRunner(
submit=loop.submit,
put_writes=loop.put_writes,
schedule_task=loop.accept_push,
node_finished=config[CONF].get(CONFIG_KEY_NODE_FINISHED),
)
# enable subgraph streaming
if subgraphs:
loop.config[CONF][CONFIG_KEY_STREAM] = loop.stream
# enable concurrent streaming
if subgraphs or "messages" in stream_modes or "custom" in stream_modes:
# we are careful to have a single waiter live at any one time
# because on exit we increment semaphore count by exactly 1
waiter: Optional[concurrent.futures.Future] = None
# because sync futures cannot be cancelled, we instead
# release the stream semaphore on exit, which will cause
# a pending waiter to return immediately
loop.stack.callback(stream._count.release)
def get_waiter() -> concurrent.futures.Future[None]:
nonlocal waiter
if waiter is None or waiter.done():
waiter = loop.submit(stream.wait)
return waiter
else:
return waiter
else:
get_waiter = None # type: ignore[assignment]
# Similarly to Bulk Synchronous Parallel / Pregel model
# computation proceeds in steps, while there are channel updates
# channel updates from step N are only visible in step N+1
# channels are guaranteed to be immutable for the duration of the step,
# with channel updates applied only at the transition between steps
while loop.tick(input_keys=self.input_channels):
for _ in runner.tick(
loop.tasks.values(),
timeout=self.step_timeout,
retry_policy=self.retry_policy,
get_waiter=get_waiter,
):
# emit output
yield from output()
# emit output
yield from output()
# handle exit
if loop.status == "out_of_steps":
msg = create_error_message(
message=(
f"Recursion limit of {config['recursion_limit']} reached "
"without hitting a stop condition. You can increase the "
"limit by setting the `recursion_limit` config key."
),
error_code=ErrorCode.GRAPH_RECURSION_LIMIT,
)
raise GraphRecursionError(msg)
# set final channel values as run output
run_manager.on_chain_end(loop.output)
except BaseException as e:
run_manager.on_chain_error(e)
raise
async def astream(
    self,
    input: Union[dict[str, Any], Any],
    config: Optional[RunnableConfig] = None,
    *,
    stream_mode: Optional[Union[StreamMode, list[StreamMode]]] = None,
    output_keys: Optional[Union[str, Sequence[str]]] = None,
    interrupt_before: Optional[Union[All, Sequence[str]]] = None,
    interrupt_after: Optional[Union[All, Sequence[str]]] = None,
    debug: Optional[bool] = None,
    subgraphs: bool = False,
) -> AsyncIterator[Union[dict[str, Any], Any]]:
    """Asynchronously stream graph steps for a single input.

    Args:
        input: The input to the graph.
        config: The configuration to use for the run.
        stream_mode: The mode to stream output, defaults to self.stream_mode.
            Options are 'values', 'updates', and 'debug'.
            values: Emit the current values of the state for each step.
            updates: Emit only the updates to the state for each step.
                Output is a dict with the node name as key and the updated values as value.
            debug: Emit debug events for each step.
        output_keys: The keys to stream, defaults to all non-context channels.
        interrupt_before: Nodes to interrupt before, defaults to all nodes in the graph.
        interrupt_after: Nodes to interrupt after, defaults to all nodes in the graph.
        debug: Whether to print debug information during execution, defaults to False.
        subgraphs: Whether to stream subgraphs, defaults to False.

    Yields:
        The output of each step in the graph. The output shape depends on the stream_mode.

    Examples:
        Using different stream modes with a graph:
        ```pycon
        >>> import operator
        >>> from typing_extensions import Annotated, TypedDict
        >>> from langgraph.graph import StateGraph
        >>> from langgraph.constants import START
        ...
        >>> class State(TypedDict):
        ...     alist: Annotated[list, operator.add]
        ...     another_list: Annotated[list, operator.add]
        ...
        >>> builder = StateGraph(State)
        >>> builder.add_node("a", lambda _state: {"another_list": ["hi"]})
        >>> builder.add_node("b", lambda _state: {"alist": ["there"]})
        >>> builder.add_edge("a", "b")
        >>> builder.add_edge(START, "a")
        >>> graph = builder.compile()
        ```
        With stream_mode="values":

        ```pycon
        >>> async for event in graph.astream({"alist": ['Ex for stream_mode="values"']}, stream_mode="values"):
        ...     print(event)
        {'alist': ['Ex for stream_mode="values"'], 'another_list': []}
        {'alist': ['Ex for stream_mode="values"'], 'another_list': ['hi']}
        {'alist': ['Ex for stream_mode="values"', 'there'], 'another_list': ['hi']}
        ```
        With stream_mode="updates":

        ```pycon
        >>> async for event in graph.astream({"alist": ['Ex for stream_mode="updates"']}, stream_mode="updates"):
        ...     print(event)
        {'a': {'another_list': ['hi']}}
        {'b': {'alist': ['there']}}
        ```
        With stream_mode="debug":

        ```pycon
        >>> async for event in graph.astream({"alist": ['Ex for stream_mode="debug"']}, stream_mode="debug"):
        ...     print(event)
        {'type': 'task', 'timestamp': '2024-06-23T...+00:00', 'step': 1, 'payload': {'id': '...', 'name': 'a', 'input': {'alist': ['Ex for stream_mode="debug"'], 'another_list': []}, 'triggers': ['start:a']}}
        {'type': 'task_result', 'timestamp': '2024-06-23T...+00:00', 'step': 1, 'payload': {'id': '...', 'name': 'a', 'result': [('another_list', ['hi'])]}}
        {'type': 'task', 'timestamp': '2024-06-23T...+00:00', 'step': 2, 'payload': {'id': '...', 'name': 'b', 'input': {'alist': ['Ex for stream_mode="debug"'], 'another_list': ['hi']}, 'triggers': ['a']}}
        {'type': 'task_result', 'timestamp': '2024-06-23T...+00:00', 'step': 2, 'payload': {'id': '...', 'name': 'b', 'result': [('alist', ['there'])]}}
        ```
    """
    # Queue decouples producers (tasks running concurrently, possibly on
    # executor threads) from this consumer coroutine.
    stream = AsyncQueue()
    aioloop = asyncio.get_running_loop()
    # Thread-safe put: emissions from other threads are marshalled onto this
    # event loop via call_soon_threadsafe.
    stream_put = cast(
        Callable[[StreamChunk], None],
        partial(aioloop.call_soon_threadsafe, stream.put_nowait),
    )

    def output() -> Iterator:
        # Drain everything currently queued, shaping each chunk according to
        # the (subgraphs, stream_mode-is-list) combination requested.
        while True:
            try:
                ns, mode, payload = stream.get_nowait()
            except asyncio.QueueEmpty:
                break
            if subgraphs and isinstance(stream_mode, list):
                yield (ns, mode, payload)
            elif isinstance(stream_mode, list):
                yield (mode, payload)
            elif subgraphs:
                yield (ns, payload)
            else:
                yield payload

    config = ensure_config(self.config, config)
    callback_manager = get_async_callback_manager_for_config(config)
    run_manager = await callback_manager.on_chain_start(
        None,
        input,
        name=config.get("run_name", self.get_name()),
        run_id=config.get("run_id"),
    )
    # if running from astream_log() run each proc with streaming
    do_stream = next(
        (
            cast(_StreamingCallbackHandler, h)
            for h in run_manager.handlers
            if isinstance(h, _StreamingCallbackHandler)
        ),
        None,
    )
    try:
        # assign defaults
        (
            debug,
            stream_modes,
            output_keys,
            interrupt_before_,
            interrupt_after_,
            checkpointer,
            store,
        ) = self._defaults(
            config,
            stream_mode=stream_mode,
            output_keys=output_keys,
            interrupt_before=interrupt_before,
            interrupt_after=interrupt_after,
            debug=debug,
        )
        # set up messages stream mode
        if "messages" in stream_modes:
            run_manager.inheritable_handlers.append(
                StreamMessagesHandler(stream_put)
            )
        # set up custom stream mode
        if "custom" in stream_modes:
            config[CONF][CONFIG_KEY_STREAM_WRITER] = (
                lambda c: aioloop.call_soon_threadsafe(
                    stream.put_nowait, ((), "custom", c)
                )
            )
        async with AsyncPregelLoop(
            input,
            stream=StreamProtocol(stream.put_nowait, stream_modes),
            config=config,
            store=store,
            checkpointer=checkpointer,
            nodes=self.nodes,
            specs=self.channels,
            output_keys=output_keys,
            stream_keys=self.stream_channels_asis,
            interrupt_before=interrupt_before_,
            interrupt_after=interrupt_after_,
            manager=run_manager,
            debug=debug,
        ) as loop:
            # create runner
            runner = PregelRunner(
                submit=loop.submit,
                put_writes=loop.put_writes,
                schedule_task=loop.accept_push,
                use_astream=do_stream is not None,
                node_finished=config[CONF].get(CONFIG_KEY_NODE_FINISHED),
            )
            # enable subgraph streaming
            if subgraphs:
                loop.config[CONF][CONFIG_KEY_STREAM] = StreamProtocol(
                    stream_put, stream_modes
                )
            # enable concurrent streaming
            if subgraphs or "messages" in stream_modes or "custom" in stream_modes:

                def get_waiter() -> asyncio.Task[None]:
                    # Fresh task per call; the runner awaits it so queued
                    # output can be flushed as soon as anything arrives.
                    return aioloop.create_task(stream.wait())

            else:
                get_waiter = None  # type: ignore[assignment]
            # Similarly to Bulk Synchronous Parallel / Pregel model
            # computation proceeds in steps, while there are channel updates
            # channel updates from step N are only visible in step N+1
            # channels are guaranteed to be immutable for the duration of the step,
            # with channel updates applied only at the transition between steps
            while loop.tick(input_keys=self.input_channels):
                async for _ in runner.atick(
                    loop.tasks.values(),
                    timeout=self.step_timeout,
                    retry_policy=self.retry_policy,
                    get_waiter=get_waiter,
                ):
                    # emit output
                    for o in output():
                        yield o
            # emit output
            for o in output():
                yield o
            # handle exit
            if loop.status == "out_of_steps":
                msg = create_error_message(
                    message=(
                        f"Recursion limit of {config['recursion_limit']} reached "
                        "without hitting a stop condition. You can increase the "
                        "limit by setting the `recursion_limit` config key."
                    ),
                    error_code=ErrorCode.GRAPH_RECURSION_LIMIT,
                )
                raise GraphRecursionError(msg)
        # set final channel values as run output
        await run_manager.on_chain_end(loop.output)
    except BaseException as e:
        # shield: the error callback must complete even if this generator
        # is being cancelled/closed.
        await asyncio.shield(run_manager.on_chain_error(e))
        raise
def invoke(
    self,
    input: Union[dict[str, Any], Any],
    config: Optional[RunnableConfig] = None,
    *,
    stream_mode: StreamMode = "values",
    output_keys: Optional[Union[str, Sequence[str]]] = None,
    interrupt_before: Optional[Union[All, Sequence[str]]] = None,
    interrupt_after: Optional[Union[All, Sequence[str]]] = None,
    debug: Optional[bool] = None,
    **kwargs: Any,
) -> Union[dict[str, Any], Any]:
    """Run the graph with a single input and config.

    Args:
        input: The input data for the graph. It can be a dictionary or any other type.
        config: Optional. The configuration for the graph run.
        stream_mode: Optional[str]. The stream mode for the graph run. Default is "values".
        output_keys: Optional. The output keys to retrieve from the graph run.
        interrupt_before: Optional. The nodes to interrupt the graph run before.
        interrupt_after: Optional. The nodes to interrupt the graph run after.
        debug: Optional. Enable debug mode for the graph run.
        **kwargs: Additional keyword arguments passed through to `stream`.

    Returns:
        The output of the graph run. If stream_mode is "values", it returns the latest output.
        If stream_mode is not "values", it returns a list of output chunks.
    """
    if output_keys is None:
        output_keys = self.output_channels
    # "values" mode keeps only the most recent chunk; every other mode
    # accumulates all chunks into a list.
    keep_latest_only = stream_mode == "values"
    latest: Union[dict[str, Any], Any] = None
    collected: list = []
    for chunk in self.stream(
        input,
        config,
        stream_mode=stream_mode,
        output_keys=output_keys,
        interrupt_before=interrupt_before,
        interrupt_after=interrupt_after,
        debug=debug,
        **kwargs,
    ):
        if keep_latest_only:
            latest = chunk
        else:
            collected.append(chunk)
    return latest if keep_latest_only else collected
async def ainvoke(
    self,
    input: Union[dict[str, Any], Any],
    config: Optional[RunnableConfig] = None,
    *,
    stream_mode: StreamMode = "values",
    output_keys: Optional[Union[str, Sequence[str]]] = None,
    interrupt_before: Optional[Union[All, Sequence[str]]] = None,
    interrupt_after: Optional[Union[All, Sequence[str]]] = None,
    debug: Optional[bool] = None,
    **kwargs: Any,
) -> Union[dict[str, Any], Any]:
    """Asynchronously invoke the graph on a single input.

    Args:
        input: The input data for the computation. It can be a dictionary or any other type.
        config: Optional. The configuration for the computation.
        stream_mode: Optional. The stream mode for the computation. Default is "values".
        output_keys: Optional. The output keys to include in the result. Default is None.
        interrupt_before: Optional. The nodes to interrupt before. Default is None.
        interrupt_after: Optional. The nodes to interrupt after. Default is None.
        debug: Optional. Whether to enable debug mode. Default is None.
        **kwargs: Additional keyword arguments passed through to `astream`.

    Returns:
        The result of the computation. If stream_mode is "values", it returns the latest value.
        Otherwise it returns a list of all streamed chunks.
    """
    if output_keys is None:
        output_keys = self.output_channels
    # "values" mode keeps only the most recent chunk; every other mode
    # accumulates all chunks into a list.
    keep_latest_only = stream_mode == "values"
    latest: Union[dict[str, Any], Any] = None
    collected: list = []
    async for chunk in self.astream(
        input,
        config,
        stream_mode=stream_mode,
        output_keys=output_keys,
        interrupt_before=interrupt_before,
        interrupt_after=interrupt_after,
        debug=debug,
        **kwargs,
    ):
        if keep_latest_only:
            latest = chunk
        else:
            collected.append(chunk)
    return latest if keep_latest_only else collected
|
0 | lc_public_repos/langgraph/libs/langgraph/langgraph | lc_public_repos/langgraph/libs/langgraph/langgraph/pregel/manager.py | import asyncio
from contextlib import AsyncExitStack, ExitStack, asynccontextmanager, contextmanager
from typing import AsyncIterator, Iterator, Mapping, Union
from langgraph.channels.base import BaseChannel
from langgraph.checkpoint.base import Checkpoint
from langgraph.managed.base import (
ConfiguredManagedValue,
ManagedValueMapping,
ManagedValueSpec,
)
from langgraph.managed.context import Context
from langgraph.types import LoopProtocol
@contextmanager
def ChannelsManager(
    specs: Mapping[str, Union[BaseChannel, ManagedValueSpec]],
    checkpoint: Checkpoint,
    loop: LoopProtocol,
    *,
    skip_context: bool = False,
) -> Iterator[tuple[Mapping[str, BaseChannel], ManagedValueMapping]]:
    """Manage channels for the lifetime of a Pregel invocation (multiple steps)."""
    # Partition the specs: plain channels vs managed values. When
    # skip_context is set, Context managed values are replaced by a no-op.
    channels: dict[str, BaseChannel] = {}
    managed: dict[str, ManagedValueSpec] = {}
    for name, spec in specs.items():
        if isinstance(spec, BaseChannel):
            channels[name] = spec
        elif (
            skip_context and isinstance(spec, ConfiguredManagedValue) and spec.cls is Context
        ):
            managed[name] = Context.of(noop_context)
        else:
            managed[name] = spec
    with ExitStack() as stack:
        # Restore each channel from the checkpointed values.
        restored = {
            name: chan.from_checkpoint(checkpoint["channel_values"].get(name))
            for name, chan in channels.items()
        }
        # Enter each managed value's context for the duration of the run.
        entered: dict = {}
        for name, spec in managed.items():
            cm = (
                spec.cls.enter(loop, **spec.kwargs)
                if isinstance(spec, ConfiguredManagedValue)
                else spec.enter(loop)
            )
            entered[name] = stack.enter_context(cm)
        yield (restored, ManagedValueMapping(entered))
@asynccontextmanager
async def AsyncChannelsManager(
    specs: Mapping[str, Union[BaseChannel, ManagedValueSpec]],
    checkpoint: Checkpoint,
    loop: LoopProtocol,
    *,
    skip_context: bool = False,
) -> AsyncIterator[tuple[Mapping[str, BaseChannel], ManagedValueMapping]]:
    """Manage channels for the lifetime of a Pregel invocation (multiple steps)."""
    # Partition specs into plain channels and managed-value specs.
    channel_specs: dict[str, BaseChannel] = {}
    managed_specs: dict[str, ManagedValueSpec] = {}
    for k, v in specs.items():
        if isinstance(v, BaseChannel):
            channel_specs[k] = v
        elif (
            skip_context and isinstance(v, ConfiguredManagedValue) and v.cls is Context
        ):
            # Caller asked to skip entering Context values: swap in a no-op.
            managed_specs[k] = Context.of(noop_context)
        else:
            managed_specs[k] = v
    async with AsyncExitStack() as stack:
        # managed: create enter tasks with reference to spec, await them
        # (the dict maps task -> key so results can be matched back below)
        if tasks := {
            asyncio.create_task(
                stack.enter_async_context(
                    value.cls.aenter(loop, **value.kwargs)
                    if isinstance(value, ConfiguredManagedValue)
                    else value.aenter(loop)
                )
            ): key
            for key, value in managed_specs.items()
        }:
            done, _ = await asyncio.wait(tasks, return_when=asyncio.ALL_COMPLETED)
        else:
            done = set()
        yield (
            # channels: enter each channel with checkpoint
            {
                k: v.from_checkpoint(checkpoint["channel_values"].get(k))
                for k, v in channel_specs.items()
            },
            # managed: build mapping from spec to result
            ManagedValueMapping({tasks[task]: task.result() for task in done}),
        )
@contextmanager
def noop_context() -> Iterator[None]:
    """A context manager that does nothing.

    Substituted for a real ``Context`` managed value when ``skip_context``
    is requested; always yields ``None``.
    """
    yield
|
0 | lc_public_repos/langgraph/libs/langgraph/langgraph | lc_public_repos/langgraph/libs/langgraph/langgraph/pregel/protocol.py | from abc import ABC, abstractmethod
from typing import (
Any,
AsyncIterator,
Iterator,
Optional,
Sequence,
Union,
)
from langchain_core.runnables import Runnable, RunnableConfig
from langchain_core.runnables.graph import Graph as DrawableGraph
from typing_extensions import Self
from langgraph.pregel.types import All, StateSnapshot, StreamMode
class PregelProtocol(
    Runnable[Union[dict[str, Any], Any], Union[dict[str, Any], Any]], ABC
):
    """Abstract interface implemented by Pregel-style runnable graphs.

    Declares the sync/async pairs for configuration, drawing, state access,
    state mutation, streaming and invocation; implementations supply the
    behavior.
    """

    @abstractmethod
    def with_config(
        self, config: Optional[RunnableConfig] = None, **kwargs: Any
    ) -> Self:
        """Return a copy of this graph bound to the given config."""
        ...

    @abstractmethod
    def get_graph(
        self,
        config: Optional[RunnableConfig] = None,
        *,
        xray: Union[int, bool] = False,
    ) -> DrawableGraph:
        """Return a drawable representation of the graph."""
        ...

    @abstractmethod
    async def aget_graph(
        self,
        config: Optional[RunnableConfig] = None,
        *,
        xray: Union[int, bool] = False,
    ) -> DrawableGraph:
        """Async version of `get_graph`."""
        ...

    @abstractmethod
    def get_state(
        self, config: RunnableConfig, *, subgraphs: bool = False
    ) -> StateSnapshot:
        """Return a snapshot of the current graph state for the given config."""
        ...

    @abstractmethod
    async def aget_state(
        self, config: RunnableConfig, *, subgraphs: bool = False
    ) -> StateSnapshot:
        """Async version of `get_state`."""
        ...

    @abstractmethod
    def get_state_history(
        self,
        config: RunnableConfig,
        *,
        filter: Optional[dict[str, Any]] = None,
        before: Optional[RunnableConfig] = None,
        limit: Optional[int] = None,
    ) -> Iterator[StateSnapshot]:
        """Iterate over past state snapshots, newest first."""
        ...

    @abstractmethod
    def aget_state_history(
        self,
        config: RunnableConfig,
        *,
        filter: Optional[dict[str, Any]] = None,
        before: Optional[RunnableConfig] = None,
        limit: Optional[int] = None,
    ) -> AsyncIterator[StateSnapshot]:
        """Async version of `get_state_history`."""
        ...

    @abstractmethod
    def update_state(
        self,
        config: RunnableConfig,
        values: Optional[Union[dict[str, Any], Any]],
        as_node: Optional[str] = None,
    ) -> RunnableConfig:
        """Apply `values` to the saved state, optionally as if written by `as_node`."""
        ...

    @abstractmethod
    async def aupdate_state(
        self,
        config: RunnableConfig,
        values: Optional[Union[dict[str, Any], Any]],
        as_node: Optional[str] = None,
    ) -> RunnableConfig:
        """Async version of `update_state`."""
        ...

    @abstractmethod
    def stream(
        self,
        input: Union[dict[str, Any], Any],
        config: Optional[RunnableConfig] = None,
        *,
        stream_mode: Optional[Union[StreamMode, list[StreamMode]]] = None,
        interrupt_before: Optional[Union[All, Sequence[str]]] = None,
        interrupt_after: Optional[Union[All, Sequence[str]]] = None,
        subgraphs: bool = False,
    ) -> Iterator[Union[dict[str, Any], Any]]:
        """Stream graph steps for a single input."""
        ...

    @abstractmethod
    def astream(
        self,
        input: Union[dict[str, Any], Any],
        config: Optional[RunnableConfig] = None,
        *,
        stream_mode: Optional[Union[StreamMode, list[StreamMode]]] = None,
        interrupt_before: Optional[Union[All, Sequence[str]]] = None,
        interrupt_after: Optional[Union[All, Sequence[str]]] = None,
        subgraphs: bool = False,
    ) -> AsyncIterator[Union[dict[str, Any], Any]]:
        """Async version of `stream`."""
        ...

    @abstractmethod
    def invoke(
        self,
        input: Union[dict[str, Any], Any],
        config: Optional[RunnableConfig] = None,
        *,
        interrupt_before: Optional[Union[All, Sequence[str]]] = None,
        interrupt_after: Optional[Union[All, Sequence[str]]] = None,
    ) -> Union[dict[str, Any], Any]:
        """Run the graph to completion on a single input and return the output."""
        ...

    @abstractmethod
    async def ainvoke(
        self,
        input: Union[dict[str, Any], Any],
        config: Optional[RunnableConfig] = None,
        *,
        interrupt_before: Optional[Union[All, Sequence[str]]] = None,
        interrupt_after: Optional[Union[All, Sequence[str]]] = None,
    ) -> Union[dict[str, Any], Any]:
        """Async version of `invoke`."""
        ...
|
0 | lc_public_repos/langgraph/libs/langgraph/langgraph | lc_public_repos/langgraph/libs/langgraph/langgraph/prebuilt/tool_executor.py | from typing import Any, Callable, Sequence, Union
from langchain_core.load.serializable import Serializable
from langchain_core.runnables import RunnableConfig
from langchain_core.tools import BaseTool
from langchain_core.tools import tool as create_tool
from langgraph._api.deprecation import deprecated
from langgraph.utils.runnable import RunnableCallable
# Default message returned when a requested tool name is not registered with
# the executor; formatted with the requested name and the available names.
INVALID_TOOL_MSG_TEMPLATE = (
    "{requested_tool_name} is not a valid tool, "
    "try one of [{available_tool_names_str}]."
)
@deprecated("0.2.0", "langgraph.prebuilt.ToolNode", removal="0.3.0")
class ToolInvocationInterface:
    """Interface for invoking a tool.

    Structural twin of `ToolInvocation`; anything with these two attributes
    can be passed to `ToolExecutor`.

    Attributes:
        tool (str): The name of the tool to invoke.
        tool_input (Union[str, dict]): The input to pass to the tool.
    """

    # Name of the tool to invoke.
    tool: str
    # Input forwarded to the tool.
    tool_input: Union[str, dict]
@deprecated("0.2.0", "langgraph.prebuilt.ToolNode", removal="0.3.0")
class ToolInvocation(Serializable):
    """Information about how to invoke a tool.

    Attributes:
        tool (str): The name of the Tool to execute.
        tool_input (Union[str, dict]): The input to pass in to the Tool.

    Examples:
        Basic usage:
        ```pycon
        >>> invocation = ToolInvocation(
        ...     tool="search",
        ...     tool_input="What is the capital of France?"
        ... )
        ```
    """

    # Name of the Tool to execute.
    tool: str
    # Input forwarded to the Tool.
    tool_input: Union[str, dict]
@deprecated("0.2.0", "langgraph.prebuilt.ToolNode", removal="0.3.0")
class ToolExecutor(RunnableCallable):
    """Executes a tool invocation.

    Args:
        tools (Sequence[BaseTool]): A sequence of tools that can be invoked.
        invalid_tool_msg_template (str, optional): The template for the error message
            when an invalid tool is requested. Defaults to INVALID_TOOL_MSG_TEMPLATE.

    Examples:
        Basic usage:
        ```pycon
        >>> from langchain_core.tools import tool
        >>> from langgraph.prebuilt.tool_executor import ToolExecutor, ToolInvocation
        ...
        ...
        >>> @tool
        ... def search(query: str) -> str:
        ...     \"\"\"Search engine.\"\"\"
        ...     return f"Searching for: {query}"
        ...
        ...
        >>> tools = [search]
        >>> executor = ToolExecutor(tools)
        ...
        >>> invocation = ToolInvocation(tool="search", tool_input="What is the capital of France?")
        >>> result = executor.invoke(invocation)
        >>> print(result)
        "Searching for: What is the capital of France?"
        ```

        Handling invalid tool:
        ```pycon
        >>> invocation = ToolInvocation(
        ...     tool="nonexistent", tool_input="What is the capital of France?"
        ... )
        >>> result = executor.invoke(invocation)
        >>> print(result)
        "nonexistent is not a valid tool, try one of [search]."
        ```
    """

    def __init__(
        self,
        tools: Sequence[Union[BaseTool, Callable]],
        *,
        invalid_tool_msg_template: str = INVALID_TOOL_MSG_TEMPLATE,
    ) -> None:
        # Register both sync and async entry points with the RunnableCallable base.
        super().__init__(self._execute, afunc=self._aexecute, trace=False)
        # Plain callables are wrapped into BaseTool instances.
        tools_ = [
            tool if isinstance(tool, BaseTool) else create_tool(tool) for tool in tools
        ]
        self.tools = tools_
        self.tool_map = {t.name: t for t in tools_}
        self.invalid_tool_msg_template = invalid_tool_msg_template

    def _invalid_tool_message(self, requested_tool_name: str) -> str:
        """Format the error message for an unknown tool name.

        Shared by the sync and async paths so the wording cannot drift
        between them.
        """
        return self.invalid_tool_msg_template.format(
            requested_tool_name=requested_tool_name,
            available_tool_names_str=", ".join([t.name for t in self.tools]),
        )

    def _execute(
        self, tool_invocation: ToolInvocationInterface, config: RunnableConfig
    ) -> Any:
        """Synchronously invoke the requested tool, or return an error message."""
        if tool_invocation.tool not in self.tool_map:
            # Unknown tool: return a message rather than raising, so the model
            # can see the available options and retry.
            return self._invalid_tool_message(tool_invocation.tool)
        tool = self.tool_map[tool_invocation.tool]
        return tool.invoke(tool_invocation.tool_input, config)

    async def _aexecute(
        self, tool_invocation: ToolInvocationInterface, config: RunnableConfig
    ) -> Any:
        """Asynchronously invoke the requested tool, or return an error message."""
        if tool_invocation.tool not in self.tool_map:
            return self._invalid_tool_message(tool_invocation.tool)
        tool = self.tool_map[tool_invocation.tool]
        return await tool.ainvoke(tool_invocation.tool_input, config)
|
0 | lc_public_repos/langgraph/libs/langgraph/langgraph | lc_public_repos/langgraph/libs/langgraph/langgraph/prebuilt/tool_validator.py | """This module provides a ValidationNode class that can be used to validate tool calls
in a langchain graph. It applies a pydantic schema to tool_calls in the models' outputs,
and returns a ToolMessage with the validated content. If the schema is not valid, it
returns a ToolMessage with the error message. The ValidationNode can be used in a
StateGraph with a "messages" key or in a MessageGraph. If multiple tool calls are
requested, they will be run in parallel.
"""
from typing import (
Any,
Callable,
Dict,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
)
from langchain_core.messages import (
AIMessage,
AnyMessage,
ToolCall,
ToolMessage,
)
from langchain_core.runnables import (
RunnableConfig,
)
from langchain_core.runnables.config import get_executor_for_config
from langchain_core.tools import BaseTool, create_schema_from_function
from pydantic import BaseModel, ValidationError
from pydantic.v1 import BaseModel as BaseModelV1
from pydantic.v1 import ValidationError as ValidationErrorV1
from langgraph.utils.runnable import RunnableCallable
def _default_format_error(
    error: BaseException,
    call: ToolCall,
    schema: Union[Type[BaseModel], Type[BaseModelV1]],
) -> str:
    """Default error formatting function.

    Returns the exception repr followed by an instruction to fix the
    validation errors; `call` and `schema` are accepted for interface
    compatibility but unused here.
    """
    return "{}\n\nRespond after fixing all validation errors.".format(repr(error))
class ValidationNode(RunnableCallable):
    """A node that validates all tools requests from the last AIMessage.

    It can be used either in StateGraph with a "messages" key or in MessageGraph.

    !!! note

        This node does not actually **run** the tools, it only validates the tool calls,
        which is useful for extraction and other use cases where you need to generate
        structured output that conforms to a complex schema without losing the original
        messages and tool IDs (for use in multi-turn conversations).

    Args:
        schemas: A list of schemas to validate the tool calls with. These can be
            any of the following:
            - A pydantic BaseModel class
            - A BaseTool instance (the args_schema will be used)
            - A function (a schema will be created from the function signature)
        format_error: A function that takes an exception, a ToolCall, and a schema
            and returns a formatted error string. By default, it returns the
            exception repr and a message to respond after fixing validation errors.
        name: The name of the node.
        tags: A list of tags to add to the node.

    Returns:
        (Union[Dict[str, List[ToolMessage]], Sequence[ToolMessage]]): A list of
        ToolMessages with the validated content or error messages.

    Examples:
        Example usage for re-prompting the model to generate a valid response:

        >>> from typing import Literal, Annotated, TypedDict
        ...
        >>> from langchain_anthropic import ChatAnthropic
        >>> from pydantic import BaseModel, validator
        ...
        >>> from langgraph.graph import END, START, StateGraph
        >>> from langgraph.prebuilt import ValidationNode
        >>> from langgraph.graph.message import add_messages
        ...
        ...
        >>> class SelectNumber(BaseModel):
        ...     a: int
        ...
        ...     @validator("a")
        ...     def a_must_be_meaningful(cls, v):
        ...         if v != 37:
        ...             raise ValueError("Only 37 is allowed")
        ...         return v
        ...
        ...
        >>> class State(TypedDict):
        ...     messages: Annotated[list, add_messages]
        ...
        >>> builder = StateGraph(State)
        >>> llm = ChatAnthropic(model="claude-3-haiku-20240307").bind_tools([SelectNumber])
        >>> builder.add_node("model", llm)
        >>> builder.add_node("validation", ValidationNode([SelectNumber]))
        >>> builder.add_edge(START, "model")
        ...
        ...
        >>> def should_validate(state: list) -> Literal["validation", "__end__"]:
        ...     if state[-1].tool_calls:
        ...         return "validation"
        ...     return END
        ...
        ...
        >>> builder.add_conditional_edges("model", should_validate)
        ...
        ...
        >>> def should_reprompt(state: list) -> Literal["model", "__end__"]:
        ...     for msg in state[::-1]:
        ...         # None of the tool calls were errors
        ...         if msg.type == "ai":
        ...             return END
        ...         if msg.additional_kwargs.get("is_error"):
        ...             return "model"
        ...     return END
        ...
        ...
        >>> builder.add_conditional_edges("validation", should_reprompt)
        ...
        ...
        >>> graph = builder.compile()
        >>> res = graph.invoke(("user", "Select a number, any number"))
        >>> # Show the retry logic
        >>> for msg in res:
        ...     msg.pretty_print()
        ================================ Human Message =================================
        Select a number, any number
        ================================== Ai Message ==================================
        [{'id': 'toolu_01JSjT9Pq8hGmTgmMPc6KnvM', 'input': {'a': 42}, 'name': 'SelectNumber', 'type': 'tool_use'}]
        Tool Calls:
          SelectNumber (toolu_01JSjT9Pq8hGmTgmMPc6KnvM)
         Call ID: toolu_01JSjT9Pq8hGmTgmMPc6KnvM
          Args:
            a: 42
        ================================= Tool Message =================================
        Name: SelectNumber
        ValidationError(model='SelectNumber', errors=[{'loc': ('a',), 'msg': 'Only 37 is allowed', 'type': 'value_error'}])
        Respond after fixing all validation errors.
        ================================== Ai Message ==================================
        [{'id': 'toolu_01PkxSVxNxc5wqwCPW1FiSmV', 'input': {'a': 37}, 'name': 'SelectNumber', 'type': 'tool_use'}]
        Tool Calls:
          SelectNumber (toolu_01PkxSVxNxc5wqwCPW1FiSmV)
         Call ID: toolu_01PkxSVxNxc5wqwCPW1FiSmV
          Args:
            a: 37
        ================================= Tool Message =================================
        Name: SelectNumber
        {"a": 37}
    """

    def __init__(
        self,
        schemas: Sequence[Union[BaseTool, Type[BaseModel], Callable]],
        *,
        format_error: Optional[
            Callable[[BaseException, ToolCall, Type[BaseModel]], str]
        ] = None,
        name: str = "validation",
        tags: Optional[list[str]] = None,
    ) -> None:
        # Only a sync func is registered; validation is CPU-only.
        super().__init__(self._func, None, name=name, tags=tags, trace=False)
        self._format_error = format_error or _default_format_error
        # Maps tool-call name -> pydantic model class used to validate its args.
        self.schemas_by_name: Dict[str, Type[BaseModel]] = {}
        for schema in schemas:
            if isinstance(schema, BaseTool):
                if schema.args_schema is None:
                    raise ValueError(
                        f"Tool {schema.name} does not have an args_schema defined."
                    )
                self.schemas_by_name[schema.name] = schema.args_schema
            elif isinstance(schema, type) and issubclass(
                schema, (BaseModel, BaseModelV1)
            ):
                # Accepts both pydantic v2 and v1 model classes.
                self.schemas_by_name[schema.__name__] = cast(Type[BaseModel], schema)
            elif callable(schema):
                # Derive a schema from the function's signature.
                base_model = create_schema_from_function("Validation", schema)
                self.schemas_by_name[schema.__name__] = base_model
            else:
                raise ValueError(
                    f"Unsupported input to ValidationNode. Expected BaseModel, tool or function. Got: {type(schema)}."
                )

    def _get_message(
        self, input: Union[list[AnyMessage], dict[str, Any]]
    ) -> Tuple[str, AIMessage]:
        """Extract the last AIMessage from the input.

        Returns the input shape ("list" or "dict") alongside the message so
        `_func` can mirror that shape on output.
        """
        if isinstance(input, list):
            output_type = "list"
            messages: list = input
        elif messages := input.get("messages", []):
            output_type = "dict"
        else:
            raise ValueError("No message found in input")
        message: AnyMessage = messages[-1]
        if not isinstance(message, AIMessage):
            raise ValueError("Last message is not an AIMessage")
        return output_type, message

    def _func(
        self, input: Union[list[AnyMessage], dict[str, Any]], config: RunnableConfig
    ) -> Any:
        """Validate and run tool calls synchronously."""
        output_type, message = self._get_message(input)

        def run_one(call: ToolCall) -> ToolMessage:
            # Validate a single tool call against its registered schema.
            schema = self.schemas_by_name[call["name"]]
            try:
                if issubclass(schema, BaseModel):
                    # pydantic v2 path
                    output = schema.model_validate(call["args"])
                    content = output.model_dump_json()
                elif issubclass(schema, BaseModelV1):
                    # pydantic v1 path
                    output = schema.validate(call["args"])
                    content = output.json()
                else:
                    raise ValueError(
                        f"Unsupported schema type: {type(schema)}. Expected BaseModel or BaseModelV1."
                    )
                return ToolMessage(
                    content=content,
                    name=call["name"],
                    tool_call_id=cast(str, call["id"]),
                )
            except (ValidationError, ValidationErrorV1) as e:
                # Return the formatted error so the model can retry;
                # is_error lets downstream routing detect the failure.
                return ToolMessage(
                    content=self._format_error(e, call, schema),
                    name=call["name"],
                    tool_call_id=cast(str, call["id"]),
                    additional_kwargs={"is_error": True},
                )

        # Validate all tool calls in parallel.
        with get_executor_for_config(config) as executor:
            outputs = [*executor.map(run_one, message.tool_calls)]
            if output_type == "list":
                return outputs
            else:
                return {"messages": outputs}
|
0 | lc_public_repos/langgraph/libs/langgraph/langgraph | lc_public_repos/langgraph/libs/langgraph/langgraph/prebuilt/chat_agent_executor.py | from typing import Callable, Literal, Optional, Sequence, Type, TypeVar, Union, cast
from langchain_core.language_models import BaseChatModel, LanguageModelLike
from langchain_core.messages import AIMessage, BaseMessage, SystemMessage, ToolMessage
from langchain_core.runnables import (
Runnable,
RunnableBinding,
RunnableConfig,
)
from langchain_core.tools import BaseTool
from typing_extensions import Annotated, TypedDict
from langgraph._api.deprecation import deprecated_parameter
from langgraph.errors import ErrorCode, create_error_message
from langgraph.graph import StateGraph
from langgraph.graph.graph import CompiledGraph
from langgraph.graph.message import add_messages
from langgraph.managed import IsLastStep, RemainingSteps
from langgraph.prebuilt.tool_executor import ToolExecutor
from langgraph.prebuilt.tool_node import ToolNode
from langgraph.store.base import BaseStore
from langgraph.types import Checkpointer
from langgraph.utils.runnable import RunnableCallable
# We create the AgentState that we will pass around.
# This simply involves a list of messages.
# We want steps to return messages to append to the list,
# so we annotate the messages attribute with the add_messages reducer.
class AgentState(TypedDict):
    """The state of the agent."""

    # Conversation history; add_messages merges each node's output into it.
    messages: Annotated[Sequence[BaseMessage], add_messages]
    # Managed value: True when the graph is on its final permitted step.
    is_last_step: IsLastStep
    # Managed value: steps remaining before the recursion limit is reached.
    remaining_steps: RemainingSteps
# Type variable bound to AgentState so custom state schemas stay compatible.
StateSchema = TypeVar("StateSchema", bound=AgentState)
StateSchemaType = Type[StateSchema]

# Name given to the prompt-preparation runnable (visible in traces).
STATE_MODIFIER_RUNNABLE_NAME = "StateModifier"

# Accepted forms for the (deprecated) messages_modifier argument:
# operates on the message list only.
MessagesModifier = Union[
    SystemMessage,
    str,
    Callable[[Sequence[BaseMessage]], Sequence[BaseMessage]],
    Runnable[Sequence[BaseMessage], Sequence[BaseMessage]],
]

# Accepted forms for the state_modifier argument: operates on full graph state.
StateModifier = Union[
    SystemMessage,
    str,
    Callable[[StateSchema], Sequence[BaseMessage]],
    Runnable[StateSchema, Sequence[BaseMessage]],
]
def _get_state_modifier_runnable(
    state_modifier: Optional[StateModifier], store: Optional[BaseStore] = None
) -> Runnable:
    """Normalize any supported ``state_modifier`` form into a Runnable.

    None -> pass state["messages"] through; str/SystemMessage -> prepend a
    system message; callable -> wrap as-is; Runnable -> use directly.
    """
    if state_modifier is None:
        return RunnableCallable(
            lambda state: state["messages"], name=STATE_MODIFIER_RUNNABLE_NAME
        )
    if isinstance(state_modifier, str):
        system_msg: BaseMessage = SystemMessage(content=state_modifier)
        return RunnableCallable(
            lambda state: [system_msg] + state["messages"],
            name=STATE_MODIFIER_RUNNABLE_NAME,
        )
    if isinstance(state_modifier, SystemMessage):
        return RunnableCallable(
            lambda state: [state_modifier] + state["messages"],
            name=STATE_MODIFIER_RUNNABLE_NAME,
        )
    # Check plain callables before Runnable, matching the accepted forms
    # of StateModifier in declaration order.
    if callable(state_modifier):
        return RunnableCallable(
            state_modifier,
            name=STATE_MODIFIER_RUNNABLE_NAME,
        )
    if isinstance(state_modifier, Runnable):
        return state_modifier
    raise ValueError(
        f"Got unexpected type for `state_modifier`: {type(state_modifier)}"
    )
def _convert_messages_modifier_to_state_modifier(
    messages_modifier: MessagesModifier,
) -> StateModifier:
    """Adapt a messages-based modifier so it can run against full graph state."""
    # Strings and SystemMessages are handled identically by the state path.
    if isinstance(messages_modifier, (str, SystemMessage)):
        return messages_modifier
    if callable(messages_modifier):

        def _apply(state: AgentState) -> Sequence[BaseMessage]:
            # Feed only the message list to the wrapped callable.
            return messages_modifier(state["messages"])

        return _apply
    if isinstance(messages_modifier, Runnable):
        # Project state down to its messages before invoking the runnable.
        return (lambda state: state["messages"]) | messages_modifier
    raise ValueError(
        f"Got unexpected type for `messages_modifier`: {type(messages_modifier)}"
    )
def _get_model_preprocessing_runnable(
    state_modifier: Optional[StateModifier],
    messages_modifier: Optional[MessagesModifier],
    store: Optional[BaseStore],
) -> Runnable:
    """Build the runnable that turns graph state into model input.

    At most one of ``state_modifier`` / ``messages_modifier`` may be set; a
    messages modifier is first adapted to a state modifier.
    """
    if messages_modifier is not None:
        if state_modifier is not None:
            raise ValueError(
                "Expected value for either state_modifier or messages_modifier, got values for both"
            )
        state_modifier = _convert_messages_modifier_to_state_modifier(messages_modifier)
    return _get_state_modifier_runnable(state_modifier, store)
def _should_bind_tools(model: LanguageModelLike, tools: Sequence[BaseTool]) -> bool:
    """Decide whether ``bind_tools`` must still be called on the model.

    Returns True when the model has no tools bound yet. When tools are
    already bound, verifies they match ``tools`` and returns False; raises
    ValueError on any count or name mismatch.
    """
    if not isinstance(model, RunnableBinding) or "tools" not in model.kwargs:
        return True

    already_bound = model.kwargs["tools"]
    if len(tools) != len(already_bound):
        raise ValueError(
            "Number of tools in the model.bind_tools() and tools passed to create_react_agent must match"
        )

    expected_names = {tool.name for tool in tools}
    seen_names: set = set()
    for spec in already_bound:
        if spec.get("type") == "function":
            # OpenAI-style tool spec
            seen_names.add(spec["function"]["name"])
        elif spec.get("name"):
            # Anthropic-style tool spec
            seen_names.add(spec["name"])
        # Any other shape is an unknown tool type and is ignored.

    if missing_tools := expected_names - seen_names:
        raise ValueError(f"Missing tools '{missing_tools}' in the model.bind_tools()")
    return False
def _validate_chat_history(
    messages: Sequence[BaseMessage],
) -> None:
    """Validate that all tool calls in AIMessages have a corresponding ToolMessage."""
    # Single pass: collect requested tool calls and answered tool_call_ids.
    requested: list = []
    answered_ids = set()
    for message in messages:
        if isinstance(message, ToolMessage):
            answered_ids.add(message.tool_call_id)
        elif isinstance(message, AIMessage):
            requested.extend(message.tool_calls)

    tool_calls_without_results = [
        tool_call for tool_call in requested if tool_call["id"] not in answered_ids
    ]
    if not tool_calls_without_results:
        return

    error_message = create_error_message(
        message="Found AIMessages with tool_calls that do not have a corresponding ToolMessage. "
        f"Here are the first few of those tool calls: {tool_calls_without_results[:3]}.\n\n"
        "Every tool call (LLM requesting to call a tool) in the message history MUST have a corresponding ToolMessage "
        "(result of a tool invocation to return to the LLM) - this is required by most LLM providers.",
        error_code=ErrorCode.INVALID_CHAT_HISTORY,
    )
    raise ValueError(error_message)
@deprecated_parameter("messages_modifier", "0.1.9", "state_modifier", removal="0.3.0")
def create_react_agent(
    model: LanguageModelLike,
    tools: Union[ToolExecutor, Sequence[BaseTool], ToolNode],
    *,
    state_schema: Optional[StateSchemaType] = None,
    messages_modifier: Optional[MessagesModifier] = None,
    state_modifier: Optional[StateModifier] = None,
    checkpointer: Optional[Checkpointer] = None,
    store: Optional[BaseStore] = None,
    interrupt_before: Optional[list[str]] = None,
    interrupt_after: Optional[list[str]] = None,
    debug: bool = False,
) -> CompiledGraph:
    """Creates a graph that works with a chat model that utilizes tool calling.

    Args:
        model: The `LangChain` chat model that supports tool calling.
        tools: A list of tools, a ToolExecutor, or a ToolNode instance.
            If an empty list is provided, the agent will consist of a single LLM node without tool calling.
        state_schema: An optional state schema that defines graph state.
            Must have `messages` and `is_last_step` keys.
            Defaults to `AgentState` that defines those two keys.
        messages_modifier: An optional
            messages modifier. This applies to messages BEFORE they are passed into the LLM.
            Can take a few different forms:

            - SystemMessage: this is added to the beginning of the list of messages.
            - str: This is converted to a SystemMessage and added to the beginning of the list of messages.
            - Callable: This function should take in a list of messages and the output is then passed to the language model.
            - Runnable: This runnable should take in a list of messages and the output is then passed to the language model.

            !!! Warning
                `messages_modifier` parameter is deprecated as of version 0.1.9 and will be removed in 0.3.0
        state_modifier: An optional
            state modifier. This takes full graph state BEFORE the LLM is called and prepares the input to LLM.
            Can take a few different forms:

            - SystemMessage: this is added to the beginning of the list of messages in state["messages"].
            - str: This is converted to a SystemMessage and added to the beginning of the list of messages in state["messages"].
            - Callable: This function should take in full graph state and the output is then passed to the language model.
            - Runnable: This runnable should take in full graph state and the output is then passed to the language model.
        checkpointer: An optional checkpoint saver object. This is used for persisting
            the state of the graph (e.g., as chat memory) for a single thread (e.g., a single conversation).
        store: An optional store object. This is used for persisting data
            across multiple threads (e.g., multiple conversations / users).
        interrupt_before: An optional list of node names to interrupt before.
            Should be one of the following: "agent", "tools".
            This is useful if you want to add a user confirmation or other interrupt before taking an action.
        interrupt_after: An optional list of node names to interrupt after.
            Should be one of the following: "agent", "tools".
            This is useful if you want to return directly or run additional processing on an output.
        debug: A flag indicating whether to enable debug mode.

    Returns:
        A compiled LangChain runnable that can be used for chat interactions.

    The resulting graph looks like this:

    ``` mermaid
    stateDiagram-v2
        [*] --> Start
        Start --> Agent
        Agent --> Tools : continue
        Tools --> Agent
        Agent --> End : end
        End --> [*]

        classDef startClass fill:#ffdfba;
        classDef endClass fill:#baffc9;
        classDef otherClass fill:#fad7de;

        class Start startClass
        class End endClass
        class Agent,Tools otherClass
    ```

    The "agent" node calls the language model with the messages list (after applying the messages modifier).
    If the resulting AIMessage contains `tool_calls`, the graph will then call the ["tools"][langgraph.prebuilt.tool_node.ToolNode].
    The "tools" node executes the tools (1 tool per `tool_call`) and adds the responses to the messages list
    as `ToolMessage` objects. The agent node then calls the language model again.
    The process repeats until no more `tool_calls` are present in the response.
    The agent then returns the full list of messages as a dictionary containing the key "messages".

    ``` mermaid
        sequenceDiagram
            participant U as User
            participant A as Agent (LLM)
            participant T as Tools
            U->>A: Initial input
            Note over A: Messages modifier + LLM
            loop while tool_calls present
                A->>T: Execute tools
                T-->>A: ToolMessage for each tool_calls
            end
            A->>U: Return final state
    ```

    Examples:
        Use with a simple tool:

        ```pycon
        >>> from datetime import datetime
        >>> from langchain_openai import ChatOpenAI
        >>> from langgraph.prebuilt import create_react_agent


        ... def check_weather(location: str, at_time: datetime | None = None) -> str:
        ...     '''Return the weather forecast for the specified location.'''
        ...     return f"It's always sunny in {location}"
        >>>
        >>> tools = [check_weather]
        >>> model = ChatOpenAI(model="gpt-4o")
        >>> graph = create_react_agent(model, tools=tools)
        >>> inputs = {"messages": [("user", "what is the weather in sf")]}
        >>> for s in graph.stream(inputs, stream_mode="values"):
        ...     message = s["messages"][-1]
        ...     if isinstance(message, tuple):
        ...         print(message)
        ...     else:
        ...         message.pretty_print()
        ('user', 'what is the weather in sf')
        ================================== Ai Message ==================================
        Tool Calls:
        check_weather (call_LUzFvKJRuaWQPeXvBOzwhQOu)
        Call ID: call_LUzFvKJRuaWQPeXvBOzwhQOu
        Args:
            location: San Francisco
        ================================= Tool Message =================================
        Name: check_weather
        It's always sunny in San Francisco
        ================================== Ai Message ==================================
        The weather in San Francisco is sunny.
        ```
        Add a system prompt for the LLM:

        ```pycon
        >>> system_prompt = "You are a helpful bot named Fred."
        >>> graph = create_react_agent(model, tools, state_modifier=system_prompt)
        >>> inputs = {"messages": [("user", "What's your name? And what's the weather in SF?")]}
        >>> for s in graph.stream(inputs, stream_mode="values"):
        ...     message = s["messages"][-1]
        ...     if isinstance(message, tuple):
        ...         print(message)
        ...     else:
        ...         message.pretty_print()
        ('user', "What's your name? And what's the weather in SF?")
        ================================== Ai Message ==================================
        Hi, my name is Fred. Let me check the weather in San Francisco for you.
        Tool Calls:
        check_weather (call_lqhj4O0hXYkW9eknB4S41EXk)
        Call ID: call_lqhj4O0hXYkW9eknB4S41EXk
        Args:
            location: San Francisco
        ================================= Tool Message =================================
        Name: check_weather
        It's always sunny in San Francisco
        ================================== Ai Message ==================================
        The weather in San Francisco is currently sunny. If you need any more details or have other questions, feel free to ask!
        ```

        Add a more complex prompt for the LLM:

        ```pycon
        >>> from langchain_core.prompts import ChatPromptTemplate
        >>> prompt = ChatPromptTemplate.from_messages([
        ...     ("system", "You are a helpful bot named Fred."),
        ...     ("placeholder", "{messages}"),
        ...     ("user", "Remember, always be polite!"),
        ... ])
        >>> def format_for_model(state: AgentState):
        ...     # You can do more complex modifications here
        ...     return prompt.invoke({"messages": state["messages"]})
        >>>
        >>> graph = create_react_agent(model, tools, state_modifier=format_for_model)
        >>> inputs = {"messages": [("user", "What's your name? And what's the weather in SF?")]}
        >>> for s in graph.stream(inputs, stream_mode="values"):
        ...     message = s["messages"][-1]
        ...     if isinstance(message, tuple):
        ...         print(message)
        ...     else:
        ...         message.pretty_print()
        ```

        Add complex prompt with custom graph state:

        ```pycon
        >>> from typing import TypedDict
        >>> prompt = ChatPromptTemplate.from_messages(
        ...     [
        ...         ("system", "Today is {today}"),
        ...         ("placeholder", "{messages}"),
        ...     ]
        ... )
        >>>
        >>> class CustomState(TypedDict):
        ...     today: str
        ...     messages: Annotated[list[BaseMessage], add_messages]
        ...     is_last_step: str
        >>>
        >>> graph = create_react_agent(model, tools, state_schema=CustomState, state_modifier=prompt)
        >>> inputs = {"messages": [("user", "What's today's date? And what's the weather in SF?")], "today": "July 16, 2004"}
        >>> for s in graph.stream(inputs, stream_mode="values"):
        ...     message = s["messages"][-1]
        ...     if isinstance(message, tuple):
        ...         print(message)
        ...     else:
        ...         message.pretty_print()
        ```

        Add thread-level "chat memory" to the graph:

        ```pycon
        >>> from langgraph.checkpoint.memory import MemorySaver
        >>> graph = create_react_agent(model, tools, checkpointer=MemorySaver())
        >>> config = {"configurable": {"thread_id": "thread-1"}}
        >>> def print_stream(graph, inputs, config):
        ...     for s in graph.stream(inputs, config, stream_mode="values"):
        ...         message = s["messages"][-1]
        ...         if isinstance(message, tuple):
        ...             print(message)
        ...         else:
        ...             message.pretty_print()
        >>> inputs = {"messages": [("user", "What's the weather in SF?")]}
        >>> print_stream(graph, inputs, config)
        >>> inputs2 = {"messages": [("user", "Cool, so then should i go biking today?")]}
        >>> print_stream(graph, inputs2, config)
        ('user', "What's the weather in SF?")
        ================================== Ai Message ==================================
        Tool Calls:
        check_weather (call_ChndaktJxpr6EMPEB5JfOFYc)
        Call ID: call_ChndaktJxpr6EMPEB5JfOFYc
        Args:
            location: San Francisco
        ================================= Tool Message =================================
        Name: check_weather
        It's always sunny in San Francisco
        ================================== Ai Message ==================================
        The weather in San Francisco is sunny. Enjoy your day!
        ================================ Human Message =================================
        Cool, so then should i go biking today?
        ================================== Ai Message ==================================
        Since the weather in San Francisco is sunny, it sounds like a great day for biking! Enjoy your ride!
        ```

        Add an interrupt to let the user confirm before taking an action:

        ```pycon
        >>> graph = create_react_agent(
        ...     model, tools, interrupt_before=["tools"], checkpointer=MemorySaver()
        ... )
        >>> config = {"configurable": {"thread_id": "thread-1"}}
        >>> inputs = {"messages": [("user", "What's the weather in SF?")]}
        >>> print_stream(graph, inputs, config)
        >>> snapshot = graph.get_state(config)
        >>> print("Next step: ", snapshot.next)
        >>> print_stream(graph, None, config)
        ```

        Add cross-thread memory to the graph:

        ```pycon
        >>> from langgraph.prebuilt import InjectedStore
        >>> from langgraph.store.base import BaseStore
        >>> def save_memory(memory: str, *, config: RunnableConfig, store: Annotated[BaseStore, InjectedStore()]) -> str:
        ...     '''Save the given memory for the current user.'''
        ...     # This is a **tool** the model can use to save memories to storage
        ...     user_id = config.get("configurable", {}).get("user_id")
        ...     namespace = ("memories", user_id)
        ...     store.put(namespace, f"memory_{len(store.search(namespace))}", {"data": memory})
        ...     return f"Saved memory: {memory}"
        >>> def prepare_model_inputs(state: AgentState, config: RunnableConfig, store: BaseStore):
        ...     # Retrieve user memories and add them to the system message
        ...     # This function is called **every time** the model is prompted. It converts the state to a prompt
        ...     user_id = config.get("configurable", {}).get("user_id")
        ...     namespace = ("memories", user_id)
        ...     memories = [m.value["data"] for m in store.search(namespace)]
        ...     system_msg = f"User memories: {', '.join(memories)}"
        ...     return [{"role": "system", "content": system_msg}] + state["messages"]
        >>> from langgraph.checkpoint.memory import MemorySaver
        >>> from langgraph.store.memory import InMemoryStore
        >>> store = InMemoryStore()
        >>> graph = create_react_agent(model, [save_memory], state_modifier=prepare_model_inputs, store=store, checkpointer=MemorySaver())
        >>> config = {"configurable": {"thread_id": "thread-1", "user_id": "1"}}
        >>> inputs = {"messages": [("user", "Hey I'm Will, how's it going?")]}
        >>> print_stream(graph, inputs, config)
        ('user', "Hey I'm Will, how's it going?")
        ================================== Ai Message ==================================
        Hello Will! It's nice to meet you. I'm doing well, thank you for asking. How are you doing today?
        >>> inputs2 = {"messages": [("user", "I like to bike")]}
        >>> print_stream(graph, inputs2, config)
        ================================ Human Message =================================
        I like to bike
        ================================== Ai Message ==================================
        That's great to hear, Will! Biking is an excellent hobby and form of exercise. It's a fun way to stay active and explore your surroundings. Do you have any favorite biking routes or trails you enjoy? Or perhaps you're into a specific type of biking, like mountain biking or road cycling?
        >>> config = {"configurable": {"thread_id": "thread-2", "user_id": "1"}}
        >>> inputs3 = {"messages": [("user", "Hi there! Remember me?")]}
        >>> print_stream(graph, inputs3, config)
        ================================ Human Message =================================
        Hi there! Remember me?
        ================================== Ai Message ==================================
        User memories:
        Hello! Of course, I remember you, Will! You mentioned earlier that you like to bike. It's great to hear from you again. How have you been? Have you been on any interesting bike rides lately?
        ```

        Add a timeout for a given step:

        ```pycon
        >>> import time
        ... def check_weather(location: str, at_time: datetime | None = None) -> float:
        ...     '''Return the weather forecast for the specified location.'''
        ...     time.sleep(2)
        ...     return f"It's always sunny in {location}"
        >>>
        >>> tools = [check_weather]
        >>> graph = create_react_agent(model, tools)
        >>> graph.step_timeout = 1 # Seconds
        >>> for s in graph.stream({"messages": [("user", "what is the weather in sf")]}):
        ...     print(s)
        TimeoutError: Timed out at step 2
        ```
    """
    if state_schema is not None:
        # Custom schemas must still carry the keys the prebuilt nodes rely on.
        if missing_keys := {"messages", "is_last_step"} - set(
            state_schema.__annotations__
        ):
            raise ValueError(f"Missing required key(s) {missing_keys} in state_schema")

    # Normalize the three accepted tool inputs into a ToolNode + tool list.
    if isinstance(tools, ToolExecutor):
        tool_classes: Sequence[BaseTool] = tools.tools
        tool_node = ToolNode(tool_classes)
    elif isinstance(tools, ToolNode):
        tool_classes = list(tools.tools_by_name.values())
        tool_node = tools
    else:
        tool_node = ToolNode(tools)
        # get the tool functions wrapped in a tool class from the ToolNode
        tool_classes = list(tool_node.tools_by_name.values())
    tool_calling_enabled = len(tool_classes) > 0

    # Bind tools only when the caller hasn't already done so via bind_tools().
    if _should_bind_tools(model, tool_classes) and tool_calling_enabled:
        model = cast(BaseChatModel, model).bind_tools(tool_classes)

    # we're passing store here for validation
    preprocessor = _get_model_preprocessing_runnable(
        state_modifier, messages_modifier, store
    )
    model_runnable = preprocessor | model

    # Define the function that calls the model
    def call_model(state: AgentState, config: RunnableConfig) -> AgentState:
        _validate_chat_history(state["messages"])
        response = model_runnable.invoke(state, config)
        has_tool_calls = isinstance(response, AIMessage) and response.tool_calls
        all_tools_return_direct = (
            all(call["name"] in should_return_direct for call in response.tool_calls)
            if isinstance(response, AIMessage)
            else False
        )
        # If the remaining step budget cannot accommodate another tool
        # round-trip, short-circuit with an apology instead of tool calls.
        if (
            (
                "remaining_steps" not in state
                and state["is_last_step"]
                and has_tool_calls
            )
            or (
                "remaining_steps" in state
                and state["remaining_steps"] < 1
                and all_tools_return_direct
            )
            or (
                "remaining_steps" in state
                and state["remaining_steps"] < 2
                and has_tool_calls
            )
        ):
            return {
                "messages": [
                    AIMessage(
                        id=response.id,
                        content="Sorry, need more steps to process this request.",
                    )
                ]
            }
        # We return a list, because this will get added to the existing list
        return {"messages": [response]}

    # Async twin of call_model; kept in sync with the logic above.
    async def acall_model(state: AgentState, config: RunnableConfig) -> AgentState:
        _validate_chat_history(state["messages"])
        response = await model_runnable.ainvoke(state, config)
        has_tool_calls = isinstance(response, AIMessage) and response.tool_calls
        all_tools_return_direct = (
            all(call["name"] in should_return_direct for call in response.tool_calls)
            if isinstance(response, AIMessage)
            else False
        )
        if (
            (
                "remaining_steps" not in state
                and state["is_last_step"]
                and has_tool_calls
            )
            or (
                "remaining_steps" in state
                and state["remaining_steps"] < 1
                and all_tools_return_direct
            )
            or (
                "remaining_steps" in state
                and state["remaining_steps"] < 2
                and has_tool_calls
            )
        ):
            return {
                "messages": [
                    AIMessage(
                        id=response.id,
                        content="Sorry, need more steps to process this request.",
                    )
                ]
            }
        # We return a list, because this will get added to the existing list
        return {"messages": [response]}

    if not tool_calling_enabled:
        # No tools: the graph is a single LLM node with no loop.
        # Define a new graph
        workflow = StateGraph(state_schema or AgentState)
        workflow.add_node("agent", RunnableCallable(call_model, acall_model))
        workflow.set_entry_point("agent")
        return workflow.compile(
            checkpointer=checkpointer,
            store=store,
            interrupt_before=interrupt_before,
            interrupt_after=interrupt_after,
            debug=debug,
        )

    # Define the function that determines whether to continue or not
    def should_continue(state: AgentState) -> Literal["tools", "__end__"]:
        messages = state["messages"]
        last_message = messages[-1]
        # If there is no function call, then we finish
        if not isinstance(last_message, AIMessage) or not last_message.tool_calls:
            return "__end__"
        # Otherwise if there is, we continue
        else:
            return "tools"

    # Define a new graph
    workflow = StateGraph(state_schema or AgentState)

    # Define the two nodes we will cycle between
    workflow.add_node("agent", RunnableCallable(call_model, acall_model))
    workflow.add_node("tools", tool_node)

    # Set the entrypoint as `agent`
    # This means that this node is the first one called
    workflow.set_entry_point("agent")

    # We now add a conditional edge
    workflow.add_conditional_edges(
        # First, we define the start node. We use `agent`.
        # This means these are the edges taken after the `agent` node is called.
        "agent",
        # Next, we pass in the function that will determine which node is called next.
        should_continue,
    )

    # If any of the tools are configured to return_directly after running,
    # our graph needs to check if these were called
    should_return_direct = {t.name for t in tool_classes if t.return_direct}

    def route_tool_responses(state: AgentState) -> Literal["agent", "__end__"]:
        # Scan the trailing ToolMessages; if any came from a return_direct
        # tool, the run ends here instead of looping back to the agent.
        for m in reversed(state["messages"]):
            if not isinstance(m, ToolMessage):
                break
            if m.name in should_return_direct:
                return "__end__"
        return "agent"

    if should_return_direct:
        workflow.add_conditional_edges("tools", route_tool_responses)
    else:
        workflow.add_edge("tools", "agent")

    # Finally, we compile it!
    # This compiles it into a LangChain Runnable,
    # meaning you can use it as you would any other runnable
    return workflow.compile(
        checkpointer=checkpointer,
        store=store,
        interrupt_before=interrupt_before,
        interrupt_after=interrupt_after,
        debug=debug,
    )
# Keep for backwards compatibility
# (older releases exposed the factory under this name).
create_tool_calling_executor = create_react_agent

# Public API of this module.
__all__ = [
    "create_react_agent",
    "create_tool_calling_executor",
    "AgentState",
]
|
0 | lc_public_repos/langgraph/libs/langgraph/langgraph | lc_public_repos/langgraph/libs/langgraph/langgraph/prebuilt/tool_node.py | import asyncio
import inspect
import json
from copy import copy
from typing import (
Any,
Callable,
Literal,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
get_type_hints,
)
from langchain_core.messages import (
AIMessage,
AnyMessage,
ToolCall,
ToolMessage,
)
from langchain_core.runnables import RunnableConfig
from langchain_core.runnables.config import (
get_config_list,
get_executor_for_config,
)
from langchain_core.runnables.utils import Input
from langchain_core.tools import BaseTool, InjectedToolArg
from langchain_core.tools import tool as create_tool
from langchain_core.tools.base import get_all_basemodel_annotations
from pydantic import BaseModel
from typing_extensions import Annotated, get_args, get_origin
from langgraph.errors import GraphBubbleUp
from langgraph.store.base import BaseStore
from langgraph.utils.runnable import RunnableCallable
# Returned to the model when it requests a tool this node does not have.
INVALID_TOOL_NAME_ERROR_TEMPLATE = (
    "Error: {requested_tool} is not a valid tool, try one of [{available_tools}]."
)
# Default error content when a tool raises and handle_tool_errors is truthy.
TOOL_CALL_ERROR_TEMPLATE = "Error: {error}\n Please fix your mistakes."
def msg_content_output(output: Any) -> Union[str, list[dict]]:
    """Coerce an arbitrary tool output into valid ToolMessage content.

    Strings pass through unchanged; lists made up entirely of recognized
    content blocks (image / image_url / text / json dicts) are kept as-is.
    Anything else is serialized to JSON when possible, otherwise ``str()``.
    """
    recognized_content_block_types = ("image", "image_url", "text", "json")
    if isinstance(output, str):
        return output
    # Only iterate when the output is actually a list: the previous version
    # iterated any non-string value, so scalar outputs (int, float, None)
    # raised TypeError instead of being serialized below.
    elif isinstance(output, list) and all(
        isinstance(x, dict) and x.get("type") in recognized_content_block_types
        for x in output
    ):
        return output
    # Technically a list of strings is also valid message content but it's not currently
    # well tested that all chat models support this. And for backwards compatibility
    # we want to make sure we don't break any existing ToolNode usage.
    else:
        try:
            return json.dumps(output, ensure_ascii=False)
        except Exception:
            # Not JSON-serializable; fall back to the string representation.
            return str(output)
def _handle_tool_error(
    e: Exception,
    *,
    flag: Union[
        bool,
        str,
        Callable[..., str],
        tuple[type[Exception], ...],
    ],
) -> str:
    """Render ToolMessage content for a caught tool exception.

    ``flag`` mirrors ToolNode's handle_tool_errors setting: a bool or an
    exception tuple yields the default template, a string is used verbatim,
    and a callable is invoked with the exception.
    """
    if isinstance(flag, (bool, tuple)):
        return TOOL_CALL_ERROR_TEMPLATE.format(error=repr(e))
    if isinstance(flag, str):
        return flag
    if callable(flag):
        return flag(e)
    raise ValueError(
        f"Got unexpected type of `handle_tool_error`. Expected bool, str "
        f"or callable. Received: {flag}"
    )
def _infer_handled_types(handler: Callable[..., str]) -> tuple[type[Exception], ...]:
    """Derive which exception types a custom error handler accepts.

    Inspects the annotation of the handler's first real parameter (skipping a
    leading self/cls). A single Exception subclass or a Union of them is
    honored; a missing annotation falls back to (Exception,); anything else
    raises ValueError.
    """
    sig = inspect.signature(handler)
    params = list(sig.parameters.values())
    if not params:
        # No parameters at all: catch everything, for backwards compatibility.
        return (Exception,)

    # If it's a method, the first argument is typically 'self' or 'cls'.
    if len(params) == 2 and params[0].name in ("self", "cls"):
        first_param = params[1]
    else:
        first_param = params[0]

    type_hints = get_type_hints(handler)
    if first_param.name not in type_hints:
        # Unannotated parameter: backwards-compatible catch-all.
        return (Exception,)

    if get_origin(first_param.annotation) is Union:
        union_members = get_args(first_param.annotation)
        if not all(issubclass(member, Exception) for member in union_members):
            raise ValueError(
                "All types in the error handler error annotation must be Exception types. "
                "For example, `def custom_handler(e: Union[ValueError, TypeError])`. "
                f"Got '{first_param.annotation}' instead."
            )
        return tuple(union_members)

    exception_type = type_hints[first_param.name]
    if Exception not in exception_type.__mro__:
        raise ValueError(
            f"Arbitrary types are not supported in the error handler signature. "
            "Please annotate the error with either a specific Exception type or a union of Exception types. "
            "For example, `def custom_handler(e: ValueError)` or `def custom_handler(e: Union[ValueError, TypeError])`. "
            f"Got '{exception_type}' instead."
        )
    return (exception_type,)
class ToolNode(RunnableCallable):
    """A node that runs the tools called in the last AIMessage.

    It can be used either in StateGraph with a "messages" state key (or a custom key passed via ToolNode's 'messages_key').
    If multiple tool calls are requested, they will be run in parallel. The output will be
    a list of ToolMessages, one for each tool call.

    Args:
        tools: A sequence of tools that can be invoked by the ToolNode.
        name: The name of the ToolNode in the graph. Defaults to "tools".
        tags: Optional tags to associate with the node. Defaults to None.
        handle_tool_errors: How to handle tool errors raised by tools inside the node. Defaults to True.
            Must be one of the following:

            - True: all errors will be caught and
                a ToolMessage with a default error message (TOOL_CALL_ERROR_TEMPLATE) will be returned.
            - str: all errors will be caught and
                a ToolMessage with the string value of 'handle_tool_errors' will be returned.
            - tuple[type[Exception], ...]: exceptions in the tuple will be caught and
                a ToolMessage with a default error message (TOOL_CALL_ERROR_TEMPLATE) will be returned.
            - Callable[..., str]: exceptions from the signature of the callable will be caught and
                a ToolMessage with the string value of the result of the 'handle_tool_errors' callable will be returned.
            - False: none of the errors raised by the tools will be caught
        messages_key: The state key in the input that contains the list of messages.
            The same key will be used for the output from the ToolNode.
            Defaults to "messages".

    The `ToolNode` is roughly analogous to:

    ```python
    tools_by_name = {tool.name: tool for tool in tools}
    def tool_node(state: dict):
        result = []
        for tool_call in state["messages"][-1].tool_calls:
            tool = tools_by_name[tool_call["name"]]
            observation = tool.invoke(tool_call["args"])
            result.append(ToolMessage(content=observation, tool_call_id=tool_call["id"]))
        return {"messages": result}
    ```

    Important:
        - The state MUST contain a list of messages.
        - The last message MUST be an `AIMessage`.
        - The `AIMessage` MUST have `tool_calls` populated.
    """

    # Default runnable name; overridden by the `name` constructor argument.
    name: str = "ToolNode"
def __init__(
    self,
    tools: Sequence[Union[BaseTool, Callable]],
    *,
    name: str = "tools",
    tags: Optional[list[str]] = None,
    handle_tool_errors: Union[
        bool, str, Callable[..., str], tuple[type[Exception], ...]
    ] = True,
    messages_key: str = "messages",
) -> None:
    """Index the given tools by name and record their injected-arg mappings."""
    super().__init__(self._func, self._afunc, name=name, tags=tags, trace=False)
    self.tools_by_name: dict[str, BaseTool] = {}
    self.tool_to_state_args: dict[str, dict[str, Optional[str]]] = {}
    self.tool_to_store_arg: dict[str, Optional[str]] = {}
    self.handle_tool_errors = handle_tool_errors
    self.messages_key = messages_key
    for candidate in tools:
        # Plain callables are wrapped into BaseTool instances first.
        tool_ = candidate if isinstance(candidate, BaseTool) else create_tool(candidate)
        self.tools_by_name[tool_.name] = tool_
        self.tool_to_state_args[tool_.name] = _get_state_args(tool_)
        self.tool_to_store_arg[tool_.name] = _get_store_arg(tool_)
def _func(
    self,
    input: Union[
        list[AnyMessage],
        dict[str, Any],
        BaseModel,
    ],
    config: RunnableConfig,
    *,
    store: BaseStore,
) -> Any:
    """Run the last AIMessage's tool calls in parallel (sync path)."""
    tool_calls, output_type = self._parse_input(input, store)
    configs = get_config_list(config, len(tool_calls))
    with get_executor_for_config(config) as executor:
        outputs = list(executor.map(self._run_one, tool_calls, configs))
    # TypedDict, pydantic, dataclass, etc. should all be able to load from dict
    if output_type == "list":
        return outputs
    return {self.messages_key: outputs}
def invoke(
    self, input: Input, config: Optional[RunnableConfig] = None, **kwargs: Any
) -> Any:
    """Synchronous entry point; defaults the injected store kwarg to None."""
    kwargs.setdefault("store", None)
    return super().invoke(input, config, **kwargs)
async def ainvoke(
self, input: Input, config: Optional[RunnableConfig] = None, **kwargs: Any
) -> Any:
if "store" not in kwargs:
kwargs["store"] = None
return await super().ainvoke(input, config, **kwargs)
async def _afunc(
self,
input: Union[
list[AnyMessage],
dict[str, Any],
BaseModel,
],
config: RunnableConfig,
*,
store: BaseStore,
) -> Any:
tool_calls, output_type = self._parse_input(input, store)
outputs = await asyncio.gather(
*(self._arun_one(call, config) for call in tool_calls)
)
# TypedDict, pydantic, dataclass, etc. should all be able to load from dict
return outputs if output_type == "list" else {self.messages_key: outputs}
    def _run_one(self, call: ToolCall, config: RunnableConfig) -> ToolMessage:
        """Execute a single tool call synchronously and return its ToolMessage.

        Unknown tool names yield an error ToolMessage instead of raising;
        tool exceptions are converted to error ToolMessages when covered by
        ``self.handle_tool_errors``, and re-raised otherwise.
        """
        if invalid_tool_message := self._validate_tool_call(call):
            return invalid_tool_message
        try:
            # "type": "tool_call" asks BaseTool.invoke to return a ToolMessage.
            input = {**call, **{"type": "tool_call"}}
            tool_message: ToolMessage = self.tools_by_name[call["name"]].invoke(
                input, config
            )
            # Normalize content to str or list so it serializes consistently.
            tool_message.content = cast(
                Union[str, list], msg_content_output(tool_message.content)
            )
            return tool_message
        # GraphInterrupt is a special exception that will always be raised.
        # It can be triggered in the following scenarios:
        # (1) a NodeInterrupt is raised inside a tool
        # (2) a NodeInterrupt is raised inside a graph node for a graph called as a tool
        # (3) a GraphInterrupt is raised when a subgraph is interrupted inside a graph called as a tool
        # (2 and 3 can happen in a "supervisor w/ tools" multi-agent architecture)
        except GraphBubbleUp as e:
            raise e
        except Exception as e:
            # Work out which exception types count as "handled".
            if isinstance(self.handle_tool_errors, tuple):
                handled_types: tuple = self.handle_tool_errors
            elif callable(self.handle_tool_errors):
                # Infer the handled types from the handler's signature.
                handled_types = _infer_handled_types(self.handle_tool_errors)
            else:
                # default behavior is catching all exceptions
                handled_types = (Exception,)
            # Unhandled
            if not self.handle_tool_errors or not isinstance(e, handled_types):
                raise e
            # Handled
            else:
                content = _handle_tool_error(e, flag=self.handle_tool_errors)
        return ToolMessage(
            content=content, name=call["name"], tool_call_id=call["id"], status="error"
        )
    async def _arun_one(self, call: ToolCall, config: RunnableConfig) -> ToolMessage:
        """Async twin of ``_run_one``: execute one tool call via ``ainvoke``.

        Error-handling semantics mirror the sync path exactly.
        """
        if invalid_tool_message := self._validate_tool_call(call):
            return invalid_tool_message
        try:
            # "type": "tool_call" asks BaseTool.ainvoke to return a ToolMessage.
            input = {**call, **{"type": "tool_call"}}
            tool_message: ToolMessage = await self.tools_by_name[call["name"]].ainvoke(
                input, config
            )
            # Normalize content to str or list so it serializes consistently.
            tool_message.content = cast(
                Union[str, list], msg_content_output(tool_message.content)
            )
            return tool_message
        # GraphInterrupt is a special exception that will always be raised.
        # It can be triggered in the following scenarios:
        # (1) a NodeInterrupt is raised inside a tool
        # (2) a NodeInterrupt is raised inside a graph node for a graph called as a tool
        # (3) a GraphInterrupt is raised when a subgraph is interrupted inside a graph called as a tool
        # (2 and 3 can happen in a "supervisor w/ tools" multi-agent architecture)
        except GraphBubbleUp as e:
            raise e
        except Exception as e:
            # Work out which exception types count as "handled".
            if isinstance(self.handle_tool_errors, tuple):
                handled_types: tuple = self.handle_tool_errors
            elif callable(self.handle_tool_errors):
                # Infer the handled types from the handler's signature.
                handled_types = _infer_handled_types(self.handle_tool_errors)
            else:
                # default behavior is catching all exceptions
                handled_types = (Exception,)
            # Unhandled
            if not self.handle_tool_errors or not isinstance(e, handled_types):
                raise e
            # Handled
            else:
                content = _handle_tool_error(e, flag=self.handle_tool_errors)
        return ToolMessage(
            content=content, name=call["name"], tool_call_id=call["id"], status="error"
        )
def _parse_input(
self,
input: Union[
list[AnyMessage],
dict[str, Any],
BaseModel,
],
store: BaseStore,
) -> Tuple[list[ToolCall], Literal["list", "dict"]]:
if isinstance(input, list):
output_type = "list"
message: AnyMessage = input[-1]
elif isinstance(input, dict) and (messages := input.get(self.messages_key, [])):
output_type = "dict"
message = messages[-1]
elif messages := getattr(input, self.messages_key, None):
# Assume dataclass-like state that can coerce from dict
output_type = "dict"
message = messages[-1]
else:
raise ValueError("No message found in input")
if not isinstance(message, AIMessage):
raise ValueError("Last message is not an AIMessage")
tool_calls = [
self._inject_tool_args(call, input, store) for call in message.tool_calls
]
return tool_calls, output_type
def _validate_tool_call(self, call: ToolCall) -> Optional[ToolMessage]:
if (requested_tool := call["name"]) not in self.tools_by_name:
content = INVALID_TOOL_NAME_ERROR_TEMPLATE.format(
requested_tool=requested_tool,
available_tools=", ".join(self.tools_by_name.keys()),
)
return ToolMessage(
content, name=requested_tool, tool_call_id=call["id"], status="error"
)
else:
return None
    def _inject_state(
        self,
        tool_call: ToolCall,
        input: Union[
            list[AnyMessage],
            dict[str, Any],
            BaseModel,
        ],
    ) -> ToolCall:
        """Merge graph-state values into the tool call's args for InjectedState params.

        ``input`` may be a raw message list, a state dict, or a dataclass /
        pydantic-style state object.
        """
        state_args = self.tool_to_state_args[tool_call["name"]]
        if state_args and isinstance(input, list):
            # A bare message list can only satisfy tools that want the messages
            # field (or the whole state).
            required_fields = list(state_args.values())
            # NOTE(review): `and` binds tighter than `or`, so this reads as
            # (len==1 and field==messages_key) or (field is None) — a
            # multi-field tool whose first field is None also passes. Confirm
            # this precedence is intentional.
            if (
                len(required_fields) == 1
                and required_fields[0] == self.messages_key
                or required_fields[0] is None
            ):
                input = {self.messages_key: input}
            else:
                err_msg = (
                    f"Invalid input to ToolNode. Tool {tool_call['name']} requires "
                    f"graph state dict as input."
                )
                if any(state_field for state_field in state_args.values()):
                    required_fields_str = ", ".join(f for f in required_fields if f)
                    err_msg += f" State should contain fields {required_fields_str}."
                raise ValueError(err_msg)
        if isinstance(input, dict):
            # state_field=None means: pass the entire state object.
            tool_state_args = {
                tool_arg: input[state_field] if state_field else input
                for tool_arg, state_field in state_args.items()
            }
        else:
            tool_state_args = {
                tool_arg: getattr(input, state_field) if state_field else input
                for tool_arg, state_field in state_args.items()
            }
        tool_call["args"] = {
            **tool_call["args"],
            **tool_state_args,
        }
        return tool_call
def _inject_store(self, tool_call: ToolCall, store: BaseStore) -> ToolCall:
store_arg = self.tool_to_store_arg[tool_call["name"]]
if not store_arg:
return tool_call
if store is None:
raise ValueError(
"Cannot inject store into tools with InjectedStore annotations - "
"please compile your graph with a store."
)
tool_call["args"] = {
**tool_call["args"],
store_arg: store,
}
return tool_call
def _inject_tool_args(
self,
tool_call: ToolCall,
input: Union[
list[AnyMessage],
dict[str, Any],
BaseModel,
],
store: BaseStore,
) -> ToolCall:
if tool_call["name"] not in self.tools_by_name:
return tool_call
tool_call_copy: ToolCall = copy(tool_call)
tool_call_with_state = self._inject_state(tool_call_copy, input)
tool_call_with_store = self._inject_store(tool_call_with_state, store)
return tool_call_with_store
def tools_condition(
    state: Union[list[AnyMessage], dict[str, Any], BaseModel],
    messages_key: str = "messages",
) -> Literal["tools", "__end__"]:
    """Conditional-edge router: go to the "tools" node when the last message
    carries tool calls, otherwise end the graph.

    Args:
        state: Graph state — either a plain message list (MessageGraph) or a
            dict / dataclass-like state holding messages under ``messages_key``.
        messages_key: Key or attribute under which messages are stored.

    Returns:
        ``"tools"`` when the last message has pending tool calls, else
        ``"__end__"``.

    Raises:
        ValueError: when no messages can be found in ``state``.

    Example:
        ```python
        graph_builder.add_conditional_edges("chatbot", tools_condition)
        ```
    """
    if isinstance(state, list):
        last = state[-1]
    elif isinstance(state, dict) and (messages := state.get(messages_key, [])):
        last = messages[-1]
    elif messages := getattr(state, messages_key, []):
        last = messages[-1]
    else:
        raise ValueError(f"No messages found in input state to tool_edge: {state}")
    if hasattr(last, "tool_calls") and len(last.tool_calls) > 0:
        return "tools"
    return "__end__"
class InjectedState(InjectedToolArg):
    """Annotation for a Tool arg that is meant to be populated with the graph state.

    Any Tool argument annotated with InjectedState is hidden from a
    tool-calling model, so that the model doesn't attempt to generate the
    argument. When executed by ToolNode, the matching graph state (or a single
    field of it) is injected into the model-generated tool args instead.

    Args:
        field: The key from state to insert. If None, the entire state is
            expected to be passed in.

    Example:
        ```python
        from typing_extensions import Annotated

        @tool
        def state_tool(x: int, state: Annotated[dict, InjectedState]) -> str:
            '''Do something with the whole state.'''
            ...

        @tool
        def foo_tool(x: int, foo: Annotated[str, InjectedState("foo")]) -> str:
            '''Do something with just the "foo" state field.'''
            return foo + str(x + 1)
        ```
    """  # noqa: E501

    def __init__(self, field: Optional[str] = None) -> None:
        # field=None -> inject the whole state object rather than one key.
        self.field = field
class InjectedStore(InjectedToolArg):
    """Annotation for a Tool arg that is meant to be populated with the LangGraph store.

    Any Tool argument annotated with InjectedStore is hidden from a
    tool-calling model, so that the model doesn't attempt to generate the
    argument. When executed by ToolNode, the store is injected into the
    model-generated tool args. Note: if a graph is compiled with a store
    object, that store is automatically propagated to tools with
    InjectedStore args when using ToolNode.

    !!! Warning
        `InjectedStore` annotation requires `langchain-core >= 0.3.8`

    Example:
        ```python
        from typing import Any
        from typing_extensions import Annotated

        @tool
        def store_tool(x: int, my_store: Annotated[Any, InjectedStore()]) -> str:
            '''Do something with store.'''
            stored_value = my_store.get(("values",), "foo").value["bar"]
            return stored_value + x

        node = ToolNode([store_tool])
        node.invoke(state, store=store)
        ```
    """  # noqa: E501
def _is_injection(
type_arg: Any, injection_type: Union[Type[InjectedState], Type[InjectedStore]]
) -> bool:
if isinstance(type_arg, injection_type) or (
isinstance(type_arg, type) and issubclass(type_arg, injection_type)
):
return True
origin_ = get_origin(type_arg)
if origin_ is Union or origin_ is Annotated:
return any(_is_injection(ta, injection_type) for ta in get_args(type_arg))
return False
def _get_state_args(tool: BaseTool) -> dict[str, Optional[str]]:
    """Map tool argument names to the state field each InjectedState annotation requests.

    A mapped value of None means "inject the entire state".

    Raises:
        ValueError: when one argument carries multiple InjectedState annotations.
    """
    schema = tool.get_input_schema()
    mapping: dict = {}
    for name, type_ in get_all_basemodel_annotations(schema).items():
        injections = [
            arg for arg in get_args(type_) if _is_injection(arg, InjectedState)
        ]
        if len(injections) > 1:
            raise ValueError(
                "A tool argument should not be annotated with InjectedState more than "
                f"once. Received arg {name} with annotations {injections}."
            )
        if injections:
            injection = injections[0]
            if isinstance(injection, InjectedState) and injection.field:
                mapping[name] = injection.field
            else:
                mapping[name] = None
    return mapping
def _get_store_arg(tool: BaseTool) -> Optional[str]:
    """Return the name of the tool argument annotated with InjectedStore, if any.

    Args:
        tool: The tool whose input schema to inspect.

    Returns:
        The first argument name carrying an InjectedStore annotation, or None.

    Raises:
        ValueError: if a single argument carries more than one InjectedStore
            annotation.
    """
    full_schema = tool.get_input_schema()
    for name, type_ in get_all_basemodel_annotations(full_schema).items():
        injections = [
            type_arg
            for type_arg in get_args(type_)
            if _is_injection(type_arg, InjectedStore)
        ]
        if len(injections) > 1:
            # BUG FIX: the ValueError was previously constructed but never
            # raised, so duplicate annotations were silently ignored.
            raise ValueError(
                "A tool argument should not be annotated with InjectedStore more than "
                f"once. Received arg {name} with annotations {injections}."
            )
        elif len(injections) == 1:
            return name
    return None
|
0 | lc_public_repos/langgraph/libs/langgraph/langgraph | lc_public_repos/langgraph/libs/langgraph/langgraph/prebuilt/__init__.py | """langgraph.prebuilt exposes a higher-level API for creating and executing agents and tools."""
from langgraph.prebuilt.chat_agent_executor import create_react_agent
from langgraph.prebuilt.tool_executor import ToolExecutor, ToolInvocation
from langgraph.prebuilt.tool_node import (
    InjectedState,
    InjectedStore,
    ToolNode,
    tools_condition,
)
from langgraph.prebuilt.tool_validator import ValidationNode

# Public API of langgraph.prebuilt.
__all__ = [
    "create_react_agent",
    "ToolExecutor",
    "ToolInvocation",
    "ToolNode",
    "tools_condition",
    "ValidationNode",
    "InjectedState",
    "InjectedStore",
]
|
0 | lc_public_repos/langgraph/libs/langgraph/langgraph | lc_public_repos/langgraph/libs/langgraph/langgraph/utils/config.py | import asyncio
import sys
from collections import ChainMap
from typing import Any, Optional, Sequence, cast
from langchain_core.callbacks import (
AsyncCallbackManager,
BaseCallbackManager,
CallbackManager,
Callbacks,
)
from langchain_core.runnables import RunnableConfig
from langchain_core.runnables.config import (
CONFIG_KEYS,
COPIABLE_KEYS,
DEFAULT_RECURSION_LIMIT,
var_child_runnable_config,
)
from langgraph.checkpoint.base import CheckpointMetadata
from langgraph.constants import (
CONF,
CONFIG_KEY_CHECKPOINT_ID,
CONFIG_KEY_CHECKPOINT_MAP,
CONFIG_KEY_CHECKPOINT_NS,
)
def patch_configurable(
    config: Optional[RunnableConfig], patch: dict[str, Any]
) -> RunnableConfig:
    """Return a copy of ``config`` with ``patch`` merged into its configurable dict."""
    if config is None:
        return {CONF: patch}
    if CONF in config:
        # merge, with patch values winning
        return {**config, CONF: {**config[CONF], **patch}}
    return {**config, CONF: patch}
def patch_checkpoint_map(
    config: Optional[RunnableConfig], metadata: Optional[CheckpointMetadata]
) -> RunnableConfig:
    """Record parent checkpoint mappings into the config's configurable dict.

    No-op when config is None or the metadata carries no parents.
    """
    if config is None:
        return config
    parents = metadata.get("parents") if metadata else None
    if not parents:
        return config
    conf = config[CONF]
    # extend the parent map with this config's own namespace -> checkpoint id
    checkpoint_map = {
        **parents,
        conf[CONFIG_KEY_CHECKPOINT_NS]: conf[CONFIG_KEY_CHECKPOINT_ID],
    }
    return patch_configurable(config, {CONFIG_KEY_CHECKPOINT_MAP: checkpoint_map})
def merge_configs(*configs: Optional[RunnableConfig]) -> RunnableConfig:
    """Merge multiple configs into one.

    Later configs take precedence. "metadata" and configurable dicts merge
    key-wise, "tags" lists concatenate, and "callbacks" values (lists or
    managers) are combined. Falsy values are skipped entirely.

    Args:
        *configs (Optional[RunnableConfig]): The configs to merge.

    Returns:
        RunnableConfig: The merged config (always has a configurable dict).
    """
    base: RunnableConfig = {}
    # Even though the keys aren't literals, this is correct
    # because both dicts are the same type
    for config in configs:
        if config is None:
            continue
        for key, value in config.items():
            if not value:
                continue
            if key == "metadata":
                # dict merge: later values win per key
                if base_value := base.get(key):
                    base[key] = {**base_value, **value}  # type: ignore
                else:
                    base[key] = value  # type: ignore[literal-required]
            elif key == "tags":
                # list concat: keep tags from both sides
                if base_value := base.get(key):
                    base[key] = [*base_value, *value]  # type: ignore
                else:
                    base[key] = value  # type: ignore[literal-required]
            elif key == CONF:
                if base_value := base.get(key):
                    base[key] = {**base_value, **value}  # type: ignore[dict-item]
                else:
                    base[key] = value
            elif key == "callbacks":
                base_callbacks = base.get("callbacks")
                # callbacks can be either None, list[handler] or manager
                # so merging two callbacks values has 6 cases
                if isinstance(value, list):
                    if base_callbacks is None:
                        base["callbacks"] = value.copy()
                    elif isinstance(base_callbacks, list):
                        base["callbacks"] = base_callbacks + value
                    else:
                        # base_callbacks is a manager
                        mngr = base_callbacks.copy()
                        for callback in value:
                            mngr.add_handler(callback, inherit=True)
                        base["callbacks"] = mngr
                elif isinstance(value, BaseCallbackManager):
                    # value is a manager
                    if base_callbacks is None:
                        base["callbacks"] = value.copy()
                    elif isinstance(base_callbacks, list):
                        mngr = value.copy()
                        for callback in base_callbacks:
                            mngr.add_handler(callback, inherit=True)
                        base["callbacks"] = mngr
                    else:
                        # base_callbacks is also a manager
                        base["callbacks"] = base_callbacks.merge(value)
                else:
                    raise NotImplementedError
            elif key == "recursion_limit":
                # only carry a recursion_limit that differs from the default
                if config["recursion_limit"] != DEFAULT_RECURSION_LIMIT:
                    base["recursion_limit"] = config["recursion_limit"]
            else:
                base[key] = config[key]  # type: ignore[literal-required]
    if CONF not in base:
        base[CONF] = {}
    return base
def patch_config(
    config: Optional[RunnableConfig],
    *,
    callbacks: Optional[Callbacks] = None,
    recursion_limit: Optional[int] = None,
    max_concurrency: Optional[int] = None,
    run_name: Optional[str] = None,
    configurable: Optional[dict[str, Any]] = None,
) -> RunnableConfig:
    """Return a shallow copy of ``config`` with the given fields overridden.

    Args:
        config: The config to patch (None is treated as empty).
        callbacks: Replacement callbacks; clears run_name/run_id, which only
            applied to the run the original callbacks belonged to.
        recursion_limit: The recursion limit to set.
        max_concurrency: The max concurrency to set.
        run_name: The run name to set.
        configurable: Values merged into the configurable dict.

    Returns:
        RunnableConfig: The patched config.
    """
    patched = dict(config) if config is not None else {}
    if callbacks is not None:
        patched["callbacks"] = callbacks
        patched.pop("run_name", None)
        patched.pop("run_id", None)
    if recursion_limit is not None:
        patched["recursion_limit"] = recursion_limit
    if max_concurrency is not None:
        patched["max_concurrency"] = max_concurrency
    if run_name is not None:
        patched["run_name"] = run_name
    if configurable is not None:
        patched[CONF] = {**patched.get(CONF, {}), **configurable}
    return patched
def get_callback_manager_for_config(
    config: RunnableConfig, tags: Optional[Sequence[str]] = None
) -> CallbackManager:
    """Build (or reuse) a sync CallbackManager for the given config.

    Args:
        config (RunnableConfig): The config.
        tags: Extra tags merged with the config's own tags.

    Returns:
        CallbackManager: The callback manager.
    """
    from langchain_core.callbacks.manager import CallbackManager

    # merge the config's tags with the explicitly supplied ones
    merged_tags = config.get("tags")
    if tags is not None:
        merged_tags = [*merged_tags, *tags] if merged_tags is not None else list(tags)
    callbacks = config.get("callbacks")
    if callbacks and isinstance(callbacks, CallbackManager):
        # reuse the existing manager, enriching it in place
        if merged_tags:
            callbacks.add_tags(merged_tags)
        if metadata := config.get("metadata"):
            callbacks.add_metadata(metadata)
        return callbacks
    # otherwise configure a fresh manager from the raw callbacks value
    return CallbackManager.configure(
        inheritable_callbacks=callbacks,
        inheritable_tags=merged_tags,
        inheritable_metadata=config.get("metadata"),
    )
def get_async_callback_manager_for_config(
    config: RunnableConfig,
    tags: Optional[Sequence[str]] = None,
) -> AsyncCallbackManager:
    """Get an async callback manager for a config.

    Args:
        config (RunnableConfig): The config.
        tags: Extra tags merged with the config's own tags.

    Returns:
        AsyncCallbackManager: The async callback manager.
    """
    from langchain_core.callbacks.manager import AsyncCallbackManager

    # merge tags
    all_tags = config.get("tags")
    if all_tags is not None and tags is not None:
        all_tags = [*all_tags, *tags]
    elif tags is not None:
        all_tags = list(tags)
    # use existing callbacks if they exist
    if (callbacks := config.get("callbacks")) and isinstance(
        callbacks, AsyncCallbackManager
    ):
        if all_tags:
            callbacks.add_tags(all_tags)
        if metadata := config.get("metadata"):
            callbacks.add_metadata(metadata)
        return callbacks
    else:
        # otherwise create a new manager
        # BUG FIX: previously passed config.get("tags") here, silently dropping
        # the extra `tags` merged above (the sync variant passes all_tags).
        return AsyncCallbackManager.configure(
            inheritable_callbacks=config.get("callbacks"),
            inheritable_tags=all_tags,
            inheritable_metadata=config.get("metadata"),
        )
def ensure_config(*configs: Optional[RunnableConfig]) -> RunnableConfig:
    """Ensure that a config is a dict with all keys present.

    Starts from an empty config, layers in the ambient context-local config
    (if set), then each supplied config in order. Unknown keys are moved into
    the configurable dict, and scalar configurable values are mirrored into
    metadata.

    Args:
        *configs (Optional[RunnableConfig]): The configs to merge in, later
            ones taking precedence. Defaults to None.

    Returns:
        RunnableConfig: The ensured config.
    """
    empty = RunnableConfig(
        tags=[],
        metadata=ChainMap(),
        callbacks=None,
        recursion_limit=DEFAULT_RECURSION_LIMIT,
        configurable={},
    )
    # inherit from the context-local config set by a parent runnable, copying
    # mutable values so the parent's config is not mutated later
    if var_config := var_child_runnable_config.get():
        empty.update(
            {
                k: v.copy() if k in COPIABLE_KEYS else v  # type: ignore[attr-defined]
                for k, v in var_config.items()
                if v is not None
            },
        )
    for config in configs:
        if config is None:
            continue
        # known top-level keys overwrite; configurable is copied to stay isolated
        for k, v in config.items():
            if v is not None and k in CONFIG_KEYS:
                if k == CONF:
                    empty[k] = cast(dict, v).copy()
                else:
                    empty[k] = v  # type: ignore[literal-required]
        # unknown top-level keys are treated as configurable values
        for k, v in config.items():
            if v is not None and k not in CONFIG_KEYS:
                empty[CONF][k] = v
    # mirror scalar, non-private configurable values into metadata (for tracing)
    for key, value in empty[CONF].items():
        if (
            not key.startswith("__")
            and isinstance(value, (str, int, float, bool))
            and key not in empty["metadata"]
        ):
            empty["metadata"][key] = value
    return empty
def get_configurable() -> dict[str, Any]:
    """Return the configurable dict of the current runnable context.

    Raises:
        RuntimeError: when called outside a runnable context, or from within
            an async task on Python < 3.11 (where context propagation into
            tasks is unreliable).
    """
    if sys.version_info < (3, 11):
        try:
            # current_task() raises RuntimeError when there is no running loop.
            task = asyncio.current_task()
        except RuntimeError:
            task = None
        if task is not None:
            # BUG FIX: previously this error was raised inside the same try
            # block and swallowed by its own `except RuntimeError: pass`, so
            # the guard never fired.
            raise RuntimeError(
                "Python 3.11 or later required to use this in an async context"
            )
    if var_config := var_child_runnable_config.get():
        return var_config[CONF]
    else:
        raise RuntimeError("Called get_configurable outside of a runnable context")
|
0 | lc_public_repos/langgraph/libs/langgraph/langgraph | lc_public_repos/langgraph/libs/langgraph/langgraph/utils/runnable.py | import asyncio
import enum
import inspect
import sys
from contextlib import AsyncExitStack
from contextvars import copy_context
from functools import partial, wraps
from typing import (
Any,
AsyncIterator,
Awaitable,
Callable,
Coroutine,
Iterator,
Optional,
Sequence,
Union,
cast,
)
from langchain_core.runnables.base import (
Runnable,
RunnableConfig,
RunnableLambda,
RunnableLike,
RunnableParallel,
RunnableSequence,
)
from langchain_core.runnables.config import (
run_in_executor,
var_child_runnable_config,
)
from langchain_core.runnables.utils import Input
from langchain_core.tracers._streaming import _StreamingCallbackHandler
from typing_extensions import TypeGuard
from langgraph.constants import CONF, CONFIG_KEY_STORE, CONFIG_KEY_STREAM_WRITER
from langgraph.store.base import BaseStore
from langgraph.types import StreamWriter
from langgraph.utils.config import (
ensure_config,
get_async_callback_manager_for_config,
get_callback_manager_for_config,
patch_config,
)
# Prefer the private helper from langchain_core when available; older/newer
# versions that lack it fall back to setting the contextvar directly.
try:
    from langchain_core.runnables.config import _set_config_context
except ImportError:
    # For forwards compatibility
    def _set_config_context(context: RunnableConfig) -> None:  # type: ignore
        """Set the context for the current thread."""
        var_child_runnable_config.set(context)
# Before Python 3.11 native StrEnum is not available
class StrEnum(str, enum.Enum):
    """A string enum."""


# Whether asyncio.create_task accepts a `context` keyword (Python 3.11+).
ASYNCIO_ACCEPTS_CONTEXT = sys.version_info >= (3, 11)

KWARGS_CONFIG_KEYS: tuple[tuple[str, tuple[Any, ...], str, Any], ...] = (
    (
        sys.intern("writer"),
        (StreamWriter, "StreamWriter", inspect.Parameter.empty),
        CONFIG_KEY_STREAM_WRITER,
        # default: a no-op writer when none is configured
        lambda _: None,
    ),
    (
        sys.intern("store"),
        (BaseStore, "BaseStore", inspect.Parameter.empty),
        CONFIG_KEY_STORE,
        # Parameter.empty marks the store as required (no default)
        inspect.Parameter.empty,
    ),
)
"""List of kwargs that can be passed to functions, and their corresponding
config keys, default values and type annotations."""

# Parameter kinds that may receive injected kwargs.
VALID_KINDS = (inspect.Parameter.POSITIONAL_OR_KEYWORD, inspect.Parameter.KEYWORD_ONLY)
class RunnableCallable(Runnable):
    """A much simpler version of RunnableLambda that requires sync and async functions.

    Supports optional injection of `config` plus the `writer`/`store` keyword
    arguments declared in KWARGS_CONFIG_KEYS, based on the wrapped function's
    signature annotations.
    """

    def __init__(
        self,
        func: Optional[Callable[..., Union[Any, Runnable]]],
        afunc: Optional[Callable[..., Awaitable[Union[Any, Runnable]]]] = None,
        *,
        name: Optional[str] = None,
        tags: Optional[Sequence[str]] = None,
        trace: bool = True,
        recurse: bool = True,
        **kwargs: Any,
    ) -> None:
        self.name = name
        if self.name is None:
            # derive a name from the wrapped function, skipping anonymous lambdas
            if func:
                try:
                    if func.__name__ != "<lambda>":
                        self.name = func.__name__
                except AttributeError:
                    pass
            elif afunc:
                try:
                    self.name = afunc.__name__
                except AttributeError:
                    pass
        self.func = func
        self.afunc = afunc
        self.tags = tags
        self.kwargs = kwargs
        self.trace = trace
        self.recurse = recurse
        # check signature
        if func is None and afunc is None:
            raise ValueError("At least one of func or afunc must be provided.")
        params = inspect.signature(cast(Callable, func or afunc)).parameters
        self.func_accepts_config = "config" in params
        # record which injectable kwargs (writer/store) the function declares
        self.func_accepts: dict[str, bool] = {}
        for kw, typ, _, _ in KWARGS_CONFIG_KEYS:
            p = params.get(kw)
            self.func_accepts[kw] = (
                p is not None and p.annotation in typ and p.kind in VALID_KINDS
            )

    def __repr__(self) -> str:
        """Represent the instance by its non-function attributes."""
        repr_args = {
            k: v
            for k, v in self.__dict__.items()
            if k not in {"name", "func", "afunc", "config", "kwargs", "trace"}
        }
        return f"{self.get_name()}({', '.join(f'{k}={v!r}' for k, v in repr_args.items())})"

    def invoke(
        self, input: Any, config: Optional[RunnableConfig] = None, **kwargs: Any
    ) -> Any:
        """Call the sync function, injecting config/writer/store as declared.

        Raises:
            TypeError: if no sync function was provided.
            ValueError: if a required injected kwarg has no configured value.
        """
        if self.func is None:
            raise TypeError(
                f'No synchronous function provided to "{self.name}".'
                "\nEither initialize with a synchronous function or invoke"
                " via the async API (ainvoke, astream, etc.)"
            )
        if config is None:
            config = ensure_config()
        kwargs = {**self.kwargs, **kwargs}
        if self.func_accepts_config:
            kwargs["config"] = config
        _conf = config[CONF]
        # inject writer/store kwargs from the config; a kwarg with no default
        # (Parameter.empty) must be present in kwargs or config
        for kw, _, ck, defv in KWARGS_CONFIG_KEYS:
            if not self.func_accepts[kw]:
                continue
            if defv is inspect.Parameter.empty and kw not in kwargs and ck not in _conf:
                raise ValueError(
                    f"Missing required config key '{ck}' for '{self.name}'."
                )
            elif kwargs.get(kw) is None:
                kwargs[kw] = _conf.get(ck, defv)
        context = copy_context()
        if self.trace:
            callback_manager = get_callback_manager_for_config(config, self.tags)
            run_manager = callback_manager.on_chain_start(
                None,
                input,
                name=config.get("run_name") or self.get_name(),
                run_id=config.pop("run_id", None),
            )
            try:
                child_config = patch_config(config, callbacks=run_manager.get_child())
                context = copy_context()
                context.run(_set_config_context, child_config)
                ret = context.run(self.func, input, **kwargs)
            except BaseException as e:
                run_manager.on_chain_error(e)
                raise
            else:
                run_manager.on_chain_end(ret)
        else:
            # untraced: still run inside a context carrying the config
            context.run(_set_config_context, config)
            ret = context.run(self.func, input, **kwargs)
        if isinstance(ret, Runnable) and self.recurse:
            return ret.invoke(input, config)
        return ret

    async def ainvoke(
        self, input: Any, config: Optional[RunnableConfig] = None, **kwargs: Any
    ) -> Any:
        """Call the async function (falling back to sync), with kwarg injection."""
        if not self.afunc:
            return self.invoke(input, config)
        if config is None:
            config = ensure_config()
        kwargs = {**self.kwargs, **kwargs}
        if self.func_accepts_config:
            kwargs["config"] = config
        _conf = config[CONF]
        for kw, _, ck, defv in KWARGS_CONFIG_KEYS:
            if not self.func_accepts[kw]:
                continue
            if defv is inspect.Parameter.empty and kw not in kwargs and ck not in _conf:
                raise ValueError(
                    f"Missing required config key '{ck}' for '{self.name}'."
                )
            elif kwargs.get(kw) is None:
                kwargs[kw] = _conf.get(ck, defv)
        context = copy_context()
        if self.trace:
            callback_manager = get_async_callback_manager_for_config(config, self.tags)
            run_manager = await callback_manager.on_chain_start(
                None,
                input,
                # CONSISTENCY FIX: use get_name() like invoke() does, so the
                # run name falls back to a class-derived name when self.name
                # is None (e.g. wrapped lambdas), instead of passing None.
                name=config.get("run_name") or self.get_name(),
                run_id=config.pop("run_id", None),
            )
            try:
                child_config = patch_config(config, callbacks=run_manager.get_child())
                context.run(_set_config_context, child_config)
                coro = cast(Coroutine[None, None, Any], self.afunc(input, **kwargs))
                if ASYNCIO_ACCEPTS_CONTEXT:
                    # propagate contextvars into the task (Python 3.11+)
                    ret = await asyncio.create_task(coro, context=context)
                else:
                    ret = await coro
            except BaseException as e:
                await run_manager.on_chain_error(e)
                raise
            else:
                await run_manager.on_chain_end(ret)
        else:
            context.run(_set_config_context, config)
            if ASYNCIO_ACCEPTS_CONTEXT:
                coro = cast(Coroutine[None, None, Any], self.afunc(input, **kwargs))
                ret = await asyncio.create_task(coro, context=context)
            else:
                ret = await self.afunc(input, **kwargs)
        if isinstance(ret, Runnable) and self.recurse:
            return await ret.ainvoke(input, config)
        return ret
def is_async_callable(
func: Any,
) -> TypeGuard[Callable[..., Awaitable]]:
"""Check if a function is async."""
return (
asyncio.iscoroutinefunction(func)
or hasattr(func, "__call__")
and asyncio.iscoroutinefunction(func.__call__)
)
def is_async_generator(
func: Any,
) -> TypeGuard[Callable[..., AsyncIterator]]:
"""Check if a function is an async generator."""
return (
inspect.isasyncgenfunction(func)
or hasattr(func, "__call__")
and inspect.isasyncgenfunction(func.__call__)
)
def coerce_to_runnable(
    thing: RunnableLike, *, name: Optional[str], trace: bool
) -> Runnable:
    """Coerce a runnable-like object into a Runnable.

    Args:
        thing: A Runnable, (async) generator function, callable, or dict.
        name: Optional name for the resulting Runnable.
        trace: Whether a resulting RunnableCallable should emit trace events.

    Returns:
        A Runnable.

    Raises:
        TypeError: if `thing` cannot be coerced.
    """
    if isinstance(thing, Runnable):
        return thing
    # Generator functions keep their streaming semantics via RunnableLambda.
    if is_async_generator(thing) or inspect.isgeneratorfunction(thing):
        return RunnableLambda(thing, name=name)
    if callable(thing):
        if is_async_callable(thing):
            return RunnableCallable(None, thing, name=name, trace=trace)
        # Sync callable: pair it with an executor-backed async counterpart.
        afunc = wraps(thing)(partial(run_in_executor, None, thing))  # type: ignore[arg-type]
        return RunnableCallable(thing, afunc, name=name, trace=trace)
    if isinstance(thing, dict):
        return RunnableParallel(thing)
    raise TypeError(
        f"Expected a Runnable, callable or dict."
        f"Instead got an unsupported type: {type(thing)}"
    )
class RunnableSeq(Runnable):
    """A simpler version of RunnableSequence.

    Chains steps so each step's output feeds the next step's input, while
    reporting the whole chain as a single "chain" run to the callback system
    and running each step in its own copied context.
    """

    def __init__(
        self,
        *steps: RunnableLike,
        name: Optional[str] = None,
    ) -> None:
        """Create a new sequence of runnables.

        Args:
            steps: The steps to include in the sequence. Nested
                RunnableSequence / RunnableSeq instances are flattened so
                ``self.steps`` is always a flat list.
            name: The name of the Runnable. Defaults to None.

        Raises:
            ValueError: If the sequence has less than 2 steps.
        """
        steps_flat: list[Runnable] = []
        for step in steps:
            # Flatten nested sequences of either flavor.
            if isinstance(step, RunnableSequence):
                steps_flat.extend(step.steps)
            elif isinstance(step, RunnableSeq):
                steps_flat.extend(step.steps)
            else:
                steps_flat.append(coerce_to_runnable(step, name=None, trace=True))
        if len(steps_flat) < 2:
            raise ValueError(
                f"RunnableSeq must have at least 2 steps, got {len(steps_flat)}"
            )
        self.steps = steps_flat
        self.name = name

    def __or__(
        self,
        other: Any,
    ) -> Runnable:
        # self | other: append other's steps (flattened) and keep a RunnableSeq.
        if isinstance(other, RunnableSequence):
            return RunnableSeq(
                *self.steps,
                other.first,
                *other.middle,
                other.last,
                name=self.name or other.name,
            )
        elif isinstance(other, RunnableSeq):
            return RunnableSeq(
                *self.steps,
                *other.steps,
                name=self.name or other.name,
            )
        else:
            return RunnableSeq(
                *self.steps,
                coerce_to_runnable(other, name=None, trace=True),
                name=self.name,
            )

    def __ror__(
        self,
        other: Any,
    ) -> Runnable:
        # other | self: prepend other's steps.
        # NOTE(review): unlike __or__, the RunnableSequence branch and the
        # fallback branch build a core RunnableSequence here rather than a
        # RunnableSeq — presumably intentional, but worth confirming.
        if isinstance(other, RunnableSequence):
            return RunnableSequence(
                other.first,
                *other.middle,
                other.last,
                *self.steps,
                name=other.name or self.name,
            )
        elif isinstance(other, RunnableSeq):
            return RunnableSeq(
                *other.steps,
                *self.steps,
                name=other.name or self.name,
            )
        else:
            return RunnableSequence(
                coerce_to_runnable(other, name=None, trace=True),
                *self.steps,
                name=self.name,
            )

    def invoke(
        self, input: Input, config: Optional[RunnableConfig] = None, **kwargs: Any
    ) -> Any:
        """Invoke all steps in order, threading each output into the next input."""
        if config is None:
            config = ensure_config()
        # setup callbacks and context
        callback_manager = get_callback_manager_for_config(config)
        # start the root run
        run_manager = callback_manager.on_chain_start(
            None,
            input,
            name=config.get("run_name") or self.get_name(),
            run_id=config.pop("run_id", None),
        )
        # invoke all steps in sequence
        try:
            for i, step in enumerate(self.steps):
                # mark each step as a child run
                config = patch_config(
                    config, callbacks=run_manager.get_child(f"seq:step:{i+1}")
                )
                # run each step in a fresh context so config vars don't leak
                context = copy_context()
                context.run(_set_config_context, config)
                # extra kwargs are forwarded only to the first step
                if i == 0:
                    input = context.run(step.invoke, input, config, **kwargs)
                else:
                    input = context.run(step.invoke, input, config)
        # finish the root run
        except BaseException as e:
            run_manager.on_chain_error(e)
            raise
        else:
            run_manager.on_chain_end(input)
            return input

    async def ainvoke(
        self,
        input: Input,
        config: Optional[RunnableConfig] = None,
        **kwargs: Optional[Any],
    ) -> Any:
        """Async counterpart of invoke(): run all steps in order."""
        if config is None:
            config = ensure_config()
        # setup callbacks
        callback_manager = get_async_callback_manager_for_config(config)
        # start the root run
        run_manager = await callback_manager.on_chain_start(
            None,
            input,
            name=config.get("run_name") or self.get_name(),
            run_id=config.pop("run_id", None),
        )
        # invoke all steps in sequence
        try:
            for i, step in enumerate(self.steps):
                # mark each step as a child run
                config = patch_config(
                    config, callbacks=run_manager.get_child(f"seq:step:{i+1}")
                )
                context = copy_context()
                context.run(_set_config_context, config)
                # extra kwargs are forwarded only to the first step
                if i == 0:
                    coro = step.ainvoke(input, config, **kwargs)
                else:
                    coro = step.ainvoke(input, config)
                # run the coroutine inside the copied context when the
                # running Python supports passing a context to create_task
                if ASYNCIO_ACCEPTS_CONTEXT:
                    input = await asyncio.create_task(coro, context=context)
                else:
                    input = await asyncio.create_task(coro)
        # finish the root run
        except BaseException as e:
            await run_manager.on_chain_error(e)
            raise
        else:
            await run_manager.on_chain_end(input)
            return input

    def stream(
        self,
        input: Input,
        config: Optional[RunnableConfig] = None,
        **kwargs: Optional[Any],
    ) -> Iterator[Any]:
        """Stream output chunks through the chained steps."""
        if config is None:
            config = ensure_config()
        # setup callbacks
        callback_manager = get_callback_manager_for_config(config)
        # start the root run
        run_manager = callback_manager.on_chain_start(
            None,
            input,
            name=config.get("run_name") or self.get_name(),
            run_id=config.pop("run_id", None),
        )
        try:
            # stream the last steps
            # transform the input stream of each step with the next
            # steps that don't natively support transforming an input stream will
            # buffer input in memory until all available, and then start emitting output
            for idx, step in enumerate(self.steps):
                config = patch_config(
                    config,
                    callbacks=run_manager.get_child(f"seq:step:{idx+1}"),
                )
                if idx == 0:
                    iterator = step.stream(input, config, **kwargs)
                else:
                    iterator = step.transform(iterator, config)
            if stream_handler := next(
                (
                    cast(_StreamingCallbackHandler, h)
                    for h in run_manager.handlers
                    if isinstance(h, _StreamingCallbackHandler)
                ),
                None,
            ):
                # populates streamed_output in astream_log() output if needed
                iterator = stream_handler.tap_output_iter(run_manager.run_id, iterator)
            output: Any = None
            # add_supported is only ever assigned False in this loop, so
            # `output` effectively tracks the most recent chunk yielded.
            add_supported = False
            for chunk in iterator:
                yield chunk
                # collect final output
                if output is None:
                    output = chunk
                elif add_supported:
                    try:
                        output = output + chunk
                    except TypeError:
                        output = chunk
                        add_supported = False
                else:
                    output = chunk
        except BaseException as e:
            run_manager.on_chain_error(e)
            raise
        else:
            run_manager.on_chain_end(output)

    async def astream(
        self,
        input: Input,
        config: Optional[RunnableConfig] = None,
        **kwargs: Optional[Any],
    ) -> AsyncIterator[Any]:
        """Async counterpart of stream(): stream chunks through the steps."""
        if config is None:
            config = ensure_config()
        # setup callbacks
        callback_manager = get_async_callback_manager_for_config(config)
        # start the root run
        run_manager = await callback_manager.on_chain_start(
            None,
            input,
            name=config.get("run_name") or self.get_name(),
            run_id=config.pop("run_id", None),
        )
        try:
            # the exit stack guarantees every intermediate async iterator is
            # closed even if the consumer abandons this generator early
            async with AsyncExitStack() as stack:
                # stream the last steps
                # transform the input stream of each step with the next
                # steps that don't natively support transforming an input stream will
                # buffer input in memory until all available, and then start emitting output
                for idx, step in enumerate(self.steps):
                    config = patch_config(
                        config,
                        callbacks=run_manager.get_child(f"seq:step:{idx+1}"),
                    )
                    if idx == 0:
                        aiterator = step.astream(input, config, **kwargs)
                    else:
                        aiterator = step.atransform(aiterator, config)
                    if hasattr(aiterator, "aclose"):
                        stack.push_async_callback(aiterator.aclose)
                if stream_handler := next(
                    (
                        cast(_StreamingCallbackHandler, h)
                        for h in run_manager.handlers
                        if isinstance(h, _StreamingCallbackHandler)
                    ),
                    None,
                ):
                    # populates streamed_output in astream_log() output if needed
                    aiterator = stream_handler.tap_output_aiter(
                        run_manager.run_id, aiterator
                    )
                output: Any = None
                # add_supported is only ever assigned False in this loop, so
                # `output` effectively tracks the most recent chunk yielded.
                add_supported = False
                async for chunk in aiterator:
                    yield chunk
                    # collect final output
                    if add_supported:
                        try:
                            output = output + chunk
                        except TypeError:
                            output = chunk
                            add_supported = False
                    else:
                        output = chunk
        except BaseException as e:
            await run_manager.on_chain_error(e)
            raise
        else:
            await run_manager.on_chain_end(output)
|
0 | lc_public_repos/langgraph/libs/langgraph/langgraph | lc_public_repos/langgraph/libs/langgraph/langgraph/utils/pydantic.py | from typing import Any, Dict, Optional, Union
from pydantic import BaseModel
from pydantic.v1 import BaseModel as BaseModelV1
def create_model(
    model_name: str,
    *,
    field_definitions: Optional[Dict[str, Any]] = None,
    root: Optional[Any] = None,
) -> Union[BaseModel, BaseModelV1]:
    """Build a pydantic model with the given field definitions.

    Args:
        model_name: The name of the model.
        field_definitions: The field definitions for the model.
        root: Type for a root model (RootModel)
    """
    try:
        # langchain-core >= 0.3.0 ships a pydantic-v2 aware factory.
        from langchain_core.utils.pydantic import create_model_v2

        return create_model_v2(
            model_name,
            field_definitions=field_definitions,
            root=root,
        )
    except ImportError:
        # Older langchain-core (< 0.3.0): fall back to the legacy factory,
        # where a root type is expressed via the __root__ keyword.
        from langchain_core.runnables.utils import create_model

        extra_kwargs = {"__root__": root} if root is not None else {}
        return create_model(model_name, **extra_kwargs, **(field_definitions or {}))
|
0 | lc_public_repos/langgraph/libs/langgraph/langgraph | lc_public_repos/langgraph/libs/langgraph/langgraph/utils/fields.py | import dataclasses
from typing import Any, Optional, Type, Union
from typing_extensions import Annotated, NotRequired, ReadOnly, Required, get_origin
def _is_optional_type(type_: Any) -> bool:
"""Check if a type is Optional."""
if hasattr(type_, "__origin__") and hasattr(type_, "__args__"):
origin = get_origin(type_)
if origin is Optional:
return True
if origin is Union:
return any(
arg is type(None) or _is_optional_type(arg) for arg in type_.__args__
)
if origin is Annotated:
return _is_optional_type(type_.__args__[0])
return origin is None
if hasattr(type_, "__bound__") and type_.__bound__ is not None:
return _is_optional_type(type_.__bound__)
return type_ is None
def _is_required_type(type_: Any) -> Optional[bool]:
"""Check if an annotation is marked as Required/NotRequired.
Returns:
- True if required
- False if not required
- None if not annotated with either
"""
origin = get_origin(type_)
if origin is Required:
return True
if origin is NotRequired:
return False
if origin is Annotated or getattr(origin, "__args__", None):
# See https://typing.readthedocs.io/en/latest/spec/typeddict.html#interaction-with-annotated
return _is_required_type(type_.__args__[0])
return None
def _is_readonly_type(type_: Any) -> bool:
    """Return whether the annotation carries the ReadOnly qualifier.

    Returns:
        - True if is read only
        - False if not read only

    See: https://typing.readthedocs.io/en/latest/spec/typeddict.html#typing-readonly-type-qualifier
    """
    origin = get_origin(type_)
    if origin is ReadOnly:
        return True
    if origin is Annotated:
        # ReadOnly may sit beneath the Annotated wrapper; unwrap and retry.
        return _is_readonly_type(type_.__args__[0])
    return False
# Shared empty frozenset used as the fallback when a schema has no
# __optional_keys__ attribute (see get_field_default below).
_DEFAULT_KEYS: frozenset[str] = frozenset()
def get_field_default(name: str, type_: Any, schema: Type[Any]) -> Any:
    """Determine the default value for a field in a state schema.

    Returns ``...`` (Ellipsis) to signal "required, no default", ``None`` to
    signal an optional field, or a concrete default for dataclass fields.

    This is based on:
        If TypedDict:
        - Required/NotRequired
        - total=False -> everything optional
        - Type annotation (Optional/Union[None])
    """
    # TypedDict classes expose __optional_keys__; other schemas fall back to
    # the shared empty frozenset.
    optional_keys = getattr(schema, "__optional_keys__", _DEFAULT_KEYS)
    # Tri-state: True = Required[...], False = NotRequired[...], None = neither.
    irq = _is_required_type(type_)
    if name in optional_keys:
        # Either total=False or explicit NotRequired.
        # No type annotation trumps this.
        if irq:
            # Unless it's earlier versions of python & explicit Required
            return ...
        return None
    if irq is not None:
        if irq:
            # Handle Required[<type>]
            # (we already handled NotRequired and total=False)
            return ...
        # Handle NotRequired[<type>] for earlier versions of python
        return None
    if dataclasses.is_dataclass(schema):
        # Dataclass fields may declare their own default or default_factory.
        field_info = next(
            (f for f in dataclasses.fields(schema) if f.name == name), None
        )
        if field_info:
            if (
                field_info.default is not dataclasses.MISSING
                and field_info.default is not ...
            ):
                return field_info.default
            elif field_info.default_factory is not dataclasses.MISSING:
                return field_info.default_factory()
    # Note, we ignore ReadOnly attributes,
    # as they don't make much sense. (we don't care if you mutate the state in your node)
    # and mutating state in your node has no effect on our graph state.
    # Base case is the annotation
    if _is_optional_type(type_):
        return None
    return ...
|
0 | lc_public_repos/langgraph/libs/langgraph/langgraph | lc_public_repos/langgraph/libs/langgraph/langgraph/utils/queue.py | # type: ignore
import asyncio
import queue
import sys
import threading
import types
from collections import deque
from time import monotonic
from typing import Optional
# True on Python 3.10+; used to choose between asyncio.Queue._get_loop()
# and the older Queue._loop attribute in AsyncQueue.wait().
PY_310 = sys.version_info >= (3, 10)
class AsyncQueue(asyncio.Queue):
    """Async unbounded FIFO queue with a wait() method.

    Subclassed from asyncio.Queue, adding a wait() method.
    NOTE(review): relies on asyncio.Queue private internals
    (_getters, _get_loop, _wakeup_next), mirroring the stdlib's Queue.get().
    """

    async def wait(self) -> None:
        """If queue is empty, wait until an item is available.

        Copied from Queue.get(), removing the call to .get_nowait(),
        ie. this doesn't consume the item, just waits for it.
        """
        while self.empty():
            # Park on a future that put_nowait() will resolve.
            if PY_310:
                getter = self._get_loop().create_future()
            else:
                getter = self._loop.create_future()
            self._getters.append(getter)
            try:
                await getter
            except:
                getter.cancel()  # Just in case getter is not done yet.
                try:
                    # Clean self._getters from canceled getters.
                    self._getters.remove(getter)
                except ValueError:
                    # The getter could be removed from self._getters by a
                    # previous put_nowait call.
                    pass
                if not self.empty() and not getter.cancelled():
                    # We were woken up by put_nowait(), but can't take
                    # the call. Wake up the next in line.
                    self._wakeup_next(self._getters)
                raise
class Semaphore(threading.Semaphore):
    """threading.Semaphore extended with a non-consuming wait() method."""

    def wait(self, blocking: bool = True, timeout: Optional[float] = None) -> bool:
        """Block until the semaphore could be acquired, without acquiring it.

        Returns True once the internal counter is observed to be positive,
        False if it stayed zero (non-blocking call or timeout expiry).

        Raises:
            ValueError: if a timeout is given together with blocking=False.
        """
        if not blocking and timeout is not None:
            raise ValueError("can't specify timeout for non-blocking acquire")
        deadline: Optional[float] = None
        with self._cond:
            while self._value == 0:
                if not blocking:
                    return False
                remaining = timeout
                if timeout is not None:
                    if deadline is None:
                        # First pass: fix the absolute deadline.
                        deadline = monotonic() + timeout
                    else:
                        # Subsequent passes: wait only for what's left.
                        remaining = deadline - monotonic()
                        if remaining <= 0:
                            return False
                self._cond.wait(remaining)
            return True
class SyncQueue:
    """Unbounded FIFO queue exposing wait() in addition to put/get.

    Adapted from the pure-Python implementation of queue.SimpleQueue, with
    the item counter backed by the local Semaphore so waiting without
    consuming is possible.
    """

    def __init__(self):
        self._queue = deque()
        self._count = Semaphore(0)

    def put(self, item, block=True, timeout=None):
        """Append the item to the queue.

        The optional 'block' and 'timeout' arguments are ignored, as this
        method never blocks; they exist for Queue API compatibility.
        """
        self._queue.append(item)
        self._count.release()

    def get(self, block=True, timeout=None):
        """Remove and return the oldest item from the queue.

        With block=True and timeout=None (the default), blocks until an item
        is available. With a non-negative timeout, blocks at most that many
        seconds and raises queue.Empty on expiry. With block=False, returns
        an item only if one is immediately available, else raises queue.Empty
        ('timeout' is ignored in that case).
        """
        if timeout is not None and timeout < 0:
            raise ValueError("'timeout' must be a non-negative number")
        acquired = self._count.acquire(block, timeout)
        if not acquired:
            raise queue.Empty
        try:
            return self._queue.popleft()
        except IndexError:
            # The counter and the deque can briefly disagree under
            # concurrent consumers; surface it as an empty queue.
            raise queue.Empty

    def wait(self, block=True, timeout=None):
        """If queue is empty, wait until an item maybe is available,
        but don't consume it.
        """
        if timeout is not None and timeout < 0:
            raise ValueError("'timeout' must be a non-negative number")
        self._count.wait(block, timeout)

    def empty(self):
        """Return True if the queue is empty, False otherwise (not reliable!)."""
        return not self._queue

    def qsize(self):
        """Return the approximate size of the queue (not reliable!)."""
        return len(self._queue)

    __class_getitem__ = classmethod(types.GenericAlias)
__all__ = ["AsyncQueue", "SyncQueue"]
|
0 | lc_public_repos/langgraph/libs/langgraph/langgraph | lc_public_repos/langgraph/libs/langgraph/langgraph/graph/graph.py | import asyncio
import logging
from collections import defaultdict
from typing import (
Any,
Awaitable,
Callable,
Hashable,
Literal,
NamedTuple,
Optional,
Sequence,
Union,
cast,
get_args,
get_origin,
get_type_hints,
overload,
)
from langchain_core.runnables import Runnable
from langchain_core.runnables.base import RunnableLike
from langchain_core.runnables.config import RunnableConfig
from langchain_core.runnables.graph import Graph as DrawableGraph
from langchain_core.runnables.graph import Node as DrawableNode
from typing_extensions import Self
from langgraph.channels.ephemeral_value import EphemeralValue
from langgraph.constants import (
EMPTY_SEQ,
END,
NS_END,
NS_SEP,
START,
TAG_HIDDEN,
Send,
)
from langgraph.errors import InvalidUpdateError
from langgraph.pregel import Channel, Pregel
from langgraph.pregel.read import PregelNode
from langgraph.pregel.write import ChannelWrite, ChannelWriteEntry
from langgraph.types import All, Checkpointer
from langgraph.utils.runnable import RunnableCallable, coerce_to_runnable
# Module-level logger; used below to warn when a compiled graph is mutated.
logger = logging.getLogger(__name__)
class NodeSpec(NamedTuple):
    """Specification of a single graph node."""

    runnable: Runnable  # the action executed when the node runs
    metadata: Optional[dict[str, Any]] = None  # extra metadata attached to the node
    ends: Optional[tuple[str, ...]] = EMPTY_SEQ  # statically declared destinations, if any
class Branch(NamedTuple):
    """A conditional edge: `path` picks the destination label(s), `ends`
    maps labels to node names, and `then` is an optional follow-up node."""

    path: Runnable[Any, Union[Hashable, list[Hashable]]]
    ends: Optional[dict[Hashable, str]]
    then: Optional[str] = None

    def run(
        self,
        writer: Callable[
            [Sequence[Union[str, Send]], RunnableConfig], Optional[ChannelWrite]
        ],
        reader: Optional[Callable[[RunnableConfig], Any]] = None,
    ) -> RunnableCallable:
        """Wrap this branch as a runnable that routes and emits channel writes.

        Args:
            writer: Turns the chosen destinations into a ChannelWrite.
            reader: Optional callable that reads current state from config.
        """
        return ChannelWrite.register_writer(
            RunnableCallable(
                func=self._route,
                afunc=self._aroute,
                writer=writer,
                reader=reader,
                name=None,
                trace=False,
            )
        )

    def _route(
        self,
        input: Any,
        config: RunnableConfig,
        *,
        reader: Optional[Callable[[RunnableConfig], Any]],
        writer: Callable[
            [Sequence[Union[str, Send]], RunnableConfig], Optional[ChannelWrite]
        ],
    ) -> Runnable:
        """Sync routing: evaluate `path` and hand the result to _finish."""
        if reader:
            value = reader(config)
            # passthrough additional keys from node to branch
            # only doable when using dict states
            if isinstance(value, dict) and isinstance(input, dict):
                value = {**input, **value}
        else:
            value = input
        result = self.path.invoke(value, config)
        return self._finish(writer, input, result, config)

    async def _aroute(
        self,
        input: Any,
        config: RunnableConfig,
        *,
        reader: Optional[Callable[[RunnableConfig], Any]],
        writer: Callable[
            [Sequence[Union[str, Send]], RunnableConfig], Optional[ChannelWrite]
        ],
    ) -> Runnable:
        """Async routing: same as _route, with the reader run in a thread."""
        if reader:
            value = await asyncio.to_thread(reader, config)
            # passthrough additional keys from node to branch
            # only doable when using dict states
            if isinstance(value, dict) and isinstance(input, dict):
                value = {**input, **value}
        else:
            value = input
        result = await self.path.ainvoke(value, config)
        return self._finish(writer, input, result, config)

    def _finish(
        self,
        writer: Callable[
            [Sequence[Union[str, Send]], RunnableConfig], Optional[ChannelWrite]
        ],
        input: Any,
        result: Any,
        config: RunnableConfig,
    ) -> Union[Runnable, Any]:
        """Map the path result to destinations, validate, and emit writes."""
        # normalize the path result to a list of destinations
        if not isinstance(result, (list, tuple)):
            result = [result]
        if self.ends:
            # translate labels through the ends mapping; Send passes through
            destinations: Sequence[Union[Send, str]] = [
                r if isinstance(r, Send) else self.ends[r] for r in result
            ]
        else:
            destinations = cast(Sequence[Union[Send, str]], result)
        if any(dest is None or dest == START for dest in destinations):
            raise ValueError("Branch did not return a valid destination")
        if any(p.node == END for p in destinations if isinstance(p, Send)):
            raise InvalidUpdateError("Cannot send a packet to the END node")
        # fall back to returning input unchanged when the writer emits nothing
        return writer(destinations, config) or input
class Graph:
    """Mutable builder for a node/edge/branch graph, compiled into a Pregel
    executor via compile()."""

    def __init__(self) -> None:
        self.nodes: dict[str, NodeSpec] = {}  # node name -> spec
        self.edges = set[tuple[str, str]]()  # unconditional (start, end) pairs
        self.branches: defaultdict[str, dict[str, Branch]] = defaultdict(dict)
        self.support_multiple_edges = False
        self.compiled = False  # set True by validate(); mutations after warn

    @property
    def _all_edges(self) -> set[tuple[str, str]]:
        # Hook point: subclasses may contribute additional edges.
        return self.edges

    @overload
    def add_node(
        self,
        node: RunnableLike,
        *,
        metadata: Optional[dict[str, Any]] = None,
    ) -> Self: ...

    @overload
    def add_node(
        self,
        node: str,
        action: RunnableLike,
        *,
        metadata: Optional[dict[str, Any]] = None,
    ) -> Self: ...

    def add_node(
        self,
        node: Union[str, RunnableLike],
        action: Optional[RunnableLike] = None,
        *,
        metadata: Optional[dict[str, Any]] = None,
    ) -> Self:
        """Add a node; the name may be given explicitly or derived from the action.

        Raises:
            ValueError: for reserved characters/names, duplicates, or a
                missing node name.
            RuntimeError: if no action was supplied.
        """
        if isinstance(node, str):
            for character in (NS_SEP, NS_END):
                if character in node:
                    raise ValueError(
                        f"'{character}' is a reserved character and is not allowed in the node names."
                    )
        if self.compiled:
            logger.warning(
                "Adding a node to a graph that has already been compiled. This will "
                "not be reflected in the compiled graph."
            )
        if not isinstance(node, str):
            action = node
            # NOTE(review): the inner getattr has no default, so an action
            # without a __name__ raises AttributeError before the ValueError
            # below can fire — confirm this is intended.
            node = getattr(action, "name", getattr(action, "__name__"))
            if node is None:
                raise ValueError(
                    "Node name must be provided if action is not a function"
                )
        if action is None:
            raise RuntimeError(
                "Expected a function or Runnable action in add_node. Received None."
            )
        if node in self.nodes:
            raise ValueError(f"Node `{node}` already present.")
        if node == END or node == START:
            raise ValueError(f"Node `{node}` is reserved.")
        self.nodes[cast(str, node)] = NodeSpec(
            coerce_to_runnable(action, name=cast(str, node), trace=False), metadata
        )
        return self

    def add_edge(self, start_key: str, end_key: str) -> Self:
        """Add an unconditional edge from start_key to end_key."""
        if self.compiled:
            logger.warning(
                "Adding an edge to a graph that has already been compiled. This will "
                "not be reflected in the compiled graph."
            )
        if start_key == END:
            raise ValueError("END cannot be a start node")
        if end_key == START:
            raise ValueError("START cannot be an end node")
        # run this validation only for non-StateGraph graphs
        if not hasattr(self, "channels") and start_key in set(
            start for start, _ in self.edges
        ):
            raise ValueError(
                f"Already found path for node '{start_key}'.\n"
                "For multiple edges, use StateGraph with an Annotated state key."
            )
        self.edges.add((start_key, end_key))
        return self

    def add_conditional_edges(
        self,
        source: str,
        path: Union[
            Callable[..., Union[Hashable, list[Hashable]]],
            Callable[..., Awaitable[Union[Hashable, list[Hashable]]]],
            Runnable[Any, Union[Hashable, list[Hashable]]],
        ],
        path_map: Optional[Union[dict[Hashable, str], list[str]]] = None,
        then: Optional[str] = None,
    ) -> Self:
        """Add a conditional edge from the starting node to any number of destination nodes.

        Args:
            source (str): The starting node. This conditional edge will run when
                exiting this node.
            path (Union[Callable, Runnable]): The callable that determines the next
                node or nodes. If not specifying `path_map` it should return one or
                more nodes. If it returns END, the graph will stop execution.
            path_map (Optional[dict[Hashable, str]]): Optional mapping of paths to node
                names. If omitted the paths returned by `path` should be node names.
            then (Optional[str]): The name of a node to execute after the nodes
                selected by `path`.

        Returns:
            Self, for chaining.

        Note: Without typehints on the `path` function's return value (e.g., `-> Literal["foo", "__end__"]:`)
            or a path_map, the graph visualization assumes the edge could transition to any node in the graph.
        """  # noqa: E501
        if self.compiled:
            logger.warning(
                "Adding an edge to a graph that has already been compiled. This will "
                "not be reflected in the compiled graph."
            )
        # coerce path_map to a dictionary
        try:
            if isinstance(path_map, dict):
                path_map_ = path_map.copy()
            elif isinstance(path_map, list):
                path_map_ = {name: name for name in path_map}
            elif isinstance(path, Runnable):
                path_map_ = None
            elif rtn_type := get_type_hints(path.__call__).get(  # type: ignore[operator]
                "return"
            ) or get_type_hints(path).get("return"):
                # infer destinations from a Literal[...] return annotation
                if get_origin(rtn_type) is Literal:
                    path_map_ = {name: name for name in get_args(rtn_type)}
                else:
                    path_map_ = None
            else:
                path_map_ = None
        except Exception:
            # type-hint introspection is best-effort only
            path_map_ = None
        # find a name for the condition
        path = coerce_to_runnable(path, name=None, trace=True)
        name = path.name or "condition"
        # validate the condition
        if name in self.branches[source]:
            raise ValueError(
                f"Branch with name `{path.name}` already exists for node " f"`{source}`"
            )
        # save it
        self.branches[source][name] = Branch(path, path_map_, then)
        return self

    def set_entry_point(self, key: str) -> Self:
        """Specifies the first node to be called in the graph.

        Equivalent to calling `add_edge(START, key)`.

        Parameters:
            key (str): The key of the node to set as the entry point.

        Returns:
            Self, for chaining.
        """
        return self.add_edge(START, key)

    def set_conditional_entry_point(
        self,
        path: Union[
            Callable[..., Union[Hashable, list[Hashable]]],
            Callable[..., Awaitable[Union[Hashable, list[Hashable]]]],
            Runnable[Any, Union[Hashable, list[Hashable]]],
        ],
        path_map: Optional[Union[dict[Hashable, str], list[str]]] = None,
        then: Optional[str] = None,
    ) -> Self:
        """Sets a conditional entry point in the graph.

        Args:
            path (Union[Callable, Runnable]): The callable that determines the next
                node or nodes. If not specifying `path_map` it should return one or
                more nodes. If it returns END, the graph will stop execution.
            path_map (Optional[dict[str, str]]): Optional mapping of paths to node
                names. If omitted the paths returned by `path` should be node names.
            then (Optional[str]): The name of a node to execute after the nodes
                selected by `path`.

        Returns:
            Self, for chaining.
        """
        return self.add_conditional_edges(START, path, path_map, then)

    def set_finish_point(self, key: str) -> Self:
        """Marks a node as a finish point of the graph.

        If the graph reaches this node, it will cease execution.

        Parameters:
            key (str): The key of the node to set as the finish point.

        Returns:
            Self, for chaining.
        """
        return self.add_edge(key, END)

    def validate(self, interrupt: Optional[Sequence[str]] = None) -> Self:
        """Check that every edge/branch references known nodes and that an
        entrypoint exists; marks the graph as compiled on success."""
        # assemble sources
        all_sources = {src for src, _ in self._all_edges}
        for start, branches in self.branches.items():
            all_sources.add(start)
            for cond, branch in branches.items():
                if branch.then is not None:
                    if branch.ends is not None:
                        for end in branch.ends.values():
                            if end != END:
                                all_sources.add(end)
                    else:
                        # without an explicit ends map, any node (other than
                        # the source and `then`) may be a branch destination
                        for node in self.nodes:
                            if node != start and node != branch.then:
                                all_sources.add(node)
        for name, spec in self.nodes.items():
            if spec.ends:
                all_sources.add(name)
        # validate sources
        for source in all_sources:
            if source not in self.nodes and source != START:
                raise ValueError(f"Found edge starting at unknown node '{source}'")
        if START not in all_sources:
            raise ValueError(
                "Graph must have an entrypoint: add at least one edge from START to another node"
            )
        # assemble targets
        all_targets = {end for _, end in self._all_edges}
        for start, branches in self.branches.items():
            for cond, branch in branches.items():
                if branch.then is not None:
                    all_targets.add(branch.then)
                if branch.ends is not None:
                    for end in branch.ends.values():
                        if end not in self.nodes and end != END:
                            raise ValueError(
                                f"At '{start}' node, '{cond}' branch found unknown target '{end}'"
                            )
                        all_targets.add(end)
                else:
                    all_targets.add(END)
                    for node in self.nodes:
                        if node != start and node != branch.then:
                            all_targets.add(node)
        for name, spec in self.nodes.items():
            if spec.ends:
                all_targets.update(spec.ends)
        for target in all_targets:
            if target not in self.nodes and target != END:
                raise ValueError(f"Found edge ending at unknown node `{target}`")
        # validate interrupts
        if interrupt:
            for node in interrupt:
                if node not in self.nodes:
                    raise ValueError(f"Interrupt node `{node}` not found")
        self.compiled = True
        return self

    def compile(
        self,
        checkpointer: Checkpointer = None,
        interrupt_before: Optional[Union[All, list[str]]] = None,
        interrupt_after: Optional[Union[All, list[str]]] = None,
        debug: bool = False,
    ) -> "CompiledGraph":
        """Validate the graph and assemble it into a runnable CompiledGraph."""
        # assign default values
        interrupt_before = interrupt_before or []
        interrupt_after = interrupt_after or []
        # validate the graph; "*" (interrupt everywhere) is excluded from the
        # node-name check since it is not a node
        self.validate(
            interrupt=(
                (interrupt_before if interrupt_before != "*" else []) + interrupt_after
                if interrupt_after != "*"
                else []
            )
        )
        # create empty compiled graph
        compiled = CompiledGraph(
            builder=self,
            nodes={},
            channels={START: EphemeralValue(Any), END: EphemeralValue(Any)},
            input_channels=START,
            output_channels=END,
            stream_mode="values",
            stream_channels=[],
            checkpointer=checkpointer,
            interrupt_before_nodes=interrupt_before,
            interrupt_after_nodes=interrupt_after,
            auto_validate=False,
            debug=debug,
        )
        # attach nodes, edges, and branches
        for key, node in self.nodes.items():
            compiled.attach_node(key, node)
        for start, end in self.edges:
            compiled.attach_edge(start, end)
        for start, branches in self.branches.items():
            for name, branch in branches.items():
                compiled.attach_branch(start, name, branch)
        # validate the compiled graph
        return compiled.validate()
class CompiledGraph(Pregel):
    """A Graph assembled into Pregel channels/nodes, ready to execute."""

    builder: Graph  # the Graph this was compiled from

    def __init__(self, *, builder: Graph, **kwargs: Any) -> None:
        super().__init__(**kwargs)
        self.builder = builder

    def attach_node(self, key: str, node: NodeSpec) -> None:
        """Create the node's channel and wire its runnable to write there."""
        self.channels[key] = EphemeralValue(Any)
        self.nodes[key] = (
            PregelNode(channels=[], triggers=[], metadata=node.metadata)
            | node.runnable
            | ChannelWrite([ChannelWriteEntry(key)], tags=[TAG_HIDDEN])
        )
        cast(list[str], self.stream_channels).append(key)

    def attach_edge(self, start: str, end: str) -> None:
        """Wire an unconditional edge between two attached nodes."""
        if end == END:
            # publish to end channel
            self.nodes[start].writers.append(
                ChannelWrite([ChannelWriteEntry(END)], tags=[TAG_HIDDEN])
            )
        else:
            # subscribe to start channel
            self.nodes[end].triggers.append(start)
            cast(list[str], self.nodes[end].channels).append(start)

    def attach_branch(self, start: str, name: str, branch: Branch) -> None:
        """Wire a conditional branch: a writer on the source node plus one
        dedicated channel per possible destination."""

        def branch_writer(
            packets: Sequence[Union[str, Send]], config: RunnableConfig
        ) -> Optional[ChannelWrite]:
            # plain destinations become per-branch channel writes; Send
            # packets pass through untouched
            writes = [
                (
                    ChannelWriteEntry(f"branch:{start}:{name}:{p}" if p != END else END)
                    if not isinstance(p, Send)
                    else p
                )
                for p in packets
            ]
            return ChannelWrite(
                cast(Sequence[Union[ChannelWriteEntry, Send]], writes),
                tags=[TAG_HIDDEN],
            )

        # add hidden start node
        if start == START and start not in self.nodes:
            self.nodes[start] = Channel.subscribe_to(START, tags=[TAG_HIDDEN])
        # attach branch writer
        self.nodes[start] |= branch.run(branch_writer)
        # attach branch readers; without an explicit ends map every node is a
        # potential destination
        ends = branch.ends.values() if branch.ends else [node for node in self.nodes]
        for end in ends:
            if end != END:
                channel_name = f"branch:{start}:{name}:{end}"
                self.channels[channel_name] = EphemeralValue(Any)
                self.nodes[end].triggers.append(channel_name)
                cast(list[str], self.nodes[end].channels).append(channel_name)

    async def aget_graph(
        self,
        config: Optional[RunnableConfig] = None,
        *,
        xray: Union[int, bool] = False,
    ) -> DrawableGraph:
        """Async wrapper: drawing is pure computation, so delegate to get_graph."""
        return self.get_graph(config, xray=xray)

    def get_graph(
        self,
        config: Optional[RunnableConfig] = None,
        *,
        xray: Union[int, bool] = False,
    ) -> DrawableGraph:
        """Returns a drawable representation of the computation graph.

        Args:
            config: Optional runnable config used to resolve IO schemas.
            xray: If truthy, expand compiled subgraphs inline; an int limits
                the expansion depth.
        """
        graph = DrawableGraph()
        start_nodes: dict[str, DrawableNode] = {
            START: graph.add_node(self.get_input_schema(config), START)
        }
        end_nodes: dict[str, DrawableNode] = {}
        if xray:
            subgraphs = {
                k: v for k, v in self.get_subgraphs() if isinstance(v, CompiledGraph)
            }
        else:
            subgraphs = {}

        def add_edge(
            start: str,
            end: str,
            label: Optional[Hashable] = None,
            conditional: bool = False,
        ) -> None:
            # END's drawable node is created lazily, on first edge into it
            if end == END and END not in end_nodes:
                end_nodes[END] = graph.add_node(self.get_output_schema(config), END)
            return graph.add_edge(
                start_nodes[start],
                end_nodes[end],
                str(label) if label is not None else None,
                conditional,
            )

        for key, n in self.builder.nodes.items():
            node = n.runnable
            metadata = n.metadata or {}
            # surface interrupt configuration in the drawing metadata
            if key in self.interrupt_before_nodes and key in self.interrupt_after_nodes:
                metadata["__interrupt"] = "before,after"
            elif key in self.interrupt_before_nodes:
                metadata["__interrupt"] = "before"
            elif key in self.interrupt_after_nodes:
                metadata["__interrupt"] = "after"
            if xray and key in subgraphs:
                # inline the subgraph's drawing, decrementing integer xray depth
                subgraph = subgraphs[key].get_graph(
                    config=config,
                    xray=xray - 1
                    if isinstance(xray, int) and not isinstance(xray, bool) and xray > 0
                    else xray,
                )
                subgraph.trim_first_node()
                subgraph.trim_last_node()
                if len(subgraph.nodes) > 1:
                    e, s = graph.extend(subgraph, prefix=key)
                    if e is None:
                        raise ValueError(
                            f"Could not extend subgraph '{key}' due to missing entrypoint"
                        )
                    if s is not None:
                        start_nodes[key] = s
                    end_nodes[key] = e
                else:
                    # trivial subgraph: draw it as a single node
                    nn = graph.add_node(node, key, metadata=metadata or None)
                    start_nodes[key] = nn
                    end_nodes[key] = nn
            else:
                nn = graph.add_node(node, key, metadata=metadata or None)
                start_nodes[key] = nn
                end_nodes[key] = nn
        for start, end in sorted(self.builder._all_edges):
            add_edge(start, end)
        for start, branches in self.builder.branches.items():
            # when a branch has no ends map, it may reach any node but itself
            default_ends = {
                **{k: k for k in self.builder.nodes if k != start},
                END: END,
            }
            for _, branch in branches.items():
                if branch.ends is not None:
                    ends = branch.ends
                elif branch.then is not None:
                    ends = {k: k for k in default_ends if k not in (END, branch.then)}
                else:
                    ends = cast(dict[Hashable, str], default_ends)
                for label, end in ends.items():
                    add_edge(
                        start,
                        end,
                        label if label != end else None,
                        conditional=True,
                    )
                    if branch.then is not None:
                        add_edge(end, branch.then)
        for key, n in self.builder.nodes.items():
            if n.ends:
                for end in n.ends:
                    add_edge(key, end, conditional=True)
        return graph

    def _repr_mimebundle_(self, **kwargs: Any) -> dict[str, Any]:
        """Mime bundle used by Jupyter to display the graph"""
        return {
            "text/plain": repr(self),
            "image/png": self.get_graph().draw_mermaid_png(),
        }
|
0 | lc_public_repos/langgraph/libs/langgraph/langgraph | lc_public_repos/langgraph/libs/langgraph/langgraph/graph/message.py | import uuid
from typing import Annotated, TypedDict, Union, cast
from langchain_core.messages import (
AnyMessage,
BaseMessageChunk,
MessageLikeRepresentation,
RemoveMessage,
convert_to_messages,
message_chunk_to_message,
)
from langgraph.graph.state import StateGraph
# Input type accepted by add_messages: one message-like value or a list of them.
Messages = Union[list[MessageLikeRepresentation], MessageLikeRepresentation]
def add_messages(left: Messages, right: Messages) -> Messages:
    """Merges two lists of messages, updating existing messages by ID.

    By default, this ensures the state is "append-only", unless the
    new message has the same ID as an existing message.

    Args:
        left: The base list of messages.
        right: The list of messages (or single message) to merge
            into the base list.

    Returns:
        A new list of messages with the messages from `right` merged into `left`.
        If a message in `right` has the same ID as a message in `left`, the
        message from `right` will replace the message from `left`.

    Examples:
        ```pycon
        >>> from langchain_core.messages import AIMessage, HumanMessage
        >>> msgs1 = [HumanMessage(content="Hello", id="1")]
        >>> msgs2 = [AIMessage(content="Hi there!", id="2")]
        >>> add_messages(msgs1, msgs2)
        [HumanMessage(content='Hello', id='1'), AIMessage(content='Hi there!', id='2')]

        >>> msgs1 = [HumanMessage(content="Hello", id="1")]
        >>> msgs2 = [HumanMessage(content="Hello again", id="1")]
        >>> add_messages(msgs1, msgs2)
        [HumanMessage(content='Hello again', id='1')]

        >>> from typing import Annotated
        >>> from typing_extensions import TypedDict
        >>> from langgraph.graph import StateGraph
        >>>
        >>> class State(TypedDict):
        ...     messages: Annotated[list, add_messages]
        ...
        >>> builder = StateGraph(State)
        >>> builder.add_node("chatbot", lambda state: {"messages": [("assistant", "Hello")]})
        >>> builder.set_entry_point("chatbot")
        >>> builder.set_finish_point("chatbot")
        >>> graph = builder.compile()
        >>> graph.invoke({})
        {'messages': [AIMessage(content='Hello', id=...)]}
        ```
    """

    def _normalize(msgs: Messages) -> list:
        # coerce a single message (or message-like) to a list, convert each
        # entry to a concrete message, and backfill any missing ids
        items = msgs if isinstance(msgs, list) else [msgs]
        coerced = [
            message_chunk_to_message(cast(BaseMessageChunk, m))
            for m in convert_to_messages(items)
        ]
        for m in coerced:
            if m.id is None:
                m.id = str(uuid.uuid4())
        return coerced

    left = _normalize(left)
    right = _normalize(right)
    # merge: messages from `right` replace same-id messages in `left`,
    # RemoveMessage entries mark existing ids for deletion
    index_of = {m.id: i for i, m in enumerate(left)}
    result = list(left)
    removed_ids: set = set()
    for msg in right:
        pos = index_of.get(msg.id)
        if pos is None:
            if isinstance(msg, RemoveMessage):
                raise ValueError(
                    f"Attempting to delete a message with an ID that doesn't exist ('{msg.id}')"
                )
            result.append(msg)
        elif isinstance(msg, RemoveMessage):
            removed_ids.add(msg.id)
        else:
            result[pos] = msg
    return [m for m in result if m.id not in removed_ids]
class MessageGraph(StateGraph):
    """A StateGraph where every node receives a list of messages as input and returns one or more messages as output.

    MessageGraph is a subclass of StateGraph whose entire state is a single, append-only* list of messages.
    Each node in a MessageGraph takes a list of messages as input and returns zero or more
    messages as output. The `add_messages` function is used to merge the output messages from each node
    into the existing list of messages in the graph's state.

    Examples:
        ```pycon
        >>> from langgraph.graph.message import MessageGraph
        ...
        >>> builder = MessageGraph()
        >>> builder.add_node("chatbot", lambda state: [("assistant", "Hello!")])
        >>> builder.set_entry_point("chatbot")
        >>> builder.set_finish_point("chatbot")
        >>> builder.compile().invoke([("user", "Hi there.")])
        [HumanMessage(content="Hi there.", id='...'), AIMessage(content="Hello!", id='...')]
        ```

        ```pycon
        >>> from langchain_core.messages import AIMessage, HumanMessage, ToolMessage
        >>> from langgraph.graph.message import MessageGraph
        ...
        >>> builder = MessageGraph()
        >>> builder.add_node(
        ...     "chatbot",
        ...     lambda state: [
        ...         AIMessage(
        ...             content="Hello!",
        ...             tool_calls=[{"name": "search", "id": "123", "args": {"query": "X"}}],
        ...         )
        ...     ],
        ... )
        >>> builder.add_node(
        ...     "search", lambda state: [ToolMessage(content="Searching...", tool_call_id="123")]
        ... )
        >>> builder.set_entry_point("chatbot")
        >>> builder.add_edge("chatbot", "search")
        >>> builder.set_finish_point("search")
        >>> builder.compile().invoke([HumanMessage(content="Hi there. Can you search for X?")])
        {'messages': [HumanMessage(content="Hi there. Can you search for X?", id='b8b7d8f4-7f4d-4f4d-9c1d-f8b8d8f4d9c1'),
        AIMessage(content="Hello!", id='f4d9c1d8-8d8f-4d9c-b8b7-d8f4f4d9c1d8'),
        ToolMessage(content="Searching...", id='d8f4f4d9-c1d8-4f4d-b8b7-d8f4f4d9c1d8', tool_call_id="123")]}
        ```
    """

    def __init__(self) -> None:
        # The whole graph state is one list of messages, reduced with
        # `add_messages` so node outputs are merged/appended by message ID.
        super().__init__(Annotated[list[AnyMessage], add_messages])  # type: ignore[arg-type]
class MessagesState(TypedDict):
    """Prebuilt state schema with a single `messages` key reduced by `add_messages`."""

    # Conversation history; `add_messages` appends new messages and
    # replaces/removes existing ones by ID.
    messages: Annotated[list[AnyMessage], add_messages]
|
0 | lc_public_repos/langgraph/libs/langgraph/langgraph | lc_public_repos/langgraph/libs/langgraph/langgraph/graph/state.py | import inspect
import logging
import typing
import warnings
from functools import partial
from inspect import isclass, isfunction, ismethod, signature
from types import FunctionType
from typing import (
Any,
Callable,
Literal,
NamedTuple,
Optional,
Sequence,
Type,
Union,
cast,
get_args,
get_origin,
get_type_hints,
overload,
)
from langchain_core.runnables import Runnable, RunnableConfig
from langchain_core.runnables.base import RunnableLike
from pydantic import BaseModel
from pydantic.v1 import BaseModel as BaseModelV1
from typing_extensions import Self
from langgraph._api.deprecation import LangGraphDeprecationWarning
from langgraph.channels.base import BaseChannel
from langgraph.channels.binop import BinaryOperatorAggregate
from langgraph.channels.dynamic_barrier_value import DynamicBarrierValue, WaitForNames
from langgraph.channels.ephemeral_value import EphemeralValue
from langgraph.channels.last_value import LastValue
from langgraph.channels.named_barrier_value import NamedBarrierValue
from langgraph.constants import EMPTY_SEQ, NS_END, NS_SEP, SELF, TAG_HIDDEN
from langgraph.errors import (
ErrorCode,
InvalidUpdateError,
ParentCommand,
create_error_message,
)
from langgraph.graph.graph import END, START, Branch, CompiledGraph, Graph, Send
from langgraph.managed.base import (
ChannelKeyPlaceholder,
ChannelTypePlaceholder,
ConfiguredManagedValue,
ManagedValueSpec,
is_managed_value,
is_writable_managed_value,
)
from langgraph.pregel.read import ChannelRead, PregelNode
from langgraph.pregel.write import (
ChannelWrite,
ChannelWriteEntry,
ChannelWriteTupleEntry,
)
from langgraph.store.base import BaseStore
from langgraph.types import All, Checkpointer, Command, RetryPolicy
from langgraph.utils.fields import get_field_default
from langgraph.utils.pydantic import create_model
from langgraph.utils.runnable import RunnableCallable, coerce_to_runnable
logger = logging.getLogger(__name__)
def _warn_invalid_state_schema(schema: Union[Type[Any], Any]) -> None:
    """Warn when *schema* is neither a class nor a subscripted generic.

    Plain classes and parameterized types (e.g. ``Annotated[int, reducer]``)
    are accepted silently; anything else triggers a UserWarning.
    """
    looks_valid = isinstance(schema, type) or bool(typing.get_args(schema))
    if looks_valid:
        return
    warnings.warn(
        f"Invalid state_schema: {schema}. Expected a type or Annotated[type, reducer]. "
        "Please provide a valid schema to ensure correct updates.\n"
        " See: https://langchain-ai.github.io/langgraph/reference/graphs/#stategraph"
    )
def _get_node_name(node: RunnableLike) -> str:
    """Infer a node name from a runnable or a plain callable.

    Raises:
        TypeError: if *node* is neither a Runnable nor callable.
    """
    if isinstance(node, Runnable):
        return node.get_name()
    if callable(node):
        # Fall back to the class name for callables without __name__
        # (e.g. instances implementing __call__).
        fallback = node.__class__.__name__
        return getattr(node, "__name__", fallback)
    raise TypeError(f"Unsupported node type: {type(node)}")
class StateNodeSpec(NamedTuple):
    """Internal record describing a node added to a StateGraph."""

    runnable: Runnable  # the action executed when the node runs
    metadata: Optional[dict[str, Any]]  # user-supplied metadata, if any
    input: Type[Any]  # schema used to read/coerce this node's input state
    retry_policy: Optional[RetryPolicy]  # retry behavior for this node
    # Static goto targets inferred from a `Command[Literal[...]]` return
    # annotation in `add_node`; EMPTY_SEQ when none were declared.
    ends: Optional[tuple[str, ...]] = EMPTY_SEQ
class StateGraph(Graph):
    """A graph whose nodes communicate by reading and writing to a shared state.
    The signature of each node is State -> Partial<State>.

    Each state key can optionally be annotated with a reducer function that
    will be used to aggregate the values of that key received from multiple nodes.
    The signature of a reducer function is (Value, Value) -> Value.

    Args:
        state_schema (Type[Any]): The schema class that defines the state.
        config_schema (Optional[Type[Any]]): The schema class that defines the configuration.
            Use this to expose configurable parameters in your API.

    Examples:
        >>> from langchain_core.runnables import RunnableConfig
        >>> from typing_extensions import Annotated, TypedDict
        >>> from langgraph.checkpoint.memory import MemorySaver
        >>> from langgraph.graph import StateGraph
        >>>
        >>> def reducer(a: list, b: int | None) -> list:
        ...     if b is not None:
        ...         return a + [b]
        ...     return a
        >>>
        >>> class State(TypedDict):
        ...     x: Annotated[list, reducer]
        >>>
        >>> class ConfigSchema(TypedDict):
        ...     r: float
        >>>
        >>> graph = StateGraph(State, config_schema=ConfigSchema)
        >>>
        >>> def node(state: State, config: RunnableConfig) -> dict:
        ...     r = config["configurable"].get("r", 1.0)
        ...     x = state["x"][-1]
        ...     next_value = x * r * (1 - x)
        ...     return {"x": next_value}
        >>>
        >>> graph.add_node("A", node)
        >>> graph.set_entry_point("A")
        >>> graph.set_finish_point("A")
        >>> compiled = graph.compile()
        >>>
        >>> print(compiled.config_specs)
        [ConfigurableFieldSpec(id='r', annotation=<class 'float'>, name=None, description=None, default=None, is_shared=False, dependencies=None)]
        >>>
        >>> step1 = compiled.invoke({"x": 0.5}, {"configurable": {"r": 3.0}})
        >>> print(step1)
        {'x': [0.5, 0.75]}"""

    # node name -> spec recorded by `add_node`
    nodes: dict[str, StateNodeSpec]  # type: ignore[assignment]
    # state key -> channel instance collected from the registered schemas
    channels: dict[str, BaseChannel]
    # state key -> managed value spec collected from the registered schemas
    managed: dict[str, ManagedValueSpec]
    # each registered schema class -> its full {key: channel-or-managed} mapping
    schemas: dict[Type[Any], dict[str, Union[BaseChannel, ManagedValueSpec]]]
def __init__(
    self,
    state_schema: Optional[Type[Any]] = None,
    config_schema: Optional[Type[Any]] = None,
    *,
    input: Optional[Type[Any]] = None,
    output: Optional[Type[Any]] = None,
) -> None:
    """Initialize the graph, registering state/input/output schemas."""
    super().__init__()
    if state_schema is not None:
        # Input/output default to the full state schema unless overridden.
        input = state_schema if input is None else input
        output = state_schema if output is None else output
    else:
        if input is None or output is None:
            raise ValueError("Must provide state_schema or input and output")
        state_schema = input
        warnings.warn(
            "Initializing StateGraph without state_schema is deprecated. "
            "Please pass in an explicit state_schema instead of just an input and output schema.",
            LangGraphDeprecationWarning,
            stacklevel=2,
        )
    self.schemas = {}
    self.channels = {}
    self.managed = {}
    self.schema = state_schema
    self.input = input
    self.output = output
    # Managed values are only permitted in the main state schema,
    # not in the input/output schemas.
    self._add_schema(state_schema)
    self._add_schema(input, allow_managed=False)
    self._add_schema(output, allow_managed=False)
    self.config_schema = config_schema
    self.waiting_edges: set[tuple[tuple[str, ...], str]] = set()
@property
def _all_edges(self) -> set[tuple[str, str]]:
    """All plain edges plus each (start, end) pair implied by waiting edges."""
    expanded = {
        (start, end)
        for starts, end in self.waiting_edges
        for start in starts
    }
    return self.edges | expanded
def _add_schema(self, schema: Type[Any], /, allow_managed: bool = True) -> None:
    """Register *schema*'s channels and managed values, merging with prior schemas.

    Raises:
        ValueError: if managed values appear where not allowed, or a key
            conflicts with an existing channel/managed value of a different type.
    """
    if schema in self.schemas:
        return
    _warn_invalid_state_schema(schema)
    channels, managed = _get_channels(schema)
    if managed and not allow_managed:
        names = ", ".join(managed)
        schema_name = getattr(schema, "__name__", "")
        raise ValueError(
            f"Invalid managed channels detected in {schema_name}: {names}."
            " Managed channels are not permitted in Input/Output schema."
        )
    self.schemas[schema] = {**channels, **managed}
    for key, channel in channels.items():
        if key not in self.channels:
            self.channels[key] = channel
        elif self.channels[key] != channel and not isinstance(channel, LastValue):
            # Differing LastValue channels are tolerated; any other mismatch
            # is a conflict between schemas.
            raise ValueError(
                f"Channel '{key}' already exists with a different type"
            )
    for key, spec in managed.items():
        if key not in self.managed:
            self.managed[key] = spec
        elif self.managed[key] != spec:
            raise ValueError(
                f"Managed value '{key}' already exists with a different type"
            )
@overload
def add_node(
    self,
    node: RunnableLike,
    *,
    metadata: Optional[dict[str, Any]] = None,
    input: Optional[Type[Any]] = None,
    retry: Optional[RetryPolicy] = None,
) -> Self:
    """Adds a new node to the state graph.
    Will take the name of the function/runnable as the node name.

    Args:
        node (RunnableLike): The function or runnable this node will run.

    Raises:
        ValueError: If the key is already being used as a state key.

    Returns:
        StateGraph
    """
    ...

@overload
def add_node(
    self,
    node: str,
    action: RunnableLike,
    *,
    metadata: Optional[dict[str, Any]] = None,
    input: Optional[Type[Any]] = None,
    retry: Optional[RetryPolicy] = None,
) -> Self:
    """Adds a new node to the state graph.

    Args:
        node (str): The key of the node.
        action (RunnableLike): The action associated with the node.

    Raises:
        ValueError: If the key is already being used as a state key.

    Returns:
        StateGraph
    """
    ...

def add_node(
    self,
    node: Union[str, RunnableLike],
    action: Optional[RunnableLike] = None,
    *,
    metadata: Optional[dict[str, Any]] = None,
    input: Optional[Type[Any]] = None,
    retry: Optional[RetryPolicy] = None,
) -> Self:
    """Adds a new node to the state graph.
    Will take the name of the function/runnable as the node name.

    Args:
        node (Union[str, RunnableLike)]: The function or runnable this node will run.
        action (Optional[RunnableLike]): The action associated with the node. (default: None)
        metadata (Optional[dict[str, Any]]): The metadata associated with the node. (default: None)
        input (Optional[Type[Any]]): The input schema for the node. (default: the graph's input schema)
        retry (Optional[RetryPolicy]): The policy for retrying the node. (default: None)

    Raises:
        ValueError: If the key is already being used as a state key.

    Examples:
        ```pycon
        >>> from langgraph.graph import START, StateGraph
        ...
        >>> def my_node(state, config):
        ...    return {"x": state["x"] + 1}
        ...
        >>> builder = StateGraph(dict)
        >>> builder.add_node(my_node)  # node name will be 'my_node'
        >>> builder.add_edge(START, "my_node")
        >>> graph = builder.compile()
        >>> graph.invoke({"x": 1})
        {'x': 2}
        ```
        Customize the name:

        ```pycon
        >>> builder = StateGraph(dict)
        >>> builder.add_node("my_fair_node", my_node)
        >>> builder.add_edge(START, "my_fair_node")
        >>> graph = builder.compile()
        >>> graph.invoke({"x": 1})
        {'x': 2}
        ```

    Returns:
        StateGraph
    """
    if not isinstance(node, str):
        action = node
        if isinstance(action, Runnable):
            node = action.get_name()
        else:
            node = getattr(action, "__name__", action.__class__.__name__)
        if node is None:
            raise ValueError(
                "Node name must be provided if action is not a function"
            )
    # NOTE: `node` is always a str past this point (or an error was raised);
    # a second, identical name-coercion branch used to live here and was
    # unreachable — it has been removed.
    if node in self.channels:
        raise ValueError(f"'{node}' is already being used as a state key")
    if self.compiled:
        logger.warning(
            "Adding a node to a graph that has already been compiled. This will "
            "not be reflected in the compiled graph."
        )
    if action is None:
        raise RuntimeError
    if node in self.nodes:
        raise ValueError(f"Node `{node}` already present.")
    if node == END or node == START:
        raise ValueError(f"Node `{node}` is reserved.")
    for character in (NS_SEP, NS_END):
        if character in cast(str, node):
            raise ValueError(
                f"'{character}' is a reserved character and is not allowed in the node names."
            )
    ends = EMPTY_SEQ
    try:
        # Inspect the action's type hints to (a) infer the node's input schema
        # from its first parameter annotation and (b) collect static goto
        # targets from a `Command[Literal[...]]` return annotation.
        if (isfunction(action) or ismethod(getattr(action, "__call__", None))) and (
            hints := get_type_hints(getattr(action, "__call__"))
            or get_type_hints(action)
        ):
            if input is None:
                first_parameter_name = next(
                    iter(
                        inspect.signature(
                            cast(FunctionType, action)
                        ).parameters.keys()
                    )
                )
                if input_hint := hints.get(first_parameter_name):
                    if isinstance(input_hint, type) and get_type_hints(input_hint):
                        input = input_hint
            if (
                (rtn := hints.get("return"))
                and get_origin(rtn) is Command
                and (rargs := get_args(rtn))
                and get_origin(rargs[0]) is Literal
                and (vals := get_args(rargs[0]))
            ):
                ends = vals
    except (TypeError, StopIteration):
        # Builtins / exotic callables may not support introspection; fall
        # back to the graph's default schema and no static ends.
        pass
    if input is not None:
        self._add_schema(input)
    self.nodes[cast(str, node)] = StateNodeSpec(
        coerce_to_runnable(action, name=cast(str, node), trace=False),
        metadata,
        input=input or self.schema,
        retry_policy=retry,
        ends=ends,
    )
    return self
def add_edge(self, start_key: Union[str, list[str]], end_key: str) -> Self:
    """Add a directed edge from the start node(s) to the end node.

    When a list of start keys is given, the end node runs only after ALL
    of the start nodes have finished (a "waiting" edge).

    Args:
        start_key (Union[str, list[str]]): The key(s) of the start node(s) of the edge.
        end_key (str): The key of the end node of the edge.

    Raises:
        ValueError: If the start key is 'END' or if the start key or end key is not present in the graph.

    Returns:
        StateGraph
    """
    if isinstance(start_key, str):
        # Single-start edges are handled by the base Graph implementation.
        return super().add_edge(start_key, end_key)
    if self.compiled:
        logger.warning(
            "Adding an edge to a graph that has already been compiled. This will "
            "not be reflected in the compiled graph."
        )
    # validate every start node
    for node_name in start_key:
        if node_name == END:
            raise ValueError("END cannot be a start node")
        if node_name not in self.nodes:
            raise ValueError(f"Need to add_node `{node_name}` first")
    # validate the end node
    if end_key == START:
        raise ValueError("START cannot be an end node")
    if end_key != END and end_key not in self.nodes:
        raise ValueError(f"Need to add_node `{end_key}` first")
    self.waiting_edges.add((tuple(start_key), end_key))
    return self
def add_sequence(
    self,
    nodes: Sequence[Union[RunnableLike, tuple[str, RunnableLike]]],
) -> Self:
    """Add a sequence of nodes that will be executed in the provided order.

    Args:
        nodes: A sequence of RunnableLike objects (e.g. a LangChain Runnable or a callable) or (name, RunnableLike) tuples.
            If no names are provided, the name will be inferred from the node object (e.g. a runnable or a callable name).
            Each node will be executed in the order provided.

    Raises:
        ValueError: if the sequence is empty.
        ValueError: if the sequence contains duplicate node names.

    Returns:
        StateGraph
    """
    if not nodes:
        raise ValueError("Sequence requires at least one node.")
    prev: Optional[str] = None
    for entry in nodes:
        # Accept either a bare runnable/callable or an explicit (name, node) pair.
        if isinstance(entry, tuple) and len(entry) == 2:
            name, runnable = entry
        else:
            name, runnable = _get_node_name(entry), entry
        if name in self.nodes:
            raise ValueError(
                f"Node names must be unique: node with the name '{name}' already exists. "
                "If you need to use two different runnables/callables with the same name (for example, using `lambda`), please provide them as tuples (name, runnable/callable)."
            )
        self.add_node(name, runnable)
        if prev is not None:
            self.add_edge(prev, name)
        prev = name
    return self
def compile(
    self,
    checkpointer: Checkpointer = None,
    *,
    store: Optional[BaseStore] = None,
    interrupt_before: Optional[Union[All, list[str]]] = None,
    interrupt_after: Optional[Union[All, list[str]]] = None,
    debug: bool = False,
) -> "CompiledStateGraph":
    """Compiles the state graph into a `CompiledGraph` object.

    The compiled graph implements the `Runnable` interface and can be invoked,
    streamed, batched, and run asynchronously.

    Args:
        checkpointer (Optional[Union[Checkpointer, Literal[False]]]): A checkpoint saver object or flag.
            If provided, this Checkpointer serves as a fully versioned "short-term memory" for the graph,
            allowing it to be paused, resumed, and replayed from any point.
            If None, it may inherit the parent graph's checkpointer when used as a subgraph.
            If False, it will not use or inherit any checkpointer.
        interrupt_before (Optional[Sequence[str]]): An optional list of node names to interrupt before.
        interrupt_after (Optional[Sequence[str]]): An optional list of node names to interrupt after.
        debug (bool): A flag indicating whether to enable debug mode.

    Returns:
        CompiledStateGraph: The compiled state graph.
    """
    # assign default values
    interrupt_before = interrupt_before or []
    interrupt_after = interrupt_after or []
    # validate the graph
    # NOTE(review): "*" appears to be the all-nodes sentinel; when either
    # interrupt list is "*" the concatenated per-node list is skipped.
    self.validate(
        interrupt=(
            (interrupt_before if interrupt_before != "*" else []) + interrupt_after
            if interrupt_after != "*"
            else []
        )
    )
    # prepare output channels
    # A schema with a single "__root__" key means the whole state is one value.
    output_channels = (
        "__root__"
        if len(self.schemas[self.output]) == 1
        and "__root__" in self.schemas[self.output]
        else [
            key
            for key, val in self.schemas[self.output].items()
            if not is_managed_value(val)
        ]
    )
    stream_channels = (
        "__root__"
        if len(self.channels) == 1 and "__root__" in self.channels
        else [
            key for key, val in self.channels.items() if not is_managed_value(val)
        ]
    )
    compiled = CompiledStateGraph(
        builder=self,
        config_type=self.config_schema,
        nodes={},
        channels={
            **self.channels,
            **self.managed,
            START: EphemeralValue(self.input),
        },
        input_channels=START,
        stream_mode="updates",
        output_channels=output_channels,
        stream_channels=stream_channels,
        checkpointer=checkpointer,
        interrupt_before_nodes=interrupt_before,
        interrupt_after_nodes=interrupt_after,
        auto_validate=False,
        debug=debug,
        store=store,
    )
    # Wire up the virtual START node, every user node, the control branch
    # (for Command routing), all edges, and all conditional branches.
    compiled.attach_node(START, None)
    for key, node in self.nodes.items():
        compiled.attach_node(key, node)
    for key, node in self.nodes.items():
        compiled.attach_branch(key, SELF, CONTROL_BRANCH, with_reader=False)
    for start, end in self.edges:
        compiled.attach_edge(start, end)
    for starts, end in self.waiting_edges:
        compiled.attach_edge(starts, end)
    for start, branches in self.branches.items():
        for name, branch in branches.items():
            compiled.attach_branch(start, name, branch)
    return compiled.validate()
class CompiledStateGraph(CompiledGraph):
    """Runnable produced by `StateGraph.compile`, wiring nodes to Pregel channels."""

    # the StateGraph this compiled graph was built from
    builder: StateGraph
def get_input_schema(
    self, config: Optional[RunnableConfig] = None
) -> type[BaseModel]:
    """Build a pydantic model describing this graph's input state."""
    builder = self.builder
    return _get_schema(
        typ=builder.input,
        schemas=builder.schemas,
        channels=builder.channels,
        name=self.get_name("Input"),
    )
def get_output_schema(
    self, config: Optional[RunnableConfig] = None
) -> type[BaseModel]:
    """Build a pydantic model describing this graph's output state."""
    builder = self.builder
    return _get_schema(
        typ=builder.output,
        schemas=builder.schemas,
        channels=builder.channels,
        name=self.get_name("Output"),
    )
def attach_node(self, key: str, node: Optional[StateNodeSpec]) -> None:
    """Create the PregelNode for `key` and register its channels/writers.

    For START, the node publishes the graph input; for regular nodes, it
    reads the node's input schema from the state channels, runs the bound
    runnable, and writes the returned updates back to the state channels.
    """
    if key == START:
        # START may only write keys present in the input schema.
        output_keys = [
            k
            for k, v in self.builder.schemas[self.builder.input].items()
            if not is_managed_value(v)
        ]
    else:
        # Regular nodes may write any state channel plus writable managed values.
        output_keys = list(self.builder.channels) + [
            k
            for k, v in self.builder.managed.items()
            if is_writable_managed_value(v)
        ]

    def _get_root(input: Any) -> Optional[Sequence[tuple[str, Any]]]:
        # Used when the whole state is a single "__root__" channel.
        # Returns None implicitly when input is None (no update).
        if (
            isinstance(input, (list, tuple))
            and input
            and all(isinstance(i, Command) for i in input)
        ):
            updates: list[tuple[str, Any]] = []
            for i in input:
                if i.graph == Command.PARENT:
                    # Parent-addressed commands are not applied here.
                    continue
                updates.extend(i._update_as_tuples())
            return updates
        elif isinstance(input, Command):
            if input.graph == Command.PARENT:
                return ()
            return input._update_as_tuples()
        elif input is not None:
            return [("__root__", input)]

    def _get_updates(
        input: Union[None, dict, Any],
    ) -> Optional[Sequence[tuple[str, Any]]]:
        # Translate a node's return value into (channel, value) update pairs.
        if input is None:
            return None
        elif isinstance(input, dict):
            # Only keys this node is allowed to write are kept.
            return [(k, v) for k, v in input.items() if k in output_keys]
        elif isinstance(input, Command):
            if input.graph == Command.PARENT:
                return None
            return input._update_as_tuples()
        elif (
            isinstance(input, (list, tuple))
            and input
            and all(isinstance(i, Command) for i in input)
        ):
            updates: list[tuple[str, Any]] = []
            for i in input:
                if i.graph == Command.PARENT:
                    continue
                updates.extend(i._update_as_tuples())
            return updates
        elif get_type_hints(type(input)):
            # Annotated object (e.g. dataclass/pydantic): read attributes.
            return [
                (k, getattr(input, k))
                for k in output_keys
                if getattr(input, k, None) is not None
            ]
        else:
            msg = create_error_message(
                message=f"Expected dict, got {input}",
                error_code=ErrorCode.INVALID_GRAPH_NODE_RETURN_VALUE,
            )
            raise InvalidUpdateError(msg)

    # state updaters
    write_entries: list[Union[ChannelWriteEntry, ChannelWriteTupleEntry]] = [
        ChannelWriteTupleEntry(
            mapper=_get_root if output_keys == ["__root__"] else _get_updates
        )
    ]
    # add node and output channel
    if key == START:
        self.nodes[key] = PregelNode(
            tags=[TAG_HIDDEN],
            triggers=[START],
            channels=[START],
            writers=[
                ChannelWrite(
                    write_entries,
                    tags=[TAG_HIDDEN],
                    require_at_least_one_of=output_keys,
                ),
            ],
        )
    elif node is not None:
        input_schema = node.input if node else self.builder.schema
        input_values = {k: k for k in self.builder.schemas[input_schema]}
        is_single_input = len(input_values) == 1 and "__root__" in input_values
        self.channels[key] = EphemeralValue(Any, guard=False)
        self.nodes[key] = PregelNode(
            triggers=[],
            # read state keys and managed values
            channels=(list(input_values) if is_single_input else input_values),
            # coerce state dict to schema class (eg. pydantic model)
            mapper=(
                None
                if is_single_input or issubclass(input_schema, dict)
                else partial(_coerce_state, input_schema)
            ),
            writers=[
                # publish to this channel and state keys
                ChannelWrite(
                    write_entries + [ChannelWriteEntry(key, key)],
                    tags=[TAG_HIDDEN],
                ),
            ],
            metadata=node.metadata,
            retry_policy=node.retry_policy,
            bound=node.runnable,
        )
    else:
        raise RuntimeError
def attach_edge(self, starts: Union[str, Sequence[str]], end: str) -> None:
    """Wire an edge into the compiled graph's channels and triggers."""
    if isinstance(starts, str):
        if starts == START:
            # Entry edge: route through a dedicated ephemeral channel.
            channel_name = f"start:{end}"
            # register channel
            self.channels[channel_name] = EphemeralValue(Any)
            # subscribe to channel
            self.nodes[end].triggers.append(channel_name)
            # publish to channel
            self.nodes[START] |= ChannelWrite(
                [ChannelWriteEntry(channel_name, START)], tags=[TAG_HIDDEN]
            )
            return
        if end != END:
            # Plain edge: the end node fires whenever the start node writes.
            self.nodes[end].triggers.append(starts)
        return
    if end == END:
        return
    # Multi-start (waiting) edge: end fires only after every start has written.
    channel_name = f"join:{'+'.join(starts)}:{end}"
    self.channels[channel_name] = NamedBarrierValue(str, set(starts))
    self.nodes[end].triggers.append(channel_name)
    for start in starts:
        self.nodes[start] |= ChannelWrite(
            [ChannelWriteEntry(channel_name, start)], tags=[TAG_HIDDEN]
        )
def attach_branch(
    self, start: str, name: str, branch: Branch, *, with_reader: bool = True
) -> None:
    """Wire a conditional branch from `start` into the compiled graph.

    Registers a writer on the start node that publishes the branch's chosen
    destinations, and subscribes each possible destination (and the
    optional `then` node) to the corresponding branch channels.
    """

    def branch_writer(
        packets: Sequence[Union[str, Send]], config: RunnableConfig
    ) -> None:
        # Publish one write per selected destination; END packets are dropped.
        if filtered := [p for p in packets if p != END]:
            writes = [
                (
                    ChannelWriteEntry(f"branch:{start}:{name}:{p}", start)
                    if not isinstance(p, Send)
                    else p
                )
                for p in filtered
            ]
            if branch.then and branch.then != END:
                # Record which destinations were chosen so the `then` node
                # can wait for all of them to finish.
                writes.append(
                    ChannelWriteEntry(
                        f"branch:{start}:{name}::then",
                        WaitForNames(
                            {p.node if isinstance(p, Send) else p for p in filtered}
                        ),
                    )
                )
            ChannelWrite.do_write(
                config, cast(Sequence[Union[Send, ChannelWriteEntry]], writes)
            )

    # attach branch publisher
    schema = (
        self.builder.nodes[start].input
        if start in self.builder.nodes
        else self.builder.schema
    )
    self.nodes[start] |= branch.run(
        branch_writer,
        _get_state_reader(self.builder, schema) if with_reader else None,
    )
    # attach branch subscribers
    ends = (
        branch.ends.values()
        if branch.ends
        else [node for node in self.builder.nodes if node != branch.then]
    )
    for end in ends:
        if end != END:
            channel_name = f"branch:{start}:{name}:{end}"
            self.channels[channel_name] = EphemeralValue(Any, guard=False)
            self.nodes[end].triggers.append(channel_name)
    # attach then subscriber
    if branch.then and branch.then != END:
        channel_name = f"branch:{start}:{name}::then"
        self.channels[channel_name] = DynamicBarrierValue(str)
        self.nodes[branch.then].triggers.append(channel_name)
        for end in ends:
            if end != END:
                self.nodes[end] |= ChannelWrite(
                    [ChannelWriteEntry(channel_name, end)], tags=[TAG_HIDDEN]
                )
def _get_state_reader(
    builder: StateGraph, schema: Type[Any]
) -> Callable[[RunnableConfig], Any]:
    """Build a reader that fetches fresh state for *schema* from the channels."""
    state_keys = list(builder.channels)
    select = list(builder.schemas[schema])
    single_root = select == ["__root__"]
    # Coercion is skipped for single-root state or dict-like schemas.
    needs_coercion = state_keys != ["__root__"] and not issubclass(schema, dict)
    return partial(
        ChannelRead.do_read,
        select=select[0] if single_root else select,
        fresh=True,
        # coerce the raw state dict into the schema class (eg. pydantic model)
        mapper=partial(_coerce_state, schema) if needs_coercion else None,
    )
def _coerce_state(schema: Type[Any], input: dict[str, Any]) -> dict[str, Any]:
    """Instantiate *schema* from a raw state mapping (e.g. build a pydantic model)."""
    return schema(**input)
def _control_branch(value: Any) -> Sequence[Union[str, Send]]:
    """Translate a node's Command/Send return value into goto targets.

    Raises:
        ParentCommand: for commands addressed to the parent graph.
    """
    if isinstance(value, Send):
        return [value]
    if isinstance(value, Command):
        commands = [value]
    elif (
        isinstance(value, (list, tuple))
        and value
        and all(isinstance(i, Command) for i in value)
    ):
        commands = list(value)
    else:
        # Anything else carries no routing information.
        return EMPTY_SEQ
    targets: list[Union[str, Send]] = []
    for cmd in commands:
        if cmd.graph == Command.PARENT:
            raise ParentCommand(cmd)
        goto = cmd.goto
        if isinstance(goto, (Send, str)):
            targets.append(goto)
        else:
            targets.extend(goto)
    return targets
async def _acontrol_branch(value: Any) -> Sequence[Union[str, Send]]:
    """Async counterpart of `_control_branch`.

    The translation itself is synchronous, so delegate to
    `_control_branch` rather than duplicating its logic.

    Raises:
        ParentCommand: for commands addressed to the parent graph.
    """
    return _control_branch(value)
# Branch used to route `Command` values returned by nodes; hidden from traces
# (TAG_HIDDEN) and evaluated without recursing into the returned value.
CONTROL_BRANCH_PATH = RunnableCallable(
    _control_branch, _acontrol_branch, tags=[TAG_HIDDEN], trace=False, recurse=False
)
CONTROL_BRANCH = Branch(CONTROL_BRANCH_PATH, None)
def _get_channels(
    schema: Type[dict],
) -> tuple[dict[str, BaseChannel], dict[str, ManagedValueSpec]]:
    """Split a state schema's fields into channels and managed-value specs."""
    if not hasattr(schema, "__annotations__"):
        # Un-annotated schema: treat the entire state as a single root channel.
        root = _get_channel("__root__", schema, allow_managed=False)
        return {"__root__": root}, {}
    hints = get_type_hints(schema, include_extras=True)
    resolved = {
        name: _get_channel(name, typ)
        for name, typ in hints.items()
        if name != "__slots__"
    }
    channels = {k: v for k, v in resolved.items() if isinstance(v, BaseChannel)}
    managed = {k: v for k, v in resolved.items() if is_managed_value(v)}
    return channels, managed
@overload
def _get_channel(
    name: str, annotation: Any, *, allow_managed: Literal[False]
) -> BaseChannel: ...

@overload
def _get_channel(
    name: str, annotation: Any, *, allow_managed: Literal[True] = True
) -> Union[BaseChannel, ManagedValueSpec]: ...

def _get_channel(
    name: str, annotation: Any, *, allow_managed: bool = True
) -> Union[BaseChannel, ManagedValueSpec]:
    """Resolve one schema field into a channel or managed-value spec.

    Resolution order: managed value, explicit channel annotation, binary
    reducer annotation, then a plain LastValue channel as the default.
    """
    if manager := _is_field_managed_value(name, annotation):
        if not allow_managed:
            raise ValueError(f"This {annotation} not allowed in this position")
        return manager
    if channel := _is_field_channel(annotation):
        channel.key = name
        return channel
    if reducer_channel := _is_field_binop(annotation):
        reducer_channel.key = name
        return reducer_channel
    # Default: a LastValue channel holding the annotated type.
    fallback: LastValue = LastValue(annotation)
    fallback.key = name
    return fallback
def _is_field_channel(typ: Type[Any]) -> Optional[BaseChannel]:
    """Return a channel for an Annotated field whose last metadata item is a
    BaseChannel instance or subclass; otherwise None."""
    meta = getattr(typ, "__metadata__", ())
    if not meta:
        return None
    marker = meta[-1]
    if isinstance(marker, BaseChannel):
        return marker
    if isclass(marker) and issubclass(marker, BaseChannel):
        # A channel class: instantiate it with the field's underlying type.
        return marker(typ.__origin__ if hasattr(typ, "__origin__") else typ)
    return None
def _is_field_binop(typ: Type[Any]) -> Optional[BinaryOperatorAggregate]:
    """Return a BinaryOperatorAggregate if the field is Annotated with a
    two-argument reducer callable; None when no callable metadata is present.

    Raises:
        ValueError: if the callable does not take exactly two positional args.
    """
    meta = getattr(typ, "__metadata__", ())
    if not meta or not callable(meta[-1]):
        return None
    reducer = meta[-1]
    sig = signature(reducer)
    params = list(sig.parameters.values())
    if len(params) == 2 and all(
        p.kind in (p.POSITIONAL_ONLY, p.POSITIONAL_OR_KEYWORD) for p in params
    ):
        return BinaryOperatorAggregate(typ, reducer)
    raise ValueError(
        f"Invalid reducer signature. Expected (a, b) -> c. Got {sig}"
    )
def _is_field_managed_value(name: str, typ: Type[Any]) -> Optional[ManagedValueSpec]:
    """Return the managed-value spec for an Annotated field, or None.

    For ConfiguredManagedValue specs, placeholder kwargs are replaced in
    place with the field's name and underlying type.
    """
    meta = getattr(typ, "__metadata__", ())
    if not meta:
        return None
    decoration = get_origin(meta[-1]) or meta[-1]
    if not is_managed_value(decoration):
        return None
    if isinstance(decoration, ConfiguredManagedValue):
        for k, v in decoration.kwargs.items():
            if v is ChannelKeyPlaceholder:
                decoration.kwargs[k] = name
            if v is ChannelTypePlaceholder:
                decoration.kwargs[k] = typ.__origin__
    return decoration
def _get_schema(
    typ: Type,
    schemas: dict,
    channels: dict,
    name: str,
) -> type[BaseModel]:
    """Build (or reuse) a pydantic model describing the given state schema."""
    if isclass(typ) and issubclass(typ, (BaseModel, BaseModelV1)):
        # Pydantic schemas already are models; return them as-is.
        return typ
    keys = list(schemas[typ].keys())
    if keys == ["__root__"]:
        # Single-root state: model with one `root` field of the channel's type.
        return create_model(
            name,
            root=(channels[keys[0]].UpdateType, None),
        )
    field_definitions = {
        k: (
            channels[k].UpdateType,
            get_field_default(k, channels[k].UpdateType, typ),
        )
        for k in schemas[typ]
        if k in channels and isinstance(channels[k], BaseChannel)
    }
    return create_model(name, field_definitions=field_definitions)
|
0 | lc_public_repos/langgraph/libs/langgraph/langgraph | lc_public_repos/langgraph/libs/langgraph/langgraph/graph/__init__.py | from langgraph.graph.graph import END, START, Graph
from langgraph.graph.message import MessageGraph, MessagesState, add_messages
from langgraph.graph.state import StateGraph
# Public surface of the ``langgraph.graph`` package; governs ``import *``
# and documents the names intended for external use.
__all__ = [
    "END",
    "START",
    "Graph",
    "StateGraph",
    "MessageGraph",
    "add_messages",
    "MessagesState",
]
|
0 | lc_public_repos/langgraph/libs/langgraph | lc_public_repos/langgraph/libs/langgraph/bench/react_agent.py | from typing import Any, Optional
from uuid import uuid4
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models.fake_chat_models import (
FakeMessagesListChatModel,
)
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage
from langchain_core.outputs import ChatGeneration, ChatResult
from langchain_core.tools import StructuredTool
from langgraph.checkpoint.base import BaseCheckpointSaver
from langgraph.prebuilt.chat_agent_executor import create_react_agent
from langgraph.pregel import Pregel
def react_agent(n_tools: int, checkpointer: Optional[BaseCheckpointSaver]) -> Pregel:
    """Build a prebuilt ReAct agent driven by a canned fake chat model.

    The fake model emits ``n_tools`` tool-call messages (one per canned
    response) followed by a final answer message, cycling back to the start
    when the list is exhausted.

    Args:
        n_tools: number of tool-call turns before the final answer.
        checkpointer: optional checkpoint saver to compile the agent with.

    Returns:
        The compiled agent graph.
    """

    class FakeFunctionChatModel(FakeMessagesListChatModel):
        def bind_tools(self, functions: list):
            # Tool binding is a no-op for the fake model.
            return self

        def _generate(
            self,
            messages: list[BaseMessage],
            stop: Optional[list[str]] = None,
            run_manager: Optional[CallbackManagerForLLMRun] = None,
            **kwargs: Any,
        ) -> ChatResult:
            response = self.responses[self.i].copy()
            # Advance the response cursor, wrapping around at the end.
            self.i = self.i + 1 if self.i < len(self.responses) - 1 else 0
            return ChatResult(generations=[ChatGeneration(message=response)])

    tool = StructuredTool.from_function(
        lambda query: f"result for query: {query}" * 10,
        name=str(uuid4()),
        description="",
    )
    tool_call_turns = [
        AIMessage(
            content="",
            tool_calls=[
                {
                    "id": str(uuid4()),
                    "name": tool.name,
                    "args": {"query": str(uuid4()) * 100},
                }
            ],
            id=str(uuid4()),
        )
        for _ in range(n_tools)
    ]
    final_turn = AIMessage(content="answer" * 100, id=str(uuid4()))
    model = FakeFunctionChatModel(responses=tool_call_turns + [final_turn])
    return create_react_agent(model, [tool], checkpointer=checkpointer)
if __name__ == "__main__":
    # Ad-hoc benchmark entry point: stream one run of a 100-tool agent on
    # uvloop and discard the chunks — only throughput is of interest.
    import asyncio
    import uvloop
    from langgraph.checkpoint.memory import MemorySaver
    graph = react_agent(100, checkpointer=MemorySaver())
    input = {"messages": [HumanMessage("hi?")]}
    # Effectively-unlimited recursion so the agent loop never hits the cap.
    config = {"configurable": {"thread_id": "1"}, "recursion_limit": 20000000000}
    async def run():
        # Exhaust the async stream; the materialized list is discarded.
        len([c async for c in graph.astream(input, config=config)])
    uvloop.install()
    asyncio.run(run())
|
0 | lc_public_repos/langgraph/libs/langgraph | lc_public_repos/langgraph/libs/langgraph/bench/wide_state.py | import operator
from dataclasses import dataclass, field
from functools import partial
from typing import Annotated, Optional, Sequence
from langgraph.constants import END, START
from langgraph.graph.state import StateGraph
def wide_state(n: int) -> StateGraph:
    """Build a benchmark StateGraph with a wide, reducer-heavy state.

    Six nodes shuttle values between the many ``State`` fields; node "six"
    loops back to "one" until ``messages`` grows past *n* entries, at which
    point the conditional edge routes to END.

    Args:
        n: message-count threshold that terminates the loop.

    Returns:
        The (uncompiled) StateGraph builder.
    """
    @dataclass(kw_only=True)
    class State:
        # Fields annotated with a reducer (operator.add / operator.or_ / a
        # lambda) merge concurrent updates; unannotated Optional fields are
        # plain last-write-wins channels.
        messages: Annotated[list, operator.add] = field(default_factory=list)
        trigger_events: Annotated[list, operator.add] = field(default_factory=list)
        """The external events that are converted by the graph."""
        primary_issue_medium: Annotated[str, lambda x, y: y or x] = field(
            default="email"
        )
        autoresponse: Annotated[Optional[dict], lambda _, y: y] = field(
            default=None
        )  # Always overwrite
        issue: Annotated[dict | None, lambda x, y: y if y else x] = field(default=None)
        relevant_rules: Optional[list[dict]] = field(default=None)
        """SOPs fetched from the rulebook that are relevant to the current conversation."""
        memory_docs: Optional[list[dict]] = field(default=None)
        """Memory docs fetched from the memory service that are relevant to the current conversation."""
        categorizations: Annotated[list[dict], operator.add] = field(
            default_factory=list
        )
        """The issue categorizations auto-generated by the AI."""
        responses: Annotated[list[dict], operator.add] = field(default_factory=list)
        """The draft responses recommended by the AI."""
        user_info: Annotated[Optional[dict], lambda x, y: y if y is not None else x] = (
            field(default=None)
        )
        """The current user state (by email)."""
        crm_info: Annotated[Optional[dict], lambda x, y: y if y is not None else x] = (
            field(default=None)
        )
        """The CRM information for organization the current user is from."""
        email_thread_id: Annotated[
            Optional[str], lambda x, y: y if y is not None else x
        ] = field(default=None)
        """The current email thread ID."""
        slack_participants: Annotated[dict, operator.or_] = field(default_factory=dict)
        """The growing list of current slack participants."""
        bot_id: Optional[str] = field(default=None)
        """The ID of the bot user in the slack channel."""
        notified_assignees: Annotated[dict, operator.or_] = field(default_factory=dict)
    def read_write(read: str, write: Sequence[str], input: State) -> dict:
        """Copy field *read* onto every field named in *write*, wrapping or
        unwrapping to match each target field's list-vs-scalar shape."""
        val = getattr(input, read)
        # Scalar view (last element) and list view of the source value.
        val_single = val[-1] if isinstance(val, list) else val
        val_list = val if isinstance(val, list) else [val]
        return {
            k: val_list if isinstance(getattr(input, k), list) else val_single
            for k in write
        }
    builder = StateGraph(State)
    # Topology: one -> two, then a three/four fan-out joined at five, then
    # six, whose conditional edge loops back to one until len(messages) > n.
    builder.add_edge(START, "one")
    builder.add_node(
        "one",
        partial(read_write, "messages", ["trigger_events", "primary_issue_medium"]),
    )
    builder.add_edge("one", "two")
    builder.add_node(
        "two",
        partial(read_write, "trigger_events", ["autoresponse", "issue"]),
    )
    builder.add_edge("two", "three")
    builder.add_edge("two", "four")
    builder.add_node(
        "three",
        partial(read_write, "autoresponse", ["relevant_rules"]),
    )
    builder.add_node(
        "four",
        partial(
            read_write,
            "trigger_events",
            ["categorizations", "responses", "memory_docs"],
        ),
    )
    builder.add_node(
        "five",
        partial(
            read_write,
            "categorizations",
            [
                "user_info",
                "crm_info",
                "email_thread_id",
                "slack_participants",
                "bot_id",
                "notified_assignees",
            ],
        ),
    )
    builder.add_edge(["three", "four"], "five")
    builder.add_edge("five", "six")
    builder.add_node(
        "six",
        partial(read_write, "responses", ["messages"]),
    )
    builder.add_conditional_edges(
        "six", lambda state: END if len(state.messages) > n else "one"
    )
    return builder
if __name__ == "__main__":
    # Ad-hoc benchmark entry point: run the wide-state graph once on uvloop
    # with a large nested-dict payload and print the chunk keys.
    import asyncio
    import uvloop
    from langgraph.checkpoint.memory import MemorySaver
    graph = wide_state(1000).compile(checkpointer=MemorySaver())
    # 5x5 nested dict of repeated mixed-type payloads as the initial message.
    input = {
        "messages": [
            {
                str(i) * 10: {
                    str(j) * 10: ["hi?" * 10, True, 1, 6327816386138, None] * 5
                    for j in range(5)
                }
                for i in range(5)
            }
        ]
    }
    # Effectively-unlimited recursion so the six-node loop never hits the cap.
    config = {"configurable": {"thread_id": "1"}, "recursion_limit": 20000000000}
    async def run():
        async for c in graph.astream(input, config=config):
            print(c.keys())
    uvloop.install()
    asyncio.run(run())
|
0 | lc_public_repos/langgraph/libs/langgraph | lc_public_repos/langgraph/libs/langgraph/bench/fanout_to_subgraph.py | import operator
from typing import Annotated, TypedDict
from langgraph.constants import END, START, Send
from langgraph.graph.state import StateGraph
def fanout_to_subgraph() -> StateGraph:
    """Build an (uncompiled) parent graph that fans out, via Send, to one
    subgraph run per subject.

    Each subgraph run edits the subject, generates a joke, and keeps
    appending " a" to it until ten suffixes have accumulated. All node
    functions are async.
    """

    class OverallState(TypedDict):
        subjects: list[str]
        jokes: Annotated[list[str], operator.add]

    async def continue_to_jokes(state: OverallState):
        # One Send packet per subject -> parallel subgraph invocations.
        return [Send("generate_joke", {"subject": subj}) for subj in state["subjects"]]

    class JokeInput(TypedDict):
        subject: str

    class JokeOutput(TypedDict):
        jokes: list[str]

    async def bump(state: JokeOutput):
        return {"jokes": [state["jokes"][0] + " a"]}

    async def generate(state: JokeInput):
        return {"jokes": [f"Joke about {state['subject']}"]}

    async def edit(state: JokeInput):
        return {"subject": f"{state['subject']} - hohoho"}

    async def bump_loop(state: JokeOutput):
        # Loop back to "bump" until ten " a" suffixes have accumulated.
        if state["jokes"][0].endswith(" a" * 10):
            return END
        return "bump"

    # subgraph: edit -> generate -> bump (+ bump self-loop; finish at generate)
    subgraph = StateGraph(input=JokeInput, output=JokeOutput)
    subgraph.add_node("edit", edit)
    subgraph.add_node("generate", generate)
    subgraph.add_node("bump", bump)
    subgraph.set_entry_point("edit")
    subgraph.add_edge("edit", "generate")
    subgraph.add_edge("generate", "bump")
    subgraph.add_conditional_edges("bump", bump_loop)
    subgraph.set_finish_point("generate")
    compiled_subgraph = subgraph.compile()

    # parent graph
    builder = StateGraph(OverallState)
    builder.add_node("generate_joke", compiled_subgraph)
    builder.add_conditional_edges(START, continue_to_jokes)
    builder.add_edge("generate_joke", END)
    return builder
def fanout_to_subgraph_sync() -> StateGraph:
    """Synchronous twin of ``fanout_to_subgraph``: identical topology built
    from plain (non-async) node functions."""

    class OverallState(TypedDict):
        subjects: list[str]
        jokes: Annotated[list[str], operator.add]

    def continue_to_jokes(state: OverallState):
        # One Send packet per subject -> parallel subgraph invocations.
        return [Send("generate_joke", {"subject": subj}) for subj in state["subjects"]]

    class JokeInput(TypedDict):
        subject: str

    class JokeOutput(TypedDict):
        jokes: list[str]

    def bump(state: JokeOutput):
        return {"jokes": [state["jokes"][0] + " a"]}

    def generate(state: JokeInput):
        return {"jokes": [f"Joke about {state['subject']}"]}

    def edit(state: JokeInput):
        return {"subject": f"{state['subject']} - hohoho"}

    def bump_loop(state: JokeOutput):
        # Loop back to "bump" until ten " a" suffixes have accumulated.
        if state["jokes"][0].endswith(" a" * 10):
            return END
        return "bump"

    # subgraph: edit -> generate -> bump (+ bump self-loop; finish at generate)
    subgraph = StateGraph(input=JokeInput, output=JokeOutput)
    subgraph.add_node("edit", edit)
    subgraph.add_node("generate", generate)
    subgraph.add_node("bump", bump)
    subgraph.set_entry_point("edit")
    subgraph.add_edge("edit", "generate")
    subgraph.add_edge("generate", "bump")
    subgraph.add_conditional_edges("bump", bump_loop)
    subgraph.set_finish_point("generate")
    compiled_subgraph = subgraph.compile()

    # parent graph
    builder = StateGraph(OverallState)
    builder.add_node("generate_joke", compiled_subgraph)
    builder.add_conditional_edges(START, continue_to_jokes)
    builder.add_edge("generate_joke", END)
    return builder
if __name__ == "__main__":
    # Ad-hoc benchmark entry point: fan out 1000 random subjects through the
    # joke subgraph on uvloop and discard the stream output.
    import asyncio
    import random
    import uvloop
    from langgraph.checkpoint.memory import MemorySaver
    graph = fanout_to_subgraph().compile(checkpointer=MemorySaver())
    input = {
        "subjects": [
            random.choices("abcdefghijklmnopqrstuvwxyz", k=1000) for _ in range(1000)
        ]
    }
    config = {"configurable": {"thread_id": "1"}}
    async def run():
        # Exhaust the async stream; the materialized list is discarded.
        len([c async for c in graph.astream(input, config=config)])
    uvloop.install()
    asyncio.run(run())
|
0 | lc_public_repos/langgraph/libs/langgraph | lc_public_repos/langgraph/libs/langgraph/bench/__main__.py | import random
from uuid import uuid4
from langchain_core.messages import HumanMessage
from pyperf._runner import Runner
from uvloop import new_event_loop
from bench.fanout_to_subgraph import fanout_to_subgraph, fanout_to_subgraph_sync
from bench.react_agent import react_agent
from bench.wide_state import wide_state
from langgraph.checkpoint.memory import MemorySaver
from langgraph.pregel import Pregel
async def arun(graph: "Pregel", input: dict):
    """Asynchronously drain one full stream of *graph* for benchmarking.

    A fresh thread_id is generated per call so checkpointed graphs start on
    a clean thread; the recursion limit is effectively unlimited. Chunks are
    consumed and discarded — only the cost of producing them matters, so the
    stream is drained directly instead of materializing a throwaway list.

    Args:
        graph: the compiled graph to stream.
        input: initial state payload passed to ``astream``.
    """
    async for _ in graph.astream(
        input,
        {
            "configurable": {"thread_id": str(uuid4())},
            "recursion_limit": 1000000000,
        },
    ):
        pass
def run(graph: "Pregel", input: dict):
    """Synchronously drain one full stream of *graph* for benchmarking.

    A fresh thread_id is generated per call so checkpointed graphs start on
    a clean thread; the recursion limit is effectively unlimited. Chunks are
    consumed and discarded — only the cost of producing them matters, so the
    stream is drained directly instead of materializing a throwaway list.

    Args:
        graph: the compiled graph to stream.
        input: initial state payload passed to ``stream``.
    """
    for _ in graph.stream(
        input,
        {
            "configurable": {"thread_id": str(uuid4())},
            "recursion_limit": 1000000000,
        },
    ):
        pass
# Benchmark matrix: (name, async graph, sync graph, input payload).
# Each scenario appears twice — with and without a MemorySaver — so
# checkpointing overhead can be measured separately.
benchmarks = (
    # Fan-out-to-subgraph scenarios: N random 1000-char subjects each.
    (
        "fanout_to_subgraph_10x",
        fanout_to_subgraph().compile(checkpointer=None),
        fanout_to_subgraph_sync().compile(checkpointer=None),
        {
            "subjects": [
                random.choices("abcdefghijklmnopqrstuvwxyz", k=1000) for _ in range(10)
            ]
        },
    ),
    (
        "fanout_to_subgraph_10x_checkpoint",
        fanout_to_subgraph().compile(checkpointer=MemorySaver()),
        fanout_to_subgraph_sync().compile(checkpointer=MemorySaver()),
        {
            "subjects": [
                random.choices("abcdefghijklmnopqrstuvwxyz", k=1000) for _ in range(10)
            ]
        },
    ),
    (
        "fanout_to_subgraph_100x",
        fanout_to_subgraph().compile(checkpointer=None),
        fanout_to_subgraph_sync().compile(checkpointer=None),
        {
            "subjects": [
                random.choices("abcdefghijklmnopqrstuvwxyz", k=1000) for _ in range(100)
            ]
        },
    ),
    (
        "fanout_to_subgraph_100x_checkpoint",
        fanout_to_subgraph().compile(checkpointer=MemorySaver()),
        fanout_to_subgraph_sync().compile(checkpointer=MemorySaver()),
        {
            "subjects": [
                random.choices("abcdefghijklmnopqrstuvwxyz", k=1000) for _ in range(100)
            ]
        },
    ),
    # ReAct-agent scenarios: the fake model emits N tool calls, then answers.
    (
        "react_agent_10x",
        react_agent(10, checkpointer=None),
        react_agent(10, checkpointer=None),
        {"messages": [HumanMessage("hi?")]},
    ),
    (
        "react_agent_10x_checkpoint",
        react_agent(10, checkpointer=MemorySaver()),
        react_agent(10, checkpointer=MemorySaver()),
        {"messages": [HumanMessage("hi?")]},
    ),
    (
        "react_agent_100x",
        react_agent(100, checkpointer=None),
        react_agent(100, checkpointer=None),
        {"messages": [HumanMessage("hi?")]},
    ),
    (
        "react_agent_100x_checkpoint",
        react_agent(100, checkpointer=MemorySaver()),
        react_agent(100, checkpointer=MemorySaver()),
        {"messages": [HumanMessage("hi?")]},
    ),
    # Wide-state scenarios: AxB = nested-payload size x loop threshold.
    (
        "wide_state_25x300",
        wide_state(300).compile(checkpointer=None),
        wide_state(300).compile(checkpointer=None),
        {
            "messages": [
                {
                    str(i) * 10: {
                        str(j) * 10: ["hi?" * 10, True, 1, 6327816386138, None] * 5
                        for j in range(5)
                    }
                    for i in range(5)
                }
            ]
        },
    ),
    (
        "wide_state_25x300_checkpoint",
        wide_state(300).compile(checkpointer=MemorySaver()),
        wide_state(300).compile(checkpointer=MemorySaver()),
        {
            "messages": [
                {
                    str(i) * 10: {
                        str(j) * 10: ["hi?" * 10, True, 1, 6327816386138, None] * 5
                        for j in range(5)
                    }
                    for i in range(5)
                }
            ]
        },
    ),
    (
        "wide_state_15x600",
        wide_state(600).compile(checkpointer=None),
        wide_state(600).compile(checkpointer=None),
        {
            "messages": [
                {
                    str(i) * 10: {
                        str(j) * 10: ["hi?" * 10, True, 1, 6327816386138, None] * 5
                        for j in range(5)
                    }
                    for i in range(3)
                }
            ]
        },
    ),
    (
        "wide_state_15x600_checkpoint",
        wide_state(600).compile(checkpointer=MemorySaver()),
        wide_state(600).compile(checkpointer=MemorySaver()),
        {
            "messages": [
                {
                    str(i) * 10: {
                        str(j) * 10: ["hi?" * 10, True, 1, 6327816386138, None] * 5
                        for j in range(5)
                    }
                    for i in range(3)
                }
            ]
        },
    ),
    (
        "wide_state_9x1200",
        wide_state(1200).compile(checkpointer=None),
        wide_state(1200).compile(checkpointer=None),
        {
            "messages": [
                {
                    str(i) * 10: {
                        str(j) * 10: ["hi?" * 10, True, 1, 6327816386138, None] * 5
                        for j in range(3)
                    }
                    for i in range(3)
                }
            ]
        },
    ),
    (
        "wide_state_9x1200_checkpoint",
        wide_state(1200).compile(checkpointer=MemorySaver()),
        wide_state(1200).compile(checkpointer=MemorySaver()),
        {
            "messages": [
                {
                    str(i) * 10: {
                        str(j) * 10: ["hi?" * 10, True, 1, 6327816386138, None] * 5
                        for j in range(3)
                    }
                    for i in range(3)
                }
            ]
        },
    ),
)
# Register each benchmark with pyperf: the async variant always runs (on a
# fresh uvloop per benchmark); the sync variant runs when a sync-capable
# graph was provided.
r = Runner()
for name, agraph, graph, input in benchmarks:
    r.bench_async_func(name, arun, agraph, input, loop_factory=new_event_loop)
    if graph is not None:
        r.bench_func(name + "_sync", run, graph, input)
|
0 | lc_public_repos/langgraph/libs | lc_public_repos/langgraph/libs/sdk-js/tsconfig.json | {
"extends": "@tsconfig/recommended",
"compilerOptions": {
"target": "ES2021",
"lib": [
"ES2021",
"ES2022.Object",
"DOM"
],
"module": "NodeNext",
"moduleResolution": "nodenext",
"esModuleInterop": true,
"declaration": true,
"noImplicitReturns": true,
"noFallthroughCasesInSwitch": true,
"noUnusedLocals": true,
"noUnusedParameters": true,
"useDefineForClassFields": true,
"strictPropertyInitialization": false,
"allowJs": true,
"strict": true,
"outDir": "dist"
},
"include": [
"src/**/*"
],
"exclude": [
"node_modules",
"dist",
"coverage"
],
"includeVersion": true,
"typedocOptions": {
"entryPoints": [
"src/client.ts"
],
"readme": "none",
"out": "docs",
"plugin": [
"typedoc-plugin-markdown"
],
"excludePrivate": true,
"excludeProtected": true,
"excludeExternals": false
}
}
|
0 | lc_public_repos/langgraph/libs | lc_public_repos/langgraph/libs/sdk-js/LICENSE | MIT License
Copyright (c) 2024 LangChain, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
|
0 | lc_public_repos/langgraph/libs | lc_public_repos/langgraph/libs/sdk-js/yarn.lock | # THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.
# yarn lockfile v1
"@babel/code-frame@^7.0.0":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/code-frame/-/code-frame-7.24.7.tgz#882fd9e09e8ee324e496bd040401c6f046ef4465"
integrity sha512-BcYH1CVJBO9tvyIZ2jVeXgSIMvGZ2FDRvDdOIVQyuklNKSsx+eppDEBq/g47Ayw+RqNFE+URvOShmf+f/qwAlA==
dependencies:
"@babel/highlight" "^7.24.7"
picocolors "^1.0.0"
"@babel/helper-validator-identifier@^7.24.7":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.24.7.tgz#75b889cfaf9e35c2aaf42cf0d72c8e91719251db"
integrity sha512-rR+PBcQ1SMQDDyF6X0wxtG8QyLCgUB0eRAGguqRLfkCA87l7yAP7ehq8SNj96OOGTO8OBV70KhuFYcIkHXOg0w==
"@babel/highlight@^7.24.7":
version "7.24.7"
resolved "https://registry.yarnpkg.com/@babel/highlight/-/highlight-7.24.7.tgz#a05ab1df134b286558aae0ed41e6c5f731bf409d"
integrity sha512-EStJpq4OuY8xYfhGVXngigBJRWxftKX9ksiGDnmlY3o7B/V7KIAc9X4oiK87uPJSc/vs5L869bem5fhZa8caZw==
dependencies:
"@babel/helper-validator-identifier" "^7.24.7"
chalk "^2.4.2"
js-tokens "^4.0.0"
picocolors "^1.0.0"
"@isaacs/cliui@^8.0.2":
version "8.0.2"
resolved "https://registry.yarnpkg.com/@isaacs/cliui/-/cliui-8.0.2.tgz#b37667b7bc181c168782259bab42474fbf52b550"
integrity sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==
dependencies:
string-width "^5.1.2"
string-width-cjs "npm:string-width@^4.2.0"
strip-ansi "^7.0.1"
strip-ansi-cjs "npm:strip-ansi@^6.0.1"
wrap-ansi "^8.1.0"
wrap-ansi-cjs "npm:wrap-ansi@^7.0.0"
"@langchain/scripts@^0.1.4":
version "0.1.4"
resolved "https://registry.yarnpkg.com/@langchain/scripts/-/scripts-0.1.4.tgz#8c5d03d627686f20b9522213c12e2b329e73e381"
integrity sha512-O+mv2aqUIm3XWxYBrwFWMeTI3aHWeFR8OYjGFXKc1MGV/3LLao3PciyQvKUg1SL7FemHJ1ltDx74rKuEv8xxPA==
dependencies:
"@octokit/rest" "^21.0.2"
"@rollup/wasm-node" "^4.19.0"
axios "^1.6.7"
commander "^11.1.0"
glob "^10.3.10"
lodash "^4.17.21"
readline "^1.3.0"
rimraf "^5.0.1"
rollup "^4.5.2"
ts-morph "^21.0.1"
typescript "^5.4.5"
"@nodelib/fs.scandir@2.1.5":
version "2.1.5"
resolved "https://registry.yarnpkg.com/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz#7619c2eb21b25483f6d167548b4cfd5a7488c3d5"
integrity sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==
dependencies:
"@nodelib/fs.stat" "2.0.5"
run-parallel "^1.1.9"
"@nodelib/fs.stat@2.0.5", "@nodelib/fs.stat@^2.0.2":
version "2.0.5"
resolved "https://registry.yarnpkg.com/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz#5bd262af94e9d25bd1e71b05deed44876a222e8b"
integrity sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==
"@nodelib/fs.walk@^1.2.3":
version "1.2.8"
resolved "https://registry.yarnpkg.com/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz#e95737e8bb6746ddedf69c556953494f196fe69a"
integrity sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==
dependencies:
"@nodelib/fs.scandir" "2.1.5"
fastq "^1.6.0"
"@octokit/auth-token@^5.0.0":
version "5.1.1"
resolved "https://registry.yarnpkg.com/@octokit/auth-token/-/auth-token-5.1.1.tgz#3bbfe905111332a17f72d80bd0b51a3e2fa2cf07"
integrity sha512-rh3G3wDO8J9wSjfI436JUKzHIxq8NaiL0tVeB2aXmG6p/9859aUOAjA9pmSPNGGZxfwmaJ9ozOJImuNVJdpvbA==
"@octokit/core@^6.1.2":
version "6.1.2"
resolved "https://registry.yarnpkg.com/@octokit/core/-/core-6.1.2.tgz#20442d0a97c411612da206411e356014d1d1bd17"
integrity sha512-hEb7Ma4cGJGEUNOAVmyfdB/3WirWMg5hDuNFVejGEDFqupeOysLc2sG6HJxY2etBp5YQu5Wtxwi020jS9xlUwg==
dependencies:
"@octokit/auth-token" "^5.0.0"
"@octokit/graphql" "^8.0.0"
"@octokit/request" "^9.0.0"
"@octokit/request-error" "^6.0.1"
"@octokit/types" "^13.0.0"
before-after-hook "^3.0.2"
universal-user-agent "^7.0.0"
"@octokit/endpoint@^10.0.0":
version "10.1.1"
resolved "https://registry.yarnpkg.com/@octokit/endpoint/-/endpoint-10.1.1.tgz#1a9694e7aef6aa9d854dc78dd062945945869bcc"
integrity sha512-JYjh5rMOwXMJyUpj028cu0Gbp7qe/ihxfJMLc8VZBMMqSwLgOxDI1911gV4Enl1QSavAQNJcwmwBF9M0VvLh6Q==
dependencies:
"@octokit/types" "^13.0.0"
universal-user-agent "^7.0.2"
"@octokit/graphql@^8.0.0":
version "8.1.1"
resolved "https://registry.yarnpkg.com/@octokit/graphql/-/graphql-8.1.1.tgz#3cacab5f2e55d91c733e3bf481d3a3f8a5f639c4"
integrity sha512-ukiRmuHTi6ebQx/HFRCXKbDlOh/7xEV6QUXaE7MJEKGNAncGI/STSbOkl12qVXZrfZdpXctx5O9X1AIaebiDBg==
dependencies:
"@octokit/request" "^9.0.0"
"@octokit/types" "^13.0.0"
universal-user-agent "^7.0.0"
"@octokit/openapi-types@^22.2.0":
version "22.2.0"
resolved "https://registry.yarnpkg.com/@octokit/openapi-types/-/openapi-types-22.2.0.tgz#75aa7dcd440821d99def6a60b5f014207ae4968e"
integrity sha512-QBhVjcUa9W7Wwhm6DBFu6ZZ+1/t/oYxqc2tp81Pi41YNuJinbFRx8B133qVOrAaBbF7D/m0Et6f9/pZt9Rc+tg==
"@octokit/plugin-paginate-rest@^11.0.0":
version "11.3.5"
resolved "https://registry.yarnpkg.com/@octokit/plugin-paginate-rest/-/plugin-paginate-rest-11.3.5.tgz#a1929b3ba3dc7b63bc73bb6d3c7a3faf2a9c7649"
integrity sha512-cgwIRtKrpwhLoBi0CUNuY83DPGRMaWVjqVI/bGKsLJ4PzyWZNaEmhHroI2xlrVXkk6nFv0IsZpOp+ZWSWUS2AQ==
dependencies:
"@octokit/types" "^13.6.0"
"@octokit/plugin-request-log@^5.3.1":
version "5.3.1"
resolved "https://registry.yarnpkg.com/@octokit/plugin-request-log/-/plugin-request-log-5.3.1.tgz#ccb75d9705de769b2aa82bcd105cc96eb0c00f69"
integrity sha512-n/lNeCtq+9ofhC15xzmJCNKP2BWTv8Ih2TTy+jatNCCq/gQP/V7rK3fjIfuz0pDWDALO/o/4QY4hyOF6TQQFUw==
"@octokit/plugin-rest-endpoint-methods@^13.0.0":
version "13.2.6"
resolved "https://registry.yarnpkg.com/@octokit/plugin-rest-endpoint-methods/-/plugin-rest-endpoint-methods-13.2.6.tgz#b9d343dbe88a6cb70cc7fa16faa98f0a29ffe654"
integrity sha512-wMsdyHMjSfKjGINkdGKki06VEkgdEldIGstIEyGX0wbYHGByOwN/KiM+hAAlUwAtPkP3gvXtVQA9L3ITdV2tVw==
dependencies:
"@octokit/types" "^13.6.1"
"@octokit/request-error@^6.0.1":
version "6.1.5"
resolved "https://registry.yarnpkg.com/@octokit/request-error/-/request-error-6.1.5.tgz#907099e341c4e6179db623a0328d678024f54653"
integrity sha512-IlBTfGX8Yn/oFPMwSfvugfncK2EwRLjzbrpifNaMY8o/HTEAFqCA1FZxjD9cWvSKBHgrIhc4CSBIzMxiLsbzFQ==
dependencies:
"@octokit/types" "^13.0.0"
"@octokit/request@^9.0.0":
version "9.1.3"
resolved "https://registry.yarnpkg.com/@octokit/request/-/request-9.1.3.tgz#42b693bc06238f43af3c037ebfd35621c6457838"
integrity sha512-V+TFhu5fdF3K58rs1pGUJIDH5RZLbZm5BI+MNF+6o/ssFNT4vWlCh/tVpF3NxGtP15HUxTTMUbsG5llAuU2CZA==
dependencies:
"@octokit/endpoint" "^10.0.0"
"@octokit/request-error" "^6.0.1"
"@octokit/types" "^13.1.0"
universal-user-agent "^7.0.2"
"@octokit/rest@^21.0.2":
version "21.0.2"
resolved "https://registry.yarnpkg.com/@octokit/rest/-/rest-21.0.2.tgz#9b767dbc1098daea8310fd8b76bf7a97215d5972"
integrity sha512-+CiLisCoyWmYicH25y1cDfCrv41kRSvTq6pPWtRroRJzhsCZWZyCqGyI8foJT5LmScADSwRAnr/xo+eewL04wQ==
dependencies:
"@octokit/core" "^6.1.2"
"@octokit/plugin-paginate-rest" "^11.0.0"
"@octokit/plugin-request-log" "^5.3.1"
"@octokit/plugin-rest-endpoint-methods" "^13.0.0"
"@octokit/types@^13.0.0", "@octokit/types@^13.1.0", "@octokit/types@^13.6.0", "@octokit/types@^13.6.1":
version "13.6.1"
resolved "https://registry.yarnpkg.com/@octokit/types/-/types-13.6.1.tgz#432fc6c0aaae54318e5b2d3e15c22ac97fc9b15f"
integrity sha512-PHZE9Z+kWXb23Ndik8MKPirBPziOc0D2/3KH1P+6jK5nGWe96kadZuE4jev2/Jq7FvIfTlT2Ltg8Fv2x1v0a5g==
dependencies:
"@octokit/openapi-types" "^22.2.0"
"@pkgjs/parseargs@^0.11.0":
version "0.11.0"
resolved "https://registry.yarnpkg.com/@pkgjs/parseargs/-/parseargs-0.11.0.tgz#a77ea742fab25775145434eb1d2328cf5013ac33"
integrity sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==
"@rollup/rollup-android-arm-eabi@4.24.4":
version "4.24.4"
resolved "https://registry.yarnpkg.com/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.24.4.tgz#c460b54c50d42f27f8254c435a4f3b3e01910bc8"
integrity sha512-jfUJrFct/hTA0XDM5p/htWKoNNTbDLY0KRwEt6pyOA6k2fmk0WVwl65PdUdJZgzGEHWx+49LilkcSaumQRyNQw==
"@rollup/rollup-android-arm64@4.24.4":
version "4.24.4"
resolved "https://registry.yarnpkg.com/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.24.4.tgz#96e01f3a04675d8d5973ab8d3fd6bc3be21fa5e1"
integrity sha512-j4nrEO6nHU1nZUuCfRKoCcvh7PIywQPUCBa2UsootTHvTHIoIu2BzueInGJhhvQO/2FTRdNYpf63xsgEqH9IhA==
"@rollup/rollup-darwin-arm64@4.24.4":
version "4.24.4"
resolved "https://registry.yarnpkg.com/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.24.4.tgz#9b2ec23b17b47cbb2f771b81f86ede3ac6730bce"
integrity sha512-GmU/QgGtBTeraKyldC7cDVVvAJEOr3dFLKneez/n7BvX57UdhOqDsVwzU7UOnYA7AAOt+Xb26lk79PldDHgMIQ==
"@rollup/rollup-darwin-x64@4.24.4":
version "4.24.4"
resolved "https://registry.yarnpkg.com/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.24.4.tgz#f30e4ee6929e048190cf10e0daa8e8ae035b6e46"
integrity sha512-N6oDBiZCBKlwYcsEPXGDE4g9RoxZLK6vT98M8111cW7VsVJFpNEqvJeIPfsCzbf0XEakPslh72X0gnlMi4Ddgg==
"@rollup/rollup-freebsd-arm64@4.24.4":
version "4.24.4"
resolved "https://registry.yarnpkg.com/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.24.4.tgz#c54b2373ec5bcf71f08c4519c7ae80a0b6c8e03b"
integrity sha512-py5oNShCCjCyjWXCZNrRGRpjWsF0ic8f4ieBNra5buQz0O/U6mMXCpC1LvrHuhJsNPgRt36tSYMidGzZiJF6mw==
"@rollup/rollup-freebsd-x64@4.24.4":
version "4.24.4"
resolved "https://registry.yarnpkg.com/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.24.4.tgz#3bc53aa29d5a34c28ba8e00def76aa612368458e"
integrity sha512-L7VVVW9FCnTTp4i7KrmHeDsDvjB4++KOBENYtNYAiYl96jeBThFfhP6HVxL74v4SiZEVDH/1ILscR5U9S4ms4g==
"@rollup/rollup-linux-arm-gnueabihf@4.24.4":
version "4.24.4"
resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.24.4.tgz#c85aedd1710c9e267ee86b6d1ce355ecf7d9e8d9"
integrity sha512-10ICosOwYChROdQoQo589N5idQIisxjaFE/PAnX2i0Zr84mY0k9zul1ArH0rnJ/fpgiqfu13TFZR5A5YJLOYZA==
"@rollup/rollup-linux-arm-musleabihf@4.24.4":
version "4.24.4"
resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.24.4.tgz#e77313408bf13995aecde281aec0cceb08747e42"
integrity sha512-ySAfWs69LYC7QhRDZNKqNhz2UKN8LDfbKSMAEtoEI0jitwfAG2iZwVqGACJT+kfYvvz3/JgsLlcBP+WWoKCLcw==
"@rollup/rollup-linux-arm64-gnu@4.24.4":
version "4.24.4"
resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.24.4.tgz#633f632397b3662108cfaa1abca2a80b85f51102"
integrity sha512-uHYJ0HNOI6pGEeZ/5mgm5arNVTI0nLlmrbdph+pGXpC9tFHFDQmDMOEqkmUObRfosJqpU8RliYoGz06qSdtcjg==
"@rollup/rollup-linux-arm64-musl@4.24.4":
version "4.24.4"
resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.24.4.tgz#63edd72b29c4cced93e16113a68e1be9fef88907"
integrity sha512-38yiWLemQf7aLHDgTg85fh3hW9stJ0Muk7+s6tIkSUOMmi4Xbv5pH/5Bofnsb6spIwD5FJiR+jg71f0CH5OzoA==
"@rollup/rollup-linux-powerpc64le-gnu@4.24.4":
version "4.24.4"
resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-powerpc64le-gnu/-/rollup-linux-powerpc64le-gnu-4.24.4.tgz#a9418a4173df80848c0d47df0426a0bf183c4e75"
integrity sha512-q73XUPnkwt9ZNF2xRS4fvneSuaHw2BXuV5rI4cw0fWYVIWIBeDZX7c7FWhFQPNTnE24172K30I+dViWRVD9TwA==
"@rollup/rollup-linux-riscv64-gnu@4.24.4":
version "4.24.4"
resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.24.4.tgz#bc9c195db036a27e5e3339b02f51526b4ce1e988"
integrity sha512-Aie/TbmQi6UXokJqDZdmTJuZBCU3QBDA8oTKRGtd4ABi/nHgXICulfg1KI6n9/koDsiDbvHAiQO3YAUNa/7BCw==
"@rollup/rollup-linux-s390x-gnu@4.24.4":
version "4.24.4"
resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.24.4.tgz#1651fdf8144ae89326c01da5d52c60be63e71a82"
integrity sha512-P8MPErVO/y8ohWSP9JY7lLQ8+YMHfTI4bAdtCi3pC2hTeqFJco2jYspzOzTUB8hwUWIIu1xwOrJE11nP+0JFAQ==
"@rollup/rollup-linux-x64-gnu@4.24.4":
version "4.24.4"
resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.24.4.tgz#e473de5e4acb95fcf930a35cbb7d3e8080e57a6f"
integrity sha512-K03TljaaoPK5FOyNMZAAEmhlyO49LaE4qCsr0lYHUKyb6QacTNF9pnfPpXnFlFD3TXuFbFbz7tJ51FujUXkXYA==
"@rollup/rollup-linux-x64-musl@4.24.4":
version "4.24.4"
resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.24.4.tgz#0af12dd2578c29af4037f0c834b4321429dd5b01"
integrity sha512-VJYl4xSl/wqG2D5xTYncVWW+26ICV4wubwN9Gs5NrqhJtayikwCXzPL8GDsLnaLU3WwhQ8W02IinYSFJfyo34Q==
"@rollup/rollup-win32-arm64-msvc@4.24.4":
version "4.24.4"
resolved "https://registry.yarnpkg.com/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.24.4.tgz#e48e78cdd45313b977c1390f4bfde7ab79be8871"
integrity sha512-ku2GvtPwQfCqoPFIJCqZ8o7bJcj+Y54cZSr43hHca6jLwAiCbZdBUOrqE6y29QFajNAzzpIOwsckaTFmN6/8TA==
"@rollup/rollup-win32-ia32-msvc@4.24.4":
version "4.24.4"
resolved "https://registry.yarnpkg.com/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.24.4.tgz#a3fc8536d243fe161c796acb93eba43c250f311c"
integrity sha512-V3nCe+eTt/W6UYNr/wGvO1fLpHUrnlirlypZfKCT1fG6hWfqhPgQV/K/mRBXBpxc0eKLIF18pIOFVPh0mqHjlg==
"@rollup/rollup-win32-x64-msvc@4.24.4":
version "4.24.4"
resolved "https://registry.yarnpkg.com/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.24.4.tgz#e2a9d1fd56524103a6cc8a54404d9d3ebc73c454"
integrity sha512-LTw1Dfd0mBIEqUVCxbvTE/LLo+9ZxVC9k99v1v4ahg9Aak6FpqOfNu5kRkeTAn0wphoC4JU7No1/rL+bBCEwhg==
"@rollup/wasm-node@^4.19.0":
version "4.24.4"
resolved "https://registry.yarnpkg.com/@rollup/wasm-node/-/wasm-node-4.24.4.tgz#11d78f5cc85b04e81468f245c2dc0885d06d663d"
integrity sha512-WKJUdPcM8YAYujafY95+2EapqU3F/nwfBkXh9AfkBvWBwFhsvNJABA86Br6graRH2vRE4FBsiqjFvFWOtEO6wg==
dependencies:
"@types/estree" "1.0.6"
optionalDependencies:
fsevents "~2.3.2"
"@shikijs/core@1.9.0":
version "1.9.0"
resolved "https://registry.yarnpkg.com/@shikijs/core/-/core-1.9.0.tgz#ff717fef5e0e9882f0848272699fd8f04d6f9a07"
integrity sha512-cbSoY8P/jgGByG8UOl3jnP/CWg/Qk+1q+eAKWtcrU3pNoILF8wTsLB0jT44qUBV8Ce1SvA9uqcM9Xf+u3fJFBw==
"@textlint/ast-node-types@^12.6.1":
version "12.6.1"
resolved "https://registry.yarnpkg.com/@textlint/ast-node-types/-/ast-node-types-12.6.1.tgz#35ecefe74e701d7f632c083d4fda89cab1b89012"
integrity sha512-uzlJ+ZsCAyJm+lBi7j0UeBbj+Oy6w/VWoGJ3iHRHE5eZ8Z4iK66mq+PG/spupmbllLtz77OJbY89BYqgFyjXmA==
"@textlint/markdown-to-ast@^12.1.1":
version "12.6.1"
resolved "https://registry.yarnpkg.com/@textlint/markdown-to-ast/-/markdown-to-ast-12.6.1.tgz#fcccb5733b3e76cd0db78a323763ab101f2d803b"
integrity sha512-T0HO+VrU9VbLRiEx/kH4+gwGMHNMIGkp0Pok+p0I33saOOLyhfGvwOKQgvt2qkxzQEV2L5MtGB8EnW4r5d3CqQ==
dependencies:
"@textlint/ast-node-types" "^12.6.1"
debug "^4.3.4"
mdast-util-gfm-autolink-literal "^0.1.3"
remark-footnotes "^3.0.0"
remark-frontmatter "^3.0.0"
remark-gfm "^1.0.0"
remark-parse "^9.0.0"
traverse "^0.6.7"
unified "^9.2.2"
"@ts-morph/common@~0.22.0":
version "0.22.0"
resolved "https://registry.yarnpkg.com/@ts-morph/common/-/common-0.22.0.tgz#8951d451622a26472fbc3a227d6c3a90e687a683"
integrity sha512-HqNBuV/oIlMKdkLshXd1zKBqNQCsuPEsgQOkfFQ/eUKjRlwndXW1AjN9LVkBEIukm00gGXSRmfkl0Wv5VXLnlw==
dependencies:
fast-glob "^3.3.2"
minimatch "^9.0.3"
mkdirp "^3.0.1"
path-browserify "^1.0.1"
"@tsconfig/recommended@^1.0.2":
version "1.0.6"
resolved "https://registry.yarnpkg.com/@tsconfig/recommended/-/recommended-1.0.6.tgz#217b78f9601215939d566a79d202a760ae185114"
integrity sha512-0IKu9GHYF1NGTJiYgfWwqnOQSlnE9V9R7YohHNNf0/fj/SyOZWzdd06JFr0fLpg1Mqw0kGbYg8w5xdkSqLKM9g==
"@types/estree@1.0.6":
version "1.0.6"
resolved "https://registry.yarnpkg.com/@types/estree/-/estree-1.0.6.tgz#628effeeae2064a1b4e79f78e81d87b7e5fc7b50"
integrity sha512-AYnb1nQyY49te+VRAVgmzfcgjYS91mY5P0TKUDCLEM+gNnA+3T6rWITXRLYCpahpqSQbN5cE+gHpnPyXjHWxcw==
"@types/json-schema@^7.0.15":
version "7.0.15"
resolved "https://registry.yarnpkg.com/@types/json-schema/-/json-schema-7.0.15.tgz#596a1747233694d50f6ad8a7869fcb6f56cf5841"
integrity sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==
"@types/mdast@^3.0.0":
version "3.0.15"
resolved "https://registry.yarnpkg.com/@types/mdast/-/mdast-3.0.15.tgz#49c524a263f30ffa28b71ae282f813ed000ab9f5"
integrity sha512-LnwD+mUEfxWMa1QpDraczIn6k0Ee3SMicuYSSzS6ZYl2gKS09EClnJYGd8Du6rfc5r/GZEk5o1mRb8TaTj03sQ==
dependencies:
"@types/unist" "^2"
"@types/minimist@^1.2.0":
version "1.2.5"
resolved "https://registry.yarnpkg.com/@types/minimist/-/minimist-1.2.5.tgz#ec10755e871497bcd83efe927e43ec46e8c0747e"
integrity sha512-hov8bUuiLiyFPGyFPE1lwWhmzYbirOXQNNo40+y3zow8aFVTeyn3VWL0VFFfdNddA8S4Vf0Tc062rzyNr7Paag==
"@types/node@^20.12.12":
version "20.12.12"
resolved "https://registry.yarnpkg.com/@types/node/-/node-20.12.12.tgz#7cbecdf902085cec634fdb362172dfe12b8f2050"
integrity sha512-eWLDGF/FOSPtAvEqeRAQ4C8LSA7M1I7i0ky1I8U7kD1J5ITyW3AsRhQrKVoWf5pFKZ2kILsEGJhsI9r93PYnOw==
dependencies:
undici-types "~5.26.4"
"@types/normalize-package-data@^2.4.0":
version "2.4.4"
resolved "https://registry.yarnpkg.com/@types/normalize-package-data/-/normalize-package-data-2.4.4.tgz#56e2cc26c397c038fab0e3a917a12d5c5909e901"
integrity sha512-37i+OaWTh9qeK4LSHPsyRC7NahnGotNuZvjLSgcPzblpHB3rrCJxAOgI5gCdKm7coonsaX1Of0ILiTcnZjbfxA==
"@types/retry@0.12.0":
version "0.12.0"
resolved "https://registry.yarnpkg.com/@types/retry/-/retry-0.12.0.tgz#2b35eccfcee7d38cd72ad99232fbd58bffb3c84d"
integrity sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA==
"@types/unist@^2", "@types/unist@^2.0.0", "@types/unist@^2.0.2":
version "2.0.10"
resolved "https://registry.yarnpkg.com/@types/unist/-/unist-2.0.10.tgz#04ffa7f406ab628f7f7e97ca23e290cd8ab15efc"
integrity sha512-IfYcSBWE3hLpBg8+X2SEa8LVkJdJEkT2Ese2aaLs3ptGdVtABxndrMaxuFlQ1qdFf9Q5rDvDpxI3WwgvKFAsQA==
"@types/uuid@^9.0.1":
version "9.0.8"
resolved "https://registry.yarnpkg.com/@types/uuid/-/uuid-9.0.8.tgz#7545ba4fc3c003d6c756f651f3bf163d8f0f29ba"
integrity sha512-jg+97EGIcY9AGHJJRaaPVgetKDsrTgbRjQ5Msgjh/DQKEFl0DtyRr/VCOyD1T2R1MNeWPK/u7JoGhlDZnKBAfA==
anchor-markdown-header@^0.6.0:
version "0.6.0"
resolved "https://registry.yarnpkg.com/anchor-markdown-header/-/anchor-markdown-header-0.6.0.tgz#908f2031281766f44ac350380ca0de77ab7065b8"
integrity sha512-v7HJMtE1X7wTpNFseRhxsY/pivP4uAJbidVhPT+yhz4i/vV1+qx371IXuV9V7bN6KjFtheLJxqaSm0Y/8neJTA==
dependencies:
emoji-regex "~10.1.0"
ansi-regex@^5.0.1:
version "5.0.1"
resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-5.0.1.tgz#082cb2c89c9fe8659a311a53bd6a4dc5301db304"
integrity sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==
ansi-regex@^6.0.1:
version "6.1.0"
resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-6.1.0.tgz#95ec409c69619d6cb1b8b34f14b660ef28ebd654"
integrity sha512-7HSX4QQb4CspciLpVFwyRe79O3xsIZDDLER21kERQ71oaPodF8jL725AgJMFAYbooIqolJoRLuM81SpeUkpkvA==
ansi-styles@^3.2.1:
version "3.2.1"
resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-3.2.1.tgz#41fbb20243e50b12be0f04b8dedbf07520ce841d"
integrity sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==
dependencies:
color-convert "^1.9.0"
ansi-styles@^4.0.0:
version "4.3.0"
resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-4.3.0.tgz#edd803628ae71c04c85ae7a0906edad34b648937"
integrity sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==
dependencies:
color-convert "^2.0.1"
ansi-styles@^6.1.0:
version "6.2.1"
resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-6.2.1.tgz#0e62320cf99c21afff3b3012192546aacbfb05c5"
integrity sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==
argparse@^1.0.7:
version "1.0.10"
resolved "https://registry.yarnpkg.com/argparse/-/argparse-1.0.10.tgz#bcd6791ea5ae09725e17e5ad988134cd40b3d911"
integrity sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==
dependencies:
sprintf-js "~1.0.2"
argparse@^2.0.1:
version "2.0.1"
resolved "https://registry.yarnpkg.com/argparse/-/argparse-2.0.1.tgz#246f50f3ca78a3240f6c997e8a9bd1eac49e4b38"
integrity sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==
array-buffer-byte-length@^1.0.1:
version "1.0.1"
resolved "https://registry.yarnpkg.com/array-buffer-byte-length/-/array-buffer-byte-length-1.0.1.tgz#1e5583ec16763540a27ae52eed99ff899223568f"
integrity sha512-ahC5W1xgou+KTXix4sAO8Ki12Q+jf4i0+tmk3sC+zgcynshkHxzpXdImBehiUYKKKDwvfFiJl1tZt6ewscS1Mg==
dependencies:
call-bind "^1.0.5"
is-array-buffer "^3.0.4"
array-union@^2.1.0:
version "2.1.0"
resolved "https://registry.yarnpkg.com/array-union/-/array-union-2.1.0.tgz#b798420adbeb1de828d84acd8a2e23d3efe85e8d"
integrity sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==
arraybuffer.prototype.slice@^1.0.3:
version "1.0.3"
resolved "https://registry.yarnpkg.com/arraybuffer.prototype.slice/-/arraybuffer.prototype.slice-1.0.3.tgz#097972f4255e41bc3425e37dc3f6421cf9aefde6"
integrity sha512-bMxMKAjg13EBSVscxTaYA4mRc5t1UAXa2kXiGTNfZ079HIWXEkKmkgFrh/nJqamaLSrXO5H4WFFkPEaLJWbs3A==
dependencies:
array-buffer-byte-length "^1.0.1"
call-bind "^1.0.5"
define-properties "^1.2.1"
es-abstract "^1.22.3"
es-errors "^1.2.1"
get-intrinsic "^1.2.3"
is-array-buffer "^3.0.4"
is-shared-array-buffer "^1.0.2"
arrify@^1.0.1:
version "1.0.1"
resolved "https://registry.yarnpkg.com/arrify/-/arrify-1.0.1.tgz#898508da2226f380df904728456849c1501a4b0d"
integrity sha512-3CYzex9M9FGQjCGMGyi6/31c8GJbgb0qGyrx5HWxPd0aCwh4cB2YjMb2Xf9UuoogrMrlO9cTqnB5rI5GHZTcUA==
asynckit@^0.4.0:
version "0.4.0"
resolved "https://registry.yarnpkg.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79"
integrity sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==
available-typed-arrays@^1.0.7:
version "1.0.7"
resolved "https://registry.yarnpkg.com/available-typed-arrays/-/available-typed-arrays-1.0.7.tgz#a5cc375d6a03c2efc87a553f3e0b1522def14846"
integrity sha512-wvUjBtSGN7+7SjNpq/9M2Tg350UZD3q62IFZLbRAR1bSMlCo1ZaeW+BJ+D090e4hIIZLBcTDWe4Mh4jvUDajzQ==
dependencies:
possible-typed-array-names "^1.0.0"
axios@^1.6.7:
version "1.7.7"
resolved "https://registry.yarnpkg.com/axios/-/axios-1.7.7.tgz#2f554296f9892a72ac8d8e4c5b79c14a91d0a47f"
integrity sha512-S4kL7XrjgBmvdGut0sN3yJxqYzrDOnivkBiN0OFs6hLiUam3UPvswUo0kqGyhqUZGEOytHyumEdXsAkgCOUf3Q==
dependencies:
follow-redirects "^1.15.6"
form-data "^4.0.0"
proxy-from-env "^1.1.0"
bail@^1.0.0:
version "1.0.5"
resolved "https://registry.yarnpkg.com/bail/-/bail-1.0.5.tgz#b6fa133404a392cbc1f8c4bf63f5953351e7a776"
integrity sha512-xFbRxM1tahm08yHBP16MMjVUAvDaBMD38zsM9EMAUN61omwLmKlOpB/Zku5QkjZ8TZ4vn53pj+t518cH0S03RQ==
balanced-match@^1.0.0:
version "1.0.2"
resolved "https://registry.yarnpkg.com/balanced-match/-/balanced-match-1.0.2.tgz#e83e3a7e3f300b34cb9d87f615fa0cbf357690ee"
integrity sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==
before-after-hook@^3.0.2:
version "3.0.2"
resolved "https://registry.yarnpkg.com/before-after-hook/-/before-after-hook-3.0.2.tgz#d5665a5fa8b62294a5aa0a499f933f4a1016195d"
integrity sha512-Nik3Sc0ncrMK4UUdXQmAnRtzmNQTAAXmXIopizwZ1W1t8QmfJj+zL4OA2I7XPTPW5z5TDqv4hRo/JzouDJnX3A==
brace-expansion@^2.0.1:
version "2.0.1"
resolved "https://registry.yarnpkg.com/brace-expansion/-/brace-expansion-2.0.1.tgz#1edc459e0f0c548486ecf9fc99f2221364b9a0ae"
integrity sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==
dependencies:
balanced-match "^1.0.0"
braces@^3.0.3:
version "3.0.3"
resolved "https://registry.yarnpkg.com/braces/-/braces-3.0.3.tgz#490332f40919452272d55a8480adc0c441358789"
integrity sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==
dependencies:
fill-range "^7.1.1"
call-bind@^1.0.2, call-bind@^1.0.5, call-bind@^1.0.6, call-bind@^1.0.7:
version "1.0.7"
resolved "https://registry.yarnpkg.com/call-bind/-/call-bind-1.0.7.tgz#06016599c40c56498c18769d2730be242b6fa3b9"
integrity sha512-GHTSNSYICQ7scH7sZ+M2rFopRoLh8t2bLSW6BbgrtLsahOIB5iyAVJf9GjWK3cYTDaMj4XdBpM1cA6pIS0Kv2w==
dependencies:
es-define-property "^1.0.0"
es-errors "^1.3.0"
function-bind "^1.1.2"
get-intrinsic "^1.2.4"
set-function-length "^1.2.1"
camelcase-keys@^6.2.2:
version "6.2.2"
resolved "https://registry.yarnpkg.com/camelcase-keys/-/camelcase-keys-6.2.2.tgz#5e755d6ba51aa223ec7d3d52f25778210f9dc3c0"
integrity sha512-YrwaA0vEKazPBkn0ipTiMpSajYDSe+KjQfrjhcBMxJt/znbvlHd8Pw/Vamaz5EB4Wfhs3SUR3Z9mwRu/P3s3Yg==
dependencies:
camelcase "^5.3.1"
map-obj "^4.0.0"
quick-lru "^4.0.1"
camelcase@^5.3.1:
version "5.3.1"
resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-5.3.1.tgz#e3c9b31569e106811df242f715725a1f4c494320"
integrity sha512-L28STB170nwWS63UjtlEOE3dldQApaJXZkOI1uMFfzf3rRuPegHaHesyee+YxQ+W6SvRDQV6UrdOdRiR153wJg==
ccount@^1.0.0:
version "1.1.0"
resolved "https://registry.yarnpkg.com/ccount/-/ccount-1.1.0.tgz#246687debb6014735131be8abab2d93898f8d043"
integrity sha512-vlNK021QdI7PNeiUh/lKkC/mNHHfV0m/Ad5JoI0TYtlBnJAslM/JIkm/tGC88bkLIwO6OQ5uV6ztS6kVAtCDlg==
chalk@^2.4.2:
version "2.4.2"
resolved "https://registry.yarnpkg.com/chalk/-/chalk-2.4.2.tgz#cd42541677a54333cf541a49108c1432b44c9424"
integrity sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==
dependencies:
ansi-styles "^3.2.1"
escape-string-regexp "^1.0.5"
supports-color "^5.3.0"
character-entities-legacy@^1.0.0:
version "1.1.4"
resolved "https://registry.yarnpkg.com/character-entities-legacy/-/character-entities-legacy-1.1.4.tgz#94bc1845dce70a5bb9d2ecc748725661293d8fc1"
integrity sha512-3Xnr+7ZFS1uxeiUDvV02wQ+QDbc55o97tIV5zHScSPJpcLm/r0DFPcoY3tYRp+VZukxuMeKgXYmsXQHO05zQeA==
character-entities@^1.0.0:
version "1.2.4"
resolved "https://registry.yarnpkg.com/character-entities/-/character-entities-1.2.4.tgz#e12c3939b7eaf4e5b15e7ad4c5e28e1d48c5b16b"
integrity sha512-iBMyeEHxfVnIakwOuDXpVkc54HijNgCyQB2w0VfGQThle6NXn50zU6V/u+LDhxHcDUPojn6Kpga3PTAD8W1bQw==
character-reference-invalid@^1.0.0:
version "1.1.4"
resolved "https://registry.yarnpkg.com/character-reference-invalid/-/character-reference-invalid-1.1.4.tgz#083329cda0eae272ab3dbbf37e9a382c13af1560"
integrity sha512-mKKUkUbhPpQlCOfIuZkvSEgktjPFIsZKRRbC6KWVEMvlzblj3i3asQv5ODsrwt0N3pHAEvjP8KTQPHkp0+6jOg==
code-block-writer@^12.0.0:
version "12.0.0"
resolved "https://registry.yarnpkg.com/code-block-writer/-/code-block-writer-12.0.0.tgz#4dd58946eb4234105aff7f0035977b2afdc2a770"
integrity sha512-q4dMFMlXtKR3XNBHyMHt/3pwYNA69EDk00lloMOaaUMKPUXBw6lpXtbu3MMVG6/uOihGnRDOlkyqsONEUj60+w==
color-convert@^1.9.0:
version "1.9.3"
resolved "https://registry.yarnpkg.com/color-convert/-/color-convert-1.9.3.tgz#bb71850690e1f136567de629d2d5471deda4c1e8"
integrity sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==
dependencies:
color-name "1.1.3"
color-convert@^2.0.1:
version "2.0.1"
resolved "https://registry.yarnpkg.com/color-convert/-/color-convert-2.0.1.tgz#72d3a68d598c9bdb3af2ad1e84f21d896abd4de3"
integrity sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==
dependencies:
color-name "~1.1.4"
color-name@1.1.3:
version "1.1.3"
resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.3.tgz#a7d0558bd89c42f795dd42328f740831ca53bc25"
integrity sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==
color-name@~1.1.4:
version "1.1.4"
resolved "https://registry.yarnpkg.com/color-name/-/color-name-1.1.4.tgz#c2a09a87acbde69543de6f63fa3995c826c536a2"
integrity sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==
combined-stream@^1.0.8:
version "1.0.8"
resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-1.0.8.tgz#c3d45a8b34fd730631a110a8a2520682b31d5a7f"
integrity sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==
dependencies:
delayed-stream "~1.0.0"
commander@^11.1.0:
version "11.1.0"
resolved "https://registry.yarnpkg.com/commander/-/commander-11.1.0.tgz#62fdce76006a68e5c1ab3314dc92e800eb83d906"
integrity sha512-yPVavfyCcRhmorC7rWlkHn15b4wDVgVmBA7kV4QVBsF7kv/9TKJAbAXVTxvTnwP8HHKjRCJDClKbciiYS7p0DQ==
concat-md@^0.5.1:
version "0.5.1"
resolved "https://registry.yarnpkg.com/concat-md/-/concat-md-0.5.1.tgz#03c72343a5d81306aa5ae1040d6368ffbc444781"
integrity sha512-iZr6yxlwPQ5IZup2mvqgm+JI0jnu5yGkND2ra5DinBtcevDQPQiAGpf4RXOnor1UpKBUydqegDLfPY8b+FfI+Q==
dependencies:
doctoc "^2.2.1"
front-matter "^4.0.2"
globby "^11.1.0"
lodash.startcase "^4.4.0"
meow "^9.0.0"
transform-markdown-links "^2.0.0"
cross-spawn@^7.0.0:
version "7.0.5"
resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.5.tgz#910aac880ff5243da96b728bc6521a5f6c2f2f82"
integrity sha512-ZVJrKKYunU38/76t0RMOulHOnUcbU9GbpWKAOZ0mhjr7CX6FVrH+4FrAapSOekrgFQ3f/8gwMEuIft0aKq6Hug==
dependencies:
path-key "^3.1.0"
shebang-command "^2.0.0"
which "^2.0.1"
data-view-buffer@^1.0.1:
version "1.0.1"
resolved "https://registry.yarnpkg.com/data-view-buffer/-/data-view-buffer-1.0.1.tgz#8ea6326efec17a2e42620696e671d7d5a8bc66b2"
integrity sha512-0lht7OugA5x3iJLOWFhWK/5ehONdprk0ISXqVFn/NFrDu+cuc8iADFrGQz5BnRK7LLU3JmkbXSxaqX+/mXYtUA==
dependencies:
call-bind "^1.0.6"
es-errors "^1.3.0"
is-data-view "^1.0.1"
data-view-byte-length@^1.0.1:
version "1.0.1"
resolved "https://registry.yarnpkg.com/data-view-byte-length/-/data-view-byte-length-1.0.1.tgz#90721ca95ff280677eb793749fce1011347669e2"
integrity sha512-4J7wRJD3ABAzr8wP+OcIcqq2dlUKp4DVflx++hs5h5ZKydWMI6/D/fAot+yh6g2tHh8fLFTvNOaVN357NvSrOQ==
dependencies:
call-bind "^1.0.7"
es-errors "^1.3.0"
is-data-view "^1.0.1"
data-view-byte-offset@^1.0.0:
version "1.0.0"
resolved "https://registry.yarnpkg.com/data-view-byte-offset/-/data-view-byte-offset-1.0.0.tgz#5e0bbfb4828ed2d1b9b400cd8a7d119bca0ff18a"
integrity sha512-t/Ygsytq+R995EJ5PZlD4Cu56sWa8InXySaViRzw9apusqsOO2bQP+SbYzAhR0pFKoB+43lYy8rWban9JSuXnA==
dependencies:
call-bind "^1.0.6"
es-errors "^1.3.0"
is-data-view "^1.0.1"
debug@^4.0.0, debug@^4.3.4:
version "4.3.5"
resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.5.tgz#e83444eceb9fedd4a1da56d671ae2446a01a6e1e"
integrity sha512-pt0bNEmneDIvdL1Xsd9oDQ/wrQRkXDT4AUWlNZNPKvW5x/jyO9VFXkJUP07vQ2upmw5PlaITaPKc31jK13V+jg==
dependencies:
ms "2.1.2"
decamelize-keys@^1.1.0:
version "1.1.1"
resolved "https://registry.yarnpkg.com/decamelize-keys/-/decamelize-keys-1.1.1.tgz#04a2d523b2f18d80d0158a43b895d56dff8d19d8"
integrity sha512-WiPxgEirIV0/eIOMcnFBA3/IJZAZqKnwAwWyvvdi4lsr1WCN22nhdf/3db3DoZcUjTV2SqfzIwNyp6y2xs3nmg==
dependencies:
decamelize "^1.1.0"
map-obj "^1.0.0"
decamelize@^1.1.0, decamelize@^1.2.0:
version "1.2.0"
resolved "https://registry.yarnpkg.com/decamelize/-/decamelize-1.2.0.tgz#f6534d15148269b20352e7bee26f501f9a191290"
integrity sha512-z2S+W9X73hAUUki+N+9Za2lBlun89zigOyGrsax+KUQ6wKW4ZoWpEYBkGhQjwAjjDCkWxhY0VKEhk8wzY7F5cA==
define-data-property@^1.0.1, define-data-property@^1.1.4:
version "1.1.4"
resolved "https://registry.yarnpkg.com/define-data-property/-/define-data-property-1.1.4.tgz#894dc141bb7d3060ae4366f6a0107e68fbe48c5e"
integrity sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==
dependencies:
es-define-property "^1.0.0"
es-errors "^1.3.0"
gopd "^1.0.1"
define-properties@^1.2.0, define-properties@^1.2.1:
version "1.2.1"
resolved "https://registry.yarnpkg.com/define-properties/-/define-properties-1.2.1.tgz#10781cc616eb951a80a034bafcaa7377f6af2b6c"
integrity sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg==
dependencies:
define-data-property "^1.0.1"
has-property-descriptors "^1.0.0"
object-keys "^1.1.1"
delayed-stream@~1.0.0:
version "1.0.0"
resolved "https://registry.yarnpkg.com/delayed-stream/-/delayed-stream-1.0.0.tgz#df3ae199acadfb7d440aaae0b29e2272b24ec619"
integrity sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==
dir-glob@^3.0.1:
version "3.0.1"
resolved "https://registry.yarnpkg.com/dir-glob/-/dir-glob-3.0.1.tgz#56dbf73d992a4a93ba1584f4534063fd2e41717f"
integrity sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==
dependencies:
path-type "^4.0.0"
doctoc@^2.2.1:
version "2.2.1"
resolved "https://registry.yarnpkg.com/doctoc/-/doctoc-2.2.1.tgz#83f6a6bf4df97defbe027c9a82d13091a138ffe2"
integrity sha512-qNJ1gsuo7hH40vlXTVVrADm6pdg30bns/Mo7Nv1SxuXSM1bwF9b4xQ40a6EFT/L1cI+Yylbyi8MPI4G4y7XJzQ==
dependencies:
"@textlint/markdown-to-ast" "^12.1.1"
anchor-markdown-header "^0.6.0"
htmlparser2 "^7.2.0"
minimist "^1.2.6"
underscore "^1.13.2"
update-section "^0.3.3"
dom-serializer@^1.0.1:
version "1.4.1"
resolved "https://registry.yarnpkg.com/dom-serializer/-/dom-serializer-1.4.1.tgz#de5d41b1aea290215dc45a6dae8adcf1d32e2d30"
integrity sha512-VHwB3KfrcOOkelEG2ZOfxqLZdfkil8PtJi4P8N2MMXucZq2yLp75ClViUlOVwyoHEDjYU433Aq+5zWP61+RGag==
dependencies:
domelementtype "^2.0.1"
domhandler "^4.2.0"
entities "^2.0.0"
domelementtype@^2.0.1, domelementtype@^2.2.0:
version "2.3.0"
resolved "https://registry.yarnpkg.com/domelementtype/-/domelementtype-2.3.0.tgz#5c45e8e869952626331d7aab326d01daf65d589d"
integrity sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw==
domhandler@^4.2.0, domhandler@^4.2.2:
version "4.3.1"
resolved "https://registry.yarnpkg.com/domhandler/-/domhandler-4.3.1.tgz#8d792033416f59d68bc03a5aa7b018c1ca89279c"
integrity sha512-GrwoxYN+uWlzO8uhUXRl0P+kHE4GtVPfYzVLcUxPL7KNdHKj66vvlhiweIHqYYXWlw+T8iLMp42Lm67ghw4WMQ==
dependencies:
domelementtype "^2.2.0"
domutils@^2.8.0:
version "2.8.0"
resolved "https://registry.yarnpkg.com/domutils/-/domutils-2.8.0.tgz#4437def5db6e2d1f5d6ee859bd95ca7d02048135"
integrity sha512-w96Cjofp72M5IIhpjgobBimYEfoPjx1Vx0BSX9P30WBdZW2WIKU0T1Bd0kz2eNZ9ikjKgHbEyKx8BB6H1L3h3A==
dependencies:
dom-serializer "^1.0.1"
domelementtype "^2.2.0"
domhandler "^4.2.0"
eastasianwidth@^0.2.0:
version "0.2.0"
resolved "https://registry.yarnpkg.com/eastasianwidth/-/eastasianwidth-0.2.0.tgz#696ce2ec0aa0e6ea93a397ffcf24aa7840c827cb"
integrity sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==
emoji-regex@^8.0.0:
version "8.0.0"
resolved "https://registry.yarnpkg.com/emoji-regex/-/emoji-regex-8.0.0.tgz#e818fd69ce5ccfcb404594f842963bf53164cc37"
integrity sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==
emoji-regex@^9.2.2:
version "9.2.2"
resolved "https://registry.yarnpkg.com/emoji-regex/-/emoji-regex-9.2.2.tgz#840c8803b0d8047f4ff0cf963176b32d4ef3ed72"
integrity sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==
emoji-regex@~10.1.0:
version "10.1.0"
resolved "https://registry.yarnpkg.com/emoji-regex/-/emoji-regex-10.1.0.tgz#d50e383743c0f7a5945c47087295afc112e3cf66"
integrity sha512-xAEnNCT3w2Tg6MA7ly6QqYJvEoY1tm9iIjJ3yMKK9JPlWuRHAMoe5iETwQnx3M9TVbFMfsrBgWKR+IsmswwNjg==
entities@^2.0.0:
version "2.2.0"
resolved "https://registry.yarnpkg.com/entities/-/entities-2.2.0.tgz#098dc90ebb83d8dffa089d55256b351d34c4da55"
integrity sha512-p92if5Nz619I0w+akJrLZH0MX0Pb5DX39XOwQTtXSdQQOaYH03S1uIQp4mhOZtAXrxq4ViO67YTiLBo2638o9A==
entities@^3.0.1:
version "3.0.1"
resolved "https://registry.yarnpkg.com/entities/-/entities-3.0.1.tgz#2b887ca62585e96db3903482d336c1006c3001d4"
integrity sha512-WiyBqoomrwMdFG1e0kqvASYfnlb0lp8M5o5Fw2OFq1hNZxxcNk8Ik0Xm7LxzBhuidnZB/UtBqVCgUz3kBOP51Q==
entities@^4.4.0:
version "4.5.0"
resolved "https://registry.yarnpkg.com/entities/-/entities-4.5.0.tgz#5d268ea5e7113ec74c4d033b79ea5a35a488fb48"
integrity sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==
error-ex@^1.3.1:
version "1.3.2"
resolved "https://registry.yarnpkg.com/error-ex/-/error-ex-1.3.2.tgz#b4ac40648107fdcdcfae242f428bea8a14d4f1bf"
integrity sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==
dependencies:
is-arrayish "^0.2.1"
es-abstract@^1.22.1, es-abstract@^1.22.3, es-abstract@^1.23.0:
version "1.23.3"
resolved "https://registry.yarnpkg.com/es-abstract/-/es-abstract-1.23.3.tgz#8f0c5a35cd215312573c5a27c87dfd6c881a0aa0"
integrity sha512-e+HfNH61Bj1X9/jLc5v1owaLYuHdeHHSQlkhCBiTK8rBvKaULl/beGMxwrMXjpYrv4pz22BlY570vVePA2ho4A==
dependencies:
array-buffer-byte-length "^1.0.1"
arraybuffer.prototype.slice "^1.0.3"
available-typed-arrays "^1.0.7"
call-bind "^1.0.7"
data-view-buffer "^1.0.1"
data-view-byte-length "^1.0.1"
data-view-byte-offset "^1.0.0"
es-define-property "^1.0.0"
es-errors "^1.3.0"
es-object-atoms "^1.0.0"
es-set-tostringtag "^2.0.3"
es-to-primitive "^1.2.1"
function.prototype.name "^1.1.6"
get-intrinsic "^1.2.4"
get-symbol-description "^1.0.2"
globalthis "^1.0.3"
gopd "^1.0.1"
has-property-descriptors "^1.0.2"
has-proto "^1.0.3"
has-symbols "^1.0.3"
hasown "^2.0.2"
internal-slot "^1.0.7"
is-array-buffer "^3.0.4"
is-callable "^1.2.7"
is-data-view "^1.0.1"
is-negative-zero "^2.0.3"
is-regex "^1.1.4"
is-shared-array-buffer "^1.0.3"
is-string "^1.0.7"
is-typed-array "^1.1.13"
is-weakref "^1.0.2"
object-inspect "^1.13.1"
object-keys "^1.1.1"
object.assign "^4.1.5"
regexp.prototype.flags "^1.5.2"
safe-array-concat "^1.1.2"
safe-regex-test "^1.0.3"
string.prototype.trim "^1.2.9"
string.prototype.trimend "^1.0.8"
string.prototype.trimstart "^1.0.8"
typed-array-buffer "^1.0.2"
typed-array-byte-length "^1.0.1"
typed-array-byte-offset "^1.0.2"
typed-array-length "^1.0.6"
unbox-primitive "^1.0.2"
which-typed-array "^1.1.15"
es-define-property@^1.0.0:
version "1.0.0"
resolved "https://registry.yarnpkg.com/es-define-property/-/es-define-property-1.0.0.tgz#c7faefbdff8b2696cf5f46921edfb77cc4ba3845"
integrity sha512-jxayLKShrEqqzJ0eumQbVhTYQM27CfT1T35+gCgDFoL82JLsXqTJ76zv6A0YLOgEnLUMvLzsDsGIrl8NFpT2gQ==
dependencies:
get-intrinsic "^1.2.4"
es-errors@^1.2.1, es-errors@^1.3.0:
version "1.3.0"
resolved "https://registry.yarnpkg.com/es-errors/-/es-errors-1.3.0.tgz#05f75a25dab98e4fb1dcd5e1472c0546d5057c8f"
integrity sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==
es-object-atoms@^1.0.0:
version "1.0.0"
resolved "https://registry.yarnpkg.com/es-object-atoms/-/es-object-atoms-1.0.0.tgz#ddb55cd47ac2e240701260bc2a8e31ecb643d941"
integrity sha512-MZ4iQ6JwHOBQjahnjwaC1ZtIBH+2ohjamzAO3oaHcXYup7qxjF2fixyH+Q71voWHeOkI2q/TnJao/KfXYIZWbw==
dependencies:
es-errors "^1.3.0"
es-set-tostringtag@^2.0.3:
version "2.0.3"
resolved "https://registry.yarnpkg.com/es-set-tostringtag/-/es-set-tostringtag-2.0.3.tgz#8bb60f0a440c2e4281962428438d58545af39777"
integrity sha512-3T8uNMC3OQTHkFUsFq8r/BwAXLHvU/9O9mE0fBc/MY5iq/8H7ncvO947LmYA6ldWw9Uh8Yhf25zu6n7nML5QWQ==
dependencies:
get-intrinsic "^1.2.4"
has-tostringtag "^1.0.2"
hasown "^2.0.1"
es-to-primitive@^1.2.1:
version "1.2.1"
resolved "https://registry.yarnpkg.com/es-to-primitive/-/es-to-primitive-1.2.1.tgz#e55cd4c9cdc188bcefb03b366c736323fc5c898a"
integrity sha512-QCOllgZJtaUo9miYBcLChTUaHNjJF3PYs1VidD7AwiEj1kYxKeQTctLAezAOH5ZKRH0g2IgPn6KwB4IT8iRpvA==
dependencies:
is-callable "^1.1.4"
is-date-object "^1.0.1"
is-symbol "^1.0.2"
escape-string-regexp@^1.0.5:
version "1.0.5"
resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz#1b61c0562190a8dff6ae3bb2cf0200ca130b86d4"
integrity sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==
escape-string-regexp@^4.0.0:
version "4.0.0"
resolved "https://registry.yarnpkg.com/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz#14ba83a5d373e3d311e5afca29cf5bfad965bf34"
integrity sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==
esprima@^4.0.0:
version "4.0.1"
resolved "https://registry.yarnpkg.com/esprima/-/esprima-4.0.1.tgz#13b04cdb3e6c5d19df91ab6987a8695619b0aa71"
integrity sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==
eventemitter3@^4.0.4:
version "4.0.7"
resolved "https://registry.yarnpkg.com/eventemitter3/-/eventemitter3-4.0.7.tgz#2de9b68f6528d5644ef5c59526a1b4a07306169f"
integrity sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==
extend@^3.0.0:
version "3.0.2"
resolved "https://registry.yarnpkg.com/extend/-/extend-3.0.2.tgz#f8b1136b4071fbd8eb140aff858b1019ec2915fa"
integrity sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==
fast-glob@^3.2.9, fast-glob@^3.3.2:
version "3.3.2"
resolved "https://registry.yarnpkg.com/fast-glob/-/fast-glob-3.3.2.tgz#a904501e57cfdd2ffcded45e99a54fef55e46129"
integrity sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow==
dependencies:
"@nodelib/fs.stat" "^2.0.2"
"@nodelib/fs.walk" "^1.2.3"
glob-parent "^5.1.2"
merge2 "^1.3.0"
micromatch "^4.0.4"
fastq@^1.6.0:
version "1.17.1"
resolved "https://registry.yarnpkg.com/fastq/-/fastq-1.17.1.tgz#2a523f07a4e7b1e81a42b91b8bf2254107753b47"
integrity sha512-sRVD3lWVIXWg6By68ZN7vho9a1pQcN/WBFaAAsDDFzlJjvoGx0P8z7V1t72grFJfJhu3YPZBuu25f7Kaw2jN1w==
dependencies:
reusify "^1.0.4"
fault@^1.0.0:
version "1.0.4"
resolved "https://registry.yarnpkg.com/fault/-/fault-1.0.4.tgz#eafcfc0a6d214fc94601e170df29954a4f842f13"
integrity sha512-CJ0HCB5tL5fYTEA7ToAq5+kTwd++Borf1/bifxd9iT70QcXr4MRrO3Llf8Ifs70q+SJcGHFtnIE/Nw6giCtECA==
dependencies:
format "^0.2.0"
fill-range@^7.1.1:
version "7.1.1"
resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-7.1.1.tgz#44265d3cac07e3ea7dc247516380643754a05292"
integrity sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==
dependencies:
to-regex-range "^5.0.1"
find-up@^4.1.0:
version "4.1.0"
resolved "https://registry.yarnpkg.com/find-up/-/find-up-4.1.0.tgz#97afe7d6cdc0bc5928584b7c8d7b16e8a9aa5d19"
integrity sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==
dependencies:
locate-path "^5.0.0"
path-exists "^4.0.0"
follow-redirects@^1.15.6:
version "1.15.9"
resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.15.9.tgz#a604fa10e443bf98ca94228d9eebcc2e8a2c8ee1"
integrity sha512-gew4GsXizNgdoRyqmyfMHyAmXsZDk6mHkSxZFCzW9gwlbtOW44CDtYavM+y+72qD/Vq2l550kMF52DT8fOLJqQ==
for-each@^0.3.3:
version "0.3.3"
resolved "https://registry.yarnpkg.com/for-each/-/for-each-0.3.3.tgz#69b447e88a0a5d32c3e7084f3f1710034b21376e"
integrity sha512-jqYfLp7mo9vIyQf8ykW2v7A+2N4QjeCeI5+Dz9XraiO1ign81wjiH7Fb9vSOWvQfNtmSa4H2RoQTrrXivdUZmw==
dependencies:
is-callable "^1.1.3"
foreground-child@^3.1.0:
version "3.3.0"
resolved "https://registry.yarnpkg.com/foreground-child/-/foreground-child-3.3.0.tgz#0ac8644c06e431439f8561db8ecf29a7b5519c77"
integrity sha512-Ld2g8rrAyMYFXBhEqMz8ZAHBi4J4uS1i/CxGMDnjyFWddMXLVcDp051DZfu+t7+ab7Wv6SMqpWmyFIj5UbfFvg==
dependencies:
cross-spawn "^7.0.0"
signal-exit "^4.0.1"
form-data@^4.0.0:
version "4.0.1"
resolved "https://registry.yarnpkg.com/form-data/-/form-data-4.0.1.tgz#ba1076daaaa5bfd7e99c1a6cb02aa0a5cff90d48"
integrity sha512-tzN8e4TX8+kkxGPK8D5u0FNmjPUjw3lwC9lSLxxoB/+GtsJG91CO8bSWy73APlgAZzZbXEYZJuxjkHH2w+Ezhw==
dependencies:
asynckit "^0.4.0"
combined-stream "^1.0.8"
mime-types "^2.1.12"
format@^0.2.0:
version "0.2.2"
resolved "https://registry.yarnpkg.com/format/-/format-0.2.2.tgz#d6170107e9efdc4ed30c9dc39016df942b5cb58b"
integrity sha512-wzsgA6WOq+09wrU1tsJ09udeR/YZRaeArL9e1wPbFg3GG2yDnC2ldKpxs4xunpFF9DgqCqOIra3bc1HWrJ37Ww==
front-matter@^4.0.2:
version "4.0.2"
resolved "https://registry.yarnpkg.com/front-matter/-/front-matter-4.0.2.tgz#b14e54dc745cfd7293484f3210d15ea4edd7f4d5"
integrity sha512-I8ZuJ/qG92NWX8i5x1Y8qyj3vizhXS31OxjKDu3LKP+7/qBgfIKValiZIEwoVoJKUHlhWtYrktkxV1XsX+pPlg==
dependencies:
js-yaml "^3.13.1"
fsevents@~2.3.2:
version "2.3.3"
resolved "https://registry.yarnpkg.com/fsevents/-/fsevents-2.3.3.tgz#cac6407785d03675a2a5e1a5305c697b347d90d6"
integrity sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==
function-bind@^1.1.2:
version "1.1.2"
resolved "https://registry.yarnpkg.com/function-bind/-/function-bind-1.1.2.tgz#2c02d864d97f3ea6c8830c464cbd11ab6eab7a1c"
integrity sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==
function.prototype.name@^1.1.6:
version "1.1.6"
resolved "https://registry.yarnpkg.com/function.prototype.name/-/function.prototype.name-1.1.6.tgz#cdf315b7d90ee77a4c6ee216c3c3362da07533fd"
integrity sha512-Z5kx79swU5P27WEayXM1tBi5Ze/lbIyiNgU3qyXUOf9b2rgXYyF9Dy9Cx+IQv/Lc8WCG6L82zwUPpSS9hGehIg==
dependencies:
call-bind "^1.0.2"
define-properties "^1.2.0"
es-abstract "^1.22.1"
functions-have-names "^1.2.3"
functions-have-names@^1.2.3:
version "1.2.3"
resolved "https://registry.yarnpkg.com/functions-have-names/-/functions-have-names-1.2.3.tgz#0404fe4ee2ba2f607f0e0ec3c80bae994133b834"
integrity sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ==
get-intrinsic@^1.1.3, get-intrinsic@^1.2.1, get-intrinsic@^1.2.3, get-intrinsic@^1.2.4:
version "1.2.4"
resolved "https://registry.yarnpkg.com/get-intrinsic/-/get-intrinsic-1.2.4.tgz#e385f5a4b5227d449c3eabbad05494ef0abbeadd"
integrity sha512-5uYhsJH8VJBTv7oslg4BznJYhDoRI6waYCxMmCdnTrcCrHA/fCFKoTFz2JKKE0HdDFUF7/oQuhzumXJK7paBRQ==
dependencies:
es-errors "^1.3.0"
function-bind "^1.1.2"
has-proto "^1.0.1"
has-symbols "^1.0.3"
hasown "^2.0.0"
get-symbol-description@^1.0.2:
version "1.0.2"
resolved "https://registry.yarnpkg.com/get-symbol-description/-/get-symbol-description-1.0.2.tgz#533744d5aa20aca4e079c8e5daf7fd44202821f5"
integrity sha512-g0QYk1dZBxGwk+Ngc+ltRH2IBp2f7zBkBMBJZCDerh6EhlhSR6+9irMCuT/09zD6qkarHUSn529sK/yL4S27mg==
dependencies:
call-bind "^1.0.5"
es-errors "^1.3.0"
get-intrinsic "^1.2.4"
glob-parent@^5.1.2:
version "5.1.2"
resolved "https://registry.yarnpkg.com/glob-parent/-/glob-parent-5.1.2.tgz#869832c58034fe68a4093c17dc15e8340d8401c4"
integrity sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==
dependencies:
is-glob "^4.0.1"
glob@^10.3.10, glob@^10.3.7:
version "10.4.5"
resolved "https://registry.yarnpkg.com/glob/-/glob-10.4.5.tgz#f4d9f0b90ffdbab09c9d77f5f29b4262517b0956"
integrity sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==
dependencies:
foreground-child "^3.1.0"
jackspeak "^3.1.2"
minimatch "^9.0.4"
minipass "^7.1.2"
package-json-from-dist "^1.0.0"
path-scurry "^1.11.1"
globalthis@^1.0.3:
version "1.0.4"
resolved "https://registry.yarnpkg.com/globalthis/-/globalthis-1.0.4.tgz#7430ed3a975d97bfb59bcce41f5cabbafa651236"
integrity sha512-DpLKbNU4WylpxJykQujfCcwYWiV/Jhm50Goo0wrVILAv5jOr9d+H+UR3PhSCD2rCCEIg0uc+G+muBTwD54JhDQ==
dependencies:
define-properties "^1.2.1"
gopd "^1.0.1"
globby@^11.1.0:
version "11.1.0"
resolved "https://registry.yarnpkg.com/globby/-/globby-11.1.0.tgz#bd4be98bb042f83d796f7e3811991fbe82a0d34b"
integrity sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==
dependencies:
array-union "^2.1.0"
dir-glob "^3.0.1"
fast-glob "^3.2.9"
ignore "^5.2.0"
merge2 "^1.4.1"
slash "^3.0.0"
gopd@^1.0.1:
version "1.0.1"
resolved "https://registry.yarnpkg.com/gopd/-/gopd-1.0.1.tgz#29ff76de69dac7489b7c0918a5788e56477c332c"
integrity sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==
dependencies:
get-intrinsic "^1.1.3"
hard-rejection@^2.1.0:
version "2.1.0"
resolved "https://registry.yarnpkg.com/hard-rejection/-/hard-rejection-2.1.0.tgz#1c6eda5c1685c63942766d79bb40ae773cecd883"
integrity sha512-VIZB+ibDhx7ObhAe7OVtoEbuP4h/MuOTHJ+J8h/eBXotJYl0fBgR72xDFCKgIh22OJZIOVNxBMWuhAr10r8HdA==
has-bigints@^1.0.1, has-bigints@^1.0.2:
version "1.0.2"
resolved "https://registry.yarnpkg.com/has-bigints/-/has-bigints-1.0.2.tgz#0871bd3e3d51626f6ca0966668ba35d5602d6eaa"
integrity sha512-tSvCKtBr9lkF0Ex0aQiP9N+OpV4zi2r/Nee5VkRDbaqv35RLYMzbwQfFSZZH0kR+Rd6302UJZ2p/bJCEoR3VoQ==
has-flag@^3.0.0:
version "3.0.0"
resolved "https://registry.yarnpkg.com/has-flag/-/has-flag-3.0.0.tgz#b5d454dc2199ae225699f3467e5a07f3b955bafd"
integrity sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==
has-property-descriptors@^1.0.0, has-property-descriptors@^1.0.2:
version "1.0.2"
resolved "https://registry.yarnpkg.com/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz#963ed7d071dc7bf5f084c5bfbe0d1b6222586854"
integrity sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==
dependencies:
es-define-property "^1.0.0"
has-proto@^1.0.1, has-proto@^1.0.3:
version "1.0.3"
resolved "https://registry.yarnpkg.com/has-proto/-/has-proto-1.0.3.tgz#b31ddfe9b0e6e9914536a6ab286426d0214f77fd"
integrity sha512-SJ1amZAJUiZS+PhsVLf5tGydlaVB8EdFpaSO4gmiUKUOxk8qzn5AIy4ZeJUmh22znIdk/uMAUT2pl3FxzVUH+Q==
has-symbols@^1.0.2, has-symbols@^1.0.3:
version "1.0.3"
resolved "https://registry.yarnpkg.com/has-symbols/-/has-symbols-1.0.3.tgz#bb7b2c4349251dce87b125f7bdf874aa7c8b39f8"
integrity sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==
has-tostringtag@^1.0.0, has-tostringtag@^1.0.2:
version "1.0.2"
resolved "https://registry.yarnpkg.com/has-tostringtag/-/has-tostringtag-1.0.2.tgz#2cdc42d40bef2e5b4eeab7c01a73c54ce7ab5abc"
integrity sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==
dependencies:
has-symbols "^1.0.3"
hasown@^2.0.0, hasown@^2.0.1, hasown@^2.0.2:
version "2.0.2"
resolved "https://registry.yarnpkg.com/hasown/-/hasown-2.0.2.tgz#003eaf91be7adc372e84ec59dc37252cedb80003"
integrity sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==
dependencies:
function-bind "^1.1.2"
hosted-git-info@^2.1.4:
version "2.8.9"
resolved "https://registry.yarnpkg.com/hosted-git-info/-/hosted-git-info-2.8.9.tgz#dffc0bf9a21c02209090f2aa69429e1414daf3f9"
integrity sha512-mxIDAb9Lsm6DoOJ7xH+5+X4y1LU/4Hi50L9C5sIswK3JzULS4bwk1FvjdBgvYR4bzT4tuUQiC15FE2f5HbLvYw==
hosted-git-info@^4.0.1:
version "4.1.0"
resolved "https://registry.yarnpkg.com/hosted-git-info/-/hosted-git-info-4.1.0.tgz#827b82867e9ff1c8d0c4d9d53880397d2c86d224"
integrity sha512-kyCuEOWjJqZuDbRHzL8V93NzQhwIB71oFWSyzVo+KPZI+pnQPPxucdkrOZvkLRnrf5URsQM+IJ09Dw29cRALIA==
dependencies:
lru-cache "^6.0.0"
htmlparser2@^7.2.0:
version "7.2.0"
resolved "https://registry.yarnpkg.com/htmlparser2/-/htmlparser2-7.2.0.tgz#8817cdea38bbc324392a90b1990908e81a65f5a5"
integrity sha512-H7MImA4MS6cw7nbyURtLPO1Tms7C5H602LRETv95z1MxO/7CP7rDVROehUYeYBUYEON94NXXDEPmZuq+hX4sog==
dependencies:
domelementtype "^2.0.1"
domhandler "^4.2.2"
domutils "^2.8.0"
entities "^3.0.1"
ignore@^5.2.0:
version "5.3.1"
resolved "https://registry.yarnpkg.com/ignore/-/ignore-5.3.1.tgz#5073e554cd42c5b33b394375f538b8593e34d4ef"
integrity sha512-5Fytz/IraMjqpwfd34ke28PTVMjZjJG2MPn5t7OE4eUCUNf8BAa7b5WUS9/Qvr6mwOQS7Mk6vdsMno5he+T8Xw==
indent-string@^4.0.0:
version "4.0.0"
resolved "https://registry.yarnpkg.com/indent-string/-/indent-string-4.0.0.tgz#624f8f4497d619b2d9768531d58f4122854d7251"
integrity sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==
internal-slot@^1.0.7:
version "1.0.7"
resolved "https://registry.yarnpkg.com/internal-slot/-/internal-slot-1.0.7.tgz#c06dcca3ed874249881007b0a5523b172a190802"
integrity sha512-NGnrKwXzSms2qUUih/ILZ5JBqNTSa1+ZmP6flaIp6KmSElgE9qdndzS3cqjrDovwFdmwsGsLdeFgB6suw+1e9g==
dependencies:
es-errors "^1.3.0"
hasown "^2.0.0"
side-channel "^1.0.4"
is-alphabetical@^1.0.0:
version "1.0.4"
resolved "https://registry.yarnpkg.com/is-alphabetical/-/is-alphabetical-1.0.4.tgz#9e7d6b94916be22153745d184c298cbf986a686d"
integrity sha512-DwzsA04LQ10FHTZuL0/grVDk4rFoVH1pjAToYwBrHSxcrBIGQuXrQMtD5U1b0U2XVgKZCTLLP8u2Qxqhy3l2Vg==
is-alphanumerical@^1.0.0:
version "1.0.4"
resolved "https://registry.yarnpkg.com/is-alphanumerical/-/is-alphanumerical-1.0.4.tgz#7eb9a2431f855f6b1ef1a78e326df515696c4dbf"
integrity sha512-UzoZUr+XfVz3t3v4KyGEniVL9BDRoQtY7tOyrRybkVNjDFWyo1yhXNGrrBTQxp3ib9BLAWs7k2YKBQsFRkZG9A==
dependencies:
is-alphabetical "^1.0.0"
is-decimal "^1.0.0"
is-array-buffer@^3.0.4:
version "3.0.4"
resolved "https://registry.yarnpkg.com/is-array-buffer/-/is-array-buffer-3.0.4.tgz#7a1f92b3d61edd2bc65d24f130530ea93d7fae98"
integrity sha512-wcjaerHw0ydZwfhiKbXJWLDY8A7yV7KhjQOpb83hGgGfId/aQa4TOvwyzn2PuswW2gPCYEL/nEAiSVpdOj1lXw==
dependencies:
call-bind "^1.0.2"
get-intrinsic "^1.2.1"
is-arrayish@^0.2.1:
version "0.2.1"
resolved "https://registry.yarnpkg.com/is-arrayish/-/is-arrayish-0.2.1.tgz#77c99840527aa8ecb1a8ba697b80645a7a926a9d"
integrity sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==
is-bigint@^1.0.1:
version "1.0.4"
resolved "https://registry.yarnpkg.com/is-bigint/-/is-bigint-1.0.4.tgz#08147a1875bc2b32005d41ccd8291dffc6691df3"
integrity sha512-zB9CruMamjym81i2JZ3UMn54PKGsQzsJeo6xvN3HJJ4CAsQNB6iRutp2To77OfCNuoxspsIhzaPoO1zyCEhFOg==
dependencies:
has-bigints "^1.0.1"
is-boolean-object@^1.1.0:
version "1.1.2"
resolved "https://registry.yarnpkg.com/is-boolean-object/-/is-boolean-object-1.1.2.tgz#5c6dc200246dd9321ae4b885a114bb1f75f63719"
integrity sha512-gDYaKHJmnj4aWxyj6YHyXVpdQawtVLHU5cb+eztPGczf6cjuTdwve5ZIEfgXqH4e57An1D1AKf8CZ3kYrQRqYA==
dependencies:
call-bind "^1.0.2"
has-tostringtag "^1.0.0"
is-buffer@^2.0.0:
version "2.0.5"
resolved "https://registry.yarnpkg.com/is-buffer/-/is-buffer-2.0.5.tgz#ebc252e400d22ff8d77fa09888821a24a658c191"
integrity sha512-i2R6zNFDwgEHJyQUtJEk0XFi1i0dPFn/oqjK3/vPCcDeJvW5NQ83V8QbicfF1SupOaB0h8ntgBC2YiE7dfyctQ==
is-callable@^1.1.3, is-callable@^1.1.4, is-callable@^1.2.7:
version "1.2.7"
resolved "https://registry.yarnpkg.com/is-callable/-/is-callable-1.2.7.tgz#3bc2a85ea742d9e36205dcacdd72ca1fdc51b055"
integrity sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA==
is-core-module@^2.13.0, is-core-module@^2.5.0:
version "2.14.0"
resolved "https://registry.yarnpkg.com/is-core-module/-/is-core-module-2.14.0.tgz#43b8ef9f46a6a08888db67b1ffd4ec9e3dfd59d1"
integrity sha512-a5dFJih5ZLYlRtDc0dZWP7RiKr6xIKzmn/oAYCDvdLThadVgyJwlaoQPmRtMSpz+rk0OGAgIu+TcM9HUF0fk1A==
dependencies:
hasown "^2.0.2"
is-data-view@^1.0.1:
version "1.0.1"
resolved "https://registry.yarnpkg.com/is-data-view/-/is-data-view-1.0.1.tgz#4b4d3a511b70f3dc26d42c03ca9ca515d847759f"
integrity sha512-AHkaJrsUVW6wq6JS8y3JnM/GJF/9cf+k20+iDzlSaJrinEo5+7vRiteOSwBhHRiAyQATN1AmY4hwzxJKPmYf+w==
dependencies:
is-typed-array "^1.1.13"
is-date-object@^1.0.1:
version "1.0.5"
resolved "https://registry.yarnpkg.com/is-date-object/-/is-date-object-1.0.5.tgz#0841d5536e724c25597bf6ea62e1bd38298df31f"
integrity sha512-9YQaSxsAiSwcvS33MBk3wTCVnWK+HhF8VZR2jRxehM16QcVOdHqPn4VPHmRK4lSr38n9JriurInLcP90xsYNfQ==
dependencies:
has-tostringtag "^1.0.0"
is-decimal@^1.0.0:
version "1.0.4"
resolved "https://registry.yarnpkg.com/is-decimal/-/is-decimal-1.0.4.tgz#65a3a5958a1c5b63a706e1b333d7cd9f630d3fa5"
integrity sha512-RGdriMmQQvZ2aqaQq3awNA6dCGtKpiDFcOzrTWrDAT2MiWrKQVPmxLGHl7Y2nNu6led0kEyoX0enY0qXYsv9zw==
is-extglob@^2.1.1:
version "2.1.1"
resolved "https://registry.yarnpkg.com/is-extglob/-/is-extglob-2.1.1.tgz#a88c02535791f02ed37c76a1b9ea9773c833f8c2"
integrity sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==
is-fullwidth-code-point@^3.0.0:
version "3.0.0"
resolved "https://registry.yarnpkg.com/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz#f116f8064fe90b3f7844a38997c0b75051269f1d"
integrity sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==
is-glob@^4.0.1:
version "4.0.3"
resolved "https://registry.yarnpkg.com/is-glob/-/is-glob-4.0.3.tgz#64f61e42cbbb2eec2071a9dac0b28ba1e65d5084"
integrity sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==
dependencies:
is-extglob "^2.1.1"
is-hexadecimal@^1.0.0:
version "1.0.4"
resolved "https://registry.yarnpkg.com/is-hexadecimal/-/is-hexadecimal-1.0.4.tgz#cc35c97588da4bd49a8eedd6bc4082d44dcb23a7"
integrity sha512-gyPJuv83bHMpocVYoqof5VDiZveEoGoFL8m3BXNb2VW8Xs+rz9kqO8LOQ5DH6EsuvilT1ApazU0pyl+ytbPtlw==
is-negative-zero@^2.0.3:
version "2.0.3"
resolved "https://registry.yarnpkg.com/is-negative-zero/-/is-negative-zero-2.0.3.tgz#ced903a027aca6381b777a5743069d7376a49747"
integrity sha512-5KoIu2Ngpyek75jXodFvnafB6DJgr3u8uuK0LEZJjrU19DrMD3EVERaR8sjz8CCGgpZvxPl9SuE1GMVPFHx1mw==
is-number-object@^1.0.4:
version "1.0.7"
resolved "https://registry.yarnpkg.com/is-number-object/-/is-number-object-1.0.7.tgz#59d50ada4c45251784e9904f5246c742f07a42fc"
integrity sha512-k1U0IRzLMo7ZlYIfzRu23Oh6MiIFasgpb9X76eqfFZAqwH44UI4KTBvBYIZ1dSL9ZzChTB9ShHfLkR4pdW5krQ==
dependencies:
has-tostringtag "^1.0.0"
is-number@^7.0.0:
version "7.0.0"
resolved "https://registry.yarnpkg.com/is-number/-/is-number-7.0.0.tgz#7535345b896734d5f80c4d06c50955527a14f12b"
integrity sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==
is-plain-obj@^1.1.0:
version "1.1.0"
resolved "https://registry.yarnpkg.com/is-plain-obj/-/is-plain-obj-1.1.0.tgz#71a50c8429dfca773c92a390a4a03b39fcd51d3e"
integrity sha512-yvkRyxmFKEOQ4pNXCmJG5AEQNlXJS5LaONXo5/cLdTZdWvsZ1ioJEonLGAosKlMWE8lwUy/bJzMjcw8az73+Fg==
is-plain-obj@^2.0.0:
version "2.1.0"
resolved "https://registry.yarnpkg.com/is-plain-obj/-/is-plain-obj-2.1.0.tgz#45e42e37fccf1f40da8e5f76ee21515840c09287"
integrity sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA==
is-regex@^1.1.4:
version "1.1.4"
resolved "https://registry.yarnpkg.com/is-regex/-/is-regex-1.1.4.tgz#eef5663cd59fa4c0ae339505323df6854bb15958"
integrity sha512-kvRdxDsxZjhzUX07ZnLydzS1TU/TJlTUHHY4YLL87e37oUA49DfkLqgy+VjFocowy29cKvcSiu+kIv728jTTVg==
dependencies:
call-bind "^1.0.2"
has-tostringtag "^1.0.0"
is-shared-array-buffer@^1.0.2, is-shared-array-buffer@^1.0.3:
version "1.0.3"
resolved "https://registry.yarnpkg.com/is-shared-array-buffer/-/is-shared-array-buffer-1.0.3.tgz#1237f1cba059cdb62431d378dcc37d9680181688"
integrity sha512-nA2hv5XIhLR3uVzDDfCIknerhx8XUKnstuOERPNNIinXG7v9u+ohXF67vxm4TPTEPU6lm61ZkwP3c9PCB97rhg==
dependencies:
call-bind "^1.0.7"
is-string@^1.0.5, is-string@^1.0.7:
version "1.0.7"
resolved "https://registry.yarnpkg.com/is-string/-/is-string-1.0.7.tgz#0dd12bf2006f255bb58f695110eff7491eebc0fd"
integrity sha512-tE2UXzivje6ofPW7l23cjDOMa09gb7xlAqG6jG5ej6uPV32TlWP3NKPigtaGeHNu9fohccRYvIiZMfOOnOYUtg==
dependencies:
has-tostringtag "^1.0.0"
is-symbol@^1.0.2, is-symbol@^1.0.3:
version "1.0.4"
resolved "https://registry.yarnpkg.com/is-symbol/-/is-symbol-1.0.4.tgz#a6dac93b635b063ca6872236de88910a57af139c"
integrity sha512-C/CPBqKWnvdcxqIARxyOh4v1UUEOCHpgDa0WYgpKDFMszcrPcffg5uhwSgPCLD2WWxmq6isisz87tzT01tuGhg==
dependencies:
has-symbols "^1.0.2"
is-typed-array@^1.1.13:
version "1.1.13"
resolved "https://registry.yarnpkg.com/is-typed-array/-/is-typed-array-1.1.13.tgz#d6c5ca56df62334959322d7d7dd1cca50debe229"
integrity sha512-uZ25/bUAlUY5fR4OKT4rZQEBrzQWYV9ZJYGGsUmEJ6thodVJ1HX64ePQ6Z0qPWP+m+Uq6e9UugrE38jeYsDSMw==
dependencies:
which-typed-array "^1.1.14"
is-weakref@^1.0.2:
version "1.0.2"
resolved "https://registry.yarnpkg.com/is-weakref/-/is-weakref-1.0.2.tgz#9529f383a9338205e89765e0392efc2f100f06f2"
integrity sha512-qctsuLZmIQ0+vSSMfoVvyFe2+GSEvnmZ2ezTup1SBse9+twCCeial6EEi3Nc2KFcf6+qz2FBPnjXsk8xhKSaPQ==
dependencies:
call-bind "^1.0.2"
isarray@^2.0.5:
version "2.0.5"
resolved "https://registry.yarnpkg.com/isarray/-/isarray-2.0.5.tgz#8af1e4c1221244cc62459faf38940d4e644a5723"
integrity sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==
isexe@^2.0.0:
version "2.0.0"
resolved "https://registry.yarnpkg.com/isexe/-/isexe-2.0.0.tgz#e8fbf374dc556ff8947a10dcb0572d633f2cfa10"
integrity sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==
jackspeak@^3.1.2:
version "3.4.3"
resolved "https://registry.yarnpkg.com/jackspeak/-/jackspeak-3.4.3.tgz#8833a9d89ab4acde6188942bd1c53b6390ed5a8a"
integrity sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==
dependencies:
"@isaacs/cliui" "^8.0.2"
optionalDependencies:
"@pkgjs/parseargs" "^0.11.0"
js-tokens@^4.0.0:
version "4.0.0"
resolved "https://registry.yarnpkg.com/js-tokens/-/js-tokens-4.0.0.tgz#19203fb59991df98e3a287050d4647cdeaf32499"
integrity sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==
js-yaml@^3.13.1:
version "3.14.1"
resolved "https://registry.yarnpkg.com/js-yaml/-/js-yaml-3.14.1.tgz#dae812fdb3825fa306609a8717383c50c36a0537"
integrity sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==
dependencies:
argparse "^1.0.7"
esprima "^4.0.0"
json-parse-even-better-errors@^2.3.0:
version "2.3.1"
resolved "https://registry.yarnpkg.com/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz#7c47805a94319928e05777405dc12e1f7a4ee02d"
integrity sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==
kind-of@^6.0.3:
version "6.0.3"
resolved "https://registry.yarnpkg.com/kind-of/-/kind-of-6.0.3.tgz#07c05034a6c349fa06e24fa35aa76db4580ce4dd"
integrity sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==
lines-and-columns@^1.1.6:
version "1.2.4"
resolved "https://registry.yarnpkg.com/lines-and-columns/-/lines-and-columns-1.2.4.tgz#eca284f75d2965079309dc0ad9255abb2ebc1632"
integrity sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==
linkify-it@^5.0.0:
version "5.0.0"
resolved "https://registry.yarnpkg.com/linkify-it/-/linkify-it-5.0.0.tgz#9ef238bfa6dc70bd8e7f9572b52d369af569b421"
integrity sha512-5aHCbzQRADcdP+ATqnDuhhJ/MRIqDkZX5pyjFHRRysS8vZ5AbqGEoFIb6pYHPZ+L/OC2Lc+xT8uHVVR5CAK/wQ==
dependencies:
uc.micro "^2.0.0"
locate-path@^5.0.0:
version "5.0.0"
resolved "https://registry.yarnpkg.com/locate-path/-/locate-path-5.0.0.tgz#1afba396afd676a6d42504d0a67a3a7eb9f62aa0"
integrity sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==
dependencies:
p-locate "^4.1.0"
lodash.startcase@^4.4.0:
version "4.4.0"
resolved "https://registry.yarnpkg.com/lodash.startcase/-/lodash.startcase-4.4.0.tgz#9436e34ed26093ed7ffae1936144350915d9add8"
integrity sha512-+WKqsK294HMSc2jEbNgpHpd0JfIBhp7rEV4aqXWqFr6AlXov+SlcgB1Fv01y2kGe3Gc8nMW7VA0SrGuSkRfIEg==
lodash@^4.17.21:
version "4.17.21"
resolved "https://registry.yarnpkg.com/lodash/-/lodash-4.17.21.tgz#679591c564c3bffaae8454cf0b3df370c3d6911c"
integrity sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==
longest-streak@^2.0.0:
version "2.0.4"
resolved "https://registry.yarnpkg.com/longest-streak/-/longest-streak-2.0.4.tgz#b8599957da5b5dab64dee3fe316fa774597d90e4"
integrity sha512-vM6rUVCVUJJt33bnmHiZEvr7wPT78ztX7rojL+LW51bHtLh6HTjx84LA5W4+oa6aKEJA7jJu5LR6vQRBpA5DVg==
lru-cache@^10.2.0:
version "10.4.3"
resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-10.4.3.tgz#410fc8a17b70e598013df257c2446b7f3383f119"
integrity sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==
lru-cache@^6.0.0:
version "6.0.0"
resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-6.0.0.tgz#6d6fe6570ebd96aaf90fcad1dafa3b2566db3a94"
integrity sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==
dependencies:
yallist "^4.0.0"
lunr@^2.3.9:
version "2.3.9"
resolved "https://registry.yarnpkg.com/lunr/-/lunr-2.3.9.tgz#18b123142832337dd6e964df1a5a7707b25d35e1"
integrity sha512-zTU3DaZaF3Rt9rhN3uBMGQD3dD2/vFQqnvZCDv4dl5iOzq2IZQqTxu90r4E5J+nP70J3ilqVCrbho2eWaeW8Ow==
map-obj@^1.0.0:
version "1.0.1"
resolved "https://registry.yarnpkg.com/map-obj/-/map-obj-1.0.1.tgz#d933ceb9205d82bdcf4886f6742bdc2b4dea146d"
integrity sha512-7N/q3lyZ+LVCp7PzuxrJr4KMbBE2hW7BT7YNia330OFxIf4d3r5zVpicP2650l7CPN6RM9zOJRl3NGpqSiw3Eg==
map-obj@^4.0.0:
version "4.3.0"
resolved "https://registry.yarnpkg.com/map-obj/-/map-obj-4.3.0.tgz#9304f906e93faae70880da102a9f1df0ea8bb05a"
integrity sha512-hdN1wVrZbb29eBGiGjJbeP8JbKjq1urkHJ/LIP/NY48MZ1QVXUsQBV1G1zvYFHn1XE06cwjBsOI2K3Ulnj1YXQ==
markdown-it@^14.1.0:
version "14.1.0"
resolved "https://registry.yarnpkg.com/markdown-it/-/markdown-it-14.1.0.tgz#3c3c5992883c633db4714ccb4d7b5935d98b7d45"
integrity sha512-a54IwgWPaeBCAAsv13YgmALOF1elABB08FxO9i+r4VFk5Vl4pKokRPeX8u5TCgSsPi6ec1otfLjdOpVcgbpshg==
dependencies:
argparse "^2.0.1"
entities "^4.4.0"
linkify-it "^5.0.0"
mdurl "^2.0.0"
punycode.js "^2.3.1"
uc.micro "^2.1.0"
markdown-table@^2.0.0:
version "2.0.0"
resolved "https://registry.yarnpkg.com/markdown-table/-/markdown-table-2.0.0.tgz#194a90ced26d31fe753d8b9434430214c011865b"
integrity sha512-Ezda85ToJUBhM6WGaG6veasyym+Tbs3cMAw/ZhOPqXiYsr0jgocBV3j3nx+4lk47plLlIqjwuTm/ywVI+zjJ/A==
dependencies:
repeat-string "^1.0.0"
mdast-util-find-and-replace@^1.1.0:
version "1.1.1"
resolved "https://registry.yarnpkg.com/mdast-util-find-and-replace/-/mdast-util-find-and-replace-1.1.1.tgz#b7db1e873f96f66588c321f1363069abf607d1b5"
integrity sha512-9cKl33Y21lyckGzpSmEQnIDjEfeeWelN5s1kUW1LwdB0Fkuq2u+4GdqcGEygYxJE8GVqCl0741bYXHgamfWAZA==
dependencies:
escape-string-regexp "^4.0.0"
unist-util-is "^4.0.0"
unist-util-visit-parents "^3.0.0"
mdast-util-footnote@^0.1.0:
version "0.1.7"
resolved "https://registry.yarnpkg.com/mdast-util-footnote/-/mdast-util-footnote-0.1.7.tgz#4b226caeab4613a3362c144c94af0fdd6f7e0ef0"
integrity sha512-QxNdO8qSxqbO2e3m09KwDKfWiLgqyCurdWTQ198NpbZ2hxntdc+VKS4fDJCmNWbAroUdYnSthu+XbZ8ovh8C3w==
dependencies:
mdast-util-to-markdown "^0.6.0"
micromark "~2.11.0"
mdast-util-from-markdown@^0.8.0:
version "0.8.5"
resolved "https://registry.yarnpkg.com/mdast-util-from-markdown/-/mdast-util-from-markdown-0.8.5.tgz#d1ef2ca42bc377ecb0463a987910dae89bd9a28c"
integrity sha512-2hkTXtYYnr+NubD/g6KGBS/0mFmBcifAsI0yIWRiRo0PjVs6SSOSOdtzbp6kSGnShDN6G5aWZpKQ2lWRy27mWQ==
dependencies:
"@types/mdast" "^3.0.0"
mdast-util-to-string "^2.0.0"
micromark "~2.11.0"
parse-entities "^2.0.0"
unist-util-stringify-position "^2.0.0"
mdast-util-frontmatter@^0.2.0:
version "0.2.0"
resolved "https://registry.yarnpkg.com/mdast-util-frontmatter/-/mdast-util-frontmatter-0.2.0.tgz#8bd5cd55e236c03e204a036f7372ebe9e6748240"
integrity sha512-FHKL4w4S5fdt1KjJCwB0178WJ0evnyyQr5kXTM3wrOVpytD0hrkvd+AOOjU9Td8onOejCkmZ+HQRT3CZ3coHHQ==
dependencies:
micromark-extension-frontmatter "^0.2.0"
mdast-util-gfm-autolink-literal@^0.1.0, mdast-util-gfm-autolink-literal@^0.1.3:
version "0.1.3"
resolved "https://registry.yarnpkg.com/mdast-util-gfm-autolink-literal/-/mdast-util-gfm-autolink-literal-0.1.3.tgz#9c4ff399c5ddd2ece40bd3b13e5447d84e385fb7"
integrity sha512-GjmLjWrXg1wqMIO9+ZsRik/s7PLwTaeCHVB7vRxUwLntZc8mzmTsLVr6HW1yLokcnhfURsn5zmSVdi3/xWWu1A==
dependencies:
ccount "^1.0.0"
mdast-util-find-and-replace "^1.1.0"
micromark "^2.11.3"
mdast-util-gfm-strikethrough@^0.2.0:
version "0.2.3"
resolved "https://registry.yarnpkg.com/mdast-util-gfm-strikethrough/-/mdast-util-gfm-strikethrough-0.2.3.tgz#45eea337b7fff0755a291844fbea79996c322890"
integrity sha512-5OQLXpt6qdbttcDG/UxYY7Yjj3e8P7X16LzvpX8pIQPYJ/C2Z1qFGMmcw+1PZMUM3Z8wt8NRfYTvCni93mgsgA==
dependencies:
mdast-util-to-markdown "^0.6.0"
mdast-util-gfm-table@^0.1.0:
version "0.1.6"
resolved "https://registry.yarnpkg.com/mdast-util-gfm-table/-/mdast-util-gfm-table-0.1.6.tgz#af05aeadc8e5ee004eeddfb324b2ad8c029b6ecf"
integrity sha512-j4yDxQ66AJSBwGkbpFEp9uG/LS1tZV3P33fN1gkyRB2LoRL+RR3f76m0HPHaby6F4Z5xr9Fv1URmATlRRUIpRQ==
dependencies:
markdown-table "^2.0.0"
mdast-util-to-markdown "~0.6.0"
mdast-util-gfm-task-list-item@^0.1.0:
version "0.1.6"
resolved "https://registry.yarnpkg.com/mdast-util-gfm-task-list-item/-/mdast-util-gfm-task-list-item-0.1.6.tgz#70c885e6b9f543ddd7e6b41f9703ee55b084af10"
integrity sha512-/d51FFIfPsSmCIRNp7E6pozM9z1GYPIkSy1urQ8s/o4TC22BZ7DqfHFWiqBD23bc7J3vV1Fc9O4QIHBlfuit8A==
dependencies:
mdast-util-to-markdown "~0.6.0"
mdast-util-gfm@^0.1.0:
version "0.1.2"
resolved "https://registry.yarnpkg.com/mdast-util-gfm/-/mdast-util-gfm-0.1.2.tgz#8ecddafe57d266540f6881f5c57ff19725bd351c"
integrity sha512-NNkhDx/qYcuOWB7xHUGWZYVXvjPFFd6afg6/e2g+SV4r9q5XUcCbV4Wfa3DLYIiD+xAEZc6K4MGaE/m0KDcPwQ==
dependencies:
mdast-util-gfm-autolink-literal "^0.1.0"
mdast-util-gfm-strikethrough "^0.2.0"
mdast-util-gfm-table "^0.1.0"
mdast-util-gfm-task-list-item "^0.1.0"
mdast-util-to-markdown "^0.6.1"
mdast-util-to-markdown@^0.6.0, mdast-util-to-markdown@^0.6.1, mdast-util-to-markdown@~0.6.0:
version "0.6.5"
resolved "https://registry.yarnpkg.com/mdast-util-to-markdown/-/mdast-util-to-markdown-0.6.5.tgz#b33f67ca820d69e6cc527a93d4039249b504bebe"
integrity sha512-XeV9sDE7ZlOQvs45C9UKMtfTcctcaj/pGwH8YLbMHoMOXNNCn2LsqVQOqrF1+/NU8lKDAqozme9SCXWyo9oAcQ==
dependencies:
"@types/unist" "^2.0.0"
longest-streak "^2.0.0"
mdast-util-to-string "^2.0.0"
parse-entities "^2.0.0"
repeat-string "^1.0.0"
zwitch "^1.0.0"
mdast-util-to-string@^2.0.0:
version "2.0.0"
resolved "https://registry.yarnpkg.com/mdast-util-to-string/-/mdast-util-to-string-2.0.0.tgz#b8cfe6a713e1091cb5b728fc48885a4767f8b97b"
integrity sha512-AW4DRS3QbBayY/jJmD8437V1Gombjf8RSOUCMFBuo5iHi58AGEgVCKQ+ezHkZZDpAQS75hcBMpLqjpJTjtUL7w==
mdurl@^2.0.0:
version "2.0.0"
resolved "https://registry.yarnpkg.com/mdurl/-/mdurl-2.0.0.tgz#80676ec0433025dd3e17ee983d0fe8de5a2237e0"
integrity sha512-Lf+9+2r+Tdp5wXDXC4PcIBjTDtq4UKjCPMQhKIuzpJNW0b96kVqSwW0bT7FhRSfmAiFYgP+SCRvdrDozfh0U5w==
meow@^9.0.0:
version "9.0.0"
resolved "https://registry.yarnpkg.com/meow/-/meow-9.0.0.tgz#cd9510bc5cac9dee7d03c73ee1f9ad959f4ea364"
integrity sha512-+obSblOQmRhcyBt62furQqRAQpNyWXo8BuQ5bN7dG8wmwQ+vwHKp/rCFD4CrTP8CsDQD1sjoZ94K417XEUk8IQ==
dependencies:
"@types/minimist" "^1.2.0"
camelcase-keys "^6.2.2"
decamelize "^1.2.0"
decamelize-keys "^1.1.0"
hard-rejection "^2.1.0"
minimist-options "4.1.0"
normalize-package-data "^3.0.0"
read-pkg-up "^7.0.1"
redent "^3.0.0"
trim-newlines "^3.0.0"
type-fest "^0.18.0"
yargs-parser "^20.2.3"
merge2@^1.3.0, merge2@^1.4.1:
version "1.4.1"
resolved "https://registry.yarnpkg.com/merge2/-/merge2-1.4.1.tgz#4368892f885e907455a6fd7dc55c0c9d404990ae"
integrity sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==
micromark-extension-footnote@^0.3.0:
version "0.3.2"
resolved "https://registry.yarnpkg.com/micromark-extension-footnote/-/micromark-extension-footnote-0.3.2.tgz#129b74ef4920ce96719b2c06102ee7abb2b88a20"
integrity sha512-gr/BeIxbIWQoUm02cIfK7mdMZ/fbroRpLsck4kvFtjbzP4yi+OPVbnukTc/zy0i7spC2xYE/dbX1Sur8BEDJsQ==
dependencies:
micromark "~2.11.0"
micromark-extension-frontmatter@^0.2.0:
version "0.2.2"
resolved "https://registry.yarnpkg.com/micromark-extension-frontmatter/-/micromark-extension-frontmatter-0.2.2.tgz#61b8e92e9213e1d3c13f5a59e7862f5ca98dfa53"
integrity sha512-q6nPLFCMTLtfsctAuS0Xh4vaolxSFUWUWR6PZSrXXiRy+SANGllpcqdXFv2z07l0Xz/6Hl40hK0ffNCJPH2n1A==
dependencies:
fault "^1.0.0"
micromark-extension-gfm-autolink-literal@~0.5.0:
version "0.5.7"
resolved "https://registry.yarnpkg.com/micromark-extension-gfm-autolink-literal/-/micromark-extension-gfm-autolink-literal-0.5.7.tgz#53866c1f0c7ef940ae7ca1f72c6faef8fed9f204"
integrity sha512-ePiDGH0/lhcngCe8FtH4ARFoxKTUelMp4L7Gg2pujYD5CSMb9PbblnyL+AAMud/SNMyusbS2XDSiPIRcQoNFAw==
dependencies:
micromark "~2.11.3"
micromark-extension-gfm-strikethrough@~0.6.5:
version "0.6.5"
resolved "https://registry.yarnpkg.com/micromark-extension-gfm-strikethrough/-/micromark-extension-gfm-strikethrough-0.6.5.tgz#96cb83356ff87bf31670eefb7ad7bba73e6514d1"
integrity sha512-PpOKlgokpQRwUesRwWEp+fHjGGkZEejj83k9gU5iXCbDG+XBA92BqnRKYJdfqfkrRcZRgGuPuXb7DaK/DmxOhw==
dependencies:
micromark "~2.11.0"
micromark-extension-gfm-table@~0.4.0:
version "0.4.3"
resolved "https://registry.yarnpkg.com/micromark-extension-gfm-table/-/micromark-extension-gfm-table-0.4.3.tgz#4d49f1ce0ca84996c853880b9446698947f1802b"
integrity sha512-hVGvESPq0fk6ALWtomcwmgLvH8ZSVpcPjzi0AjPclB9FsVRgMtGZkUcpE0zgjOCFAznKepF4z3hX8z6e3HODdA==
dependencies:
micromark "~2.11.0"
micromark-extension-gfm-tagfilter@~0.3.0:
version "0.3.0"
resolved "https://registry.yarnpkg.com/micromark-extension-gfm-tagfilter/-/micromark-extension-gfm-tagfilter-0.3.0.tgz#d9f26a65adee984c9ccdd7e182220493562841ad"
integrity sha512-9GU0xBatryXifL//FJH+tAZ6i240xQuFrSL7mYi8f4oZSbc+NvXjkrHemeYP0+L4ZUT+Ptz3b95zhUZnMtoi/Q==
micromark-extension-gfm-task-list-item@~0.3.0:
version "0.3.3"
resolved "https://registry.yarnpkg.com/micromark-extension-gfm-task-list-item/-/micromark-extension-gfm-task-list-item-0.3.3.tgz#d90c755f2533ed55a718129cee11257f136283b8"
integrity sha512-0zvM5iSLKrc/NQl84pZSjGo66aTGd57C1idmlWmE87lkMcXrTxg1uXa/nXomxJytoje9trP0NDLvw4bZ/Z/XCQ==
dependencies:
micromark "~2.11.0"
micromark-extension-gfm@^0.3.0:
version "0.3.3"
resolved "https://registry.yarnpkg.com/micromark-extension-gfm/-/micromark-extension-gfm-0.3.3.tgz#36d1a4c089ca8bdfd978c9bd2bf1a0cb24e2acfe"
integrity sha512-oVN4zv5/tAIA+l3GbMi7lWeYpJ14oQyJ3uEim20ktYFAcfX1x3LNlFGGlmrZHt7u9YlKExmyJdDGaTt6cMSR/A==
dependencies:
micromark "~2.11.0"
micromark-extension-gfm-autolink-literal "~0.5.0"
micromark-extension-gfm-strikethrough "~0.6.5"
micromark-extension-gfm-table "~0.4.0"
micromark-extension-gfm-tagfilter "~0.3.0"
micromark-extension-gfm-task-list-item "~0.3.0"
micromark@^2.11.3, micromark@~2.11.0, micromark@~2.11.3:
version "2.11.4"
resolved "https://registry.yarnpkg.com/micromark/-/micromark-2.11.4.tgz#d13436138eea826383e822449c9a5c50ee44665a"
integrity sha512-+WoovN/ppKolQOFIAajxi7Lu9kInbPxFuTBVEavFcL8eAfVstoc5MocPmqBeAdBOJV00uaVjegzH4+MA0DN/uA==
dependencies:
debug "^4.0.0"
parse-entities "^2.0.0"
micromatch@^4.0.4:
version "4.0.8"
resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-4.0.8.tgz#d66fa18f3a47076789320b9b1af32bd86d9fa202"
integrity sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==
dependencies:
braces "^3.0.3"
picomatch "^2.3.1"
mime-db@1.52.0:
version "1.52.0"
resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.52.0.tgz#bbabcdc02859f4987301c856e3387ce5ec43bf70"
integrity sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==
mime-types@^2.1.12:
version "2.1.35"
resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.35.tgz#381a871b62a734450660ae3deee44813f70d959a"
integrity sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==
dependencies:
mime-db "1.52.0"
min-indent@^1.0.0:
version "1.0.1"
resolved "https://registry.yarnpkg.com/min-indent/-/min-indent-1.0.1.tgz#a63f681673b30571fbe8bc25686ae746eefa9869"
integrity sha512-I9jwMn07Sy/IwOj3zVkVik2JTvgpaykDZEigL6Rx6N9LbMywwUSMtxET+7lVoDLLd3O3IXwJwvuuns8UB/HeAg==
minimatch@^9.0.3:
version "9.0.5"
resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-9.0.5.tgz#d74f9dd6b57d83d8e98cfb82133b03978bc929e5"
integrity sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==
dependencies:
brace-expansion "^2.0.1"
minimatch@^9.0.4:
version "9.0.4"
resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-9.0.4.tgz#8e49c731d1749cbec05050ee5145147b32496a51"
integrity sha512-KqWh+VchfxcMNRAJjj2tnsSJdNbHsVgnkBhTNrW7AjVo6OvLtxw8zfT9oLw1JSohlFzJ8jCoTgaoXvJ+kHt6fw==
dependencies:
brace-expansion "^2.0.1"
minimist-options@4.1.0:
version "4.1.0"
resolved "https://registry.yarnpkg.com/minimist-options/-/minimist-options-4.1.0.tgz#c0655713c53a8a2ebd77ffa247d342c40f010619"
integrity sha512-Q4r8ghd80yhO/0j1O3B2BjweX3fiHg9cdOwjJd2J76Q135c+NDxGCqdYKQ1SKBuFfgWbAUzBfvYjPUEeNgqN1A==
dependencies:
arrify "^1.0.1"
is-plain-obj "^1.1.0"
kind-of "^6.0.3"
minimist@^1.2.6:
version "1.2.8"
resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.8.tgz#c1a464e7693302e082a075cee0c057741ac4772c"
integrity sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==
"minipass@^5.0.0 || ^6.0.2 || ^7.0.0", minipass@^7.1.2:
version "7.1.2"
resolved "https://registry.yarnpkg.com/minipass/-/minipass-7.1.2.tgz#93a9626ce5e5e66bd4db86849e7515e92340a707"
integrity sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==
mkdirp@^3.0.1:
version "3.0.1"
resolved "https://registry.yarnpkg.com/mkdirp/-/mkdirp-3.0.1.tgz#e44e4c5607fb279c168241713cc6e0fea9adcb50"
integrity sha512-+NsyUUAZDmo6YVHzL/stxSu3t9YS1iljliy3BSDrXJ/dkn1KYdmtZODGGjLcc9XLgVVpH4KshHB8XmZgMhaBXg==
ms@2.1.2:
version "2.1.2"
resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.2.tgz#d09d1f357b443f493382a8eb3ccd183872ae6009"
integrity sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==
normalize-package-data@^2.5.0:
version "2.5.0"
resolved "https://registry.yarnpkg.com/normalize-package-data/-/normalize-package-data-2.5.0.tgz#e66db1838b200c1dfc233225d12cb36520e234a8"
integrity sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA==
dependencies:
hosted-git-info "^2.1.4"
resolve "^1.10.0"
semver "2 || 3 || 4 || 5"
validate-npm-package-license "^3.0.1"
normalize-package-data@^3.0.0:
version "3.0.3"
resolved "https://registry.yarnpkg.com/normalize-package-data/-/normalize-package-data-3.0.3.tgz#dbcc3e2da59509a0983422884cd172eefdfa525e"
integrity sha512-p2W1sgqij3zMMyRC067Dg16bfzVH+w7hyegmpIvZ4JNjqtGOVAIvLmjBx3yP7YTe9vKJgkoNOPjwQGogDoMXFA==
dependencies:
hosted-git-info "^4.0.1"
is-core-module "^2.5.0"
semver "^7.3.4"
validate-npm-package-license "^3.0.1"
object-inspect@^1.13.1:
version "1.13.2"
resolved "https://registry.yarnpkg.com/object-inspect/-/object-inspect-1.13.2.tgz#dea0088467fb991e67af4058147a24824a3043ff"
integrity sha512-IRZSRuzJiynemAXPYtPe5BoI/RESNYR7TYm50MC5Mqbd3Jmw5y790sErYw3V6SryFJD64b74qQQs9wn5Bg/k3g==
object-keys@^1.1.1:
version "1.1.1"
resolved "https://registry.yarnpkg.com/object-keys/-/object-keys-1.1.1.tgz#1c47f272df277f3b1daf061677d9c82e2322c60e"
integrity sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==
object.assign@^4.1.5:
version "4.1.5"
resolved "https://registry.yarnpkg.com/object.assign/-/object.assign-4.1.5.tgz#3a833f9ab7fdb80fc9e8d2300c803d216d8fdbb0"
integrity sha512-byy+U7gp+FVwmyzKPYhW2h5l3crpmGsxl7X2s8y43IgxvG4g3QZ6CffDtsNQy1WsmZpQbO+ybo0AlW7TY6DcBQ==
dependencies:
call-bind "^1.0.5"
define-properties "^1.2.1"
has-symbols "^1.0.3"
object-keys "^1.1.1"
p-finally@^1.0.0:
version "1.0.0"
resolved "https://registry.yarnpkg.com/p-finally/-/p-finally-1.0.0.tgz#3fbcfb15b899a44123b34b6dcc18b724336a2cae"
integrity sha512-LICb2p9CB7FS+0eR1oqWnHhp0FljGLZCWBE9aix0Uye9W8LTQPwMTYVGWQWIw9RdQiDg4+epXQODwIYJtSJaow==
p-limit@^2.2.0:
version "2.3.0"
resolved "https://registry.yarnpkg.com/p-limit/-/p-limit-2.3.0.tgz#3dd33c647a214fdfffd835933eb086da0dc21db1"
integrity sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==
dependencies:
p-try "^2.0.0"
p-locate@^4.1.0:
version "4.1.0"
resolved "https://registry.yarnpkg.com/p-locate/-/p-locate-4.1.0.tgz#a3428bb7088b3a60292f66919278b7c297ad4f07"
integrity sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==
dependencies:
p-limit "^2.2.0"
p-queue@^6.6.2:
version "6.6.2"
resolved "https://registry.yarnpkg.com/p-queue/-/p-queue-6.6.2.tgz#2068a9dcf8e67dd0ec3e7a2bcb76810faa85e426"
integrity sha512-RwFpb72c/BhQLEXIZ5K2e+AhgNVmIejGlTgiB9MzZ0e93GRvqZ7uSi0dvRF7/XIXDeNkra2fNHBxTyPDGySpjQ==
dependencies:
eventemitter3 "^4.0.4"
p-timeout "^3.2.0"
p-retry@4:
version "4.6.2"
resolved "https://registry.yarnpkg.com/p-retry/-/p-retry-4.6.2.tgz#9baae7184057edd4e17231cee04264106e092a16"
integrity sha512-312Id396EbJdvRONlngUx0NydfrIQ5lsYu0znKVUzVvArzEIt08V1qhtyESbGVd1FGX7UKtiFp5uwKZdM8wIuQ==
dependencies:
"@types/retry" "0.12.0"
retry "^0.13.1"
p-timeout@^3.2.0:
version "3.2.0"
resolved "https://registry.yarnpkg.com/p-timeout/-/p-timeout-3.2.0.tgz#c7e17abc971d2a7962ef83626b35d635acf23dfe"
integrity sha512-rhIwUycgwwKcP9yTOOFK/AKsAopjjCakVqLHePO3CC6Mir1Z99xT+R63jZxAT5lFZLa2inS5h+ZS2GvR99/FBg==
dependencies:
p-finally "^1.0.0"
p-try@^2.0.0:
version "2.2.0"
resolved "https://registry.yarnpkg.com/p-try/-/p-try-2.2.0.tgz#cb2868540e313d61de58fafbe35ce9004d5540e6"
integrity sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==
package-json-from-dist@^1.0.0:
version "1.0.1"
resolved "https://registry.yarnpkg.com/package-json-from-dist/-/package-json-from-dist-1.0.1.tgz#4f1471a010827a86f94cfd9b0727e36d267de505"
integrity sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw==
parse-entities@^2.0.0:
version "2.0.0"
resolved "https://registry.yarnpkg.com/parse-entities/-/parse-entities-2.0.0.tgz#53c6eb5b9314a1f4ec99fa0fdf7ce01ecda0cbe8"
integrity sha512-kkywGpCcRYhqQIchaWqZ875wzpS/bMKhz5HnN3p7wveJTkTtyAB/AlnS0f8DFSqYW1T82t6yEAkEcB+A1I3MbQ==
dependencies:
character-entities "^1.0.0"
character-entities-legacy "^1.0.0"
character-reference-invalid "^1.0.0"
is-alphanumerical "^1.0.0"
is-decimal "^1.0.0"
is-hexadecimal "^1.0.0"
parse-json@^5.0.0:
version "5.2.0"
resolved "https://registry.yarnpkg.com/parse-json/-/parse-json-5.2.0.tgz#c76fc66dee54231c962b22bcc8a72cf2f99753cd"
integrity sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==
dependencies:
"@babel/code-frame" "^7.0.0"
error-ex "^1.3.1"
json-parse-even-better-errors "^2.3.0"
lines-and-columns "^1.1.6"
path-browserify@^1.0.1:
version "1.0.1"
resolved "https://registry.yarnpkg.com/path-browserify/-/path-browserify-1.0.1.tgz#d98454a9c3753d5790860f16f68867b9e46be1fd"
integrity sha512-b7uo2UCUOYZcnF/3ID0lulOJi/bafxa1xPe7ZPsammBSpjSWQkjNxlt635YGS2MiR9GjvuXCtz2emr3jbsz98g==
path-exists@^4.0.0:
version "4.0.0"
resolved "https://registry.yarnpkg.com/path-exists/-/path-exists-4.0.0.tgz#513bdbe2d3b95d7762e8c1137efa195c6c61b5b3"
integrity sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==
path-key@^3.1.0:
version "3.1.1"
resolved "https://registry.yarnpkg.com/path-key/-/path-key-3.1.1.tgz#581f6ade658cbba65a0d3380de7753295054f375"
integrity sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==
path-parse@^1.0.7:
version "1.0.7"
resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.7.tgz#fbc114b60ca42b30d9daf5858e4bd68bbedb6735"
integrity sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==
path-scurry@^1.11.1:
version "1.11.1"
resolved "https://registry.yarnpkg.com/path-scurry/-/path-scurry-1.11.1.tgz#7960a668888594a0720b12a911d1a742ab9f11d2"
integrity sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==
dependencies:
lru-cache "^10.2.0"
minipass "^5.0.0 || ^6.0.2 || ^7.0.0"
path-type@^4.0.0:
version "4.0.0"
resolved "https://registry.yarnpkg.com/path-type/-/path-type-4.0.0.tgz#84ed01c0a7ba380afe09d90a8c180dcd9d03043b"
integrity sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==
picocolors@^1.0.0:
version "1.0.1"
resolved "https://registry.yarnpkg.com/picocolors/-/picocolors-1.0.1.tgz#a8ad579b571952f0e5d25892de5445bcfe25aaa1"
integrity sha512-anP1Z8qwhkbmu7MFP5iTt+wQKXgwzf7zTyGlcdzabySa9vd0Xt392U0rVmz9poOaBj0uHJKyyo9/upk0HrEQew==
picomatch@^2.3.1:
version "2.3.1"
resolved "https://registry.yarnpkg.com/picomatch/-/picomatch-2.3.1.tgz#3ba3833733646d9d3e4995946c1365a67fb07a42"
integrity sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==
possible-typed-array-names@^1.0.0:
version "1.0.0"
resolved "https://registry.yarnpkg.com/possible-typed-array-names/-/possible-typed-array-names-1.0.0.tgz#89bb63c6fada2c3e90adc4a647beeeb39cc7bf8f"
integrity sha512-d7Uw+eZoloe0EHDIYoe+bQ5WXnGMOpmiZFTuMWCwpjzzkL2nTjcKiAk4hh8TjnGye2TwWOk3UXucZ+3rbmBa8Q==
prettier@^3.2.5:
version "3.2.5"
resolved "https://registry.yarnpkg.com/prettier/-/prettier-3.2.5.tgz#e52bc3090586e824964a8813b09aba6233b28368"
integrity sha512-3/GWa9aOC0YeD7LUfvOG2NiDyhOWRvt1k+rcKhOuYnMY24iiCphgneUfJDyFXd6rZCAnuLBv6UeAULtrhT/F4A==
proxy-from-env@^1.1.0:
version "1.1.0"
resolved "https://registry.yarnpkg.com/proxy-from-env/-/proxy-from-env-1.1.0.tgz#e102f16ca355424865755d2c9e8ea4f24d58c3e2"
integrity sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==
punycode.js@^2.3.1:
version "2.3.1"
resolved "https://registry.yarnpkg.com/punycode.js/-/punycode.js-2.3.1.tgz#6b53e56ad75588234e79f4affa90972c7dd8cdb7"
integrity sha512-uxFIHU0YlHYhDQtV4R9J6a52SLx28BCjT+4ieh7IGbgwVJWO+km431c4yRlREUAsAmt/uMjQUyQHNEPf0M39CA==
queue-microtask@^1.2.2:
version "1.2.3"
resolved "https://registry.yarnpkg.com/queue-microtask/-/queue-microtask-1.2.3.tgz#4929228bbc724dfac43e0efb058caf7b6cfb6243"
integrity sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==
quick-lru@^4.0.1:
version "4.0.1"
resolved "https://registry.yarnpkg.com/quick-lru/-/quick-lru-4.0.1.tgz#5b8878f113a58217848c6482026c73e1ba57727f"
integrity sha512-ARhCpm70fzdcvNQfPoy49IaanKkTlRWF2JMzqhcJbhSFRZv7nPTvZJdcY7301IPmvW+/p0RgIWnQDLJxifsQ7g==
read-pkg-up@^7.0.1:
version "7.0.1"
resolved "https://registry.yarnpkg.com/read-pkg-up/-/read-pkg-up-7.0.1.tgz#f3a6135758459733ae2b95638056e1854e7ef507"
integrity sha512-zK0TB7Xd6JpCLmlLmufqykGE+/TlOePD6qKClNW7hHDKFh/J7/7gCWGR7joEQEW1bKq3a3yUZSObOoWLFQ4ohg==
dependencies:
find-up "^4.1.0"
read-pkg "^5.2.0"
type-fest "^0.8.1"
read-pkg@^5.2.0:
version "5.2.0"
resolved "https://registry.yarnpkg.com/read-pkg/-/read-pkg-5.2.0.tgz#7bf295438ca5a33e56cd30e053b34ee7250c93cc"
integrity sha512-Ug69mNOpfvKDAc2Q8DRpMjjzdtrnv9HcSMX+4VsZxD1aZ6ZzrIE7rlzXBtWTyhULSMKg076AW6WR5iZpD0JiOg==
dependencies:
"@types/normalize-package-data" "^2.4.0"
normalize-package-data "^2.5.0"
parse-json "^5.0.0"
type-fest "^0.6.0"
readline@^1.3.0:
version "1.3.0"
resolved "https://registry.yarnpkg.com/readline/-/readline-1.3.0.tgz#c580d77ef2cfc8752b132498060dc9793a7ac01c"
integrity sha512-k2d6ACCkiNYz222Fs/iNze30rRJ1iIicW7JuX/7/cozvih6YCkFZH+J6mAFDVgv0dRBaAyr4jDqC95R2y4IADg==
redent@^3.0.0:
version "3.0.0"
resolved "https://registry.yarnpkg.com/redent/-/redent-3.0.0.tgz#e557b7998316bb53c9f1f56fa626352c6963059f"
integrity sha512-6tDA8g98We0zd0GvVeMT9arEOnTw9qM03L9cJXaCjrip1OO764RDBLBfrB4cwzNGDj5OA5ioymC9GkizgWJDUg==
dependencies:
indent-string "^4.0.0"
strip-indent "^3.0.0"
regexp.prototype.flags@^1.5.2:
version "1.5.2"
resolved "https://registry.yarnpkg.com/regexp.prototype.flags/-/regexp.prototype.flags-1.5.2.tgz#138f644a3350f981a858c44f6bb1a61ff59be334"
integrity sha512-NcDiDkTLuPR+++OCKB0nWafEmhg/Da8aUPLPMQbK+bxKKCm1/S5he+AqYa4PlMCVBalb4/yxIRub6qkEx5yJbw==
dependencies:
call-bind "^1.0.6"
define-properties "^1.2.1"
es-errors "^1.3.0"
set-function-name "^2.0.1"
remark-footnotes@^3.0.0:
version "3.0.0"
resolved "https://registry.yarnpkg.com/remark-footnotes/-/remark-footnotes-3.0.0.tgz#5756b56f8464fa7ed80dbba0c966136305d8cb8d"
integrity sha512-ZssAvH9FjGYlJ/PBVKdSmfyPc3Cz4rTWgZLI4iE/SX8Nt5l3o3oEjv3wwG5VD7xOjktzdwp5coac+kJV9l4jgg==
dependencies:
mdast-util-footnote "^0.1.0"
micromark-extension-footnote "^0.3.0"
remark-frontmatter@^3.0.0:
version "3.0.0"
resolved "https://registry.yarnpkg.com/remark-frontmatter/-/remark-frontmatter-3.0.0.tgz#ca5d996361765c859bd944505f377d6b186a6ec6"
integrity sha512-mSuDd3svCHs+2PyO29h7iijIZx4plX0fheacJcAoYAASfgzgVIcXGYSq9GFyYocFLftQs8IOmmkgtOovs6d4oA==
dependencies:
mdast-util-frontmatter "^0.2.0"
micromark-extension-frontmatter "^0.2.0"
remark-gfm@^1.0.0:
version "1.0.0"
resolved "https://registry.yarnpkg.com/remark-gfm/-/remark-gfm-1.0.0.tgz#9213643001be3f277da6256464d56fd28c3b3c0d"
integrity sha512-KfexHJCiqvrdBZVbQ6RopMZGwaXz6wFJEfByIuEwGf0arvITHjiKKZ1dpXujjH9KZdm1//XJQwgfnJ3lmXaDPA==
dependencies:
mdast-util-gfm "^0.1.0"
micromark-extension-gfm "^0.3.0"
remark-parse@^9.0.0:
version "9.0.0"
resolved "https://registry.yarnpkg.com/remark-parse/-/remark-parse-9.0.0.tgz#4d20a299665880e4f4af5d90b7c7b8a935853640"
integrity sha512-geKatMwSzEXKHuzBNU1z676sGcDcFoChMK38TgdHJNAYfFtsfHDQG7MoJAjs6sgYMqyLduCYWDIWZIxiPeafEw==
dependencies:
mdast-util-from-markdown "^0.8.0"
repeat-string@^1.0.0:
version "1.6.1"
resolved "https://registry.yarnpkg.com/repeat-string/-/repeat-string-1.6.1.tgz#8dcae470e1c88abc2d600fff4a776286da75e637"
integrity sha512-PV0dzCYDNfRi1jCDbJzpW7jNNDRuCOG/jI5ctQcGKt/clZD+YcPS3yIlWuTJMmESC8aevCFmWJy5wjAFgNqN6w==
resolve@^1.10.0:
version "1.22.8"
resolved "https://registry.yarnpkg.com/resolve/-/resolve-1.22.8.tgz#b6c87a9f2aa06dfab52e3d70ac8cde321fa5a48d"
integrity sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==
dependencies:
is-core-module "^2.13.0"
path-parse "^1.0.7"
supports-preserve-symlinks-flag "^1.0.0"
retry@^0.13.1:
version "0.13.1"
resolved "https://registry.yarnpkg.com/retry/-/retry-0.13.1.tgz#185b1587acf67919d63b357349e03537b2484658"
integrity sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==
reusify@^1.0.4:
version "1.0.4"
resolved "https://registry.yarnpkg.com/reusify/-/reusify-1.0.4.tgz#90da382b1e126efc02146e90845a88db12925d76"
integrity sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==
rimraf@^5.0.1:
version "5.0.10"
resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-5.0.10.tgz#23b9843d3dc92db71f96e1a2ce92e39fd2a8221c"
integrity sha512-l0OE8wL34P4nJH/H2ffoaniAokM2qSmrtXHmlpvYr5AVVX8msAyW0l8NVJFDxlSK4u3Uh/f41cQheDVdnYijwQ==
dependencies:
glob "^10.3.7"
rollup@^4.5.2:
version "4.24.4"
resolved "https://registry.yarnpkg.com/rollup/-/rollup-4.24.4.tgz#fdc76918de02213c95447c9ffff5e35dddb1d058"
integrity sha512-vGorVWIsWfX3xbcyAS+I047kFKapHYivmkaT63Smj77XwvLSJos6M1xGqZnBPFQFBRZDOcG1QnYEIxAvTr/HjA==
dependencies:
"@types/estree" "1.0.6"
optionalDependencies:
"@rollup/rollup-android-arm-eabi" "4.24.4"
"@rollup/rollup-android-arm64" "4.24.4"
"@rollup/rollup-darwin-arm64" "4.24.4"
"@rollup/rollup-darwin-x64" "4.24.4"
"@rollup/rollup-freebsd-arm64" "4.24.4"
"@rollup/rollup-freebsd-x64" "4.24.4"
"@rollup/rollup-linux-arm-gnueabihf" "4.24.4"
"@rollup/rollup-linux-arm-musleabihf" "4.24.4"
"@rollup/rollup-linux-arm64-gnu" "4.24.4"
"@rollup/rollup-linux-arm64-musl" "4.24.4"
"@rollup/rollup-linux-powerpc64le-gnu" "4.24.4"
"@rollup/rollup-linux-riscv64-gnu" "4.24.4"
"@rollup/rollup-linux-s390x-gnu" "4.24.4"
"@rollup/rollup-linux-x64-gnu" "4.24.4"
"@rollup/rollup-linux-x64-musl" "4.24.4"
"@rollup/rollup-win32-arm64-msvc" "4.24.4"
"@rollup/rollup-win32-ia32-msvc" "4.24.4"
"@rollup/rollup-win32-x64-msvc" "4.24.4"
fsevents "~2.3.2"
run-parallel@^1.1.9:
version "1.2.0"
resolved "https://registry.yarnpkg.com/run-parallel/-/run-parallel-1.2.0.tgz#66d1368da7bdf921eb9d95bd1a9229e7f21a43ee"
integrity sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==
dependencies:
queue-microtask "^1.2.2"
safe-array-concat@^1.1.2:
version "1.1.2"
resolved "https://registry.yarnpkg.com/safe-array-concat/-/safe-array-concat-1.1.2.tgz#81d77ee0c4e8b863635227c721278dd524c20edb"
integrity sha512-vj6RsCsWBCf19jIeHEfkRMw8DPiBb+DMXklQ/1SGDHOMlHdPUkZXFQ2YdplS23zESTijAcurb1aSgJA3AgMu1Q==
dependencies:
call-bind "^1.0.7"
get-intrinsic "^1.2.4"
has-symbols "^1.0.3"
isarray "^2.0.5"
safe-regex-test@^1.0.3:
version "1.0.3"
resolved "https://registry.yarnpkg.com/safe-regex-test/-/safe-regex-test-1.0.3.tgz#a5b4c0f06e0ab50ea2c395c14d8371232924c377"
integrity sha512-CdASjNJPvRa7roO6Ra/gLYBTzYzzPyyBXxIMdGW3USQLyjWEls2RgW5UBTXaQVp+OrpeCK3bLem8smtmheoRuw==
dependencies:
call-bind "^1.0.6"
es-errors "^1.3.0"
is-regex "^1.1.4"
"semver@2 || 3 || 4 || 5":
version "5.7.2"
resolved "https://registry.yarnpkg.com/semver/-/semver-5.7.2.tgz#48d55db737c3287cd4835e17fa13feace1c41ef8"
integrity sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==
semver@^7.3.4:
version "7.6.2"
resolved "https://registry.yarnpkg.com/semver/-/semver-7.6.2.tgz#1e3b34759f896e8f14d6134732ce798aeb0c6e13"
integrity sha512-FNAIBWCx9qcRhoHcgcJ0gvU7SN1lYU2ZXuSfl04bSC5OpvDHFyJCjdNHomPXxjQlCBU67YW64PzY7/VIEH7F2w==
set-function-length@^1.2.1:
version "1.2.2"
resolved "https://registry.yarnpkg.com/set-function-length/-/set-function-length-1.2.2.tgz#aac72314198eaed975cf77b2c3b6b880695e5449"
integrity sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==
dependencies:
define-data-property "^1.1.4"
es-errors "^1.3.0"
function-bind "^1.1.2"
get-intrinsic "^1.2.4"
gopd "^1.0.1"
has-property-descriptors "^1.0.2"
set-function-name@^2.0.1:
version "2.0.2"
resolved "https://registry.yarnpkg.com/set-function-name/-/set-function-name-2.0.2.tgz#16a705c5a0dc2f5e638ca96d8a8cd4e1c2b90985"
integrity sha512-7PGFlmtwsEADb0WYyvCMa1t+yke6daIG4Wirafur5kcf+MhUnPms1UeR0CKQdTZD81yESwMHbtn+TR+dMviakQ==
dependencies:
define-data-property "^1.1.4"
es-errors "^1.3.0"
functions-have-names "^1.2.3"
has-property-descriptors "^1.0.2"
shebang-command@^2.0.0:
version "2.0.0"
resolved "https://registry.yarnpkg.com/shebang-command/-/shebang-command-2.0.0.tgz#ccd0af4f8835fbdc265b82461aaf0c36663f34ea"
integrity sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==
dependencies:
shebang-regex "^3.0.0"
shebang-regex@^3.0.0:
version "3.0.0"
resolved "https://registry.yarnpkg.com/shebang-regex/-/shebang-regex-3.0.0.tgz#ae16f1644d873ecad843b0307b143362d4c42172"
integrity sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==
shiki@^1.9.0:
version "1.9.0"
resolved "https://registry.yarnpkg.com/shiki/-/shiki-1.9.0.tgz#e4d3a044d9c746aefbea47615e83323fdc3dc361"
integrity sha512-i6//Lqgn7+7nZA0qVjoYH0085YdNk4MC+tJV4bo+HgjgRMJ0JmkLZzFAuvVioJqLkcGDK5GAMpghZEZkCnwxpQ==
dependencies:
"@shikijs/core" "1.9.0"
side-channel@^1.0.4:
version "1.0.6"
resolved "https://registry.yarnpkg.com/side-channel/-/side-channel-1.0.6.tgz#abd25fb7cd24baf45466406b1096b7831c9215f2"
integrity sha512-fDW/EZ6Q9RiO8eFG8Hj+7u/oW+XrPTIChwCOM2+th2A6OblDtYYIpve9m+KvI9Z4C9qSEXlaGR6bTEYHReuglA==
dependencies:
call-bind "^1.0.7"
es-errors "^1.3.0"
get-intrinsic "^1.2.4"
object-inspect "^1.13.1"
signal-exit@^4.0.1:
version "4.1.0"
resolved "https://registry.yarnpkg.com/signal-exit/-/signal-exit-4.1.0.tgz#952188c1cbd546070e2dd20d0f41c0ae0530cb04"
integrity sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==
slash@^3.0.0:
version "3.0.0"
resolved "https://registry.yarnpkg.com/slash/-/slash-3.0.0.tgz#6539be870c165adbd5240220dbe361f1bc4d4634"
integrity sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==
spdx-correct@^3.0.0:
version "3.2.0"
resolved "https://registry.yarnpkg.com/spdx-correct/-/spdx-correct-3.2.0.tgz#4f5ab0668f0059e34f9c00dce331784a12de4e9c"
integrity sha512-kN9dJbvnySHULIluDHy32WHRUu3Og7B9sbY7tsFLctQkIqnMh3hErYgdMjTYuqmcXX+lK5T1lnUt3G7zNswmZA==
dependencies:
spdx-expression-parse "^3.0.0"
spdx-license-ids "^3.0.0"
spdx-exceptions@^2.1.0:
version "2.5.0"
resolved "https://registry.yarnpkg.com/spdx-exceptions/-/spdx-exceptions-2.5.0.tgz#5d607d27fc806f66d7b64a766650fa890f04ed66"
integrity sha512-PiU42r+xO4UbUS1buo3LPJkjlO7430Xn5SVAhdpzzsPHsjbYVflnnFdATgabnLude+Cqu25p6N+g2lw/PFsa4w==
spdx-expression-parse@^3.0.0:
version "3.0.1"
resolved "https://registry.yarnpkg.com/spdx-expression-parse/-/spdx-expression-parse-3.0.1.tgz#cf70f50482eefdc98e3ce0a6833e4a53ceeba679"
integrity sha512-cbqHunsQWnJNE6KhVSMsMeH5H/L9EpymbzqTQ3uLwNCLZ1Q481oWaofqH7nO6V07xlXwY6PhQdQ2IedWx/ZK4Q==
dependencies:
spdx-exceptions "^2.1.0"
spdx-license-ids "^3.0.0"
spdx-license-ids@^3.0.0:
version "3.0.18"
resolved "https://registry.yarnpkg.com/spdx-license-ids/-/spdx-license-ids-3.0.18.tgz#22aa922dcf2f2885a6494a261f2d8b75345d0326"
integrity sha512-xxRs31BqRYHwiMzudOrpSiHtZ8i/GeionCBDSilhYRj+9gIcI8wCZTlXZKu9vZIVqViP3dcp9qE5G6AlIaD+TQ==
sprintf-js@~1.0.2:
version "1.0.3"
resolved "https://registry.yarnpkg.com/sprintf-js/-/sprintf-js-1.0.3.tgz#04e6926f662895354f3dd015203633b857297e2c"
integrity sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==
"string-width-cjs@npm:string-width@^4.2.0":
version "4.2.3"
resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.3.tgz#269c7117d27b05ad2e536830a8ec895ef9c6d010"
integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==
dependencies:
emoji-regex "^8.0.0"
is-fullwidth-code-point "^3.0.0"
strip-ansi "^6.0.1"
string-width@^4.1.0:
version "4.2.3"
resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.3.tgz#269c7117d27b05ad2e536830a8ec895ef9c6d010"
integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==
dependencies:
emoji-regex "^8.0.0"
is-fullwidth-code-point "^3.0.0"
strip-ansi "^6.0.1"
string-width@^5.0.1, string-width@^5.1.2:
version "5.1.2"
resolved "https://registry.yarnpkg.com/string-width/-/string-width-5.1.2.tgz#14f8daec6d81e7221d2a357e668cab73bdbca794"
integrity sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==
dependencies:
eastasianwidth "^0.2.0"
emoji-regex "^9.2.2"
strip-ansi "^7.0.1"
string.prototype.trim@^1.2.9:
version "1.2.9"
resolved "https://registry.yarnpkg.com/string.prototype.trim/-/string.prototype.trim-1.2.9.tgz#b6fa326d72d2c78b6df02f7759c73f8f6274faa4"
integrity sha512-klHuCNxiMZ8MlsOihJhJEBJAiMVqU3Z2nEXWfWnIqjN0gEFS9J9+IxKozWWtQGcgoa1WUZzLjKPTr4ZHNFTFxw==
dependencies:
call-bind "^1.0.7"
define-properties "^1.2.1"
es-abstract "^1.23.0"
es-object-atoms "^1.0.0"
string.prototype.trimend@^1.0.8:
version "1.0.8"
resolved "https://registry.yarnpkg.com/string.prototype.trimend/-/string.prototype.trimend-1.0.8.tgz#3651b8513719e8a9f48de7f2f77640b26652b229"
integrity sha512-p73uL5VCHCO2BZZ6krwwQE3kCzM7NKmis8S//xEC6fQonchbum4eP6kR4DLEjQFO3Wnj3Fuo8NM0kOSjVdHjZQ==
dependencies:
call-bind "^1.0.7"
define-properties "^1.2.1"
es-object-atoms "^1.0.0"
string.prototype.trimstart@^1.0.8:
version "1.0.8"
resolved "https://registry.yarnpkg.com/string.prototype.trimstart/-/string.prototype.trimstart-1.0.8.tgz#7ee834dda8c7c17eff3118472bb35bfedaa34dde"
integrity sha512-UXSH262CSZY1tfu3G3Secr6uGLCFVPMhIqHjlgCUtCCcgihYc/xKs9djMTMUOb2j1mVSeU8EU6NWc/iQKU6Gfg==
dependencies:
call-bind "^1.0.7"
define-properties "^1.2.1"
es-object-atoms "^1.0.0"
"strip-ansi-cjs@npm:strip-ansi@^6.0.1":
version "6.0.1"
resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.1.tgz#9e26c63d30f53443e9489495b2105d37b67a85d9"
integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==
dependencies:
ansi-regex "^5.0.1"
strip-ansi@^6.0.0, strip-ansi@^6.0.1:
version "6.0.1"
resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.1.tgz#9e26c63d30f53443e9489495b2105d37b67a85d9"
integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==
dependencies:
ansi-regex "^5.0.1"
strip-ansi@^7.0.1:
version "7.1.0"
resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-7.1.0.tgz#d5b6568ca689d8561370b0707685d22434faff45"
integrity sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==
dependencies:
ansi-regex "^6.0.1"
strip-indent@^3.0.0:
version "3.0.0"
resolved "https://registry.yarnpkg.com/strip-indent/-/strip-indent-3.0.0.tgz#c32e1cee940b6b3432c771bc2c54bcce73cd3001"
integrity sha512-laJTa3Jb+VQpaC6DseHhF7dXVqHTfJPCRDaEbid/drOhgitgYku/letMUqOXFoWV0zIIUbjpdH2t+tYj4bQMRQ==
dependencies:
min-indent "^1.0.0"
supports-color@^5.3.0:
version "5.5.0"
resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-5.5.0.tgz#e2e69a44ac8772f78a1ec0b35b689df6530efc8f"
integrity sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==
dependencies:
has-flag "^3.0.0"
supports-preserve-symlinks-flag@^1.0.0:
version "1.0.0"
resolved "https://registry.yarnpkg.com/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz#6eda4bd344a3c94aea376d4cc31bc77311039e09"
integrity sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==
to-regex-range@^5.0.1:
version "5.0.1"
resolved "https://registry.yarnpkg.com/to-regex-range/-/to-regex-range-5.0.1.tgz#1648c44aae7c8d988a326018ed72f5b4dd0392e4"
integrity sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==
dependencies:
is-number "^7.0.0"
transform-markdown-links@^2.0.0:
version "2.1.0"
resolved "https://registry.yarnpkg.com/transform-markdown-links/-/transform-markdown-links-2.1.0.tgz#de2178d96ef0e020226ebd967dcc5873df039792"
integrity sha512-7HWQwQ9US+tJSMMzi1aP+KA3QwfjDs8sB4H5GBMRHFNBMQVdgoF6VfIFy2nJR/UHRTkYoGFwWh2pe+QIwSvfOA==
traverse@^0.6.7:
version "0.6.9"
resolved "https://registry.yarnpkg.com/traverse/-/traverse-0.6.9.tgz#76cfdbacf06382d460b76f8b735a44a6209d8b81"
integrity sha512-7bBrcF+/LQzSgFmT0X5YclVqQxtv7TDJ1f8Wj7ibBu/U6BMLeOpUxuZjV7rMc44UtKxlnMFigdhFAIszSX1DMg==
dependencies:
gopd "^1.0.1"
typedarray.prototype.slice "^1.0.3"
which-typed-array "^1.1.15"
trim-newlines@^3.0.0:
version "3.0.1"
resolved "https://registry.yarnpkg.com/trim-newlines/-/trim-newlines-3.0.1.tgz#260a5d962d8b752425b32f3a7db0dcacd176c144"
integrity sha512-c1PTsA3tYrIsLGkJkzHF+w9F2EyxfXGo4UyJc4pFL++FMjnq0HJS69T3M7d//gKrFKwy429bouPescbjecU+Zw==
trough@^1.0.0:
version "1.0.5"
resolved "https://registry.yarnpkg.com/trough/-/trough-1.0.5.tgz#b8b639cefad7d0bb2abd37d433ff8293efa5f406"
integrity sha512-rvuRbTarPXmMb79SmzEp8aqXNKcK+y0XaB298IXueQ8I2PsrATcPBCSPyK/dDNa2iWOhKlfNnOjdAOTBU/nkFA==
ts-morph@^21.0.1:
version "21.0.1"
resolved "https://registry.yarnpkg.com/ts-morph/-/ts-morph-21.0.1.tgz#712302a0f6e9dbf1aa8d9cf33a4386c4b18c2006"
integrity sha512-dbDtVdEAncKctzrVZ+Nr7kHpHkv+0JDJb2MjjpBaj8bFeCkePU9rHfMklmhuLFnpeq/EJZk2IhStY6NzqgjOkg==
dependencies:
"@ts-morph/common" "~0.22.0"
code-block-writer "^12.0.0"
type-fest@^0.18.0:
version "0.18.1"
resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-0.18.1.tgz#db4bc151a4a2cf4eebf9add5db75508db6cc841f"
integrity sha512-OIAYXk8+ISY+qTOwkHtKqzAuxchoMiD9Udx+FSGQDuiRR+PJKJHc2NJAXlbhkGwTt/4/nKZxELY1w3ReWOL8mw==
type-fest@^0.6.0:
version "0.6.0"
resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-0.6.0.tgz#8d2a2370d3df886eb5c90ada1c5bf6188acf838b"
integrity sha512-q+MB8nYR1KDLrgr4G5yemftpMC7/QLqVndBmEEdqzmNj5dcFOO4Oo8qlwZE3ULT3+Zim1F8Kq4cBnikNhlCMlg==
type-fest@^0.8.1:
version "0.8.1"
resolved "https://registry.yarnpkg.com/type-fest/-/type-fest-0.8.1.tgz#09e249ebde851d3b1e48d27c105444667f17b83d"
integrity sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==
typed-array-buffer@^1.0.2:
version "1.0.2"
resolved "https://registry.yarnpkg.com/typed-array-buffer/-/typed-array-buffer-1.0.2.tgz#1867c5d83b20fcb5ccf32649e5e2fc7424474ff3"
integrity sha512-gEymJYKZtKXzzBzM4jqa9w6Q1Jjm7x2d+sh19AdsD4wqnMPDYyvwpsIc2Q/835kHuo3BEQ7CjelGhfTsoBb2MQ==
dependencies:
call-bind "^1.0.7"
es-errors "^1.3.0"
is-typed-array "^1.1.13"
typed-array-byte-length@^1.0.1:
version "1.0.1"
resolved "https://registry.yarnpkg.com/typed-array-byte-length/-/typed-array-byte-length-1.0.1.tgz#d92972d3cff99a3fa2e765a28fcdc0f1d89dec67"
integrity sha512-3iMJ9q0ao7WE9tWcaYKIptkNBuOIcZCCT0d4MRvuuH88fEoEH62IuQe0OtraD3ebQEoTRk8XCBoknUNc1Y67pw==
dependencies:
call-bind "^1.0.7"
for-each "^0.3.3"
gopd "^1.0.1"
has-proto "^1.0.3"
is-typed-array "^1.1.13"
typed-array-byte-offset@^1.0.2:
version "1.0.2"
resolved "https://registry.yarnpkg.com/typed-array-byte-offset/-/typed-array-byte-offset-1.0.2.tgz#f9ec1acb9259f395093e4567eb3c28a580d02063"
integrity sha512-Ous0vodHa56FviZucS2E63zkgtgrACj7omjwd/8lTEMEPFFyjfixMZ1ZXenpgCFBBt4EC1J2XsyVS2gkG0eTFA==
dependencies:
available-typed-arrays "^1.0.7"
call-bind "^1.0.7"
for-each "^0.3.3"
gopd "^1.0.1"
has-proto "^1.0.3"
is-typed-array "^1.1.13"
typed-array-length@^1.0.6:
version "1.0.6"
resolved "https://registry.yarnpkg.com/typed-array-length/-/typed-array-length-1.0.6.tgz#57155207c76e64a3457482dfdc1c9d1d3c4c73a3"
integrity sha512-/OxDN6OtAk5KBpGb28T+HZc2M+ADtvRxXrKKbUwtsLgdoxgX13hyy7ek6bFRl5+aBs2yZzB0c4CnQfAtVypW/g==
dependencies:
call-bind "^1.0.7"
for-each "^0.3.3"
gopd "^1.0.1"
has-proto "^1.0.3"
is-typed-array "^1.1.13"
possible-typed-array-names "^1.0.0"
typedarray.prototype.slice@^1.0.3:
version "1.0.3"
resolved "https://registry.yarnpkg.com/typedarray.prototype.slice/-/typedarray.prototype.slice-1.0.3.tgz#bce2f685d3279f543239e4d595e0d021731d2d1a"
integrity sha512-8WbVAQAUlENo1q3c3zZYuy5k9VzBQvp8AX9WOtbvyWlLM1v5JaSRmjubLjzHF4JFtptjH/5c/i95yaElvcjC0A==
dependencies:
call-bind "^1.0.7"
define-properties "^1.2.1"
es-abstract "^1.23.0"
es-errors "^1.3.0"
typed-array-buffer "^1.0.2"
typed-array-byte-offset "^1.0.2"
typedoc-plugin-markdown@^4.1.0:
version "4.1.0"
resolved "https://registry.yarnpkg.com/typedoc-plugin-markdown/-/typedoc-plugin-markdown-4.1.0.tgz#0969e82d9821c956145a4b8a9a70f4e00bde27e8"
integrity sha512-sUiEJVaa6+MOFShRy14j1OP/VXC5OLyHNecJ2nKeGuBy2M3YiMatSLoIiddFAqVptSuILJTZiJzCBIY6yzAVyg==
typedoc@^0.26.1:
version "0.26.1"
resolved "https://registry.yarnpkg.com/typedoc/-/typedoc-0.26.1.tgz#fc43108abdea64929a2e636877e250d5dea50957"
integrity sha512-APsVXqh93jTlpkLuw6+/IORx7n5LN8hzJV8nvMIrYYaIva0VCq0CoDN7Z3hsRThEYVExI/qoFHnAAxrhG+Wd7Q==
dependencies:
lunr "^2.3.9"
markdown-it "^14.1.0"
minimatch "^9.0.4"
shiki "^1.9.0"
yaml "^2.4.5"
typescript@^5.4.5:
version "5.4.5"
resolved "https://registry.yarnpkg.com/typescript/-/typescript-5.4.5.tgz#42ccef2c571fdbd0f6718b1d1f5e6e5ef006f611"
integrity sha512-vcI4UpRgg81oIRUFwR0WSIHKt11nJ7SAVlYNIu+QpqeyXP+gpQJy/Z4+F0aGxSE4MqwjyXvW/TzgkLAx2AGHwQ==
uc.micro@^2.0.0, uc.micro@^2.1.0:
version "2.1.0"
resolved "https://registry.yarnpkg.com/uc.micro/-/uc.micro-2.1.0.tgz#f8d3f7d0ec4c3dea35a7e3c8efa4cb8b45c9e7ee"
integrity sha512-ARDJmphmdvUk6Glw7y9DQ2bFkKBHwQHLi2lsaH6PPmz/Ka9sFOBsBluozhDltWmnv9u/cF6Rt87znRTPV+yp/A==
unbox-primitive@^1.0.2:
version "1.0.2"
resolved "https://registry.yarnpkg.com/unbox-primitive/-/unbox-primitive-1.0.2.tgz#29032021057d5e6cdbd08c5129c226dff8ed6f9e"
integrity sha512-61pPlCD9h51VoreyJ0BReideM3MDKMKnh6+V9L08331ipq6Q8OFXZYiqP6n/tbHx4s5I9uRhcye6BrbkizkBDw==
dependencies:
call-bind "^1.0.2"
has-bigints "^1.0.2"
has-symbols "^1.0.3"
which-boxed-primitive "^1.0.2"
underscore@^1.13.2:
version "1.13.6"
resolved "https://registry.yarnpkg.com/underscore/-/underscore-1.13.6.tgz#04786a1f589dc6c09f761fc5f45b89e935136441"
integrity sha512-+A5Sja4HP1M08MaXya7p5LvjuM7K6q/2EaC0+iovj/wOcMsTzMvDFbasi/oSapiwOlt252IqsKqPjCl7huKS0A==
undici-types@~5.26.4:
version "5.26.5"
resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-5.26.5.tgz#bcd539893d00b56e964fd2657a4866b221a65617"
integrity sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==
unified@^9.2.2:
version "9.2.2"
resolved "https://registry.yarnpkg.com/unified/-/unified-9.2.2.tgz#67649a1abfc3ab85d2969502902775eb03146975"
integrity sha512-Sg7j110mtefBD+qunSLO1lqOEKdrwBFBrR6Qd8f4uwkhWNlbkaqwHse6e7QvD3AP/MNoJdEDLaf8OxYyoWgorQ==
dependencies:
bail "^1.0.0"
extend "^3.0.0"
is-buffer "^2.0.0"
is-plain-obj "^2.0.0"
trough "^1.0.0"
vfile "^4.0.0"
unist-util-is@^4.0.0:
version "4.1.0"
resolved "https://registry.yarnpkg.com/unist-util-is/-/unist-util-is-4.1.0.tgz#976e5f462a7a5de73d94b706bac1b90671b57797"
integrity sha512-ZOQSsnce92GrxSqlnEEseX0gi7GH9zTJZ0p9dtu87WRb/37mMPO2Ilx1s/t9vBHrFhbgweUwb+t7cIn5dxPhZg==
unist-util-stringify-position@^2.0.0:
version "2.0.3"
resolved "https://registry.yarnpkg.com/unist-util-stringify-position/-/unist-util-stringify-position-2.0.3.tgz#cce3bfa1cdf85ba7375d1d5b17bdc4cada9bd9da"
integrity sha512-3faScn5I+hy9VleOq/qNbAd6pAx7iH5jYBMS9I1HgQVijz/4mv5Bvw5iw1sC/90CODiKo81G/ps8AJrISn687g==
dependencies:
"@types/unist" "^2.0.2"
unist-util-visit-parents@^3.0.0:
version "3.1.1"
resolved "https://registry.yarnpkg.com/unist-util-visit-parents/-/unist-util-visit-parents-3.1.1.tgz#65a6ce698f78a6b0f56aa0e88f13801886cdaef6"
integrity sha512-1KROIZWo6bcMrZEwiH2UrXDyalAa0uqzWCxCJj6lPOvTve2WkfgCytoDTPaMnodXh1WrXOq0haVYHj99ynJlsg==
dependencies:
"@types/unist" "^2.0.0"
unist-util-is "^4.0.0"
universal-user-agent@^7.0.0, universal-user-agent@^7.0.2:
version "7.0.2"
resolved "https://registry.yarnpkg.com/universal-user-agent/-/universal-user-agent-7.0.2.tgz#52e7d0e9b3dc4df06cc33cb2b9fd79041a54827e"
integrity sha512-0JCqzSKnStlRRQfCdowvqy3cy0Dvtlb8xecj/H8JFZuCze4rwjPZQOgvFvn0Ws/usCHQFGpyr+pB9adaGwXn4Q==
update-section@^0.3.3:
version "0.3.3"
resolved "https://registry.yarnpkg.com/update-section/-/update-section-0.3.3.tgz#458f17820d37820dc60e20b86d94391b00123158"
integrity sha512-BpRZMZpgXLuTiKeiu7kK0nIPwGdyrqrs6EDSaXtjD/aQ2T+qVo9a5hRC3HN3iJjCMxNT/VxoLGQ7E/OzE5ucnw==
uuid@^9.0.0:
version "9.0.1"
resolved "https://registry.yarnpkg.com/uuid/-/uuid-9.0.1.tgz#e188d4c8853cc722220392c424cd637f32293f30"
integrity sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA==
validate-npm-package-license@^3.0.1:
version "3.0.4"
resolved "https://registry.yarnpkg.com/validate-npm-package-license/-/validate-npm-package-license-3.0.4.tgz#fc91f6b9c7ba15c857f4cb2c5defeec39d4f410a"
integrity sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew==
dependencies:
spdx-correct "^3.0.0"
spdx-expression-parse "^3.0.0"
vfile-message@^2.0.0:
version "2.0.4"
resolved "https://registry.yarnpkg.com/vfile-message/-/vfile-message-2.0.4.tgz#5b43b88171d409eae58477d13f23dd41d52c371a"
integrity sha512-DjssxRGkMvifUOJre00juHoP9DPWuzjxKuMDrhNbk2TdaYYBNMStsNhEOt3idrtI12VQYM/1+iM0KOzXi4pxwQ==
dependencies:
"@types/unist" "^2.0.0"
unist-util-stringify-position "^2.0.0"
vfile@^4.0.0:
version "4.2.1"
resolved "https://registry.yarnpkg.com/vfile/-/vfile-4.2.1.tgz#03f1dce28fc625c625bc6514350fbdb00fa9e624"
integrity sha512-O6AE4OskCG5S1emQ/4gl8zK586RqA3srz3nfK/Viy0UPToBc5Trp9BVFb1u0CjsKrAWwnpr4ifM/KBXPWwJbCA==
dependencies:
"@types/unist" "^2.0.0"
is-buffer "^2.0.0"
unist-util-stringify-position "^2.0.0"
vfile-message "^2.0.0"
which-boxed-primitive@^1.0.2:
version "1.0.2"
resolved "https://registry.yarnpkg.com/which-boxed-primitive/-/which-boxed-primitive-1.0.2.tgz#13757bc89b209b049fe5d86430e21cf40a89a8e6"
integrity sha512-bwZdv0AKLpplFY2KZRX6TvyuN7ojjr7lwkg6ml0roIy9YeuSr7JS372qlNW18UQYzgYK9ziGcerWqZOmEn9VNg==
dependencies:
is-bigint "^1.0.1"
is-boolean-object "^1.1.0"
is-number-object "^1.0.4"
is-string "^1.0.5"
is-symbol "^1.0.3"
which-typed-array@^1.1.14, which-typed-array@^1.1.15:
version "1.1.15"
resolved "https://registry.yarnpkg.com/which-typed-array/-/which-typed-array-1.1.15.tgz#264859e9b11a649b388bfaaf4f767df1f779b38d"
integrity sha512-oV0jmFtUky6CXfkqehVvBP/LSWJ2sy4vWMioiENyJLePrBO/yKyV9OyJySfAKosh+RYkIl5zJCNZ8/4JncrpdA==
dependencies:
available-typed-arrays "^1.0.7"
call-bind "^1.0.7"
for-each "^0.3.3"
gopd "^1.0.1"
has-tostringtag "^1.0.2"
which@^2.0.1:
version "2.0.2"
resolved "https://registry.yarnpkg.com/which/-/which-2.0.2.tgz#7c6a8dd0a636a0327e10b59c9286eee93f3f51b1"
integrity sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==
dependencies:
isexe "^2.0.0"
"wrap-ansi-cjs@npm:wrap-ansi@^7.0.0":
version "7.0.0"
resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-7.0.0.tgz#67e145cff510a6a6984bdf1152911d69d2eb9e43"
integrity sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==
dependencies:
ansi-styles "^4.0.0"
string-width "^4.1.0"
strip-ansi "^6.0.0"
wrap-ansi@^8.1.0:
version "8.1.0"
resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-8.1.0.tgz#56dc22368ee570face1b49819975d9b9a5ead214"
integrity sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==
dependencies:
ansi-styles "^6.1.0"
string-width "^5.0.1"
strip-ansi "^7.0.1"
yallist@^4.0.0:
version "4.0.0"
resolved "https://registry.yarnpkg.com/yallist/-/yallist-4.0.0.tgz#9bb92790d9c0effec63be73519e11a35019a3a72"
integrity sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==
yaml@^2.4.5:
version "2.4.5"
resolved "https://registry.yarnpkg.com/yaml/-/yaml-2.4.5.tgz#60630b206dd6d84df97003d33fc1ddf6296cca5e"
integrity sha512-aBx2bnqDzVOyNKfsysjA2ms5ZlnjSAW2eG3/L5G/CSujfjLJTJsEw1bGw8kCf04KodQWk1pxlGnZ56CRxiawmg==
yargs-parser@^20.2.3:
version "20.2.9"
resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-20.2.9.tgz#2eb7dc3b0289718fc295f362753845c41a0c94ee"
integrity sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w==
zwitch@^1.0.0:
version "1.0.5"
resolved "https://registry.yarnpkg.com/zwitch/-/zwitch-1.0.5.tgz#d11d7381ffed16b742f6af7b3f223d5cd9fe9920"
integrity sha512-V50KMwwzqJV0NpZIZFwfOD5/lyny3WlSzRiXgA0G7VUnRlqttta1L6UQIHzd6EuBY/cHGfwTIck7w1yH6Q5zUw==
|
0 | lc_public_repos/langgraph/libs | lc_public_repos/langgraph/libs/sdk-js/README.md | # LangGraph JS/TS SDK
This repository contains the JS/TS SDK for interacting with the LangGraph REST API.
## Quick Start
To get started with the JS/TS SDK, [install the package](https://www.npmjs.com/package/@langchain/langgraph-sdk)
```bash
yarn add @langchain/langgraph-sdk
```
You will need a running LangGraph API server. If you're running a server locally using `langgraph-cli`, SDK will automatically point at `http://localhost:8123`, otherwise
you would need to specify the server URL when creating a client.
```js
import { Client } from "@langchain/langgraph-sdk";
const client = new Client();
// List all assistants
const assistants = await client.assistants.search({
metadata: null,
offset: 0,
limit: 10,
});
// We auto-create an assistant for each graph you register in config.
const agent = assistants[0];
// Start a new thread
const thread = await client.threads.create();
// Start a streaming run
const messages = [{ role: "human", content: "what's the weather in la" }];
const streamResponse = client.runs.stream(
thread["thread_id"],
agent["assistant_id"],
{
input: { messages },
}
);
for await (const chunk of streamResponse) {
console.log(chunk);
}
```
## Documentation
To generate documentation, run the following commands:
1. Generate docs.
yarn typedoc
1. Consolidate doc files into one markdown file.
npx concat-md --decrease-title-levels --ignore=js_ts_sdk_ref.md --start-title-level-at 2 docs > docs/js_ts_sdk_ref.md
1. Copy `js_ts_sdk_ref.md` to MkDocs directory.
cp docs/js_ts_sdk_ref.md ../../docs/docs/cloud/reference/sdk/js_ts_sdk_ref.md
|
0 | lc_public_repos/langgraph/libs | lc_public_repos/langgraph/libs/sdk-js/langchain.config.js | import { resolve, dirname } from "node:path";
import { fileURLToPath } from "node:url";
/**
* @param {string} relativePath
* @returns {string}
*/
// Resolve `relativePath` against the directory containing this config file,
// returning an absolute path (works regardless of the process's cwd).
function abs(relativePath) {
  return resolve(dirname(fileURLToPath(import.meta.url)), relativePath);
}
// Build configuration consumed by the `lc_build` script from @langchain/scripts
// (see the "build" script in package.json).
export const config = {
  internals: [], // presumably modules treated as build-internal — confirm against @langchain/scripts docs
  entrypoints: { index: "index", client: "client" }, // public entrypoints: package root and ./client
  tsConfigPath: resolve("./tsconfig.json"), // TypeScript config used for the build
  cjsSource: "./dist-cjs", // directory the CommonJS build is emitted to
  cjsDestination: "./dist", // final destination the CJS output is moved into
  abs, // path helper exposed to the build tooling
};
|
0 | lc_public_repos/langgraph/libs | lc_public_repos/langgraph/libs/sdk-js/package.json | {
"name": "@langchain/langgraph-sdk",
"version": "0.0.31",
"description": "Client library for interacting with the LangGraph API",
"type": "module",
"packageManager": "yarn@1.22.19",
"scripts": {
"clean": "rm -rf dist/ dist-cjs/",
"build": "yarn clean && yarn lc_build --create-entrypoints --pre --tree-shaking",
"prepublish": "yarn run build",
"format": "prettier --write src",
"lint": "prettier --check src && tsc --noEmit"
},
"main": "index.js",
"license": "MIT",
"dependencies": {
"@types/json-schema": "^7.0.15",
"p-queue": "^6.6.2",
"p-retry": "4",
"uuid": "^9.0.0"
},
"devDependencies": {
"@langchain/scripts": "^0.1.4",
"@tsconfig/recommended": "^1.0.2",
"@types/node": "^20.12.12",
"@types/uuid": "^9.0.1",
"concat-md": "^0.5.1",
"prettier": "^3.2.5",
"typedoc": "^0.26.1",
"typedoc-plugin-markdown": "^4.1.0",
"typescript": "^5.4.5"
},
"exports": {
".": {
"types": {
"import": "./index.d.ts",
"require": "./index.d.cts",
"default": "./index.d.ts"
},
"import": "./index.js",
"require": "./index.cjs"
},
"./client": {
"types": {
"import": "./client.d.ts",
"require": "./client.d.cts",
"default": "./client.d.ts"
},
"import": "./client.js",
"require": "./client.cjs"
},
"./package.json": "./package.json"
},
"files": [
"dist/",
"index.cjs",
"index.js",
"index.d.ts",
"index.d.cts",
"client.cjs",
"client.js",
"client.d.ts",
"client.d.cts"
]
}
|
0 | lc_public_repos/langgraph/libs | lc_public_repos/langgraph/libs/sdk-js/tsconfig.cjs.json | {
"extends": "./tsconfig.json",
"compilerOptions": {
"module": "CommonJS",
"moduleResolution": "Node",
"declaration": false
},
"exclude": ["node_modules", "dist", "**/tests"]
}
|
0 | lc_public_repos/langgraph/libs | lc_public_repos/langgraph/libs/sdk-js/.prettierrc | {}
|
0 | lc_public_repos/langgraph/libs/sdk-js | lc_public_repos/langgraph/libs/sdk-js/src/types.ts | import { Checkpoint, Config, Metadata } from "./schema.js";
export type StreamMode =
| "values"
| "messages"
| "updates"
| "events"
| "debug"
| "custom"
| "messages-tuple";
export type MultitaskStrategy = "reject" | "interrupt" | "rollback" | "enqueue";
export type OnConflictBehavior = "raise" | "do_nothing";
export type OnCompletionBehavior = "complete" | "continue";
export type DisconnectMode = "cancel" | "continue";
export type StreamEvent =
| "events"
| "metadata"
| "debug"
| "updates"
| "values"
| "messages/partial"
| "messages/metadata"
| "messages/complete"
| (string & {});
export interface Send {
node: string;
input: Record<string, unknown> | null;
}
export interface Command {
/**
* An object to update the thread state with.
*/
update?: Record<string, unknown>;
/**
* The value to return from an `interrupt` function call.
*/
resume?: unknown;
/**
* Determine the next node to navigate to. Can be one of the following:
* - Name(s) of the node names to navigate to next.
* - `Send` command(s) to execute node(s) with provided input.
*/
goto?: Send | Send[] | string | string[];
}
interface RunsInvokePayload {
/**
* Input to the run. Pass `null` to resume from the current state of the thread.
*/
input?: Record<string, unknown> | null;
/**
* Metadata for the run.
*/
metadata?: Metadata;
/**
* Additional configuration for the run.
*/
config?: Config;
/**
* Checkpoint ID for when creating a new run.
*/
checkpointId?: string;
/**
* Checkpoint for when creating a new run.
*/
checkpoint?: Omit<Checkpoint, "thread_id">;
/**
* Interrupt execution before entering these nodes.
*/
interruptBefore?: "*" | string[];
/**
* Interrupt execution after leaving these nodes.
*/
interruptAfter?: "*" | string[];
/**
* Strategy to handle concurrent runs on the same thread. Only relevant if
* there is a pending/inflight run on the same thread. One of:
* - "reject": Reject the new run.
* - "interrupt": Interrupt the current run, keeping steps completed until now,
and start a new one.
* - "rollback": Cancel and delete the existing run, rolling back the thread to
the state before it had started, then start the new run.
* - "enqueue": Queue up the new run to start after the current run finishes.
*/
multitaskStrategy?: MultitaskStrategy;
/**
* Abort controller signal to cancel the run.
*/
signal?: AbortController["signal"];
/**
* Behavior to handle run completion. Only relevant if
* there is a pending/inflight run on the same thread. One of:
* - "complete": Complete the run.
* - "continue": Continue the run.
*/
onCompletion?: OnCompletionBehavior;
/**
* Webhook to call when the run is complete.
*/
webhook?: string;
/**
* Behavior to handle disconnection. Only relevant if
* there is a pending/inflight run on the same thread. One of:
* - "cancel": Cancel the run.
* - "continue": Continue the run.
*/
onDisconnect?: DisconnectMode;
/**
* The number of seconds to wait before starting the run.
* Use to schedule future runs.
*/
afterSeconds?: number;
/**
* Behavior if the specified run doesn't exist. Defaults to "reject".
*/
ifNotExists?: "create" | "reject";
/**
* One or more commands to invoke the graph with.
*/
command?: Command;
}
export interface RunsStreamPayload extends RunsInvokePayload {
  /**
   * One or more of the modes in {@link StreamMode}:
   * - `"values"`: Stream the thread state any time it changes.
   * - `"messages"`: Stream chat messages from thread state and calls to chat models,
   *   token-by-token where possible.
   * - `"updates"`: Stream the state updates returned by each node.
   * - `"events"`: Stream all events produced by the run. You can also access these
   *   afterwards using the `client.runs.listEvents()` method.
   * - `"debug"`, `"custom"` and `"messages-tuple"` are also accepted; see
   *   {@link StreamMode} for the full set of values.
   */
  streamMode?: StreamMode | Array<StreamMode>;
  /**
   * Stream output from subgraphs. By default, streams only the top graph.
   */
  streamSubgraphs?: boolean;
  /**
   * Pass one or more feedbackKeys if you want to request short-lived signed URLs
   * for submitting feedback to LangSmith with this key for this run.
   */
  feedbackKeys?: string[];
}
export interface RunsCreatePayload extends RunsInvokePayload {}
export interface CronsCreatePayload extends RunsCreatePayload {
/**
* Schedule for running the Cron Job
*/
schedule: string;
}
export interface RunsWaitPayload extends RunsStreamPayload {
/**
* Raise errors returned by the run. Default is `true`.
*/
raiseError?: boolean;
}
|
0 | lc_public_repos/langgraph/libs/sdk-js | lc_public_repos/langgraph/libs/sdk-js/src/schema.ts | import type { JSONSchema7 } from "json-schema";
type Optional<T> = T | null | undefined;
export type RunStatus =
| "pending"
| "running"
| "error"
| "success"
| "timeout"
| "interrupted";
export type ThreadStatus = "idle" | "busy" | "interrupted" | "error";
type MultitaskStrategy = "reject" | "interrupt" | "rollback" | "enqueue";
export type CancelAction = "interrupt" | "rollback";
export interface Config {
/**
* Tags for this call and any sub-calls (eg. a Chain calling an LLM).
* You can use these to filter calls.
*/
tags?: string[];
/**
* Maximum number of times a call can recurse.
* If not provided, defaults to 25.
*/
recursion_limit?: number;
/**
* Runtime values for attributes previously made configurable on this Runnable.
*/
configurable: {
/**
* ID of the thread
*/
thread_id?: string;
/**
* Timestamp of the state checkpoint
*/
checkpoint_id?: string;
[key: string]: unknown;
};
}
export interface GraphSchema {
/**
* The ID of the graph.
*/
graph_id: string;
/**
* The schema for the input state.
* Missing if unable to generate JSON schema from graph.
*/
input_schema?: JSONSchema7;
/**
* The schema for the output state.
* Missing if unable to generate JSON schema from graph.
*/
output_schema?: JSONSchema7;
/**
* The schema for the graph state.
* Missing if unable to generate JSON schema from graph.
*/
state_schema?: JSONSchema7;
/**
* The schema for the graph config.
* Missing if unable to generate JSON schema from graph.
*/
config_schema?: JSONSchema7;
}
export type Subgraphs = Record<string, GraphSchema>;
export type Metadata = Optional<{
source?: "input" | "loop" | "update" | (string & {});
step?: number;
writes?: Record<string, unknown> | null;
parents?: Record<string, string>;
[key: string]: unknown;
}>;
export interface AssistantBase {
/** The ID of the assistant. */
assistant_id: string;
/** The ID of the graph. */
graph_id: string;
/** The assistant config. */
config: Config;
/** The time the assistant was created. */
created_at: string;
/** The assistant metadata. */
metadata: Metadata;
/** The version of the assistant. */
version: number;
}
export interface AssistantVersion extends AssistantBase {}
export interface Assistant extends AssistantBase {
/** The last time the assistant was updated. */
updated_at: string;
/** The name of the assistant */
name: string;
}
export interface AssistantGraph {
nodes: Array<{
id: string | number;
name?: string;
// eslint-disable-next-line @typescript-eslint/no-explicit-any
data?: Record<string, any> | string;
metadata?: unknown;
}>;
edges: Array<{
source: string;
target: string;
data?: string;
conditional?: boolean;
}>;
}
/**
* An interrupt thrown inside a thread.
*/
export interface Interrupt {
value: unknown;
when: "during";
resumable: boolean;
ns?: string[];
}
export interface Thread<ValuesType = DefaultValues> {
/** The ID of the thread. */
thread_id: string;
/** The time the thread was created. */
created_at: string;
/** The last time the thread was updated. */
updated_at: string;
/** The thread metadata. */
metadata: Metadata;
/** The status of the thread */
status: ThreadStatus;
/** The current state of the thread. */
values: ValuesType;
/** Interrupts which were thrown in this thread */
interrupts: Record<string, Array<Interrupt>>;
}
export interface Cron {
/** The ID of the cron */
cron_id: string;
/** The ID of the thread */
thread_id: Optional<string>;
/** The end date to stop running the cron. */
end_time: Optional<string>;
/** The schedule to run, cron format. */
schedule: string;
/** The time the cron was created. */
created_at: string;
/** The last time the cron was updated. */
updated_at: string;
/** The run payload to use for creating new run. */
payload: Record<string, unknown>;
}
export type DefaultValues = Record<string, unknown>[] | Record<string, unknown>;
export interface ThreadState<ValuesType = DefaultValues> {
/** The state values */
values: ValuesType;
/** The next nodes to execute. If empty, the thread is done until new input is received */
next: string[];
/** Checkpoint of the thread state */
checkpoint: Checkpoint;
/** Metadata for this state */
metadata: Metadata;
/** Time of state creation */
created_at: Optional<string>;
/** The parent checkpoint. If missing, this is the root checkpoint */
parent_checkpoint: Optional<Checkpoint>;
/** Tasks to execute in this step. If already attempted, may contain an error */
tasks: Array<ThreadTask>;
}
export interface ThreadTask {
id: string;
name: string;
result?: unknown;
error: Optional<string>;
interrupts: Array<Interrupt>;
checkpoint: Optional<Checkpoint>;
state: Optional<ThreadState>;
}
export interface Run {
  /** The ID of the run */
  run_id: string;
  /** The ID of the thread */
  thread_id: string;
  /** The assistant that was used for this run */
  assistant_id: string;
  /** The time the run was created */
  created_at: string;
  /** The last time the run was updated */
  updated_at: string;
  /** The status of the run. */
  status: RunStatus;
  /** Run metadata */
  metadata: Metadata;
  /** Strategy to handle concurrent runs on the same thread */
  multitask_strategy: Optional<MultitaskStrategy>;
}
export interface Checkpoint {
thread_id: string;
checkpoint_ns: string;
checkpoint_id: Optional<string>;
checkpoint_map: Optional<Record<string, unknown>>;
}
export interface ListNamespaceResponse {
namespaces: string[][];
}
export interface Item {
namespace: string[];
key: string;
value: Record<string, any>;
createdAt: string;
updatedAt: string;
}
export interface SearchItem extends Item {
score?: number;
}
export interface SearchItemsResponse {
items: SearchItem[];
}
|
0 | lc_public_repos/langgraph/libs/sdk-js | lc_public_repos/langgraph/libs/sdk-js/src/client.ts | import {
Assistant,
AssistantGraph,
CancelAction,
Config,
DefaultValues,
GraphSchema,
Metadata,
Run,
RunStatus,
Thread,
ThreadState,
Cron,
AssistantVersion,
Subgraphs,
Checkpoint,
SearchItemsResponse,
ListNamespaceResponse,
Item,
ThreadStatus,
} from "./schema.js";
import { AsyncCaller, AsyncCallerParams } from "./utils/async_caller.js";
import {
EventSourceParser,
createParser,
} from "./utils/eventsource-parser/index.js";
import { IterableReadableStream } from "./utils/stream.js";
import {
RunsCreatePayload,
RunsStreamPayload,
RunsWaitPayload,
StreamEvent,
CronsCreatePayload,
OnConflictBehavior,
} from "./types.js";
import { mergeSignals } from "./utils/signals.js";
import { getEnvironmentVariable } from "./utils/env.js";
/**
* Get the API key from the environment.
* Precedence:
* 1. explicit argument
* 2. LANGGRAPH_API_KEY
* 3. LANGSMITH_API_KEY
* 4. LANGCHAIN_API_KEY
*
* @param apiKey - Optional API key provided as an argument
* @returns The API key if found, otherwise undefined
*/
export function getApiKey(apiKey?: string): string | undefined {
  if (apiKey) {
    return apiKey;
  }
  for (const name of [
    "LANGGRAPH_API_KEY",
    "LANGSMITH_API_KEY",
    "LANGCHAIN_API_KEY",
  ]) {
    const candidate = getEnvironmentVariable(name);
    if (!candidate) continue;
    // Strip whitespace and any surrounding quotes left in by shells/env files.
    return candidate.trim().replace(/^["']|["']$/g, "");
  }
  return undefined;
}
interface ClientConfig {
apiUrl?: string;
apiKey?: string;
callerOptions?: AsyncCallerParams;
timeoutMs?: number;
defaultHeaders?: Record<string, string | null | undefined>;
}
/**
 * Shared plumbing for all API sub-clients: retrying fetch, default headers
 * (including the API key), base URL normalization, and timeout handling.
 */
class BaseClient {
  // Retrying/concurrency-limited fetch wrapper shared by all requests.
  protected asyncCaller: AsyncCaller;

  // Default per-request timeout in milliseconds.
  protected timeoutMs: number;

  // Base URL without a trailing slash.
  protected apiUrl: string;

  // Headers merged into every request; may include "X-Api-Key".
  protected defaultHeaders: Record<string, string | null | undefined>;

  constructor(config?: ClientConfig) {
    this.asyncCaller = new AsyncCaller({
      maxRetries: 4,
      // default concurrency limit being capped by Chrome
      // https://github.com/nodejs/undici/issues/1373
      maxConcurrency: 4,
      ...config?.callerOptions,
    });
    this.timeoutMs = config?.timeoutMs || 12_000;
    // Regex to remove trailing slash, if present
    this.apiUrl = config?.apiUrl?.replace(/\/$/, "") || "http://localhost:8123";
    this.defaultHeaders = config?.defaultHeaders || {};
    const apiKey = getApiKey(config?.apiKey);
    if (apiKey) {
      this.defaultHeaders["X-Api-Key"] = apiKey;
    }
  }

  /**
   * Build the (URL, RequestInit) pair for a request: merges default headers,
   * serializes `json` into the body, attaches a timeout signal, and appends
   * `params` as query-string values.
   *
   * `timeoutMs: null` explicitly disables the default timeout (used for
   * long-lived streaming requests); `undefined` falls back to `this.timeoutMs`.
   */
  protected prepareFetchOptions(
    path: string,
    options?: RequestInit & {
      json?: unknown;
      params?: Record<string, unknown>;
      timeoutMs?: number | null;
    },
  ): [url: URL, init: RequestInit] {
    const mutatedOptions = {
      ...options,
      headers: { ...this.defaultHeaders, ...options?.headers },
    };
    if (mutatedOptions.json) {
      mutatedOptions.body = JSON.stringify(mutatedOptions.json);
      mutatedOptions.headers = {
        ...mutatedOptions.headers,
        "Content-Type": "application/json",
      };
      delete mutatedOptions.json;
    }
    let timeoutSignal: AbortSignal | null = null;
    if (typeof options?.timeoutMs !== "undefined") {
      if (options.timeoutMs != null) {
        timeoutSignal = AbortSignal.timeout(options.timeoutMs);
      }
    } else {
      timeoutSignal = AbortSignal.timeout(this.timeoutMs);
    }
    // Combine the timeout signal with any caller-provided abort signal.
    mutatedOptions.signal = mergeSignals(timeoutSignal, mutatedOptions.signal);
    const targetUrl = new URL(`${this.apiUrl}${path}`);
    if (mutatedOptions.params) {
      for (const [key, value] of Object.entries(mutatedOptions.params)) {
        // null/undefined params are omitted entirely.
        if (value == null) continue;
        // Primitives go through as-is; objects/arrays are JSON-encoded.
        let strValue =
          typeof value === "string" || typeof value === "number"
            ? value.toString()
            : JSON.stringify(value);
        targetUrl.searchParams.append(key, strValue);
      }
      delete mutatedOptions.params;
    }
    return [targetUrl, mutatedOptions];
  }

  /**
   * Perform a request and parse the JSON response.
   * 202/204 responses have no body and resolve to `undefined`.
   */
  protected async fetch<T>(
    path: string,
    options?: RequestInit & {
      json?: unknown;
      params?: Record<string, unknown>;
      timeoutMs?: number | null;
      signal?: AbortSignal;
    },
  ): Promise<T> {
    const response = await this.asyncCaller.fetch(
      ...this.prepareFetchOptions(path, options),
    );
    if (response.status === 202 || response.status === 204) {
      return undefined as T;
    }
    return response.json() as T;
  }
}
export class CronsClient extends BaseClient {
  /**
   * Translate a camelCase cron payload into the snake_case request body
   * the LangGraph API expects.
   */
  private static toRequestBody(
    assistantId: string,
    payload?: CronsCreatePayload,
  ): Record<string, any> {
    return {
      schedule: payload?.schedule,
      input: payload?.input,
      config: payload?.config,
      metadata: payload?.metadata,
      assistant_id: assistantId,
      interrupt_before: payload?.interruptBefore,
      interrupt_after: payload?.interruptAfter,
      webhook: payload?.webhook,
      multitask_strategy: payload?.multitaskStrategy,
      if_not_exists: payload?.ifNotExists,
    };
  }

  /**
   * Create a cron job scoped to a specific thread.
   *
   * @param threadId The ID of the thread.
   * @param assistantId Assistant ID to use for this cron job.
   * @param payload Payload for creating a cron job.
   * @returns The created background run.
   */
  async createForThread(
    threadId: string,
    assistantId: string,
    payload?: CronsCreatePayload,
  ): Promise<Run> {
    return this.fetch<Run>(`/threads/${threadId}/runs/crons`, {
      method: "POST",
      json: CronsClient.toRequestBody(assistantId, payload),
    });
  }

  /**
   * Create a stateless cron job.
   *
   * @param assistantId Assistant ID to use for this cron job.
   * @param payload Payload for creating a cron job.
   * @returns The created background run.
   */
  async create(
    assistantId: string,
    payload?: CronsCreatePayload,
  ): Promise<Run> {
    return this.fetch<Run>(`/runs/crons`, {
      method: "POST",
      json: CronsClient.toRequestBody(assistantId, payload),
    });
  }

  /**
   * Delete a cron job.
   *
   * @param cronId Cron ID of Cron job to delete.
   */
  async delete(cronId: string): Promise<void> {
    await this.fetch<void>(`/runs/crons/${cronId}`, {
      method: "DELETE",
    });
  }

  /**
   * Search for cron jobs.
   *
   * @param query Query options.
   * @returns List of crons.
   */
  async search(query?: {
    assistantId?: string;
    threadId?: string;
    limit?: number;
    offset?: number;
  }): Promise<Cron[]> {
    // Paging defaults: 10 results starting at offset 0.
    const body = {
      assistant_id: query?.assistantId ?? undefined,
      thread_id: query?.threadId ?? undefined,
      limit: query?.limit ?? 10,
      offset: query?.offset ?? 0,
    };
    return this.fetch<Cron[]>("/runs/crons/search", {
      method: "POST",
      json: body,
    });
  }
}
/** Client for the `/assistants` endpoints. */
export class AssistantsClient extends BaseClient {
  /**
   * Get an assistant by ID.
   *
   * @param assistantId The ID of the assistant.
   * @returns Assistant
   */
  async get(assistantId: string): Promise<Assistant> {
    return this.fetch<Assistant>(`/assistants/${assistantId}`);
  }

  /**
   * Get the JSON representation of the graph assigned to a runnable
   * @param assistantId The ID of the assistant.
   * @param options.xray Whether to include subgraphs in the serialized graph representation. If an integer value is provided, only subgraphs with a depth less than or equal to the value will be included.
   * @returns Serialized graph
   */
  async getGraph(
    assistantId: string,
    options?: { xray?: boolean | number },
  ): Promise<AssistantGraph> {
    return this.fetch<AssistantGraph>(`/assistants/${assistantId}/graph`, {
      params: { xray: options?.xray },
    });
  }

  /**
   * Get the state and config schema of the graph assigned to a runnable
   * @param assistantId The ID of the assistant.
   * @returns Graph schema
   */
  async getSchemas(assistantId: string): Promise<GraphSchema> {
    return this.fetch<GraphSchema>(`/assistants/${assistantId}/schemas`);
  }

  /**
   * Get the subgraphs of an assistant by ID.
   *
   * @param assistantId The ID of the assistant to get the subgraphs of.
   * @param options Additional options: `namespace` scopes the lookup to a
   *   single subgraph namespace, `recurse` also returns nested subgraphs.
   * @returns The subgraphs of the assistant.
   */
  async getSubgraphs(
    assistantId: string,
    options?: {
      namespace?: string;
      recurse?: boolean;
    },
  ): Promise<Subgraphs> {
    // A namespace routes to the namespace-scoped endpoint; without one, all
    // subgraphs are returned.
    if (options?.namespace) {
      return this.fetch<Subgraphs>(
        `/assistants/${assistantId}/subgraphs/${options.namespace}`,
        { params: { recurse: options?.recurse } },
      );
    }
    return this.fetch<Subgraphs>(`/assistants/${assistantId}/subgraphs`, {
      params: { recurse: options?.recurse },
    });
  }

  /**
   * Create a new assistant.
   * @param payload Payload for creating an assistant.
   * @returns The created assistant.
   */
  async create(payload: {
    graphId: string;
    config?: Config;
    metadata?: Metadata;
    assistantId?: string;
    ifExists?: OnConflictBehavior;
    name?: string;
  }): Promise<Assistant> {
    return this.fetch<Assistant>("/assistants", {
      method: "POST",
      json: {
        graph_id: payload.graphId,
        config: payload.config,
        metadata: payload.metadata,
        assistant_id: payload.assistantId,
        if_exists: payload.ifExists,
        name: payload.name,
      },
    });
  }

  /**
   * Update an assistant.
   * @param assistantId ID of the assistant.
   * @param payload Payload for updating the assistant.
   * @returns The updated assistant.
   */
  async update(
    assistantId: string,
    payload: {
      graphId?: string;
      config?: Config;
      metadata?: Metadata;
      name?: string;
    },
  ): Promise<Assistant> {
    return this.fetch<Assistant>(`/assistants/${assistantId}`, {
      method: "PATCH",
      json: {
        graph_id: payload.graphId,
        config: payload.config,
        metadata: payload.metadata,
        name: payload.name,
      },
    });
  }

  /**
   * Delete an assistant.
   *
   * @param assistantId ID of the assistant.
   */
  async delete(assistantId: string): Promise<void> {
    return this.fetch<void>(`/assistants/${assistantId}`, {
      method: "DELETE",
    });
  }

  /**
   * List assistants.
   * @param query Query options.
   * @returns List of assistants.
   */
  async search(query?: {
    graphId?: string;
    metadata?: Metadata;
    limit?: number;
    offset?: number;
  }): Promise<Assistant[]> {
    return this.fetch<Assistant[]>("/assistants/search", {
      method: "POST",
      json: {
        graph_id: query?.graphId ?? undefined,
        metadata: query?.metadata ?? undefined,
        limit: query?.limit ?? 10,
        offset: query?.offset ?? 0,
      },
    });
  }

  /**
   * List all versions of an assistant.
   *
   * @param assistantId ID of the assistant.
   * @param payload Filter/paging options: `metadata` to filter versions,
   *   `limit` (default 10) and `offset` (default 0) for paging.
   * @returns List of assistant versions.
   */
  async getVersions(
    assistantId: string,
    payload?: {
      metadata?: Metadata;
      limit?: number;
      offset?: number;
    },
  ): Promise<AssistantVersion[]> {
    return this.fetch<AssistantVersion[]>(
      `/assistants/${assistantId}/versions`,
      {
        method: "POST",
        json: {
          metadata: payload?.metadata ?? undefined,
          limit: payload?.limit ?? 10,
          offset: payload?.offset ?? 0,
        },
      },
    );
  }

  /**
   * Change the version of an assistant.
   *
   * @param assistantId ID of the assistant.
   * @param version The version to change to.
   * @returns The updated assistant.
   */
  async setLatest(assistantId: string, version: number): Promise<Assistant> {
    return this.fetch<Assistant>(`/assistants/${assistantId}/latest`, {
      method: "POST",
      json: { version },
    });
  }
}
/** Client for the `/threads` endpoints. */
export class ThreadsClient extends BaseClient {
  /**
   * Get a thread by ID.
   *
   * @param threadId ID of the thread.
   * @returns The thread.
   */
  async get(threadId: string): Promise<Thread> {
    return this.fetch<Thread>(`/threads/${threadId}`);
  }

  /**
   * Create a new thread.
   *
   * @param payload Payload for creating a thread.
   * @returns The created thread.
   */
  async create(payload?: {
    /**
     * Metadata for the thread.
     */
    metadata?: Metadata;
    threadId?: string;
    ifExists?: OnConflictBehavior;
  }): Promise<Thread> {
    return this.fetch<Thread>(`/threads`, {
      method: "POST",
      json: {
        metadata: payload?.metadata,
        thread_id: payload?.threadId,
        if_exists: payload?.ifExists,
      },
    });
  }

  /**
   * Copy an existing thread
   * @param threadId ID of the thread to be copied
   * @returns Newly copied thread
   */
  async copy(threadId: string): Promise<Thread> {
    return this.fetch<Thread>(`/threads/${threadId}/copy`, {
      method: "POST",
    });
  }

  /**
   * Update a thread.
   *
   * @param threadId ID of the thread.
   * @param payload Payload for updating the thread.
   * @returns The updated thread.
   */
  async update(
    threadId: string,
    payload?: {
      /**
       * Metadata for the thread.
       */
      metadata?: Metadata;
    },
  ): Promise<Thread> {
    return this.fetch<Thread>(`/threads/${threadId}`, {
      method: "PATCH",
      json: { metadata: payload?.metadata },
    });
  }

  /**
   * Delete a thread.
   *
   * @param threadId ID of the thread.
   */
  async delete(threadId: string): Promise<void> {
    return this.fetch<void>(`/threads/${threadId}`, {
      method: "DELETE",
    });
  }

  /**
   * List threads
   *
   * @param query Query options
   * @returns List of threads
   */
  async search(query?: {
    /**
     * Metadata to filter threads by.
     */
    metadata?: Metadata;
    /**
     * Maximum number of threads to return.
     * Defaults to 10
     */
    limit?: number;
    /**
     * Offset to start from.
     */
    offset?: number;
    /**
     * Thread status to filter on.
     * Must be one of 'idle', 'busy', 'interrupted' or 'error'.
     */
    status?: ThreadStatus;
  }): Promise<Thread[]> {
    return this.fetch<Thread[]>("/threads/search", {
      method: "POST",
      json: {
        metadata: query?.metadata ?? undefined,
        limit: query?.limit ?? 10,
        offset: query?.offset ?? 0,
        status: query?.status,
      },
    });
  }

  /**
   * Get state for a thread.
   *
   * @param threadId ID of the thread.
   * @param checkpoint Optional checkpoint to read state at: either a
   *   `Checkpoint` object, or (deprecated) a checkpoint ID string.
   * @param options.subgraphs Whether to include subgraph state.
   * @returns Thread state.
   */
  async getState<ValuesType = DefaultValues>(
    threadId: string,
    checkpoint?: Checkpoint | string,
    options?: { subgraphs?: boolean },
  ): Promise<ThreadState<ValuesType>> {
    if (checkpoint != null) {
      // Checkpoint objects use the POST /state/checkpoint endpoint.
      if (typeof checkpoint !== "string") {
        return this.fetch<ThreadState<ValuesType>>(
          `/threads/${threadId}/state/checkpoint`,
          {
            method: "POST",
            json: { checkpoint, subgraphs: options?.subgraphs },
          },
        );
      }
      // deprecated: string checkpoint IDs use the path-parameter endpoint
      return this.fetch<ThreadState<ValuesType>>(
        `/threads/${threadId}/state/${checkpoint}`,
        { params: { subgraphs: options?.subgraphs } },
      );
    }
    // No checkpoint: fetch the current state.
    return this.fetch<ThreadState<ValuesType>>(`/threads/${threadId}/state`, {
      params: { subgraphs: options?.subgraphs },
    });
  }

  /**
   * Update the state of a thread.
   *
   * @param threadId The ID of the thread.
   * @param options.values State values to apply.
   * @param options.checkpoint Checkpoint to update the state of.
   * @param options.checkpointId Checkpoint ID to update the state of.
   * @param options.asNode Update the state as if this node had just executed.
   * @returns The `configurable` config fragment for the new checkpoint.
   */
  async updateState<ValuesType = DefaultValues>(
    threadId: string,
    options: {
      values: ValuesType;
      checkpoint?: Checkpoint;
      checkpointId?: string;
      asNode?: string;
    },
  ): Promise<Pick<Config, "configurable">> {
    return this.fetch<Pick<Config, "configurable">>(
      `/threads/${threadId}/state`,
      {
        method: "POST",
        json: {
          values: options.values,
          checkpoint_id: options.checkpointId,
          checkpoint: options.checkpoint,
          as_node: options?.asNode,
        },
      },
    );
  }

  /**
   * Patch the metadata of a thread.
   *
   * @param threadIdOrConfig Thread ID or config to patch the state of.
   * @param metadata Metadata to patch the state with.
   */
  async patchState(
    threadIdOrConfig: string | Config,
    metadata: Metadata,
  ): Promise<void> {
    let threadId: string;
    if (typeof threadIdOrConfig !== "string") {
      // A Config must carry configurable.thread_id to identify the thread.
      if (typeof threadIdOrConfig.configurable.thread_id !== "string") {
        throw new Error(
          "Thread ID is required when updating state with a config.",
        );
      }
      threadId = threadIdOrConfig.configurable.thread_id;
    } else {
      threadId = threadIdOrConfig;
    }
    return this.fetch<void>(`/threads/${threadId}/state`, {
      method: "PATCH",
      json: { metadata: metadata },
    });
  }

  /**
   * Get all past states for a thread.
   *
   * @param threadId ID of the thread.
   * @param options Additional options: `limit` (default 10), `before` config
   *   cursor, `checkpoint` filter, and `metadata` filter.
   * @returns List of thread states.
   */
  async getHistory<ValuesType = DefaultValues>(
    threadId: string,
    options?: {
      limit?: number;
      before?: Config;
      checkpoint?: Partial<Omit<Checkpoint, "thread_id">>;
      metadata?: Metadata;
    },
  ): Promise<ThreadState<ValuesType>[]> {
    return this.fetch<ThreadState<ValuesType>[]>(
      `/threads/${threadId}/history`,
      {
        method: "POST",
        json: {
          limit: options?.limit ?? 10,
          before: options?.before,
          metadata: options?.metadata,
          checkpoint: options?.checkpoint,
        },
      },
    );
  }
}
export class RunsClient extends BaseClient {
  // Overload: stateless run (no thread) — thread-scoped options are excluded.
  stream(
    threadId: null,
    assistantId: string,
    payload?: Omit<RunsStreamPayload, "multitaskStrategy" | "onCompletion">,
  ): AsyncGenerator<{
    event: StreamEvent;
    data: any;
  }>;

  // Overload: run on an existing thread — full payload accepted.
  stream(
    threadId: string,
    assistantId: string,
    payload?: RunsStreamPayload,
  ): AsyncGenerator<{
    event: StreamEvent;
    data: any;
  }>;

  /**
   * Create a run and stream the results.
   *
   * @param threadId The ID of the thread, or `null` for a stateless run.
   * @param assistantId Assistant ID to use for this run.
   * @param payload Payload for creating a run.
   */
  async *stream(
    threadId: string | null,
    assistantId: string,
    payload?: RunsStreamPayload,
  ): AsyncGenerator<{
    event: StreamEvent;
    data: any;
  }> {
    // Map the camelCase payload onto the snake_case body the API expects.
    const json: Record<string, any> = {
      input: payload?.input,
      command: payload?.command,
      config: payload?.config,
      metadata: payload?.metadata,
      stream_mode: payload?.streamMode,
      stream_subgraphs: payload?.streamSubgraphs,
      feedback_keys: payload?.feedbackKeys,
      assistant_id: assistantId,
      interrupt_before: payload?.interruptBefore,
      interrupt_after: payload?.interruptAfter,
      checkpoint: payload?.checkpoint,
      checkpoint_id: payload?.checkpointId,
      webhook: payload?.webhook,
      multitask_strategy: payload?.multitaskStrategy,
      on_completion: payload?.onCompletion,
      on_disconnect: payload?.onDisconnect,
      after_seconds: payload?.afterSeconds,
      if_not_exists: payload?.ifNotExists,
    };
    const endpoint =
      threadId == null ? `/runs/stream` : `/threads/${threadId}/runs/stream`;
    // Use asyncCaller.fetch directly (not this.fetch) so we keep the raw
    // Response body for SSE consumption; timeoutMs is null so long-running
    // streams are not cut off by the default client timeout.
    const response = await this.asyncCaller.fetch(
      ...this.prepareFetchOptions(endpoint, {
        method: "POST",
        json,
        timeoutMs: null,
        signal: payload?.signal,
      }),
    );
    let parser: EventSourceParser;
    let onEndEvent: () => void;
    const textDecoder = new TextDecoder();
    // Pipe the SSE byte stream through a TransformStream that parses events
    // and emits { event, data } objects. A missing body yields an
    // immediately-closed stream.
    const stream: ReadableStream<{ event: string; data: any }> = (
      response.body || new ReadableStream({ start: (ctrl) => ctrl.close() })
    ).pipeThrough(
      new TransformStream({
        async start(ctrl) {
          parser = createParser((event) => {
            // Stop on caller abort or the server's "[DONE]" sentinel.
            if (
              (payload?.signal && payload.signal.aborted) ||
              (event.type === "event" && event.data === "[DONE]")
            ) {
              ctrl.terminate();
              return;
            }
            if ("data" in event) {
              ctrl.enqueue({
                event: event.event ?? "message",
                data: JSON.parse(event.data),
              });
            }
          });
          onEndEvent = () => {
            ctrl.enqueue({ event: "end", data: undefined });
          };
        },
        async transform(chunk) {
          // NOTE(review): shadows the outer `payload` argument, and decodes
          // without { stream: true } — a multi-byte character split across
          // chunks could decode incorrectly; confirm chunk boundaries.
          const payload = textDecoder.decode(chunk);
          parser.feed(payload);
          // eventsource-parser will ignore events
          // that are not terminated by a newline
          if (payload.trim() === "event: end") onEndEvent();
        },
      }),
    );
    yield* IterableReadableStream.fromReadableStream(stream);
  }
/**
* Create a run.
*
* @param threadId The ID of the thread.
* @param assistantId Assistant ID to use for this run.
* @param payload Payload for creating a run.
* @returns The created run.
*/
async create(
threadId: string,
assistantId: string,
payload?: RunsCreatePayload,
): Promise<Run> {
const json: Record<string, any> = {
input: payload?.input,
command: payload?.command,
config: payload?.config,
metadata: payload?.metadata,
assistant_id: assistantId,
interrupt_before: payload?.interruptBefore,
interrupt_after: payload?.interruptAfter,
webhook: payload?.webhook,
checkpoint: payload?.checkpoint,
checkpoint_id: payload?.checkpointId,
multitask_strategy: payload?.multitaskStrategy,
after_seconds: payload?.afterSeconds,
if_not_exists: payload?.ifNotExists,
};
return this.fetch<Run>(`/threads/${threadId}/runs`, {
method: "POST",
json,
signal: payload?.signal,
});
}
/**
* Create a batch of stateless background runs.
*
* @param payloads An array of payloads for creating runs.
* @returns An array of created runs.
*/
async createBatch(
payloads: (RunsCreatePayload & { assistantId: string })[],
): Promise<Run[]> {
const filteredPayloads = payloads
.map((payload) => ({ ...payload, assistant_id: payload.assistantId }))
.map((payload) => {
return Object.fromEntries(
Object.entries(payload).filter(([_, v]) => v !== undefined),
);
});
return this.fetch<Run[]>("/runs/batch", {
method: "POST",
json: filteredPayloads,
});
}
async wait(
threadId: null,
assistantId: string,
payload?: Omit<RunsWaitPayload, "multitaskStrategy" | "onCompletion">,
): Promise<ThreadState["values"]>;
async wait(
threadId: string,
assistantId: string,
payload?: RunsWaitPayload,
): Promise<ThreadState["values"]>;
/**
* Create a run and wait for it to complete.
*
* @param threadId The ID of the thread.
* @param assistantId Assistant ID to use for this run.
* @param payload Payload for creating a run.
* @returns The last values chunk of the thread.
*/
async wait(
threadId: string | null,
assistantId: string,
payload?: RunsWaitPayload,
): Promise<ThreadState["values"]> {
const json: Record<string, any> = {
input: payload?.input,
command: payload?.command,
config: payload?.config,
metadata: payload?.metadata,
assistant_id: assistantId,
interrupt_before: payload?.interruptBefore,
interrupt_after: payload?.interruptAfter,
checkpoint: payload?.checkpoint,
checkpoint_id: payload?.checkpointId,
webhook: payload?.webhook,
multitask_strategy: payload?.multitaskStrategy,
on_completion: payload?.onCompletion,
on_disconnect: payload?.onDisconnect,
after_seconds: payload?.afterSeconds,
if_not_exists: payload?.ifNotExists,
};
const endpoint =
threadId == null ? `/runs/wait` : `/threads/${threadId}/runs/wait`;
const response = await this.fetch<ThreadState["values"]>(endpoint, {
method: "POST",
json,
timeoutMs: null,
signal: payload?.signal,
});
const raiseError =
payload?.raiseError !== undefined ? payload.raiseError : true;
if (
raiseError &&
"__error__" in response &&
typeof response.__error__ === "object" &&
response.__error__ &&
"error" in response.__error__ &&
"message" in response.__error__
) {
throw new Error(
`${response.__error__?.error}: ${response.__error__?.message}`,
);
}
return response;
}
/**
* List all runs for a thread.
*
* @param threadId The ID of the thread.
* @param options Filtering and pagination options.
* @returns List of runs.
*/
async list(
threadId: string,
options?: {
/**
* Maximum number of runs to return.
* Defaults to 10
*/
limit?: number;
/**
* Offset to start from.
* Defaults to 0.
*/
offset?: number;
/**
* Status of the run to filter by.
*/
status?: RunStatus;
},
): Promise<Run[]> {
return this.fetch<Run[]>(`/threads/${threadId}/runs`, {
params: {
limit: options?.limit ?? 10,
offset: options?.offset ?? 0,
status: options?.status ?? undefined,
},
});
}
/**
* Get a run by ID.
*
* @param threadId The ID of the thread.
* @param runId The ID of the run.
* @returns The run.
*/
async get(threadId: string, runId: string): Promise<Run> {
return this.fetch<Run>(`/threads/${threadId}/runs/${runId}`);
}
/**
* Cancel a run.
*
* @param threadId The ID of the thread.
* @param runId The ID of the run.
* @param wait Whether to block when canceling
* @param action Action to take when cancelling the run. Possible values are `interrupt` or `rollback`. Default is `interrupt`.
* @returns
*/
async cancel(
threadId: string,
runId: string,
wait: boolean = false,
action: CancelAction = "interrupt",
): Promise<void> {
return this.fetch<void>(`/threads/${threadId}/runs/${runId}/cancel`, {
method: "POST",
params: {
wait: wait ? "1" : "0",
action: action,
},
});
}
/**
* Block until a run is done.
*
* @param threadId The ID of the thread.
* @param runId The ID of the run.
* @returns
*/
async join(
threadId: string,
runId: string,
options?: { signal?: AbortSignal },
): Promise<void> {
return this.fetch<void>(`/threads/${threadId}/runs/${runId}/join`, {
timeoutMs: null,
signal: options?.signal,
});
}
  /**
   * Stream output from a run in real-time, until the run is done.
   * Output is not buffered, so any output produced before this call will
   * not be received here.
   *
   * @param threadId The ID of the thread.
   * @param runId The ID of the run.
   * @param options Options object (`signal`, `cancelOnDisconnect`), or a bare
   *   AbortSignal for backwards compatibility.
   * @returns An async generator yielding stream parts.
   */
  async *joinStream(
    threadId: string,
    runId: string,
    options?:
      | { signal?: AbortSignal; cancelOnDisconnect?: boolean }
      | AbortSignal,
  ): AsyncGenerator<{ event: StreamEvent; data: any }> {
    // Normalize the legacy bare-AbortSignal form into an options object.
    const opts =
      typeof options === "object" &&
      options != null &&
      options instanceof AbortSignal
        ? { signal: options }
        : options;
    const response = await this.asyncCaller.fetch(
      ...this.prepareFetchOptions(`/threads/${threadId}/runs/${runId}/stream`, {
        method: "GET",
        // No timeout: the stream stays open for as long as the run takes.
        timeoutMs: null,
        signal: opts?.signal,
        params: { cancel_on_disconnect: opts?.cancelOnDisconnect ? "1" : "0" },
      }),
    );
    let parser: EventSourceParser;
    let onEndEvent: () => void;
    const textDecoder = new TextDecoder();
    // Decode the SSE byte stream into { event, data } objects. A missing
    // response body is replaced with an immediately-closed stream so that
    // iteration simply ends instead of crashing.
    const stream: ReadableStream<{ event: string; data: any }> = (
      response.body || new ReadableStream({ start: (ctrl) => ctrl.close() })
    ).pipeThrough(
      new TransformStream({
        async start(ctrl) {
          parser = createParser((event) => {
            // Stop on client abort or on the server's "[DONE]" sentinel.
            if (
              (opts?.signal && opts.signal.aborted) ||
              (event.type === "event" && event.data === "[DONE]")
            ) {
              ctrl.terminate();
              return;
            }
            if ("data" in event) {
              ctrl.enqueue({
                event: event.event ?? "message",
                data: JSON.parse(event.data),
              });
            }
          });
          onEndEvent = () => {
            ctrl.enqueue({ event: "end", data: undefined });
          };
        },
        async transform(chunk) {
          const payload = textDecoder.decode(chunk);
          parser.feed(payload);
          // eventsource-parser will ignore events
          // that are not terminated by a newline
          if (payload.trim() === "event: end") onEndEvent();
        },
      }),
    );
    yield* IterableReadableStream.fromReadableStream(stream);
  }
/**
* Delete a run.
*
* @param threadId The ID of the thread.
* @param runId The ID of the run.
* @returns
*/
async delete(threadId: string, runId: string): Promise<void> {
return this.fetch<void>(`/threads/${threadId}/runs/${runId}`, {
method: "DELETE",
});
}
}
// Wire format of a store item as returned by the API (snake_case timestamps,
// converted to camelCase before being handed back to SDK callers).
interface APIItem {
  namespace: string[];
  key: string;
  value: Record<string, any>;
  created_at: string;
  updated_at: string;
}
// Raw response shape of the /store/items/search endpoint.
interface APISearchItemsResponse {
  items: APIItem[];
}
export class StoreClient extends BaseClient {
  /**
   * Validate that no namespace label contains a period.
   *
   * Periods are reserved: `getItem` encodes the namespace path as a single
   * period-joined string, so a period inside a label would corrupt the path.
   * Previously this check was copy-pasted into putItem/getItem/deleteItem.
   *
   * @param namespace A list of strings representing the namespace path.
   * @throws Error if any label contains a period.
   */
  private validateNamespace(namespace: string[]): void {
    namespace.forEach((label) => {
      if (label.includes(".")) {
        throw new Error(
          `Invalid namespace label '${label}'. Namespace labels cannot contain periods ('.')`,
        );
      }
    });
  }
  /**
   * Store or update an item.
   *
   * @param namespace A list of strings representing the namespace path.
   * @param key The unique identifier for the item within the namespace.
   * @param value A dictionary containing the item's data.
   * @returns Promise<void>
   */
  async putItem(
    namespace: string[],
    key: string,
    value: Record<string, any>,
  ): Promise<void> {
    this.validateNamespace(namespace);
    const payload = {
      namespace,
      key,
      value,
    };
    return this.fetch<void>("/store/items", {
      method: "PUT",
      json: payload,
    });
  }
  /**
   * Retrieve a single item.
   *
   * @param namespace A list of strings representing the namespace path.
   * @param key The unique identifier for the item.
   * @returns Promise<Item | null> The item, or `null` when not found.
   */
  async getItem(namespace: string[], key: string): Promise<Item | null> {
    this.validateNamespace(namespace);
    const response = await this.fetch<APIItem>("/store/items", {
      params: { namespace: namespace.join("."), key },
    });
    // Convert the API's snake_case timestamps to the SDK's camelCase shape.
    return response
      ? {
          ...response,
          createdAt: response.created_at,
          updatedAt: response.updated_at,
        }
      : null;
  }
  /**
   * Delete an item.
   *
   * @param namespace A list of strings representing the namespace path.
   * @param key The unique identifier for the item.
   * @returns Promise<void>
   */
  async deleteItem(namespace: string[], key: string): Promise<void> {
    this.validateNamespace(namespace);
    return this.fetch<void>("/store/items", {
      method: "DELETE",
      json: { namespace, key },
    });
  }
  /**
   * Search for items within a namespace prefix.
   *
   * @param namespacePrefix List of strings representing the namespace prefix.
   * @param options.filter Optional dictionary of key-value pairs to filter results.
   * @param options.limit Maximum number of items to return (default is 10).
   * @param options.offset Number of items to skip before returning results (default is 0).
   * @param options.query Optional search query.
   * @returns Promise<SearchItemsResponse>
   */
  async searchItems(
    namespacePrefix: string[],
    options?: {
      filter?: Record<string, any>;
      limit?: number;
      offset?: number;
      query?: string;
    },
  ): Promise<SearchItemsResponse> {
    const payload = {
      namespace_prefix: namespacePrefix,
      filter: options?.filter,
      limit: options?.limit ?? 10,
      offset: options?.offset ?? 0,
      query: options?.query,
    };
    const response = await this.fetch<APISearchItemsResponse>(
      "/store/items/search",
      {
        method: "POST",
        json: payload,
      },
    );
    // Convert each item's snake_case timestamps to camelCase.
    return {
      items: response.items.map((item) => ({
        ...item,
        createdAt: item.created_at,
        updatedAt: item.updated_at,
      })),
    };
  }
  /**
   * List namespaces with optional match conditions.
   *
   * @param options.prefix Optional list of strings representing the prefix to filter namespaces.
   * @param options.suffix Optional list of strings representing the suffix to filter namespaces.
   * @param options.maxDepth Optional integer specifying the maximum depth of namespaces to return.
   * @param options.limit Maximum number of namespaces to return (default is 100).
   * @param options.offset Number of namespaces to skip before returning results (default is 0).
   * @returns Promise<ListNamespaceResponse>
   */
  async listNamespaces(options?: {
    prefix?: string[];
    suffix?: string[];
    maxDepth?: number;
    limit?: number;
    offset?: number;
  }): Promise<ListNamespaceResponse> {
    const payload = {
      prefix: options?.prefix,
      suffix: options?.suffix,
      max_depth: options?.maxDepth,
      limit: options?.limit ?? 100,
      offset: options?.offset ?? 0,
    };
    return this.fetch<ListNamespaceResponse>("/store/namespaces", {
      method: "POST",
      json: payload,
    });
  }
}
export class Client {
  /**
   * The client for interacting with assistants.
   */
  public assistants: AssistantsClient;
  /**
   * The client for interacting with threads.
   */
  public threads: ThreadsClient;
  /**
   * The client for interacting with runs.
   */
  public runs: RunsClient;
  /**
   * The client for interacting with cron runs.
   */
  public crons: CronsClient;
  /**
   * The client for interacting with the KV store.
   */
  public store: StoreClient;
  /**
   * Create a top-level API client. The same configuration object is passed
   * to every sub-client.
   *
   * @param config Optional configuration shared by all sub-clients.
   */
  constructor(config?: ClientConfig) {
    this.assistants = new AssistantsClient(config);
    this.threads = new ThreadsClient(config);
    this.runs = new RunsClient(config);
    this.crons = new CronsClient(config);
    this.store = new StoreClient(config);
  }
}
|
0 | lc_public_repos/langgraph/libs/sdk-js | lc_public_repos/langgraph/libs/sdk-js/src/index.ts | export { Client } from "./client.js";
export type {
Assistant,
AssistantVersion,
AssistantGraph,
Config,
DefaultValues,
GraphSchema,
Metadata,
Run,
Thread,
ThreadTask,
ThreadState,
ThreadStatus,
Cron,
Checkpoint,
Interrupt,
} from "./schema.js";
export type { OnConflictBehavior, Command } from "./types.js";
|
0 | lc_public_repos/langgraph/libs/sdk-js/src | lc_public_repos/langgraph/libs/sdk-js/src/utils/async_caller.ts | import pRetry from "p-retry";
import PQueueMod from "p-queue";
// Status codes for which a retry of the same request cannot succeed;
// the call fails immediately instead of backing off (see AsyncCaller.call).
const STATUS_NO_RETRY = [
  400, // Bad Request
  401, // Unauthorized
  402, // Payment required
  403, // Forbidden
  404, // Not Found
  405, // Method Not Allowed
  406, // Not Acceptable
  407, // Proxy Authentication Required
  408, // Request Timeout
  422, // Unprocessable Entity
];
// Status codes whose failed attempts neither fail fast nor trigger the
// onFailedResponseHook (see AsyncCaller.call).
const STATUS_IGNORE = [
  409, // Conflict
];
// Hook invoked with the failed HTTP response between retry attempts.
type ResponseCallback = (response?: Response) => Promise<boolean>;
export interface AsyncCallerParams {
  /**
   * The maximum number of concurrent calls that can be made.
   * Defaults to `Infinity`, which means no limit.
   */
  maxConcurrency?: number;
  /**
   * The maximum number of retries that can be made for a single call,
   * with an exponential backoff between each attempt. Defaults to 4.
   */
  maxRetries?: number;
  onFailedResponseHook?: ResponseCallback;
  /**
   * Specify a custom fetch implementation.
   *
   * By default we expect the `fetch` is available in the global scope.
   */
  fetch?: typeof fetch | ((...args: any[]) => any);
}
/** Per-call options accepted by `AsyncCaller.callWithOptions`. */
export interface AsyncCallerCallOptions {
  signal?: AbortSignal;
}
/**
 * Duck-type check for fetch `Response` objects. We deliberately avoid
 * `instanceof globalThis.Response`, which may not exist in every runtime.
 */
function isResponse(x: unknown): x is Response {
  if (x == null || typeof x !== "object") return false;
  const candidate = x as Record<string, unknown>;
  return ["status", "statusText", "text"].every((prop) => prop in candidate);
}
/**
 * Error type carrying the HTTP status code and response body text of a
 * failed request, so retry logic can inspect the status.
 */
class HTTPError extends Error {
  status: number;
  text: string;
  response?: Response;
  constructor(status: number, message: string, response?: Response) {
    super(`HTTP ${status}: ${message}`);
    this.status = status;
    this.text = message;
    this.response = response;
  }
  /**
   * Build an HTTPError from a fetch Response: prefer the body text, fall
   * back to the status text when the body cannot be read.
   */
  static async fromResponse(
    response: Response,
    options?: { includeResponse?: boolean },
  ): Promise<HTTPError> {
    const kept = options?.includeResponse ? response : undefined;
    let detail: string;
    try {
      detail = await response.text();
    } catch {
      detail = response.statusText;
    }
    return new HTTPError(response.status, detail, kept);
  }
}
/**
 * A class that can be used to make async calls with concurrency and retry logic.
 *
 * This is useful for making calls to any kind of "expensive" external resource,
 * be it because it's rate-limited, subject to network issues, etc.
 *
 * Concurrent calls are limited by the `maxConcurrency` parameter, which defaults
 * to `Infinity`. This means that by default, all calls will be made in parallel.
 *
 * Retries are limited by the `maxRetries` parameter, which defaults to 4. This
 * means that by default, each call will be retried up to 4 times, with an
 * exponential backoff between each attempt.
 */
export class AsyncCaller {
  protected maxConcurrency: AsyncCallerParams["maxConcurrency"];
  protected maxRetries: AsyncCallerParams["maxRetries"];
  // Concurrency-limiting queue; the type is derived from the module to cope
  // with both CJS and ESM builds of p-queue.
  private queue: (typeof import("p-queue"))["default"]["prototype"];
  private onFailedResponseHook?: ResponseCallback;
  private customFetch?: typeof fetch;
  constructor(params: AsyncCallerParams) {
    this.maxConcurrency = params.maxConcurrency ?? Infinity;
    this.maxRetries = params.maxRetries ?? 4;
    // p-queue may be loaded as ESM (constructor on `.default`) or CJS
    // (the module itself is the constructor).
    if ("default" in PQueueMod) {
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      this.queue = new (PQueueMod.default as any)({
        concurrency: this.maxConcurrency,
      });
    } else {
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
      this.queue = new (PQueueMod as any)({ concurrency: this.maxConcurrency });
    }
    this.onFailedResponseHook = params?.onFailedResponseHook;
    this.customFetch = params.fetch;
  }
  /**
   * Schedule `callable(...args)` on the concurrency queue and retry failed
   * attempts with exponential backoff. Response-like rejections are wrapped
   * in HTTPError so the status code can drive retry decisions.
   */
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  call<A extends any[], T extends (...args: A) => Promise<any>>(
    callable: T,
    ...args: Parameters<T>
  ): Promise<Awaited<ReturnType<T>>> {
    // Captured locally so the pRetry callbacks below don't rely on `this`.
    const onFailedResponseHook = this.onFailedResponseHook;
    return this.queue.add(
      () =>
        pRetry(
          () =>
            callable(...(args as Parameters<T>)).catch(async (error) => {
              // eslint-disable-next-line no-instanceof/no-instanceof
              if (error instanceof Error) {
                throw error;
              } else if (isResponse(error)) {
                // Keep the Response object only when a hook will consume it.
                throw await HTTPError.fromResponse(error, {
                  includeResponse: !!onFailedResponseHook,
                });
              } else {
                throw new Error(error);
              }
            }),
          {
            async onFailedAttempt(error) {
              // Throwing from onFailedAttempt aborts all remaining retries.
              // Cancellations, timeouts and aborts are never retried.
              if (
                error.message.startsWith("Cancel") ||
                error.message.startsWith("TimeoutError") ||
                error.message.startsWith("AbortError")
              ) {
                throw error;
              }
              // eslint-disable-next-line @typescript-eslint/no-explicit-any
              if ((error as any)?.code === "ECONNABORTED") {
                throw error;
              }
              if (error instanceof HTTPError) {
                // Permanent client errors: fail fast, a retry cannot succeed.
                if (STATUS_NO_RETRY.includes(error.status)) {
                  throw error;
                } else if (STATUS_IGNORE.includes(error.status)) {
                  // Ignored statuses (409): skip the failure hook, let the
                  // retry loop proceed normally.
                  return;
                }
                if (onFailedResponseHook && error.response) {
                  await onFailedResponseHook(error.response);
                }
              }
            },
            // If needed we can change some of the defaults here,
            // but they're quite sensible.
            retries: this.maxRetries,
            randomize: true,
          },
        ),
      { throwOnTimeout: true },
    );
  }
  /**
   * Like `call`, but rejects with "AbortError" as soon as the given signal
   * fires. Note this does not cancel the underlying in-flight request.
   */
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  callWithOptions<A extends any[], T extends (...args: A) => Promise<any>>(
    options: AsyncCallerCallOptions,
    callable: T,
    ...args: Parameters<T>
  ): Promise<Awaited<ReturnType<T>>> {
    // Note this doesn't cancel the underlying request,
    // when available prefer to use the signal option of the underlying call
    if (options.signal) {
      return Promise.race([
        this.call<A, T>(callable, ...args),
        new Promise<never>((_, reject) => {
          options.signal?.addEventListener("abort", () => {
            reject(new Error("AbortError"));
          });
        }),
      ]);
    }
    return this.call<A, T>(callable, ...args);
  }
  /**
   * `fetch` routed through `call`: non-2xx responses are rejected with the
   * Response object so they flow through the HTTPError/retry machinery.
   */
  fetch(...args: Parameters<typeof fetch>): ReturnType<typeof fetch> {
    const fetchFn = this.customFetch ?? fetch;
    return this.call(() =>
      fetchFn(...args).then((res) => (res.ok ? res : Promise.reject(res))),
    );
  }
}
|
0 | lc_public_repos/langgraph/libs/sdk-js/src | lc_public_repos/langgraph/libs/sdk-js/src/utils/env.ts | export function getEnvironmentVariable(name: string): string | undefined {
// Certain setups (Deno, frontend) will throw an error if you try to access environment variables
try {
return typeof process !== "undefined"
? // eslint-disable-next-line no-process-env
process.env?.[name]
: undefined;
} catch (e) {
return undefined;
}
}
|
0 | lc_public_repos/langgraph/libs/sdk-js/src | lc_public_repos/langgraph/libs/sdk-js/src/utils/stream.ts | // in this case don't quite match.
// A ReadableStream that also satisfies the async-iterable protocol.
type IterableReadableStreamInterface<T> = ReadableStream<T> & AsyncIterable<T>;
/*
 * Support async iterator syntax for ReadableStreams in all environments.
 * Source: https://github.com/MattiasBuelens/web-streams-polyfill/pull/122#issuecomment-1627354490
 */
export class IterableReadableStream<T>
  extends ReadableStream<T>
  implements IterableReadableStreamInterface<T>
{
  // Reader acquired lazily on first iteration; locks the stream while held.
  public reader: ReadableStreamDefaultReader<T>;
  /** Acquire (and cache) a reader on first use. */
  ensureReader() {
    if (!this.reader) {
      this.reader = this.getReader();
    }
  }
  /** Async-iterator protocol: read one chunk, releasing the lock at EOF. */
  async next(): Promise<IteratorResult<T>> {
    this.ensureReader();
    try {
      const result = await this.reader.read();
      if (result.done) {
        this.reader.releaseLock(); // release lock when stream becomes closed
        return {
          done: true,
          value: undefined,
        };
      } else {
        return {
          done: false,
          value: result.value,
        };
      }
    } catch (e) {
      this.reader.releaseLock(); // release lock when stream becomes errored
      throw e;
    }
  }
  /** Early-exit protocol (e.g. `break` in for-await): cancel and unlock. */
  async return(): Promise<IteratorResult<T>> {
    this.ensureReader();
    // If wrapped in a Node stream, cancel is already called.
    if (this.locked) {
      const cancelPromise = this.reader.cancel(); // cancel first, but don't await yet
      this.reader.releaseLock(); // release lock first
      await cancelPromise; // now await it
    }
    return { done: true, value: undefined };
  }
  /** Error-exit protocol: cancel and unlock, then rethrow the given error. */
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  async throw(e: any): Promise<IteratorResult<T>> {
    this.ensureReader();
    if (this.locked) {
      const cancelPromise = this.reader.cancel(); // cancel first, but don't await yet
      this.reader.releaseLock(); // release lock first
      await cancelPromise; // now await it
    }
    throw e;
  }
  // Support `await using` cleanup by delegating to return().
  // eslint-disable-next-line @typescript-eslint/ban-ts-comment
  // @ts-ignore Not present in Node 18 types, required in latest Node 22
  async [Symbol.asyncDispose]() {
    await this.return();
  }
  [Symbol.asyncIterator]() {
    return this;
  }
  /** Wrap an existing ReadableStream, pumping its chunks through. */
  static fromReadableStream<T>(stream: ReadableStream<T>) {
    // From https://developer.mozilla.org/en-US/docs/Web/API/Streams_API/Using_readable_streams#reading_the_stream
    const reader = stream.getReader();
    return new IterableReadableStream<T>({
      start(controller) {
        return pump();
        function pump(): Promise<T | undefined> {
          return reader.read().then(({ done, value }) => {
            // When no more data needs to be consumed, close the stream
            if (done) {
              controller.close();
              return;
            }
            // Enqueue the next data chunk into our target stream
            controller.enqueue(value);
            return pump();
          });
        }
      },
      cancel() {
        reader.releaseLock();
      },
    });
  }
  /** Adapt an async generator into an IterableReadableStream. */
  static fromAsyncGenerator<T>(generator: AsyncGenerator<T>) {
    return new IterableReadableStream<T>({
      async pull(controller) {
        const { value, done } = await generator.next();
        // When no more data needs to be consumed, close the stream
        if (done) {
          controller.close();
        }
        // Fix: `else if (value)` will hang the streaming when nullish value (e.g. empty string) is pulled
        controller.enqueue(value);
      },
      async cancel(reason) {
        await generator.return(reason);
      },
    });
  }
}
|
0 | lc_public_repos/langgraph/libs/sdk-js/src | lc_public_repos/langgraph/libs/sdk-js/src/utils/signals.ts | export function mergeSignals(...signals: (AbortSignal | null | undefined)[]) {
  const nonZeroSignals = signals.filter(
    (signal): signal is AbortSignal => signal != null,
  );
  // Fast paths: nothing to merge, or a single signal can be returned as-is.
  if (nonZeroSignals.length === 0) return undefined;
  if (nonZeroSignals.length === 1) return nonZeroSignals[0];
  const controller = new AbortController();
  for (const signal of signals) {
    // An already-aborted input aborts the merged signal immediately,
    // propagating its reason; no listeners need to be attached.
    if (signal?.aborted) {
      controller.abort(signal.reason);
      return controller.signal;
    }
    // Propagate the first abort; `once` drops the listener afterwards.
    signal?.addEventListener("abort", () => controller.abort(signal.reason), {
      once: true,
    });
  }
  return controller.signal;
|
0 | lc_public_repos/langgraph/libs/sdk-js/src/utils | lc_public_repos/langgraph/libs/sdk-js/src/utils/eventsource-parser/LICENSE | MIT License
Copyright (c) 2024 Espen Hovlandsdal <espen@hovlandsdal.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE. |
0 | lc_public_repos/langgraph/libs/sdk-js/src/utils | lc_public_repos/langgraph/libs/sdk-js/src/utils/eventsource-parser/types.ts | /**
* EventSource parser instance.
*
* Needs to be reset between reconnections/when switching data source, using the `reset()` method.
*
* @public
*/
export interface EventSourceParser {
/**
* Feeds the parser another chunk. The method _does not_ return a parsed message.
* Instead, if the chunk was a complete message (or completed a previously incomplete message),
* it will invoke the `onParse` callback used to create the parsers.
*
* @param chunk - The chunk to parse. Can be a partial, eg in the case of streaming messages.
* @public
*/
feed(chunk: string): void;
/**
* Resets the parser state. This is required when you have a new stream of messages -
* for instance in the case of a client being disconnected and reconnecting.
*
* @public
*/
reset(): void;
}
/**
* A parsed EventSource event
*
* @public
*/
export interface ParsedEvent {
/**
* Differentiates the type from reconnection intervals and other types of messages
* Not to be confused with `event`.
*/
type: "event";
/**
* The event type sent from the server. Note that this differs from the browser `EventSource`
* implementation in that browsers will default this to `message`, whereas this parser will
* leave this as `undefined` if not explicitly declared.
*/
event?: string;
/**
* ID of the message, if any was provided by the server. Can be used by clients to keep the
* last received message ID in sync when reconnecting.
*/
id?: string;
/**
* The data received for this message
*/
data: string;
}
/**
* An event emitted from the parser when the server sends a value in the `retry` field,
* indicating how many seconds the client should wait before attempting to reconnect.
*
* @public
*/
export interface ReconnectInterval {
/**
* Differentiates the type from `event` and other types of messages
*/
type: "reconnect-interval";
/**
* Number of seconds to wait before reconnecting. Note that the parser does not care about
* this value at all - it only emits the value for clients to use.
*/
value: number;
}
/**
* The different types of messages the parsed can emit to the `onParse` callback
*
* @public
*/
export type ParseEvent = ParsedEvent | ReconnectInterval;
/**
* Callback passed as the `onParse` callback to a parser
*
* @public
*/
export type EventSourceParseCallback = (event: ParseEvent) => void;
|
0 | lc_public_repos/langgraph/libs/sdk-js/src/utils | lc_public_repos/langgraph/libs/sdk-js/src/utils/eventsource-parser/parse.ts | /**
* EventSource/Server-Sent Events parser
* @see https://html.spec.whatwg.org/multipage/server-sent-events.html
*
* Based on code from the {@link https://github.com/EventSource/eventsource | EventSource module},
* which is licensed under the MIT license. And copyrighted the EventSource GitHub organisation.
*/
import type { EventSourceParseCallback, EventSourceParser } from "./types.js";
/**
* Creates a new EventSource parser.
*
* @param onParse - Callback to invoke when a new event is parsed, or a new reconnection interval
* has been sent from the server
*
* @returns A new EventSource parser, with `parse` and `reset` methods.
* @public
*/
export function createParser(
  onParse: EventSourceParseCallback,
): EventSourceParser {
  // Processing state
  let isFirstChunk: boolean;
  let buffer: string;
  let startingPosition: number;
  let startingFieldLength: number;
  // Event state
  let eventId: string | undefined;
  let eventName: string | undefined;
  let data: string;
  reset();
  return { feed, reset };
  /** Clear all parser and event state, ready for a fresh stream. */
  function reset(): void {
    isFirstChunk = true;
    buffer = "";
    startingPosition = 0;
    startingFieldLength = -1;
    eventId = undefined;
    eventName = undefined;
    data = "";
  }
  /**
   * Consume one chunk of the stream. Complete lines are parsed immediately;
   * a trailing partial line stays buffered until the next feed() call.
   */
  function feed(chunk: string): void {
    buffer = buffer ? buffer + chunk : chunk;
    // Strip any UTF8 byte order mark (BOM) at the start of the stream.
    // Note that we do not strip any non - UTF8 BOM, as eventsource streams are
    // always decoded as UTF8 as per the specification.
    if (isFirstChunk && hasBom(buffer)) {
      buffer = buffer.slice(BOM.length);
    }
    isFirstChunk = false;
    // Set up chunk-specific processing state
    const length = buffer.length;
    let position = 0;
    let discardTrailingNewline = false;
    // Read the current buffer byte by byte
    while (position < length) {
      // EventSource allows for carriage return + line feed, which means we
      // need to ignore a linefeed character if the previous character was a
      // carriage return
      // @todo refactor to reduce nesting, consider checking previous byte?
      // @todo but consider multiple chunks etc
      if (discardTrailingNewline) {
        if (buffer[position] === "\n") {
          ++position;
        }
        discardTrailingNewline = false;
      }
      // Scan for the end of the current line, noting where the first ":"
      // (field/value separator) sits, if any.
      let lineLength = -1;
      let fieldLength = startingFieldLength;
      let character: string;
      for (
        let index = startingPosition;
        lineLength < 0 && index < length;
        ++index
      ) {
        character = buffer[index];
        if (character === ":" && fieldLength < 0) {
          fieldLength = index - position;
        } else if (character === "\r") {
          discardTrailingNewline = true;
          lineLength = index - position;
        } else if (character === "\n") {
          lineLength = index - position;
        }
      }
      if (lineLength < 0) {
        // Incomplete line: remember how far we scanned so the next feed()
        // can resume without rescanning, then wait for more data.
        startingPosition = length - position;
        startingFieldLength = fieldLength;
        break;
      } else {
        startingPosition = 0;
        startingFieldLength = -1;
      }
      parseEventStreamLine(buffer, position, fieldLength, lineLength);
      position += lineLength + 1;
    }
    if (position === length) {
      // If we consumed the entire buffer to read the event, reset the buffer
      buffer = "";
    } else if (position > 0) {
      // If there are bytes left to process, set the buffer to the unprocessed
      // portion of the buffer only
      buffer = buffer.slice(position);
    }
  }
  /**
   * Interpret one line of the event stream, accumulating fields until a
   * blank line dispatches the completed event to `onParse`.
   */
  function parseEventStreamLine(
    lineBuffer: string,
    index: number,
    fieldLength: number,
    lineLength: number,
  ) {
    if (lineLength === 0) {
      // We reached the last line of this event
      if (data.length > 0) {
        onParse({
          type: "event",
          id: eventId,
          event: eventName || undefined,
          data: data.slice(0, -1), // remove trailing newline
        });
        data = "";
        eventId = undefined;
      }
      eventName = undefined;
      return;
    }
    const noValue = fieldLength < 0;
    const field = lineBuffer.slice(
      index,
      index + (noValue ? lineLength : fieldLength),
    );
    // Per the SSE spec, a single space after ":" is not part of the value.
    let step = 0;
    if (noValue) {
      step = lineLength;
    } else if (lineBuffer[index + fieldLength + 1] === " ") {
      step = fieldLength + 2;
    } else {
      step = fieldLength + 1;
    }
    const position = index + step;
    const valueLength = lineLength - step;
    const value = lineBuffer.slice(position, position + valueLength).toString();
    if (field === "data") {
      data += value ? `${value}\n` : "\n";
    } else if (field === "event") {
      eventName = value;
    } else if (field === "id" && !value.includes("\u0000")) {
      // IDs containing NULL are ignored, as required by the specification.
      eventId = value;
    } else if (field === "retry") {
      const retry = parseInt(value, 10);
      if (!Number.isNaN(retry)) {
        onParse({ type: "reconnect-interval", value: retry });
      }
    }
  }
}
// UTF-8 byte order mark, as individual char codes.
const BOM = [239, 187, 191];
/** True if `buffer` begins with a UTF-8 BOM. */
function hasBom(buffer: string) {
  return BOM.every(
    (charCode: number, index: number) => buffer.charCodeAt(index) === charCode,
  );
}
|
0 | lc_public_repos/langgraph/libs/sdk-js/src/utils | lc_public_repos/langgraph/libs/sdk-js/src/utils/eventsource-parser/index.ts | // From https://github.com/rexxars/eventsource-parser
// Inlined due to CJS import issues
export { createParser } from "./parse.js";
export type {
EventSourceParseCallback,
EventSourceParser,
ParsedEvent,
ParseEvent,
ReconnectInterval,
} from "./types.js";
|
0 | lc_public_repos/langgraph/libs/sdk-js/src/utils | lc_public_repos/langgraph/libs/sdk-js/src/utils/eventsource-parser/stream.ts | import { createParser } from "./parse.js";
import type { EventSourceParser, ParsedEvent } from "./types.js";
/**
 * A TransformStream that ingests a stream of strings and produces a stream of ParsedEvents.
 *
 * @example
 * ```
 * const eventStream =
 *   response.body
 *     .pipeThrough(new TextDecoderStream())
 *     .pipeThrough(new EventSourceParserStream())
 * ```
 * @public
 */
export class EventSourceParserStream extends TransformStream<
  string,
  ParsedEvent
> {
  constructor() {
    let parser!: EventSourceParser;
    super({
      start(controller) {
        // Use the callback's declared ParseEvent parameter type instead of
        // the previous `: any`, so narrowing on `type === "event"` yields
        // ParsedEvent and reconnect-interval events are dropped type-safely.
        parser = createParser((event) => {
          if (event.type === "event") {
            controller.enqueue(event);
          }
        });
      },
      transform(chunk) {
        parser.feed(chunk);
      },
    });
  }
}
export type { ParsedEvent } from "./types.js";
|
0 | lc_public_repos/langgraph | lc_public_repos/langgraph/docs/mkdocs.yml | site_name: ""
site_description: Build language agents as graphs
site_url: https://langchain-ai.github.io/langgraph/
repo_url: https://github.com/langchain-ai/langgraph
theme:
name: material
custom_dir: overrides
logo_dark_mode: static/wordmark_light.svg
logo_light_mode: static/wordmark_dark.svg
favicon: static/favicon.png
icon:
repo: fontawesome/brands/git-alt
features:
- announce.dismiss
- content.code.annotate
- content.code.copy
- content.code.select
- content.tabs.link
- content.tooltips
- header.autohide
- navigation.expand
- navigation.footer
- navigation.indexes
- navigation.instant
- navigation.sections
- navigation.instant.prefetch
- navigation.instant.progress
- navigation.path
- navigation.prune
- navigation.tabs
- navigation.tabs.sticky
- navigation.top
- navigation.tracking
- search.highlight
- search.share
- search.suggest
- toc.follow
palette:
- scheme: default
primary: white
accent: gray
toggle:
icon: material/brightness-7
name: Switch to dark mode
- scheme: slate
primary: grey
accent: white
toggle:
icon: material/brightness-4
name: Switch to light mode
font:
text: "Public Sans"
code: "Roboto Mono"
plugins:
- search:
separator: '[\s\u200b\-_,:!=\[\]()"`/]+|\.(?!\d)|&[lg]t;|(?!\b)(?=[A-Z][a-z])'
- autorefs
- redirects:
redirect_maps:
'cloud/index.md': 'concepts/index.md#langgraph-platform'
'cloud/how-tos/index.md': 'how-tos/index.md#langgraph-platform'
'cloud/concepts/api.md': 'concepts/langgraph_server.md'
'cloud/concepts/cloud.md': 'concepts/langgraph_cloud.md'
'cloud/faq/studio.md': 'concepts/langgraph_studio.md#studio-faqs'
- mkdocstrings:
handlers:
python:
import:
- https://docs.python.org/3/objects.inv
- https://api.python.langchain.com/en/latest/objects.inv
options:
members_order: source
allow_inspection: true
heading_level: 2
show_bases: true
show_source: true
summary: true
inherited_members: true
selection:
docstring_style: google
docstring_section_style: list
show_root_toc_entry: false
show_signature_annotations: true
show_symbol_type_heading: true
show_symbol_type_toc: true
signature_crossrefs: true
options:
filters:
- "!^_"
nav:
- Home: index.md
- Tutorials:
- tutorials/index.md
- Quick Start:
- Quick Start: tutorials#quick-start
- tutorials/introduction.ipynb
- tutorials/langgraph-platform/local-server.md
- cloud/quick_start.md
- Chatbots:
- Chatbots: tutorials#chatbots
- tutorials/customer-support/customer-support.ipynb
- tutorials/chatbots/information-gather-prompting.ipynb
- tutorials/code_assistant/langgraph_code_assistant.ipynb
- RAG:
- RAG: tutorials#rag
- tutorials/rag/langgraph_adaptive_rag.ipynb
- tutorials/rag/langgraph_adaptive_rag_local.ipynb
- tutorials/rag/langgraph_agentic_rag.ipynb
- tutorials/rag/langgraph_crag.ipynb
- tutorials/rag/langgraph_crag_local.ipynb
- tutorials/rag/langgraph_self_rag.ipynb
- tutorials/rag/langgraph_self_rag_local.ipynb
- tutorials/sql-agent.ipynb
- Agent Architectures:
- Agent Architectures: tutorials#agent-architectures
- Multi-Agent Systems:
- Multi-Agent Systems: tutorials#multi-agent-systems
- tutorials/multi_agent/multi-agent-collaboration.ipynb
- tutorials/multi_agent/agent_supervisor.ipynb
- tutorials/multi_agent/hierarchical_agent_teams.ipynb
- Planning Agents:
- Planning Agents: tutorials#planning-agents
- tutorials/plan-and-execute/plan-and-execute.ipynb
- tutorials/rewoo/rewoo.ipynb
- tutorials/llm-compiler/LLMCompiler.ipynb
- Reflection & Critique:
- Reflection & Critique: tutorials#reflection-critique
- tutorials/reflection/reflection.ipynb
- tutorials/reflexion/reflexion.ipynb
- tutorials/tot/tot.ipynb
- tutorials/lats/lats.ipynb
- tutorials/self-discover/self-discover.ipynb
- Evaluation & Analysis:
- Evaluation & Analysis: tutorials#evaluation
- tutorials/chatbot-simulation-evaluation/agent-simulation-evaluation.ipynb
- tutorials/chatbot-simulation-evaluation/langsmith-agent-simulation-evaluation.ipynb
- Experimental:
- Experimental: tutorials#experimental
- tutorials/storm/storm.ipynb
- tutorials/tnt-llm/tnt-llm.ipynb
- tutorials/web-navigation/web_voyager.ipynb
- tutorials/usaco/usaco.ipynb
- tutorials/extraction/retries.ipynb
- How-to Guides:
- how-tos/index.md
- LangGraph:
- LangGraph: how-tos#langgraph
- Controllability:
- Controllability: how-tos#controllability
- how-tos/branching.ipynb
- how-tos/map-reduce.ipynb
- how-tos/recursion-limit.ipynb
- how-tos/command.ipynb
- Persistence:
- Persistence: how-tos#persistence
- how-tos/persistence.ipynb
- how-tos/subgraph-persistence.ipynb
- how-tos/cross-thread-persistence.ipynb
- how-tos/persistence_postgres.ipynb
- how-tos/persistence_mongodb.ipynb
- how-tos/persistence_redis.ipynb
- Memory:
- Memory: how-tos#memory
- how-tos/memory/manage-conversation-history.ipynb
- how-tos/memory/delete-messages.ipynb
- how-tos/memory/add-summary-conversation-history.ipynb
- how-tos/memory/semantic-search.ipynb
- Human-in-the-loop:
- Human-in-the-loop: how-tos#human-in-the-loop
- how-tos/human_in_the_loop/breakpoints.ipynb
- how-tos/human_in_the_loop/dynamic_breakpoints.ipynb
- how-tos/human_in_the_loop/edit-graph-state.ipynb
- how-tos/human_in_the_loop/wait-user-input.ipynb
- how-tos/human_in_the_loop/time-travel.ipynb
- how-tos/human_in_the_loop/review-tool-calls.ipynb
- Streaming:
- Streaming: how-tos#streaming
- how-tos/stream-values.ipynb
- how-tos/stream-updates.ipynb
- how-tos/streaming-tokens.ipynb
- how-tos/streaming-tokens-without-langchain.ipynb
- how-tos/streaming-content.ipynb
- how-tos/stream-multiple.ipynb
- how-tos/streaming-events-from-within-tools.ipynb
- how-tos/streaming-events-from-within-tools-without-langchain.ipynb
- how-tos/streaming-from-final-node.ipynb
- how-tos/streaming-subgraphs.ipynb
- how-tos/disable-streaming.ipynb
- Tool calling:
- Tool calling: how-tos#tool-calling
- how-tos/tool-calling.ipynb
- how-tos/tool-calling-errors.ipynb
- how-tos/pass-run-time-values-to-tools.ipynb
- how-tos/pass-config-to-tools.ipynb
- how-tos/many-tools.ipynb
- Subgraphs:
- Subgraphs: how-tos#subgraphs
- how-tos/subgraph.ipynb
- how-tos/subgraphs-manage-state.ipynb
- how-tos/subgraph-transform-state.ipynb
- State Management:
- State Management: how-tos#state-management
- how-tos/state-model.ipynb
- how-tos/input_output_schema.ipynb
- how-tos/pass_private_state.ipynb
- Other:
- Other: how-tos#other
- how-tos/async.ipynb
- how-tos/visualization.ipynb
- how-tos/configuration.ipynb
- how-tos/node-retries.ipynb
- how-tos/react-agent-structured-output.ipynb
- how-tos/run-id-langsmith.ipynb
- how-tos/return-when-recursion-limit-hits.ipynb
- Prebuilt ReAct Agent:
- Prebuilt ReAct Agent: how-tos#prebuilt-react-agent
- how-tos/create-react-agent.ipynb
- how-tos/create-react-agent-memory.ipynb
- how-tos/create-react-agent-system-prompt.ipynb
- how-tos/create-react-agent-hitl.ipynb
- how-tos/react-agent-from-scratch.ipynb
- LangGraph Platform:
- LangGraph Platform: how-tos#langgraph-platform
- Application Structure:
- Application Structure: how-tos#application-structure
- cloud/deployment/setup.md
- cloud/deployment/setup_pyproject.md
- cloud/deployment/setup_javascript.md
- cloud/deployment/semantic_search.md
- cloud/deployment/custom_docker.md
- cloud/deployment/test_locally.md
- cloud/deployment/graph_rebuild.md
- Deployment:
- Deployment: how-tos#deployment
- cloud/deployment/cloud.md
- how-tos/deploy-self-hosted.md
- how-tos/use-remote-graph.md
- Assistants:
- Assistants: how-tos#assistants
- cloud/how-tos/configuration_cloud.md
- cloud/how-tos/assistant_versioning.md
- Threads:
- Threads: how-tos#threads
- cloud/how-tos/copy_threads.md
- cloud/how-tos/check_thread_status.md
- Runs:
- Runs: how-tos#runs
- cloud/how-tos/background_run.md
- cloud/how-tos/same-thread.md
- cloud/how-tos/cron_jobs.md
- cloud/how-tos/stateless_runs.md
- Streaming:
- Streaming: how-tos#streaming_1
- cloud/how-tos/stream_values.md
- cloud/how-tos/stream_updates.md
- cloud/how-tos/stream_messages.md
- cloud/how-tos/stream_events.md
- cloud/how-tos/stream_debug.md
- cloud/how-tos/stream_multiple.md
- Human-in-the-loop:
- Human-in-the-loop: how-tos#human-in-the-loop_1
- cloud/how-tos/human_in_the_loop_breakpoint.md
- cloud/how-tos/human_in_the_loop_user_input.md
- cloud/how-tos/human_in_the_loop_edit_state.md
- cloud/how-tos/human_in_the_loop_time_travel.md
- cloud/how-tos/human_in_the_loop_review_tool_calls.md
- Double-texting:
- Double-texting: how-tos#double-texting
- cloud/how-tos/interrupt_concurrent.md
- cloud/how-tos/rollback_concurrent.md
- cloud/how-tos/reject_concurrent.md
- cloud/how-tos/enqueue_concurrent.md
- Webhooks:
- cloud/how-tos/webhooks.md
- Cron Jobs:
- cloud/how-tos/cron_jobs.md
- LangGraph Studio:
- LangGraph Studio: how-tos#langgraph-studio
- cloud/how-tos/test_deployment.md
- cloud/how-tos/test_local_deployment.md
- cloud/how-tos/invoke_studio.md
- cloud/how-tos/threads_studio.md
- Troubleshooting:
- Troubleshooting: how-tos#troubleshooting
- troubleshooting/errors/index.md
- troubleshooting/errors/GRAPH_RECURSION_LIMIT.md
- troubleshooting/errors/INVALID_CONCURRENT_GRAPH_UPDATE.md
- troubleshooting/errors/INVALID_GRAPH_NODE_RETURN_VALUE.md
- troubleshooting/errors/MULTIPLE_SUBGRAPHS.md
- Conceptual Guides:
- concepts/index.md
- LangGraph:
- LangGraph: concepts#langgraph
- concepts/high_level.md
- concepts/low_level.md
- concepts/agentic_concepts.md
- concepts/multi_agent.md
- concepts/human_in_the_loop.md
- concepts/persistence.md
- concepts/memory.md
- concepts/streaming.md
- concepts/faq.md
- LangGraph Platform:
- LangGraph Platform: concepts#langgraph-platform
- High Level:
- High Level: concepts#high-level
- concepts/langgraph_platform.md
- concepts/deployment_options.md
- concepts/plans.md
- concepts/template_applications.md
- Components:
- Components: concepts#components
- concepts/langgraph_server.md
- concepts/langgraph_studio.md
- concepts/langgraph_cli.md
- concepts/sdk.md
- how-tos/use-remote-graph.md
- LangGraph Server:
- LangGraph Server: concepts#langgraph-server
- concepts/application_structure.md
- concepts/assistants.md
- concepts/double_texting.md
- Deployment Options:
- Deployment Options: concepts#deployment-options
- concepts/self_hosted.md
- concepts/langgraph_cloud.md
- concepts/bring_your_own_cloud.md
- Reference:
- "reference/index.md"
- Library:
- Graphs: reference/graphs.md
- Checkpointing: reference/checkpoints.md
- Storage: reference/store.md
- Prebuilt Components: reference/prebuilt.md
- Channels: reference/channels.md
- Errors: reference/errors.md
- Types: reference/types.md
- Constants: reference/constants.md
- LangGraph Platform:
- Server API: "cloud/reference/api/api_ref.md"
- CLI: "cloud/reference/cli.md"
- SDK (Python): "cloud/reference/sdk/python_sdk_ref.md"
- SDK (JS/TS): "cloud/reference/sdk/js_ts_sdk_ref.md"
- RemoteGraph: reference/remote_graph.md
- Environment Variables: "cloud/reference/env_var.md"
markdown_extensions:
- abbr
- admonition
- pymdownx.details
- attr_list
- def_list
- footnotes
- md_in_html
- pymdownx.superfences
- pymdownx.tabbed:
alternate_style: true
- toc:
permalink: true
- pymdownx.arithmatex:
generic: true
- pymdownx.betterem:
smart_enable: all
- pymdownx.caret
- pymdownx.details
- pymdownx.emoji:
emoji_generator: !!python/name:material.extensions.emoji.to_svg
emoji_index: !!python/name:material.extensions.emoji.twemoji
- pymdownx.highlight:
anchor_linenums: true
line_spans: __span
use_pygments: true
pygments_lang_class: true
- pymdownx.inlinehilite
- pymdownx.keys
- pymdownx.magiclink:
normalize_issue_symbols: true
repo_url_shorthand: true
user: langchain-ai
repo: langgraph
- pymdownx.mark
- pymdownx.smartsymbols
- pymdownx.snippets:
auto_append:
- includes/mkdocs.md
- pymdownx.superfences:
custom_fences:
- name: mermaid
class: mermaid
format: !!python/name:pymdownx.superfences.fence_code_format
- pymdownx.tabbed:
alternate_style: true
combine_header_slug: true
- pymdownx.tasklist:
custom_checkbox: true
- markdown_include.include:
base_path: ./
- github-callouts
hooks:
- _scripts/notebook_hooks.py
extra:
social:
- icon: fontawesome/brands/js
link: https://langchain-ai.github.io/langgraphjs/
- icon: fontawesome/brands/github
link: https://github.com/langchain-ai/langgraph
- icon: fontawesome/brands/twitter
link: https://twitter.com/LangChainAI
  analytics:
    provider: google
    property: G-G8X6ELZYE0
    feedback:
      title: Was this page helpful?
      ratings:
        - icon: material/emoticon-happy-outline
          name: This page was helpful
          data: 1
          note: >-
            Thanks for your feedback!
        - icon: material/emoticon-sad-outline
          name: This page could be improved
          data: 0
          note: >-
            Thanks for your feedback! Please help us improve this page by adding to the discussion below.
validation:
# https://www.mkdocs.org/user-guide/configuration/
# We're `ignoring` nav.omitted_files because we are going to rely
# on files being properly links to from the index pages of:
# - tutorials
# - concepts
# - how-tos
# - reference
omitted_files: ignore
absolute_links: warn
unrecognized_links: warn
# TODO: figure out how to enable 'warn' for this
# it's only an issue for tutorials/storm/storm.ipynb
# because it creates anchors in the generated report
# and those anchors are not available in the actual doc
anchors: info
# this is needed to handle headers with anchors for nav
not_found: info
|
0 | lc_public_repos/langgraph | lc_public_repos/langgraph/docs/test-compose.yml | name: notebook-tests
services:
mongo:
image: mongo:latest
ports:
- "27017:27017"
redis:
image: redis:latest
ports:
- "6379:6379"
postgres:
image: postgres:16
ports:
- "5442:5432"
environment:
POSTGRES_DB: postgres
POSTGRES_USER: postgres
POSTGRES_PASSWORD: postgres
healthcheck:
test: pg_isready -U postgres
start_period: 10s
timeout: 1s
retries: 5
interval: 60s
start_interval: 1s
|
0 | lc_public_repos/langgraph | lc_public_repos/langgraph/docs/README.md | # Setup
To set up the requirements for building the docs, run:
```bash
poetry install --with test
```
## Serving documentation locally
To run the documentation server locally you can run:
```bash
make serve-docs
```
## Execute notebooks
If you would like to automatically execute all of the notebooks, to mimic the "Run notebooks" GHA, you can run:
```bash
python docs/_scripts/prepare_notebooks_for_ci.py
./docs/_scripts/execute_notebooks.sh
```
**Note**: if you want to run the notebooks without `%pip install` cells, you can run:
```bash
python docs/_scripts/prepare_notebooks_for_ci.py --comment-install-cells
./docs/_scripts/execute_notebooks.sh
```
`prepare_notebooks_for_ci.py` script will add VCR cassette context manager for each cell in the notebook, so that:
* when the notebook is run for the first time, cells with network requests will be recorded to a VCR cassette file
* when the notebook is run subsequently, the cells with network requests will be replayed from the cassettes
**Note**: this is currently limited only to the notebooks in `docs/docs/how-tos`
## Adding new notebooks
If you are adding a notebook with API requests, it's **recommended** to record network requests so that they can be subsequently replayed. If this is not done, the notebook runner will make API requests every time the notebook is run, which can be costly and slow.
To record network requests, please make sure to first run `prepare_notebooks_for_ci.py` script.
Then, run
```bash
jupyter execute <path_to_notebook>
```
Once the notebook is executed, you should see the new VCR cassettes recorded in `docs/cassettes` directory and discard the updated notebook.
## Updating existing notebooks
If you are updating an existing notebook, please make sure to remove any existing cassettes for the notebook in `docs/cassettes` directory (each cassette is prefixed with the notebook name), and then run the steps from the "Adding new notebooks" section above.
To delete cassettes for a notebook, you can run:
```bash
rm docs/cassettes/<notebook_name>*
``` |
0 | lc_public_repos/langgraph | lc_public_repos/langgraph/docs/codespell_notebooks.sh | ERROR_FOUND=0
# Spell-check every notebook under the directory given as $1: render each
# .ipynb to py:percent with jupytext (so codespell sees only source and
# markdown, not raw JSON) and record whether any file had findings.
# NOTE: the for-loop word-splits the find output, so notebook paths must not
# contain whitespace (holds for this repo's docs tree).
for file in $(find "$1" -name "*.ipynb"); do
    # Redirect instead of `cat | ...` — same stdin, one fewer process.
    OUTPUT=$(jupytext --from ipynb --to py:percent < "$file" | codespell -)
    if [ -n "$OUTPUT" ]; then
        echo "Errors found in $file"
        echo "$OUTPUT"
        ERROR_FOUND=1
    fi
done
if [ "$ERROR_FOUND" -ne 0 ]; then
    exit 1
fi
0 | lc_public_repos/langgraph/docs | lc_public_repos/langgraph/docs/cassettes/LLMCompiler_0b3a0916-d8ca-4092-b91c-d9e2b05259d8.msgpack.zlib | eNrtfQ14HEeVoPPDX5IjBJIAy19nMCvZmR71/M/IsbOyJFuyLUuWZMu2pIx7umtm2urpHrp7NBobsXvJ8hdYYDabuxBgibEsJ8Y4CTEhxEmOsLcsy19CWEhMQjgIHMnut8fCx7Is4Tb3XlV1T480tuXEt3Ei+Uvsmanqqvdevd/qeq+uPThJLFszjXMOa4ZDLFlx4It947UHLfLuMrGdP58tEqdgqjMD/UPD+8uWdnxlwXFKdntbm1zSQmaJGLIWUsxi22S4TSnITht8LumEDjOTNdXqDy/+k72BIrFtOU/sQLswujegmDCX4cCXwHptkhiCLJRtYgkwo1UNCopFZIfAjyVdNgTHFGxTnySC5ggVzSkIToEIZado2o5Qki1Z14mu7ZGzmq451ZDQLSsF9qBdMMu6KiA8lmbDeDAPRU/IWWaRDpMzdd2saEZeiApOtUTs9jEjHBIceVLTqxmbyJZSyFjELuuOndltm0YrBXH1WACfZu0M6rHACkEEkPlvxMhrBgmNGWNGJCQUZafQWrLMrE6K7YLtWIAjkmDKaRf6SwiSrI/qmu2MQtv4OIy0RsjppuwAODDqEKJvU4BhkElNJSodUuBDhmivXfzbLkEBTLNEIEAsICrApOGKNDwitJJQPiSMBcLClUIUgTexZ8W01HldCmYFnjWqglyCYWxBtgjCAn9rOf4Bf4p67YYqRPgXGJlBt8MsI1yG6cA/ulLWcYWLQFcNQSNTJaAy8qEtaIZgGgR76SFhHYClGbYjGwoJCrsoIVsozEGY40oh1rJil6CaMCuODOBPhITenFCF2QwCZALmOfl0wbl9dUSpCOuIvOUQvSro2gRpmBrnRCSho+E2uMBQZPs0QytqeyiZBKNczMIymDnWdRdnQqCTDRABr8C/JROgAaLPRRg/EVnFhxE0ZFSXncYCFZA2QWN8EZbehZ2Wh3EpPdgoOlHvgeUI/fIIdGFY920dGmZ9TjBow6gclqBQKWgAdUWD5yyilpW5aFI+40g2LL5gcl4HonJGBp5DvsfHgPUBP0qWXVw6duGaFIheojOA/jAcrgu4LFDuxwWvs6G3yHwMu2GBZcOuAJT4OFVvAA6jBYLnwkQZAMCYBxsXNP8yMjog89mEgWWWnVKZPsWAJJOaWba9VS8bIBYMKHc+UGx+pEOUXnRxfBCdatwGsmmNIqCabFEAXM2pLwmdYnP3tu5Bb6JdTH/totqQj91i87ltRpRJ2dJkoDxKKoJSVzyylS8XYZFCwnABeAj+yxJFBtVeH9ciTtlCcAWEVMjqZhYGAZ5D2GXgMTqkZuRMC0BGAstZmJ3+CiODig9Sassew1kAkF4mOCfwADxHkEWB+/0U8NjN8NHRA4rh6ROKxpWRfZR1UcRBGxiBiS6ZklHXBgWwImx0kKm1oEmUCaE/KxflBvmsix3wtk/SbL4qMlonooaErUDCeZ0bh/VMyupRlPSxwLgnsmzFR1yayPYEoOSyP6evh2FQsEtE0XJVZmcNzbHnaqW6lpiaQiYoEC1fcK4eC+BizG0sgnwgg2KzT535u0ETGkpQVLtNzWhd0S50gl0misNsCRjwLBhTW+CGmJlvsOlosjw1Q3HsMIRNm/q4poCxUbkBA5RLyPnGpDmBGpRNggvILWROA53kquu6I4LYVGRggTLwnc6kDvwKZgDJFFHKDuG0xSFdf0PWK3IVOZ8+ocugQbiq4PKCgwQpZlR3ZIkLJrZXwNtRwKcC7NAZEQShVV7BDa2rvbh9Vwk4bEWgjCpkq0JeRlwQv7qy4KRyYMVtxBfIQtCoMd8HsUaaAnFsdFVwrmyTuVDcGqfzIWLgjKWCDAyapcJHWYxTp06zkLC+DA
Kl4zoyp4b6aZwwKrEVS8vCyMCMk6SuCzQDsGjjEksdNEo31p8aEwY3V2jFMhAbdbXigImRVWoUAG+mUnAMfLphuBwKbR0Ub7jhAmmErsTULAeNDpn3UPKpbG965ltCR9P2d63rJtSLjjcNNy5+svjI7DKXNzosCHpJ3BowslGMUFj5xEQYqML0BgI9icoTEWiCcOOsFIuCPIm2GcQfREHo7XKtPsiUB4JmoKNuAwOwQXoNxnJ1saScyoUsS5cVVQj0gS4NHArWTCEqspJLDBiNy4/jUDa0QfG4FGPGQViuMetGgENZQ2/XCewjgI/LwJfelTuPLxgCHUxyqUdEJVq2TybDIWFIrgotV3Vv7sr0r8sMbOrYvKZFkHMIL3Mp+DiMxIZdtuoSAbp8ivqIdpMghj7QbwCNXYw9v5+DwZcaPB+ZK6u6oMqqit4t6j2b6wOb+pKg/9hoDDkeC5nMHTLQHIN6LnFG3EwgMoSHHMukPp5BKt6ymnRFwWwzYpjI1i6EIbBEAcvUCUZ3dhVGLAamg0JD0DcCqr+F0dbUQbxoIGehVdchHGS6kUYd6B7rppFHSGzmJ3izAodaoOiv9s+HynvebIOkSNBVCAr9mzft4BpPrceSqB6Zp8cXVzEt4EeHs9kVQjfEQaC0NHUKQkPT1FvBCcgYcpGshg/2imYIj8NvRRNEHn/MlxwxJoLfkzVFypekgs8Y0BaGf0GgiFyEL45VBuQDMEAJ1TSwC/wohZLTBwtgNSFOf3LZ62aAj53akcbY+3ZZUQhMQgzFRCmqfT6/RysFQTJyGPgcAmIYhK5d7dAEISWR0nmWPVW7A0M1TaHeVhsGuYc58UTksvnNh5DKIrWxtaP9AERHbxtXNOFQLBWS7pgSbVSS6OiKwOv52myJth/zN5TAeYFBRL4BUZtlDx/x9zHt2oE+WekfahgS3araAdkqJmJ3+X+30FAXSe1g58D86XhjfbpoKBwJxe5sGNiuGkrtQE7WbfKlhoeJY1VFxYQxavukWcUET4LUjv86k1FymWxxddfOLVNTuWp4JJyYNCvD27dmuga3JPPh0rvDlVC5t5QzewbCOwpDw/pWMZyMJKOJRDyZEsMhKRQOhcV0tbM/uq3PWdvVvbF7RO1fty2fLSdS1oSSKw/u6d+2dm1hsLNXVntHctX1m9ZuypnbRrY4HVvWxuJSIWEX9XUxKWHu7NGGBtd1h3aPpLfIdrpjlQDQlUEoVxv2tr5qx8TE9h3r1lX68lPp7bt7tqzt0tcN7LCnhpXO9dWetcXOoe7Qph0+8FLxsChxCBNSLCXhnyMub+jEyDuF2kw0LcVudb2I62aBZk7ZvnYGGJF86+sH+b7PZ/s31nn4spkuYMra/cOFMgTxCWGIlISIFIkJ4US7FG0PJ4X1fcOHO/k8w8iDx2m00EbQiolMXlYJSkG2bOKsLjs5MXXnsAVuRg4Ys9sVgoNKoWxMEPVQZ1P2vx/ZH9YWEQLxFckUROFE5GDWDm8XB9kWmNjbdReTNdG08rIBepqOdRuyNgChGUd5M+hAHBImF4t2bX8sEjvCW1yuOwSISmJYEqXwl5khFR0EvGRagBg4TRbo/9rxIFgHlLDV0XA8mgCyr0Jrq5dVMlTOdplFdI5WoY3TTVm9d0qkOxVgTmBZ6N988w6kJ4yLds/8Hg7YAsOuHYzQVZUe8PewCE6AWHjjzKThz33NO7lDhaGLlEje29gLVqg+zP5E0b5nfjsfYSaSLNqHp9zuoqbWji+HLxkpp8QjUjqVUomUiEvRaCKaTpBsTI1HIhGSS9/euU7sBBeGiEOU/WoHu3Zs7ujr7bx7u+jnI5FttkG7YdqGlsvNDhELlqZ2SNHNsgrK0iKzMNZgx47a0ZSSjiWzci5MJDWRlFPi0IbOg7IOyzSp1O4qRFcH2mOxaGAVWPLVqURMkuhu53+dZXsFP1xhqbIjt4Mp0tRAewC3RhUIDcWOtfq6bcN9651yl9w52dnZ06dumO
yb2rJxz7adWwLBgJndDbzKnwjVN1NDlJuhA9sdhTFdSU0ng66t4aZGCkfidUvDjVIGHH4wpBA3oVk0yroOYxVMTcFd2VEA01DJVKBdCgZgKEcOtO/lVi0gA0tTxw1nd+1qAL5AuF22ZZ2NNh0M6GYe9wJsd3iYUbNxA1UGs8F7jU9fcMFLnzJ1MoQDixTx0GJFHF8PLFbcNb26WFHnr4MWLfp8923R4o+hz2JFnr32XKzYrx4LLFbUTX2xYo7bUovWwynJ1mLF3TIX77LTfZHFiv1YYMXYojXwkaUodimKXYpil6LYpSh2KYpdimKXotiXFer8qMCiNfW6liN2SV60gi+YucUb0y1tYixtYiyF8YsD+ehSGL/YtBye5Vy0Dv2KRbvsVy1WzLs3dy3a4L1/3aJFHU/XL1bk1ywqxE+Na8B2zFLAh+1oV//m7vELLjhx1vr9/qR1lt5Nk9bZbijLUz9n2d4ANGcmSBVP7TuTelWslEqJ3UVTG9ALPbslfcu2cJGs1bdF7dTmDdKI0YPn+NmuEjzRJJcB24vylLftyE/8s8RylZScAj4oq5OYWadib37ON6OyU76YMI/JBGSq6c9ub5atBb/Ss+u+BkuuZOq5EHNbtSJPyqcNXp7B+w9txSP+HfSI/93s7L53iLctEorCf7d3sJyD7uY5B7OsubZ/ZdvKExzAPuIezd3EjpLvj8QTjce+56UenM5B8/e5h3sPYC7+1MkPnUfC8w+dz098WBgeM5FYKtF4GPg1d+912aQ5lwRYtlimXMq4iZke97try765SwbLH/C4CgTJ0Rx6WLeT5iYIrYqpTMiOaWJZghFtQisRVZNhorKFEuuKAzFCFbcxZFr5NvzWxsbI1MdoOPjbQ4SKbPvTz2iKjo1HyycxGVGzVGwWLKKYljoaG2fJjrJNf8mDLLOURXx0fVkDmtq2MGJauioM0kdsN/OK0wrGpWmGjGQ8S6eCD4RG4+NjxjDPXxJd2mrGhOACjyliWcwZKxs0c1EW9pgmaBhYWV2wiePg0BWa5hsNi1UQTRxGyJGirBOKDOCnoPITZEcYkC1ZxWoSI5qu4r6aMASi65RlLFzRbeR1QDU0Gh0Xen0pzKVyVgcFVseaITvA8Bm2WNoiTGEzUk5qTpUmZrGFaLFZTi6hxREUueRotIdGqcw3N4WY9NM/uyEhCYiBHRpNAF02AXyjsLQOfOZ8gYiuteBzTiMAwk7TxPRuRjZgJ03FzFmeoMwS3+yyNcnoz9KovJxDA1hRBzLTVN2GshqYaON228NmKBGaKQrwh9PRWBATDGm6n2VRnqGPDssWYC8jUPj0UFU1CFB1M6kIQ2bZKQgjsCJ2UOgAelkgOHLQ5QVbhgkRcQr67rKaZ2nfPEuStuCiyo4HX2g0CVThWYbhdFyyaYEQB7OvHEqmoqzS7NF6DhynIdYRcPmjgdWCQrbsuL0sgiYV4KBFOGxhLGAXGP0xT8/QFDpQQSuOBUKjKYAFUx4jkpQMCr61ArnMGyYKDOarsdS4ki6DscC1L8LiKjxNHhqMMqgbVmIBlg0LcGCqOk3JDAommGNTtpwCdmF5tPSnkmmZtmZ/9RZgHz4grqqhaizZjyV+mopStjAVu2gaNPdTkPN0ASkXsOEK5SJNj8aCGUEgL3wCy1HOFzCPnhYD0DWCq80T+bzszqJchb62ydJOs4QYQtbCBx3K4CiaTNUhW2QJzptDccSaC6pIAVI14ni5jDnNAsaNcVnAh7AcAUqrxzGshacs4w84AAixgGZOszjh+JOu9FUwnzhXxvoVoEzAxsCag7SnG8VrDPSyA0OoYwHG12SqoGU1tkqOKxQICKx2WmitF6FBrqoIQGGnQOeuEDJBDI4hWCOAGf0EewUtkmKpmISJtQ2IjZaOggrGT6d8QtNQNcdlOE1h2gfUTC7HFgC746BgOQja7SKRDZNJEKZYgU7WYbVgbbUiJnfSojPWBFEBFprDjGzsKjcF6GcDX1ZDmH
wfGg2Hx4UOVBimpQE7+dBGeMrGJNF4xjsYYofplYgUTsKTESaWmCjlSQKWkSiydbLL1FVF8shCycTEPL8OAhTXylYWCCWst0gepqfVeKh1VD0BLqAsM5ltF7pBgYKLgwvQWZDRkgEUUYBiiBDKlq4KHSQ5YABcAPeX7imgMkqbDmqA/ypw+1jndtPNNeZ6tq5H/cxFPYEGBR906cYLPqQi2LyhDBoNaBUPjobHRyPjriq1NbCsOZBeA5OzwV8E2SCqP1XWew9EBQXnntC4teIwF00OMJCWgPUy0Bpws6B4BT/Q9QHmbwGYcqjGGjT9AAHXFuFZi5azxyyjw+kpggJhahXUiWwzReXjawoXpQf8TpP9QNvbvEYOwgvW2CmooC0UopOsJbOyHbwgDfZBm0az25FKiBkGIuBSgGckhdLpVCQeiYDf5HeKWeREnSnqWmbQOgTaIyEpPf0CA4rXvMCAYt66MaanbsNSZHE6kUUi/qJFFrEzGFmEw5FUY2RxfqEeWZycXc5ciNFjVoRN4KkJXSb3YW1hE4B7Na+bMVSy0FsaIPDc3ICjUqmEsCAB7QK2x6ZSUzArInq+omqKDF5blNHwYaEBEbqJNKigAYYYjkRTsWi0ISbpxXoCbhUtZt5QhTnMf1dY2R+mYNGsVwxYRhY3cGDbhA7V0gisgrCJ5GXAecxwcaOuEitiQUMbHI6OVS6hrgxL+Hfc8zbAn0GnAmQQbDdqQpOpTVZSibvWPgcLvlYFrvxpFaMqlliiPpaNVWmoD2EXsZoEm5/VS0I1rnnRUEnWi7jeVRPctAKdxQT3ysoT7yHChqNMociel69ZHsOw0ja7MRwBZerWecJ6EB1gBoNUPbPRKgWTgcyrM9gmevyUrMyJc7Dk1IRXxA+dGVaNpcBjKtDmeZOHBJQgmqWUi6zEUL0XXzUeDeL4IeYoW+DnUBPJK9Ghj1jBkk0WWnB3UJQGSi2XFcDEYa0ifBytClYXIhZN84ehKBK0NogftVa0SQXTQn9KLmIJAOqboZVY4atGkcfgTbN5SaABExkTzRgniluICutzVJDK/tCZRnPsB41RGGBhfMFr/YCPB3LRWMsK/i3R0igWcT3fIqu+wsJIHkQz3gKgGk1hVEqko/GF2sJwZHqhJSHtIgQqIS83nsq3VTbstqzsuJax9ct7AyCtDtsCxJ3KbCIpqaqcExVVVsWYQsJiNkWSYjieDudS4ZQqxyREAHx0h0MVQC0rSmkxkhhGXRtpj8dDUiqeSiWvlKR2CfuDBPLeTLWRKYgc2e6qblq4dWlBtIGpzKw8BDbZ6gQ0IAIUFbFUxYnViQxP4odGWhQhkoLfdQ3cEKp9PYxFSuRgABjfwT0AaCtiDQsxHAvFRVq2Av8WEzEICvxTB5g9xEerGVqzEaMQaiVwY2fAbeUP+OBhtSwQHBeIBmChuaENAZzTIVHHZV7LdDAAKy7jXivNCgd+xfYMXTgpFAlFUmJEEvO5aDSZkHJpUQU3rRqYhucIaFDPrjCPjq45Vk+hG7mwnDDzQheU8Sb3wjO82BqFg82AMmI4GSCQ70dHznNjZrOijxk+OzgUqOgD9Yb6Q6oJkamaoUEWh0xKU7ikSDzOgNq5QKYFllMYlAtlc14cie2Gc2ix+Bs00XpF2MI+QcPeFl6Ar6UdYk9fbR3cyqDVC3PClUL8aqwR18LLvEHX0RYwNY4AfXBrv8xUEnQFVRqNgPVCybBbxqdx+ZGgaL5gNiyAAz9xsY2TRDimpuJiSs4CPnJYEtNSJC1K2UhMiUVJVorJCxLbdCQpRU8ttktieraKqX8BT1NMFyoVL64gh+Y+SjHeuUAJODM6YLBsGOiSDaElBto26AOuBaDbiXWA6zRPMdNbd6CHG6ohy6rq7iPy7qgWvCJk1CNlHnGubLgV2+AP1qnUaG1Mu5yFEMPBIoCaQQuvVoXle9857XmVrAgcHaNCt/xdR9at6gp/sKrj8gVoKey3mdWA45VEEeL6TO
gh4W4V8IlbvxCDO3DYnZBXN5DqGIVtuFhEJxA4O7zIKK86qGFVMKxdy+qRFt1xQkDVOsUyExUsC8bWzXOkfJIJP3Nd6lULa1SxVPrrOjYBnJOQ4pIoKVlZjMUk4LVcOCumUnIkTHLJFJEiC9Ox0XAs0UTHnqh3PBKp9z6zGpg+8NJTvmdKxdoEt0UhGlNxGF0JtIc9pqBVEiEmdaB7kHLAaB0sxBuEr0Qja3xXD/90wj8D9MdhEBPcIgmAivS4kGmIjCcNOJ6rKqCfW5O5sYOrJMapssZAxt++16dExilN3Oruoy8YmyEqE31sxJOhxZ58IfTjIwQDp0c9aHbcrqA4sXyXzurVN1RYZ2rPV2u8Hrdn6/VjeY1Gxregs0Hh4BMCFxdW+rex/DOwtsHqOmKFW5O9pGpS2JoWxd3Cv7SD9t3rfmG1DxtKyE9Pjxm7du3CRQWdu3cvQqXjVpLBCscTfJWJb4bm4mM3FKfHsfhQY0YoFOLohAjWaAYiteIEK6CBdmE40fn6m6BHKHbecB0URYoI+4gtY8Zaktfm4jrCawtHk/F0VFgpJJJX42Y9JXknjNm6dy8Ojf5yvcsYyOmK5lA3dpsHfyQeTiWj4Tlwer/6IaPjXNMabouvOClEK1eyPqeEyes4D6pUKBKJpKJh8E8i0WQynJoDX5P2QJ21M+z1NXB4TmR7jYG6HzJHYKan0Vi9QMHvwbeHZ7Pc+7WmTwHsddn+xSQep5s9gK+GC/ieyWpEx0XEdZldBV43AIF2rD86TZ0R3qmJZZl+3vGBz/lAhPmj4IWc8sG6H3K6gcVCnXMWWIBZfnc7lsBtDwdeekHG/EeR4jsX6D2+kPiEf8RPDZ6AL7p46RjJk9nIBhP5Qizk6RvIk9jHeeaRW8fnbxyb2sYX0TSewDL+5xrGFxThBf3ScNLw/HlNQ0/ezJllKaJ/USP6k1rRpW2bl+G2DXcrvX2brJqNkpSUFWMkHgc7mouJqQR8DUdJMpFKECmmLnTfJhWPn3pvHAv8szMFGbxSoEi5q/GI/pwa9KzVdXCad3Gr07MDHli0PhxsLFEvhZLBgEsEXu+au6X0oL77GsY0dX6OgHV1eRCPJrgf2z3H0GcAApgc4N3Bwbme3efBL/eYZwNWeByOq0zJgdeXuAIHwDsa28lgSQXoocOX+dN4zeCPFDTv5b3qv0QsyItzYy+8tsAne/w4jBrEa1X4qQ98pR6oM5MXHuCozYHgF7jNmZbeYMMvWslWm9CgySyUt+kBRxpT1JHn0497T/CMEIhO2B4QvaIig4uYwYstbI8j2E80pQOhP+3FxfiHByF0Qeo8A7/Td8YZPLdA+c6/+abbGa7ZLI/vcAfPziyAq71e9S1Xh/08n7UXvMu39Bb5pK+nzuQmKK4XW0GbhrB88dHUshspGqPfBXDE/HVno2a8E3xzYbSJYhHHA69/oHtzR2+mY6A3s7F7R4Dui9LzeSB6qGoiTHk2+gUc2BcQVddNw39WcBx5eQTHQLidCzTRZ+blnW+9/f6fL2Q+7d1zW8GwM+DbgZ+zgd4oBksB+VJA/vIJyOd446e/i9pMevy70CcSnpPGaY0R+ZkBamEivRQKvqBQcC47jc8J73SdhnxNT7If9ft5bdyvW+A59oUeBj/FEfK5Z8UPhKVYJH6CY+3fPufz7rn2O4dNU+jDxeXXHdkuhh85xT1gx4UOdoNSJ79BqQOZT+yE2IKdfbVrM/hK4b6m3froGVK7tm/l/XPau9lFTD38IqZ9K+ei9tl4/GCH7ohDk0rteKjxJp5IPI1XGgULUTGSbtIwo570sH4cD+s3B7gOz6zNjv8fLE9q4AQbx5r277c0UPW1W+6e09onTyHX1PYnJGn/Nk2uHQJuEfKmCcar4Wz/8STGgo6sYfzeh0lhNO+M3shIPRIbzDA77w+c52X8BKbP5OXtl//qJJe304vJG+6SpQqbGkPZnggJPe6tzPRodX7OFaA8+bLDvV5IGFIsjP
dKeMs03qDo3tOYp5fEu9e/sqwGPFWOfehpc88vafQp8KI6UNQ+14JmKLDMQjyRTW9FZUGw755hD52sbLM8S/y1P4vLLjvuJeVhMSLYBGkBK8FvwMwbePJfszztwy9Z5KDzXjmuoVgc3nDZMdXRBIcNUqzxqL5pKJhwjKmlfHlYGgFejJ0r6x7C5olx5lei40qU6QEyehssG9+Gme2cjAapKrBKCyyxE0cNApiU1PT2WOeEgIdAe7BrFbkRqFsi1qcdNTwnfbtw1Tya00sY3RWm6eJgNAr8Jk26xnYZs800erOvj2LNvcg14DwozIu5yr0yE+gjTxBsmQSZolTgF1oiO7aGVwjrKPat9dtx66Rl91b7ibyi3bvM2ndfLoLOiMg9TCoIOH5kBdAIadnqNOwr1bNLGtBCO0j5n95/Tq8Lx3WQ83imXuhk/rjMvF2UCFDg7Eo+gEvjLgbmaVSFCmHmka3kWXc95+jelrKlt7QLLadZlKDFPc5s4HHmlrOuLMHLvi7B2NhCKxOMlSNSONpQnKChOsFJyhPQSf4/Fyhgc5ydJQrOqhoFL3aRgrGxKZkz01KpgudZqmCpVsEZrVVwlhQrmFetoEm5gkVerwC0/PyKBVT1n2U1C1qmx+n94HTbWAiw6gL84gY3358V8vf5Vt5btlP7V2cuB7vRBRt7fmnYp8rDXkrE9hKxz7JM7JdAKvbZmYsNwXDgjAo5rQLsG879fsoHt9ruqwuIxScpcpMaKEA3NEZjqyCGbjxOY2HGSRaPdEO4v4BSsYBtjZBAt2vlqv9ZL8aneraIsSx3cukGTc49d+EfEj1N3DwAVYvba6CvVdwFs03KShbfjaDvjCrAdqCZGwNsb868TPnWXWYa29OlhkhuAtwawIuOAg/Q9zbNAmgsmMJP2gjN3y8L+PKXlmhpepbCq5/iHrfxfmh4Ly3gi2mh8ciF4J658K1y0PcZO7hssQHYop+/mpye5iPxfb3nNQj0aTiuIgS6TsIu/j0dpLC7qeLtt7i71aEAJ5R7akdoPLaDOLO9JPp5DghoTujrFCbHzDuvb7i4rrdNdBb8yB6+Lg28QzlCQK4TAPiyP8eI1WTGRgza2H4QQ6QRcrcLQ4PVVBECg+6vTcBAOOrHdoTR+hDIeN4Q6xCCZuO4Z3mC88mYA12YlZWJZnTsgOGqtma7+rGuG1zhZKKGoRZYWL41irKJet7bX8VNDm0KlHGgAVh34oXg60HZgO+gt6E1F9Hx+UO4/FJf0vEmD0773vA8uex1MwWIPmtHGjfLb5fZex3S/L3OIaXp65tZ+QSve07xluUQ6iNRZq+Z2PGBNvbeXQiHYqmQdMeUaFOziS8Z8D1UbZadADrmbygB9WAQkZ/Kqc2yh4/4+5h27UAfnjFqGBINUu0APXB0l/93fn6odpAfKTrWpLE+HTtbdGfDwHbVUGoHqKL7UsPDGCeJCtr32j5pVqFxUO34rzMZJZfJFldviGhaKjOilDN9HXqsOzpQ6cxtzK+d7NqWnYpskpydQ3Yl37Np48ZMTPTKFUtiOERfzInrIKTbNtib2Ty4fvdQd9aJb5kKke3ruzqiQ0aXLvUVNk+ESCrSu32438oY7+7vcUw5ks/0JtPFnj05TTas7JZwV0qpdBvSsNa9tsuO9UtbVgkAXRmcgdWW0bPD6u/ZUElvHOnfZO22M7GpyGS/HOnM7twTLq11SNeOyIZutWr5wYMPosQhTEixlIR/jri8ofMCUPFUMno6paqeXHb5yctTReMLKE91Jz0uAtFk/V3mQVoYmqgneFl5v8xeein8pRdh7/Q4VLXD20X+tlHs7bqLnxk1rbxsaHvotLXbvHfiR3kzKC4cEiYXiza+3kwlpCO8zWWzQxiIi2FJlMJfRmWigFQh6CXwGUWbKGULvLva8WBRnkKRWh0Nx6MJoPMq96DmUDnbxeqgrUJ1p5uyeu+UCOaX0JdsIvvbLViGUMCfe+b3cMwJYti1gxG6jNID/h
4WwQkQD2+cmTT8ua95J3eocDqdSsVi9zb2sokPnP2Jon3P/HY+wkw0VrQPT7ndRU2tHV8OXzJSKpmOJhUpG02lE0kpLMXj2WQ4qiTi0Ww0LSVu71wndoKTTsQhym+1g107Nnf09XbevV30M47Yz05x1g4apg2xfc6tk3ZI0c2yCtrRIrMw1mDHjtrRlJKOJbOqkkvlgBsJjL2h8w53NI/NZlC1HoRQXLQnldpdje96U4mYJDW8Mv3b80rv+PCrl9E/58H/zz13eGvfxx5PvO7+Z0cumjr+Yf2xI49+8l9FrTZ74cz9y1sfbpe+9g89P735p3f8rOea/J3P3X/Lz0z1uss/ePH7v/KT+/4t9Z5PVd++rPWGz72idXzf0z98/CePV357/3v/7ZvPZq7+w5pjt//h6s8f+x+fvOoP//rv+39836c/FyT/8vva0bd+Yd3NE2bvRcfWHD5v38DaTz/R98v33vzQL57a8cZb0y2ffGRD298cL131iSdfO33LDx6e/MyOO7976ffP/Y+3L1t2VeW53v3ffKZX3PWpt23bff0lGwc//NQfL7vssdnL3he+ufXpLnLwllzLz/5Sf3T73stflzv2SOGfP3TPc6s/M/2ZC973gyP/+IFv//Z73/vfD7z3W7/61ef+PXX77x56ZuRPv/No5y/f03Jr4VNrLn86+N6xzFjtnNYPlP/7s1/52v/97gfN5dccOK/1IjJ0ww3Lx1e33Tj8xF/dGPx+9rY1W8/X7evPfXvnrnOOzr6r59n/suobD3b97hNVpfW887+957qvfurBu7OPZf88vqNT/Llw0cS1yaHHLnn/TX/yYGqq8NYHnau+/LYHX/Xk4MYrzntn8pK3XljY+KavXLr32q7L/nb74xNfXzb+2o92JK/4+A3XbZ7Y+dQ3rht7Q//O9R//6HXiK4KvObyidY8zUHos+kTXB//66L6bjmq/ye5857HzI2+5+BdPXbP34qG/3P9nv339bMtfvPmWH7z11zOdb3r4zuFLBesNMfJwtudS8ut7fkQGzj8iRzaPmn/3gd+ff2t64JXpqvW216/6+PWvveCG6/84d8e7Hr7pf+780fZLr73+SwPJe8/95pGZv/nu4I7WD/zmO9JvHr3ixytf+caBTyhvjmxPfuHgPZd8/TbpOx/U/v7/LBt/YvDpgQvesOe6V/+vm9/yzOW3H/n5E2v2H/jGf7tg1eOh7/+0etu53Q+FHrly9sK/+kZvNX/bO+QvXfTZFZ9e8ZF1LZOv+s3Mh37/7J5vB95Ijm9bNzLywD3/8q1Xv3djIvHFX3z83p7X/2z7q2fWps/9O/vRmT9c97lHbrz7TZNXf2X5pbc+nRh/0xUfEj5y7veeKU3e+siT4cF9z/aXf/fcxxK9ie5zL37miz+pvOOXhduuueYfuo/tefN7/unZb+5oee1w7JJ/fqD7l49mPrr6x9Yz593+1NYLR/7+sTt3vuKrj31x8LL3TT/06an2V35/7CMX/vy8R6/Z9tW3WK8pRap3vSP0j7GHNv/kmffc/bELLvzhXfcd13+788mVLR3f+4sbn1r+T91/NPLXwtfWH/jRlq1PdO/bs/UmZ9+am5761h/dd9F/vALF6rxlgcknRr/wqmXL/h8i50dg |
0 | lc_public_repos/langgraph/docs | lc_public_repos/langgraph/docs/cassettes/shared-state_d862be40-1f8a-4057-81c4-b7bf073dc4c1.msgpack.zlib | eNqFVH9oG1Ucb90fDmR1DgTR4R5Bq2hfcpdLmqRs1Jqm1dW2oY2rrWh9uXvJXXP37nb30iXO/mE3HKJ03mAqxTFZ02SEbmtd58AfYGVDpYL7Y1IiTMU/RHAynMgQOuq7NGk7Wuv9cbz3vr8+38/3895oYRiblqKT2imFUGwikbKNZY8WTLw/jS16OK9hKutSLtrdG5tIm0rpUZlSw2ryeJChuBGhsqkbiugWdc0zzHs0bFkoia1cXJeypasHXRrKDFI9hYnlagI85/U1AFfViZ28eNBl6ipmK1fawqaLWUWdISHUOTogIwoUC2hZQJCGm10jLznhuoRVxyyqKC1hKEA/tHRCMIVeVoBr9HJOHitrUaw5fv16GiATAwRkrBqJtAqQZSkWZegBRWpKIUlAdUBlDBwQbvA8+wOFJPSm5aVT3MHxtB53jRRkjCTG2tGcrFvUnlnHwzkkitigEBNRl1hu+0zyVcVoABJOqIjiouhgLRNtF1MYGxCpyjDOL0fZ08gwVEVEjt0zxPqaqhACadbA681FBzJkdBJqX2yp4vBEs2xsBHBuwef2Tmcg61YhKiMeqohByhtl+6drDQYSUywPrEjCzi8Hn13ro1v2ZCcSu3vvSIlMUbYnkak1+s6vPTfThCoatgvh6PpyFeNqOcHN8+7QzB2JrSwR7ckEUi08s0LySkiRzVuAXCPk+LNVllRMklS2J7y897SJLYPJGR/Ks5Q0bY3m2ETwt18XKgo81d1RneaPNTtyrWw69ucxOd0AOAF0ixQ4emK/Jr/Q5OdAe2dsKlwpE9twGDMxExErwQYSqQ6/IMppksJSMbzh2Euu1bZMVl9VNIXCyu1jw3K2ds7HcVypflNPE2uMNadiTgiFQv+TlzGDqT3r9Ad5DnJCrNKlb6AENopcvsMVPHkHD0P0yCaeq3iq3mBT7//Aww0UK6ChItmfsfUgx++LdkXkwHBHsN3Xt5eIfDJMwpF9FzJQVPW0BCl7xzAsCyJD7RLgeMQn4qGgXwj4/VJc8kteMS7FuaAQ53FA8k4MK8gu8m4eJHU9qeJz4TYYRqKMYW9ZNnahtb+rpfPZ8NQLsEeP64y/GGI8E53gfC82mRztYrk0u+AmzrPwnpZ+ezYoignJi+PBkBAIiqFGGOnrma4KaEUgOed1KL+XrzOZmuzo0g+73tpaU/62PDfW0XHpqe2Hl/ae/qs1/tp32p6Ge0bbTz7zfuf4Ax/Qr+hRads/N45F5nrn+u4dalv4YmLh4ye6bjx8a7D7p2to6MDsNS1/5NYft+fv33mduzLu+v7Kjp1tbz65tXhi4JVDYc+2SOYqn3vj5vlFVx+qn58/tdvjnr+r7sL0fQ8t/r4YGPKd+Xsis7vjQaFuDz6RuPhR89zNXz+sD/t7oiizPXB80v4z9fhI/S/jP4cu//bO2GO33x47Ih3/5u6ZL98L1B273vzJy+/uYk0sLW2p6bu8sL+2tqbmX44/ioo= |
0 | lc_public_repos/langgraph/docs | lc_public_repos/langgraph/docs/cassettes/retries_746b409c-693d-49af-8c2b-bea0a4b0028d.msgpack.zlib | eNrMuwdYk8vaNhoERKWJICihSVXpvYNIiUiTDqEJIRAg1ITQQUWK0gSkN5EmXTpIl9679F6lhN7biavo2t+31v73vv5zrutM3kyezDz1npnnLZO8ykBBHRAWtjZYORY2SKiDMQSJ+YIIfZXhALV3hCKQr9OtoUiYrWkKSFYjxdHBYgx7AIZE2iFEODmtoQ7WxhamHBY2VpwW1uacT9W1BI21+e1MZWzNoS6m1iYgYV5dHhgMYqNqo2LxWPApjzI3xAYMf+pqa/6Ux9nOxFoLBrEwFTbT4YI/VbWzgVjLORlLP9ZUk0EoSGtrOeryCFuBtYTtTGwec5tKw0zA2vyWJjxqcB0euCPkiRaXjrqVgIqlnbWxzlNXUynbp3/2K4D+kNVwtoVYCLE+e6KM1NV2RjyzdIabaPC7gHXUXKBawq6mT57CTEFaNlB1KWF5kJ2rCQ+/k66OmqWxjBMKrM3F+kxa2NVEWw5hIsNnJ20OtzTh1XI0lRNGgnWUXXW1TeEQF9jftFlhfHdGmKjDECYgrh9y1j9s61rLWRq7wP5CY/h0wDATENwKYxMFsYDxYPrswCA5LhNe+d/kMFj+6BcA68jbgq3hjj/6odZaLgqqmFhBWn/Eqgb//yDWx+qa3HAIrzIMzKNpq80l91RDW8sVwiNnA9bislOxtBUwAcn9C+Z/jom8NBfymcVjuIm1MgoM0hT8e9ykkIoafOYmIGekihNGhzXGLugpTJcHYa6ozsUq/+QvuFgi/sATI2PxN33/guEfPCAwZm7JIYx17OD/hgfT5myhq63s8JsPf8Txh///22fXH378gbX67/6rasKV/+z/PY5fY/SHHiuw9l9wwGAG4VKDg39rB8NMtZ25fvMV5CQgz6PrArZ8bA22cEZgxh2uqA2DG2ub2mLWkrOipTzGtjJCF2NLDaRl/UMGYqP8pyzShPcxBjs4F1TD1ul/2Pkh8xcbT611LcHWuk624pIm5tK2cFsH8XtOMAskNNXE1tSlLgMGNTbFpIPXWZoIqAO7lDnUBhlaaueCWf827H/kAwQnDwcv5siXgkCgdkh2WRuIramFjXlorrmrhR0bnSnUDG6MhKb/3h2a8pDzYZa0rY0N9LfEEpplBYXasRvDLVDQTw5QhB0m10C90xFIY6Qj4lUqRhW0szXDGopAGJtDP6oo/OlRSKYa1M7WAcmuYRs6euhGD7UxtbPFJCwEvYieG72jA5xehP73tKTPqc9pzGEDhXNA4LaOphhnHKAcEFtrfU6H3zToc6L4JBHidhZWSC6IPNSFiUdO3RbEoyJnqQpl4nnMBxfU1DTRhvIZOz6BOqk8seR7qihoD5KyU7FRFnwqoIthl0dwafDxw01tpJX5HKWcXdXsnDWUTC0QmmpKvObymkgMi440r6nLD218MnAUUsjRFI4wfiKsCNeFqerCuOQ0kSoCXLZCiCcmqky8MpiD3sOAjd7cwdbRDhMHxIwd4z49G721sbMRBgZ6EQEuPiEuLo9UGQywodUaMEc2Oh4BOnWoHR0PFw8fHbewCA+3CDcXHUhJI19ajl3aGAKDsqv/hmloyhN5jRRlWcXQUTE3eoQjZlQQCCOzP/I8vQgXG/3vuBghbf+d7RzMGCIx04Fdw8UOGpplYY3p5LS0g5pXSv2mkv1Hv4MtnF0KDrd1YldxsDC3sAlNfpj3p5wi1MYcCQtN4+Hn5eNJ1TJ2cAkt+52Lje5/TKUUzMQLTefmEuLmF879PZg/tIfmY/xix5gW5xEQFOLj4kpXhzpgTmChWb8GO/cPbWrGNuZQRGiaiQsSishVNEYg2ZUw6s0soKah1dpQUwyG/H/BkFeEW0iEl/sHhv8wX9Mx0KpJ6Y
YWC0GE+YVNhblNhCH8QhCoMLusttpvC+glZiI7YAJoMrm4HLmcAdx8KicvB8DCAgCwMC/A5RzWiLy0tNEzNRU5eUXZ3ztaAZiCwwUAWGMiVAM9ptPRBdPhzfwu8aMYQxB2gH8uGK6Db7/zDrAD/vty3RSKgGA+tzBvRgeMcYxKMgxNZv47ff8HbfI7LfaDdkLaITH0sx+0g4aaNIY2xdD3zf9Cm/yFhtg5/OAPxNBimAQJ+eU3gABqo6mO+byGeVMBEAA1AAjw+Ef8v7PYfgQAhPYBAOx3v9pMYgCAMh8A4M7YrzbGDwAA8WsAoLTnV9svTOyMHYx/a8LBvK+YmQEAOxkAAKEuAEDaCwDc0PsTiH/wje4330AAW8zLHAAHQDEt8gAbAATAgaF4AFwAboDA5ThAGoB95cqPA1NwMAfuNVxcHBzcG3h4V68R3CAgwL+Bj09IdOsmIREJET7+TfKbJKRkt2/fJiCmuENOducW2W2yH0qwsDEyOLjXcXGvkxHiE5L91+WyDkByDZsdiwkbix5whQQLmwTrshFAjQkJG+u38me4uFfxcK5hX8G6juk2vAnAwsHCeI93/cb1a3h4uBj2K5gQrpLg3bpHyk12m5yC/v3ANSlVdU2t8Ig+Bh4BDWN7x8Kifn7Bx2oI5MviQUYmPofQsOSC67zSsiZft5llXnl/TNmCmL4uqcfoB2L9WX5N2ys42BgXMJ0CJJipfuXKVTw8HOw/GLCukGDfIrvHraoWisMrZWyfXEBKrx7W1z/LwPOyHvf2lvTjyzEAATbGSRJsEoAk4BKg0UeCRXKThARL9L0Ngm9C3PmYzqMyvLYwV4sHO81rWbwL20Hf7YX4OdbbHWa21g1Hxgx+hlIJPJan9l6zcIMe2qPUJcloJM1gH3mX/pjYt/H+wlV9GWK2cNangkQHXxqwLgGxZy6ar49N/HJ5N4xai4NvXGtg+Dqf8hxtZdkMuOiKWmuDc7aRiesmmQyvHB5TJvU8/JzE9mw/3SNzUYt08S1NcU+Jl3vk17NQ6aGYA5awEaOw+65FEo3nNJcApsGLn6pV/hTLPl0opVh4S5PTUyD2U+y/1cygfvyTRyzrq3uB9LeYg/zNnzw7evg7ssRa5iNJKdMLll4/WfJiXd7epXAYqgR31FyDWvWc3Vj+yVqxsWIt6pKq5N1SftpzO3qYuHNGEG/n1Q8NseH+Wf31CDm+EV8rYTEG5d1vMc26q0XH26KbqVZuHVQKpRPFYb0tusmxoiQ8Oomj9G7S0QfUYzOur4hrKBKQ0GFSZ2baYhlDGD39FXq6v6tyTK51rOlPtvhWlH/uhFOfFnQHhbeDqr/jfZU89kpQXOZFIr9RXug9sBTRGeOS6IrjSYgjU7RAxHvRASKI04EzrlGag3Z2yMi5qq7xz9xtOHA8L68BX6MDDg6eFYptFR+RbHlZmFoSZ0hkjhbVdbgtwuPWxpedXeuaqYO1FOChjBMolS37i9MksV/XUsHR+BeEukUZlD4Srhk4Jf5ZNpSkGWrn71deMhnNSfCOuxF7IFk/lnZSOGO/K5eZvm5VTFlSk+sohTUijUozp52e5wt1YzDCkHJ4Xb+3xYEGJARXkn7tzkZmMyxMc+X7JEVUSkhEhqpqYvipnn5C4v74nQ1sQ+p7TGc+HzYKJt4PnDEOvXfWRjs3dL1daLNQzBUVjnZnZpgpKIyITnTxPvDsCfU2oyCtmf/AhPh4CchwPg8cOglvOmIXzHQ4CiH0FFu1LKa0TyiiAp+n8uBVBCH0Bxh0dMslwkIy0En9h4jbH804xSbFrVjmXE8KBmO51fU2CIyDwpOSIHTcb/Cx8b8g7nNcnSv3qT9jpvUTQIo6RCsaZNL5adZHvnzXptRfvRG6JYr++GwYoEMpRZdo/TZddZb4ZguBXTxFuJL01cQWs3uRSqY8XNFpyYSz1j7vsMevT1A239RrJ+ZeovuQePjI7ZNRxp5haYDM/O43HiFh3afWSirUZCNjWq
c6DbNuCywxXUSLqE4EWB+L7hAW2xJSnsuS2Da3ZrEBykzyh5h26NT1wbfmK2K6gQ8QY/rox7Rtd9y8nzA64t8cOuOZQ3H6UD2a9WSREZSUfvC+6F2z5TWHoWRIcuJIoS6bg5BkiRna5tQhwe9Z6j7oSu6sUGbuzu6WF15ToGdqCY1cxFrpQ1kJCcO20DacZvtaaCV2LISIIfqE8cPeFLaotMbz7dFAg0j9ZlPWFptcX9sONhUYgkrwSArXnDL7LWedfQbti5hj12L6gcOd/kft/NNqR/5NFfBmw6EwUFcZC51VpXLeSppek8Rdu4P7yMkgg9TixujWGKPGHWi2Nj8AthhSxIH7gbAUSBT30Cz/q86cFwkXuExeSeHZ9/Tv+lO8zc6nvQrXi3iz/edxJPsnmugYbbBs1W4qrYQgg/H0ociSJ1tgP2I9DS3HQA4Qq1KodnUHheUS3xuqpXLemIaYXCtveQu7kegNcXTOQMXqiOLrL7bqs3sHhocBVuaFZHNkVSwlSCXIRJv+gd+TqWc7U0qtVPXSs0K6WQ++JyEc5e3dI/kCnt8tdZYB1Qvbq9Whk8cl0XlWHkQ256dfcvJ9nMVtm0IQF6G9jsjo5LTFzjgnR6XAQCbf4rugEYTJy4ySDf2VBnFm3+AMz0/it0PnDxKbYeKO2dCMcJQgaLokpJaUPrCL7R25X7SfRYHFMhBBnnhe92yEhn+Zx3BFST3ZtN/lgV35ajEtkdy1GYLXnSyckeYinhBgpUBJfCDHZP4SAK4MaQymtGfmoLGPk35e/Ly7SGvRetH+GLXIKgUIrgvqftPp1tCYy2ZHE3JvReHrQNyLoVjJScLIQTK+gaIxhPq8fXGOwat1rsq7TBYsRMjQ1pnvlLqSXAtLosVB1XvAhzmw3IKdJbugEenAsg7dFI84nEB4oO2SBSIWr4pSkqcoqzmK5vanfc2lzVGRQUedJ+NxhW+qGt3pxNG2yxYGd7wKyxP6urfPFXPgEQkkFCLX/F/4+/9dRVc0Ukm/Ae8WEOgIzWqRWaFCq5rh3TSgOa+vKyYP76VunhFuQs3uFhf1SjxF4H3hbePjoCr8eglYetIwlHf2+gEucUy/Tud6V4EZXou9d0TpaucRC+xIVO7mRu09UU/ZzdvqGrL8B6j8t8a3jVpdHhGapffI9W06K1BfAii4ko7yl0NJz773eIgZthmZ+7UsssKmij9mu5BIdscfDjSth7e+uUAu1yRymib2EIe0K8jtp93W+9mWFa/SvIiMfG8Dos1CMLe1QbLOp2ABLsyVODxeo8FVYaK2HWVeDm5iEpIP2IFj8PdWT3cOh9+gxdEILCYPhjfvPvMI8AdSd8FGHvb89ELMP2lZ2e373IH8+tbRWWqxLVibnBY/eKB8peAo67b9e3oO7qEMfJM38d0cqC4YwGZO/kieSer4P3O4NkVKVmCjIq5r2n+Pu82jj+9CevQqdS3zuVvwLI9x89hZxhZY8EvR3cM20W0dcxCEa7YoUcqdz3d6P5xk8hLgK96Q6FYKQ4wz+DRq182t9eV2mrymfSpdOTUnj5jwOBRuLE9yD6vV6dQU45f7lqL1MKKeY332OYzxMLzMRXwhMc0xR1NDjwkornb+ip6TkeSP651/rMwnUOxUErbcdM5rhJNbR/15U0eXgN1ZNPhGT/labdzSRrtkj45tDbbtrEgt8UUtngbljb8/5/5D1eO8hTW7JojQu3D7um83fbGX3QdTEJtvnrLVraBz3sSfELwEVD/80Nv37x2l6HaZz8YJVzxoIrDmdJtCEcKAYSp1caC+2/dXq6XCIfC5baRJGt2Szyj+LR7z4kOhghyxuCB/nPCCVUop/N35LuOjYOJv6QdWRbKNG2VCehwrmTRi32j9Z84tbc0Sf1Kx7G7z8eQGfWpiLjmSJPUng1Ndvyg+8GGLEFvlx1TnXR1PBu/9zOrYXxSsZDPYTtdJ9sn20uczEN6auhvfL6rPappqphBdFYJePHymUi
9s98XGg5oD7Ks40NYUQSGjeEvrS5sR3r03k6g3iZHnokoH34d7JX46VViLBOHySKyc31PBjfcmfTJQ3mGnnyvmq3OwckA4j3aTx9tZd90o/N2Dj1vYbyuI+Lc6aHNsLcosYDucblnqYdOvxbew20flF8w5uYlG4K4Ap70vsA6Jxp5MO50s57E7OYRgvlmo6xbIzEZnHiIN7juUmAHQjnyp+n8v+LehPwfB4hf1cbhGuB46eZuhJbZhm4o2V2l94Bclm++OeNXx5SEohN9vTkgSHDeaSSWQWb7+WuisS8hipKA8l183gY1X6MpcVwcYfPHMoGDiJlFLMj4/0vJ7wGrS9VcdPPQcGv8ylbSEPux0U1oO860vzbG/8zrm1HLUnkvahmRfAqQnzgOGQaC1T9shmkGfT/iuFgMEAEgsr2dO9GtS/zIZySQ+Do0kPGBmitHzvLFesgSxCO7a3ICL5yF0V7w7yiHB6BHkJvqZIj39hz8y8A1iVerCMZ3gSwCQN/8SwKwp9GGfYqs08pytZ6S9musS4LQtETYkqr0nY3Qv9K+2bhu99WoKzj8nRhxwavn8g6vBC1ITAbk4Mo9kZHBkpEJPMxwTEXJb6Kjw3RtRO8exQt7Ibrot2PJ7/s0NBRoGRlf6xy0OUbvwJ7oCJY326r5UXHPKat1azYKENEnIEspzVQgnqnqpdTIt2IW18YDrczn5rpBN32dgxCq1Ujq3SA+2E3588lX12/YJ8ftsZTFhgh4nF88e0QV/eAF6lonJHgi0d/P3ePQVKrb8puV7UPcANlN2JcZtQU8tVP676lyq7hH+Buvujm6LmND3a+4XTi9daznWtRC8n0ySpKG8Ntlx5afWFJZf6Nt0iJ/U36qMCd0pei4AG1X61mBajqh3zydZXSOUzebH5qsa5dD3lR14logwKrnFUzMRCGPoIxjvE6U+/daGkDV2oAxLd+0i74nIfgpHqouj2W8vIqgZoXE75ULN3amkA7WZUyWp5Xgf9jhUD7eiyQdi+trwTRwzF1/dXzg9AOFPQYLyxj9BHikW2QerhHXZeQ4T8iJLbJx0VDIzx4TZPJVPc5rgpd4b12cC917zr8PGxOPaz0sPB3GEgBVKwnXWbhFDx9VSUYMicUDUpAs6CjLnPpQRHkZ5CXCjNT/0joIcToyHD/R+4X0pQuE3mhxZN6OrfEqPvNnGFJNxO8V+stOnULzLJ8MmN5Dv/plRRbbvkf6QWC61wMOxNLdmTd3b4/Q3DkRO7BvGUPDpaPS3Yr7T+Q/qbIEsoETObVtSCHgm35lZ89iKIH98tSI5Q/PdGNKTkIO/hPouVeAiUw+nk6Ak1wTWOn+SQh07rsBnVMY2NqRTYpyzemssUci0sB/fzJ3Q+U3/FaxlNhb7mtNcXMvo6LFr/qV/zmcvfAvb/s7zwCx7qVXOWWv2E92kgghX44vNOQ+mLFT+in+NNP1fzwUPa8kuAQHURic0y+uGuTfymyTnWrQv/CpOKfKCQ4ZV52mygcb71B2zqAWdk8LR+IoCOeF48NfdsgXALS8ufLRoqONHivYzr6YFz5yi9YGf1GA/2+nXv+9gqbrnnt9yZgha7ZDoEvtJKdG7/f9bYHgsPTXDtYHuTZT09AQ0Wcz6Ylj4BfHGQ1XSKVy8T7U+xWXfzTyb/zWFfP5/IYWUF7no2AlZHYviWe5lt5lJb/EHcUhu4edSf6rqsTK0p1C0K/NzYjfLW9pXLg1LveWf9exN9Mzqzmh4/TiLaYuiTl0d87vicptvlwC6SwA/GbjjGUTIK8OROb3VxHqJH7Iu9Zzy6kiyVuX6Zx3uxlX0ewV8ETn8a3ZESyM5ZjQxsq/HW9cSQJC3A8YlPu2OxQNiBv5vH2rSmH/pdGMKk1eYFWsoiUkMkJZbZyjfyVQm3pZkaZpUdtDgKGsT87wVxwK3/1z6TblR8fN5UumMNGkK8AoeXzcOjWpJpwCjbi0N74mfMmwlz2I88eoRG/5dFm3+M8G6Oy
0i5prWL0yQuXtEPUCHXbeFh3IBida2zSfHsP10Nbgn8VNIGkEk5eeA4PLvTO/a7vqh8HiqOMTS8nDeXQO+ihk2rv24G5k0HDvXQ8QnYWOtxFjh4RYKFWGPjUANNQ9Hd0HDfEu5vjNawyooqS7U5ibHXMONOsx3RQ5JVFQU8h3IP03N3Wkc7OARUYx9+n7tSGZi8fO1eFHXHesikfU6l1h9gk5xcEBXRS6isRqUX8Kc0F9q+2hS1CydAz9M8M69EmHfB9DESwBMZiIYnhhgJTN5ICpaJbix2P1lcMIwSml+t2zFMDM+IoZAyR1OlKFANDKxwibRMIcIHCAhISRGu/kxuLiEc+2ZsOG90tcejFdvg60mcuGWB14rylo05b4EvF+gUSSpQ9lOtCJ5O2e6DWqWRLALytYMWc2Lm3M8HwZHbCfIHbjprxd2T0dfcR1u7qJQuhDc3TnEj2W3z/iYvL0sKBA3U1PaNlzEpKBZIMD4Sie6pbJkQSyI0E1/oHMjrsDidDHKnN/GyXu0Wjyw8n0iNXtIo7RakBgbPxN5nlYm48Bw8fDnDlox+ystskWPTjfKgOlwccqATl0iDstJ1jfN5uyWn3aV21bOglNrg1/l2huRMbwM00YSWQl3KIaQEA31LI+EljZyNucMn0n3qrRBclbnvR5wRCfDa5oliu5KM1juOZqsN+gIGjMbjN9z2RMhmUiH0XyQXVrvId33jBoW9mAKGewc7CqDkAblxlSrAbi1rSUQfKaPvwkoP7kHD4J88ZO/g/B38B6MWf9WWL+AHPyye88JaOtPVjyp/yn4PaLm/gdoANze9AqP8Civ3spmlTUR5vZOHzOqFbBFC9Npi/doKet9Zv51gevgXYIPeHemC/YMimJR4ig7iZO2rbtVB1lzEm43+HOOclBhiUEUMqyDAjs7NdeW3YuiZIt0tume3ywV7zCUogYkb2WwJ7YsItMtSz3NVYddG3jFyqW1SIlfXdV3dziIr84Xur22Lt2AsqOqOTGjSXzmGnCkp+AZab4OVX/7K5lnTxMfepF+Z1cv8ZKR3bn7L5dOmkcZZzqrl4DeQ0mTfZim54NLALlI7Z54z0g1Eun5JeTOvs448Se0Ef6glel3nQyHKq1VnaDeLkMUXgsrlQBCf6D31XUcmte3D6lbv/TdjumPVpL13ZbTI5ZmeTOaUsehadoLn64+jXtpq+KWIdsV2guGcPunvK3N4Ba0R1INa6SZBapJ6xfDB4D4CjqPVgSwoWJvdR4v2N2S8murz6BVwAorP1SZTcRhMrfZqEhzN+7coHq0qvBSXFD8tp6snQMR7dZJcFTdcv6sAcGOXlFoy3DWqLUSQ/z366bv2oBgfXSQFei+FZioAZL8UUcdD5+74tHVnM+OayZupln8zBKBH1OaT79wdFe5USAnEi72rMsMlnJWqsURwjRmzfh3JUyECLVMGTOAOQK3rnG/ubZEeK/RYbNMfSvzvcI1txNBrIg66VeV3s0TXx9/42GzfhtaU2z9HOtlQWg79iMgQMS6z67n6znQ6hrXgAUb2Yi9pA+Be/SIgVHpuM1AEfvi8niGwHg2D9MHaZWPxfu5oKsf4rAD4ySxXejb25QFwUGVJEVQ9iVsE7m609VX3QSLeVaxJ2Wsn+wji3RGcoooLPOulyxC0oL6yr4aMjGjCvJW6kdfPWL5hGO5EvmGLJjM7ksZ/vZdIZ/IBYdR99ej+olSJTH6OgX3gJHNU4EwAGCyx54xTF4QKeP/OW2bwu/gyP8+JMcMqKIk4JzRCpn7RM0/QJUbGrgRUH2tI1r4hTEHtbDvopL0R74Na1kJT7arHGx7GwtR99vtA9QErUuRyVWNcMBIkosSvY72fbvxJ3MFowQe7Ow+GdL0ThVaNSLksuYncr7rl4AL9kGjm0eqb+ZI0mafqQUKnCsIY7MzP11/fXvisAX8+Mnky82lvYoXe9W0TyKcZIpv7LmL9r/7Xmaxstd9NxpPNH4u7vE9jUo7p/
UKF6cJ7MlNzvfxnH2FMwtfa+WfJ/mGGUOPliHG/JP3YbB3LR3t+pTfgq3LRrA2WTFAP1ZJ3lvM2VdcGPOi4LBMwXHcUUkD5nztvUZo6lPVBSTtpfXw+P5dn1k4ulmjoTdrQo+qndA3+85D/oaIr+x+H66tSKMmHsg8+qdVNvzPq0x2NV54EAzhfEtkRbiK0CRCnRKhyNGotc1MGXB65sdPWQJ13b5fmp69KVhib8L4Esdv1UMinLdW07hnt4KyYyQpwPV0X/iuEu/cAzYfK0YajMlRWJP2bH1Lt19meeo+3+w6IDG4G54mYT8vbpFk2dfHF8hx/Thlg583RFhEfZmG91vMQUZ8woPdkLtriqdawW9Sje1UumgtMuNd2vqThLYTf+wR/EW/EgP+K0/YqZLifLfy++OWwB8bDj8NDaV8tLijT7oLGrM0p3FAZnSLmefybkg35jJfXVada1ku7SmBrUnOp5oo0xbfVDleI3D1VExQ36L1vgQo7/VzlbPEe6RnNPzHilV+RpBLTfrk60xSrMSAutCvYH9FAFuJlaPAOes7iI1p+Rms+k9D2SCZH9aXQooHu38Fm/jTEFiEOdSnXpLPI/0vqA7tZ5pRm8c1VN65BNxICym2HilvTmyxB46MjFwsi1XUvRMczRXd6JzcY1RzVSIhDYt5sxl3CXhmKIojo0/g/0/P7jBVEVpkc+m4kjxk9xtw88X4kXJ+wyUgv34IxioxX7btPvXiEpD4edod1B7hSXvuSfSJj+z3O8H/rMJN3J4+qjH/OOyV+HmSCkX7NcL1MLj2whF8q6d87cyo7BPtco7gNMBr8795/nOf2jymwLdJdsYnp8ymU5M+dGOJJKrS5LwMuC4o6raAq4d6m/ZgORFt0tiymYoyGVAIaSgyJaSN+UDc+mVriz4LoS0Qgn8JsCud6g01T5SGSEiy73f1WP6iqvncOYQXjw2UCMldFgR453em9ScOMn9RKbP2PZxtmckfe8R8m9099dbiEod+Ub8L8/Pxz0yfKGWtnQtN8mbtTaV3PRsIO3Bei16lYHpLDjZNhzGzWb3gZPmAGvcmFTnk1D4mDDw6KxsWFM+ab9+qVF62dOZ4ij96o2oj/HPJ+xJW4diZpcqIfpJyupsec1hKtPjBLp5KKiEVIaBV67v1Obc9oHFty47ipc6O33dKUwsYOd9duYe78O5Ob1+ZkyD34besop57BUm0kSdLyyUqP6k/9PzfBcqnu/xupCekwwOBGMv4SaWH+LSI1TKn+wYI1BqM0QRWuHt0NIt4FxdoTY7CQXyP0NvF+WJ54w5U/UQztuUT1EzOJrNuoopKc2rPZWS7X/3NwN/wuyXZZ2eUtnYJWDIICHlDu1LO2b9/CdjlaNpbWbW40EJdK+cdXUdFNeh3QNH1wc3qgR0mQ1G+ICCeC2OWJ0UIJYX4604z1mVRAht65h5U00n4AFy10hN0PhrIQbRIR75ZsSKothVwwrYU9QXod7h34CRI49wCwiNlaQ0ggzeIO08ehnBJcN+zql6++g4l6sRKkzxHk9QfIwcprCDGLT8hdwgKKQg9/ZbfMS6JKg4BJVQLTmz20mQ6k/NpUPO2BBHm8Kj2Npp1VfIMebTP161zlyY/x0W/RAcK2Bg0z/HMiW+lHAFbxJyAXCjrb2kIWht8cgZ3UvXPrY3wAkbVciVGRcjE0mfhLgum15HlV8iqCIXJbNIHozdoMwqFO6uTsJvms2uGsqmCvcyBV9FOrFIsz4kfXdc9PlTamv46rVlxnFrMQAoyZDNoaTNjqJ18DvOPmV9RETPuGrGtaZ9igA2GqvHoW/UJ5T7wHww3/xhOEjXetKP5VuGWnugtGXu54SdzANEXk+MF09AWq0nsDw8tsoSXLe8stkwVnmiOGxYP6Liv49RI1Afy+PERTuBjwTbHkVWEhORuEfMH3l+7/Hy/Id84DDHUaglZjEQ0V3Z3RsyGJra93iMKf29DXFdTgRaFWXNmy8W2L4vhs4
81YccxyZ9eW3jH19zSfXeUu5vIpr6HepbzykmAyLDEijh2Q3xajo2TiNVUJpCxgNAVMbru0rxdCpJp4Q/22gHP4ZcBscsNYnuCxjj9jsmfdS8p6eW90HUn0Uhs+OC/WRzmP4Juy4B8l28XoX2RNmOo+mzTfvjx/P5V0WvrZyieu9fWOgzez0R2QsMTPiBSg4IhOH42aoeEhyv5b9zeLt+SBDgS79+t1ci9ay725AxrUHLQ7FytGJoYOqQ6L/G+ZISe/nj977LuQ6onlwDZes+o/EtADIpDgtHr40ytwvAloLOSYiAlM+NYd6TDo99VhT273NS4oVc4gI9HwyHMRPqF4Gw+NKwNMJ3MYaVcd/gOryZLTK1nQWigIUDxK2xShaBcP5iXk1mUP9CRIcxUQI+tZB7+mqJcgMetQp4dkHDiikVuxhHCZK+tuCohNjkcU3U1qnMus0xG/nU+jU3cp5HWccd0HQJTBzhaS4Cndw7A8f5l+USmgVtE+7a0oWxoM2xDRfm1CIPos66wxi6j2M4u70jAMJGNF8h4wNxxgWNkYxVH/f7uBacidLbVLTbRXlCHm485gH5ed76urDHSoWsA7daKjUJNvcOuVjiNM69NLxAWJ46sW+IepbJRYbFqyFF1g42+1etXACYnbdIomFks4tm3b9SNWj+k1VhZJy9rLqP9ZLeHQOrRRWu1asP7/F48cmJ9CXyhYtSuzl5Sl3WaXODE6359+OjMUX5zuuh1Gdlty+bvTBFLLIBvJR+ibrkCRLI2FK/Kx7v0xmAHFd/X6ot62AWrQ7c/CBzChyBKR/Pv5d1L3VxJepGdHjeD9ukb1dlxZqd93bAAsYrtQMJkyXJx4I8QYGLVPv3bJQcht2edEQl1c872SZ0z/O7l+oiSqxIb7MJMXyoeMrvHh9ULBVPwaDcfcF0B1n7U2fAfNSgSYX06z6fMyx8woK/QsLi+uG6atcgqZXJhmJDX8ZlXiSo/dEkYAgHrbEnZ1NdXzw+0FoU1tIGzLipDiNNKFEUQGr3zGtEi2/IcrHfHTWyDRxs1vgiy+b+sbGPyScj4OF92P2h+igVs9dBdvmmhQGScJYpUDnkgebVNz5cveiR1g2aJvNDC5XVl4ePl3atXBSvcqEmZTkGB5V/GNfhZSD+XZS9uu4sNhWSEtS+lD1BYSxThOlciyEL5CnIXgj7EuBgAvc9vAZgbkgaC6rcRemZFd4I84ustUerVfq0wt+HKB3fGOt/0Krf3f+Z71Qs+Fr81nPTEUU8561ASxm79iW2pSIcBdf6h3vystNFulBBmJfO5VIoqDoCXOtALL7720ZSE5370yI+9gWZC70dogFs9zyaN/mCspSGFpRij+8MBkdrbn+4aAeWHugR1G0wflJzeye2cmwXOBu14DMTkUdeV0ySmy4D9P+fZM2zhPbsEqMbtP+2LRDSG1FFLn8vZDrHKqac6ajj3pLGjrvnXuNH/3dklimTEAERwXn1nZyggeHK8vNirqc7LclXdaUW7XT02rVzXpLhr0V2NsOFVdqXft2hUfjbi3WOJA/asjnYJIvO6ZdruOZVFgyOd90oksupWXpNB1qiPsztXLgE9YtO9fZcAgsRHRsfLcSeUkrNRuVtH8z0rYMHKULtjLK/lRNsV2CXAV4WO82y365yQdqvNXUJo94Q8CL2gL/FTKrwVc9UituslDTrjMSIOFrvwSXGroCB+R3HhsnThG3IE1ZrTYhFMKS5y0HD0VNPtBtGqRzaSVQizVBAQi9NRSzPZU7vVrbi9PXK3X95jBw4j/wvdpn8NJKphPkQyGTaFlm1YwMVcH7rUJqdc3BV7UbvbI7jH59XA9B9Gt9J973NH8m6o4OZi1lG0BIWQQ7GXBR07iRb3Cfl99nrIvYjaOA+n6/eg6KaCgvmKswK5On3BFYOaxisCDI7m+kdH74eU+NQFS8429uQn6Onnpf79buQzoYVlwUd8xUJJVLPNXdMeVebannOSZ5vTFUbf3F
o8qqaKot+JOfmgbS4Bh2kGtfuH72BP/s2V8/+q8MXnKyuc3RsqQcebBOL3yi/Iso9Fv70uuhB+5ew5+VJyOV4SgX0J2BSp2b4EJFVPb9L1/h93UH9VZMofBtnqW7Dv4kTZ6tpDXUNv4rcCKxoTgyAgtfljAG5TLhfHEGWRF0h5TmD8WDcG6xLgo/blfl1jY2BfUY2JOBT+hZPTreh8wNXJe6jA1Yjfr7vH/J6EJCvumGOyh1r74VykznESqwoMeP17R1l6dwomKy7K5R5X+J2sGm1l0xwq2amzoqP6js4EQKT+TeBk+9LsBYZ2edaRqe+Q8fq4vo3v070lGXnj1Cvl1dP8kppFxjOHt+/bZzya30exUoQ3QTijYvsWU4KFUyEGHd1AqhCqVO8ygxMnJ9as7i1B/qsxX581QpB55HkSrwlyDcmX0kZDC5JzaVdqAfi17UF6uwji/cHaWXXZJvRx5kmkgaraaA4/oTYqam/RCNdWl0iUfu1WsduC3IpuV1kd0fIUzvrt0ohWHr6ZErBclyHqOkNodbcF8JVkgYvJ82G/vHydJJnoIrR/QNN/cBtDlvt4zjnI32Lk5dtSuflKl+zNslO8U7y61PdmHy54uCU/Wl+MfQk30r4EiIhJVuiuk9iPdc6vGKXIda0OoacQ42rFRfyFOmwWiEZrFifyYgrqshHCdghC6Ky10P4w2/B2hodul0UcC5SXqCdonLT9IPEgMWVaB2RC/xB/9uucsneTp1z1B2Ln61SAS4DJaZ4n+QXW3CWAL6nENbpa5qglTWGk5CP98af/YNM8dZV4noH0hHl6IzuT/+4nycBAlZU4r9K81OGhS0B3CDFay+0u3m3JkOBrlwCDDq9zZABfbjZvLK+vmFyrxXjB/E6YvQQYXsBHmPCyyJTuzWyn+EzJApJuf0VqtWwD0QC3qarMyAeXuFGH1k1urQraD1xLgwQo0DSU0xWsM+VZUEtt3qhwmzjMyEuQqX/XMZwR36nR3fNUlI2ryeZ63AN3qU6m6pTobvuKaZv46rVmE5vAufMLZq2hM9mB3m0OJ1D6VsmuJnuHb6pZpLwe9zp0jKvhJn/PVEn+3sVphkGLr+tHa9sGjwef1mVa4dBtfp5bINqpgv6vbdeAp3UVDipfzzROojTNJk1myj5tv5vL/SyeEEfv/5RypYVjdtrJcyFMhMg7b5dSVGGojxKElwFr4ysuIHveUs/kh/dkZS6ooLjqBZY9IXAdMD11o1HxSbT+GfsZdtOFhaTlfm/Fkd4RXsBZv+fQms7/zaCcDQlZIYxSolqa2cCZVO8kMr1YrMl8fCZSE7zrg1Jte0+k7bvKXwufhSu3yC8dHh7c7p4+X9yHlTJTC5vxy5Y2oroPcpFz9yXvOxjx7Sahhw86AjjzzOr6yTctAhf4Owukeka/birLA4FXv6q5Vm8AT8fRKHzDbqcWoZPIiDQy2vfCnLEuPYdWp6Y7fky7KsIXG17wxN+7388tz9rO0HDuyp9qf2peRDiFQNQVTVk70a+sNl++a7kEvJkoFG4kfvkhqWsqNxZxo0vUOy3I1NfYkKuB1bcJvYkoPSxjTNow1NklmzWNGeYlLT2ld3KOu8foH7BU+J/94iN/LTuFNmOcu3W06RIQORDy9WH9uGR/CamLMEoi4LiCanOhZHFSYFLnNsJo2kZQWcAcatZATJRC6yOFB99UnlBaP/a8LlzJs7ngF+XWygb7b9L7jYr8QnijxbcRT+Mj5iPH+1U8IucVTAcfnwdEQfofo1srj+kfb7roDDkZLl6lVRFo6xC9Xr2q6qz3MABnd+xNQtQ9vIuiuKKlCxVJwZ1nOqH/aeaXM92XA+LfD7gXlfJYejisK1LOa2SOgiKenX03inht23NnP5Dt6cdtxR29p3Vva5fxFdwXG07OjKoS7YrLuvUl85tO208JDO8uMqU919xwCr2TK8EiDJp9WfmU80ZwRfwJ6qO5easE7w/HnngJVtXW/BTraKTdC9
uqBTa2x9qCv+sHUdWgQm2YJ0VQMWwheYcVfWL8qQkxDFPXjxjxyskO7lTxtisoTK8U2CJKsWXtOF/rEDIZqc4Sxm4dlKVDVzw3LM6hqNN94MmP7btcsMov/93+8E7WaNcnrcZh9tyTc0pFCKy3zEE7PHe+8Lvmf40tbcXUEkq38R3sOyG6MSZF5+Z0JEhNbtmQ51+t0Uke9ZSek+/foSprWZUaLc4Z3dqIFZkgnMNrXz4ZyEG7o5ZV2eg1t6vhGVBySsGpf+dtwYM30/hA1Tv6hCJI+ilyFRM5vx4zLT953h5t4W5xecakrgkRjk2KoTMwbQr+vsr5LeJ94CWA6cBILisrJiaGqWfupDZAes9YzEh7G+YZyPFat0u07381Kg5UIY5q8ud6z++o/KErmZRvk+qCM/uoWFKOtGnfi9xv5euKTJdoi2TA6gU7cZ8PBZv9c0fDq033x3VH8ZXTLAoEzTq7m3Up9fGUzSpe+Dm90Jd8rzHn3eNF/J24v+U01+e7TOcUC/JEQfXHU+Ht0X+HjrZzyJmhZUgfqtZk0URziuESQBGbsmeQP1LhNuNY7NyS1Tzi/uAS8FHqIsVroeZuZ8YbrgqCkFcj1TMRCwUzJZTUfBNBHAvPGa/Ml9bdurrgiL/5CxmDohIySn3LOih9b0hLkxOdY4TTeEthFZwjmFx6rEbeyW6RcDJHFEHjVGw95LCCFprf18rOz7D+3gVKc0CtydsE8OOSD1NAAoufQNjdvpQlpJac5QE2C9NgHNjLezZW/gJWRm+LgFrdgrLl63IS+EQemdepUVcac5/j5bOb7Y0ErbpzzByk1m3T+t39LoTi4mwcKlOXjErixK2suL70gvg6wrvNTVSOtsZJctJzC6AdsUoaaxV4/P2I5etEs/a9cGEDUGzu426wr0hOQbBb+j2qTsQ8AdzbDNE8LFz9BHi4t707eR/ZHDRcsudBsNhZerVyNu7Zs5YWcQLH6pckrd+JU5f8BKP5QwiXqMYdx3hxvyZYqXiLsDwB13NH7XUXJhv3N6FYWiwDBV+jiycn8EEMIRM9GZVEdWgyrdLZ4VvcrthvT7AJU4CfPq1aDe/oTEwIP93i/zhwCpJlcygXTQjmwGpAlTug9aaKNnWsxsaDB50dYWW86fusBbdH8fMsH+5Rnnk8P69mqY0XetkpHHBnZqry8P5pFaRr8FCmUqolrVDwcwqx4bKfpimN92JCJ29FdzxN/QeH9bs9ncMnjA7h7+2TXo41GIDnqHWL4eDCJXAxfKCio+JJSr7/m5KR4ccj+i4xnLBRYl5gUUcL7xjWuGKlz7c7yjyz/BHUD5jBvtJKUj61ulFrTQICWiM8UbUJs4V816mIzfSdzJXqcS137THJAZkXY6O77VwmtI7NI010rvagrMm6ZAyl76vs6WnYe+CRwcIiqAxlfz2vSQjKL6JUufuie7zEr+r+AqdAlVKmTnQcg6GhKU6xn3WRlB0duvcaC3XU65NLQFd8FTBE+TEXZ8cX1ewzYwrT+8ClqXPf83zbgoXxqCgvhQSrKpZ+YQPeLOc4BJN6ffaSI4WfqhesBdXrzOR8XGrBVBNvCxSFDHiU3qM3tbhBLH4mmSqpQIsuJfvrOuvzK3Vn6QdnWF74Byzw/LszWd427VHVEO3HDc+m9saP/L4C8O6wrvm1F2vkKz04zxPKECkIkFG3dilBo4k6bw25QIrFzzXX4YfryZZInxYtv+gOQZXfXBJCIvSJtcpvSuF9ovMe5Fic1lBUbEz19y/88+z1YXe64UR6r+BizVj5vn6AmKSWReznE4RW6WkR5j7B/OYrMzxab6fqXFpig8q3eynwnt9Shcf/2kD6n5U2Y+vokL0Im7ifZrMCodvN5ewvD3ShIU68cYAJNMFzj95goy98ZdHe7SQnUSoW65GaQULXHFz0ONxo+eDspjam+hsCTLZT3pWUgkROk1e76G4Jfz6jHhiaMz2mcid3UJOiQlFjp0E9Ud4VwfSHDl
eWhfaZxuH4IffHZoMaVRtAovcHfVPMExV4/HWAY08PKcTRR/ryExEj1fOpEHXyt8vxuYsau7ppErLe58semUtFT90D8E67TzVzyYvjPYWXmZZdfzVqh3h9vz6HYn3l7CGewRcBKzhJYmK17TLhUUjOBpZi582jQ3A8zYTc1iazqQ9BdtPyqx68REacqVujOVUuVEoCBpXBvi6j2IXP8JppDoR8SRjjysr9KmcfpM5v9whzik53Xz8mbA6msDg3CmnQsnLgb8jwybdfZ6EJLDg/uAhoeDFdBffYd/7pbZaCAqxLMrkgr8WiilxQZX7yXswQtyZDLsm7F/gddwBL9z7Ylp7j1aWry7fMJzoYOmUlWvFltH72qtosEftmoRL6/BLgMm1Z8tFC6AzRrdC9L3AJuMYX+ThlBnX/MXXBBl3sk6n+a3SvqLeC0fMTUjg++/XztB+hHmYQutiEjn5ynWYtuFyxWHyngKALp1h95zJM0r1rTJwF35JNMyhAH5yVDmmsqrB3eapb5Qj3FZmgWNENVMDNKix9kmYM9OPvYhQimXi8+kyYAGffNPHilir4KJtNVT0GTWYa2rOBvRBU3b20NLmxVhw8IWr2cji4uwW1n8Ujza5XImOQwhj0VZCiKBrlcBPnBXzmCZpN9EWVKvM/XpnV3mpvpXcPEy9ortNfLl+nxHNhmcI/yBbOXLUkfNzFt8a3bOro05nI0yUtaQtWdp7DSZL6UB0iLsVSRIc1+OKU54GjxivdirU541gtAcL5MLF45YD3wjPeh+JT57ESpxHB05B28+jmxuaCIAKIXH0uECABqH3yu+lPVFpEnWP9ImYkPQIgfvB6Sv8eq1pEZOJuWfWYLI6HKymv1MSt3OP1fWreN0lMofHamcT3OUYquhHxlM3cKdlTy8K5VpT48cv8H2490pwkftmq4LmUNXhToMaJAWj8NnLDK8CSL4qnltcJdyHsmKpfyoUQtyvIB7ftjxVIQa5OqMBPxj2V00H4ynm9qVukhZWKqqX48BXm7i0azYT6eO3GMPq56at0zZ1pX3UTqSi4YvD9O8N4IKo6Ubu54Zs0wFY3hk6dZe68JCOgI12tlmVpcnCXxcjKoq8ZHo8cSYHVyxUpnFc3fJsnhALbvuqVrhF9Hh5t4dN0k3GchyshVaIchqvEF1656OvMCNgHbd6T+IdL50oajfpG1jtthRvORQHBXfpSWVIhRREbifaCCO9Ky40FSbe28tOdHIX6qTVblB95+e1mkRExm8gGtYZSCsowww32oofDlcacvIpyPJMv1utFReOcxMjXabZlryePR9kE1BFKCb4295Uob1Vq/YYYXR8fq4z4gFV5HFwHEJ9J//GURc6SHRTSX95i1KYy8ohqZISy/5gptMUMBKTETzy8hQ3ulWd5TsGc/e02X47myEp6qylaqAVE8QxPp3i7dPbDO2y5mQr0vVw8p5tkqPh+CuZ22U8lihFoy6slXfmG2fEn0c2idrp4XLibdDtfnPjzvAzigCp0xM142O8GhcFj4eqaWwpcV+zLQU9GdYOIJu4aLqGHxxDErPH9S834KUHdRGHyeU/+Ba9s9hsx1FzzNvs27h9CijJoRdkQ2tEOSlugs+rzZe63jVWs6P5DnW2rF+66zbNbR+t3B8L98+fNI4vRkd86uTtwuuH1eVceG7T3DpSZD9faHao97CFAr8IjX9brf3umlpm5PgQfXoPMudvVM1RzdPKj3V7C7mU4UU9cpxa40fk/BtB7WawzlyD86UzrvIVbvMfM1YUsRmFx6czsptDccDPyBN/rz7PGgsJU7wKLW5dvCfuvPgIQHkxsVJSs5tcHtuwL1z7LOog5o2ry0gjYv7K5sk0iM2zVtZFvlK1Hv+n3ctU2Qc31taSJM0368RDLJ+mDB+8N7p6mLIZ8NCCccY1b1khMJh7JdFea8fHk/NYonVWYyLOt+c0o414aKS1ZEN2eHTCy8jnVQO+6IPCFXOybpC8SOicU0j
+1sfw0ECPTM0nL5oGfXEWs7r4aPnjvlLRuX7A7U/wxbYHiR3N0SKSEz+CEoV/7XJEuVaNrrMtO2QGpfoHaA0aSsk16s3nYvK3anTpgBcKfAxFvFhfX+d0JZP4gNIQ9EBn4FjLz9UHysh3sHXmiNt8t4ZfAjQq8jAfRmlP3PPFMvXglDbI19G59+vgpQDk3lLa/cQrep0ribibz28MlC16XICXGAZPNljfNLWzvZDav3uoQhgRQ7eijrFxiUcBbj6Y9K4lbfgay3Ev7qsQrNe6gR+v2gVUptbosN3nwSpKOpdmAqeDf4an7W+C3VmyxB6dFUveveTYpzeHhmsz5bAgj1KBVEIjKlA2IZlnNH+UQ9OUmXmq/tLzFiovceYfCfwBZ7xReZo2j/OrN8zc/hytdlfcQ1EH7eMVXa7CvMtrSszdkoSPEv/jC0yDk7PFnC/3d3cGMjbmbhhnc2ZYCt3QYCa3KLMQ32N30BdeKSsW1eWZnLwFbpcXnHD1ofopERllnR8VGQ0EZEBxk/nZDHwNds8wfP0inyKq9yks7uHT+OHvMfAtx+1yNc/BGw9804q/M7PPOn/OPhGlMAhtPwJui0SkBTVBqaaDYkEFcLDnP+90mFnM2VRpPJ/LKaNJW4EqlOXVE6aSOrWZsif84v0v0ykLQPKo6FS5Fi4Xrgiczz5dPWN15UrNOCfYJAVrq6bxJ5vmSyhNC2YZrey0mMdw4UMz2EvCaZDOT18wwaU6gqmWrRWaxRE4NKMB9NPK6CKSUi8zDra/xR8Renwjeoehuubk9VLKRwLjCTvX+jhf1l2fNB8xEQ30O3l21krzy49alh+eL82w1Yt+pCdG+/Y62xmYt6g0mm6W+8bdKI+T1j0Vtnkyz+WREhEbBH4lif560LdvVOaX5Ppr+dt/CpHXr1FWNRytydGwhWDxmemA0lSZubzXdKsj80dpTMbk+HQOVoLeNZDxDujbXNvhzI3LsJvCuRgG8uCUTVPIJtwYI05DfXsxcdQLpvqq9yaNL4CjamWxXY1VCRT2bk8g1X3mnbtkG9EFnshRtNK77UMvx4t0cDQ27ZFpGu9XSXJfzhgqz2YTc4DuVWSJkVRCk1H7djecOvoAdWl0iGc2v30eJ9q3Oj46jJYzWC2gfa+hkK8nliK3yCUG46DsZMySLUlfW6dDi2Z2QapsWiUQZ6qZJkmRZxmlOOTh1InoR7OQa+fgKLWzQVxxaL4uOg8/Gs5T9HQ/stfRE8oqVZ3P0Zbq0M9s4Zsxon8cOjJqNo57vgmgNwyryxDfGlkbEs7KrHp8RtV6YeVZqvtF29jmiDj3rPEfl5RCVJt3Zt0z4/Gux3p16cXY7+5xxutT1c9bBLzq/KOjb0DyrKFQhu0lEl9olvP/oTE6PgZnTbIHHccb85dD7RiY4h1si1XKksJ8Zfx2izEFQi5o3fidKZX4ng9aNdNau1BarrWFk1NtrobWrh9WvRpC1x51jVjlEJCok6VQ4eG8nDDJ5iPWt8rtWzLZXlinTgru4pqB3rChh3k4VRZP5Ssv7puPiSCe1+HCCEOu3uRZugqMziDDsl4B1tzqGKL6U0Ydl2SVLkrLTA5ocDWa6vMcCc1mxGxYE75sYk3smG8x2GmfLJ9AgFXcXBSC/uEJDNe4HHqF1Zc2dJcRWf3VWTueqxppaCQPlMHchV/dWS0GOsd0IV8L57bPe4wNQQx6JyIFZSSeK9k3pdp8pUJh/HFvX5Z1v9ab9wmgnwZK9WFoKoo+X2BCyjxfIf9Xo9lynjy2FAIWzo7+wFXiDUrm31kuxKMocCbwleqjW0XGXjYWYS2R6Es9K1/hTT1pWLUlEiZbOS3gptaE24ureUuiO2MutiktAxLMdMH0UodADwnXFaCV+hI6lJdEQYfrOSXkg3GfuYWG0KaRRUTyO0c7NJTDsRI+syxjNZ8g6PMFvEzBTRp7ebJkraEUhQFOlNsOX63IvF2wzQa7D5hFTQbNkMw16L8RHk2pTbZ
2C5CB+ZLKQ/OpDO5cS52vlGYUkE5NvJf2t8xVuE3cmJ5SXbbJVeeld7LYuAUd6YM8o80PKgIp/+YYYGp49/9piOecfsPjn/3bo97VnmiEn8dOfNN0UL4hMarNkT17qOD5hUVysA4Yr2ySd4WoX9xgIvr3IpFjF3bNzjdm+iF2Xv5+c+tm6AdxIICBmYi0CGvnwsOhzTL/Ql8dHB4v2iDcs9oNhTyNk05/d6aa1gLMZJEoqmdwHzeSltYFfq4bgISoSiLpge/hvmKob90c9UvrAriQBJDP2oYHq1DRiAvGQurGlSOHPe6fR7Eypgtb1o4ofX1V7z34LH7jLYsp1AiF8U7PymnmIyv1ZOwQA6EYV0zwDAG3LHtXe2hp4ouK5/50v75ZC2cTweOE6klmByMo71tSJY/Nel5Xv6fojiZ1CejAOLhrjWPqHAHGEkgn7Mv14UVvPY5ZRs8nxp3qdAoelrBsCgQMH3E4FhTMFhfP5ymta32vizYCkwrPoMDmSokRD7AB/xnfErjEynx9u3qzFLpSMScobMsGcagn3PGWIUn2JL0rz3f+PS/d7mmWIu7zQJWBTXEd7tHnNcjsioXA227u5L/IsomDfqu1deEn5UbKVUtSOL+LJo70eVvBk9/JzPDv9o0mGVw1KT/U9KWjlwJpKlPm5WkKThhXszHc6Hi2GoIp3gnw3qEVw2QLqb4+fCfe7KKpzOyFrKKe4I6mtizNaGo/0eOP1v0vjo5Y5/p9ivjOqyW1bO4qIbgQEhAChKFWkKL2DivQm0qtC6D1GQLpKLwnSiwlsQbr00PsGQu+dJIbee6/h7u8U9znfPfeMfe8Yd9x/z1hjrmfO+c71jrXmKhMa7oGJnfM7PEp6bp9XWiWwZ7+hG8+vzjuVv6ZNlxN/8yPbVE85spxOIgw/xALQCUukucBTCBdlOS8tHvSzPMIMhB+1T1NqzJJHUdLHpeQkl1AxaJ5f95iCvjnRy3jX7rNjgfInoiY+aROdz4z6RDv1Bm/TyTDILJkWChktUOX4VX/l96hZ7ujvxB8UUWUz4E3EVwQGy+p/XAFy6Fx5mlA8rKzX/2kufjs7CDiZMELWMtaJyl0BbmaMHrqfBLiVTldnsJ7+vWYCxXzqBKqiWeL3qRL0iea+vfoRWVReSHl0Ov1dsmKaXTar4MAaxCYeQT6jTOJchYX1zQIrmfkMPSu7DL88JPCMv+5r8WPH16b9RMW3GlD51eBYnNKueRnBhXSV83L0Jxp7/2I4fxauMBpAs/54aaYx1hWn9RMZBGk8KqJbMPuTHV87TOs8TkhjYTHMnHnS2VVPFcnsY1Dxe/rrE3bwDLAXcnjfxQMLiuWWedTYGUagmmPlf/GXTEyncFW9iL93q3lAWSfNoH8tKXV3SnkZKF26SVBDqNUor6hMbjxlofgHs8wgw46bYemzBbn/2uGiBIM7b/kzT4qNAsSID+L81wJHT2EGWRszFqmNDLbWe3xoNDjRLPlYgR3Rj2vAvFDSCuLTtq03wqPrJacwFNyz+CJMNHEFYfQvLuLM/6WSVD1Tgc7a6LkG6wsscFjjPO8nyt8Jz9JrZYzM421bO0ambbA/vQLw0qdXXQHI+/CXXw1RWzf5ZJ42TzFZelkJGZbslmfHb/bOkTzDAoiqATKLDyP7671L2fg7t9TCwmoG/8RdUaNtWsRNg1CT7Qnd55JMTccmZleAXwYTfdrTAhh21pO+uHLPLmFmdv5fM6aM7PafPTX/6zNkXxPQYjXkVOSF7BXgCoBCoRSW0t89rg0NYWAxXy2UQI6iQ2yJEMU+DwwHUQQ0UDc+FCrIJXLJGZ2pIhPw4JYMbwSp/LqTGOVBkdQr+3WthpDOJ13Gretd12K+ttftKY9Xk7Vg3MRAlIOfcl7+O0MCGGfGY5UB7uznPWf5iJeeBLRR2CG8PaIiy7V+ZaH8nqv9RXCRG9sMz8gv0fZmwq7JwWH1Uxdx4eLDnoVaPP7Ux5WPBmOG6n35IETJvu
SgwG2qJua3+b4wxFnNeinQX52ZfvnkMbmchJ6sYr+f6cF+gs6C0/LYgFlYypP3vlcA0vfY+b+3af1N6HAyPI4kdGDHXEz8YlH3FCRxCB3p19OKKU83kPFgtWqZ3b8Awvr/S0rieWj038W95HDBW76BONIiYfNf2+b2UDkhNkN9nWcUjulqX5KVhy5yqkZccMvIxn2DYkrBNzunBFL0gizQyFL4iBn+mCVnX8/u1ceEguZe6BFcVFikkCUZUCf9N5smLDnVmVnkWv53PfjP2hb24/4qrmPIp6KkrcSdu2rCy37bcTJ8sIf5EtvrdoNoAoz1kGIOdnvoKVUyR54OlQFqmGMFKS+yP4I+yCXD0xEXjzRpmbQlLEfskXUZS3nmQ5S+4y/+cezryT4k3EpfP5BrfF4YkdPO0kJ96R963Gmgpbi+cwEey/dozaIE6vy8tDRQ3dKmnhhe8AG9eiZSxpCpvWAShW732nK2w3Wesqe+wiuXRsHaQg653kgdG74w1R6Vvj/ha97tIC6QoqHl4CcVldurTktsZJ0q8QUU9UaFv89VKNKof76n3DumkjP343qQuUPe1q+jklDqfGcH6ds97VHoDo80eZbgTss3aa3LVndyj3A2s4t7aPnUZXJzcMnilKkIyd33wb8VXssV0a8szN5zKjBm0wwhrmT9PB2rr8D42LfIDaq2YLd8rfF0UX1QVNeQHww3zgmil1iHCFZS9VK/KVrKMxqJSpXTzLVILf/Cg+xVTkCUXR/03hosAO7XrmLfzCxBzds/fFVV3jsGF4Q5oR3A7VvQzIRtjrK7uCqovOLMAZtlSmiutleg4nluhU1e52ovwtuvEkV4u0ApBjK4lkC7Op7SUlhyDo9s+UZGXOzm0ofxN6hzaUzX5+R6w8o6+JdP+9CRJXQafCYVkDsm+PW7P4t99MomIVN/ZXOnYFgf2PbiNceN/3ncMiGvQzTnFKWo1ozC3x35kPql8Jrr05mIWTxxTJafh1zbgDIvJW6J4ZsyvmpsWdel79X++Db+1koopaY06dHC7sm4jofoGDP0TmkVchZnKin1A+s6Aalj7R2hElyXftC3zkeZezDeNM6lo98jzyjQGCIo7fmxBs7v9H1qgFPTrcNlTnEXliq53SN3s4Lz+7BRg1hRxXwFv9ADZb3I5l2BwUR6hqOGRNJWfB+b6XhMiaMni3cFSr1EALrlWVHggq9qM9O/BPNWO9JJfi+Mtr61Li+LQyG+bi+lYKMOjgFvD+xeRwEBh2wiovBOBz9fnLa8eofDLwf4YCuKNO6CZNn1+/H74sIwXPe1yYMbW5W/Mwv6g9w8ddyGTW0eJTYljoehJeayadGPhKyeywfe9ytyxqKB10XfeGaQiOJoS8K+K3ykpPnrmwIy8M65eoCS6yFDtq5IdOumwCVT06gDjqmRxlv1t5OwyIVf/2nv8Y/A6JXqnmSesAYUDxJOf1ldwfmzRHlXaE6+K3KCC2ablxZ6oPTXYxktU772sTOhZ44PzR7WGj07Jc0mPEV7fwuw9PqJEAqI+K27/s9Xmz4LpOWxTKb9RDmUgjrV/7rPiy7NWZIF/i+nRPRrMZfhP1EEhJpDKojlc63rwh3bfKUjrp+Iu039/0JNbemZLUVYelVmZI6mc42P6o4IHDODJWaPWMyWuiZveCE7wjKEuVTMS/gjEA12//Nf5Pm6mRZ5xFANTt4C+Z0ulRofK+tibtMOSlM8Z6paxPjFD3+aMJZMuvsOrqNpZZ3GQP9Qnbq2PHWK9DZjH3EzPPPrKlZ11TrZwfWdRiFl6IjraLW6IpFQnpi/ZKLSS9FbD2AxInuNY6G/gYAPfEy6W7ghUI9tqbe687Kl416NjPI5JYZnUT1VHD5sxkfPNikK9OYAJws7ttQgvPCr0ovCxyCyVEbVocMOMod10zTQw95KrWvJbeEpBg8ifJ8/Y/yK/O7iVNrZIYJD2BZ4En/4BFxma310fJhGqquU2vOqa261NrDuNitvnd
kAGNvVOu/pk2mPC4Zvr5f1fXtVjvDtBQswjMjcjCmWRQyfcgvsY7/wghObo9BT1nABopDex5/vLlOcNQZtebka+5ifqhWLKN7MYlp5AzKn3/OrnD1asI8ZFEE0dUa1LUt9aXwpuqJavAZnVvgwQ/g2Qofg2zTGthbTr8kuiCwhJ6xjVRDW7x2NGWw8Oiyf2k2Kw+uxooNnxcGMUr/NPdRrXzNdQJvU9IZocfPpiHSZsrBNmxibQ2qgzx7UiUSt+C3VrVP56s0VwpkWDpYksx9VrEvUWEfw0POGftCBbZCxcmlMdd4ogzqDbjG2F62JvpSFVii4cEAAk1mwPTZV8Ff30Mgi2hsE8vPHbq4W5Q3AFz9eHYVEqw3zLzo1fp3Nlo941yuJHotSANK2g68FdYn2gYZENmOyDXr0JYy3LEopEw/sDuALFOMGH6t7UamboVJfiqPH3WR5EyXpLTaMj1sZyVNZVyKn9xk1XA1oN7QyG9s45/iT6N2ZovQTqe2mBx34tEmdLHcIYvwLsNLbXLGjDR8oBWVtxAapgPR0ojc+31dicM8uKm3tRC1R0LzqtyS8xBc7PtwHrWv7zELg2kYdMXt0WJZOpioeD8bFvmGR0Dvca/WJIA5yA66cMsNkrjzzHw92ReZuB96IoWR3dekoi/oEcOMDUY49m3lPFDa8NeTatiwU/214LDvLXLGPtK92uIexTF5O+IVE1pDQsC3bEYtfkx7fy8KBrYxMxzAd+3p+yUSjinL1FsM3PQI1QI1pkdgesicdEjttKssxW1FOHTGNOPBb5jXfeLrJPdSBFVDAYyeF75nSxW5F3iNdO46RF/dZ2R5Aify4PD97uC1K6lOEQFYQ/WC+kO7FheK3PvREOenCsjfXX8WeMk7QnSvuS9dg6CWuQaQLeIcREn0z+9PHLw0GPR3mRcjoQZ4a2Jpo94Iqjs8kdLawNiUisKC3oNqPzelmJd0lhn6ScAEHv4AALbD9iQKjv2DDupcmxoKxPNbig1BSngCqU0vs6dI7aQy86wQWUZx7k/5QCD2VUuIRs7oRuA4V/0ARdOuOaYN3U3q8OxYWUeiDPDFxDLoQK4B1azovb9ZjugzfNRREBReC6QdKlijbOjNJ3lBwnpONJfrTrtQZf7C1Gaajh36ymaKXEPhc25nwjbZrOnEfev0aOR5SYjxhQd3hbJYwiH3Uv7gTBS4rLRPnAXaz8cYagagZf9gt2LtBp+VNceErynh3s4qtD5Gkue7B0mmyv1Qjucz2X5dbHnMq2ohy1mvQyA6MhG95spmzr22iu8ER4oxLtX32OBkTqpWDA3pJL1+4uSNl2I/pfY/eos7AKLhXrHgVR6GdvYzKSiN9E/SIWt+822jUy3f8qxj39NcbnrbhhV/JQiv2RInuOXn6QvaSkhJDALdA25zHo/fLCSvqaBhwdlSSG1Wm0d6RmKlaqau5Ormurb9hDQnP4K5Zjv512R67QQj249vdF4nqWtipEnZie6nXDYYMjhYmbT5INnXETnhk0+7VwGCLm4tsFkMd7uKE63OP1WaNosYd5eLdU4Wa2D4nMo/tjiIMQ5p4IYKi7dOA5ZslEh0NbR9RtDUzCfUDlAJGvPry3M9BNaHLginGw+Obj9TI5Vp4lKjZYc2oUPENQ0L9Qkz1tE/rpkn0xEK36QjVqg2ZJ7mhZWZbS+zmIxWCao8i1KqN1NJJy95jd5+EFiWcREzmCDOe0PGiMdGcKRUR9OIlqMI6nRbNV1iIiXisBfpmvu3JKp96nT9RyiDyk7UxMWq+I6IdXS4v8qVj2cGo3Pl0JTtELfFbCboMzB3LVbIvKt48FRiv2PzjJXCZbMuvzX66VpDXxkdG04zXOaLyxU6jGqVRQpnGvYvdu0ux4/A3DJeheBeVx+895qszYwYceNve15N5CaOpRuPLjOq8iEiDhqq8gu6GIadFBb3UECtnodcau+O23s/ZZDeEDdE5a0cGOOaWjJePCcYu9KX8aBHW4k
sUbJmqSsRafJH5Efn0QZ0Tv8tM8K2l0Y283PIDAe/Q3xYqwoulZe90baCNeIf2X5hPUM3tSPRI1O7l9SqRQQzNFMJ69f3SMDTBuzSGjEtH6kWvQeGYJH/oJPo23jlVkse+uiL3X65NDSr+zLJH7yTB7EjYLw7ESPjNe9yUTkw6xhCuhGwaszCTAenLMAusjUlZHCqcROhrUXOEb8aKoSSKLbDizHMUjcBN9//6Gss/rpczXtjg7z759Fh5tNDGvnILbNaJdt2BMjs3Miv5kr4LOJyVjS2pz2A9ffj/l9M0nC1KK+VJn3k7sQSBWKvFNfcNBGR0fPw9M9vbN8tpfHQFGK9syK9kXYXHRTNk1CzzeP3IOy0awdMErJuYj0lP2G9HNB0WFjsUhqVqyrWlva3J8c5ZqtPy1bsCFDr76P4Th734FYDKqyvVM9/fEXXc/vpsfKJCS2TaZ6740tFxusCSMy56lwuR8t9k0P3BavfdA/QgIzPj/HAqQIZJTWM3IcmjJHW/uvx7qKtKlBSsro7A5GVbb+TmYhD6FHY8dvcjbuQicfgnW1b9Tz/SfvLqt2lwscxpSQhpHmr1eaSdFFwBjKeP8v4wTc/pxTCEwL4fJ7yte/G/TuCfxnJkQKio2NRdAmrNozZtn8Fzu0sbsNt82FolJxIhrCe/FECMHfmBee+zQsRuKYR1bEyIaS8phZLy/a//9t3EXE9l9872MB9e5zdREucrQJfTgTHVwHvU0+6d9d9bdeUHfa4AC5W19xio/pv14co8fqdoKsoJrcEEbFU2TaM4ZtI98LWZYVi6eXKW26+vBZV+KhSm/y6SMoccnnQL31XuIrUGbLItrACk3vq/ZT5S0J3VN3i1/N79Q5fjL/ErDmoJJpaLqnd8WD8O4PreUEARpMbGTDXnMz51CYV/akNKwQH57GbCyMTeOIMV/LvC3BMPQama7ySz0DCi/UId9QldHc0pytqBMHEcDYhePBZ3atJHw4ChZxTDJG89dfQ87jqGY9IiBRvQZnouBgwlc6WD28Ni7RxJm4QmKZIrgGqOis34896qt9SkEqAbsl6hEM/moW1hz2vbxK0pfUMjB8H1sETd79Zw8mHNLS/m8ScBjoihywbvx7iDgF0zluuRc2j7zZRJcwxQS4qhZxf7emzrKG8ZbOQvFXnW8GNMu6w2jimoITLHowNsyyVRqI2AezreB6eZVN+gUCiSQoa+KMjdrWLvLgM5+ZS+OhLz6bAk9JU5eV4Bmo+QHZrfPD+R2IQPPXaK3wvq4xlAwqxPth6vHfvoPZ8LYTB/YiXc3qeOde462Jz1j3EGAsxGPKQ9QlOCQmLxwZnF/pfJiELTrUdpqlG/tfN8Lmuj+vDU19UOala6JnaDuSDPcMWM1h3N+DZQ+DvBQyRpvrqpALep048uu/AEHvo05H9nrQUqnkA6+3jk/lB8Zkt7GEdOoAn/vI09o8AQqK/nd6ZS/EWMT05jt70oVVyEg253E0KEpjF/DZC7A2NLFTj1uy/VRZQ/ljpzFCocKgxrYWr3srdnpBkTbPXwE7Je3LmlkU+M77S7r0J7BQDwO6bKXp8RJQP4B1wkndQ+WUE43aqo7DgmrShmZb349d8P4NX+d0LGYgK1l+Yjrh9Ub10B1siMqYLGa51r69SsIBxzXJPRW6/EXdgSvkmKUZ4yj6dvmJlRUyH4YxQ5TFjBY2DH/kjy+X4D+txDZjnFBZZ32nBYFCxnKFMmowvRIW8AIWU0KUeyB7rd3BXb42vJsMOzi1q4O7rVt0tTbMPmG3KuADvHd58x3dJGCfeLAb90L67rb+xJaHaaIDqX6W8480U5qZABqId+9Yz1Rk4cODlw0Uaheo395LY5RQWyHY4d8+vv9aqT8vckxEpBauqLrEP0kLGowFFvZ8/nuTFWghCVPBZ9I3VTrSqHYK5r1Z+Ut1IvMFcALu8B6PEV4MAg03FnQlOvjqm6tD2uPTaJv+P24lRlloVV7LW+qpdamV
X8C8iqtVoBbyBxSIeF3WhHmeSk/R0Uw3LNEkmiTHBLP3usCdt51za+nrhdBkjVhNNvalPKOo2Ws4Aah4zJMLVumMaiue8gTfhstE9WudylmOIh4VMgrGhUaCKKrIXpKQikKa8K4WxMzmPCeLrq5sQn2udaCvHvBo1GPRX4NGRlvW2F6AB3m4re2KKNjp/ljOudKCWrf/pZy+tUTHlq0SqqbIfoG3h6ZuWYDBRyV45L77TiNyYuKIkSJaeg4lAW2+LHUtM0sYptPBKtWC+d5OKJRR4rr1THUFOL4YowfDhcwIr1dKCQcDzqqNTG9hV2dvR9buq5+Dka5G5DdXedFryKuS7qWHeqZuql5T0/V5nXEymZqbPqY/T1ZlrFHaGpJIEpbaqqsscRkh+DVIeZJSJ7vOv34ip7DJtazGji7pmASehDEjz0Qm5q8DzYSJ5bKt2taCm2g3DZBo6mB/uXIxiGahMv11N42Xh73gfElwzwWMbv7YUmtwZKNGNsN3tFQcwRBkO7+dmWnCLHR9M+9fVo+XbeIjBb+gRdS8rovUNerUJaZLvlYqo479MjYSAlcwumuptxmWkSNAiLT9Q+G3i97gc2oJc+nG+ZhMMMZ0cFwedks45txbASFQwUSEXajyGrXFGJn0DQDdi6MyuO350v7atidIp+KDhFx9e8hJ5sg8PFjqDSN2GRlVgAU5N3vU21XNahQrKLTSmBJZZmytzHKVWZybqQaTgMO7h7ylS9XtZR0l6UX++5CfMbaZTiyyeO1BDipkWihURPrHtFUc41HAFnVeoMEj3e5MGYmEEe809LjCrDi1+2Mf1MJQStT32NV4A4N/zm1PMOkCXYsJlh/AOAeceYhPgdNmlfX+Te92mkHf5VZtgqe3BrxfsL2OKdGz5b74+TDDTackgyJkS0rDr2BayzyjNYCU4Z/3YC+8WldjZLw3LWhb5fjB99UxRj1D+BYmq1splPdpe6+wx0IEVFbuQT4t3Bwv2mVSHW5ZLj61uZsIsMHXqSPfeLRW8a4PgKfuFMSTGHQzQZJ3egelKPXxAtPc3yGV89f7cHMrwYblh/d1l78Il/+7iB7VO6XPVYzdIV4EFFdB/PjyotTM7n6Y6lhG2TnM2Jb5RyPw7gv/gS5vWXv1yEjMsJF3vj5FzS5B1ZUg0aXNKrzD+/uAL4DItz75/DTNJc/2C/LH/m+Ht+xOcg1IjpcinCp3/L8ha59zfZs9QqAaREizpsNYU/o5xMNTAv87mSpApJ86mH68MT+ZtXAKJPk476IQl2BRPhn59Ry7NFiIZUwIjeAJgARD9e677m+nbQ65Ik1o5EIj+ERYb/vVDl1dR/AIrsQsQ= |
0 | lc_public_repos/langgraph/docs | lc_public_repos/langgraph/docs/cassettes/subgraphs-manage-state_30.msgpack.zlib | eNqdVW1sHMUZjhXRpESIVG1+BISzHFFTIHu397l3cdPGPtvxRxxffFZsEznXud25u/Xt7qx3Zu07R3ZJTFvRmsAKiMwPSkjsO3o4jsFJsBM+ghAGKj7jFNUOtdqUKqICGQqItgjc2btzOCv51ZXudmfeZ573nfd95p1D2R6oYwmpZWOSSqAOBEIH2DyU1WG3ATG5L6NAkkDiSKg53Hrc0KW5uxKEaHibwwE0yY40qALJLiDF0eN0CAlAHPRbk2GeZiSKxPR82fsHbArEGMQhtm1j9h2wCYj6Ugkd2Hrpki2YIQnI9EJAXzojqQyO2bYyNh3J0MIYGOq2/k46oyARytZUXCOs2+5liaFHkYVV6ayTvjWgA1mGcoQgJEcE+m35jAEZQ2rFRIdAKZkgUNHorimN5Ymz89ZcfmUCSYI1d8BG0lo+jJih5rNjubv6bQFUoOQBcUgixU3Y+vuLTMUt/18kFCNCLOiSVoTZdkKyIlcxpDOAwRoUpJgkMIJE0rZiFmjdaGnz3JpO66QTCRaGeVTpzmhaJDVeiNmqu6RD0Qq7gLQSv4xE0S4oEIrs7+zPJiAQqYuFVetHEggTc3ylIk4CQYC0TlAVkEj5zRPxPknbyogwJgMCc1QFKswnwMwlIdRYIEs9MFNYZU4ATZMlAVh2RxdG6lhRNawVy7XmnKUSlmpMJeapZhpEZb0jlKbSVRmn3eO3cxMpFhMgqTKVIisDGk9Gy9vPlRo0ICQpCVs8FmamsHi8FIOwOdoEhObwCkqgCwlzFOiKzzNZOq8bKpEUaGaDoWvdFY3fuXPbnU574JkVxDitCuZoXrLPrVgMiZ5mBUQ5zCe5jIBQUoLm3L8iESEWiSrb65yGyy+0xdPIfU80DNy8UeNL9ASrGjQCunGDK1IT6Yrf09pe19rLOnkX7/byAY5nnXbO7rQ7WbXBXRslBmqt7N1Z43KCGtRhb4RNXJ2C2mC4LdTHV0abia+2prJKC8bFVFNjpBH62sTKZGqXLO0S/WqVD0E33h3e66m29/R6YMAf3FPB0OiMHkncHo8gfW8o2K2Fu/ukhso6I0o0WZb29PaoqTTgWwLpkKceK40dnj0l4fk8HpYrRujjPH7OesaXtSFDNU4S5nEPzz2lQ6zRJgQHMzRlxMCHRqgO4ZuvZ4vN6Fhz43cS3jBSTTVpvtCaMLYyLh8Thhrj4lwexune5vZvc3PMzqbWsWDRTet1JfhMqw5UHKMyrFmWfFZIGGoSirngdcX+giV2WkkrfNrtWJjSEIZsMSpzrJ1tKbRhtr56snCyWKTHgSr15d2af7CETNuupJ4qmulZtyipc1bB5nGnzz9etCxrLEf3xbFOjuWc09bJF+iRsgLXkE5YDAXa5EnanNuqgJR1nra7nV63jya5gnZmQTZEGDai1UihPnEFo+lQRkA8m2JpF4WypEi0CPn/4gVCz4rTKtHUtQiCkpDeNU95ucLzYilEh5YHaxtXiUYC9Hn++qBlLo+FCfD82ZUwDEsCOu5T8NS19iLFMQ6PpZbBrCSac5vpIOJ1ARjgo24vEH0eAQAx5orCgNfH+6Net9cnnAzWskEgJCAbzqvNzFZ37K5sqg+eaWdLZcM2a4UrNqsirEqxWCYMdVoaMyfIyBBpa9RhhnK1VHaYp/xCwA34ABADQszj4fxsTVvLxDLbVZGNWH01f9cezBRa+atl05t+t3ZV/llNf0tLpKUzeYn7Uf9XJ9d/NX1n/fzFi0j5hTxB5MHRhZahYx/4G2DnHZ2T8Pal/rq3ZV4eWH+5/Imbb/toeG1Z3+rHVp1+eCw3P1Re/uVic+2m/Z/f/PGf/j575cHPsx/p35a/dAUfevr80S/LNyzBzrtzXYNjb5yZ0H+ycfCfm
x/P8JNfDLgO76i51XNw7/s/PTv9+wemupO3PLBldu6x6TtOvBv88cDmqrXdQ8nXE/y/17l+YFweqVqE9z9k/Pz7ZYd3Dd/7xf4DR478LbdmQ0ftrdFLM8Nl56szoQvDC1PfHB7sjzziybRPoa8/sD+xqM5+7Xhu4D9vpl/8s2Nm9Ut7/xt6bcelmdz40bKFtybXuW8//8e79MYPL29pWHjy3NF1ZRcebfrVkSDxj++QfjbG7Hu5TXq1YsuJC/vfu3H3p79ON3x7+pcbH7msXUpJbyuPX8mODwThJ4lPHPP3nTgzv3jjby68Fj4Yz+1b/OsPbzuaxjhUkeuyD+9/73svn6u/e803r9xyb3zo/tGNQ7NL3b+96dmLn6nnrlycmT3z8Q0znTOpuk/XvNO+YZ/SSE7P/uWV+t1vbMrXZ/Wqyc/+8SFPi/U/z3zMDw== |
0 | lc_public_repos/langgraph/docs | lc_public_repos/langgraph/docs/cassettes/agent-simulation-evaluation_6f80669e-aa78-4666-b67c-a539366d5aab.msgpack.zlib | eNrtWglsHNUZdjhakIqKVK6C2kxHqFye9czu7OXUAWM72CQ+Em+cxBANb2fe7kw8OzOZw/bamKuIqqGhLIFSpNJCbGywcio0TSGhSA3QFtJGRaAGlaPQgiiBHpSKs/R/b2Yv20kcCBAkVoodz3v///73H9//vXl73UQ/th3NNOZs0AwX20h24Q9n3XUTNl7tYce9fjyHXdVURi9pSY16trbvbNV1Lae+rg5ZWsjJaa4a0pGRlVWkGSHZzNVpRsYcS5tK/uEJFSMF1F8/udTBNteYxYZbuJ/MpnKcla/jQ0JICCc2N8oytlyuxZBNRTOyhY3ZIc2qZRSc0ZGLx/3hwhZkWbomI2Jj3SrHNCabTMPA1ObCZB/GFod0rR/fa2PHgm3g7447LnI957ox0Iuf+M1EDjsOyuL1nQuLxq0dU2CFwq6U6tUy4RjTjS0mzIdFRojV82K9GGcuaU89SAxwHA6Wc21T5xp13RzgOm0tqxmFu0Z7NFSYhI0wWdPM6njnjLPbqR+dwt3nbyID4AtuETayrloYFaPRXVNkWgYt08Fcq28kCO1jZtTaZGMFVGlIdwpjru3hjTKSVczJ/qzCvYbJ0ScTjbrLdffLhX0hNdLA1otihJ3H5FBDOJoM8zxfq0a4cHKGge1T1m1HgySUhdEYz49DXCGBChNevyabtjHzzsub2CAHO3fzFp4eTpo210LMbMiBZ+Y8NswG2cnWs3woHorE2VoWJDCEVsKDlmZTWcnVcpitNzxdr2XTyJVVCeQheSVYLqNl2fph1pGRjiXPklY72hCWYIVsFttsvUC2Xh41XNUGax1J1yBFYThWHFTMAUMycM5y82VpEUaJuuJsqqv0QErnXeyw9WE+GReiYX6kltUMSEhDxhLkddYhhkFRQNm5WEKaBBVn5yVsoLSOFbaehLOWNe2sJINRdKeK5gSDGYg4jDqqOSC5ri55WlHAhSqGHWrYlhQv8JCC8nQ13TSypEJAgUiNVU3bDR4IIhjoYGSD/6bYMGDafY5F1DqyaWGJ2KQZ/RrdXtGSiOS4pg3VVS09MjINTMa6Ort9NDn/UGhie4ZTR2NKk+OZ0+cNs1AZ4OzLhlkNlmAFFI/FY5E0F47JIicmYyKX5BHiMigZS4SjUSQnZEgb8LvtBpnCkgLn+CSIpIplHopHE0I0fgHP1/M8zMeGUpVXeBD8SlwIFvnPIamUPlBWCWhkIaVPqkxbCnA0b9M2svOBAN0hVKmNYcgCkMuYdg7Gckju7OYEMRTlkJ2DvcBPLiamIb1qy0uzVh78aBDRvKTlLB3noKpoqGGwqas4GghU2BMBa0IiMadoRJWxMFw1RgycMiFW3su0EUggiDACRPVdhfs1Mi7RQPGhcCic4MI8l81EIvEYn0lyima7eXYE5LBtm3bJ2f2wHYfG2EB0wzR8sPJsAziyEnaPM9jGpNrwICJOonb4K1gIRlwJHFTx0KU1edlKUgYONTxYHfoQ8nSyfnGgLKSYrosVKAmFIAK1jE9Su3hRjPtG9c4ySf3SPZy0Nj3X8oirhsGHgbVLPIMWYDepOtg+S3AnmMUGHdB3bgDHINOqfYtpNQcYGRlMG6Ni3WLypnchiCJF0UhiIV3qG0B2Nlir2GKlioDDYwLsoE71cogkYOD4ACRGVo7QnJSCWTTHIGmCSubjkbjAh2NcOiljTlSiUAN8AnFhIcrHM1E+I+DwLCs5xgszVPKBZicTfHn2ka1zKvD5K/EjVcjAD4CaQEdUaPeRocuUcgSSDzq9J0PTIBkKMy4rm0X2bZvQbR34HzwhvmmCX130YQr6MCGGLJRqKSn9JJf6ESwJ2U8yvJztK2
nJE55UOaEEL63ItjWHOlQLrCI8GEZWmJ7NkFmM5jDFaSEGHjMAIYxr54FrMK7JZLHLIAYwxzMUBkLNuCoZ1mghwQSzj8xq1JHTh0IM+RAdA8hwycwcVQGNmM5uXLSIiudMA+eZNJL7fImUCkZQnSpwJ2AOChNl8tCyHQZlzZAftor6/pgO7847Ls61+xoP5nlf8uOEONBQyx5mgKuiBZPdoiCNHA0RYmQPmEkO24yZYQDfkGbrmoEZ4BcWMvLlYJDZpWMQRHUAahfEPchiZkA1SQZUKHM8ywL2xFhQOyQlyOdyg/wcrrRqxH/m/1ymYoNGmCyV0QzNUSGGdB0Sb/AXqURa5bWMD7FK0QwHTNIxA1xMYc5Z0NbR1t3a0nwOW96zRAAGETTPcD6HZstNYYpfR0YI7H7M/Agyw+nSoW2ppk46YFXAiqEq9tFSco7QVhA8naGuRz46ByhDP9lhIAo94JCC5S5wmORhtr06oBcAiqvrobCseoH9jKhGaLoouK13lg3447CU4L8HYSKfXdl+nqD+EweVj0T7HNowaOF9wtQS8OOgAHK0Ut6gz5U4bzqWlhUeCigajSc5UUiKXCKsyFxcxHJYSeKEImRmx3mTYSFx6NMrnNpN/42LBCiGcr6DTAXroDhruVwEaKnr2WkTtNDnRWCaOgitAqNc6fhv+J0E/E9eZ3hwrq3nQ/FatrhxE3IYaVzQOaAWLd82cI9pBWRruPq5/yqHvE+huisJqe5I4Ml+zQdCXzdhtY50UJtL4+UDiOs/nm74rDlv7RevBA72SuBIHglIvPwIEhZSCjup/074o7Gtmn4cNBemR9zXJyFLk/pwfrp1DpZt7JYM6+xq6Whskxq72qSFLStYUug5NCjBFFujr/38oqiGqcDMj85vKsr806Ip4aOHppDd984SM4/My5SKoB2gpxz2OcuRgQQgtuKsNuWoVZ3FX1Cjo4waTSE7h3+QmikBWgl9OET8D8xYqrnRyMoprEPXc4QxFe+8bvi0L+Q2FG+8UjPe+0y9EBuLJsX4AS7x9szZWLzF25oyTaYd8p1Z4t8vOMUN/uCzv9RbfwTv9D53d3f74sPQIVykEVbZDnKqnmc8Q4MoMRSWHYAgiCBD78kYPChj2LLCzvqqyG/V9IaoX6gjtKCOYJ+OKZP0r4qOESthmqnk/swXUHpUQCnDQqZhEo/SyZGpCtMBEa8sSQJAqBBTPMYwU6keQ3gYI8Dv4pmF8Q8tTDUJZIAFljDyh2Oq6biFTdXZthn5uIhnxsVJeUb4G0cHgMtD1NQk2RuHfJj2SUidT+QZISQmQvyWQQ6wUTN0UrgExwvj/jHgwcoBC8INSriAmhfGfeFNlXNMp3BPOzloVKkkd8GFe+ipY1vl8+AQUZgIzhUPzjBYXs4/YGytUuzkDblwDw3DL6qEgT3n4RAEOgp385uK/tGDb0pEY+LhfLnjuZrTx5oP2gkSpBMcoj1tTdnIcIBnl9vhhKx6Rh9WDtDvdiEfS4NvYXDYR/7AqsKG5VzQsbi25m3Bydi0s8jQhuiyhftKdOH+YBiOu0QlLM7lHADweHRTMFL08yTsi+cEnuOFXxIYliGtiOEEhzg4uwCIuvnCvlo4pJCcaogI0Qj0AX4ewJqsewru9tLNZg7WdOYxlo11EykPDHLkywkUpjn/ZwDQkC/kuxP8jukzXLMPG07h3ijvfx6qnGJjsgLZRknRWBI+O2eeVNQlkjmJePKB6mkOrjBoNJZzdkwfD1Ss550Ng8XJnKYU9p0Nf0i8KKeTKCGLCZTO4Fg4mZHT6YSQkeHMkI4paHPTAq6JfqWmm2ZbYaJ5RUdje1vTZDfobgIU1vAtz8w5VpLkjJTONfS2oCYzH27BidbMIllrHWpPpHKxZVJvR7PVnJPURbH86n5jgbw8tJQT4uF4JBaLJRKcEKLMixP6EmklnViRSQ8sTsVW9SurQlZTYqnclxIaF3Toy5vbnUZTzYk97blIqjONl+Ili/
sXRkI9OLO4SeF78AJvqdAy1LZwWY83GHY6O/NdbvcAxBO5akPdPIZ+awY7DUE9cFAPnF8NkWI1zGMUmgUNoWrsm8e0QgfuNPT8PCgjSCcMv6GPdWsubuiAxrLvVvABsAmlITe4oLNzaHlbdnF0STp1STLV1d/c39mVG8qqmWWrehYMas2hpq7VyVg7X+GEhJjk+MAPcN5L0OQpm/4Rrdq+nKssb67Tf9tUmDBMx9AymfFunwdNyrrpKQDjNh6HmC9pXFG4PyEDEVVwPJYWRCGaiXDdlzZtKWorgcEY6QETCDiaAxxtWzVFS8REnq8iR4/MGZ974wk19HMs/PvwQ7fQaZ4pnPzIP65cU/jDNaede/XvuF1nt962/j+p+yb37n9j/4v8a6tfF85b8/Ybt51+zF/33vjVN98fHnrz5ZGaK/Tc01dMfmPVU/lvNlz50mNy7yPZtnV/W/j2/JHa566a3/Crnf97oUm74Y6b/33b+R/c9eht7k/bNu7YdvnxF9+zv35Fx+3bHklv7X3r4nOiqx/t3Ty6b3D+H0+59Ounrnv1/Yvf2N1zyp4zrzqppua59+JZN/pBtO/cG+auuym9+0dPvPvuiQ8f95fvCGeMrl3Tseef3+558dbL5773/R/jE84aXLc7uuXZr917jXf1LScdt/zSJWrbUw+cdvIx0d8+tbe15riTnv71l1LKz+748qM7LlH3X/tC6xnsFRfIf7qoTX/1oi1z9l/zk148Gbl279kvD3c/e/d/H3rpvZ3vNO3+8+0Ln7j9nVd7axa+9t1b/7VeemWtF3tj+7X8mscfX3CqOfSWWPP2s6ffOd/LXnnz71/rwZ2hszrW1GUvdB58vf3OPXXr17DPL962+cPR7dHzem8aeCX8yvVP/zz+vRfP3Pb8k7vl1N+3r/W44+fqV+96aO2TL8ylwTi25sSvDNy385iamv8DRxlpuQ== |
0 | lc_public_repos/langgraph/docs | lc_public_repos/langgraph/docs/cassettes/streaming-events-from-within-tools_ec461f66.msgpack.zlib | eNrtPdtu48iVmQ0WWPRTHrL7TBABBkhMmtSFkmwYgdqX9qVteSx31NOXFUpkUSqbZLFZRdvqhh/SydO+LLi7P5Bptz0xenpmMINkZpLOcx7yA+6HfES+YE+RlCWPe6azWQ8Qp6gHSSQPT51z6lzq1PXpyR6OGKHBOy9IwHGEbA4X7L+fnkT4UYwZ/+Wxj/mAOkebrfb2szgiZz8ecB6ymelpFBKdhjhARLepP71nTtsDxKfhf+jhFM1RjzrD1+/8xxPVx4yhPmbqjHL/iWpTKCvgcKHuwysK4dhnCoqwQgOFD7DCBthzf6pOKWpEPSzgYoYj9fAh3PGpgz1xqx9yraxXNR5HPSpgA7hrwi/jEUY+XPAoxnAN2ENgDeAEKkOviXuUejk1fBimRbhxkHIvUJ3/n1GeqAHyU4A+5t2UVAHhYGZHJMyB1DsMA+WEKQIxfCkepbtKHCr7A2IPJjgkGYd9socDJfSQjXWBLkQRlAI1wNIiwwgkG3GC80sBl/4bEQs8kqCvHh4KGUFVkQg7gp0cVAhqBEp7O9jmAHr48PBkgJEDhfzn0YAynry8WIcfI9vGIFUc2NQB9MlH/ccknFIc7HqI41OotwCncklOdzEONeQBG8fZW8knKAw9YiPxfHqH0eBFXs+aoOTy41NRpxpoRcCTL5tsGNgtoKS5Mr05BI0LFFOv1HXjkwONcUQCDzRI8xAQdRymz383+SBE9i5g0nJtTo6zl19OwlCWPF9Hdqt9ASWK7EHyHEW+Vfls8n4UB5z4ODmZ37xcXP5wXFxZN0298ekFxIKj5KP0Zyb9JvS3F5BgHg01mwKu5FfGy5GwPBz0+SB5Vm5UPowwC8GO8C+O4TUes6dHUDH4T388ye3pg9baqEb//L1/PVqASkpebQ/iKaVkKW0cKiWjVFHM8ky5PmMayq317RfzeTHbok7OFI4P+DTeE3cys5lVwIojhvlczF2t/ul2hALmQkUtjpTixB7EwS52TuffqA6vhDoAe4IfsF4NH4SUYS0nM3lxV9vKXIu2svBZpnsajfooII9T3Uh+LWoZiCDB5/ljsAaBEgrXfJY8M+vmy/zJqAJOgVFDMw3NML8UpmGD0gnCQxoBY9gGx8WHydmUjw6Exs2VzWrZMgxjFuzR9mIHt+PeAvWhTDarhBH2KHK+OtDAaWCP+ARqJf3OnSIokgkvG19chuB0Fwcs+bBqZJ8/TIJEWJQg2DhHdNSAz+/fDDTCVREwjVr1q4tgUEdjPM8sn31x+XmO4gODvTgYAWvESc5+BBfdmlNxa41KFSEb4WqlV6riWq9sWXYZo2rPwR/PL2nzyB5grZ2qX3Ky8P5Gc31l/rQNuOfBwxH8X6/f+X63a7vdnj/X9/civs7Rxh1Sq9qPOneby4g+bliLd6vrbrexcudgZbEW3CIbGyuaWSvVytVavQHVphu6qZvaQRR7wU7P79P2/OLNqmsadzr3WotVFPpkc7W7p3dWVtdKOq4s1bd/hpcX+M66XVl85GK01qNb1uLA2NxsDL3OVmd5td7ZtWiptGw1oT4RH8xNzyqgieAm2VxuIBoYiCbMozJjjMxjVnFSLZjTL3rGWWUZol4r8IazYFegThh+wWG3wbHPbdAAn/0PyCDeI84cq7SsnZ5e32ntv3e7ud1q1/FCubK6z1p8aG2w96xOc3OtH3NrZVIIZdPUjFwOllGpp8ozJv1vpOo3d7VJe9daabyCegwoC4jrHrdxBCaUnNoejR1w8hE+hjrfar6ffF63G2VUM3s9G5VqJcvVFjtbaUD/+XEWfF7/y787iCMRlQjEHlVEfxtiv9a8SR5XHMfoHJTwmr106/a9asmq7Rt7pf0dT50aBaT
sDX3cXtBTxwIANjgiLuLZWD5To7h/MeyDvZeq8AYbMoiwXRfIwlEI1AH6IPY8wDWgxBZhFCI9CRx8oM4AMkDFkTrzJG9fqAi8C7hYeG1q3DrJEIiA3rWR530dR8Y0POj6N++hhr3U2Vkgq2u7u6TcvN0Il8qbgCwLwhNti4mmxahlMdmwUFHUj30oHkpTIWY/nILw7sYMeRk9h1OqR/vgEHtsRCAwTdigCzJjAmsK9fDwxo3rXznfKPpJGU4K7MmDVGaFkL5NSFkLsRDTW8T0QJ0ptOntYkpTtUJMb9emQ9mE9HZeJ8U4wfP9hdbG4sMbN660E+In39IJMY8CZUhjSIY8T/GxknZK7AJHCnXz1J0oPukPuOKKu3kO71LPo/sgliyPn1HeTY3hXV25DY0JBXB4wChXygqLz/sAGBapPhdvAQ5f6Q0VpAC5PtKVZoo7zUfgZi8i2FUm+hkENRjlmHT9O+seORznlD8o+gn+zvoJju007UrO/vJ3nnV9B/nQpT6SUqPxf+sj+aE0fSRWXcI+krJ15X0kPcd2q2XUs0qNquGUDGw1rKpVtiArbtQq5do39pFcQe7tmo26++bc+8nzb2kfDDvLZCW+GezExl2/tWSGS72e22vduka5t6p+J4nvNZHMWAymKinjuqyMKzchwDNpudeKepeSewh/0vIesxhy4KG0/KfpsyMt+8CZrKyLDpM9LK/Xc2kkLe/AnLwuL+sAkJf9QF53zziNUB8XiZ1sFb89wPI6PBvJ28qJUCCvvStuRH1pmQ/oHvbkjfOcyss6PuA9qbt0JK58MUgrK/NSK/2UvJ14Ed0jTrooRdakVlrO91BEMJc3uaGuvLkNRlIbvQ9lRgR50nbnPAgeSJvZl4pOPNnsfZn68nblONiWeMBK3qkpK+niNGkHbGJ7IG9SI2+9h8ROF0XI23GPfFz05MiXzcvKOWKFuhdzUaRr0CNO9iSOcqQfS5vTkAAXU87lHKD0fRoUc86LOefFnPNiWoY0Ht9xitFp+Vw9joA7eYfowOJl7sGU2d/JOy2ByjvpWOoVFkzenI5IPUon8wITyGhCeeM7wRK3bfqYD3BUtHCKFo5EzL/LJB6xYaQvb6RDmIG/48Qu5ttKyHy5SOlkM/i23LslKD0SFJvBFWpfqH0xMaFI6P7BF9CK0/dsicepRAdmMUwlYfclknj+YTETpzD5YsRCLpPPjnLAxZR7Sbd/lLfmmY88T+LxKlKspC1W0krY0BkWC+sk3BXJJbbEoS4WB9kVK0plbN/LOwvPo3wg8X5gxe7msi6nlHjecSD1xpc0ljihI49xcYqJnB7Po1HRdS9lXkO4vNNvieviSFxIPniRnkMt8VyFAGOHFVmODIy/nVeVcRqqE9zeX2htLD68cePpSX6g+C+PfcwH1DnabLW3n8UROfvxgPOQzUxPo5Do2VHtgs3pPXNasDw9ZpmlR3+//ud/eqLmZ9oDHcr9iQpR1H14JRtcELPHFBoofIDTAXb3p+qUkp+1ragxSwdflMmXU5bGIBPncSsqp9Tr2sjz8iL5MEyB3DhIT7UXMKJiFVUAdf2b91DDXursLJDVtd1dUm7eboRL5U0Bdv6KUIYA+SmePubdbEwEIFDUj32gSZSlPnmgpnu0PICLB2rKyQP1UD0EKV8kXzX17OxcRct/hQTyM1XzfV6EQPLZBuLMxfTswfwMPtGKG53JpqdZrFhKmJ3ZlJ5dlJ/hAyF/fKaLuBBnfKQXU+OzDxQ02gtfoe5ob/TzPcLTufslPd1DONt6C0hO91ZNR0YUxEZ7TuZ7L04BNiZ+BI3jvbqyPavE3k0pq6O9fN7AK5CJHAeoGu2BkO0FkN5PVxDp2ZrZXHUE42JkXqypO19blkO+y/K1J+M1GCk/ZX00WTWdtAkcXbgUBKLx3La0IFG8mPPzNUrTkkdj4+fVgkdjhjmNI0mJMYWpvI/5vK8VbkTnfVCj2hTSJsEoV8tylhR91oYVRYs2zTi
2fy3GZb5enzQjYRYXzKP71xjB4UN4JXdRykUfJZAJyzDhl3FQHB8ueBRjUQj2ofpQuhfpjGLotbzgbzHJt9sa1KUdkTAHUu+ADoDQhDCoUBLFA81W4lDZHxCQ99izkMyz9EENg0zhUsGESCgsqDlLiwSDAJI5wfmlgEv/jYgFHtNewkMhVHCRJBKuXLmfgwpBjUDzSHAoLP9kACYFhfz5ez84GlDGk5cXvefHwqZBrjiwqTC95KP+YxJOgea6HhjhKXiNAKeSSU53MQ415AEjx9lbySdIaJGdVvv0DtjLi9zLaIKWy49PhTfVQNUDnnzZZMPAbgElzZXpzSH4+kAx9UpdNz450MCdksADZ6N54FaS4zB9/rvJByGydwGTtgfcCfKOs5dfTsJQljxfR3arfQEliuxB8hxFvlX5bPJ+FAec+Dg5md+8XFz+cFxcWTdNvfHpBcSCo+Sj9Gcm/Sb0txeQYB4NNZsCruRXxrFwiAQnZ3/pdm232/Pn+v5exNc52rhDalX7UeducxnRxw1r8W513e02Vu4crCzWgltkY2NFG0dxzdQN3dRN7SCKvWCn5/dpe37xZtU1jTude63FKgp9srna3dM7K6trJR1XlurbP8PLC3xn3a4sPnIxWuvRLWtxYGxuNoZeZ6uzvFrv7Fq0VFq2mrMKUBeDu55jlZa109PrO6399243t1vtOl4oV1b3WYsPrQ32ntVpbq71Y26tTJJXNk3NyCm0jErdEJ+XI0XxcNDng+TILNVLH0aYhRC+8S+OQWY8Zk+PQCvxn/54kofxD1prY4X+4dECaGjyansQTyklS2njUCkZpYpilmfK9RmzpNxa334xn5ezLRTyLA1I03hP3Mm8xqwCjYeIYT4Xc1erf7oNcYyBV9MWRxZxkjagsHM6/0ZbeJVFRU0wBN5OwwchZVjLyUxe3NW2shaNtrLwWWZ42qS7TH4tVByIIMHn+WNwBgIlFK75LHlWKpde5k9G2ncKjEK9G9BS+1J4BhssThAe0ggYwzYEOz5MzqZ8dCDMba5sVssWiH0W3JHtxQ5ux70F6kOZbBZiDfYocr460MBnYo/4BKol/c7bYmBFpqi0Ly5DcLqLA5Z8WDWyzx8mQSIsShBsnCM6asDn928GGuGqCJh6yfjqIhjU0RjPM8tnX1x+nqP4wGAvDkbAGnGSsx/BRbdnGFYN1Uplp2I3rFrdaNTtUs2tWA3HatRqlY/nl7R5ZA+w1k7VLzlZeH+jub4y/5u72qQeaa00DMDzgLIAguBxG0dQNckpRNLYAc8Z4WPAtdV8P/m8bjfKUGavhip2rWS52mJnK22f/vw48+mv/+3VN7f4LW89ttvV+vZOY9Nc2tw3dqyosnX7jv23tfhL31Wqk8X5yabwRPYDFxF2YwbZb5blXEUudF0kMxYDNK5USVm/ymlc15D7q5qyfg1Z5xIrfZqDy2vyWUtDVv5nHkhr9KasjOvSGvvNqzvD9vpxL6+llwpLl83Sr+48v2vI/BWe51ckcYWTvx6clwsnL5ulX+luOteQ/6vbTeeaMP//mLLxv7otWEU= |
0 | lc_public_repos/langgraph/docs | lc_public_repos/langgraph/docs/cassettes/add-summary-conversation-history_0a1a0fda-5309-45f0-9465-9f3dff604d74.msgpack.zlib | eNrtWXl0FFXWj+yQgGwChq0IgQCmQ3f2RIGskIQlQGIgskh11evuSldXFbWk00QcARGQJbQHCYsTWUJiEgQCIoggmxgVRJhBUNwBEdwiw6aoZO571d1JWI7OOfN95/vOif+Yrqp319/93fsuc8vzkaxwovDAZk5QkUwzKvxQXphbLqOZGlLUZ8scSLWJbOn4zKzsjZrMfTLApqqSEj90KC1xYbSg2mRR4pgwRnQMzTcNdSBFoa1IKTWLrOusf7PCIAdd8KQq2pGgBMVTJmN4ZCgV5P0KnkwpDJJFHsFfQZqC5CB4y4hgiqDiRxzFc3ZE2UQn5dAYG6XakItyckLQ7FCq/iCtKJyigi13nM620WqIQmkCC07Ce5Y286gflW1DVDLiVY5RKBudjygzQgIlCogSLVgD5RAVlVI0hgEzLRpPWWRaYGycghSKE6hxSYkU/FBF2RVGZYkORNnBKAuETqFos6ipRIZHQQi2VuAEK6XKNMvh6MZPFaYKBmyFS1fvFAWKpmTEiDJLmWKIAsZGOyScChsnKaH1VoGFtOCiVEQ7wrAITqZ4Gp6rnMojiqHBGjAx3GiMDbtTjYNmEREk8bRLtFiIMzEmClsMYmOiKQXRCqikFE5g9E95RFs1sJBWKIuI48jiQ6a4yOjG4llwQI8Jx1CyJmCBJlNjL8jJCJ8S+IU1mOKijBA3gaWijeAoDxrMLvifFQmsomc/ieN5aqKmKIjnQ6kk0Uwli5riCiWnMkSbQKXR+TzHILvHqHQBRxNAQLkQLevhc4VACGTkoDkBYWslJCNIDM1TOmAAIqEQIjtJ1R1BMoV7kUFi3cAJbIEvwSQbLJfP4YKi4iAngHOKhaIhb2nAlEQLxMZ0jEurjOCZKgL+wCALTaJGk9yCUNWGIWiDB4oqiyDBq0dCLAdHUVhDIIcA9DieU11YHniECwICwLvgh0NCKoL8yTpKFBIVirZAvet/cmAlsnACpyI4QFNmDrTQsurxGryC+Ohu0JKEaD6scf3do3Cdeuk5XJQAmBzx1+o1V9TAfJ6lAMVgF8+BgSRuLlGTiSBsKgAglFLEux7+NaOocSPH6CFmiQhQwAl2Kp1A7T+wk+VYIUQFSwVc07gosSCrhwEglz49Xg+IBmJ5OujGh0lZIkHUrDaAGSTIQRNhegotHKtnECpQUSinjQM0+KRigQ7OalMbogf4iJQujgxIwNyuS7SRAmYgJiwQnR46OMKKDh0XAKQkoBd45SVGM63YkWqmofSwvlCf0U6IB7YQagVhPyFImkPSg0B8J8KxnZKMLLjKgEYJnj0v4bmYz7G+omjgeRiVbsGfhLB65YMaxUbL6F7Oy6KoYuPBshAHOChJBPssp4Cbii6aVBF8HEZlgouyEygcfw4+KeCkHX+PjfWcucMFbyQ8cdIEHqeh3gHMyGAai1Sa45U/R5+nlxFyoVWZE1WlX9Dsabgdiizi8ScMT2ssMkQYbDRn1wzh0C2NEcYYLEhxQUE78EdZmsNByy5cnI1y7KmYeCoNgo5rApij/tO7IAE4tNAyaUYYz9D+ZZHVGAAI9h64FriI1BUhOYXmyAsKZ8NHVn+OG8go5jkgIM9xkEUzdkF0AtNb4eH96tvTggiLYDXkbYPgs2Fey+EtOF8AaFOUBgLvnhx8NgKTYrvggCSSngYdxwa1xON6wrhUcFf3+OcjWM5BVHC4Y3uo2DMGEK98/T2UEkTVh271r7d5Iub+A0kjVr/vdOLp2N4BBQcJtwk8okgiJFkhGfQFSU9nA6hDqYmY5cFOjlaR4mkAd/Qg3Qu9A6f7UqLHjUC+ADhQxhMDsR8jAfLIgwgDGcRIk
HUXdHiptKopGHANvIYA4RaCfacZ3MZ4TrGBlnqHiXspIrESkOOFO3ytyTRpZgwtqZqsVx3u/d4g3KsiNAkTApFEvhpBjUGY4SkMV4ojzARZ1HgPOzkIQUHNypwFakzGIMTONGgHwArlNkTj+bOo1AaeuKvvGpu3grlIUg1AlCIOjPtV6yxOCsUtmQcnKsFGAZG53F1pR0gy0DxgsEw/5d4GuYLZh3gwNA+Gks0eyjGoLgnd/boSM5MBhm9Bde9K9NoxdDzYC3YbwyIiw8K3FeAscYTwDDwNJpVJ5P2bDV9IUMcgx+C5QbjL9MNbGn4jKu5NY2kmM6uRSFpmbO5NtOyIjtzR8DkMjnhmcpcnj79bnedlvbqIMJMpLK66kWDFJTDuTRaaV1C1L8i+I5VAqBEGY7TBaNrijRKPBKtqc5eGx5piX9EZQUHzynRAzi2FlKBj75Z7LiwbMkd707mkNAWS496XbdNCqfBoKgtJFOZrmA7jI6LjI6OoUWOzNyd7tGTfMxfV2YBkBXqkIdWb+3LGpgl2xFYm3zPrlZ5rmYFj3Xvh7yeNphSTNWqCyIVrj8ekxAlPxNrzk2TbpJ0FBoYXNdYAtMQgg6cm3Z9QbByijVHhDGOMMyIja44xRbDRKDqaNTJREWYLuzGfo92VpjATZRVFK4+2Jo80JNOMDRmySEjc5Sm54xLHpidvnmyYKJqhiRmyaau7VIDCLctCMsTaXUlUA3plVAbHJybmul+LZeIi6CijKcpsZCMjIi2G1EkTt3nD43O/FEOf3B3nQArwxHmkeUDfxW38yH/N+RWJYq+YgPl17aoWDz0/n4kR3twlTNjIZcWkvkTZsnpMXRTROaf/stS8mi6H6/YtP71sV0KmvzT48pXCoqsz5kbYI96XCm98V/zlD+VHzzw5/DY34tjfpk2fvee3686bjOnvJ9GGs6lBscsvj7k42X9Q5xRTxeVj7y7bkNuzZ3Vs/L7Y6z3yv7n2+w+/f7pj8Y7aMdbQkE2llj9uHr66NOPmdf633LUj8nerxm1JJ8SvZqetnPG1vD0k7bF5BccuJAzLMyQHPNrseoszkYc/5Bau6rU3+YM5j1PBCd/ObTVuXcCUdpzfs326SF8uc0xe0eX3lAnzXvxJurLC1bdX24BjP33fvsf6iv2TDhiKbzzdanXsruIV4Yve/0659ZLw2L6FB8bMGPzIc127H3j7u8HzHpxX5J9WvTLlVuf1H67a/GBS74LPJm2KCNn97E20IaBnSX/bibRme8s/O5U5qF18ROz2S2k50nmuYvPrKW1jT1596cwkMbPFmhEHXAe6vjWjd0b8xys7PmRU0rdcH5k0KvepVd9xmZcOhD/x4NqO/flBrb6f93li6uxJxke+MZzt/dnPbdYuEbiALwYt2d3/gfXBqt/iZr8s+b3t1KIK+7jjUzc0XzJxdrPimUJJ+uo85sj6muwF28qzR/eb17VCfe+Bz6c8786zVe6pTmmfFbytT03FsikL3h/QO23Ypx3q2i6teb/oo+aLzkW3unKBWpI10L0tZFVyxuNFhgmjB7rWONa0YV784d2WizM6BE1ZoqXEtdg5cOSgG60TQvoHdJq29rZ9y7xerY+7zQtrXG++Ncs/I/bQioryqaOmHezZMnNm5x5tMyovDh6++pPiHomhmYZ2Ne4fTXF9Hl0179bpW/sjAxcePnfKuqlip9tY07Krs0VWh2XfVe6bvHOu0z0+b8VreWc/+qj9j782nz7Gv9eNhGHFB6sitz7S4it1+SfMv7S6WQK99eE1Dw8sGVwYljTUv/8jV3/JtKa49nzw7KH3Nrcf4KwaEHL12oIT0ypeOxb/Yo9Hs85a+T9mJK15/oshtYMW5e6//fVvx6WT58/+WrW4puZvD/j51dU190vrO2rfwJZ+fv+lxcyDh5oWM02LmabFTNNipmkx07SYaVrM3GMx81fwlmmnge2s4C+n9vOkGEcJ0IOvjCREd24kyM0WOalUwcpjl
hrvUYn5jGb1TqRzD0Ht/Vctobpgj88KntdZnwQvfeJ7eH3k4HYjE03AMDBDKL71gM8InFMGyeRGhfcaIk6Pvhy599IhFBcLthQ3S7gGacDfYKiT93KamI9kT2xxO3QCEhADHU8hNEnY/842D3AiF38W8XCtttICN4ugEFw2axzfaLXA69cx6GP4NL6cgeNwX9GbUxKC9mPDiMKhnqkBdyLZDBdXKhuQkwSd3+WhfVI20GFhHPKsR2i8QfOE3BM4yEVogy0AXDNJj2Bl2klypmgS/tDbqnxRtcLVTfBuUjRyXwVcoAIGGicuQ2KdQ4O0SnhYaNCccc9WGjUUqhFo9DEJskRBPmGK0jck91hVMSBdk5Gn/vRuQcoSmhZcSeEQLzLwCFz0dHfvGlAPEJQy4FVf4vlwjZeM9YDTC+5OsiG8UM84wCTAr4iHsQMaRP0ESAIGyAIGYESJQKEhZPVaD9WJQ71fNEgg9MLBanVSYHjcsoCDMC9KMuYHKLX6VIVR4+ELBUPprm0PCS+e6/Q6wFQJzK7U818Dzrbh+vOQNsYQIF0SnUiGM/CzwdLqTwkpGwe2wT7LUxaNGgdOHaStadvbtO1t2vb+X9n2YgNTC/ClhZjgtdBMyAdHAfSIxD1N0G+jAkwC3ks3Zs58FP8X/02oabH8/2OxDNmK/G8ulmP/txbLYyaYRieNkVLEXLuYjcKtsyYoTi3uvovlcJPRZI6LjoiKjjLFMREoMtoSFY3MrCUaGcOZuJj/4cUyQnRs1H+2WPavXyyr2QeFz0YG/PGQOeytd5+zl39gFseeXDelVDjRatA/yoyXHs4ItL094bleR3serps+/omd+c+UBI8P/vL8+Y19pgUItSMvC0P3/3rDegv99E7trS+HP/jNnLqCfb9d/vDz41Hjirrkdls0Y9SWmQNC/NeldU3NObPb1DpDqH3qUOCl32/trYk9dbXqaNWk4jPFW6edf8oyIGdX3fczv3/l2oydR5nap3v//HFC0Hz3M31WxD5zo8XEmk5HO0Umsy/nrRuwpF/K0tWmROrIKrZu0YgrN1+dOKfjkUnud3v3Ugw/Nx+wdPq61A4uPrTNoOYtVxWeilowM3jVhMCEDpLfy6P7bb3cZv5jZSkjrWOf69M85fQLyde7rpy1/lSzdnvfcyZMnljazN29cvjVYQ8lbiykg0dczOv7S9yCtbtqE9qOAiveyP/j+ciRhaNnrlvafdiYtv1FNXh7+dKuL6+exSd09ft1/5as9SvbVZe36tDx3Avx/asun32885y4wG2HVvknXogunrk+wPHexn57L0y/VPLz/CGT2xjTW0Wu77fr6LWYyMNFnw1g6vrsfHuz/EDrhBFSSZf5wuVOLVdd7L4752BuzMJXjOcSmiWFnkroXss9VGAvbOWyLB+yp13wR4VrjwUu2pBT3b7fGFfZP74d0sKReGN217Mrrw9cbGm1Wp4wp9PRhD0Bt9/8VVSYHRc6qM8OHpVT/Un3ur8frhm76Nugl/YIw/fuOPJ977LXhJo25yIf+lcn7YozlHoi3GkcZYnY3m1q77qhC9MNWnhZXdW16NMffOrMuXlQ7RBp6WZ4JbjqZm3GO9t65FQsf+Xx3OLr09d9gP65/dW2ZS9M6ZxbVBu8Ymn1sdSvHxlQ3XFNeG2X9Sfyv3q9cKGh1/GRPc7Pqnzj46ra6nWThlxZ3NJdtHxnjLwxcTXKbL0/8J+/jNhvXB5g6Lbj44vijvZphhatL10p8du3Nfrp+JJugfvH/5K3qftPl2vfWXTx8usdMyy7nG9/nWOnnbeoa2+cHfvGsDMrAicVj7hw+2RqSeUXeX0WHvnReWZrYHHV8KJp02pLSm579sps39ML5rT08/s3tSI0aQ== |
0 | lc_public_repos/langgraph/docs | lc_public_repos/langgraph/docs/cassettes/introduction_69071b02-c011-4b7f-90b1-8e89e032322d.msgpack.zlib | eNrtWQdUFFf3R7EECxp7NH6Oa8HCbF9gseCKNAFBxIKiOMzMsiOzM+sUYEFAQTFHjWYxxppEpSmigvIp9oImokmsiTVqlBhL1C/ms2v8vze7C6gkMf/zlXO+o8cjw3v33Xvf7953m1lFSSTHUyzToIRiBJLDcAH8wi/MKuLIqSLJCzMLzaRgYon8yIhR0XkiR53tZRIEC++rUGAWSo4xgoljLRQux1mzIkmlMJM8jyWQfH48S1jPue5Ok5mxlDiBTSQZXuaLqJRqrScic1KBlQlpMo6lSfAlE3mSk4FdnAWqMAJcCvEwIzSJcQzFJCBhGJMQxGEWkxzxZ0WaQKysiBAswrNmEuFIHtDhJoRlEEpAjCyHmEk/WfpEKI4lSBqyw2lMJEhUg+pQnmUYUkDVQCGll1oJ5QosSztUYjCzpJKAJVG0Nc7OOg7IEGmBj5sCDsMDBMnjHGWBkEFiA+JQgWQSKIZEWLBjplJJQtIGAGThSBPAgUoiPREMx0UOE+AXQyACJ/ICIHRIkCOjedIo0tLBZHBGuipDAgqBBQf4ZJJDJPtAayFYPCsKCODHAdgQMgn8C1iEMBawypskqOJJBHOqBw5yVjm8AAVJ4njcRJoxcIM0mQUYk+QESjJNmkyilL5euWpdTlAlmmUTEdEioWi1SNDxAgeMJktPB2vQmSiOJCC4DqYT65Cy8VNIXJBIa6GPsjuggecpXsAYnKwH8QAex2gAIiKYSAAwA50Zg5t2mBAyBd5GQhOQUDxCGSUkMY5ERAaLp0mJUBKBEEBDXKCtCMAc0EGW0CMRh/I8wosWC8sJAEsry0jexyGAv5ni4Qvi5bFMLBPNwkN2YUaRkZ6TJ2BBY9Yajh484uF4Xh7AeaV1u6YIDpQGNkiiCPudOCrBJCAJIkVABN7IZg7O0vef2sJJXK81JqYXmUiMAJguyDexvGAre+3JbwRuTFoElGRwlgAibOsTUimLJ0KQRmiXYhy+MgkEW3EiSVpQjAbeX2g/ZSvFLBaawiWLKeCjKnE8fRTq8vp2MUQPBYGDEWxbDU49FJFWEKEYRCnXaOXq0hQUuAvF0CDEoDSIGLZCi7S/o+6GBcMTAR/UEf1shfbDG+rSsLytIBzDI0a9xBL6va0A48xe2s111zmRAa+dtBX5R74uzrFZK04jV6nk+rKXGPNWBrcVGDGaJ8tqQK45UgwilQZVeqFK1QYnSjQINILJlqfX+awBDmoBTkhmFwKWgshn5QOLkF8dKnLE2tURoU5rXnRpkz8MWMe2K9okeiJqL2QUaUFgJERU3r5aja9ajwSFR5f4O8RE12uMsmgOBCIjMEiA0/hFuElkEkmi2L9es5+V1V4Lhj4aREcBdbggMBb81ZavVSqVZ3v/ISUHXJ+CKcGWr9Hr9X/CF2YGwVYO74cq9ajaK9p+S512/FmkvpP2bOXQpxDqAzTq+QeUtfo4qZE/pK5fH7V+fLFDaZQibDvBd5xSFZDIDWe8UkOHR0ZpsGR1kskyKnXq0L+noDjNigQqgJRNopJDpAi2swiAQ+dNqEhVPOmNE4TGB9MbcQzHCDVmxEjv+LwkCrMVq+QqJIFlE2hyo38g6o+BYIKOktzGVjQsZoQhPMS/ZBwaxcazAL9oDODMsAxZOIrkgDvaiiXR4IFzZCE4HmWIsZX74HqdkvTS45g+XqWLj0cDxkaVOh2oxkHyYXSQSoMZhfaYdLDBum5z33GR/rhG50aw55UtDu5p9mWoIqm1x5HfHnl5Bpzu7pqz/pZ75WSlFz5gUcjY1bnbDLufTk4O2LEQyV1wt6jpp6XrlV/KTx0c+DSjYufz8p3j0QdHkv0eLD7/9xcPFsl6m/yPXm03e
4gOz5r0/biVrc9N7pnjljS+rPL90KCyK8LSDSdWyugb8btTBmVuXoJnFcwp63Rtz4iI0A9fzLt349AvPdLyxlz4R7MNin5n1NpTD12XZP20t/pIE6+e9xcV7Z7xaWon7TeXA1cZXRM7+bZsUhYRfnLOvBn5WVE7LvKVbc65+vcfcL9rzLrcvfdn8a1dI8518S1npmcO3tHnRx/tstNEy7Q1Z9aIDUzrjn9mMJZi2fNXRZXm/OzWYtgCzUCPowtGiwEt8Jmj7lwd1OHXnysXf/5LxY70FMWRo7F3vW+VXNigCYtcs1xoO+6Xd8akuB+P6fPAQF7oma79KiKl29lWS67OnT5QnBad6f2uPtc3apAxuqx5yuAXI5RLHsxK6Jz8vKmPruGloE9yVYuvx2ZNfeLy+MbxcuUu3G3Jxk2jszrfJtEjFz87mtnAxeXFC1eXNku6zZjc0MXl98vCXXWrQnvVJJWE9kLBXg02aJcmA9txiaRVqq2SaCuaEsZwjNmC+YdTJOdjHIkPn+rPRkaEmceNjwgc4xMOM56zCpHVFICIkQNVQjLLJUoVEgzzIohxiFTo8fAMrDodxRQ4qfZ0lixxBEg8JsgMI5JgTiXsORWnQWEYR7DwIUs1IMyIZEq9y05qex0GVqWYXWeDw5LjaovYV3cps6P0lTZqcmxO8WiY3QxSdttiz1k1QUyhlmvA340Ge7oNqD/dFtq3bXn9FP1+JwZvcL7JMEfuUHt7/Umg/yupZdbviHWGjgJYGaf8icBXdcxXKTVef5yxNCqYsV6KLOcatagpYN/cc2RGlqbZ5DjREldTX8t8GZGmPWVOe9t/c5oRuISsxtNA6yBQAmxnZCGMwLGEiDvL0RoVfBEDMpQEODBSKRgkwmIPRcJJghLNQAORA72KzPmezNKy9JaG4BaaTVBQdRiDnCIVOQmQMYqh8Q6+PAprRhJVaY36eFKp9lZpVYB1jVPWAQQUqhhiYcHNYMMBeyAJmXiRoqGLIdDmsBfxRMzgjhQKWkSwXcdmPJJMCSZwPw7gGOYENFwCFOkTFhbO9wXNCOhLUoBsgpfqWkjmbwKPCqGpeA7jrKANgrhDgbBAB3jhLMsBBWB1Lwm2gGIdh0cAU6CAXA6qYRmPsxwAWykHeU+l06qAKeq+PWipdM9am9TeOloEt6Aw2hcZa8IEJISvtY/UkAWzyVAJ2DiECH6vWSU5OVlOYAJIrGaLZBvBwU9Raw3n0su4w2YWMdm5w06hRixAwAmGBCjA5mWkSJzlraBLNHtK8HAkhOYNrCMKFE2lQmRpyUSv+HyNiQIlz4dtogCebbwVhT+B1cAVAfSssdYlAAlldthDADWC0wHqAggaGhrYBvTiPAKNBNszGJx4yEkCCMjhgBeLHAiO0jUQM8YAvcwAKXtXXOMC4B6vWNtHq9GrNPWZ2/4cpYAVB4tuma9artan/4smGu98/V+faMAm2SkAc3THwstSYBgCzw1S+IPWELYY1u5IiIc0BzAB75Ca9hoBdaIjEAJ0kCOgj00gBckDzaD1A709DDUEtJLdMnWGGQjFgHNmzN7rhnjQtKMHJpFoqRJwDh6cDTEUnoCBfQ5kLqAd7Rxh1MEjDEgHIBCwPcYEp2IIwybL604ZpGtKkNSsgAgWB+RDqjcb40gd9UvzjjfOF7ChpginWBHU64bwsWKycUyqntEMJxOmhA1T4fHDVDLQTNe12+uOMeGVG9iVfMV5JqTFwkAUC75j/w0ZIhaIi3XKswv5X8gTsZKD1IPcX4ni9WHzNpb/P2M5sMhE58QVvtW4N3pD8K3xcSTHsTVFN3hWbwe8bwe8bwe8bwe8/8MD3ny1Wqf5l054tcq3E97//IRXq6xvwqsaO0yvChoRyiYQw5MTx3mnBo6NTA38/QmvD07ojUZvDNfrdJhKoyd1Gp3aS+eDGQlSr9X8eye8Pt5qTOf1lya8jcprJ7zCmPPzz3u1fr5oxqQTDTeVnqWzohUNtm/THmqC5
pYbjqMdekwh3i0MnqUpP93y3t2DkZ+gPZZ7VOYGf3ZkRY90lJ5w6uDeWzvmfpmpe3je8vyb2LNxcVWXni5OyxjcQjF300fPg5+0U11dNmfTB/tuNCa2eF4v6DO3e3n1nO1K91Gh6zzWFfV3v7iwRK6Qb5R/NnFSOxLL6ngv6tSLaZ96t99++sfHRfLB9697l7Zy2976vvf+D+KruhRfvp2HGjrmsjMqh0xO73hlenx2U3pomwrDnoy2U382pLa7PLjn6aH+xxZ22VE9Yofv/uvr4yo7ZXe/1owJnoxc1rbodDuz0Ym2tKkVXjqpYvf9rb9Nbfy5iB8pypt0IXX6wapPGqya63NMfqBl+YGk22u/DvKu2j4mgjSskm9rk1N9YMOx4A+mX71eUjWzU9KY1Ip9FYU7rft1+9LaL/2haPnyh8/4xuyP77qmrkzrfrSf/3U1PdHyfd79y3Rb1Z3+AZkx+7r1nT9j7TdLxjY/dT32jKq8vFfHwDkt2uyd2XrCrZGmLL1vw7zz6j7jD341edAS5bz5dwxuydsjxDN+ru+r5uiqMCEtOGxzq95t5syZGjigS8cboZtsk4fcarzeb3iT6xM6Hbs6XhGjaHc8rHmX3K622d8ldQ24l4s1jpjkczIzat+Ac5WJQ0NsKSdc+i9p9KiP6r1dIxXRIWE3r41/UdKZmNKebmFK/dbjgO6JNfrjipuZXot2f9p03qYo64MO0zxWLWs22Jh0d9F7LasLcrauDt26pVfOjLAFF+/tjdlW4Jk6rPP5kKrswN5Nrs1qfdOl/5bZC7zbxx4dcf7y4oYmxr3D0VaP7i+YvWrFteDPq3z8869aViwtWLP4JHdrY79HC9022BISMx5af2m984lLk6VDCsoMV4nHH/c+mac1aA8EVTfZ1cRvbYLvYV9a2/2K8sEnAbqVDypSlg2c6HWk+bYNd3rl3T6+9c5H2e2bf5gwUz2rqP00xFuv+KHd/idPcyKDdrfqv3RvWofOh/ueWLqifZPsHl7fxZR9nK3Pjrk+4W8DfvSMeO+MxrzV/1I64t53WUx1xKCAhvMa5fYcNnd+g62mrl7zR+SWLDT3fbjpn6p8cc8a9bqf/M5MbLq/l0U9CFHIWrXcpNv8aNGVy71yjrmtbTBhTkxPzcj7OYE/6Md2do9UjPk648FMfv6CU36Nv+11eO0/OpZ+fHTy6V173XdNn+k79lni+yWbT4YOaVtZvb1tQVW/5ZWNBjFVfm2GdHQ7n762Z0RgwoedTnQaOG4t53ppZmnVD5GtrAV5nKzrvupf3Ssr/LdaxwnThiQFT/ti4+FFD29m5Jt03m1n3fVMMdHDLUn+0zf7FbhPvIcGpH+/9djs77OMd/ArAf6fT3r/Wd7g3YePrThX/NuWfT0bW2+Gbw8iD20+vv67YysW37jsPSYjtOWwzq4T3KZubJazYfMXqwdcUreq/OmZfmWWMdW17/pvbvbv9XB/8BdJ360YnRK4uemp5fpNcx+7vXvOsiL72YXFw6atqXgWsOnI4NyMbdVY7+dJM9yvzr1yfg+5r80XO4IP7esoGnbvWtgufWrFvn/2ulEUMfJpdWxM/rPHozPRkm4LL4Sc+ein1RkZ1tEdulfdO/ftr3G70592s/9Xx9/CT1RedHNx+T9w3qPo |
0 | lc_public_repos/langgraph/docs | lc_public_repos/langgraph/docs/cassettes/dynamic_breakpoints_9a14c8b2-5c25-4201-93ea-e5358ee99bcb.msgpack.zlib | eNqVeAdYU0vX7qZ3RJpSBaR3Q2/SS0IPoSWUQwkhQAoQWhCVJiiCVJEmCIRepHdUBBEBC70IIqA0AUXAo6jceDxHz/3+57v3+Sd7P/ud9q41a9asmUlMeSgyKBiNw1LUoLEEZJCHF4GcCU6LKQ9CBoYggwlxZRgkwRfnXWJqDCsJCULPUXL4Egj4YE0FBQwyCOOB9pZHY/0V0BiUAsTOQc3DUQXvbYRDISO8MZ6mGkrOIF9fL6wt1hptoAYBWZ3zwsIDIEQcCgIKx3tiHHy90N4aPk6KARBbPNYLYxLmYWhgDzUKNjd00PDxUoKS+0J9nCJ8PW3M9GWczmkQvU1NIrxNNHxsDDXCbAIU8dZ+OFU4JiDCS8nW3NDJShHuZOBjaedLbucQ5kQ0xhuiAv7GYByZL8DLRD3C3BYH+RsTzc3+7hPh/5dMuKOKv9M5dZwjMZzM/yuvbqFElm/nr2rthw92Jssh8/o4KVkpOjtBFMn1KAs7RRnwP1yw4H/pok+wgCmj/pFtHfZLdgTYUJFggzb4Wz9Ltd+6Wv7kM/kl/0edn6epCdFLERoARxv4wx3hvt6O4YpgUzje0zRMFQxyjoD7GWDg6PBg8hwEWDj6Bng4euPIcxFu4Qc2N3S0+qE3EWrqgPnRxwtr9U9fgqeSga8zKEARCcOF/Yecv8b6WwYE4+wHxziH4XR0PVGGuABckI5wmC+agCz1xHlH3C/3RXp4k90prso+GBkkp49CYglprfgIsv9g5f72p2AFkLwS+anX9/JC4glyxlgvnDcai0qrRRHReFkhb6RPgAcBWfazOq1EWkG6yhCHxSL/csy0Kn8kEi/nEYAORVYEIYPxZF9FxpYFEzwIIcExpWQq5MjjcgwyONgDhSy2Nv9HoxulDh5BEWlt1kFoFBorK/Qfwmv/zkM9sChkcBrJM4KADC41IuuR1gPzDZEVAqkK2SHxQiBFkLLQOSVNJVXNcyAhU0vYf1GtzA4ZRF5XaVVeAbgQb/KAgpC1hh5evkg5cntCEC4grR7jES5H1lEHpKqmrqyoWGJlbJE2qx0pEhxCViU42N3n76UooqkoKxKExOOCCO4EnIimiJePHBYZICIrQmZwJzOIaKoqKqsrKkaVkO2dVgZS0VBVB5UZmshB9Z3TmtW9NJQ8lJWQPl5IkJeykqecsSO05ocS5KmRg0XgkWlVaAyZRcEPj0TV/VNhgcSiCL5pperqoHP1ZKqfutv9ZeW0EjMwrNbCI5ggZ0k2ng8a6Z3WY4kjGxWk9H8ZCaSueU7xh5EqoX+pLwfDpc3uRoogsd54HDnYBItoIiJFQoICyIP6GVJcFFwUPOTJo5P/bTd5LxzGReGnAVwUQpV1g3Uigj3PKcH1AwkGfl4Wal42VrbWzvoQD6KNvqKRtxrE3wTqDDEONgqE60PCLU2J5gEEcwOsH9pTw9cQZQ+B24X5gM8RiOFmpuGGDh4+NrZk42nAAh3FQCZojQhTJTXTEEUfT1+Qiq2KrzPcwdkQj3I2c4L5GJkHGQb6ORrriykZkR+RKFdZEVQQLgT//5qULv2/pvOfeZfTDwjAhcn9dMO0O9J/rZtosv8Gkb1wQCTkeOZ4CWCDmIBNAAoKAKAg/4DjZYoZsKGhuw3U2gRsYfyz4jFATtSKAIAh80JNDYScnOFCdEs/e/xIHl7BeOC/J3Krw6mfbcflgP99YvBGBnuRv+/Jr2gQWTiZkpOMOVE/seQP7PkTa//AYQQ8gYxtfuAgGNSQjL3JWBL1L+z5L+yFD/rR/joZa2MCQrx+6w0wI7H2duQvPfnlB4IBKGAKGPwY/88muGIAUD8AAKrU32WeOQDQFg8Ap+d+l4
kWAcCJOABoffa77LdN8B5BHn8VUZNfSh8fANgrBwAWZwDgeA4AjIh/DPFfdBP6SzdTAEf+oYAAAEkuAQNYwAuQJyMQoAicA1SP5wFDgIqS8sdDTtTkh4aehoaamoaRjo6WnpmRmZmJkYmJhZWdjYX1JCsTExs320kOTi4uLuYTp05zc55m5+Ti/EFCQUXuQ03DQEPDwMnCxML5v07H94GT9BR3AX8qChGA8iQF1UmK435AgDwkKoq/0j/DpaGlo6anpKJgIFfbsAEUVFQUVDQMzHQ0dDQ/LEBJRQ3QnKSlYxfm4Dqnn0nPLWJr7+DxYpzz1FlyoAsMDsloahaDRadljU0YQD3vNIiqOBJi49KVDJEP3pMZ+X5IovqXODIl2SaUtHTkSuWTACVARU3LQE1J9U8lJdVJ4XO2adTsnB6BDTQcXEpQz/Q7L86C9O2iX4s8+HA8BzBTUZAHRHUS0AWOAdgGJKbLlTTUzmRcKmrOwJRx7kH3SyPBlDNzge6z1npApYsa9B6+b4Vg3J1z0dA2T157mVdvAzrBTretIhhimcnP1w1cemu7VUcbegkpc8JFJOnliAe0ruh806A2BV/FF7bMG8ZhLDJIKv5nVGf2Kg4c4GDH1oGET73VCjeFQjOJG6/ss4fZtCA6hF2H4dVwgRzBqrBq5J4268gN+/Mpx4DnjNZm42dPOrRpXcGB84UGquaSaauDgF7T6Nm0zXJiq/vZ++8eH9it7x5ys36p6XWKnrPdKo9ycJfV25Y8GO9BKKQthXoTKxdrsx+/1wLpwL5ofYtPsNBq5osWkX28Z7aJnKj/xhIywTnKUC5x40rD/ERdOQcPAzxKygaI3aBtXzAa5WbauGg59ujEQ+UvHaavy4/WHR8FUpWSjCFUohyXc/30BVK18Jd032daLY9DPmP6tedcMGA4J2V6aX7MCX3a3mqXhDXhfROPwXx1Q7SM7QcA5EeFelexkdhGd/kTciL4ejzzB8RlNvVjIEQrUTCOMbfD6FNUJ8xlJRuNWRtINSWXcxWRIuuNZx30zRMT563EDGnkhbByokTOdF1kgIxop+e92UvUB1KQaWHXaYT++JFKQ1JvaTsTgUs7cbRCuc0lZDJfuHUeIw2fwOMzY8LPcPl8Ge7Ms9i4Oq8L80G93NasMiobeXw4lBSkyYMZK+YxZIRnP1Fls71cpJZpwm5p+clgDnMKc5LNdRt+c+nsy1sZ7FbpKeV13Bv0jJKZc3YZSx/oGBwHH4Cpp0xQGJoFnYZvuGrCweYHCWs/nrf+NtuFpdM0M3teg+nvTPagjdyzcHaQWlN0P6AahVqgKWqNDL5BZfPqVEDqfuibC6IM0pibN80MFuEIKW92+uF4HboB997y+dvXXnzJ4Q19ExZWyJ848XbJMFocUae4PBXXntk49ynj5mT6eXkKBwrqpa5g3nrfUwL+mGzf6fq5OmJgKdrc9Wp2iXHIWkn9zTWa0lIxNJ4kVj7B0c7KXmzpoipVvnN17h3TxLzYIw7Bw0NfwY0ChkdhomKfhmvYS6+rFwKVAmcf6Hhebg9o62ScbB7FVO+P26D6crw7tWiyOiht0IfzaJLUgGYZdN5vMVBP1b7bwOCKQXSt291vGY/bfXvqt89au+wfbEhO8EY7Vke9rpAJetnKxRCXWglg73hYSGlVPRAt5xnFclJmkiicns/ETE49zqet5WD23bDC3+ScE0Cf1QGbR1M9v9Y0iQJLLJyqtdPRWL3dTosQ9RIuQ2BMx18n0L2rVgmw3uV77abc9rLug31YTDPmCezuXLZn7muiqXg6yTB8JaKP7vtYQ9bJD1xZ/CQIVI6zBm9aDdu24HK1bVrgaaEVAk/wXhyt4vGQhpmijLEQm6yV8KIKfhpsufrG/YqTL+cbL6cXRkbJdZqJBpuflqg+Op/h4KzJOLWyo8sCfberDobXpjv0wcBDYnfDGCRSIMHjg1frh7tzBOBlX3W3lN5Vi5eWE2DLvdKy55WvPFF2e/
ZWV8Fiw1wnxm57z6oHEzfPBFcqGIPSmuipV7vaHw2lrdkf7eYcwnbFTQscd3tRs3bV2SuHnknsLyDDKsmdW14Tc9zUrnbh53bVwjzM37iiIjMsGHDJz2IFuY9Y/vwTh9/i7t3zjpi8ePQ2Kkuz46Kuj2SD2AFtQctml3ae5eYC71ZAOkDkf5S+ZQUYXIbC6NWFzpQylpZ0dCHl0m2bkuXT9Xm4b9JRKpoOfOAt/K1Fd45giHvv50KkVZ37jdlrsmZ5JvdD0X90GncgCKz50fhry26eMH8vyUBSbGpphYRQ7JQ0cftSkEW2nKpxm8hWwbTrn6P64A3EveXBARzJVIBhERrxNMbHlKEtb+LwGeV4RuaY/f4eIkcwJXbabmrL29zIyDiepyJtPJ0PSXFWovT9lg6qrtCKyW7qYl//Befac+8cKlr/vKOfuE4Pdl6EdvvZbtlqiB6kW4aOZUW3cVR3x2SS9KQgNozQTNpLzTFpdaF23WYStmYL9ze0X+D3U2r+/J7SUORwqfn6pw+N+ZwrvTw8i1S6O0kJJuFr6yvXD3yaGSkg0MvQzCJOkiSNFOTvjNICCQiZGtorLedNMaFM4LqY2P6eX5P4pfPECzgXqst88BJCfEjo0+BZQdbgL4IrR/e+rdQy/pvFUeXJiuALXJoqIj/Td11evZvjMp0NP3ifVaKqyrJTiDBWaT7RWWb7xnN2u8HJkVNN1fMzZrH2/ZUn0At/SOmFj174uHP3m8Hg3OJhSFvjyIBPksEupDT4nzLC2oLHl/gbLT3vNhoHr40yCtru5O68zGnEtMCVD8ZR2O63uR3SK7yGYbZdeORM0GmDdCY6/DHQ7L2UzymloW0ZqWXmVupnKeHdZdFWNy5/gVtMh2Jgtwo0NQcru3YXBAVb4ZVSop1oZ44KF+fdOl8Qiz/hmsR98tKecL8QHhD319slZYOmB5j4Xup235lOq/+lLGHWa8AnnqlHOu1Z58MDpjP+kbPjhSEJm9znpz41lQ0LyihLum7A0qKmeI6BsJln3+YXn5Y1vy21f43rz+HU6zRhsGsYW5OMyUKFYOqpIiSoA4EYVr0l68T98CX402Ssr00+SQrezwWbi350NWeDqHg+1Vstt2XSzk9XhPTtAm5EDaGpdomOa6RqnwDNrC1mdGR5+xk3/jxqlpA70KsgeaUDzUotEeVHbxBEnCCJCYWk04T9cB3KTFEArE5VM2GSPdka5e+vJsP1II/Uff5doLjGsB/CX9IJvG7DCObhoeNnKBLblJ4IbK4J32Z8Vn5C4szmpSjIhH39mmxUav0jy0j3gAs8tuEkKZd0xiqM6SrsXn3y5yub0/aWFV+vZR98uKe4ELC+8JXfEVT94oW9ZdVcC9dHlOaVV6V1ks2b4JPcL3mS1FnaiepLOxn9NH8s5R/Y15WoyOe1bjExz6ssKuZgbr0SPaLxM/lCDOxOp1/foOad9dZOq496UKF6NqRhOLsG82lesLpJfAN7z+1lmojIQpqAkIjBqfc1lTLyQquvnWwYpMxORVV+eppcuiFwC10ZdkruaiO4s72Dt9i41+XNnVUhYbsIidtuS7a2ekKpRZC71wXgtH0EuS6zReXhUNqFpkXOU3g/WEJhKOSy1ebNj0l2ceUhWuWRUvcfyXzseMdh6EZ4UzWo+vTdrdByh/1Tjvh0pCl8SEYy40nQw0jtC+e1tQ0aNu963XBW+SpTN9xcVj/Rdvpq+6ZMHuLQf6UqdZX6fshUzkUIAm0lJQO5criuzqCxuAIXfLjjeJp3DU1U2RrZ+lZiNZow+hEbxHxVLmElwSWlzTquZyCQxqiC5jJ44CJ8vuPpeMWZ2ck6ahORDR0NTZdKfM+scNuUQMomFBEocaH+Jl62olaidPU27dAJn6DQKM8b6Nmcho0K0idRi1NOIHnG1OW2C6AGsZas98ukSqPkFw+5oOiYhcr90S6nGeRs3QiP6iyaPuryct4bjlQIGExHd/qstr7Qe5WsMBEjT9
rBLG4lyRs8SdsLrTloYUkD6k/3zaguA8A5OHdGfO1Z8wh47geP9gHJh2bNc7cTm+t1pO3SXam9tIEzlmFonTO+zw+zEAFpOgMMUMFM+BuHtb7O5fxAZ2eJaxMl15dQVJQ90LkYn+LaSxpe7+IvgvwcfkF/6SOvpSdJA1KQ1VT730vCoTNlQtXhgOWma8cg5+Oo06wlJEY6KZsv9HSHDuhzkuIPOTqU7SP4IFdcekzp96zRCWvqt2mOLlR8TT8G+qnqhA3Pi7Bqo8p6I+a+vUVOtxgcHryY+KgA/cwpvmGTv33xI2hr7k4kMbjlaGS43i9+e6PyrRtpD85BqCoYRzxrsm42yCHJ14K7wDB1szfLMlkhsu2frQz7JZZmA53VBVk1zWhNC+oXfVwjYQo4S2vQgUKaATaL4en0rpJ8T9WptnIt9+6RzsMKt8Xc28hNd5G4BWP496DBWcav3tOr8m3LgYuY50SLivCvxOxxbnBi++Nul3mjiBugW9v9mMjXV2KudYsRuCry85mZAaXrOVxiDzgw8Eov61QSkwrZTreNf4V8SS1jq5DWYValZH2htTD1bxy6zwOPBscHab85fx872VMXCg2DhYVwxqbmR38RWQKFv9/YWBnTjqil/B+bBznD/AltfNY1IE+CelPj9Hv9obeRN3a+XEK5+trrzak7T/NJL8gDKszJ2w9STZVfnldLRJ1p47r0HyxV3x76anqOyz1uUmGpk735dHM2FIGGn0i8vnpGvw4XJSewZlnS2rZVs2n0tqooAvRiCY62/11c3nsz5WPZi3nEweTsWrigRWc9le7clUllI9eid2+u8vbeH0nCFmfstOZ33M0yFpnWaXpCedf2taREn4A0FY+2qqRSiauwaQ6rIfT1Y4yCknB8fOsmJyhLzDCamk2fn17lO/4TKkx3VjWrbVTh0lh2nnN05W0j/5V34IBi9heXvevpKKQDLk4KHVSgFog453UBiXJMqBa2tPkcttXYj9On5+XsS3QztRDhQEwkT3uXaijZMZpIsu/Zbw4rUE2Ze1V3+wtc62meKQY3bXk7LHr1UfX95rGqFT+4emGW6kVyfY1i2WCHsZ8FmAVyfX0R89aBO/7G6bPR9Sef5MjPvPa/JsarS4RmJcZU5xnc/tZmZk1UqvJ4ExRAmuCX4i+5qi90wMaJ9xv8d1T23dB4LnHtrX3Et1hKncazgcWNV04HYEnNzIDMJ5IMpnXB+ffccPGUM2I633qfXklPkma71Wr3QPj52w0nAaBCEkWVsi5euik9/fHm+ceOK+I9Mn8kY6UVOEkymfr/EgffrGw5kwb93qv2lq+2gO9eLFkyUVO6voUl/NGIatTFySCPe3ifcS6oPa+q2stBB2u9lf4D5ttVzTCHKr933wva1bwse8KQzXYCarBaW1g9R7Qg9wYPvwwytV+V1DJa1D8zEmWMvYuomZbYdVgfrVP50HG9h2ttjm+7k0coPmvP9AGNUOUf+RLESN+e52s+Bbtw7OGWrKVrSMpqm9CzCUvDaK5SSdGK4ZDmXXYYJfTmHr4OEM2teriDnShHEedmRGcCTjENLHK9nUU17w03zy9eb82/vwiBUVX7yyzoUWkAyzXEBJ+5728qm3d6rih3EZ+KMnDnJZeZ13dgMk5iE062f5J4IuwrBX4mrv8kgWL8xtfoM+idCoeaaNKj3HFZN/nppJFhvob4Nw2C2okfhjeh3sJM5rWZ1Kmll/F1MSbbvohzc/5zm253ml0DnozPZVsPYXX6FnbSxO7yB6iP13ayF+zLSURLganZ9AaQXVAv54eTkWwUBZq1Hs9Oie1ZBo0irn6ivkEXSfPs23m++W6YUicdYyOafk/VOGgCXuJu5rJgWU1qXalFJxnNvvBsEihtHQfLNbe8p05vidvl4melrcK2xoKnNW230R72tdXb49Zi2DvYJ03dPKmlom8sExQhlJ2MGfHhySidp2pMmR28XyR7XEJYM9rcuWiHArvMQKLqbG
PUmXuDSYnmRxuQqzrU3sudLY8aujHWBq9qEKCoI7eKi7riV+euTQa2Kgm0CEzWMgWpxS1R70T3c/RMPjL06HR3XI9SO2P1qhbuOenTauKn08F7MeAqk4NSr+sbOO7qUhAILC1e+44X+StgTtT+glPIw8EHWaeuQbOGhKrKJi4am3/589UtbG2I6xxLz9Ht1NsjPplHQ6uL5mbLpD2smPcJHmEdroaS5nK9RZkc+7W9rGeUr3k3uDrOCRq+k0XK6B8DCvr7vOu1rbkE799hDPbrXASZcMtb6PSPyKZ/wtqfRG9oN7QcsxEcAaYryoy7vvR+cdCS6KDP9xLmugFxlCSf6SkoAme/3+qGuOTMhU6CIE8dPr7Pg09c8M5cnVbocbv+0Wroq079m2pLGc1CDWnJxYAWJGKx3iSi3Pu74PTe+G84WZYVenVRLY8r/Nbhvdmpm1drQn1HonT8ro9Ub22bodsxYiA2ueCbbA3R3ZTP41ducIquXDFe+d1/wurFxJ06VRh77fMQ5st3Bvv4x/GvH47HDiacEJQ+BiyLw5LfDriJ/TfhmXMS4YsB9sv2V4aV/xBKlvB+fAyszH2HN/Rf6acfHbz2MLCkVc1mXJLBwDSR4r7jNuQD/2yGbIr4nw0O5G28X//3hnIMoBmkp/mkhHveq3DG2Mcuka8kga+P2t+vrAz4chVipGiIa96rwhCGCSiD1Lr1elLi0H7g4JixrzL55g2QpP6+JP2dQR2ZmWmuIEXj0lWYY9TaGnWNXDTPPdS1ZkbVcxU5Rnc1RJJU6xstu9F5ynyshe33kyVdCVvVCcJWQ3YNi4llCwsXaDiP7jyIi8vfEYyN3cEodjXl+o11fTmrEgUzsXzAFs0sbrL0xjBCdAe36yWa73W1SHTjl/yKBWOiye1G8/baFu3TJtCxcK6KVPbCObMzraVVv6NizZiH7eiVU8G+VXsaNi2cr+IvwZ7ELLfflPtQEmR2DNDLN7fwhw+tLAtwfRz59EpHQSD3xP6BGmR967Mj6QJjav6zqI9jH2aLjgFrK1Tv543wd88vlVd22tXs1VvoUpZFVrVq8CFeKW8vf77Y4SIPJ4Jz/FNX2B5c3wC4gNPAPlW8tzfVGEW1rtKrC/vdob664xMLUlMRjmVnoqEcObu9RDct5TMbG/+WHHLbfiNhCRbHnbSusTu2E0WrzH/RjbTfs+NXB5KdVrMVR8iWGUKfJz91bSwuZDh7GW62kkInRD7R7VdUuz91OhJ2v56bBvU+KZ51rVW0g3vvAwTaIZH2/90zpCCk7eK+ZNvGWmt0sI0EpDhArBHHhFe9NXv/SaZZn/WAtill+NQQQ61rjRyc99nT7Ku0ykrCiXfQGHOxwQJzy3IG6g/wMaXnShnRdvbLLHYcYKsZbPn4HiErZWwna9IB+XLSPxU5N/2kR0A69cn0AIOdDNQhhsJqrxFsK/6qlUmsdaW5gKZYbm+/pxBV1WRBXGDf4sHBvZ8arjP6B2bPQvINGDNTSWyge3SR1OFTdDVSblpZRRhuNEKoyc/SHDXZCbeq6JLLUiSJvBLIoFkSEW3/WHpHQQzMAH326OUjWhnrHbyisL9c5Zxy10akPqIuENEinGDeUNJbMQRrdXYwo4c8sdq0YidJAtIVVSyxMPejmbvDHUmYvh0nhn4ZBH9zAtjGjN5ET0dDNBWgTgcq5PIxVtPNza55SYwrvp1wUXG5vvGwP5SkpScuN+NEpcDCYGF9sC11yGT6xwtt6BnGvJBC3lkGGUMfo8H0cQ+IDUQfmsgjLXSCR+TuztshqaRmnZrZjoUel68Rz0k9VQ/AQX4Ff1x+mFdIy9kHh+gFiAJ3MmPzp7b1Xv0+FZD8fsHG+tbzzBXd0Mw6MI29asmetPv0o68PQ/zJUZmWAHtm+OLqM5sTrZa/b40u0zLeWiN0r60T+9etS+/MjCFYVZeJfen61EQWwlhrov58s3xe08yZBr3xqQoH077Y1pnnuVilaEPGxQBR0V
XCamBdWKt/qSi6lfCx0+HxdnQWrs3PtSODi3FIwxzRVbSakao56iwFvfw05ObDd5q9b4rVXs/furtRCRJUVUHkBclmagk8itB5yqBHM3oMXKvJXKEqI+0gzts5VHCPShHTioqMZPr9qU/rk2JT+FLuLtvFZixFf0gJ7qvdJ7FJhfYUDMmOXq08UN4O9s+cFu/C7FGzGpby77QI9OykxLPr8FRzF447JEtgLXsH/eJ9LqfDbrX+ue5RZA1m0KP6WgUhn764/D8Pn/GW+ZABbTnlxzPnYGp/x3kfubry7cROTdL2jVhp+X2STLq+yzQfIbKZgY3UKnj9CxNWKB10ojSQbkz92gYJNheiR5Kx8Eu8R8N9gM0Q0GbreO7gtrD5CirAYLsfV75ev6a4dUJq5ohpca5uora2yuddmh80IsEpwmpwxu1w4q2AxxutMx6R8/ZzWaGVt2RTKzM3wXNd1u7Kte1Djb246VNCdBwsS6elropYfeavZ4t61pVyhl+wxj72VkhC5ruo+F2MSx7YyapjVk9qyKE4leL1w33j+wc3TB10Q/Wfo1ssHnjHQ6x/qZD/sEa73OudQqRfFzQPNsJtBiv9SHt3ryD10kzPdhXOCt3iPPG1MqjnFSGVPbv0ymdl6nKtCgyPViWPBI9oepA1s+VYt2mpKe8koWw1teLHXyi/9+hnh6nFBcGNqfySixNQCiuzV99Okne48sHBgX2HaOfDsQK5rYKMUubdTSkGbbPB9aSrm9pEzY9wlv+xp5AzPIdj1yK/zJ43AO9eM1Fryv9oPFgicOnnrjKdozq/4NG4LdBCvuN2+yVg2lg0LkQcNYfJZVxkaEEgmzNSh1gp23IebZxNdNQbJSpYuxdtVtM0fvIPH2p1KW4ByQy0qLqg+2nuIKoowE9grJwUAn/YsP/rzpTlOk3sqeXxcOnL7JGbvCKiWHJ5q5SdYh/gLFkHb9WkvxlQVaFmrI1tstnsMsm8lr0zPhcXn85HPly8FhCm54HeuCM0ee5bj1OeM/N8PXvGRLMxhOJbEtT6s4O5vFFSwYOtuLXrlbsXwqaXNyGmhzaa6epREqJfHWfB62DFhwcIXEiiPe/gve3Cx233OlBz468On60iXr1s2vaXIAWEnr8aO+KPVWLd2M7YWLiH8TLxa8VkS9qWtN/6Et3SPOeSxUUXwX+bO+je9RRR/NkBt+LkNnNmyVHTG390Kz40Y2kdD5yF2KQqAhSLAGvVsJTV4AvFiGTMo4bdI9Se3O6O82HLN76oJo5Ho5FmH27y6q6/8Z4o91q/pf2odSV8fN5+nJhiF8iTW5GRLyQi+3aib+qC2eJp533RDIfNvaaRNuEW1U32Fb8NyCP9WlPvL58PRvytXM/YbcIZosUmudR7nkNynXV59IUqBNPZaz5Y2Q+OdwqDo0dT5yH7n5N4aqCZ9TmUDQJtlrBkpN8V8OryA8QS6srHFOya4OXpz4UCmMmU7bHnrfI3EWN2N7nj70e5L9gKY6KA1w8ypqCafx4ShfuaH4e0jSczXt/J2lu+xgeNgTymjivnOR+dWx655Hrh/sSrz3rfq3r3JCfU9s5la/YpaF3olV+JJA4GYJG903IziTEYNbpX+WVZna+NruTJtqVzsrN+0pOWesbC8Ypy6xg4caD23Ua3quKx1rnuGZwj3f5htWvuaFAuFoTBzs+nlmo2MnVnxLsBzvz6kLvgRzLQ4BxQgX3Y7EeD1OyCsi06cf9y2Q+y24j9h7LDqzssOjtX6WFG58tSNi8bXIGpte9Hd93PENtNqmhJKTJuMnnUCro1NTPNM1ZPMUASN4DkIhkCYns60yEpeq/vT4wk5+7tYiieBi7s1ma2pBief9Nv8+iiWsKLnm+Fp8qKc2cFngwHxwaOdxbNvkny9kfe16AA8d26yW6Ipc5KHkh+E/jkCqsi8as6qvyzX+tcA6H8g/VKuK7VmuzR0IbF0plgGfUvA1M3nJ/e9Awdam4pNz/n0eOXqecVll
9ULGPZDskM7FyNqzhZaLY/UdhLM/39zCbua9Ele2eSEWn4Cn01LPt1zJo7U2414v1aUeqqhsVjMN0Zrv708zGgE0W5KePTlafa63TEh88/blbAK+Fls2r+EHg9pN6Qor6s//qFXWukwQF3cfNcntUnBTFWre/cCBtE3J/tLM8v+FmnNBlz6iCuyVTiKB8SdbQg0HuyknUyA8Vi36fFFzRSv4LpJQOpUwNXqAOp+dlM2fTOJtbN2Vv2jTsWRcw7+CJ6r0f1rZxwqdVLX+jTityEXLAWPitls2XDV17+4Ol6tcgBzukJykT3wG6r3tmRP0+ZIelVGq3S1veP10mmXweijG7nXzuN1mTstitvobm+N6314Fuo7mL5tFVY7Jd3l3qs63p0FydLhmy6NHVgu34vVitcCwSrZrZnL9WufWM8O8K/S3cwcwwQC101Fk9sNXw/ON+5fwxEOtwQM3i3fmDXU22afieUQKycbzwGEM++cJre+tWOrx8A+QwOTpVPuxX+Yv5ebgW3KbLzj0vXpE+5Ya8V6r4VcQwYJH50LcjRTNfzHOaar+4Abj2UKxkrPxGTtp//Z7S2TPDs4rO8Fw0uaVL95t1DZid6L4fxmS3z4rTDTJ21YeH5QGbZUpxYc9wdYSkBgJnC3f7xOBHqk33CkXEyoZBbnRGaNUB3PPt/AAJ0Dyg= |
0 | lc_public_repos/langgraph/docs | lc_public_repos/langgraph/docs/cassettes/introduction_dba1b168-f8e0-496d-9bd6-37198fb4776e.msgpack.zlib | eNqFVW1sU1UYHiOgIEKCLM4Y3aEZRHG363fX+TkKAuIYbBUcMJrTe09773Z7z+Wec8vKMoQBMQ4U7vSHRlQ+Sod1wKbAQpA5CaiJDIzRuElAJWoiiJKAHwHHPLdrYciC/dGcnvd5v573eU+bWmNIIxJWRrRJCkUa5Cn7QYymVg0t1xGh65JRREUsJOZXVAV26JrUN0WkVCWlxcVQlaxQoaKGVYm38jhaHLMXRxEhMIJIIoSF+Le5YxssUVgfpLgOKcRSCuw2h6sIWLIodrOkwaJhGbGTRSdIszArj1kpCjWvZkuAikhDk0F5HCgwioBEwCJJlq2WxhozEBaQbAJ5GeoC4pycmyNYURDlHCyVzeOwmREpxnImmRnEdKAwJsnxIEFQ48WghoguUxKsZc6mg4AIr0mqSYYJLgODOICUiKQggJklKq1EAghjDbDWVQ2JrEMphooA5Hldg9Q8KQKgmk4oA2YyWMFzBIV1Oe24gvmAONaBghiCYuZAViANpJk35wBgCOsUsHgaIwSgGPtmIeYoKrslItZlAYQQgNnymKMWt5oNSCYkSHgRRSHroMGisjEhjUpp0hssaWT69J9Wh0YyS5IxrgO6mmYxrqapI1STlIilsZHdmTKRNCSY5GaC1gyB4lAt4imD1jS2iggKTGybEiIm1Oi4RT57GXFIpRxSeCywBMbuyEpJLQICCsuMzhRvzjWtTyNVh5DKQZnxnRz0MtqhqsoSD017sTnGtoyMOLOWW80pU20cE6FCjc6ybB3F8+NM7QqwWZ0uq6O9niMUSorM5MrJkJWUVNP2Q0MNKuTrWBwus0lGctB5z1AMJsbOcshXVN0U0mTa2Am1qMf1wdB7TVeYvpDR6p9/a7qM8UY6p9Vut/o6bgpM4gpv7AxDmaCO6yRfd0mx3XByNg9ns+/JsiQzaVPR2OF2OHYxrapMfWhtkoWkOmlKsImg45+1ZvZ2e8Xc7DTP5ExMzGDTMQ4HRL0IODygCqnA3D1g95a6HKUuO5hVHmjzZ9IEhh1GR0Bj0g+zgczMDr+VF3WlDgkp/7Bj77PcaMtcNpntI+UyjxYblvnTSLhsNlvf1NsiNbYgkmJmTDh9Pt//xGXMIGrsM/vjbD7O4QkMdul2Le4Dw3kOvnyZepJmPayiwtsgb9STRYPbooevx2VfnMoUzUmC8SE7B232RbOn1wo+tz43stDjD1SXxGqXi0pofz3Hy1gXOMqef8SlBVFPjT5gF9wer73EyYe9kIeOEp+T95TwYcHj5p3Q63PuiEnQSNmtdhDBOCKjvf6nOT9kTw5XlZaN0Tqjel5Z+Rx/2/NcJQ5hxl8AMp4VrKBkFdKYHI1UOjVbcA0lmXtlWbWxr4T3uW2Cy+GFNrcbeRE3c1Fle1ZA1wWSMF+H9N/MmuTgi3RsRG3Bhjtz0p+RzxpldfeXjVs/0JzaUHy+XX910p8tVc13H540/XRl84LI/kNVJ59500jusV/7HXnHXC6sv3KmIbfGdqD3e7F/26oB6cqV6oLOGQV/VfRf/PunU9carl4d88jlS0edx8JfORe+7u7JfWMmmfjp2FlRH1w7eYEU74eNnZ48GMTneqKNH73bcWTr2aJXejZ9vmTXpe/6ujaO7tzScuSJ5OXxsS7H5vz8pVCIldQdXLP7bbC5e/yyc5G8pUtKn+w+0X3HiXGFkfaX/L+GVj/01s/GY6njF6c5C7/mX3uwtGdr/ON37jq1sXdiyRcFq1afrl035WTe6Hn3bNk+4Sm1mfNP/eW+fSO/eXj0e92TkkeFcQ+QJuulUOGjuf1fniUvTyAXzudt+3HXsWUHvPX91dsOznvxuDCtF8xtuNgyKnjvJyfXT7F5K
itT7UUDojUfTVulPv5+vqfzj5r1i/954UJX17KBETk5AwMjc3pPj/rhN3b+F+M8Qc0= |
0 | lc_public_repos/langgraph/docs | lc_public_repos/langgraph/docs/cassettes/breakpoints_51923913-20f7-4ee1-b9ba-d01f5fb2869b.msgpack.zlib | eNqNVXtsFEUYr4AvxMTUR0SJDCeKYve61+0dd62PlKMtFSiVO3m1tcztzt0t3ZtZdmZLz1ojBY2viItGwBgFOa5Yim21agQJlQRBJYgS1OIDlSjGR4xKECK1zi53baEEvT/u9ma+x+/7fr/v25bWBmRQleAL2lXMkAFlxv9Qq6XVQEtMRNmKdAKxOFFSVbND4Q2mofbeFGdMp0X5+VBX3RCzuEF0VXbLJJHf4MlPIEphDNFUhCjJQyMXN7kSsLGOkXqEqasIeMSCwjzgylrxk+oml0E0xJ9cJkWGi9/KhEPBzD6iCBpyHESJAVgcgaUI8h8DqBjQKMBkqas5DwwGgJSqlHFMZ0bhKRhqdOIFkcGgirXkBFAxSdPAWfFl0zC4z9A8IYhBmQGxrFKZOIZJYrrBTMRAAgEO2XHMxjGx00DAuKmKFX6nUh6FuyWgfeG2kbGk7sB1QDkFDJwQotXxmLYVhgk02AL7RMW6aVfR5OLUGEn78r8A2+FdqpKNbdaJnunllXeTpGfeQm/lND8uLVtKgnOr5ruaa89o5XAuqs+CaSBqamd12lVd46pgkyigJsbJYWjyQMRkdv9ABDGuNqARUg8IP1Oj9vEkAwEIylFCxSqoMRW/pPBvJPrdNa5ap3OZ/tT9r5Ls0mkdMgxicOso1CjiVdba+iMK0pz+aZAnECTBK1CCMWJCAVeo6CsQs+kyGh3OhoKobKi6zaqjLMjlxGmnphHNaDXiHmCtjspxlIAOeTofGC5D1ZH/IJeD7aXMUHHM1WxzZ4+haiC73OqMae0QCZHIYiRzEfG6WuMIKnyYV6bihDKra9h4dkBZRjoTEJaJwhNYW2L3q3oeUFBUgwy1yXYDHPlabfUI6QLU1AaUPu1ldUJd11TZUXH+Yt6s9gztgo1l+HWbrSCBDzlm1lslWRz5VUm+TTAQ3VKhu6CzUaDOOPJ1IGiQQ0rrzv22oRc6lOt5HCGzqaz0aedXh9oQam2cBeXZoTNC2lRZG6GR8BW+PvTcMDFTE8hqDVYNT5e5HEwnuT0ed6DrjMA0iWVroyOproEmD7i0cRFJgugTRM+r2S5pCMdY3Nrgn1K4iY+OzrcsWp7mIZlJW1KcEbR3T2tmL748e0aWza9zclPTODvW9nDczAMFPhBCOrBFCjxSkeQt8vpA+axwezCTJnxOMrrCfARplBNSmiW/VY6buB4pbcFz0t7rGizL4Pk1NaEyIfNS4GTZf61UoSiKvTef19LgsufjzDOmpEAg8B9xeWcQs7rt+gQxIBT4wnaVviJv4cJecC7P02+WDJ60jYcjmngey0E8WWtwXutz4bG7vrAtA1pQFesd/sz30EwpJkWSJKJLsfq59wQrFS8Ox3xvNAqyRkxFYPz1igRHEI3M6gURSZoyJRrxiT4ZomhAVKLQG4mIouIVpSiKyhsaVGi1edweECMkpqGOYJkQhHyRCCFHNlbrtAWVJbMqgu3zhTkkQnj/wpD3GROM0iFkcDlabU5qPuAGSnP3OSULrG6/HJCgJEMJigFRhgVC6bw5nVkBDQgkZW8H5zW+LH16I+26YO/4Jy7JcT4j2aqS+utLxjzc//j+m1ab+lep9U23XnXoih+3rK96PvLs2N7ixrlG2fP+6gP990W/+i74vrro2MTtf62/Mf3oHU+9WZt/+MuyprdPHmehjilHTpTu+O3Oy7/tG118TP1o1q5oN/qlM47HK8/trRhXPM5rvHdDMH1d8veGJdvFq2Fd+3ObN3+/7WBu8ZjJ+777vN//z/U/nfrk0NHW3SuVcSUXPrBITOdedIvm3WbqFy8bdbz0pZCypnvnhJM5Fb6FZX+kRhzbdPDoD1Uz4BK/eLLnwl
U983JP1Eyf+tq6vuW37+nr6Ri9LvXrN+D+h5RF9ypbV/w9YeyTMyNPn9r5EFDSI6z0M2ubo79fyg4UadN3rk58XHvl+O4TOS+/qFx259P9L+WuWlFzYzlYNvngZLTu2tCM8rEtT8kzzA+SH8qrRu0ovHn0IvW2T989MgYsoMJPVf4/pceW//zOby/89eSWrrxT4d4Xjrzf6V0CwdoH4o8EvhjT55paWYyPNFy+b+zJzRPf6Paso976Nasf7Dl8V3HhZ31bxbWjj+/J/ey+J7453OKdnXtt0yuBpfOP/v3zyjVKXfOvW2XPLx3/7PlyavmtO7pHLdi7v2fTm9dw2vr7R+bsTvfuOzwiJ+dfAXrkAQ== |
0 | lc_public_repos/langgraph/docs | lc_public_repos/langgraph/docs/cassettes/introduction_f4009ba6-dc0b-4216-ab0c-fbb104616f73.msgpack.zlib | eNrFegdUE1u3cBCkSC9SpEoRlV5DR2roSIfQhCRAIJBAAqEJAioqHQQFBERALiAgvSNK70ivIr0j0jt/1O9e73tf+d9b61/rPzNzZp/d9z77zJzMSnC2F8wDDUe64b2Du2FgHnYQDG6AjgnO9oC5e8LQmIdvXWEYJyQ0A6RmnOHpAR+/pOKEwaDQMkJCrjAPVzs4VBDu5iIEd3UU0jIyBdqZSaCgqkhHmA/U1R4kLWYh6uQEcTNw04crA7VE9UQgbmCEli/SUUvUG2XvauoEgUOlHcyFEVoGKDeIqzrWTkXZxFAVra1iKu0AETPEyRo6mPs42d/VUOIzF5H2hYLUfaDq0g53VaSxdxHCKH1npCTYFeEDETPQVjHTQ1qYG8LtxQyQP+xCQVpeUBUXbRVzQy97UW9fbY1f97sgJx8dX3EpiKiIkz0Iy3cXZIq5i5B2wPkuDDbX8oGaKwtrqgo7WriautqLaSFwthD2xuJSOmJ6GAszb/RduMtP/8BmEi7mIlJIM19v7A/5f4xxfDhfjVwk9Z1RaAtzPWEVR4SDuZiesIW5ljCO7qhjJMynCfrLVyCOLmwvKo2G+ChhdIzFHf/y/UcefseEkxPH3IUr/+L1/Zc0B3NRU08wLj/6WKSWhai3E0RMTw1sZuEIBpm6WpiboqEqyq52uBj04Xquuq4WWLCrBdoehPAEG4kgwWYINzsNA0ldI3EfnE/O9iB1X4iwIQIMx8mYa/n+KatjJo21MNNDQTVcJHWV/qudHzJ/swEHu2qKgl11tRXtHVWQCKSH/HWsExwDy7RHQn0asp1gdlBc+T3MNUHDPASUHGFumJhylA+u3twE/lF/aCFRQTHcWagEgcBQGAE1NwgSCndzjMl39IWj+DmgMAeEHQb29hc5JuO20O1cFaSbG+xnIcfkusBgKAE7BNwL9ocHDI3C1TYs5C0aY4fxRAdn4lTButqyXWFotJ0j7I2+9p8eRRWqqAuo2EGcYAJGP3ljMjQ0jf+N4rdGMA/cKorJhSCQnlCcOx6wdzhODC4aAWMfFCwmF+6KUy/kjII55v9S+oPsgUTEFLraeQvgaPKikkApcWHhgj/ldGBujhinmCwRYaCUWAYuMzFvRaRFxKRF8nXs0BgBXVwOHOAwaEydsSeMn0NUnMMIhuIQFcYBIqIyYmIywtIcIF3j/H8kzdDOzRGGjsmy98HA0DmGMBTSA+caMmbsux8nzA2KQuLWPppTxtKP09MDwSnD+WuFWwlZCdkJusEQgr8DE4QgXa2EPH5qsBLyEldEyysbK+trmUj7OsMwktqqEmh9DSmQkYimgYSmhy7GQFhF0kRHmEdUWdrZBGjibq6JEjfVcjTUMTAWMdV3FLaTRvmCTNCSWG8JTayYr7MXCldHYsoQCIRHVF3FDCLp5OZgpuyDhumogd3tnDU1LCSxaC2Mnau4N0zFSdrVzthBW4lHTBV3ct635ud09EB6onARQBwEcI5z8nPiEmyLSzCnjKSwuJSw8P0MPTWdmDE5P060Jy43aLStwz+eeZwywvycvwKzxSD/k4q3uNowVLKIKZWCSEsIQyGSYjApSaCoPVBAzcwwUxVXjbhpcfLETYvk36YFKCOOmxbhH9OSaWrn4RNToe8Bd4S78XP8t8quUfrp2J81IqCEQCCxAr+YY9Jv/1w6QbgS9sDxNt9mvxi9+Aqg0lLXVAfg4QEAeLgDcDGLN6qpomJ711BfXVNH7RehDYBrBMIAgCtOryFImcPcAsxB9PWXxI9mB0GjAP++4bj2h3/xDggA/veNBApDQ3D3LdzF7YEzjlNJh4PpHH/BN3/A9r9guR8wFoPC4OC7P2APY0MVHAzFwTcd/
wbb/w2GoDx+8IfjYDlXhCfkt98AMpibiRHuToy7WABogCEABFD+Ef8vFuQbAEBqDwDAj/6Ns08EACoeAQCM479x3K8BAMqHAEB572/c75yg7DzsfqIIcNclBwcAYDsbACC3AABo+wCAK5Z/JuLf+Mbx0zcQAIk7HAEIAAyH0QS4ASAAQRwkChAGiAAkLyYAKgD8S5d+nLhGgDsvE1++TEBw+QoRESEx2RUyMtIrpKTkFDRU5BTUFKSkVPRU1LR0V69eJaNkYKSnY6Shu0r3QwkePk6G4DLJ5cskdOSk5HT/63bRAKAmxuPCE8bH4wRcosbDp8a7aAKw4kLC/3Hi/TXvBJcJiS7hE+OR4MjmVAA8Ajyc94RkVwgJiUh+ZOASLgRCaiKa67QiV+k5lUyfE9MxSBoYY+KKP/cPcolKmNi5ewaVDoiJ28ekF3HzoGNLmkiUVQyNIB43VNXNQh7Gv5nBab6Gh/dfzeJU/9SMo0lQ42r8Eh4xCT7BJfw/adT4NCJKBoZ26Z8J6K6LuQfFFH28TBt7lZPLnlv0zczWxTiADBcIjo0aoAg4z3nDeYmTg/Pv3e3JRXil8/ftTydeS4Ewm3eqRFoXgGFfAOF85S6A9viq/lMRB5BLDkw6h7VoP/nYMOxEIKxq7sQQzT4oQz74mb6jwJXFdHy4f1VHlVIg7pYWkGJrjYzodCQqwty2l04elgbeCtP4mn4P6m2FQbB2u2o0XAB4lmYLGHbkKU0dB4vPWU8pGlveCS09YyvuzZdz/nTunb7GfzzQbCW06Mv2rjdHzuTTuWP6atdx3CF2y+Z/zipLee39h9Sbvp0KZUsUb5otvKHSMh61F4Ah2GPa88ve3nmPTntVhhL3hEgwAYRe2+0PWzWmDsaGuLhOf0rTL5ew3VS8JOsqcfONueINnYNrhnubE/b2CvcWMawLhRMw90ddaslZE60s0fIFIt02KILoNkWmSEBM5aESRzAxN5XeVQNqJuPuuQuAozbvG87B4P+W+P/akbXwIurLu628KdxugiIeeG6z7Iv66H9cIPqkeByYDx921KkY900zNUZd7594fAJkxmeJFhUZ6V34yDkgGI7cfzLF1lcsZJJ3tz9fIoym+MrW+3buYimJd07ML2j8HrBibYbwFcn9GPnDTXM+cJlq9UuC3SsSB96XhqoTznnMVE4EyiyBbvae3MZA5YSHjhv1Rov+SMxe9kONuDLfJTk6eshTgJGH+CWBnPsWAaRPR2kK782dx4SFjp5Zx5T5sUdW8DmXldgLljua8y/xQ2+wDc3dm2VhVCLnZgt7uHKmmLfHQF6Z/np3kc+E/MoZD48VubUjaPzFbWs0z70M0umSrIfCwkSyL8XX+7bps01cUjRzSilCpE5v1AxU+/LlZ+fMts7yhgq3jZl5EZtfg4owVYGPOJnXKZrIOyiyIGMkjePVQsx5PIN9YvrzgocypI+ZAt8dBQb7SUtnoNanK72ZqYtW1Y/8PxRn6EEqgzJLTzFoms89n8vQEwWfoMz86rcsaZlG/WzTOtwDNmUJCUtTY8VTRTIxN9wrT+8DV6x1yai/B9d7STq8TmiaDVnd7BTt0D4Ya+xEv9K2/nwi7aUn3B3db+Uys1jqlta19P759zEd1w94n++3r7UDy6ZpU2iB2FvrNtIPRlkrJjCsdQxhrD0tlZfwmNfVm304+Jtq7KVCrFpL7LqMo7U+f7CYWZjHfqyK+EZwcETG6rWSbRFeub9YP7Al5uFsVuKhUqG+yUSrpEiyFDB9TLk8nUEk17RYemIjNWaXrarW859r7Ar/uWBbjaVYRETxvHPRZxki2ecWj5oYQB9QKIHLmbtE7n3Szb7vPmZLJWdvzInSrToUFdMXL4qeXp8pcAlp5HnamBFELivlE3RI/9RoEZMgJCuGsAj6PnlvAVZdokAyroDHRew1nXH/g/L0VSNjNakk8cIwjl5I4BV7d/6Zlhd2HBVes/dYiUejmneClWPC9hW3eklic
2Jal8mBWmealvJ+Et9RxRqZ8KlClxAsARXxdcvDGD4Qv2+yPDMoeUKk5tSaSXE2lXt0N09+p+8CcMu2IrTik7Z6eTq9bVMyW7xjvS/7Av836vdj8gKNUwZJGbnoG5uyDM7zl+B2/BtcT3uRwK+tug3Tf+j/kqvQUotoXmHGisTza+aW1GW+LwZ5dFJMJAQHrj8vpEH4mqQeLQc+T0NSIh8mRDjzsbUqXJ0o4t/ZWtPRFS8Q8o9qarBaoq3i3t7jLsRfR8zIPuV9fGwe90WPk7K4lO23l4Sh0THjq1a7+dMBRrDwCwC9MPv3f3hQ2l4wi9G7ESmT76VEJyJZR2ZC3Cqb1HBtdDbctQQsyeZQ6snzmkZud5/TN9ayzXZ9Kr52MbGG06rt5dSJVJK+4TWStfdTPX7lLBOUDgzac+InmbHxru80pKFl4pFJPkTeFPHzdY33j2mXbg9BTIwHaHWvodu/cnL2/+cq+dW1uGcGfez2stJAPkBugtqnj0FHj0rMe8S/bUoX7I+yA6ZP7xGfF5yfHQcGql7TIZrnx7ZnHF/XvyvMHl00fSNq+ZE8HjUVNfX/rKMXioAEkLv0WV2TwwvsQE1S7vrIxJea10u6bkoUYpdxyPuPLwAntalIGXJB/G164PznM6rsPxgp28yn40C7yqkPnjwhfvJ/73L3KHzlLmt8sb+fLPahKB+sEJmkTDKB7gPgcad+JTkjlVnfL3TnhrwPJzrPqRErPzuLnj55ocZ4dVbd2GFa0lKyrYiNdZid6uPx5HSnPsUa5ZN7Z27IhfwU0zr3tcvJnjuhJSFRXlLXG5Uon/UvFR0n1HXpjFgglMdFFVgzFuERM6cf+1mrA0FL09nmW5vvDx9TDi+egxu/F95xqJxrRNRHdfV/agTv4EUOevGg1zoGaridRA6CFb25fQZyRwrk75XsjCa2dn5p070Mkb4RF5DYPBD/3LmTZ+beJJr5EIQ3VhKfv61+huVpzBay+VrFQLY7jH4EJiXK3Bqd5HI/zO0uaJUZnOxnO5CfDjl1lKpq3Y0t/fJXPHp5NpV7wd3V/KBQz9Am4YAK4PZAY3XjLKOiZfIRfMhlhO29RYXaM4dAM+EdRabH8Rl14TV8mmeqTYGda2Adejeycznwo8InodW37j0HHELhY10oViABNliJmUqDI8X85dWZFuPFKSB/kapaOoHqHVXVf9sxdiMC9zM0o4Cu0PPzj9MXAJPgwuEvx5gXJy6KW1rvbTKxxt7+HT1B1EarrNZP+4tnuTos9/O2RxIjMmWBH8re79dtOMZSCm5ELGERMtg9UjorzptVOvlybJLDIC7na+K3m1ayS+xmYpqZ1TfRr3xx++HYP54/i7uej6qsTU1rHatlQipyzbYUFdV9akykfevVA3i8wcyyMXlHFiGza+Z86abFqyFy6WcVcR1WIeyROUwJfqZQ6lZaIr9Xc32XRQoonVCywcDLBsfpqN0LQNbqx0WmgJALQFlBYFe8hNLMt71u6xPIuWFxGdbkEFTLD0cYUPs4/ef12OKCvC+jfVaWdshjfvvM9Vu2ZmB5yQWAIO/yKsgTFNr1btlDQWFJNGaqT8aW/UVJtouz/1Kp4AK8UtR7FkHTPaa6LRfatjzeMDPoOZHb4p4XSt4w6w0PLswpY1mvKLwtZVsX65kc/zEHSt8s/qCS4mEwlqMyGqRbk1DaN8rGeQ/a6CyHsNDBRCFSnmeWlm3Ilq2jNdAscj1wAv/bodQNZwwh2cplOqrBMtLqDq8xI+90OSpHu3Tje/0f9V6PFBhriihf6RNAM4AYqnL5l80r098UOHqaOpRZlXMmkOxt266Z6XpJSpm6VsmUZc18sYzcgKQUTV5O6GDV5iWpi5EBGlm1P3nv7S008AyxXkVKReKifT1iuJPLP/wGlTYL8VgPXBC+tjuMDTlWMay5W8P72DV7aRqsE5Iil4+JKgAEbVxvaFkESGL8swNqNuuuoYPBvFosRS1fx5g6B+mK1V1oe
jeWa+vj9qOwzhuZ3ksBm6Mb9VP00qJSPQUze6bz3kTBIRJpkXWd+4ue30h7eVqQYfHNRT0H0uvwCdck9YaU8m/GVbq2ubs92oqNzuMVfC6MU7DQT2o5/D3cbcZ5XlVJ5n4eMm0jwuojNViKUZ+GiXJKiCNFy5VUlq9LnldMWZ3z2J+XL41Evwd1M3fVro9FAAijCackTcukeNKXl118FQRkLZvXYpnWh2J6/d4+j94c2q5qAWnWgFU6q0Jvju6SxHEt9kgPVgUNsDj1KQg7ir287Wta4FubSBZeMpotmxUfFTRA23P4hcvliE3j287VvYBnA/GlBSl5ylldjw4BGwRHsttTL3c8zxPMpJv88e8/2DQeR4fIiGl9LTH3ZZTqZhnQFlWe7AfHzmmTSsNwTN/wMh+QVl/yGgM7ueI/nGmh/ch4ZcCkdf+xAd3+YrHSgouvjFlVA2L4QZ8XM4b0Zphuy6O/1f2r11sHx4sFZ0W4l6MFIq+68JP8DETb/klYp9J/fE8Qiln6rfTbbry7AHy8/dHMOZlLPpCmi9+tzTbH2YNf9avzneVRC25TbiIvBobHD/qWiNZ9iCH74R6J+6yRUW21kTusuY+y95DCil8ZckJFbFhFaFlTUkqo3zrqh/GvZRqn0vf6MsXv5dr26pM61pF1HA8JLb3bViwNMOgq3rjK+6jky2PYfecXO0MKRbPi4/g6UE/61wMWLG7M7NRJ3Tn8xtcjpt1mRLhj7wAuaXIQtJ1/PgKJdifxF+BbTA8xajGFmya0W7VMByW4mMOS/DSZHwjIO05i5Fv/MDoVSN0V41sz/pCayD6ueHPRwutZ3Kexbyqi9WXaRysziIP0RFtw1unuL5WdZUShH0YksBtRR1odXKLgYKOOKa4AXrYl8wpjsYOEjrmzhkIHdva0zyYe5UvP5O9MGKlBB1pax0YjQVTtXphYwraW98KlPoJjkcuzd8Dfb9xszL1hv5Nrj5lWSqxUw2d61boIq7vKF2OREL6Y/8foxhTBayIPOWzpdlUB80GoeOTDq/I+4ar/9vlt+3AdKN8RVJWXOEHmwCZZwTaGGC4LQC1PM8+xZBRBEkvTHk53cAOdeiYnHtLsJEUWUJBUJQtMvHvYyyYZp5qQ7YS2nAfVz6OlX1euf+4ifcJq9kbHRp83+tWX6vHD5XeNElIim/4elcJuM3LJR7ro3llxvBKAOwqdxD2zdfX996aZbrcTl1ZTq/jXI+D3LQz0+1EA5IL85iiiz2bjwZTF612jLFtWi/hVs2f5XshOOn4yF6xsyoOVwdhA34M3Px62Ry/GvjcKuDYLUtYnvGy3LzrUcXHsdJgIgEMSvL9GpUqC2plDalI13PFRvVO7/U3f653WtiA27becIipJ53di8S4AoEDdVtvN1ZA0P/JDxwJvlU8a4vUlH562q4D1RhgkRZuK6ZU4FkVqFW64dsxqTAS2yE9cnhq0MsEaNw8EzTk2kzLOHKvcMYz+WfV32r7mwFwNShxo1ujebqx1tFZFQsoF5MHqX0OpwFiigNe5SQOLpZNXOdqYPC+bijauL9g/fZcqPH2bw6VFkVgbCxd5oB3i5dAjV22sxMAQl5fUavJBy2E0x0Gymy5/2lGkEeW7t6ngecvAAdNanl0V22zB+O60TM3Ull+0tWNk570mpTDiAc3MIorS2yW5S2sflMNVGO0pl+narMnzdiiD5+ncfJtUE/SUMr4KRRbnRcUiC2QN83Inff4vauPyjHhXIe1C/EakpHhVr/uoOEVmbO2wn6GSoL7Iw9ZrFSmqHWtxE3mQ4ojHNAMsHhsL0oXooAVm/LHojYkaDW81g/GOm66TFG4USqYLMoZKEi5yGzf04I1hGgQvyx91weHhO9+627p/vzQFA5uWWA0CSd/wHbm19XRWiQ6cUSiQXDvYQQRu2DARs6GOiE9VyuNMV6uP9dnJCp8eCFm+NEmPCj22TGAhQSDOutGv5j6HQ77MXhuhZlCFu0jddLPtN
IdNqT2PKZnnb/7g0M/c0WZHlGcXjnFaqRLyYdmWix74bV7Iam+2uKE9A/qOwqXRpu8rSz/LwEgPMQG7CKDQnqPHrPiMo+32vFs1QgPJJ3rEJxJabtbFTEcpC0wOwFmtrmqukXsRbZthkyeRXgMG73NCt7QaWWK0+hbrMhFgpWUlVknjcY/T75N1j+vsuycGy5Mn4oypip2g6C7/rHqVMBVt1vJWr2Bw3+ImQ9ufO+Ac9LUJffiJcrm+dGTrGFdyUQk8PGCa8pbsesnbZ3nJRgzxZSMaxV9WwbL9r0AFrXvZ1QmRu9nUa0VnLkizltE0TavEwcE3nX45Tt+03Nlq2IdTf7P+pIs6GhmGCWxmZdTdavJdDwT/5kyxZba/z92eoqvV5E5kcA9jg9ReEtt/3WK2sdQaHt8V7sk3snEdk11KiVY5ivAqxasiCJmqMqobLMVmSUVZR63ZXr4A9BM1nApReQpQcDj5l8vyvKjhFkfTuVhC5gMaRPk4kJPDj4umAeQKjnSrhxO0Kzd0VgNVUDIhDLnuEtsvreLl6kUbbjCX8kK3OvAE5ArhHOtKrJTetz+i9ViWdtkpSunTBujK1BlNmz4/S9Z2U5fuvIaHDfSLXDrIZ/grDfCh7+W8/Tr/D7KQPXQttSCuhdSUK3Wwv40jTz0Df90ysWG7mVYTULmuyf24LB/9ZbGoANXjzCaxQkZ/w8XdAb7A69b1LIXPoNoida4EDES5qM+7LVENR3Q+AcQbvZXQe/HsqHeV3DK4JudGeMuXlnW3EFbSKu08U15Ih/Y1W64HmAInbon8SKmpP91QeNm/PxYX8GZQMLglI4nWOuLdkkK9QhXKPfjSV5LJssea2uAVq5bYa8MgFt+shAPDz1+ez8p7Keb7XZ91iCnigPomUx2wPri0HoB+v3PKTfnlZ3w8AjJJxqpWy7/XAv++d5Jvmu4kG+NLbe7DAxWQo/G9WIWR8uJpYfg7xpHIfMwEVn7CyRQLBm4xYrdhXyye53o1pqQMndZ0bzUApy4AY+zphE//sRVQ7rbyUqT0HQAIBZPwOCmSgjUuADwx/SZc+Q+yOFgGxp7G9YpV18NZ+qwOaAdj9Tc3N0n1c+vWHRRAwD4++bd1m/b1mgx9pH0N65/SrvYVq6ol/fOPxQ9sLW2fRkqlcWttppJvj0p1ZqKJakkkJvY2RPRGmdP+Cp1bMFMI81lCogZ+5Y8vsfRVkMr7R/oW56OOWxL0zWhM6r5/ZRP/EjW1x+t/qCUFFvjIZz1bCrAO+VS6vwWci5sA2gmdg85594FgtZThHoHssFEHiiMtyJn24QVgAHhsexp1qr+O5vj85wecOpNvEJFt6fogiw2Jb+ir2xr1D8Cs/4yziaREKyiSpkOfhHH/TJmHPjShbAjMpbNSc82QkbsMbTDn1DsxRrUD4nkMLJ5sOXv4cJsBr0blKXNM8b6j1IecW6ssXs+bnCrFwM2jRQnthm73VAzDeRYloxAyrPPoVyOEkd111zQXrqxF+BLxNOkyXZUTyC0TMxY13h609ZYawQwtQfOhr+rvulRpcF9SP1n3S9A7fzirZm1nZm3XkuD2turplrotg1tzmlNCq+x8sLkDwmqhm0ZKuuAeUcBD1+pPSPcZ7TGeG+8K3+Ysn/FJuhjB1sAVCrc3hgccJGUGvGj3t7c//JG85UoOX5wICDIend3I98yp1JYXCdTvOjOIaXLYWvy4zRuckuoN5ZcrLpXP99N339ZrOPCG90L282bCx9GKWR7e/uV5wSmriQmLfuA9pJ9zb3nq4RSzzXInz6Q4fcq4lyxb3oT+6FnO1mi3RHzOi2bLYeM9k3yDGEkmbsw7hUEPNXGDjZpxZpYHwtFY/lXCM5GzcfWKR6fQ9u+KBg6ceVlzOdlFI66QqZgAUyAY0ddmrlrmg/9mkSPhSHWjiMONwcjKiPbTfUuXyOee9abyYk8K1Squx/oTuQkszER7fGQtr0r/EMIsG+B4pLvSISM4XBK2Q
qBq8bMQiOUUTrNxG2GE2OG1MPuiyb+PqqrNfBbY+oo5ORd+bKjpQRbuwKsZ8/t5yYIqN8Ast0wAVa8PGCrXhymHsfJeRLAK0iu1zsXCs9sSbZXxkcDVW6+sCXmZTHC7gW7upoBi/MB23/vjnmZNWxeAzc79F/nsoBX0G3k7/6lu/8D1+n0I9PBy/vdxjy+hVWj26/nxWS/S0/bTy7m8hR6t75fsI8W0rXogXf2UerCBv6sRnKsHnFx3P7yWlPRYIoPyfoVdwvto+e6VcWVN3jZv8mm3em0v+atnzFmrUeZy05ooP2uoVGKjgGJusTcDPM/H+Xx8C/Lu5F4z8ys/sd627qPEtgtAMzPadcR1uB310rs1Z62p7U3rjP3TCHIII27vRXQHsLjMkFajl3uYyn42FtAfENG/B3rH5rYY9phCfDX/uL8x5gLw1rY0v4jFM1UaPDlMJ0op/wrz1vBLvfck3ij2iDvt89aMbb+MtYh/8qEOveeggYgF2KfWH/iHOSw3jnyM3xHluw5a5ggmllciYEvXtpFrGpB8tZw3ZLMvxs6VvHw1dcDmQMSWO2z5cvq/wC3fOxCb26qm16hjyE0uRZjRalp1M2fQKm+albhXiOm/jNwywYZR+aN51vrPshxgDMQ5p+Yhc4lCNi82eP/CC8k8+nQ7kJ7jcFF7JOtEc7aJnaXdd/cD/P8n/uauUwk7ydNY3Zu8yUk+1lwf6prirxsQhU4/30vbeByIb4Awlf3bwExR65tcxji9qqrVH5ycZIU0EuhSryTPpJmCNvOYslyJs6mMILnWGXIXq43a0xqfkUt4zb3+sQo+me7+cVqbjm1zX5bYpa1poww/NILBrBeAS/ZVt9jc3EcJLwAhX6pmT3ozvg6Efs6gERsKoEUGo4mf1LVcEsR7EWfxnch6TWa0aqmQjabzlkkkudIN2fdf6Gu1DhMJgm3sZ5V8Juzes5JIGhddCzF2mTi0On/TPlBkpNOX5GWHVKhVmBRMKYyvrWHbP8AQZQvLkldmZH5x1RVL/XIaP2HiRWoA2ZOwWIcg/BwY+TsNQR7Sr4pbttuZSx5sorgC5hanUBvMf+zImSYrzxRbdB52bK/EvKwQFP2YL79yYkDfor6uot7cBHTtbDmjHxg9MP782UerBY54iR6TOoFPdXMng70X3XfiyudPb4X2K2y407VCWwUh2ZycG2rnQUMeTt1qtGFtzbbxRVZP0ngKFLRv4Hd99QHPLSEDTiMmlj23HRD4j+s+YO+dZ2J9eO4bPE3SZqz26JKM3yrqJTr/7jTlHzgiDH+bK5Ih94W3o2uXy7nQjykqpnv3Si+80O9lYaza7qW+1X/GbQv7JX4/tCblVrXgCONtvu98zoKaMHT2Xq2ff1ktaoEwKCvgDupLqFqnwK998FWDAIilotCWXQ/OyqjlshUI6rZKI7J5lVjzcvyzn2liyoNQHzQRFcnqcKgcRQ31EESMSqZD0E473JSSaq9H6PrDNPYsTC21TE+AJsd6Rtx7SR7RAOa0F8UYntqiuKHJyTpgYdoY8EFGZIf1h6BBqeWcWX8gPv/wQ23fnJiiss/CWlYj0r33yJ6rz33AqpTcJrJV6xWthTiwbZKyK8QM6nUuTkMNWlo63joWE4psxrRWzEdkyr9GV2XSloRoku7bSM68bljYLr7bLinn5sEqd0jgnuhQwR2tSuCnWZLEfk88FmSqeM+LuRA/nXORyo9DhBb29d0pvRcZYqwdsyZmqERZ2j2sc+gjowVdwIQLUM4vTD7QkA0Seiin3fGdpzq4uxvoQhmZ87xE71EVNKirhEX0pe1hnPzkrMbC5Pbjjc4O68GsrxhVC5/DrDerq3jByWZf7XuYks0h3O2fJm6YUTGzQ627uzQBQNeeGcfD6QLRaj7WLMtHkV1HZAXp2aDx3dlIzeyHaF05BA2iz5pOFPgyYXsH2TVVQb8loyxoS1lMe9kt4roRbVJFWplXj3y1mxQnw5jE/fCZ037ip
GEi4ReJ0sR+ku6LpMAc75GY4YClUnFrq+YbzjkbmgrKxDky3Vp7VcnCEMfo1h6e9Y9evcjr7khpgSuwO5tPqsr5Ir2tIh2vavEHWI/WpqyIYCRYoptuTpYxQwUhxIHwkeJoopeNLJbzlZvApxH7qnaiMurkul/lNhDyYxaWTNx9ytD2niNWZs5CWpiQoDuco1Kitt5MsiG6I2iIGlZKRHX7+6W3phruI0WSRdiUyBrt62NMdolFrdeZm7nDUQvxoV6VB9FeHI9yjJVmDhbkHU/s0hoGLa9G/n3Q21/Te3P/UQw1w23iJyF4LdYSki56Sr6ZeklfBJEILwRztUaYNtTprEkRKX2PzbN5dL+jpS6SEvJ4pls2WIGqPtsqG7zlZfGuM1C3vlYwekczIq3tac9ONRDgQ7I2H62k4LXHu3Iev3dFGH7COOD0WZEQ27vdN7Jo9nKn2rVo00dlq5wEv7tqf3TyfOHwmEVJUK5qSpz5wx0UDj08olyUJSEBpO9966GL+rae1B66ye9ukwZYqmPuPM45HulvNqedYEzbvgCUtr9Q6NyK0lsdm2lzlkMwoG/U3CtziFvM7QRgQ0WKfXS+jRUKjmKmnPJWwmE6U+G1EiqiknwgPYuQqQU1JneID3YUYbP5Gjmyx3cB+FP/QIP0N/Xt89yAslcmn2eRtLqn99oTCKeJxB9NSjIzCHI5Vzok3BWxSJJ2KT143DESuu7/taZgymzhPm10P519q9PTIkYdc/84Tdvren7QjgMG6W7U++4g3prC5Ykq5Cl+6i5SaDV7Tf9Pc8OKc45P3/JimphEGw4VnwSmlfZeAB4WHZR5ZvOt3iSPorRtsSF6UbTlycTncbCDRnfzSp5JTriWy5/s9pNR+F8Atr7YDukP4170kkMWVeBpQjJV3voepQ1Zzx31VgbaY0qhNfMeIdCnsyF70EIAgyjMILRBMw+N2SoSrypn7YZ1y3p5tlqwBNqqU/Zv1SfjNv/MeIsWZ0xvbjrl0qiZJfkZRa8Bec5j7YGjAY8jerpZCef3eJqWzZrPtN+26jSFR+1lA7uKJ/+nG4Nsoqo09hBl6zUa4yx77zxtL/MM9Bj40kfuTPLGazaLpbST0q8Il3Lwc9twPxpSLgALBX/svTKbNh26CpIY8j59wSjefSaE1XfARFjeIOGpi95//r2tbV314BPz5ewaH8YRoIckIG7wwcByLwrUeLgBN0WM8JWpe6QaNtUPTbbaVOZOOwf1DkqXPVna8crcTqneozMcb71V1rBSbOL94mldGuIYC66GZxZ0WCWs6nJTK1vxbpDaDJff2C5tkdyJvfQofF++1Uit/UHsfUWN3ufpvoPHgXpi1yYMTItYpp6HIwqmekWibesbpNZ7RdedEoXF6DH65YcvooaKrRbp3CVFog1FAm4lOWXG2gS4bXNAvBYOfY9eKBwQtg/XWXXmjgmm5OQ6DKVSRNHoJs5jivWu+08VLZLHbAgIhLO+xCNKZ02n6ZNiIJJS+8NDRlbVcujIgPV23GFTt8ykXb6c2aJ/3IazmcbsDktc7fZj0AJe94fby5NcfUxsTCvooIrkUKAeSdZ9aWbVkfhw/6jop/pO37mOUrHeZNPnqxNvVFXJak+lm80H8UVPfGYQOmMnUnsUSvFVSUr447PuDEJ67+YX5LFEHrmHUY1d9YW9K4gedkVLUrR+u723BZLFTY27uHk0VPqNeW8c8yIA/8sWdixT1vvZOBFBpx/dTWuv21ttc8VVldw1kso92Z9AcYUBlvoR2233lsLVF1++ZAR/48l4HnrJtIIpE6nZ0lGlSHQF7Ke3MmIP12Bsm0vwWXimqSNSesQP4Xhas064yYEsspamaw9qwYwU7e/TMgQ1jQ3ZL3LpgBTD9sYi19NszJq5DbrtznI2ppl5vJE3w2lmNXjteVajIFPkKp/yGTIsndO/Gw/e1/SQWpzMk00P6mMbr3mFP76ocdYqxRGtUkGa/wbakPFZxoaV+e3NW
bsQVkBQdmJlEnmW5ctYTaiAIBmrXwXvDI8N522OrZPt1JKz971fnQWetaL+NlhasV567qfc9iQs/p++LytZ/zFzP0L++TPJFxrHxWX5bB7mdyzyBffb3xYJwqcKckiIZliCXBP0RiiTJ9pOHZA8IY82hSTgT9iJ3Ve1Lfn1Fyb3xjLJ22U9rSulnjeLqRm58r1pv/naOyh6txe4Q1e/mcoBlRfrPm0Wqsk/aJyIlAtD6qvhN5GFvvX58DLrAPMd//pKKnLx1v0podAKFh/6L6nPiJ4wUersBIpob67MEqoMyE13GZ5U21K6SW0L1MnTUVKJsiUuntu/mBr4raWebdJ93tLX9ybbX9ayf2vxl6+cayvf3b0rd6fN1zH56JktVv8vLf2Hfu6tMa4ry//S56yt/bkozomBz3d+O/pbi/bMRiubWkX2m6Dfjm47BFVV0ayuD3bJb6yvigV8IL1HEDC3ZPjLzLcv7O/DxsvvYqFVKLCXgnP8y8CzpJmThDrVZvvcGZGt/izi4NI7jys8ZjW5X9BgBd25Y3FQrCZ3jkvYtcqmAufwyRqSDWpquNL//I8A/7ojuhj7PwTt52A= |
0 | lc_public_repos/langgraph/docs | lc_public_repos/langgraph/docs/cassettes/plan-and-execute_72d233ca-1dbf-4b43-b680-b3bf39e3691f.msgpack.zlib | eNrtVc1u20YQRvokxKKnQpRIivpjkYNhGGnRGglgt0ARGIvVckhuTe0yu0vZiqFD3Vx7YJ+grQ0rMJy2h6KXNkBv7aEv4D5Nh5QUI46CBDlHkEBpZ2fmm5lvPp0upqCNUPLOlZAWNOMWf5jvTxcaHpVg7JOLCdhMxWf3dvbPSi2uP8ysLUzU6bBCtM1E2KydM5nyjAnZ5mrSETJR52MVz/5aZMBiDP/k8gsD2t1KQdrqt/p24+cWs47X9tt+MPxli3MorLsjuYqFTKtn6WNRtJwYkpxZuFiaq19ZUeSCsxpj52uj5OW2khIazNXlIUDhslxM4akGU2AZ8O2FscyW5vQc48K//ywmYAxL4af7n63Bffd7HdwYF0NZrXJ3lx3XUKuzvuc944xn4PKlqXoqlduc/HnLZyvP1ZG723TKVD9+9PyWfee4UAbcT5Yp8cLmADf2a2fjhW0NMTZRsNxU51aXcPalYNUl9tBJlUpz+GOj230tUiGrH67qQtDftbMCXu3mBU4J6VAtyqngSsvFVm7dvSmvrttZ9y6JwrBLPnYm7G7QGwWe57WyrhuMNhjOYxxa9Xw/K1tO0Hf2oHACLwgdvxf5XhT4zr3d/Z+3V1g+B5narDoLe72GNt/gzDRy4L87f5+QFTtJRLz2oN0dkBZByICjpXBcCN2Ap1ZMgESyzPMWGTPLM4r+SF6K9SYiJdEJMZzlQMuCPjLiMVDMkKagCeLBOm6s0mYah2BoLpCiaO6vjbE6klTCpLCzG+8QrXW49e0m1osDOp5ZMCQKvNHA7wXevEWEREJKDhR5nZoaGC4Frp0FygTFjdMzCpKNc4hJVA+4RZROKUdQTaWxMCtjghxAq8nUEbU2p6VYO1jcYqxQgKZxuepQzGZNtlzJtN4QDBA2YDOl7erADxGgAaaxf7cwHCl9aIo6rOGqAFpjEnIqmvLWSLrUWKVxu172ns9fLyafvklM8IOnpiMyfLvYBW5dVuuIC8fAS8zXqQXC2PeK815xXqs4/mij4vSH4cuK88HVCVkyjmbMZKg6ARskXW/Y5YO4H4+g3+vz4Xg8GPT6g2444EE4TMIe970R7w6Tke+FzPf7g2TAE9/vjoe1Xk2YFAkytF4/gTvxkLygOFoLrVBSDH7DE4uPbXw8aA73UWxqLpIDFD2O+4mrjV1EVNhURFxypD96HB4xvdSSFdfw+8O3yrU3MxYmu0uvd026jPqm6la3WuRd09i1R0S+UqXDNDjMySAvkjJ3mDGiFlbbJrXGFqWlU6ZFrUJ1MzDJ2p0mSk+w+Igk7nLqZI6v1ivo8b9Dw6amrdplHuSoh5nKcX/evoo1KipZ/Zd1M7H5/GAT8psLBwiSwDHDMpY1zf8HVVqiEw== |
0 | lc_public_repos/langgraph/docs | lc_public_repos/langgraph/docs/cassettes/persistence_postgres_6a39d1ff-ca37-4457-8b52-07d33b59c36e.msgpack.zlib | eNrtmnt0E1UexwvIUhdW4Cig1keMrCAy6WSSSSaFAukjbdI2fSVtWmHrZOYmmTaZmc4jbVoqLFtFQXGjFF18rVJbKKWAgghYZEFAqmfBx6rFs11fgIuLKD5w0YW9kwZtge7Rc9yz3TpzTjKP+5vf435+9+aPfBe1hoEgMhw7rJ1hJSCQlARvxPsXtQqgWgai1NgSAlKAo5sL8otdq2SB6Z4WkCReTElOJnlGx/GAJRkdxYWSw/pkKkBKyfCaD4KYm2YvR0cOab+s14aAKJJ+IGpTNLfWaykOxmIleKOtga9METVSAGhqAAlPgoZhNWyE0k7XaAUuCBQjWQSCtmG6pu+brBwM9jEhRZERJRIOwYcSxwUrKDIYjMeTInzMyCezsfoUG4ZWnihGFTluq4OqqUp3lGa4OdSSXZZdjdntnjrF7LtXUmB0lgzF/PiBVBHPVrEhBb8cglkp0bT1c7UUI0Xmwuu5WljHXG2DtqFh3nnZa+2SJsT4A5LGCzRUkJPpyEXqVuroV0/FD8n6/FCu/nPrLEs/P7ROe9GZPN/RYGBVFU531IQJp7uy2JlmxlwRuy3NzxelDW5WA2Y9pFkRpaWyifSzqNNqMhd5iGzWHKkUgobBzWrArIc0K68jEihyeCIRBxZmPZkmjsFx4M82D25WA2Y9pFmFsAjOSngkTKN4Gp7p95dk5dlyM/nBzWrArIc0K3dhGBeKKWOoknBbqmiLVCfr/S7RNbhZDZj1kGZFldjTMKw6YvBS5b5svJB0+iQHJZUOblYDZq2yUlmprFRWP4oVmy3RecX5mCFizRc5MeDjS3zZua7ywc1qwKxVViorlZXKSmWlslJZqaxUViorlZXK6n/GqiyvXKY9uca6nDxPgCRQV46hug74icHNasCshzSroMfss+XaDTZXBptbZ0fZAg/OFEbyBzerAbNWWamsfgyrefBJiKNBMDZdvIQYOSTEsIxiqcylHp5FSQBkCN74yKAIlDJBiAcCKcmC4gnVofHS/wPFH4KHBiIlMHzcTOsWASyBgXVwGmjZpxQfJ4RIxSw2HzwpQH8SEMSYc17gYHISA3pvFcaxC8DKSg23auPzIPq0SvHnkoVFMqwfdoAyRaBaZgRAx8xjDvpact5KQMFJV5qlNQBIGkbuSRjXHOBEKdrRXzCznqQoAGcVsBRHQ//Rdf46hp+uoYEvSEqgDUJjQWxiom1VAPAIGWTCoKX3regGkueDDBUrNblS5Nj2OGREyeXC4TaFKUL6oUV0q1WMsFQ+zMRqTy6ISAGO1eh1RkKHbqhFYNswbBCIIhIkYVItfGx8e98BnqSqoCckLh2KtvS+3NHXhhOjT+WRVH5xP5ekQAWiT5FCyGR8pu9zQWYlJgSirekFF4aLD34fzqDT63WWjf0cKxVF18VOKbFvhtvSzwmQhAhCcdBX9Am0heK4KgZEu09WVFC+Cm8olWLwrGp9rSEHLoFKF5NL2MrlQsZK5op23iGl5aMBOSznVpalewoRvRkzG3CzBTcieh2q0+v0iEzoKaGi2g4yA0UOLrvAGXEYsvzhrDyv21xdWFJizSoKemiU8Lg9OXSprxj4s0sLpPLaNNQeyUEdXgbl8yIFeGUuK0u2uow8j9nIpBXO0MDs5DBDp5qDVWxlXWaFLkPvyrDbDJUOZ6anxFjKlOUwnJ3JYa24KRTIsmKRvD7pGQ16BI1naEKNBKocHecaJQhYvxSINhMWo2W1AESeY0XwuxY4Z5IsLmqGXQleeak1rtx6Mj/n+4ae0JwBOzTa6QrI0zWYSVMMeA2GYkaN3pBisKToCU1Wnqs9PR7HddGG3OgSSFb0wabMPLcAWqmAzFYBu
i39oq3fqbQ+RKnkD3cqBNTynAiQeFbRdg9S1KtZQ+wZz/SuM4QT/CTL1MXCRtcoHU0FYEtsig/DDUFxCYMjITHarEdRQ0d86Fy3tcHCUESPIqh+q7IRUHCFKZnznCAhIqBkAW4C0e7pIbJWWV6pBj1uMMFpngE3Iyoo06BY9mZwIRhUnKHhBRDkSHpbLQK3SBBkQgzEEPuOy+3gqjEokJ670ELiqgArRtfocbT32NHXRgBKCKWQ7z1hFng8f3Gr77wZFSOjGd3W304EfXJahYXE5y4cj/t4EhXba88ZIwwd7Z4Mbyp8GG3C9QaAEShOkT4jQdKYGScMqM9EmnATtj7dhqSTVAAgxbGOi7ZmlDmtefb0Zz1I39ZB8mPbPhxnOZFlfL6WYiBAOtG22A8f3CwF0AJ9FVnLopsIymIgCcrgpTGTEQMUkllatOGct+8arVnZaWPixN+29G7ue4btun5pYkLsGAE/Z89KRfPuexgdN/9oqeeba6Ylvqd5/rAUkk8dHOteV45POPmHkY9cndS99NDUrgWzh13KF3486uVDn72ybF/9yStHlM3ZO+eNRzJen7agZkFimFtwif2LT188sWXssZ1rj+9Dv5XmP377QvcB9MCoKyTQOb6FmXC/sa7xjkzn22vv3J9y9/t0UuuIgw8Nd+evLT9p3ss1rziT4uG+1Xy2+4tdHfde8ZGGmp2698oZv6o/UTzp60nYxPaOpl88+sGENNM875j2woaRz4mvjX/rkrVXXWG7/FjPtk+WlyfQ93SNLK1kZ3a8/v7MxXuaul4IVn3TsGVpitzaduXSmr/tuL1n5qxZyOl7X9sXRl3h8twbkE/XPPZXIzXGyP2CvWP5WasTP5SUQu0M/QVfvWbLZa2pifZ9qXT9wayNiw3Pb1vw1Rn5zOa7dnm7LR26q+YdO3oz/PF6c6W/5oEblhfllB6duDWp5+l73tgeTjo959Sxt/64uYW5ecrnSXJy0p1kSteumeWjiR0L6bvW/hpZcibr5dGTdYsPPHqEkabJeM4lD7z6Xue7y5Zt2J+z5pZN65p2fzin+8j4isWfzXvI+8ue7vphCqgRCfePubYnC1L7KUWrk69TRauqaFUVraqiVVW0qopWVdGqKlpVRauqaFUV16niOpWVykplpf4Brv4BrrJSWamsVFYqK5WVykplpbJSWamsVNGqKlpVhZCqaFVlNaRYFVsCgsdqNchBU9CPA9pW4i402u2Owc1qwKxVda+q7lXVvT8Dda8FM5p+WnWv5f9T3bvKRBA/T3GvCfvJxb1GCqNJI+1DceDTY7gJJUgLgXpJE0EZgA8Q/01xr5ckjNiPE/eK54t7f/8n5250XMaJfy5pOpDX1LhiU+ew4Tc0LhzRNCcx0/m0PTe0HnAjVx97YVXzU8go3axX/pF04JObho9rmzh6nHvKVsy5b9KD1+8/fLJn7uwVa49s/vMzrTNWJr/Q9c7Dpw+X31O3qGts+9cPXl1KP2IroWvChwuxr9Ywjx32vz5/1qNj73ziavm+zcs+uHSPrWyC3266+4GZTn/9thvd5LjRtUsTEky73zm1Cb/97pGTV1XsuS3x6cyNxJhhifnbShvHWEeuWLHno0AJfdXxgn9tTf86Y/+1dQUTr/vN+p2rn6Wmjp+W9/Hp+vmOuuCb1q+OHy//uHNHquvVN1LywdvfjOosLLmj49OpI7rPvLPzwxffZW9ceIo8/OXjSOPn9DWTcm/Z1HDbSqdv+5zqmV+++tiJ1JtWnF6+8sjZwsx7pzddNuXBy7cv6Xp2310fHhQ9rsYTOwNLbN0TOz+vrHlp7wQ/ltN69O9j/NpDttkJvWpc9Pr2qSXDExL+DRAZX1Q= |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.